summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore25
-rw-r--r--LICENSE.txt45
-rw-r--r--engine/LICENSE.txt45
-rw-r--r--engine/docker/Dockerfile23
-rwxr-xr-xengine/kubernetes/org-onap-fgps-dev/.helmignore21
-rwxr-xr-xengine/kubernetes/org-onap-fgps-dev/Chart.yaml19
-rwxr-xr-xengine/kubernetes/org-onap-fgps-dev/resources/config/solver.json72
-rwxr-xr-xengine/kubernetes/org-onap-fgps-dev/templates/configmap.yaml21
-rwxr-xr-xengine/kubernetes/org-onap-fgps-dev/templates/deployment.yaml76
-rwxr-xr-xengine/kubernetes/org-onap-fgps-dev/templates/service.yaml41
-rwxr-xr-xengine/kubernetes/org-onap-fgps-dev/values.yaml97
-rwxr-xr-xengine/src/config/planner.json69
-rwxr-xr-xengine/src/config/solver.json72
-rwxr-xr-xengine/src/config/solver_test.json72
-rw-r--r--engine/src/config/version.json9
-rwxr-xr-xengine/src/requirements.txt7
-rw-r--r--engine/src/tools/README.md69
-rw-r--r--engine/src/tools/crim.py216
-rw-r--r--engine/src/tools/lib/__init__.py18
-rw-r--r--engine/src/tools/lib/common.py73
-rw-r--r--engine/src/tools/lib/hosts.json100
-rw-r--r--engine/src/tools/lib/song.py153
-rw-r--r--engine/src/tools/lib/tables.py308
-rw-r--r--engine/src/tools/lock.py92
-rw-r--r--engine/src/tools/ppdb.py91
-rw-r--r--engine/src/valet/__init__.py18
-rw-r--r--engine/src/valet/bootstrapper.py121
-rw-r--r--engine/src/valet/engine/__init__.py18
-rw-r--r--engine/src/valet/engine/app_manager/__init__.py18
-rw-r--r--engine/src/valet/engine/app_manager/app.py716
-rw-r--r--engine/src/valet/engine/app_manager/app_handler.py307
-rw-r--r--engine/src/valet/engine/app_manager/app_parser.py257
-rw-r--r--engine/src/valet/engine/app_manager/group.py139
-rw-r--r--engine/src/valet/engine/app_manager/server.py171
-rw-r--r--engine/src/valet/engine/db_connect/__init__.py18
-rw-r--r--engine/src/valet/engine/db_connect/db_apis/__init__.py18
-rw-r--r--engine/src/valet/engine/db_connect/db_apis/mem_db.py117
-rw-r--r--engine/src/valet/engine/db_connect/db_apis/music.py388
-rw-r--r--engine/src/valet/engine/db_connect/db_handler.py533
-rw-r--r--engine/src/valet/engine/db_connect/locks.py152
-rw-r--r--engine/src/valet/engine/resource_manager/__init__.py18
-rw-r--r--engine/src/valet/engine/resource_manager/compute_manager.py201
-rw-r--r--engine/src/valet/engine/resource_manager/metadata_manager.py424
-rw-r--r--engine/src/valet/engine/resource_manager/naming.py146
-rw-r--r--engine/src/valet/engine/resource_manager/nova_compute.py544
-rw-r--r--engine/src/valet/engine/resource_manager/resource.py1589
-rw-r--r--engine/src/valet/engine/resource_manager/resource_handler.py299
-rw-r--r--engine/src/valet/engine/resource_manager/resources/__init__.py18
-rw-r--r--engine/src/valet/engine/resource_manager/resources/datacenter.py85
-rw-r--r--engine/src/valet/engine/resource_manager/resources/flavor.py67
-rw-r--r--engine/src/valet/engine/resource_manager/resources/group.py401
-rw-r--r--engine/src/valet/engine/resource_manager/resources/group_rule.py52
-rw-r--r--engine/src/valet/engine/resource_manager/resources/host.py428
-rw-r--r--engine/src/valet/engine/resource_manager/resources/host_group.py108
-rw-r--r--engine/src/valet/engine/resource_manager/resources/numa.py264
-rw-r--r--engine/src/valet/engine/resource_manager/topology_manager.py237
-rw-r--r--engine/src/valet/engine/search/__init__.py18
-rw-r--r--engine/src/valet/engine/search/avail_resources.py76
-rw-r--r--engine/src/valet/engine/search/constraint_solver.py117
-rw-r--r--engine/src/valet/engine/search/filters/__init__.py18
-rw-r--r--engine/src/valet/engine/search/filters/affinity_filter.py69
-rw-r--r--engine/src/valet/engine/search/filters/aggregate_instance_filter.py106
-rw-r--r--engine/src/valet/engine/search/filters/az_filter.py74
-rw-r--r--engine/src/valet/engine/search/filters/cpu_filter.py57
-rw-r--r--engine/src/valet/engine/search/filters/disk_filter.py50
-rw-r--r--engine/src/valet/engine/search/filters/diversity_filter.py62
-rw-r--r--engine/src/valet/engine/search/filters/dynamic_aggregate_filter.py141
-rw-r--r--engine/src/valet/engine/search/filters/exclusivity_filter.py81
-rw-r--r--engine/src/valet/engine/search/filters/filter_utils.py117
-rw-r--r--engine/src/valet/engine/search/filters/mem_filter.py56
-rw-r--r--engine/src/valet/engine/search/filters/no_exclusivity_filter.py53
-rw-r--r--engine/src/valet/engine/search/filters/numa_filter.py84
-rw-r--r--engine/src/valet/engine/search/filters/quorum_diversity_filter.py106
-rw-r--r--engine/src/valet/engine/search/optimizer.py494
-rw-r--r--engine/src/valet/engine/search/resource.py264
-rw-r--r--engine/src/valet/engine/search/search.py708
-rw-r--r--engine/src/valet/engine/search/search_helper.py43
-rwxr-xr-xengine/src/valet/rules/VNF_Rack_Diversity_RDN.json8
-rwxr-xr-xengine/src/valet/rules/VNF_Rack_Quorum_RDN.json8
-rwxr-xr-xengine/src/valet/rules/VNF_host_diversity_RDN.json8
-rwxr-xr-xengine/src/valet/rules/test_host_affinity_rule.json8
-rwxr-xr-xengine/src/valet/rules/test_host_diveristy_rule0.json8
-rwxr-xr-xengine/src/valet/rules/test_host_diveristy_rule1.json8
-rwxr-xr-xengine/src/valet/rules/test_host_diveristy_rule2.json8
-rwxr-xr-xengine/src/valet/rules/test_host_exclusivity.json8
-rwxr-xr-xengine/src/valet/rules/test_host_exclusivity2.json8
-rw-r--r--engine/src/valet/solver/__init__.py18
-rw-r--r--engine/src/valet/solver/ostro.py529
-rw-r--r--engine/src/valet/utils/__init__.py18
-rw-r--r--engine/src/valet/utils/decryption.py44
-rw-r--r--engine/src/valet/utils/logger.py349
-rw-r--r--engine/src/valet/valet_main.py88
-rw-r--r--valetapi/Jenkinsfile276
-rw-r--r--valetapi/LICENSE.txt45
-rwxr-xr-xvaletapi/kubernetes/org-onap-fgps-dev/Chart.yaml18
-rwxr-xr-xvaletapi/kubernetes/org-onap-fgps-dev/resources/config/application.properties33
-rwxr-xr-xvaletapi/kubernetes/org-onap-fgps-dev/resources/config/auth.properties28
-rwxr-xr-xvaletapi/kubernetes/org-onap-fgps-dev/resources/config/logback.xml28
-rwxr-xr-xvaletapi/kubernetes/org-onap-fgps-dev/resources/config/logmessages.properties29
-rwxr-xr-xvaletapi/kubernetes/org-onap-fgps-dev/resources/config/resources.properties34
-rwxr-xr-xvaletapi/kubernetes/org-onap-fgps-dev/resources/config/system.properties44
-rwxr-xr-xvaletapi/kubernetes/org-onap-fgps-dev/resources/config/version.properties27
-rwxr-xr-xvaletapi/kubernetes/org-onap-fgps-dev/templates/configmap.yaml21
-rwxr-xr-xvaletapi/kubernetes/org-onap-fgps-dev/templates/deployment.yaml104
-rwxr-xr-xvaletapi/kubernetes/org-onap-fgps-dev/templates/service.yaml40
-rwxr-xr-xvaletapi/kubernetes/org-onap-fgps-dev/values.yaml104
-rw-r--r--valetapi/opt/etc/config/application.properties50
-rw-r--r--valetapi/opt/etc/config/logback.xml25
-rw-r--r--valetapi/opt/etc/config/system.properties48
-rw-r--r--valetapi/pom.xml515
-rwxr-xr-xvaletapi/src/main/docker/Dockerfile18
-rwxr-xr-xvaletapi/src/main/docker/startService.sh9
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/ApplicationStartup.java92
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/ValetServiceApplication.java49
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/WebConfiguration.java66
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/annotation/AafRoleRequired.java74
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/annotation/BasicAuthRequired.java59
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/annotation/PropertyBasedAuthorization.java64
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/beans/KeySpaceRequest.java44
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/beans/Status.java65
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/beans/schema/Schema.java262
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/config/HttpConfig.java82
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/config/SpringServletConfig.java71
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/controller/AdminController.java66
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/controller/ValetGroupsController.java144
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/controller/ValetServicePlacementController.java150
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/controller/ValetUtilityController.java298
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/dao/SchemaDAO.java177
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/dao/ValetServicePlacementDAO.java155
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/eelf/configuration/Configuration.java159
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/eelf/configuration/EELFLogger.java634
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/eelf/configuration/EELFManager.java502
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/eelf/configuration/SLF4jWrapper.java1043
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/eelf/exception/EELFException.java78
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/eelf/i18n/EELFMsgs.java85
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/eelf/i18n/EELFResolvableErrorEnum.java52
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/eelf/i18n/EELFResourceManager.java715
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/exception/CipherUtilException.java67
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/exception/MissingRoleException.java62
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/helpers/Helper.java58
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/interceptor/AuthorizationInterceptor.java203
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/interceptor/DarknessInterceptor.java65
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/interceptor/VersioningInterceptor.java88
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/logging/EELFLoggerDelegate.java447
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/logging/aspect/AuditLog.java49
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/logging/aspect/EELFLoggerAdvice.java247
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/logging/aspect/EELFLoggerAspect.java105
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/logging/aspect/MetricsLog.java49
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/logging/format/AlarmSeverityEnum.java56
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/logging/format/AuditLogFormatter.java111
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/proxy/AAFProxy.java232
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/proxy/DBProxy.java215
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/service/ValetGroupsService.java141
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/service/ValetPlacementService.java1017
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/service/ValetUtilityService.java44
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/utils/CipherUtil.java214
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/utils/Constants.java100
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/utils/DBInitializationRequests.java50
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/utils/Helper.java48
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/utils/KeyProperties.java122
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/utils/MusicDBConstants.java48
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/utils/SystemProperties.java193
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/utils/UserUtils.java76
-rw-r--r--valetapi/src/main/java/org/onap/fgps/api/utils/YamlToJsonConverter.java60
-rwxr-xr-xvaletapi/src/main/jenkins/versioning.groovy66
-rw-r--r--valetapi/src/main/resources/application.properties52
-rw-r--r--valetapi/src/main/resources/auth.properties48
-rwxr-xr-xvaletapi/src/main/resources/banner.txt13
-rw-r--r--valetapi/src/main/resources/key.properties24
-rw-r--r--valetapi/src/main/resources/keystore.p12bin0 -> 2583 bytes
-rw-r--r--valetapi/src/main/resources/logback.xml212
-rwxr-xr-xvaletapi/src/main/resources/logmessages.properties29
-rw-r--r--valetapi/src/main/resources/resources.properties39
-rw-r--r--valetapi/src/main/resources/version.properties28
-rw-r--r--valetapi/src/test/java/org/onap/fgps/api/componenttest/mockito/ITComponentTest.java54
-rw-r--r--valetapi/src/test/java/org/onap/fgps/controller/ValetGroupsControllerTest.java107
-rw-r--r--valetapi/src/test/java/org/onap/fgps/controller/ValetServicePlacementControllerTest.java221
-rw-r--r--valetapi/src/test/java/org/onap/fgps/controller/ValetServicePlacementControllerTest1.java88
-rw-r--r--valetapi/src/test/java/org/onap/fgps/service/ValetGroupsServiceTest.java103
-rw-r--r--valetapi/src/test/java/org/onap/fgps/service/ValetPlacementServiceTest.java335
-rwxr-xr-xvaletapi/src/test/resources/application-test.properties37
181 files changed, 25923 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..6000f78
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,25 @@
+/target/
+*.class
+
+/.classpath
+/.project
+/.settings/
+
+*.pyc
+.DS_Store
+
+# Java Compiled class file
+*.class
+
+# Log files
+Valet-Engine/log/*
+
+# junk directories
+.old
+
+# pycharm directory
+src/.idea
+
+.checkstyle
+.project
+.pydevproject
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 0000000..cce0021
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,45 @@
+
+The following licence applies to all files in this and subdirectories. Licences
+are included in individual source files where appropriate, and if it differs
+from this text, it supersedes this. Any file that does not have licence text
+defaults to being covered by this text; not all files support the addition of
+licenses.
+
+/*
+* ============LICENSE_START==========================================
+* ONAP - F-GPS
+* ===================================================================
+* Copyright (c) 2019 AT&T Intellectual Property. All rights reserved.
+* ===================================================================
+*
+* Unless otherwise specified, all software contained herein is licensed
+* under the Apache License, Version 2.0 (the "License");
+* you may not use this software except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+*
+*
+* Unless otherwise specified, all documentation contained herein is licensed
+* under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+* you may not use this documentation except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* https://creativecommons.org/licenses/by/4.0/
+*
+* Unless required by applicable law or agreed to in writing, documentation
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+* ============LICENSE_END============================================
+*
+*/
diff --git a/engine/LICENSE.txt b/engine/LICENSE.txt
new file mode 100644
index 0000000..f7449f4
--- /dev/null
+++ b/engine/LICENSE.txt
@@ -0,0 +1,45 @@
+
+The following licence applies to all files in this and subdirectories. Licences
+are included in individual source files where appropriate, and if it differs
+from this text, it supersedes this. Any file that does not have licence text
+defaults to being covered by this text; not all files support the addition of
+licenses.
+
+/*
+* ============LICENSE_START==========================================
+* ONAP - F-GPS Engine
+* ===================================================================
+* Copyright (c) 2019 AT&T Intellectual Property. All rights reserved.
+* ===================================================================
+*
+* Unless otherwise specified, all software contained herein is licensed
+* under the Apache License, Version 2.0 (the "License");
+* you may not use this software except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+*
+*
+* Unless otherwise specified, all documentation contained herein is licensed
+* under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+* you may not use this documentation except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* https://creativecommons.org/licenses/by/4.0/
+*
+* Unless required by applicable law or agreed to in writing, documentation
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+* ============LICENSE_END============================================
+*
+*/
diff --git a/engine/docker/Dockerfile b/engine/docker/Dockerfile
new file mode 100644
index 0000000..f8bf14e
--- /dev/null
+++ b/engine/docker/Dockerfile
@@ -0,0 +1,23 @@
+FROM python:2.7.15-alpine
+
+COPY src/ /opt/engine
+WORKDIR /opt/engine
+
+ARG HTTP_PROXY
+ARG HTTPS_PROXY
+
+ENV HTTP_PROXY ${HTTP_PROXY}
+ENV HTTPS_PROXY ${HTTPS_PROXY}
+
+RUN apk --update add --virtual build-dep gcc musl-dev linux-headers libc-dev \
+ && pip install -r requirements.txt \
+ && apk del build-dep
+
+ENV HTTP_PROXY ""
+ENV HTTPS_PROXY ""
+
+RUN addgroup -g 825 -S valetg && adduser -u 825 -S valetu -G valetg && chown -R valetu:valetg /opt/engine
+USER valetu:valetg
+RUN date > /home/valetu/imagedate.txt
+
+CMD ["python", "valet/valet_main.py", "/opt/config/solver.json"]
diff --git a/engine/kubernetes/org-onap-fgps-dev/.helmignore b/engine/kubernetes/org-onap-fgps-dev/.helmignore
new file mode 100755
index 0000000..f0c1319
--- /dev/null
+++ b/engine/kubernetes/org-onap-fgps-dev/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/engine/kubernetes/org-onap-fgps-dev/Chart.yaml b/engine/kubernetes/org-onap-fgps-dev/Chart.yaml
new file mode 100755
index 0000000..2749f1a
--- /dev/null
+++ b/engine/kubernetes/org-onap-fgps-dev/Chart.yaml
@@ -0,0 +1,19 @@
+# Copyright © 2019 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for valet engine Kubernetes
+name: valetengine
+version: 1.0.0
diff --git a/engine/kubernetes/org-onap-fgps-dev/resources/config/solver.json b/engine/kubernetes/org-onap-fgps-dev/resources/config/solver.json
new file mode 100755
index 0000000..62f179e
--- /dev/null
+++ b/engine/kubernetes/org-onap-fgps-dev/resources/config/solver.json
@@ -0,0 +1,72 @@
+{
+ "engine": {
+ "id": "valet01",
+ "timeout": 300,
+ "dha": "false",
+ "ek": "123"
+ },
+ "logging": {
+ "path": "{{.Values.logging.path}}",
+ "name": "{{.Values.logging.name}}",
+ "format": "{{.Values.logging.format}}",
+ "size": 10000000,
+ "level": "{{.Values.logging.level}}",
+ "lk": "234"
+ },
+ "db": {
+ "mode": "music",
+ "keyspace": "{{.Values.db.keyspace}}",
+ "requests_table": "{{.Values.db.requests_table}}",
+ "results_table": "{{.Values.db.results_table}}",
+ "group_rules_table": "{{.Values.db.group_rules_table}}",
+ "groups_table": "{{.Values.db.groups_table}}",
+ "stacks_table": "{{.Values.db.stacks_table}}",
+ "resources_table": "{{.Values.db.resources_table}}",
+ "stack_id_map_table": "{{.Values.db.stack_id_map_table}}",
+ "regions_table": "{{.Values.db.regions_table}}",
+ "dk": "789"
+ },
+ "music": {
+ "hosts": [ "{{.Values.music.host1}}", "{{.Values.music.host2}}", "{{.Values.music.host3}}" ],
+ "port": "8080",
+ "path": "/MUSIC/rest/v2",
+ "timeout": 10,
+ "retries": 3,
+ "replication_factor": 3,
+ "lock_timeout": 300,
+ "userid": "{{.Values.music.userid}}",
+ "password": "{{.Values.music.password}}",
+ "namespace": "{{.Values.music.namespace}}"
+ },
+ "compute": {
+ "mode": "nova",
+ "default_cpu_allocation_ratio": 1.0,
+ "default_ram_allocation_ratio": 1.0,
+ "default_disk_allocation_ratio": 1.0,
+ "batch_sync_interval": 3600
+ },
+ "nova": {
+ "project_name": "{{.Values.nova.project_name}}",
+ "admin_view_username": "{{.Values.nova.admin_view_username}}",
+ "admin_username": "{{.Values.nova.admin_username}}",
+ "admin_view_password": "{{.Values.nova.admin_view_password}}",
+ "admin_password": "{{.Values.nova.admin_password}}"
+ },
+ "topology": {
+ "mode": "{{.Values.topology.mode}}"
+ },
+ "naming": {
+ "rack_codes": [
+ "r"
+ ],
+ "host_codes": [
+ "a",
+ "c",
+ "u",
+ "f",
+ "o",
+ "p",
+ "s"
+ ]
+ }
+}
diff --git a/engine/kubernetes/org-onap-fgps-dev/templates/configmap.yaml b/engine/kubernetes/org-onap-fgps-dev/templates/configmap.yaml
new file mode 100755
index 0000000..25c8816
--- /dev/null
+++ b/engine/kubernetes/org-onap-fgps-dev/templates/configmap.yaml
@@ -0,0 +1,21 @@
+# Copyright © 2019 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: engine-configs
+ namespace: org-onap-fgps-dev
+data:
+{{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }}
diff --git a/engine/kubernetes/org-onap-fgps-dev/templates/deployment.yaml b/engine/kubernetes/org-onap-fgps-dev/templates/deployment.yaml
new file mode 100755
index 0000000..aebeacc
--- /dev/null
+++ b/engine/kubernetes/org-onap-fgps-dev/templates/deployment.yaml
@@ -0,0 +1,76 @@
+# Copyright © 2019 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: {{ .Chart.Name }}
+ namespace: {{ .Values.global.ns }}
+ labels:
+ app: {{ .Chart.Name }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ template:
+ metadata:
+ labels:
+ app: {{ .Chart.Name }}
+ release: {{ .Release.Name }}
+ spec:
+ serviceAccount: "default"
+ volumes:
+ - name: engine-volume
+ configMap:
+ name: engine-configs
+ items:
+ - key: solver.json
+ path: solver.json
+ - name: engine-logs
+ hostPath:
+ path: /opt/logs/engine
+ imagePullSecrets:
+ - name: pullsecret
+ initContainers:
+ - name: init-cont
+ image: ubuntu:16.04
+ command: ['bash', '-c', 'useradd --uid 825 valetu && chown -R valetu /engine']
+ volumeMounts:
+ - mountPath: /engine
+ name: engine-logs
+ containers:
+ - env:
+ - name: properties
+ value: solver.json
+ image: "nexus.onap.org:5100/org.onap.fgps/engine:latest"
+ imagePullPolicy: Always
+ name: "engine"
+ volumeMounts:
+ - name: engine-volume
+ mountPath: /opt/config/solver.json
+ subPath: solver.json
+ - name: engine-logs
+ mountPath: /engine
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ resources:
+ requests:
+ memory: "500Mi"
+ cpu: "250m"
+ limits:
+ memory: "1Gi"
+ cpu: "500m"
+ restartPolicy: Always
diff --git a/engine/kubernetes/org-onap-fgps-dev/templates/service.yaml b/engine/kubernetes/org-onap-fgps-dev/templates/service.yaml
new file mode 100755
index 0000000..8b9d1d0
--- /dev/null
+++ b/engine/kubernetes/org-onap-fgps-dev/templates/service.yaml
@@ -0,0 +1,41 @@
+# Copyright © 2019 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ namespace: {{ .Values.global.ns }}
+ labels:
+ app: {{ .Chart.Name }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+ annotations:
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ {{if eq .Values.service.type "NodePort" -}}
+ - port: {{ .Values.service.externalPort }}
+ nodePort: {{ .Values.service.nodePort }}
+ name: {{ .Values.service.portName }}
+ {{- else -}}
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ name: {{ .Values.service.portName }}
+ {{- end}}
+ selector:
+ app: {{ .Chart.Name }}
+ release: {{ .Release.Name }}
+
diff --git a/engine/kubernetes/org-onap-fgps-dev/values.yaml b/engine/kubernetes/org-onap-fgps-dev/values.yaml
new file mode 100755
index 0000000..223f036
--- /dev/null
+++ b/engine/kubernetes/org-onap-fgps-dev/values.yaml
@@ -0,0 +1,97 @@
+# Copyright © 2019 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+global:
+ loggingRepository: docker.elastic.co
+ loggingImage: beats/filebeat:5.5.0
+ repositoryCred:
+ user: repo_user@valet.onap.org
+ password: repo_password
+ repository: nexus.onap.org:5100
+ commonConfigPrefix: engine
+ ns: org-onap-fgps-dev
+ image:
+ filebeat: docker.elastic.co/beats/filebeat:5.5.0
+ repository: nexus.onap.org:5100
+ name: org.onap.fgps/engine:latest
+pullPolicy: Always
+nodePortPrefix: 300
+dataRootDir: /dockerdata-nfs
+config:
+ aai:
+ serviceName: aai.onap.org
+ port: 8443
+ msb:
+ serviceName: msb-iag
+ port: 80
+
+service:
+ type: NodePort
+ name: engine
+ externalPort: 80
+ internalPort: 80
+ nodePort: 30632
+ portName: engine
+ingress:
+ enabled: false
+replicaCount: 1
+liveness:
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ enabled: true
+readiness:
+ initialDelaySeconds: 10
+ periodSeconds: 10
+
+
+logging:
+ path: "/engine/"
+ name: debug1.log
+ format: "%(asctime)s.%(msecs)03d [%(levelname)-5.5s] [%(name)s] - %(message)s"
+ size: 10000000
+ level: debug
+ lk: '234'
+db:
+ mode: music
+ keyspace: pn1
+ requests_table: requests
+ results_table: results
+ group_rules_table: group_rules
+ groups_table: groups
+ stacks_table: stacks
+ resources_table: resources
+ stack_id_map_table: stack_id_map
+ regions_table: regions
+ dk: '789'
+music:
+ host1: music1.onap.org
+ host2: music2.onap.org
+ host3: music3.onap.org
+ port: '8080'
+ path: "/MUSIC/rest/v2"
+ timeout: 10
+ retries: 3
+ replication_factor: 3
+ lock_timeout: 300
+ userid: musicuser@onap.org
+ password: encrypted_music_password
+ namespace: org.onap.dev.music
+nova:
+ project_name: admin
+ admin_view_username: openstack_username
+ admin_username: openstack_username
+ admin_view_password: encrypted_openstack_password
+ admin_password: encrypted_openstack_password
+topology:
+ mode: naming \ No newline at end of file
diff --git a/engine/src/config/planner.json b/engine/src/config/planner.json
new file mode 100755
index 0000000..2e14702
--- /dev/null
+++ b/engine/src/config/planner.json
@@ -0,0 +1,69 @@
+{
+ "engine": {
+ "id": "valet01",
+ "timeout": 3600
+ },
+ "logging": {
+ "path": "/engine",
+ "name": "planner.log",
+ "format": "%(asctime)s.%(msecs)03d [%(levelname)-5.5s] [%(name)s] - %(message)s",
+ "size": 100000000,
+ "level": "debug"
+ },
+ "db": {
+ "mode": "mem_db",
+ "keyspace": "pn2",
+ "requests_table": "requests",
+ "results_table": "results",
+ "group_rules_table": "group_rules",
+ "groups_table": "groups",
+ "stacks_table": "stacks",
+ "resources_table": "resources",
+ "stack_id_map_table": "stack_id_map",
+ "regions_table": "regions"
+ },
+ "music": {
+ "hosts": [ "music_host_1.onap.org", "music_host_2.onap.org" ],
+ "port": "8080",
+ "path": "/MUSIC/rest/v2",
+ "timeout": 10,
+ "retries": 3,
+ "replication_factor": 3,
+ "lock_timeout": 300,
+ "userid": "musicuser@onap.org",
+ "password": "encrypted_music_password",
+ "namespace": "org.onap.dev.music"
+ },
+ "compute": {
+ "mode": "planner",
+ "default_cpu_allocation_ratio": 1.0,
+ "default_ram_allocation_ratio": 1.0,
+ "default_disk_allocation_ratio": 1.0,
+ "batch_sync_interval": 3600
+ },
+ "nova": {
+ "project_name": "admin",
+ "admin_view_username": "openstack_username",
+ "admin_username": "openstack_username",
+ "admin_view_password": "encrypted_openstack_password",
+ "admin_password": "encrypted_openstack_password"
+ },
+ "topology": {
+ "mode": "naming",
+ "num_of_hosts_per_rack": 18
+ },
+ "naming": {
+ "rack_codes": [
+ "r"
+ ],
+ "host_codes": [
+ "a",
+ "c",
+ "u",
+ "f",
+ "o",
+ "p",
+ "s"
+ ]
+ }
+}
diff --git a/engine/src/config/solver.json b/engine/src/config/solver.json
new file mode 100755
index 0000000..fd8ce29
--- /dev/null
+++ b/engine/src/config/solver.json
@@ -0,0 +1,72 @@
+{
+ "engine": {
+ "id": "valet01",
+ "timeout": 300,
+ "dha": "false",
+ "ek": "123"
+ },
+ "logging": {
+ "path": "/engine/",
+ "name": "solver.log",
+ "format": "%(asctime)s.%(msecs)03d [%(levelname)-5.5s] [%(name)s] - %(message)s",
+ "size": 10000000,
+ "level": "debug",
+ "lk": "234"
+ },
+ "db": {
+ "mode": "music",
+ "keyspace": "pn2",
+ "requests_table": "requests",
+ "results_table": "results",
+ "group_rules_table": "group_rules",
+ "groups_table": "groups",
+ "stacks_table": "stacks",
+ "resources_table": "resources",
+ "stack_id_map_table": "stack_id_map",
+ "regions_table": "regions",
+ "dk": "789"
+ },
+ "music": {
+ "hosts": [ "music_host_1.onap.org", "music_host_2.onap.org" ],
+ "port": "8080",
+ "path": "/MUSIC/rest/v2",
+ "timeout": 10,
+ "retries": 3,
+ "replication_factor": 3,
+ "lock_timeout": 300,
+ "userid": "musicuser@onap.org",
+ "password": "encrypted_music_password",
+ "namespace": "org.onap.dev.music"
+ },
+ "compute": {
+ "mode": "nova",
+ "default_cpu_allocation_ratio": 1.0,
+ "default_ram_allocation_ratio": 1.0,
+ "default_disk_allocation_ratio": 1.0,
+ "batch_sync_interval": 3600
+ },
+ "nova": {
+ "project_name": "admin",
+ "admin_view_username": "openstack_username",
+ "admin_username": "openstack_username",
+ "admin_view_password": "encrypted_openstack_password",
+ "admin_password": "encrypted_openstack_password"
+ },
+ "topology": {
+ "mode": "naming"
+ },
+ "naming": {
+ "rack_codes": [
+ "r"
+ ],
+ "host_codes": [
+ "a",
+ "c",
+ "u",
+ "f",
+ "o",
+ "p",
+ "s"
+ ]
+ }
+}
diff --git a/engine/src/config/solver_test.json b/engine/src/config/solver_test.json
new file mode 100755
index 0000000..a5b00e5
--- /dev/null
+++ b/engine/src/config/solver_test.json
@@ -0,0 +1,72 @@
+{
+ "engine": {
+ "id": "valet01",
+ "timeout": 300,
+ "dha": "true",
+ "ek": "123"
+ },
+ "logging": {
+ "path": "/engine",
+ "name": "solver_test.log",
+ "format": "%(asctime)s.%(msecs)03d [%(levelname)-5.5s] [%(name)s] - %(message)s",
+ "size": 10000000,
+ "level": "debug",
+ "lk": "234"
+ },
+ "db": {
+ "mode": "mem_db",
+ "keyspace": "Valet_IST",
+ "requests_table": "requests",
+ "results_table": "results",
+ "group_rules_table": "group_rules",
+ "groups_table": "groups",
+ "stacks_table": "stacks",
+ "resources_table": "resources",
+ "stack_id_map_table": "stack_id_map",
+ "regions_table": "regions",
+ "dk": "789"
+ },
+ "music": {
+ "hosts": [ "music_host_1.onap.org", "music_host_2.onap.org" ],
+ "port": "8080",
+ "path": "/MUSIC/rest/v2",
+ "timeout": 10,
+ "retries": 3,
+ "replication_factor": 3,
+ "lock_timeout": 300,
+ "userid": "musicuser@onap.org",
+ "password": "encrypted_music_password",
+ "namespace": "org.onap.dev.music"
+ },
+ "compute": {
+ "mode": "sim",
+ "default_cpu_allocation_ratio": 1.0,
+ "default_ram_allocation_ratio": 1.0,
+ "default_disk_allocation_ratio": 1.0,
+ "batch_sync_interval": 3600
+ },
+ "nova": {
+ "project_name": "admin",
+ "admin_view_username": "openstack_username",
+ "admin_username": "openstack_username",
+ "admin_view_password": "encrypted_openstack_password",
+ "admin_password": "encrypted_openstack_password"
+ },
+ "topology": {
+ "mode": "naming"
+ },
+ "naming": {
+ "rack_codes": [
+ "r"
+ ],
+ "host_codes": [
+ "a",
+ "c",
+ "u",
+ "f",
+ "o",
+ "p",
+ "s"
+ ]
+ }
+}
diff --git a/engine/src/config/version.json b/engine/src/config/version.json
new file mode 100644
index 0000000..baf15cc
--- /dev/null
+++ b/engine/src/config/version.json
@@ -0,0 +1,9 @@
+{
+ "version": {
+ "major": 0,
+ "minor": 4,
+ "patch": 22,
+ "full": "0.4.22",
+ "type": "C"
+ }
+} \ No newline at end of file
diff --git a/engine/src/requirements.txt b/engine/src/requirements.txt
new file mode 100755
index 0000000..f623b01
--- /dev/null
+++ b/engine/src/requirements.txt
@@ -0,0 +1,7 @@
+six==1.12.0
+Crypto==1.4.1
+requests==2.21.0
+python-novaclient==2.18.0
+python-keystoneclient==3.6.0
+pycrypto==2.6.1
+pytz==2018.9
diff --git a/engine/src/tools/README.md b/engine/src/tools/README.md
new file mode 100644
index 0000000..9296f39
--- /dev/null
+++ b/engine/src/tools/README.md
@@ -0,0 +1,69 @@
+### Valet Tools for development, test, and production support
+
+|File| Description |
+|---|---|
+|crim.py|Commandline Rest Interface for Music<br>*read, add, delete from the music database*|
+|lock.py|Manual (Un)Locking Of Valet Regions<br>*from the regions (locking) table*|
+|ppdb.py|pretty print database<br>*try to make the database data readable*|
+|lib/common|collection of functions<br>- **set_argument** - *Get arg from file, cmdline, a pipe or prompt user*<br>- **list2string** - *join list and return as a string*<br>- **chop** - *like perl*|
+|lib/hosts.json|*contains all the currently known (by me) hosts for music and valet*|
+|lib/logger.py|*like the official logger but allows logger to point to file and or console*|
+|lib/tables.py|*Tables object, that handles each valet table as small subclass (could replace db_handler.py)*|
+|lib/song.py|*Song is music for scripts, a subclass of music.py, with script helpers*|
+
+#### Examples
+`$ crim.py -?`
+
+Show help message and exit (a lot more options than I am showing here...)
+
+`$ crim.py -names -read requests -read results`
+
+Show the contents of the requests and results tables in the default keyspace
+
+`$ crim.py -n -r q -r u`
+
+Same as above, but using shortcuts for arguments
+The next example uses [watch](https://linux.die.net/man/1/watch "watch(1) - Linux man page") to execute the script repeatedly, displaying the output
+
+`$ watch crim.py -n -r q -r u`
+
+Show the contents of the regions tables (locking) in all the known keyspaces
+
+`$ crim.py -K all -r regions`
+
+Delete the cw keyspace - this is used for testing to "clean" the database
+
+`$ crim.py -sD cw`
+
+Show the database stuff for the pn2 keyspace
+
+`$ crim.py -show -K pn`
+
+Show the config stuff for the pn2 keyspace
+
+`$ crim.py -ShowConfig -K pn`
+
+Show the database tables and definitions (hardcoded, not a query from the database)
+
+`$ crim.py -viewSchema`
+
+Show the requests record with id create-0000-0003
+
+`$ crim.py -i create-0000-0003 -r q`
+
+Show the stacks record in the pk2 keyspace with id "reg6:alan_stack_N003"
+
+`$ crim.py -r s -K pk2 -i "reg6:alan_stack_N003"`
+
+##### Testing Example
+
+Here we are going to copy a record from one environment to another
+
+Get a record from the request table, into a file; *Note:* Not the default config file...
+
+`$ crim.py -config ../test/solver.json -r q -K ist -id "create-abc-10099" > z`
+
+Put that record from the file into the request table of another keyspace
+
+`$ crim.py -c ../test/solver.json -t q -K pk2 -a i -f z`
+
diff --git a/engine/src/tools/crim.py b/engine/src/tools/crim.py
new file mode 100644
index 0000000..a92ac6c
--- /dev/null
+++ b/engine/src/tools/crim.py
@@ -0,0 +1,216 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+#!/usr/bin/env python2.7
+
+"""
+Commandline Rest Interface Music (CRIM)
+For help invoke with crim.py --help
+"""
+
+
+import argparse
+import json
+import os
+from pprint import pprint
+import re
+import sys
+import traceback
+
+from lib.song import Song
+from lib.tables import Tables
+
+sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) # ../../..
+from valet.utils.logger import Logger
+
+
+# options <<<1
+def options():
+ toc = Tables.option_choices()
+
+ parser = argparse.ArgumentParser(description='\033[34mCommandline Rest-Interface to Music.\033[0m', add_help=False)
+ Song.add_args(parser)
+
+ group = parser.add_argument_group("Choose table\n\033[48;5;227m" + toc["alias"] + "\033[0m")
+
+ group.add_argument('-action', help='perform insert, update, delete, delete all, create table', choices=['i', 'u', 'd', 'D', 'c'])
+ group.add_argument('-id', metavar='ID', action="append", help='id(s) in a table; (multiples allowed)')
+ group.add_argument('-file', help='json file required for -action create')
+ group.add_argument('-table', metavar='table', help='perform action on this table', default="request", choices=toc["choices"])
+
+ group = parser.add_argument_group("Add/Delete Schema")
+ group.add_argument("-sA", "-schemaAdd", metavar='keyspace', dest="schemaAdd", help="create keyspace")
+ group.add_argument("-sD", "-schemaDel", metavar='keyspace', dest="schemaDel", help="delete keyspace")
+
+ group = parser.add_argument_group("Query Tables")
+ group.add_argument('-read', metavar='all|table', action='append', help='read all or tables (multiples allowed)', choices=toc["choices"])
+ group.add_argument('-names', action='store_true', help='show names of tables on read')
+ group.add_argument('-Raw', action='store_true', help='dont strip out the music added fields')
+
+ group = parser.add_argument_group("Other Output")
+ group.add_argument("-?", "--help", action="help", help="show this help message and exit")
+ group.add_argument("-json", metavar='FILE', help="view json file")
+ group.add_argument('-show', action='store_true', help='show db stuff')
+ group.add_argument('-ShowConfig', action='store_true', help='show config stuff')
+ group.add_argument('-vt', '-viewTables', action='store_true', dest="viewTables", help='list tables (hardcoded)')
+ group.add_argument('-vs', '-viewSchema', action='store_true', dest="viewSchema", help='list table schema (hardcoded)')
+
+ return parser.parse_args() # >>>1
+
+
+# setTable <<<1
+def _set_table(opts_table):
+ """ set table based on requested table. """
+
+ for sub_table in Tables.__subclasses__():
+ if opts_table in sub_table.table_alias():
+ return sub_table(music, logger)
+# >>>1
+
+
+question = lambda q: raw_input(q).lower().strip()[0] == "y"
+
+
+def clean_env(var):
+ if var in os.environ:
+ del os.environ[var]
+
+
+""" MAIN """
+if __name__ == "__main__":
+ clean_env('HTTP_PROXY')
+ clean_env('http_proxy')
+ opts = options()
+
+ # Get logger; Read config and strike up a song <<<1
+ logger = Logger().get_logger('console')
+ config = json.loads(open(opts.config).read())
+ music = Song(opts, config, logger)
+ # >>>1
+
+ if opts.viewTables:
+ for table in Tables.__subclasses__():
+ print (re.sub("[[\]',]", '', str(table.table_alias()))).split(" ")[0]
+ sys.exit(0)
+
+ if opts.viewSchema:
+ for table in Tables.__subclasses__():
+ sys.stdout.write(re.sub("[[\]',]", '', str(table.table_alias())) + ' ')
+ print json.dumps(table.schema, sort_keys=True, indent=2), "\n"
+ sys.exit(0)
+
+ """ Keyspace Create """ # <<<1
+ if opts.schemaAdd:
+ sys.exit(music.create_keyspace(opts.schemaAdd))
+
+ """ Keyspace Delete """ # <<<1
+ if opts.schemaDel:
+ if question("You sure you wanna delete keyspace '%s'? [y/n] " % opts.schemaDel):
+ sys.exit(music.drop_keyspace(opts.schemaDel))
+
+ # all the tables listed with '-read's <<<1
+ if opts.read:
+ if 'all' in opts.read:
+ if music.keyspace == "all":
+ sys.exit("read all tables for all keyspaces is not currently supported")
+
+ for table in Tables.__subclasses__():
+ table(music, logger).read(raw=opts.Raw, names=True)
+ sys.exit(0)
+
+ if music.keyspace == "all":
+ opts.names = True
+ for keyspace in Song.Keyspaces.keys():
+ music.keyspace = Song.Keyspaces[keyspace]
+ print "\n----------------- %s : %s -----------------" % (keyspace, music.keyspace)
+ # noinspection PyBroadException
+ try:
+ for tName in opts.read:
+ _set_table(tName).read(ids=opts.id, json_file=opts.file, names=opts.names, raw=opts.Raw)
+ except Exception as e:
+ pass
+ sys.exit(0)
+
+ for tName in opts.read:
+ _set_table(tName).read(ids=opts.id, json_file=opts.file, names=opts.names, raw=opts.Raw)
+ sys.exit(0)
+
+ table = _set_table(opts.table)
+
+ # show all db stuff <<<1
+ if opts.show or opts.ShowConfig:
+ if opts.show:
+ print "music"
+ pprint(music.__dict__, indent=2)
+ print "\nrest"
+ pprint(music.rest.__dict__, indent=2)
+
+ if table is not None:
+ print "\n", table.table()
+ pprint(table.__dict__, indent=2)
+
+ if opts.ShowConfig:
+ print (json.dumps(config, indent=4))
+
+ sys.exit(0)
+ # >>>1
+
+ """ VIEW JSON FILE open, convert to json, convert to string and print it """ # <<<1
+ if opts.json:
+ # noinspection PyBroadException
+ try:
+ print (json.dumps(json.loads(open(opts.json).read()), indent=4))
+ except Exception as e:
+ print (traceback.format_exc())
+ sys.exit(2)
+ sys.exit(0)
+
+ """ Insert use json file to add record to database """ # <<<1
+ if opts.action == 'i':
+ table.create(opts.file)
+ sys.exit(0)
+
+ """ CREATE Table """ # <<<1
+ if opts.action == 'c':
+ table.create_table()
+ sys.exit(0)
+
+ """ UPDATE use json file to update db record """ # <<<1
+ if opts.action == 'u':
+ if not opts.file or not os.path.exists(opts.file):
+ print "--file filename (filename exists) is required for update"
+ sys.exit(1)
+
+ table.update(opts.file)
+ sys.exit(0)
+
+ """ DELETE use id to delete record from db -- requres ID """ # <<<1
+ if opts.action == 'd':
+ if not opts.id:
+ print "--id ID is required for delete"
+ sys.exit(1)
+
+ if opts.id:
+ table.delete(opts.id)
+ sys.exit(0)
+
+ """ DELETE ALL from table""" # <<<1
+ if opts.action == 'D':
+ table.clean_table()
+ sys.exit(0)
+# >>>1
diff --git a/engine/src/tools/lib/__init__.py b/engine/src/tools/lib/__init__.py
new file mode 100644
index 0000000..bd50995
--- /dev/null
+++ b/engine/src/tools/lib/__init__.py
@@ -0,0 +1,18 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
diff --git a/engine/src/tools/lib/common.py b/engine/src/tools/lib/common.py
new file mode 100644
index 0000000..4973f4e
--- /dev/null
+++ b/engine/src/tools/lib/common.py
@@ -0,0 +1,73 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+
+import os
+import select
+import sys
+
+
+def set_argument(arg=None, prompt=None, multiline=False):
+ """Return argument from file, cmd line, read from a pipe or prompt user """
+
+ if arg:
+ if os.path.isfile(arg):
+ f = open(arg)
+ message = f.readlines()
+ f.close()
+ else:
+ message = arg
+ else:
+ if sys.stdin in select.select([sys.stdin], [], [], .5)[0]:
+ message = sys.stdin.readlines()
+ else:
+ print prompt,
+ if multiline:
+ sentinel = ''
+ message = list(iter(raw_input, sentinel))
+ else:
+ message = [raw_input()]
+
+ return message
+
+
+def list2string(message):
+ return ''.join(message)
+
+
+def chop(message):
+
+ if message.endswith('\n'):
+ message = message[:-1]
+
+ return message
+
+
+# MAIN
+if __name__ == "__main__":
+
+ msg = set_argument(sys.argv[0])
+ for row in msg:
+ print row,
+ print "\n", list2string(msg)
+
+ msg = set_argument(prompt="Message? ")
+ for row in msg:
+ print row,
+ print "\n", list2string(msg)
diff --git a/engine/src/tools/lib/hosts.json b/engine/src/tools/lib/hosts.json
new file mode 100644
index 0000000..f45096b
--- /dev/null
+++ b/engine/src/tools/lib/hosts.json
@@ -0,0 +1,100 @@
+{
+ "music": {
+ "dev": {
+ "hosts": {
+ "id": "openstack region a",
+ "a": [ "255.255.255.0", "255.255.255.1", "255.255.255.2" ],
+ "b": [ "255.255.255.3", "255.255.255.4", "255.255.255.5" ]
+ }
+ },
+
+ "ist": {
+ "hosts": {
+ "id": "ist",
+ "a": [ "255.255.255.6", "255.255.255.7", "255.255.255.8" ],
+ "b": [ "255.255.255.9", "255.255.255.10", "255.255.255.11" ]
+ }
+ },
+
+ "e2e": {
+ "hosts": {
+ "id": "e2e",
+ "a": [ "255.255.255.12", "255.255.255.13", "255.255.255.14" ],
+ "b": [ "255.255.255.15", "255.255.255.16", "255.255.255.17" ]
+ }
+ },
+
+ "music": {
+ "hosts": {
+ "id": "music dev",
+ "a": [ "255.255.255.18" ],
+ "b": [ "255.255.255.19" ]
+ }
+ }
+ },
+
+ "valet": {
+ "dev": [
+ {
+ "ip": "255.255.255.20",
+ "fqdn": "dev1.site.onap.org"
+ },
+ {
+ "ip": "255.255.255.21",
+ "fqdn": "dev2.site.onap.org"
+ },
+ {
+ "ip": "255.255.255.22",
+ "fqdn": "dev3.site.onap.org"
+ },
+ {
+ "ip": "255.255.255.23",
+ "fqdn": "dev4.site.onap.org"
+ }
+ ],
+ "ist": [
+ {
+ "ip": "255.255.255.24",
+ "fqdn": "ist1.site.onap.org"
+ },
+ {
+ "ip": "255.255.255.25",
+ "fqdn": "ist2.site.onap.org"
+ },
+ {
+ "ip": "255.255.255.26",
+ "fqdn": "ist3.site.onap.org"
+ },
+ {
+ "ip": "255.255.255.27",
+ "fqdn": "ist4.site.onap.org"
+ },
+ {
+ "ip": "255.255.255.28",
+ "fqdn": "ist5.site.onap.org"
+ },
+ {
+ "ip": "255.255.255.29",
+ "fqdn": "ist6.site.onap.org"
+ }
+ ],
+ "e2e": [
+ {
+ "ip": "255.255.255.30",
+ "fqdn": "e2e1.site.onap.org"
+ },
+ {
+ "ip": "255.255.255.31",
+ "fqdn": "e2e2.site.onap.org"
+ },
+ {
+ "ip": "255.255.255.32",
+ "fqdn": "e2e3.site.onap.org"
+ },
+ {
+ "ip": "255.255.255.33",
+ "fqdn": "e2e4.site.onap.org"
+ }
+ ]
+ }
+}
diff --git a/engine/src/tools/lib/song.py b/engine/src/tools/lib/song.py
new file mode 100644
index 0000000..fdbba99
--- /dev/null
+++ b/engine/src/tools/lib/song.py
@@ -0,0 +1,153 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+"""Song is music for scripts
+
+ This is a subclass of music that scripts can use to add an option to override:
+ add_args - the commandline arguments that Song uses
+ keyspace - to allow the same script to run vs other databases
+ hosts table - to allow the same script to run vs other databases on other machines
+ connect - login may be different for other databases
+"""
+
+
+import argparse
+import json
+import os
+import sys
+
+sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) # ../../..
+from valet.utils.logger import Logger
+from valet.engine.db_connect.db_apis.music import Music
+
+
+def hosts(opts, config):
+ """override configs hosts"""
+
+ hosts_json = os.path.join(sys.path[-1], "tools", "lib", "hosts.json")
+ _hosts = json.loads(open(hosts_json).read())
+ config["music"]["hosts"] = _hosts["music"][opts.env or "dev"]["hosts"]["a"]
+
+ if opts.verbose:
+ print "hosts: " + str(config["music"]["hosts"])
+
+
+# noinspection PyBroadException
+class Song(Music):
+ """music but with script helpers"""
+
+ Keyspaces = {
+ "cw": "valet_TestDB123",
+ "cmw": "valet_cmw",
+ "pk": "valet_TestDB420",
+ "pk2": "valetdb2",
+ "pn": "pn2",
+ "st": "valet_saisree",
+ "ist": "valet_IST",
+ "gj": "valet_TestDB2"
+ }
+ Keyspaces.update(dict((v, v) for k, v in Keyspaces.iteritems())) # full name is valid too
+
+ def __init__(self, opts, config, logger):
+ if opts.env:
+ hosts(opts, config)
+
+ self.keyspace = config["db"]["keyspace"]
+ self.defaultKeyspace = True
+
+ if opts.Keyspace:
+ if opts.Keyspace == "all":
+ self.keyspace = opts.Keyspace
+ else:
+ self.keyspace = Song.Keyspaces[opts.Keyspace]
+ self.defaultKeyspace = False
+
+ if opts.db:
+ self.keyspace = opts.db
+ self.defaultKeyspace = False
+
+ # TODO cmw: move keyspace into music object, pass in like config["keyspace"] = self.keyspace
+
+ super(Song, self).__init__(config, logger)
+
+ @staticmethod
+ def add_args(parser):
+ """add common parser arguments"""
+ default_config = "/opt/config/solver.json"
+ if not os.path.isfile(default_config):
+ default_config = os.path.join(sys.path[-1], "config", "solver.json")
+
+ valid_keyspaces = Song.Keyspaces.keys()
+ valid_keyspaces.append("all")
+ valid_keyspaces_str = "{" + ",".join(valid_keyspaces) + "}"
+
+ valid_hosts = ["a1", "a2", "a3", "b3", "ab", "m"]
+ valid_env = ["dev", "ist", "e2e"]
+
+ song_args = parser.add_argument_group("Common Music Arguments")
+ song_args.add_argument('-env', metavar=valid_env, help='pick set of hosts -deprecated', choices=valid_env)
+ song_args.add_argument('-host', metavar=valid_hosts, help='pick set of hosts -deprecated', choices=valid_hosts)
+ ex = song_args.add_mutually_exclusive_group()
+ ex.add_argument('-Keyspace', metavar=valid_keyspaces_str, help='override configs keyspace with a users', choices=valid_keyspaces)
+ ex.add_argument('-db', metavar='keyspace_string', help='override keyspace with typed in value')
+ song_args.add_argument('-config', metavar='file', default=default_config, help="default: " + default_config)
+ song_args.add_argument('-verbose', action='store_true', help="verbose output")
+
+ def create_keyspace(self, keyspace):
+ """override creates a keyspace."""
+
+ data = {
+ 'replicationInfo': {
+ "DC2": 3,
+ "DC1": 3,
+ "class": "NetworkTopologyStrategy"
+ },
+ 'durabilityOfWrites': True,
+ 'consistencyInfo': {
+ 'type': 'eventual',
+ },
+ }
+
+ path = '/keyspaces/%s' % keyspace
+ try:
+ self.rest.request(method='post', path=path, data=data)
+ return 0
+ except Exception:
+ # "this exception should be handled here but it's done in music :("
+ return -1
+
+
+def main():
+ parser = argparse.ArgumentParser(description='Extended Music DB.', add_help=False)
+ Song.add_args(parser)
+ parser.add_argument("-?", "--help", action="help", help="show this help message and exit")
+ opts = parser.parse_args()
+
+ logger = Logger().get_logger('console')
+
+ config = json.loads(open(opts.config).read())
+ music = Song(opts, config, logger)
+
+ print json.dumps(config.get("music"))
+ print (music.keyspace)
+
+
+# MAIN
+if __name__ == "__main__":
+ main()
+ sys.exit(0)
diff --git a/engine/src/tools/lib/tables.py b/engine/src/tools/lib/tables.py
new file mode 100644
index 0000000..7ad4836
--- /dev/null
+++ b/engine/src/tools/lib/tables.py
@@ -0,0 +1,308 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+
+import json
+import os
+import sys
+import time
+from datetime import datetime
+from textwrap import TextWrapper
+
+import pytz
+
+
+class Tables(object):
+ """parent class for all tables."""
+
+ schema = None
+ alias = None
+
+ def __init__(self, music, logger):
+ """Initializer. Accepts target host list, port, and path."""
+
+ self.music = music
+ self.logger = logger
+ self.key = None
+ self.keyspace = music.keyspace
+ self.tz = pytz.timezone('America/New_York')
+
+ @classmethod
+ def table_alias(cls):
+ s = cls.__name__
+ s = s[0].lower() + s[1:]
+ return [s] + cls.alias
+
+ @staticmethod
+ def option_choices():
+ choices = []
+ aliases = None
+ for table in Tables.__subclasses__():
+ choices = choices + table.table_alias()
+ for alias in choices:
+ if aliases:
+ aliases = aliases + ' ' + alias
+ else:
+ aliases = alias
+
+ choices.append('all')
+
+ return {
+ "alias": TextWrapper(subsequent_indent=" ", initial_indent=" ", width=79).fill(aliases),
+ "choices": choices
+ }
+
+ def table(self):
+ """Return tables name (same as class name but all lc)."""
+
+ s = type(self).__name__
+ return s[0].lower() + s[1:]
+
+ def utc2local(self, utc):
+ """Change utc time to local time for readability."""
+
+ return " (" + datetime.fromtimestamp(utc, self.tz).strftime('%Y-%m-%d %I:%M:%S %Z%z') + ")"
+
+ def get_rows(self, ids=None, raw=False):
+ """ get_rows read table, or rows by id and return rows array """
+
+ # TODO if ids == None and hasattr(self, 'ids'): ids = self.ids
+ key = None if (ids is None) else self.key
+ rows = []
+
+ for row_id in ids or [None]:
+ if raw:
+ rows.append(json.dumps(self.music.read_row(self.keyspace, self.table(), key, row_id), sort_keys=True, indent=4))
+ continue
+
+ result = self.music.read_row(self.keyspace, self.table(), key, row_id)["result"]
+
+ # strip "Row n"
+ for _, data in sorted(result.iteritems()):
+ rows.append(data)
+
+ if raw:
+ return rows
+
+ if len(rows) == 1:
+ rows = rows[0] # one row? not a list
+
+ return rows
+
+ def read(self, ids=None, json_file=None, raw=False, rows=None, names=None):
+ """ read rows (array or single dict) to stdout or to a file """
+
+ if rows is None:
+ rows = self.get_rows(ids, raw)
+
+ if raw:
+ if names:
+ print "\n" + self.table()
+ for row in rows:
+ print row
+ return
+
+ if isinstance(rows, list):
+ for row in rows:
+ if not ("timestamp" in row or "expire_time" in row):
+ break
+ for key in (["timestamp", "expire_time"]):
+ if (not (key in row)) or (row[key] is None):
+ continue
+ try:
+ row[key] = row[key] + self.utc2local(float(row[key])/1000)
+ except ValueError:
+ row[key] = "Error: "+ row[key]
+
+ else:
+ row = rows
+ for key in (["timestamp", "expire_time"]):
+ if (not (key in row)) or (row[key] is None):
+ continue
+ try:
+ row[key] = row[key] + self.utc2local(float(row[key])/1000)
+ except ValueError:
+ row[key] = "Error: "+ row[key]
+
+ if json_file is None:
+ if names:
+ print "\n" + self.table()
+ print json.dumps(rows, sort_keys=True, indent=4)
+ return
+
+ fh = open(json_file, "w")
+ fh.write(json.dumps(rows, sort_keys=True, indent=4))
+ fh.close()
+
+ def create(self, json_file=None):
+ """ add records from a file to db """
+
+ if json_file and os.path.exists(json_file):
+ inf = open(json_file)
+ f = json_file
+ else:
+ inf = sys.stdin
+ f = "stdin"
+
+ self.logger.info("Create " + self.table() + " from: " + f)
+ self.insert(json.loads(inf.read()))
+
+ def insert(self, data):
+ """ add records """
+
+ self.logger.debug(data)
+
+ if isinstance(data, list):
+ for row in data:
+ if "timestamp" in row:
+ row['timestamp'] = int(round(time.time() * 1000))
+ self.music.create_row(self.keyspace, self.table(), row)
+ else:
+ row = data
+ if "timestamp" in row:
+ row['timestamp'] = int(round(time.time() * 1000))
+ self.music.create_row(self.keyspace, self.table(), row)
+
+ def create_table(self):
+ """ create table """
+
+ self.logger.info(self.schema)
+ self.music.create_table(self.keyspace, self.table(), self.schema)
+
+ def update(self, json_file):
+ """Update a row. Not atomic."""
+
+ self.logger.info("Update " + self.table() + " from: " + json_file)
+ data = json.loads(open(json_file).read())
+ self.logger.debug(data)
+
+ if isinstance(data, list):
+ for row in data:
+ self.music.update_row_eventually(self.keyspace, self.table(), row)
+ else:
+ self.music.update_row_eventually(self.keyspace, self.table(), data)
+
+ def delete(self, ids):
+ """ delete records db based on id """
+
+ for row_id in ids:
+ self.logger.info("Delete from" + self.table() + " id: " + row_id)
+ self.music.delete_row_eventually(self.keyspace, self.table(), self.key, row_id)
+
+ def clean_table(self):
+ """ delete all records in table """
+
+ ids = []
+ rows = self.get_rows()
+
+ if isinstance(rows, list):
+ for row in rows:
+ ids.append(row[self.key])
+ else:
+ row = rows
+ ids.append(row[self.key])
+
+ for row_id in ids:
+ self.music.delete_row_eventually(self.keyspace, self.table(), self.key, row_id)
+
+# Subclasses of Tables:
+
+
+class Requests(Tables):
+ alias = ["req", "q"]
+ key = "request_id"
+ schema = json.loads('{ "request_id": "text", "timestamp": "text", "request": "text", "PRIMARY KEY": "(request_id)" }')
+
+ def __init__(self, music, logger):
+ Tables.__init__(self, music, logger)
+ self.key = Requests.key
+
+
+class Results(Tables):
+ alias = ["resu", "u"]
+ key = "request_id"
+ schema = json.loads('{ "request_id": "text", "status": "text", "timestamp": "text", "result": "text", "PRIMARY KEY": "(request_id)" }')
+
+ def __init__(self, music, logger):
+ Tables.__init__(self, music, logger)
+ self.key = Results.key
+
+class Group_rules(Tables):
+ alias = ["rule", "gr"]
+ key = "id"
+ schema = json.loads('{ "id": "text", "app_scope": "text", "type": "text", "level": "text", "members": "text", "description": "text", "groups": "text", "status": "text", "timestamp": "text", "PRIMARY KEY": "(id)" }')
+
+ def __init__(self, music, logger):
+ Tables.__init__(self, music, logger)
+ self.key = Group_rules.key
+
+
+class Stacks(Tables):
+ alias = ["stack", "s"]
+ key = "id"
+ schema = json.loads('{ "id": "text", "last_status": "text", "datacenter": "text", "stack_name": "text", "uuid": "text", "tenant_id": "text", "metadata": "text", "servers": "text", "prior_servers": "text", "state": "text", "prior_State": "text", "timestamp": "text", "PRIMARY KEY": "(id)" }')
+
+ def __init__(self, music, logger):
+ Tables.__init__(self, music, logger)
+ self.key = Stacks.key
+
+
+class Stack_id_map(Tables):
+ alias = ["map", "m"]
+ key = "request_id"
+ schema = json.loads('{ "request_id": "text", "stack_id": "text", "timestamp": "text", "PRIMARY KEY": "(request_id)" }')
+
+ def __init__(self, music, logger):
+ Tables.__init__(self, music, logger)
+ self.key = Stack_id_map.key
+
+
+class Resources(Tables):
+ alias = ["reso", "o"]
+ key = "id"
+ schema = json.loads('{ "id": "text", "url": "text", "resource": "text", "timestamp": "text", "requests": "text", "PRIMARY KEY": "(id)" }')
+
+ def __init__(self, music, logger):
+ Tables.__init__(self, music, logger)
+ self.key = Resources.key
+
+
+class Regions(Tables):
+ alias = ["reg", "i", "lock"]
+ key = "region_id"
+ schema = json.loads('{ "region_id ": "text", "timestamp": "text", "last_updated ": "text", "keystone_url": "text", "locked_by": "text", "locked_time ": "text", "expire_time": "text", "PRIMARY KEY": "(region_id)" }')
+
+ def __init__(self, music, logger):
+ Tables.__init__(self, music, logger)
+ self.key = Regions.key
+
+
+class Groups(Tables):
+ alias = ["group", "g"]
+ key = "id"
+ schema = json.loads('{ "id ": "text", "uuid": "text", "type ": "text", "level": "text", "factory": "text", "rule_id ": "text", "metadata ": "text", "server_list": "text", "member_hosts": "text", "status": "text", "PRIMARY KEY": "(id)" }')
+
+ def __init__(self, music, logger):
+ Tables.__init__(self, music, logger)
+ self.key = Groups.key
+
+
+if __name__ == "__main__":
+
+ print Tables.option_choices()
diff --git a/engine/src/tools/lock.py b/engine/src/tools/lock.py
new file mode 100644
index 0000000..a77eb8a
--- /dev/null
+++ b/engine/src/tools/lock.py
@@ -0,0 +1,92 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+#!/usr/bin/env python2.7
+
+
+import argparse
+import json
+import os
+import sys
+
+import lib.tables as tables
+from lib.song import Song
+
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+from valet.engine.db_connect.db_handler import DBHandler
+from valet.engine.db_connect.locks import Locks, later
+from valet.utils.logger import Logger
+
+
def options():
    """Build the CLI for manual locking/unlocking of Valet regions.

    Returns the parsed argparse namespace.
    """
    parser = argparse.ArgumentParser(description='\033[34mManual Locking And Unlocking Of Valet Regions.\033[0m', add_help=False)
    # Common Music/Song connection arguments.
    Song.add_args(parser)

    lock_group = parser.add_argument_group('Control Music Locks')
    lock_excl = lock_group.add_mutually_exclusive_group()
    lock_excl.add_argument('-unlock', metavar="region", help='unlock this region')
    lock_excl.add_argument('-lock', metavar="region", help='lock this region')

    table_group = parser.add_argument_group('Update Locks In Region Table')
    table_excl = table_group.add_mutually_exclusive_group()
    table_excl.add_argument('-delete', metavar="region", help='delete region from table')
    table_excl.add_argument('-add', metavar="region", help='update/add region to table')
    table_group.add_argument('-timeout', metavar="seconds", help='seconds till region expires (for -add)')

    output_group = parser.add_argument_group("Change The Output")
    output_group.add_argument("-?", "--help", action="help", help="show this help message and exit")
    output_group.add_argument('-show', action='store_true', help='print out regions (locking) table')

    return parser.parse_args()
+
+
# MAIN
if __name__ == "__main__":
    opts = options()

    logger = Logger().get_logger('console')
    config = json.loads(open(opts.config).read())
    music_config = config.get("music")
    # NOTE: mutates config["music"] in place; Song and DBHandler below read
    # the keyspace through the shared config dict.
    music_config["keyspace"] = config.get("db")["keyspace"]
    music = Song(opts, config, logger)
    dbh = DBHandler(music, config.get("db"), logger)

    # The four actions below are independent; several may run in one call.
    if opts.add:
        # Expiration defaults to the engine-wide timeout from config.
        timeout = opts.timeout if opts.timeout else config["engine"]["timeout"]
        dbh.add_region(opts.add, later(seconds=int(timeout)))
        logger.debug("added region to table")

    if opts.lock:
        # Locks(...).got_lock() returns "ok" when the lock was acquired.
        if Locks(dbh, 0).got_lock(opts.lock) == "ok":
            logger.debug("added region lock")
        else:
            logger.debug("failed to add region lock")

    if opts.unlock:
        Locks.unlock(dbh, opts.unlock)
        logger.debug("deleted region lock '%s'" % opts.unlock)

    if opts.delete:
        dbh.delete_region(opts.delete)
        logger.debug("deleted region from table")

    if opts.show:
        # Point the tools' Table API at the engine keyspace and dump rows.
        music.keyspace = dbh.keyspace
        tables.Regions(music, logger).read(names=True)

    sys.exit(0)
diff --git a/engine/src/tools/ppdb.py b/engine/src/tools/ppdb.py
new file mode 100644
index 0000000..c89f0f2
--- /dev/null
+++ b/engine/src/tools/ppdb.py
@@ -0,0 +1,91 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+#!/usr/bin/env python2.7
+
+
+import sys
+import os
+import json
+import argparse
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description='parse results.')
+ parser.add_argument('-f', '--file', help='json file required for create/view')
+ parser.add_argument('-verbose', action='store_true', help='more output than you want')
+ opts = parser.parse_args()
+
+ if opts.file and os.path.exists(opts.file):
+ inf = open(opts.file)
+ else:
+ inf = sys.stdin
+
+ results = json.loads(inf.read())
+ if opts.verbose:
+ print (json.dumps(results, sort_keys=True, indent=4))
+ print "---------------------------------------------"
+
+ if "request" in results.keys():
+ key = "request"
+ result = json.loads(results[key])
+ print (json.dumps(result, sort_keys=True, indent=4))
+ sys.exit(0)
+
+ if "result" in results.keys():
+ result = results["result"]
+
+ if not isinstance(result, list):
+ sys.stdout.write("result ")
+ sys.stdout.flush()
+ result = json.loads(result)
+ print (json.dumps(result, sort_keys=True, indent=4))
+ sys.exit(0)
+
+ for _, row in result.iteritems():
+ rr = json.loads(row["result"])
+
+ # for k, d in row.iteritems():
+ # print ("%s) %s"% (k, d))
+
+ sys.stdout.write("result ")
+ sys.stdout.flush()
+ print json.dumps(rr, indent=4)
+ # for f in rr:
+ # for line in (json.dumps(f, sort_keys=True, indent=4)).splitlines():
+ # print "\t%s"%line
+ # print "}"
+
+ sys.exit(0)
+
+ if "resource" in results.keys():
+ key = "resource"
+ result = json.loads(results[key])
+ print (json.dumps(result, sort_keys=True, indent=4))
+
+ if not isinstance(result, list):
+ sys.stdout.write("resource ")
+ sys.stdout.flush()
+ result = json.loads(result)
+ print (json.dumps(result, sort_keys=True, indent=4))
+ sys.exit(0)
+
+ print (json.dumps(result, sort_keys=True, indent=4))
+ sys.exit(0)
+
+ print (results.keys())
diff --git a/engine/src/valet/__init__.py b/engine/src/valet/__init__.py
new file mode 100644
index 0000000..bd50995
--- /dev/null
+++ b/engine/src/valet/__init__.py
@@ -0,0 +1,18 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
diff --git a/engine/src/valet/bootstrapper.py b/engine/src/valet/bootstrapper.py
new file mode 100644
index 0000000..08bc41b
--- /dev/null
+++ b/engine/src/valet/bootstrapper.py
@@ -0,0 +1,121 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import glob
+import json
+import os
+import sys
+
+from valet.engine.app_manager.app_handler import AppHandler
+from valet.engine.db_connect.db_apis.music import Music
+from valet.engine.db_connect.db_handler import DBHandler
+from valet.engine.db_connect.locks import Locks
+from valet.engine.resource_manager.compute_manager import ComputeManager
+from valet.engine.resource_manager.metadata_manager import MetadataManager
+from valet.engine.resource_manager.naming import Naming
+from valet.engine.resource_manager.nova_compute import NovaCompute
+from valet.engine.resource_manager.resource_handler import ResourceHandler
+from valet.engine.resource_manager.topology_manager import TopologyManager
+from valet.engine.search.optimizer import Optimizer
+
+
class Bootstrapper(object):
    """Bootstrap valet-engine.

    Instantiate and configure all valet-engine sub-systems.
    """

    def __init__(self, _config, _logger):
        """Hold config/logger; actual wiring happens in config_valet().

        :param _config: parsed engine configuration dict
        :param _logger: shared logger
        """
        self.config = _config
        self.logger = _logger

        # Engine identity, read from config in config_valet().
        self.valet_id = None

        # Sub-systems, all wired up in config_valet().
        self.dbh = None        # DB handler
        self.ah = None         # application handler
        self.rh = None         # resource handler
        self.compute = None    # compute manager
        self.topology = None   # topology manager
        self.metadata = None   # metadata manager
        self.optimizer = None  # placement optimizer

        # Region lock coordinating datacenters handled in parallel.
        self.lock = None

    def config_valet(self):
        """Set all required modules and configure them.

        Returns True on success; failures in the helpers raise.
        """

        self.valet_id = self.config["engine"]["id"]

        # Set DB connection.
        db_config = self.config.get("db")
        self.logger.info("launch engine -- keyspace: %s" % db_config.get("keyspace"))
        db = Music(self.config, self.logger)

        self.dbh = DBHandler(db, db_config, self.logger)

        # Set lock to deal with datacenters in parallel.
        self.lock = Locks(self.dbh, self.config["engine"]["timeout"])

        # Set backend platform connection.
        # (Fix: dropped an unused local that read self.config["topology"]
        # and was never referenced.)
        compute_config = self.config.get("compute")
        compute_source = NovaCompute(self.config, self.logger)

        topology_source = Naming(self.config.get("naming"), self.logger)

        self.compute = ComputeManager(compute_source, self.logger)
        self.topology = TopologyManager(topology_source, self.logger)
        self.metadata = MetadataManager(compute_source, self.logger)

        # Set resource handler.
        self.rh = ResourceHandler("ResourceHandler",
                                  self.dbh,
                                  self.compute,
                                  self.metadata,
                                  self.topology,
                                  compute_config,
                                  self.logger)

        # DHA flag may arrive as a boolean or the literal string "false".
        # NOTE(review): any other truthy spelling ("False", "FALSE") leaves
        # use_dha True -- confirm the accepted config spellings.
        dha = self.config["engine"]["dha"]
        use_dha = True
        if dha == "false" or not dha:
            use_dha = False

        # Set application handler.
        self.ah = AppHandler(self.dbh, use_dha, self.logger)

        # Set optimizer for placement decisions.
        self.optimizer = Optimizer(self.logger)

        # Read initial Valet Group rules and create them in the DB.
        root = os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))
        for rule_file in glob.glob(root + "/valet/rules/" + "*.json"):
            rule = json.loads(open(rule_file).read())
            self.dbh.create_group_rule(
                rule["name"],
                rule["app_scope"],
                rule["type"],
                rule["level"],
                rule["members"],
                rule["description"]
            )

            self.logger.debug("rule (" + rule["name"] + ") created")

        return True
diff --git a/engine/src/valet/engine/__init__.py b/engine/src/valet/engine/__init__.py
new file mode 100644
index 0000000..bd50995
--- /dev/null
+++ b/engine/src/valet/engine/__init__.py
@@ -0,0 +1,18 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
diff --git a/engine/src/valet/engine/app_manager/__init__.py b/engine/src/valet/engine/app_manager/__init__.py
new file mode 100644
index 0000000..bd50995
--- /dev/null
+++ b/engine/src/valet/engine/app_manager/__init__.py
@@ -0,0 +1,18 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
diff --git a/engine/src/valet/engine/app_manager/app.py b/engine/src/valet/engine/app_manager/app.py
new file mode 100644
index 0000000..de6fb8f
--- /dev/null
+++ b/engine/src/valet/engine/app_manager/app.py
@@ -0,0 +1,716 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import copy
+import json
+import re
+import six
+
+from valet.engine.app_manager.app_parser import Parser
+from valet.engine.app_manager.group import Group
+from valet.engine.app_manager.server import Server
+
+
class App(object):
    """Container to deliver the status of request."""

    def __init__(self, _req_id, _use_dha, _logger):
        """Create an empty app for one request.

        :param _req_id: id of the request being processed
        :param _use_dha: whether DHA (exclusivity) rules apply
        :param _logger: shared logger
        """

        self.last_req_id = _req_id

        self.datacenter_id = None

        # stack_id given from the platform (e.g., OpenStack)
        self.app_id = None

        # stack_name as datacenter_id + ":" + tenant_id + ":" + vf_module_name
        self.app_name = None

        self.tenant_id = None

        # Info for group rule scope
        self.service_instance_id = None
        self.vnf_instance_id = None
        self.vnf_instance_name = None

        # Stack resources. key = server's orch_id
        self.stack = {}

        # Datacenter resource view, attached later via set_resource().
        self.resource = None

        self.logger = _logger

        self.parser = Parser(self.logger)

        self.groups = {}  # all valet groups used in this app
        self.servers = {}  # requested servers (e.g., VMs)

        # Store prior server info.
        self.prior_servers = {}

        # "ok", an error message, or an "na: ..." (not applicable) note.
        self.status = "ok"

        # Request life-cycle state (plan/create/update/delete/confirm/...).
        self.state = "plan"

        self.prior_state = "unknown"

        # Check if do not want rollback for create
        self.suppress_rollback = False

        # For placement optimization
        self.total_CPU = 0
        self.total_mem = 0
        self.total_local_vol = 0
        self.optimization_priority = None

        self.use_dha = _use_dha
+
    def set_resource(self, _resource):
        # Attach the datacenter resource view used for validation/weights.
        self.resource = _resource
+
    def init_for_create(self, _req):
        """Validate and init app info based on the given request.

        On any missing/None field, sets self.status to an explanatory
        message and returns immediately; self.status stays "ok" on success.
        """

        self.state = "create"

        # datacenter id and url are both mandatory.
        if "datacenter" in _req.keys():
            if "id" in _req["datacenter"].keys():
                if _req["datacenter"]["id"] is not None:
                    self.datacenter_id = _req["datacenter"]["id"].strip()
                else:
                    self.status = "datacenter_id is None"
                    return
            else:
                self.status = "no datacenter_id in request"
                return

            # The url is only validated here; it is not stored on the app.
            if "url" in _req["datacenter"].keys():
                if _req["datacenter"]["url"] is not None:
                    if not self._match_url(_req["datacenter"]["url"].strip()):
                        self.status = "mal-formed url"
                        return
                else:
                    self.status = "url is None"
                    return
            else:
                self.status = "no url in request"
                return
        else:
            self.status = "no datacenter info in request"
            return

        if "tenant_id" in _req.keys():
            if _req["tenant_id"] is not None:
                self.tenant_id = _req["tenant_id"]
            else:
                self.status = "tenant_id is None"
                return
        else:
            self.status = "no tenant_id in request"
            return

        if "service_instance_id" in _req.keys():
            if _req["service_instance_id"] is not None:
                self.service_instance_id = _req["service_instance_id"]
            else:
                self.status = "service_id is None"
                return
        else:
            self.status = "no service_instance_id in request"
            return

        if "vnf_instance_id" in _req.keys():
            if _req["vnf_instance_id"] is not None:
                self.vnf_instance_id = _req["vnf_instance_id"]
            else:
                self.status = "vnf_id is None"
                return
        else:
            self.status = "no vnf_instance_id in request"
            return

        if "vnf_instance_name" in _req.keys():
            if _req["vnf_instance_name"] is not None:
                self.vnf_instance_name = _req["vnf_instance_name"]
            else:
                self.status = "vnf_name is None"
                return
        else:
            self.status = "no vnf_instance_name in request"
            return

        # app_name follows the datacenter:tenant:stack_name convention.
        if "stack_name" in _req.keys():
            if _req["stack_name"] is not None:
                self.app_name = self.datacenter_id + ":" + self.tenant_id + ":" + _req["stack_name"].strip()
            else:
                self.status = "stack_name is None"
                return
        else:
            self.status = "no stack_name in request"
            return

        # _validate_stack may also set an "na: ..." status for stacks
        # Valet should not handle.
        if "stack" in _req.keys():
            self.stack = self._validate_stack(_req["stack"])
        else:
            self.status = "no stack in request"
            return
+
    def init_for_delete(self, _req):
        """Validate and init app info for marking delete.

        Only datacenter id, tenant_id and stack_name are required; the
        stack contents are taken from the prior record (init_prior_app).
        """

        self.state = "delete"

        if "datacenter" in _req.keys():
            if "id" in _req["datacenter"].keys():
                if _req["datacenter"]["id"] is not None:
                    self.datacenter_id = _req["datacenter"]["id"].strip()
                else:
                    self.status = "datacenter_id is None"
                    return
            else:
                self.status = "no datacenter_id in request"
                return
        else:
            self.status = "no datacenter info in request"
            return

        if "tenant_id" in _req.keys():
            if _req["tenant_id"] is not None:
                self.tenant_id = _req["tenant_id"]
            else:
                self.status = "tenant_id is None"
                return
        else:
            self.status = "no tenant_id in request"
            return

        # app_name follows the datacenter:tenant:stack_name convention.
        if "stack_name" in _req.keys():
            if _req["stack_name"] is not None:
                self.app_name = self.datacenter_id + ":" + self.tenant_id + ":" + _req["stack_name"].strip()
            else:
                self.status = "stack_name is None"
                return
        else:
            self.status = "no stack_name in request"
            return
+
+ def init_for_confirm(self, _req):
+ """Validate and init app info for confirm."""
+
+ # Tempoary state and will depends on prior state
+ self.state = "confirm"
+
+ if "stack_id" in _req.keys():
+ if _req["stack_id"] is not None:
+ stack_id = _req["stack_id"].strip()
+
+ stack_id_elements = stack_id.split('/', 1)
+ if len(stack_id_elements) > 1:
+ self.app_id = stack_id_elements[1]
+ else:
+ self.app_id = stack_id
+
+ self.logger.debug("stack_id = " + self.app_id)
+
+ else:
+ self.status = "null stack_id in request"
+ return
+ else:
+ self.status = "no stack_id in request"
+ return
+
    def init_for_rollback(self, _req):
        """Validate and init app info for rollback.

        Sets a temporary "rollback" state; the final state is resolved
        later from the prior state (see init_prior_app).
        """

        self.state = "rollback"

        if "stack_id" in _req.keys():
            if _req["stack_id"] is not None:
                stack_id = _req["stack_id"].strip()

                # stack_id may arrive as "<name>/<uuid>"; keep the uuid part.
                stack_id_elements = stack_id.split('/', 1)
                if len(stack_id_elements) > 1:
                    self.app_id = stack_id_elements[1]
                else:
                    self.app_id = stack_id

                self.logger.debug("stack_id = " + self.app_id)
            else:
                # If the stack fails, stack_id can be null.
                self.app_id = "none"

                self.logger.debug("stack_id = None")
        else:
            self.status = "no stack_id in request"
            return

        # When set, a rollback of a create keeps the stack (see init_prior_app).
        if "suppress_rollback" in _req.keys():
            self.suppress_rollback = _req["suppress_rollback"]

        if "error_message" in _req.keys():
            # TODO(Gueyoung): analyze the error message.

            if _req["error_message"] is None:
                self.logger.warning("error message from platform: none")
            else:
                self.logger.warning("error message from platform:" + _req["error_message"])
+
    def init_prior_app(self, _prior_app):
        """Init with the prior app record loaded from the DB.

        Resolves a temporary "confirm"/"rollback" state into a final
        "created"/"deleted" state based on the prior state.
        """

        self.datacenter_id = _prior_app.get("datacenter")

        self.app_name = _prior_app.get("stack_name")

        if _prior_app["uuid"] != "none":
            # Delete case.
            # (uuid stays "none" when the platform never assigned a stack
            # id, e.g., a failed create -- see init_for_rollback.)
            self.app_id = _prior_app.get("uuid")

        self.tenant_id = _prior_app.get("tenant_id")

        # Group-rule scope info is stored JSON-serialized in the record.
        metadata = json.loads(_prior_app.get("metadata"))
        self.service_instance_id = metadata.get("service_instance_id")
        self.vnf_instance_id = metadata.get("vnf_instance_id")
        self.vnf_instance_name = metadata.get("vnf_instance_name")

        self.servers = json.loads(_prior_app.get("servers"))

        prior_state = _prior_app.get("state")

        if self.state == "confirm":
            if prior_state == "create":
                self.state = "created"
            elif prior_state == "delete":
                self.state = "deleted"
        elif self.state == "rollback":
            if prior_state == "create":
                # Keep the stack when rollback is suppressed and it exists.
                if self.suppress_rollback and self.app_id != "none":
                    self.state = "created"
                else:
                    self.state = "deleted"

            # NOTE(review): deliberately a separate "if", not elif -- a
            # rollback of a delete always becomes "created".
            if prior_state == "delete":
                self.state = "created"
        elif self.state == "delete":
            # Remember what was deployed before the delete.
            self.prior_state = prior_state
            self.prior_servers = copy.deepcopy(self.servers)
        else:
            self.status = "unknown state"
+
    def _validate_stack(self, _stack):
        """Check if the stack is for Valet to make decision.

        And, check if the format is correct.

        Returns the filtered dict of OS::Nova::Server resources, {} with an
        "na: ..." status when Valet should not handle the stack, or None
        with an error status when the stack is malformed.
        """

        if len(_stack) == 0 or "resources" not in _stack.keys():
            self.status = "na: no resource in stack"
            self.logger.warning("non-applicable to valet: no resource in stack")
            return {}

        stack = {}

        # Keep only well-formed OS::Nova::Server resources.
        for rk, r in _stack["resources"].iteritems():
            if "type" not in r.keys():
                self.status = "type key is missing in stack"
                return None

            if r["type"] == "OS::Nova::Server":
                if "properties" not in r.keys():
                    self.status = "properties key is missing in stack"
                    return None

                if "name" not in r["properties"].keys():
                    self.status = "name property is missing in stack"
                    return None

                if r["properties"]["name"] is None:
                    self.status = "name property is none"
                    return None

                if "flavor" not in r["properties"].keys():
                    self.status = "flavor property is missing in stack"
                    return None

                if r["properties"]["flavor"] is None:
                    self.status = "flavor property is none"
                    return None

                stack[rk] = r

        if len(stack) == 0:
            self.status = "na: no server resource in stack"
            self.logger.warning("non-applicable to valet: no server resource in stack")
            return {}

        # NOTE(review): dict.keys()[0] is Python-2-only (keys() is a list)
        # and picks an arbitrary "first" resource.
        first_resource = stack[stack.keys()[0]]
        apply_valet = False

        # To apply Valet decision, availability_zone must exist.
        # And its value contains host variable as a list element.
        if "availability_zone" in first_resource["properties"].keys():
            az_value = first_resource["properties"]["availability_zone"]
            if isinstance(az_value, list):
                apply_valet = True

        # All servers must agree with the first one: either every server
        # has a list-form availability_zone (Valet applies) or none does.
        for rk, r in stack.iteritems():
            if apply_valet:
                if "availability_zone" not in r["properties"].keys():
                    self.status = "az is missing in stack for valet"
                    return None
                else:
                    az_value = r["properties"]["availability_zone"]
                    if not isinstance(az_value, list):
                        self.status = "host variable is missing in stack for valet"
                        return None

                    # Assumes at least two elements; [0] appears to be the
                    # AZ (see init_valet_groups) and [1] the host variable
                    # -- TODO confirm the expected list shape.
                    if az_value[0] in ("none", "None") or az_value[1] in ("none", "None"):
                        self.status = "az value is missing in stack"
                        return None
            else:
                if "availability_zone" in r["properties"].keys():
                    az_value = r["properties"]["availability_zone"]
                    if isinstance(az_value, list):
                        self.status = "host variable exists in stack for non-valet application"
                        return None

        if not apply_valet:
            self.status = "na: pass valet"
            self.logger.warning("non-applicable to valet")
            return {}
        else:
            return stack
+
    def init_valet_groups(self):
        """Create Valet groups from input request.

        Reads "valet_groups" rules from each server's metadata and "group"
        scheduler hints, validating rules against the resource view; any
        failure is recorded in self.status and stops processing.
        """

        for rk, r in self.stack.iteritems():
            properties = r.get("properties", {})
            metadata = properties.get("metadata", {})

            if len(metadata) > 0:
                valet_rules = metadata.get("valet_groups", None)

                if valet_rules is not None and valet_rules != "":
                    rule_list = []
                    # Rules arrive as a comma-separated string.
                    if isinstance(valet_rules, six.string_types):
                        rules = valet_rules.split(",")
                        for gr in rules:
                            rule_list.append(gr.strip())
                    else:
                        self.status = "incorrect valet group metadata format"
                        self.logger.error(self.status)
                        return

                    # Check rule validation of valet_groups.
                    self.status = self.resource.check_valid_rules(self.tenant_id,
                                                                  rule_list,
                                                                  use_ex=self.use_dha)
                    if self.status != "ok":
                        self.logger.error(self.status)
                        return

                    # availability_zone[0] is the AZ used in group naming.
                    self.status = self._make_valet_groups(properties.get("name"),
                                                          properties["availability_zone"][0],
                                                          rule_list)
                    if self.status != "ok":
                        self.logger.error(self.status)
                        return

            # Check and create server groups if they do not exist.
            scheduler_hints = properties.get("scheduler_hints", {})
            if len(scheduler_hints) > 0:
                for hint_key in scheduler_hints.keys():
                    if hint_key == "group":
                        hint = scheduler_hints[hint_key]
                        self.status = self._make_group(properties.get("name"), hint)
                        if self.status != "ok":
                            self.logger.error(self.status)
                            return
+
    def _make_valet_groups(self, _rk, _az, _rule_list):
        """Create Valet groups that each server is involved.

        :param _rk: server resource name appended to group membership
        :param _az: availability zone used in the group id
        :param _rule_list: validated rule names from metadata
        :returns: "ok" or an error string
        """

        for rn in _rule_list:
            rule = self.resource.group_rules[rn]

            # Valet group naming convention.
            # It contains datacenter id and availability_zone
            # followed by service id and vnf id
            # depending on scope.
            # And concatenate rule name.
            # Exception: quorum-diversity
            group_id = self.datacenter_id + ":"

            if rule.rule_type != "quorum-diversity":
                group_id += _az + ":"

            if rule.app_scope == "lcp":
                group_id += rn
            elif rule.app_scope == "service":
                group_id += self.service_instance_id + ":" + rn
            elif rule.app_scope == "vnf":
                group_id += self.service_instance_id + ":" + self.vnf_instance_id + ":" + rn
            else:
                return "unknown app_scope value"

            # Reuse an existing group with the same id, otherwise create it.
            if group_id in self.groups.keys():
                group = self.groups[group_id]
            else:
                group = Group(group_id)
                group.group_type = rule.rule_type
                group.factory = "valet"
                group.level = rule.level

                self.groups[group_id] = group

            # Members are tracked as "<app_name>:<resource key>".
            group.server_list.append(self.app_name + ":" + _rk)

        return "ok"
+
    def _make_group(self, _rk, _group_hint):
        """Create the group request based on scheduler hint.

        :param _rk: server resource name appended to group membership
        :param _group_hint: either an inline OS::Nova::ServerGroup resource
            (dict) or a group uuid string
        :returns: "ok" or an error string
        """

        if isinstance(_group_hint, dict):
            # _group_hint is a single key/value pair
            # NOTE(review): dict.keys()[0] is Python-2-only syntax.
            g = _group_hint[_group_hint.keys()[0]]

            r_type = g.get("type", "none")
            if r_type != "OS::Nova::ServerGroup":
                return "support only ServerGroup resource"

            properties = g.get("properties", {})
            if len(properties) == 0:
                return "no properties"

            group_name = properties.get("name", None)
            if group_name is None:
                return "no group name"
            group_name = group_name.strip()

            policies = properties.get("policies", [])
            if len(policies) == 0:
                return "no policy of the group"

            if len(policies) > 1:
                return "multiple policies"

            # TODO: exclude soft-affinity and soft-anti-affinity?

            if group_name in self.groups.keys():
                group = self.groups[group_name]
            else:
                group = Group(group_name)

                # Nova "anti-affinity" maps to Valet's "diversity" type.
                policy = policies[0].strip()
                if policy == "anti-affinity":
                    group_type = "diversity"
                else:
                    group_type = policy

                group.group_type = group_type
                group.factory = "server-group"
                group.level = "host"

                self.groups[group_name] = group
        else:
            # group hint is uuid string.
            rg = self.resource.get_group_by_uuid(_group_hint)
            if rg is None:
                return "unknown group found while making group"

            # TODO: exclude soft-affinity and soft-anti-affinity?

            if rg.name in self.groups.keys():
                group = self.groups[rg.name]
            else:
                group = Group(rg.name)

                group.group_type = rg.group_type
                group.factory = rg.factory
                group.level = "host"

                self.groups[rg.name] = group

        # Members are tracked as "<app_name>:<resource key>".
        if group is not None:
            group.server_list.append(self.app_name + ":" + _rk)

        return "ok"
+
+ def parse(self):
+ """Extract servers and merge groups from stack for search."""
+
+ (self.servers, self.groups) = self.parser.set_servers(self.app_name,
+ self.stack,
+ self.groups)
+
+ if len(self.servers) == 0 and len(self.groups) == 0:
+ self.status = "parse error for " + self.app_name + ": " + self.parser.status
+ return False
+
+ return True
+
    def set_weight(self):
        """Set relative weight of each servers and groups."""

        # Per-server weights first (recurses into group members).
        for _, s in self.servers.iteritems():
            self._set_server_weight(s)

        for _, g in self.groups.iteritems():
            self._set_server_weight(g)

        # Then aggregate resource demand bottom-up per affinity group...
        for _, g in self.groups.iteritems():
            self._set_group_resource(g)

        # ...and derive each group's weight from the aggregates.
        for _, g in self.groups.iteritems():
            self._set_group_weight(g)
+
    def _set_server_weight(self, _v):
        """Set relative weight of each server against available resource amount.

        Recurses through Group containers down to Server leaves; also
        accumulates the app-wide totals used by set_optimization_priority.
        """

        if isinstance(_v, Group):
            for _, sg in _v.subgroups.iteritems():
                self._set_server_weight(sg)
        else:
            # Weight saturates to 1.0 when nothing is available.
            if self.resource.CPU_avail > 0:
                _v.vCPU_weight = float(_v.vCPUs) / float(self.resource.CPU_avail)
            else:
                _v.vCPU_weight = 1.0
            self.total_CPU += _v.vCPUs

            if self.resource.mem_avail > 0:
                _v.mem_weight = float(_v.mem) / float(self.resource.mem_avail)
            else:
                _v.mem_weight = 1.0
            self.total_mem += _v.mem

            if self.resource.local_disk_avail > 0:
                _v.local_volume_weight = float(_v.local_volume_size) / float(self.resource.local_disk_avail)
            else:
                if _v.local_volume_size > 0:
                    _v.local_volume_weight = 1.0
                else:
                    _v.local_volume_weight = 0.0
            self.total_local_vol += _v.local_volume_size
+
    def _set_group_resource(self, _g):
        """Sum up amount of resources of servers for each affinity group.

        Recurses depth-first so nested subgroup totals roll up into _g.
        """

        if isinstance(_g, Server):
            return

        for _, sg in _g.subgroups.iteritems():
            self._set_group_resource(sg)
            _g.vCPUs += sg.vCPUs
            _g.mem += sg.mem
            _g.local_volume_size += sg.local_volume_size
+
    def _set_group_weight(self, _group):
        """Set relative weight of each affinity group against available resource amount.

        Weight saturates to 1.0 (or 0.0 for zero demand) when nothing is
        available; recurses into nested subgroups.
        """

        if self.resource.CPU_avail > 0:
            _group.vCPU_weight = float(_group.vCPUs) / float(self.resource.CPU_avail)
        else:
            if _group.vCPUs > 0:
                _group.vCPU_weight = 1.0
            else:
                _group.vCPU_weight = 0.0

        if self.resource.mem_avail > 0:
            _group.mem_weight = float(_group.mem) / float(self.resource.mem_avail)
        else:
            if _group.mem > 0:
                _group.mem_weight = 1.0
            else:
                _group.mem_weight = 0.0

        if self.resource.local_disk_avail > 0:
            _group.local_volume_weight = float(_group.local_volume_size) / float(self.resource.local_disk_avail)
        else:
            if _group.local_volume_size > 0:
                _group.local_volume_weight = 1.0
            else:
                _group.local_volume_weight = 0.0

        for _, sg in _group.subgroups.iteritems():
            if isinstance(sg, Group):
                self._set_group_weight(sg)
+
+ def set_optimization_priority(self):
+ """Determine the optimization priority among different types of resources."""
+
+ if len(self.groups) == 0 and len(self.servers) == 0:
+ return
+
+ if self.resource.CPU_avail > 0:
+ app_cpu_weight = float(self.total_CPU) / float(self.resource.CPU_avail)
+ else:
+ if self.total_CPU > 0:
+ app_cpu_weight = 1.0
+ else:
+ app_cpu_weight = 0.0
+
+ if self.resource.mem_avail > 0:
+ app_mem_weight = float(self.total_mem) / float(self.resource.mem_avail)
+ else:
+ if self.total_mem > 0:
+ app_mem_weight = 1.0
+ else:
+ app_mem_weight = 0.0
+
+ if self.resource.local_disk_avail > 0:
+ app_local_vol_weight = float(self.total_local_vol) / float(self.resource.local_disk_avail)
+ else:
+ if self.total_local_vol > 0:
+ app_local_vol_weight = 1.0
+ else:
+ app_local_vol_weight = 0.0
+
+ opt = [("cpu", app_cpu_weight),
+ ("mem", app_mem_weight),
+ ("lvol", app_local_vol_weight)]
+
+ self.optimization_priority = sorted(opt, key=lambda resource: resource[1], reverse=True)
+
    def reset_servers(self):
        """Get servers back from containers (i.e., affinity groups)"""

        servers = []
        for _, g in self.groups.iteritems():
            g.get_servers(servers)

        for s in servers:
            # s.vid: the server identifier used as the servers-dict key
            # (assigned by the parser -- not visible in this file).
            self.servers[s.vid] = s
+
+ def _match_url(self, _url):
+ """Check if the URL is a correct form."""
+
+ regex = re.compile(
+ r'^(?:http|ftp)s?://' # http:// or https://
+ r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain
+ r'localhost|' # localhost
+ r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
+ r'(?::\d+)?' # optional port
+ r'(?:/?|[/?]\S+)$', re.IGNORECASE)
+
+ if re.match(regex, _url):
+ return True
+ else:
+ return False
diff --git a/engine/src/valet/engine/app_manager/app_handler.py b/engine/src/valet/engine/app_manager/app_handler.py
new file mode 100644
index 0000000..14ef35c
--- /dev/null
+++ b/engine/src/valet/engine/app_manager/app_handler.py
@@ -0,0 +1,307 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import operator
+import time
+
+from valet.engine.app_manager.app import App
+
+
+class AppHistory(object):
+    """Data container for scheduling decisions."""
+
+    def __init__(self, _key):
+        # Request id this decision belongs to.
+        self.decision_key = _key
+        # Final status string of the decision.
+        self.status = None
+        # Placement result payload.
+        self.result = None
+        # Creation time (epoch seconds); used to evict oldest entries.
+        self.timestamp = None
+
+
+class AppHandler(object):
+ """Handler for all requested applications."""
+
+    def __init__(self, _dbh, _use_dha, _logger):
+        """Initializer.
+
+        :param _dbh: DB handler used to read/write stack info.
+        :param _use_dha: flag passed through to each created App.
+        :param _logger: logger instance.
+        """
+
+        self.dbh = _dbh
+
+        # request_id -> AppHistory cache, bounded by max_decision_history;
+        # flushed down to min_decision_history entries when full.
+        self.decision_history = {}
+        self.max_decision_history = 5000
+        self.min_decision_history = 1000
+
+        self.use_dha = _use_dha
+
+        self.logger = _logger
+
+ def validate_for_create(self, _req_id, _req):
+ """Validate create request and return app."""
+
+ app = App(_req_id, self.use_dha, self.logger)
+ app.init_for_create(_req)
+
+ if app.status != "ok" and not app.status.startswith("na:"):
+ self.logger.error(app.status)
+ else:
+ self.logger.info("got 'create' for app = " + app.app_name)
+
+ return app
+
+    def validate_for_update(self, _req_id, _req):
+        """Validate update request and return app.
+
+        Shares the create-validation path, then marks the app state "update".
+        """
+
+        app = App(_req_id, self.use_dha, self.logger)
+
+        # Use create validation module.
+        app.init_for_create(_req)
+        app.state = "update"
+
+        if app.status != "ok" and not app.status.startswith("na:"):
+            self.logger.error(app.status)
+        else:
+            self.logger.info("got 'update' for app = " + app.app_name)
+
+        return app
+
+    def validate_for_delete(self, _req_id, _req):
+        """Validate delete request and return app.
+
+        Returns None when the DB handler returns None; otherwise an App
+        whose status records any validation problem.
+        """
+
+        app = App(_req_id, self.use_dha, self.logger)
+        app.init_for_delete(_req)
+
+        if app.status != "ok":
+            self.logger.error(app.status)
+            return app
+
+        prior_app = self.dbh.get_stack(app.app_name)
+        if prior_app is None:
+            # DB handler signalled failure — propagate as None.
+            return None
+
+        if len(prior_app) == 0:
+            app.status = "na: no prior request via valet"
+            return app
+
+        # Once taking prior app, valet determines placements or error.
+        app.init_prior_app(prior_app)
+
+        self.logger.info("got 'delete' for app = " + app.app_name)
+
+        return app
+
+    def validate_for_confirm(self, _req_id, _req):
+        """Validate confirm request and return app.
+
+        Returns None when the DB handler returns None; otherwise an App
+        whose status records any validation problem.
+        """
+
+        app = App(_req_id, self.use_dha, self.logger)
+        app.init_for_confirm(_req)
+
+        if app.status != "ok":
+            self.logger.error(app.status)
+            return app
+
+        # Map the original request id back to the Heat stack it created.
+        stack_id_map = self.dbh.get_stack_id_map(app.last_req_id)
+        if stack_id_map is None:
+            return None
+
+        if len(stack_id_map) == 0:
+            app.status = "na: not handled request via valet"
+            return app
+
+        prior_app = self.dbh.get_stack(stack_id_map.get("stack_id"))
+        if prior_app is None:
+            return None
+
+        if len(prior_app) == 0:
+            app.status = "cannot find prior stack info"
+            return app
+
+        # Once taking prior app, valet determines placements or error.
+        app.init_prior_app(prior_app)
+
+        self.logger.info("got 'confirm' for app = " + app.app_name)
+
+        return app
+
+    def validate_for_rollback(self, _req_id, _req):
+        """Validate rollback request and return app.
+
+        Returns None when the DB handler returns None; otherwise an App
+        whose status records any validation problem.
+        """
+
+        app = App(_req_id, self.use_dha, self.logger)
+        app.init_for_rollback(_req)
+
+        if app.status != "ok":
+            self.logger.error(app.status)
+            return app
+
+        # Map the original request id back to the Heat stack it created.
+        stack_id_map = self.dbh.get_stack_id_map(app.last_req_id)
+        if stack_id_map is None:
+            return None
+
+        if len(stack_id_map) == 0:
+            app.status = "na: not handled request via valet"
+            return app
+
+        prior_app = self.dbh.get_stack(stack_id_map.get("stack_id"))
+        if prior_app is None:
+            return None
+
+        if len(prior_app) == 0:
+            app.status = "cannot find prior stack info"
+            return app
+
+        # Once taking prior app, valet determines placements or error.
+        app.init_prior_app(prior_app)
+
+        self.logger.info("got 'rollback' for app = " + app.app_name)
+
+        return app
+
+    def set_for_create(self, _app):
+        """Set for stack-creation request.
+
+        Fills in Valet groups and flavor-derived properties (vcpus, mem,
+        local_volume, extra_specs) for each server, then parses the app.
+        Errors are reported via _app.status.
+        """
+
+        # Set Valet groups.
+        _app.init_valet_groups()
+        if _app.status != "ok":
+            return
+
+        # Set flavor properties for each server.
+        for rk, r in _app.stack.iteritems():
+            if "vcpus" not in r["properties"].keys():
+                flavor = _app.resource.get_flavor(r["properties"]["flavor"])
+
+                if flavor is None:
+                    _app.status = "fail to get flavor details"
+                    self.logger.error(_app.status)
+                    return
+
+                if flavor.status != "enabled":
+                    # TODO(Gueyoung): what to do if flavor is disabled?
+                    self.logger.warning("disabled flavor = " + flavor.name)
+
+                r["properties"]["vcpus"] = flavor.vCPUs
+                r["properties"]["mem"] = flavor.mem_cap
+                r["properties"]["local_volume"] = flavor.disk_cap
+
+                if len(flavor.extra_specs) > 0:
+                    extra_specs = {}
+                    for mk, mv in flavor.extra_specs.iteritems():
+                        extra_specs[mk] = mv
+                    r["properties"]["extra_specs"] = extra_specs
+
+        # Set servers.
+        # Once parsing app, valet determines placements or error.
+        if not _app.parse():
+            self.logger.error(_app.status)
+            return
+
+        return
+
+    def set_for_update(self, _app):
+        """Set for stack-update request.
+
+        Parses the app for validation only; the actual update is delegated
+        to the platform at this version (status set to "na:update:...").
+        """
+
+        # Set servers.
+        # Once parsing app, valet determines placements or error.
+        if not _app.parse():
+            self.logger.error(_app.status)
+            return
+
+        # Skip stack-update and rely on platform at this version.
+        _app.status = "na:update: pass stack-update"
+
+        return
+
+ def check_history(self, _req_id):
+ """Check if the request is determined already."""
+
+ if _req_id in self.decision_history.keys():
+ status = self.decision_history[_req_id].status
+ result = self.decision_history[_req_id].result
+ return status, result
+ else:
+ return None, None
+
+ def record_history(self, _req_id, _status, _result):
+ """Record an app placement decision."""
+
+ if _req_id not in self.decision_history.keys():
+ if len(self.decision_history) > self.max_decision_history:
+ self._flush_decision_history()
+
+ app_history = AppHistory(_req_id)
+ app_history.status = _status
+ app_history.result = _result
+ app_history.timestamp = time.time()
+
+ self.decision_history[_req_id] = app_history
+
+ def _flush_decision_history(self):
+ """Unload app placement decisions."""
+
+ count = 0
+ num_of_removes = len(self.decision_history) - self.min_decision_history
+
+ remove_item_list = []
+ for decision in (sorted(self.decision_history.values(), key=operator.attrgetter('timestamp'))): # type: AppHistory
+ remove_item_list.append(decision.decision_key)
+ count += 1
+ if count == num_of_removes:
+ break
+
+ for dk in remove_item_list:
+ del self.decision_history[dk]
+
+ def store_app(self, _app):
+ """Store new app or update existing app."""
+
+ if _app.state == "create":
+ metadata = {"service_instance_id": _app.service_instance_id, "vnf_instance_id": _app.vnf_instance_id,
+ "vnf_instance_name": _app.vnf_instance_name}
+
+ servers = {}
+ for sk, s in _app.servers.iteritems():
+ servers[sk] = s.get_json_info()
+
+ if not self.dbh.create_stack(_app.app_name,
+ _app.status,
+ _app.datacenter_id, _app.app_name, "none",
+ _app.tenant_id, metadata,
+ servers, {},
+ _app.state, "none"):
+ return False
+ elif _app.state in ("delete", "created"):
+ metadata = {"service_instance_id": _app.service_instance_id, "vnf_instance_id": _app.vnf_instance_id,
+ "vnf_instance_name": _app.vnf_instance_name}
+
+ if not self.dbh.update_stack(_app.app_name,
+ _app.status,
+ _app.datacenter_id, _app.app_name, _app.app_id,
+ _app.tenant_id, metadata,
+ _app.servers, _app.prior_servers,
+ _app.state, _app.prior_state):
+ return False
+ elif _app.state == "deleted":
+ if not self.dbh.delete_stack(_app.app_name):
+ return False
+ else:
+ self.logger.error("unknown operaton")
+ return False
+
+ # To manage the map between request_id and Heat stack requested
+ if _app.state in ("create", "delete"):
+ if not self.dbh.create_stack_id_map(_app.last_req_id, _app.app_name):
+ return False
+ elif _app.state in ("created", "deleted"):
+ if not self.dbh.delete_stack_id_map(_app.last_req_id):
+ return False
+
+ return True
diff --git a/engine/src/valet/engine/app_manager/app_parser.py b/engine/src/valet/engine/app_manager/app_parser.py
new file mode 100644
index 0000000..9e6336b
--- /dev/null
+++ b/engine/src/valet/engine/app_manager/app_parser.py
@@ -0,0 +1,257 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import six
+
+from valet.engine.app_manager.group import Group, LEVEL
+from valet.engine.app_manager.server import Server
+
+
+class Parser(object):
+
+    def __init__(self, _logger):
+        self.logger = _logger
+
+        # "ok", or a failure message set while merging groups/servers.
+        self.status = "ok"
+
+    def set_servers(self, _app_name, _stack, _groups):
+        """Parse stack resources to set servers (e.g., VMs) for search.
+
+        Returns (servers, affinity_groups); on a merge failure returns
+        ({}, {}) with the reason in self.status.
+        """
+
+        servers = {}
+
+        for rk, r in _stack.iteritems():
+            properties = r.get("properties")
+
+            server_name = properties["name"].strip()
+            server_id = _app_name + ":" + server_name
+
+            server = Server(server_id, rk)
+
+            server.name = server_name
+
+            flavor_id = properties.get("flavor")
+            if isinstance(flavor_id, six.string_types):
+                server.flavor = flavor_id.strip()
+            else:
+                server.flavor = str(flavor_id)
+
+            image_id = properties.get("image", None)
+            if image_id is not None:
+                if isinstance(image_id, six.string_types):
+                    server.image = image_id.strip()
+                else:
+                    server.image = str(image_id)
+
+            if "vcpus" in properties.keys():
+                server.vCPUs = int(properties.get("vcpus"))
+                server.mem = int(properties.get("mem"))
+                server.local_volume_size = int(properties.get("local_volume"))
+
+                ess = properties.get("extra_specs", {})
+                if len(ess) > 0:
+                    extra_specs = {}
+                    for mk, mv in ess.iteritems():
+                        extra_specs[mk] = mv
+                    server.extra_specs_list.append(extra_specs)
+
+            az = properties.get("availability_zone", None)
+            if az is not None:
+                # az is expected as [zone, assignment_variable(, index)].
+                server.availability_zone = az[0].strip()
+                server.host_assignment_variable = az[1].strip()
+                if len(az) == 3:
+                    server.host_assignment_inx = az[2]
+
+            servers[server_id] = server
+
+        if self._merge_diversity_groups(_groups, servers) is False:
+            self.status = "fail while merging diversity groups"
+            return {}, {}
+        if self._merge_quorum_diversity_groups(_groups, servers) is False:
+            self.status = "fail while merging quorum-diversity groups"
+            return {}, {}
+        if self._merge_exclusivity_groups(_groups, servers) is False:
+            self.status = "fail while merging exclusivity groups"
+            return {}, {}
+        if self._merge_affinity_groups(_groups, servers) is False:
+            self.status = "fail while merging affinity groups"
+            return {}, {}
+
+        # To delete all exclusivity and diversity groups after merging
+        groups = {gk: g for gk, g in _groups.iteritems() if g.group_type == "affinity"}
+
+        return servers, groups
+
+ def _merge_diversity_groups(self, _groups, _servers):
+ """Merge diversity sub groups."""
+
+ for level in LEVEL:
+ for gk, group in _groups.iteritems():
+ if group.level == level and group.group_type == "diversity":
+ for sk in group.server_list:
+ if sk in _servers.keys():
+ group.subgroups[sk] = _servers[sk]
+ _servers[sk].diversity_groups[group.vid] = group
+ else:
+ self.status = "invalid server = " + sk + " in group = " + group.vid
+ return False
+
+ return True
+
+    def _merge_quorum_diversity_groups(self, _groups, _servers):
+        """Merge quorum-diversity sub groups.
+
+        Same walk as _merge_diversity_groups, recording into
+        quorum_diversity_groups instead.
+        """
+
+        for level in LEVEL:
+            for gk, group in _groups.iteritems():
+                if group.level == level and group.group_type == "quorum-diversity":
+                    for sk in group.server_list:
+                        if sk in _servers.keys():
+                            group.subgroups[sk] = _servers[sk]
+                            _servers[sk].quorum_diversity_groups[group.vid] = group
+                        else:
+                            self.status = "invalid server = " + sk + " in group = " + group.vid
+                            return False
+
+        return True
+
+    def _merge_exclusivity_groups(self, _groups, _servers):
+        """Merge exclusivity sub groups.
+
+        Same walk as _merge_diversity_groups, recording into
+        exclusivity_groups instead.
+        """
+
+        for level in LEVEL:
+            for gk, group in _groups.iteritems():
+                if group.level == level and group.group_type == "exclusivity":
+                    for sk in group.server_list:
+                        if sk in _servers.keys():
+                            group.subgroups[sk] = _servers[sk]
+                            _servers[sk].exclusivity_groups[group.vid] = group
+                        else:
+                            self.status = "invalid server = " + sk + " in group = " + group.vid
+                            return False
+
+        return True
+
+    def _merge_affinity_groups(self, _groups, _servers):
+        """Merge affinity subgroups.
+
+        Servers are absorbed into their affinity group (and removed from
+        _servers); a server already owned by another affinity group pulls
+        that group in as a subgroup via _set_implicit_grouping.
+        """
+
+        # To track each server's or group's parent group (i.e., affinity)
+        affinity_map = {}
+
+        # To make canonical order of groups
+        group_list = [gk for gk in _groups.keys()]
+        group_list.sort()
+
+        for level in LEVEL:
+            for gk in group_list:
+                if gk in _groups.keys():
+                    if _groups[gk].level == level and _groups[gk].group_type == "affinity":
+                        group = _groups[gk]
+                    else:
+                        continue
+                else:
+                    # Group was consumed by an earlier implicit grouping.
+                    continue
+
+                group.server_list.sort()
+
+                for sk in group.server_list:
+                    if sk in _servers.keys():
+                        self._merge_server(group, sk, _servers, affinity_map)
+                    else:
+                        if sk not in affinity_map.keys():
+                            self.status = "invalid server = " + sk + " in group = " + group.vid
+                            return False
+
+                        # If server belongs to the other group already,
+                        # take the group as a subgroup of this group
+                        if affinity_map[sk].vid != group.vid:
+                            if group.is_parent_affinity(sk):
+                                self._set_implicit_grouping(sk, group, affinity_map, _groups)
+
+        return True
+
+    def _merge_server(self, _group, _sk, _servers, _affinity_map):
+        """Merge a server into the group.
+
+        The server is removed from _servers once absorbed; its diversity,
+        quorum-diversity, exclusivity groups and memberships are lifted
+        into _group.
+        """
+
+        _group.subgroups[_sk] = _servers[_sk]
+        _servers[_sk].surgroup = _group
+        _affinity_map[_sk] = _group
+
+        self._add_implicit_diversity_groups(_group, _servers[_sk].diversity_groups)
+        self._add_implicit_quorum_diversity_groups(_group, _servers[_sk].quorum_diversity_groups)
+        self._add_implicit_exclusivity_groups(_group, _servers[_sk].exclusivity_groups)
+        self._add_memberships(_group, _servers[_sk])
+
+        del _servers[_sk]
+
+    def _add_implicit_diversity_groups(self, _group, _diversity_groups):
+        """Add subgroup's diversity groups.
+
+        Only rules at the same or a coarser level than _group are kept
+        (LEVEL is ordered fine -> coarse).
+        """
+
+        for dk, div_group in _diversity_groups.iteritems():
+            if LEVEL.index(div_group.level) >= LEVEL.index(_group.level):
+                _group.diversity_groups[dk] = div_group
+
+    def _add_implicit_quorum_diversity_groups(self, _group, _quorum_diversity_groups):
+        """Add subgroup's quorum diversity groups.
+
+        Only rules at the same or a coarser level than _group are kept.
+        """
+
+        for dk, div_group in _quorum_diversity_groups.iteritems():
+            if LEVEL.index(div_group.level) >= LEVEL.index(_group.level):
+                _group.quorum_diversity_groups[dk] = div_group
+
+    def _add_implicit_exclusivity_groups(self, _group, _exclusivity_groups):
+        """Add subgroup's exclusivity groups.
+
+        Only rules at the same or a coarser level than _group are kept.
+        """
+
+        for ek, ex_group in _exclusivity_groups.iteritems():
+            if LEVEL.index(ex_group.level) >= LEVEL.index(_group.level):
+                _group.exclusivity_groups[ek] = ex_group
+
+    def _add_memberships(self, _group, _v):
+        """Add subgroups's host-aggregates and AZs.
+
+        _v may be a Server or a nested Group; extra-specs and availability
+        zones are propagated up into _group without duplicates.
+        """
+
+        for extra_specs in _v.extra_specs_list:
+            _group.extra_specs_list.append(extra_specs)
+
+        if isinstance(_v, Server):
+            if _v.availability_zone is not None:
+                if _v.availability_zone not in _group.availability_zone_list:
+                    _group.availability_zone_list.append(_v.availability_zone)
+
+        if isinstance(_v, Group):
+            for az in _v.availability_zone_list:
+                if az not in _group.availability_zone_list:
+                    _group.availability_zone_list.append(az)
+
+ def _set_implicit_grouping(self, _vk, _g, _affinity_map, _groups):
+ """Take server's most top parent as a child group of this group _g."""
+
+ tg = _affinity_map[_vk] # Where _vk currently belongs to
+
+ if tg.vid in _affinity_map.keys(): # If the parent belongs to the other parent group
+ self._set_implicit_grouping(tg.vid, _g, _affinity_map, _groups)
+ else:
+ if LEVEL.index(tg.level) > LEVEL.index(_g.level):
+ tg.level = _g.level
+
+ if _g.is_parent_affinty(tg.vid):
+ _g.subgroups[tg.vid] = tg
+ tg.surgroup = _g
+ _affinity_map[tg.vid] = _g
+
+ self._add_implicit_diversity_groups(_g, tg.diversity_groups)
+ self._add_implicit_quorum_diversity_groups(_g, tg.quorum_diversity_groups)
+ self._add_implicit_exclusivity_groups(_g, tg.exclusivity_groups)
+ self._add_memberships(_g, tg)
+
+ del _groups[tg.vid]
diff --git a/engine/src/valet/engine/app_manager/group.py b/engine/src/valet/engine/app_manager/group.py
new file mode 100644
index 0000000..69c9339
--- /dev/null
+++ b/engine/src/valet/engine/app_manager/group.py
@@ -0,0 +1,139 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import six
+
+
+LEVEL = ["host", "rack", "cluster"]
+
+FLAVOR_TYPES = ["gv", "nv", "nd", "ns", "ss"]
+
+
+class Group(object):
+ """Container to keep requested valet groups.
+
+ TODO(Gueyoung): create parent class to make common functions
+ """
+
+    def __init__(self, _id):
+        """Define group info and parameters."""
+
+        # ID consists of
+        # datacenter_id + [service_instance_id] + [vnf_instance_id] + rule_name
+        self.vid = _id
+
+        # Type is either affinity, diversity, quorum-diversity, or exclusivity
+        self.group_type = None
+
+        self.factory = None
+
+        # Level is host or rack (LEVEL also defines cluster)
+        self.level = None
+
+        # To build containment tree
+        self.surgroup = None  # parent 'affinity' object
+        self.subgroups = {}  # child affinity group (or server) objects
+
+        # Group objects of this container
+        self.diversity_groups = {}
+        self.quorum_diversity_groups = {}
+        self.exclusivity_groups = {}
+
+        self.availability_zone_list = []
+
+        self.extra_specs_list = []
+
+        # Aggregate resource demand of contained servers.
+        self.vCPUs = 0
+        self.mem = 0  # MB
+        self.local_volume_size = 0  # GB
+
+        # -1 means weight not computed yet.
+        self.vCPU_weight = -1
+        self.mem_weight = -1
+        self.local_volume_weight = -1
+
+        # To record which servers (e.g., VMs) in given request are assigned
+        # to this group.
+        self.server_list = []
+
+        self.sort_base = -1
+
+    def get_exclusivities(self, _level):
+        """Return exclusivity group requested with a level (host or rack).
+
+        Note: each affinity group must have a single exclusivity group of the level.
+
+        :param _level: one of LEVEL ("host", "rack", "cluster").
+        :return: dict of exclusivity-group key -> Group at that level.
+        """
+
+        exclusivities = {}
+
+        for exk, group in self.exclusivity_groups.iteritems():
+            if group.level == _level:
+                exclusivities[exk] = group
+
+        return exclusivities
+
+ def need_numa_alignment(self):
+ """Check if this server requires NUMA alignment."""
+
+ if len(self.extra_specs_list) > 0:
+ for es in self.extra_specs_list:
+ for key, req in six.iteritems(es):
+ if key == "hw:numa_nodes" and req == 1:
+ return True
+
+ return False
+
+ def is_parent_affinity(self, _vk):
+ """Check recursively if _vk is located in the group."""
+
+ exist = False
+
+ for sgk, sg in self.subgroups.iteritems():
+ if sgk == _vk:
+ exist = True
+ break
+
+ if isinstance(sg, Group):
+ if sg.is_parent_affinity(_vk):
+ exist = True
+ break
+
+ return exist
+
+    def get_servers(self, _servers):
+        """Get all child servers.
+
+        Recurses into nested affinity groups; servers are appended to the
+        _servers list (deduplicated).
+        """
+
+        for _, sg in self.subgroups.iteritems():
+            if isinstance(sg, Group):
+                sg.get_servers(_servers)
+            else:
+                if sg not in _servers:
+                    _servers.append(sg)
+
+ def get_flavor_types(self):
+ flavor_type_list = []
+
+ for extra_specs in self.extra_specs_list:
+ for k, v in extra_specs.iteritems():
+ k_elements = k.split(':')
+ if len(k_elements) > 1:
+ if k_elements[0] == "aggregate_instance_extra_specs":
+ if k_elements[1].lower() in FLAVOR_TYPES:
+ if v == "true":
+ flavor_type_list.append(k_elements[1])
+
+ return flavor_type_list
diff --git a/engine/src/valet/engine/app_manager/server.py b/engine/src/valet/engine/app_manager/server.py
new file mode 100644
index 0000000..9052285
--- /dev/null
+++ b/engine/src/valet/engine/app_manager/server.py
@@ -0,0 +1,171 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import six
+
+from valet.engine.app_manager.group import FLAVOR_TYPES
+
+
+class Server(object):
+ """Container to keep requested server (e.g., VM)."""
+
+    def __init__(self, _id, _orch_id):
+        """Define server info and parameters."""
+
+        # ID consists of stack_name + ":" + server_name,
+        # where stack_name = datacenter_id + ":" + tenant_id + ":" + vf_module_name
+        self.vid = _id
+
+        # ID in stack
+        self.orch_id = _orch_id
+
+        # one given in Heat stack
+        self.name = None
+
+        # Affinity group object
+        self.surgroup = None
+
+        # Group objects this server belongs to.
+        self.diversity_groups = {}
+        self.quorum_diversity_groups = {}
+        self.exclusivity_groups = {}
+
+        self.availability_zone = None
+
+        self.flavor = None
+        self.image = None
+
+        # Resource demand (filled from flavor or stack properties).
+        self.vCPUs = 0
+        self.mem = 0  # MB
+        self.local_volume_size = 0  # GB
+        self.extra_specs_list = []
+
+        # -1 means weight not computed yet.
+        self.vCPU_weight = -1
+        self.mem_weight = -1
+        self.local_volume_weight = -1
+
+        # To return placement.
+        # If stack is nested, should point the index to distinguish
+        self.host_assignment_variable = None
+        self.host_assignment_inx = -1
+
+        # Placement result
+        self.host_group = None  # e.g., rack
+        self.host = None
+        self.numa = None
+
+        # Request state is 'create', 'migrate', 'rebuild', 'delete'
+        # 'created', 'migrated', 'rebuilt'
+        self.state = "plan"
+
+        # To inform if the current placement violates rules and requirements
+        self.status = "valid"
+
+        self.sort_base = -1
+
+    def get_exclusivities(self, _level):
+        """Return exclusivity group requested with a level (host or rack).
+
+        Note: each server must have a single exclusivity group of the level.
+
+        :param _level: one of LEVEL ("host", "rack", "cluster").
+        :return: dict of exclusivity-group key -> Group at that level.
+        """
+
+        exclusivities = {}
+
+        for exk, group in self.exclusivity_groups.iteritems():
+            if group.level == _level:
+                exclusivities[exk] = group
+
+        return exclusivities
+
+    def need_numa_alignment(self):
+        """Check if this server requires NUMA alignment.
+
+        True when any extra-spec requests exactly one NUMA node; the value
+        is coerced with int() so a string "1" matches too.
+        """
+
+        if len(self.extra_specs_list) > 0:
+            for es in self.extra_specs_list:
+                for key, req in six.iteritems(es):
+                    if key == "hw:numa_nodes" and int(req) == 1:
+                        return True
+
+        return False
+
+    def get_flavor_types(self):
+        """Collect flavor-type tags set via aggregate_instance_extra_specs.
+
+        Only entries whose value is the string "true" are collected.
+        """
+
+        flavor_type_list = []
+
+        for extra_specs in self.extra_specs_list:
+            for k, v in extra_specs.iteritems():
+                k_elements = k.split(':')
+                if len(k_elements) > 1:
+                    if k_elements[0] == "aggregate_instance_extra_specs":
+                        if k_elements[1].lower() in FLAVOR_TYPES:
+                            if v == "true":
+                                flavor_type_list.append(k_elements[1])
+
+        return flavor_type_list
+
+ def get_json_info(self):
+ """Format server info as JSON."""
+
+ if self.surgroup is None:
+ surgroup_id = "none"
+ else:
+ surgroup_id = self.surgroup.vid
+
+ diversity_groups = []
+ for divk in self.diversity_groups.keys():
+ diversity_groups.append(divk)
+
+ quorum_diversity_groups = []
+ for divk in self.quorum_diversity_groups.keys():
+ quorum_diversity_groups.append(divk)
+
+ exclusivity_groups = []
+ for exk in self.exclusivity_groups.keys():
+ exclusivity_groups.append(exk)
+
+ if self.availability_zone is None:
+ availability_zone = "none"
+ else:
+ availability_zone = self.availability_zone
+
+ if self.host_group is None:
+ host_group = "none"
+ else:
+ host_group = self.host_group
+
+ if self.numa is None:
+ numa = "none"
+ else:
+ numa = self.numa
+
+ return {'name': self.name,
+ 'orch_id': self.orch_id,
+ 'surgroup': surgroup_id,
+ 'diversity_groups': diversity_groups,
+ 'quorum_diversity_groups': quorum_diversity_groups,
+ 'exclusivity_groups': exclusivity_groups,
+ 'availability_zones': availability_zone,
+ 'extra_specs_list': self.extra_specs_list,
+ 'flavor': self.flavor,
+ 'image': self.image,
+ 'cpus': self.vCPUs,
+ 'mem': self.mem,
+ 'local_volume': self.local_volume_size,
+ 'host_group': host_group,
+ 'host': self.host,
+ 'numa': numa,
+ 'state': self.state,
+ 'status': self.status}
diff --git a/engine/src/valet/engine/db_connect/__init__.py b/engine/src/valet/engine/db_connect/__init__.py
new file mode 100644
index 0000000..bd50995
--- /dev/null
+++ b/engine/src/valet/engine/db_connect/__init__.py
@@ -0,0 +1,18 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
diff --git a/engine/src/valet/engine/db_connect/db_apis/__init__.py b/engine/src/valet/engine/db_connect/db_apis/__init__.py
new file mode 100644
index 0000000..bd50995
--- /dev/null
+++ b/engine/src/valet/engine/db_connect/db_apis/__init__.py
@@ -0,0 +1,18 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
diff --git a/engine/src/valet/engine/db_connect/db_apis/mem_db.py b/engine/src/valet/engine/db_connect/db_apis/mem_db.py
new file mode 100644
index 0000000..b706c63
--- /dev/null
+++ b/engine/src/valet/engine/db_connect/db_apis/mem_db.py
@@ -0,0 +1,117 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import copy
+
+
+class MemDB(object):
+
+    def __init__(self, _config, _logger):
+        """In-memory stand-in for the Music DB; table names come from config."""
+
+        self.logger = _logger
+
+        self.keyspace = _config.get("keyspace")
+        self.requests_table = _config.get("requests_table")
+        self.results_table = _config.get("results_table")
+        self.group_rules_table = _config.get("group_rules_table")
+        self.groups_table = _config.get("groups_table")
+        self.stacks_table = _config.get("stacks_table")
+        self.resources_table = _config.get("resources_table")
+        self.stack_id_map_table = _config.get("stack_id_map_table")
+
+        # One dict per table, keyed by that table's primary key.
+        self.requests = {}
+        self.results = {}
+        self.group_rules = {}
+        self.groups = {}
+        self.stacks = {}
+        self.resources = {}
+        self.stack_id_map = {}
+
+ def read_all_rows(self, keyspace, table):
+ rows = {"result": {}}
+
+ if table == self.requests_table:
+ for k, v in self.requests.iteritems():
+ rows["result"][k] = copy.deepcopy(v)
+ elif table == self.results_table:
+ for k, v in self.results.iteritems():
+ rows["result"][k] = copy.deepcopy(v)
+ elif table == self.group_rules_table:
+ for k, v in self.group_rules.iteritems():
+ rows["result"][k] = copy.deepcopy(v)
+ elif table == self.groups_table:
+ for k, v in self.groups.iteritems():
+ rows["result"][k] = copy.deepcopy(v)
+
+ return rows
+
+    def insert_atom(self, keyspace, table, data, name=None, value=None):
+        """Insert (or overwrite) a row keyed by its table's primary key.
+
+        name/value are accepted for API compatibility but unused here.
+        """
+
+        if table == self.requests_table:
+            self.requests[data['request_id']] = data
+        elif table == self.results_table:
+            self.results[data['request_id']] = data
+        elif table == self.group_rules_table:
+            self.group_rules[data['id']] = data
+        elif table == self.groups_table:
+            self.groups[data['id']] = data
+        elif table == self.resources_table:
+            self.resources[data['id']] = data
+        elif table == self.stacks_table:
+            self.stacks[data['id']] = data
+        elif table == self.stack_id_map_table:
+            self.stack_id_map[data['request_id']] = data
+
+ def delete_atom(self, keyspace, table, pk_name, pk_value):
+ if table == self.requests_table:
+ if pk_value in self.requests.keys():
+ del self.requests[pk_value]
+ elif table == self.groups_table:
+ if pk_value in self.groups.keys():
+ del self.groups[pk_value]
+ elif table == self.results_table:
+ if pk_value in self.results.keys():
+ del self.results[pk_value]
+
+    def read_row(self, keyspace, table, pk_name, pk_value):
+        """Read a single row by primary-key value.
+
+        Returns {"result": {"row 0": <deep copy>}} on a hit, or an empty
+        "result" dict when the key is absent.
+        """
+
+        row = {"result": {}}
+
+        if table == self.requests_table:
+            if pk_value in self.requests.keys():
+                row["result"]["row 0"] = copy.deepcopy(self.requests[pk_value])
+        elif table == self.results_table:
+            if pk_value in self.results.keys():
+                row["result"]["row 0"] = copy.deepcopy(self.results[pk_value])
+        elif table == self.resources_table:
+            if pk_value in self.resources.keys():
+                row["result"]["row 0"] = copy.deepcopy(self.resources[pk_value])
+        elif table == self.group_rules_table:
+            if pk_value in self.group_rules.keys():
+                row["result"]["row 0"] = copy.deepcopy(self.group_rules[pk_value])
+        elif table == self.stack_id_map_table:
+            if pk_value in self.stack_id_map.keys():
+                row["result"]["row 0"] = copy.deepcopy(self.stack_id_map[pk_value])
+        elif table == self.stacks_table:
+            if pk_value in self.stacks.keys():
+                row["result"]["row 0"] = copy.deepcopy(self.stacks[pk_value])
+
+        return row
+
+    def create_lock(self, _key):
+        # No-op lock for the in-memory DB; returns a constant lock id.
+        return "$x--0000000000"
+
+    def delete_lock(self, _key):
+        # No-op: create_lock does not actually lock anything.
+        pass
diff --git a/engine/src/valet/engine/db_connect/db_apis/music.py b/engine/src/valet/engine/db_connect/db_apis/music.py
new file mode 100644
index 0000000..85e2d31
--- /dev/null
+++ b/engine/src/valet/engine/db_connect/db_apis/music.py
@@ -0,0 +1,388 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+import base64
+import json
+import requests
+
+from valet.utils.decryption import decrypt
+
+
+class REST(object):
+ """Helper class for REST operations."""
+
+ def __init__(self, hosts, port, path, timeout, retries,
+ userid, password, ns, logger):
+ """Initializer. Accepts target host list, port, and path."""
+
+ self.hosts = hosts # List of IP or FQDNs
+ self.port = port # Port Number
+ self.path = path # Path starting with /
+ self.timeout = float(timeout) # REST request timeout in seconds
+ self.retries = retries # Retires before failing over to next Music server.
+ self.userid = userid
+ self.password = password
+ self.ns = ns
+ self.logger = logger # For logging
+
+ self.urls = []
+ for host in self.hosts:
+ # Must end without a slash
+ self.urls.append('http://%(host)s:%(port)s%(path)s' % {
+ 'host': host,
+ 'port': self.port,
+ 'path': self.path,
+ })
+
+ def __headers(self, content_type='application/json'):
+ """Returns HTTP request headers."""
+
+ headers = {
+ 'ns': self.ns,
+ 'accept': content_type,
+ 'content-type': content_type,
+ 'authorization': 'Basic %s' % base64.b64encode(self.userid + ':' + self.password)
+ }
+
+ return headers
+
+ def request(self, method='get', content_type='application/json', path='/',
+ data=None, raise400=True):
+ """ Performs HTTP request """
+
+ if method not in ('post', 'get', 'put', 'delete'):
+ raise KeyError("Method must be: post, get, put, or delete.")
+
+ method_fn = getattr(requests, method)
+
+ if data:
+ data_json = json.dumps(data)
+ else:
+ data_json = None
+
+ response = None
+ timeout = False
+ err_message = ""
+ full_url = ""
+ for url in self.urls:
+ # Try each url in turn. First one to succeed wins.
+ full_url = url + path
+
+ for attempt in range(self.retries):
+ # Ignore the previous exception.
+ try:
+ my_headers = self.__headers(content_type)
+ for header_key in my_headers:
+ if (type(my_headers[header_key]).__name__ == 'unicode'):
+ my_headers[header_key] = my_headers[header_key].encode('ascii', 'ignore')
+ response = method_fn(full_url, data=data_json,
+ headers=my_headers,
+ timeout=self.timeout)
+ if raise400 or not response.status_code == 400:
+ response.raise_for_status()
+ return response
+
+ except requests.exceptions.Timeout as err:
+ err_message = err.message
+ response = requests.Response()
+ response.url = full_url
+ if not timeout:
+ self.logger.warning("Music: %s Timeout" % url, errorCode='availability')
+ timeout = True
+
+ except requests.exceptions.RequestException as err:
+ err_message = err.message
+ self.logger.debug("Music: %s Request Exception" % url)
+ self.logger.debug(" method = %s" % method)
+ self.logger.debug(" timeout = %s" % self.timeout)
+ self.logger.debug(" err = %s" % err)
+ self.logger.debug(" full url = %s" % full_url)
+ self.logger.debug(" request data = %s" % data_json)
+ self.logger.debug(" request headers = %s" % my_headers)
+ self.logger.debug(" status code = %s" % response.status_code)
+ self.logger.debug(" response = %s" % response.text)
+ self.logger.debug(" response headers = %s" % response.headers)
+
+ # If we get here, an exception was raised for every url,
+ # but we passed so we could try each endpoint. Raise status
+ # for the last attempt (for now) so that we report something.
+ if response is not None:
+ self.logger.debug("Music: Full Url: %s", full_url)
+ self.logger.debug("Music: %s ", err_message)
+ response.raise_for_status()
+
+
+class Music(object):
+ """Wrapper for Music API"""
+
+ def __init__(self, _config, _logger):
+ """Initializer. Accepts a lock_timeout for atomic operations."""
+
+ self.logger = _logger
+
+ pw = decrypt(_config["engine"]["ek"],
+ _config["logging"]["lk"],
+ _config["db"]["dk"],
+ _config["music"]["password"])
+
+ kwargs = {
+ 'hosts': _config["music"]["hosts"],
+ 'port': _config["music"]["port"],
+ 'path': _config["music"]["path"],
+ 'timeout': _config["music"]["timeout"],
+ 'retries': _config["music"]["retries"],
+ 'userid': _config["music"]["userid"],
+ 'password': pw,
+ 'ns': _config["music"]["namespace"],
+ 'logger': _logger,
+ }
+ self.rest = REST(**kwargs)
+
+ self.lock_names = []
+ self.lock_timeout = _config["music"]["lock_timeout"]
+
+ self.replication_factor = _config["music"]["replication_factor"]
+
+ @staticmethod
+ def __row_url_path(keyspace, table, pk_name=None, pk_value=None):
+ """Returns a Music-compliant row URL path."""
+
+ path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows' % {
+ 'keyspace': keyspace,
+ 'table': table,
+ }
+
+ if pk_name and pk_value:
+ path += '?%s=%s' % (pk_name, pk_value)
+
+ return path
+
+ def create_keyspace(self, keyspace):
+ """Creates a keyspace."""
+
+ data = {
+ 'replicationInfo': {
+ # 'class': 'NetworkTopologyStrategy',
+ # 'dc1': self.replication_factor,
+ 'class': 'SimpleStrategy',
+ 'replication_factor': self.replication_factor,
+ },
+ 'durabilityOfWrites': True,
+ 'consistencyInfo': {
+ 'type': 'eventual',
+ },
+ }
+
+ path = '/keyspaces/%s' % keyspace
+ response = self.rest.request(method='post', path=path, data=data)
+
+ return response.ok
+
+ def drop_keyspace(self, keyspace):
+ """Drops a keyspace."""
+
+ data = {
+ 'consistencyInfo': {
+ 'type': 'eventual',
+ },
+ }
+
+ path = '/keyspaces/%s' % keyspace
+ response = self.rest.request(method='delete', path=path, data=data)
+
+ return response.ok
+
+ def create_table(self, keyspace, table, schema):
+ """Creates a table."""
+
+ data = {
+ 'fields': schema,
+ 'consistencyInfo': {
+ 'type': 'eventual',
+ },
+ }
+ self.logger.debug(data)
+
+ path = '/keyspaces/%(keyspace)s/tables/%(table)s' % {
+ 'keyspace': keyspace,
+ 'table': table,
+ }
+
+ response = self.rest.request(method='post', path=path, data=data)
+
+ return response.ok
+
+ def create_index(self, keyspace, table, index_field, index_name=None):
+ """Creates an index for the referenced table."""
+
+ data = None
+ if index_name:
+ data = {
+ 'index_name': index_name,
+ }
+
+ pstr = '/keyspaces/%(keyspace)s/tables/%(table)s/index/%(index_field)s'
+ path = pstr % {
+ 'keyspace': keyspace,
+ 'table': table,
+ 'index_field': index_field,
+ }
+
+ response = self.rest.request(method='post', path=path, data=data)
+
+ return response.ok
+
+ def version(self):
+ """Returns version string."""
+
+ path = '/version'
+ response = self.rest.request(method='get', content_type='text/plain', path=path)
+
+ return response.text
+
+ def create_lock(self, lock_name):
+ """Returns the lock id. Use for acquiring and releasing."""
+
+ path = '/locks/create/%s' % lock_name
+ response = self.rest.request(method='post', path=path)
+
+ return json.loads(response.text)["lock"]["lock"]
+
+ def acquire_lock(self, lock_id):
+ """Acquire a lock."""
+
+ path = '/locks/acquire/%s' % lock_id
+ response = self.rest.request(method='get', path=path, raise400=False)
+
+ return json.loads(response.text)["status"] == "SUCCESS"
+
+ def release_lock(self, lock_id):
+ """Release a lock."""
+
+ path = '/locks/release/%s' % lock_id
+ response = self.rest.request(method='delete', path=path)
+
+ return response.ok
+
+ def delete_lock(self, lock_name):
+ """Deletes a lock by name."""
+
+ path = '/locks/delete/%s' % lock_name
+ response = self.rest.request(method='delete', path=path, raise400=False)
+
+ return response.ok
+
+ def delete_all_locks(self):
+ """Delete all locks created during the lifetime of this object."""
+
+ # TODO(JD): Shouldn't this really be part of internal cleanup?
+ # FIXME: It can be several API calls. Any way to do in one fell swoop?
+ for lock_name in self.lock_names:
+ self.delete_lock(lock_name)
+
+ def create_row(self, keyspace, table, values):
+ """Create a row."""
+
+ # self.logger.debug("MUSIC: create_row "+ table)
+
+ data = {
+ 'values': values,
+ 'consistencyInfo': {
+ 'type': 'eventual',
+ },
+ }
+
+ path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows' % {
+ 'keyspace': keyspace,
+ 'table': table,
+ }
+ response = self.rest.request(method='post', path=path, data=data)
+
+ return response.ok
+
+ def insert_atom(self, keyspace, table, values, name=None, value=None):
+ """Atomic create/update row."""
+
+ data = {
+ 'values': values,
+ 'consistencyInfo': {
+ 'type': 'atomic',
+ }
+ }
+
+ path = self.__row_url_path(keyspace, table, name, value)
+ method = 'post'
+
+ # self.logger.debug("MUSIC: Method: %s ", (method.upper()))
+ # self.logger.debug("MUSIC: Path: %s", (path))
+ # self.logger.debug("MUSIC: Data: %s", (data))
+
+ self.rest.request(method=method, path=path, data=data)
+
+ def update_row_eventually(self, keyspace, table, values):
+ """Update a row. Not atomic."""
+
+ data = {
+ 'values': values,
+ 'consistencyInfo': {
+ 'type': 'eventual',
+ },
+ }
+
+ path = self.__row_url_path(keyspace, table)
+ response = self.rest.request(method='post', path=path, data=data)
+
+ return response.ok
+
+ def delete_row_eventually(self, keyspace, table, pk_name, pk_value):
+ """Delete a row. Not atomic."""
+
+ data = {
+ 'consistencyInfo': {
+ 'type': 'eventual',
+ },
+ }
+
+ path = self.__row_url_path(keyspace, table, pk_name, pk_value)
+ response = self.rest.request(method='delete', path=path, data=data)
+
+ return response.ok
+
+ def delete_atom(self, keyspace, table, pk_name, pk_value):
+ """Atomic delete row."""
+
+ data = {
+ 'consistencyInfo': {
+ 'type': 'atomic',
+ }
+ }
+ path = self.__row_url_path(keyspace, table, pk_name, pk_value)
+ self.rest.request(method='delete', path=path, data=data)
+
+ def read_row(self, keyspace, table, pk_name, pk_value):
+ """Read one row based on a primary key name/value."""
+
+ path = self.__row_url_path(keyspace, table, pk_name, pk_value)
+ response = self.rest.request(path=path)
+ return response.json()
+
+ def read_all_rows(self, keyspace, table):
+ """Read all rows."""
+
+ return self.read_row(keyspace, table, pk_name=None, pk_value=None)
diff --git a/engine/src/valet/engine/db_connect/db_handler.py b/engine/src/valet/engine/db_connect/db_handler.py
new file mode 100644
index 0000000..2165b8a
--- /dev/null
+++ b/engine/src/valet/engine/db_connect/db_handler.py
@@ -0,0 +1,533 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import json
+import operator
+
+from valet.engine.db_connect.locks import Locks, now
+
+
+class DBHandler(object):
+
+ def __init__(self, _db, _config, _logger):
+ self.keyspace = _config.get("keyspace")
+ self.requests_table = _config.get("requests_table")
+ self.results_table = _config.get("results_table")
+ self.group_rules_table = _config.get("group_rules_table")
+ self.groups_table = _config.get("groups_table")
+ self.stacks_table = _config.get("stacks_table")
+ self.resources_table = _config.get("resources_table")
+ self.stack_id_map_table = _config.get("stack_id_map_table")
+ self.regions_table = _config.get("regions_table")
+
+ self.db = _db
+
+ self.logger = _logger
+
+ def get_requests(self):
+ """Get requests from valet-api."""
+
+ request_list = []
+
+ try:
+ rows = self.db.read_all_rows(self.keyspace, self.requests_table)
+ except Exception as e:
+ self.logger.error("DB: while reading requests: " + str(e))
+ return []
+
+ if rows is not None and len(rows) > 0:
+ for key, row in rows.iteritems():
+ if key == "status":
+ if row == "FAILURE":
+ self.logger.error("DB: Failure in " + self.requests_table)
+ return []
+ continue
+ elif key == "error":
+ continue
+ elif key == "result":
+ for _, dbrow in row.iteritems():
+ request_list.append(dbrow)
+
+ if len(request_list) > 0:
+ # NOTE(Gueyoung): Sort by timestamp if timestamp is based UDT.
+ # Currently, ping's timestamp is always -1, while all others 0.
+ # This is to provide the priority to ping request.
+ request_list.sort(key=operator.itemgetter("timestamp"))
+
+ return request_list
+
+ def return_request(self, _req_id, _status, _result):
+ """Finalize the request by
+
+ Create result in results table and delete handled request from requests table.
+ """
+
+ # TODO(Gueyoung): avoid duplicated results.
+
+ # Wait randomly with unique seed (Valet instance ID).
+ # random.seed(_seed)
+ # r = random.randint(1, 100)
+ # delay = float(r) / 100.0
+ # time.sleep(delay)
+
+ if not self._create_result(_req_id, _status, _result):
+ return False
+
+ if not self._delete_request(_req_id):
+ return False
+
+ return True
+
+ def get_results(self):
+ """Get results."""
+
+ result_list = []
+
+ try:
+ rows = self.db.read_all_rows(self.keyspace, self.results_table)
+ except Exception as e:
+ self.logger.error("DB: while reading results: " + str(e))
+ return None
+
+ if rows is not None and len(rows) > 0:
+ for key, row in rows.iteritems():
+ if key == "status":
+ continue
+ elif key == "error":
+ continue
+ elif key == "result":
+ for _, dbrow in row.iteritems():
+ result_list.append(dbrow)
+
+ return result_list
+
+ def _create_result(self, _req_id, _status, _result):
+ """Return result of request by putting it into results table."""
+
+ data = {
+ 'request_id': _req_id,
+ 'status': json.dumps(_status),
+ 'result': json.dumps(_result),
+ 'timestamp': now()
+ }
+ try:
+ self.db.insert_atom(self.keyspace, self.results_table, data)
+ except Exception as e:
+ self.logger.error("DB: while putting placement result: " + str(e))
+ return False
+
+ return True
+
+ def _delete_request(self, _req_id):
+ """Delete finished request."""
+
+ try:
+ self.db.delete_atom(self.keyspace, self.requests_table,
+ 'request_id', _req_id)
+ except Exception as e:
+ self.logger.error("DB: while deleting handled request: " + str(e))
+ return False
+
+ return True
+
+ def clean_expired_regions(self):
+ """Delete regions from the regions table that have expired.
+
+ Return the list of locked regions."""
+
+ locked_regions = []
+
+ try:
+ result = self.db.read_row(self.keyspace, self.regions_table, None, None)["result"]
+ except Exception as e:
+ self.logger.error("DB: while reading locked regions: " + str(e))
+ return None
+
+ for _, data in sorted(result.iteritems()):
+ if int(data["expire_time"]) < now():
+
+ self.logger.warning("lock on %s has timed out and is revoked" % data["region_id"])
+
+ Locks.unlock(self, data["region_id"])
+
+ if not self.delete_region(data["region_id"]):
+ return None
+ else:
+ locked_regions.append(data["region_id"])
+
+ return locked_regions
+
+ def delete_region(self, region_id):
+ """Delete from regions table."""
+
+ try:
+ self.db.delete_atom(self.keyspace, self.regions_table,
+ 'region_id', region_id)
+ except Exception as e:
+ self.logger.error("DB: while deleting expired lock: " + str(e))
+ return False
+
+ return True
+
+ def add_region(self, region_id, expire_time, update=False):
+ """Add/update locking info into region table."""
+
+ data = {
+ "region_id": region_id,
+ "locked_by": "hostname",
+ "expire_time": expire_time
+ }
+
+ name = value = None
+ if update:
+ name = "region_id"
+ value = region_id
+
+ try:
+ self.db.insert_atom(self.keyspace, self.regions_table, data, name, value)
+ except Exception as e:
+ self.logger.error("DB: while adding locked region: " + str(e))
+ return False
+
+ return True
+
+ def create_stack_id_map(self, _req_id, _stack_id):
+ """Create request map entry."""
+
+ data = {
+ 'request_id': _req_id,
+ 'stack_id': _stack_id
+ }
+ try:
+ self.db.insert_atom(self.keyspace, self.stack_id_map_table, data)
+ except Exception as e:
+ self.logger.error("DB: while creating request map: " + str(e))
+ return False
+
+ return True
+
+ def get_stack_id_map(self, _req_id):
+ """Get stack id."""
+
+ try:
+ row = self.db.read_row(self.keyspace, self.stack_id_map_table,
+ "request_id", _req_id)
+ except Exception as e:
+ self.logger.error("DB: while reading stack_id: " + str(e))
+ return None
+
+ if len(row) > 0:
+ if "result" in row.keys():
+ if len(row["result"]) > 0:
+ return row["result"][row["result"].keys()[0]]
+ else:
+ return {}
+ else:
+ return {}
+ else:
+ return {}
+
+ def delete_stack_id_map(self, _req_id):
+ """Delete map of confirmed or rollbacked request."""
+
+ try:
+ self.db.delete_atom(self.keyspace, self.stack_id_map_table,
+ 'request_id', _req_id)
+ except Exception as e:
+ self.logger.error("DB: while deleting request id map: " + str(e))
+ return False
+
+ return True
+
+ def get_group_rules(self):
+ """Get all valet group rules."""
+
+ rule_list = []
+
+ try:
+ rows = self.db.read_all_rows(self.keyspace, self.group_rules_table)
+ except Exception as e:
+ self.logger.error("DB: while reading group rules: " + str(e))
+ return None
+
+ if len(rows) > 0:
+ for key, row in rows.iteritems():
+ if key == "result":
+ for _, dbrow in row.iteritems():
+ rule_list.append(dbrow)
+
+ return rule_list
+
+ def get_group_rule(self, _id):
+ """Get valet group rule."""
+
+ try:
+ row = self.db.read_row(self.keyspace, self.group_rules_table, "id", _id)
+ except Exception as e:
+ self.logger.error("DB: while reading group rule: " + str(e))
+ return None
+
+ if len(row) > 0:
+ if "result" in row.keys():
+ if len(row["result"]) > 0:
+ return row["result"][row["result"].keys()[0]]
+ else:
+ return {}
+ else:
+ return {}
+ else:
+ return {}
+
+ def create_group_rule(self, _name, _scope, _type, _level, _members, _desc):
+ """Create a group rule."""
+
+ data = {
+ 'id': _name,
+ 'app_scope': _scope,
+ 'type': _type,
+ 'level': _level,
+ 'members': json.dumps(_members),
+ 'description': _desc,
+ 'groups': json.dumps([]),
+ 'status': "enabled"
+ }
+ try:
+ self.db.insert_atom(self.keyspace, self.group_rules_table, data)
+ except Exception as e:
+ self.logger.error("DB: while creating a group rule: " + str(e))
+ return False
+
+ return True
+
+ def get_valet_groups(self):
+ """Get all valet groups."""
+
+ group_list = []
+
+ try:
+ rows = self.db.read_all_rows(self.keyspace, self.groups_table)
+ except Exception as e:
+ self.logger.error("DB: while reading groups: " + str(e))
+ return None
+
+ if len(rows) > 0:
+ for key, row in rows.iteritems():
+ if key == "result":
+ for _, dbrow in row.iteritems():
+ group_list.append(dbrow)
+
+ return group_list
+
+ def create_valet_group(self, _id, _g_info):
+ """Create a group."""
+
+ data = {
+ 'id': _id,
+ 'uuid': _g_info.get("uuid"),
+ 'type': _g_info.get("group_type"),
+ 'level': _g_info.get("level"),
+ 'factory': _g_info.get("factory"),
+ 'rule_id': _g_info.get("rule_id"),
+ 'metadata': json.dumps(_g_info.get("metadata")),
+ 'server_list': json.dumps(_g_info.get("server_list")),
+ 'member_hosts': json.dumps(_g_info.get("member_hosts")),
+ 'status': _g_info.get("status")
+ }
+ try:
+ self.db.insert_atom(self.keyspace, self.groups_table, data)
+ except Exception as e:
+ self.logger.error("DB: while creating a group: " + str(e))
+ return False
+
+ return True
+
+ def update_valet_group(self, _id, _g_info):
+ """Update group."""
+
+ data = {
+ 'id': _id,
+ 'uuid': _g_info.get("uuid"),
+ 'type': _g_info.get("group_type"),
+ 'level': _g_info.get("level"),
+ 'factory': _g_info.get("factory"),
+ 'rule_id': _g_info.get("rule_id"),
+ 'metadata': json.dumps(_g_info.get("metadata")),
+ 'server_list': json.dumps(_g_info.get("server_list")),
+ 'member_hosts': json.dumps(_g_info.get("member_hosts")),
+ 'status': _g_info.get("status")
+ }
+ try:
+ self.db.insert_atom(self.keyspace, self.groups_table, data,
+ name='id', value=_id)
+ except Exception as e:
+ self.logger.error("DB: while updating group: " + str(e))
+ return False
+
+ return True
+
+ def delete_valet_group(self, _id):
+ """Delete finished request."""
+
+ try:
+ self.db.delete_atom(self.keyspace, self.groups_table, 'id', _id)
+ except Exception as e:
+ self.logger.error("DB: while deleting valet group: " + str(e))
+ return False
+
+ return True
+
+ def get_resource(self, _dc_id):
+ """Get datacenter's resource."""
+
+ try:
+ row = self.db.read_row(self.keyspace, self.resources_table, "id", _dc_id)
+ except Exception as e:
+ self.logger.error("DB: while reading datacenter resource: " + str(e))
+ return None
+
+ if len(row) > 0:
+ if "result" in row.keys():
+ if len(row["result"]) > 0:
+ return row["result"][row["result"].keys()[0]]
+ else:
+ return {}
+ else:
+ return {}
+ else:
+ return {}
+
+ def create_resource(self, _k, _url, _requests, _resource):
+ """Create a new resource status."""
+
+ data = {
+ 'id': _k,
+ 'url': _url,
+ 'requests': json.dumps(_requests),
+ 'resource': json.dumps(_resource)
+ }
+ try:
+ self.db.insert_atom(self.keyspace, self.resources_table, data)
+ except Exception as e:
+ self.logger.error("DB: while inserting resource status: " + str(e))
+ return False
+
+ return True
+
+ def update_resource(self, _k, _url, _requests, _resource):
+ """Update resource status."""
+
+ data = {
+ 'id': _k,
+ 'url': _url,
+ 'requests': json.dumps(_requests),
+ 'resource': json.dumps(_resource)
+ }
+ try:
+ self.db.insert_atom(self.keyspace, self.resources_table, data,
+ name='id', value=_k)
+ except Exception as e:
+ self.logger.error("DB: while updating resource status: " + str(e))
+ return False
+
+ return True
+
+ def get_stack(self, _id):
+ """Get stack info."""
+
+ try:
+ row = self.db.read_row(self.keyspace, self.stacks_table, 'id', _id)
+ except Exception as e:
+ self.logger.error("DB: while getting stack info: " + str(e))
+ return None
+
+ if len(row) > 0:
+ if "result" in row.keys():
+ if len(row["result"]) > 0:
+ return row["result"][row["result"].keys()[0]]
+ else:
+ return {}
+ else:
+ return {}
+ else:
+ return {}
+
+ def create_stack(self, _id, _status, _dc, _name, _uuid,
+ _tenant_id, _metadata,
+ _servers, _old_servers,
+ _state, _old_state):
+ """Store new stack info."""
+
+ data = {
+ 'id': _id,
+ 'last_status': _status,
+ 'datacenter': _dc,
+ 'stack_name': _name,
+ 'uuid': _uuid,
+ 'tenant_id': _tenant_id,
+ 'metadata': json.dumps(_metadata),
+ 'servers': json.dumps(_servers),
+ 'prior_servers': json.dumps(_old_servers),
+ 'state': _state,
+ 'prior_state': _old_state
+ }
+ try:
+ self.db.insert_atom(self.keyspace, self.stacks_table, data)
+ except Exception as e:
+ self.logger.error("DB: while storing app: " + str(e))
+ return False
+
+ return True
+
+ def delete_stack(self, _id):
+ """Delete stack."""
+
+ try:
+ self.db.delete_atom(self.keyspace, self.stacks_table, 'id', _id)
+ except Exception as e:
+ self.logger.error("DB: while deleting app: " + str(e))
+ return False
+
+ return True
+
+ def update_stack(self, _id, _status, _dc, _name, _uuid,
+ _tenant_id, _metadata,
+ _servers, _old_servers,
+ _state, _old_state):
+ """Store updated stack info."""
+
+ data = {
+ 'id': _id,
+ 'last_status': _status,
+ 'datacenter': _dc,
+ 'stack_name': _name,
+ 'uuid': _uuid,
+ 'tenant_id': _tenant_id,
+ 'metadata': json.dumps(_metadata),
+ 'servers': json.dumps(_servers),
+ 'prior_servers': json.dumps(_old_servers),
+ 'state': _state,
+ 'prior_state': _old_state
+ }
+ try:
+ self.db.insert_atom(self.keyspace, self.stacks_table, data,
+ name='id', value=_id)
+ except Exception as e:
+ self.logger.error("DB: while updating stack: " + str(e))
+ return False
+
+ return True
diff --git a/engine/src/valet/engine/db_connect/locks.py b/engine/src/valet/engine/db_connect/locks.py
new file mode 100644
index 0000000..b367e8b
--- /dev/null
+++ b/engine/src/valet/engine/db_connect/locks.py
@@ -0,0 +1,152 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import re
+import time
+
+from valet.utils.logger import Logger
+
+
+def now():
+ return int(round(time.time() * 1000))
+
+
+def later(minutes=0, seconds=0):
+ # Consider 20 sec as a lead time.
+ seconds -= 20
+ return int(round(time.time() * 1000)) + (minutes * 60 + seconds) * 1000
+
+
+class Locks(object):
+ """Manage locking as a semaphore.
+
+ A region lock that manages locks and the region table to
+ lock the entire time while working on a region.
+ """
+
+ Lockspace = "engine"
+
+ def __init__(self, dbh, timeout=None):
+ self.dbh = dbh
+ self.db = dbh.db
+ self.timeout = timeout
+
+ self.locked_regions = []
+ self.expired = 0
+ self.locked = False
+ self.region = None
+ self.key = None
+
+ def set_regions(self):
+ """Set locked regions."""
+
+ lr = self.dbh.clean_expired_regions()
+ if lr is None:
+ return False
+
+ self.locked_regions = lr
+
+ return True
+
+ def _add_region(self, region):
+ """Set when to expire and update/add to region table."""
+
+ self.expired = later(seconds=self.timeout)
+ self.region = region
+
+ if not self.dbh.add_region(self.region, self.expired):
+ return None
+
+ return "yes"
+
+ def is_my_turn(self, region):
+ """Try for a lock, unless you know its already locked.
+
+ If you already have the lock, just update the expire time."""
+
+ if self.expired < now():
+ self.locked = False
+
+ if self.locked:
+ if not self.region == region:
+ return "no"
+
+ return self._add_region(region)
+
+ if region in self.locked_regions:
+ return "no"
+
+ self.db.logger.debug("try lock region: " + region)
+
+ if self._add_region(region) is None:
+ return None
+
+ status = self.got_lock(region)
+ if status is None:
+ return None
+
+ if status == "fail":
+ self.locked = False
+ return "no"
+
+ self.locked = True
+
+ return "yes"
+
+ def got_lock(self, key):
+ """I got lock if I get the first (0) lock"""
+
+ self.key = '%s.%s.%s' % (self.dbh.keyspace, Locks.Lockspace, key)
+
+ try:
+ lock_id = self.db.create_lock(self.key)
+ except Exception as e:
+ Logger.get_logger('debug').error("DB: while creating lock: " + str(e))
+ return None
+
+ if 0 == int(re.search('-(\d+)$', lock_id).group(1)):
+ return "ok"
+ else:
+ return "fail"
+
+ def done_with_my_turn(self):
+ """Release lock and clear from region table."""
+
+ if not self.locked:
+ return "ok"
+
+ try:
+ self.db.delete_lock(self.key)
+ except Exception as e:
+ Logger.get_logger('debug').error("DB: while deleting lock: " + str(e))
+ return None
+
+ if not self.dbh.delete_region(self.region):
+ return None
+
+ self.locked = False
+ self.region = None
+
+ return "ok"
+
+ @staticmethod
+ def unlock(dbh, key):
+ """Removes the lock for a key."""
+
+ key = '%s.%s.%s' % (dbh.keyspace, Locks.Lockspace, key)
+ dbh.db.delete_lock(key)
diff --git a/engine/src/valet/engine/resource_manager/__init__.py b/engine/src/valet/engine/resource_manager/__init__.py
new file mode 100644
index 0000000..bd50995
--- /dev/null
+++ b/engine/src/valet/engine/resource_manager/__init__.py
@@ -0,0 +1,18 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
diff --git a/engine/src/valet/engine/resource_manager/compute_manager.py b/engine/src/valet/engine/resource_manager/compute_manager.py
new file mode 100644
index 0000000..81a95ee
--- /dev/null
+++ b/engine/src/valet/engine/resource_manager/compute_manager.py
@@ -0,0 +1,201 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+#!/bin/python
+
+
+from valet.engine.resource_manager.resources.host import Host
+
+
+class ComputeManager(object):
+ """Resource Manager to maintain compute host resources."""
+
+ def __init__(self, _source, _logger):
+ """Define compute hosts and server allocations."""
+
+ self.source = _source
+
+ self.hosts = {}
+
+ self.logger = _logger
+
+ def get_hosts(self, _resource):
+ """Check any inconsistency and perform garbage collection if necessary."""
+
+ self.logger.info("set compute hosts...")
+
+ # Init first
+ self.hosts.clear()
+
+ # Get available hosts only
+ if self.source.get_hosts(self.hosts) != "ok":
+ self.logger.warning("fail to set hosts from source (e.g., nova)")
+ return False
+
+ # Get servers
+ if self.source.get_servers_in_hosts(self.hosts) != "ok":
+ self.logger.warning("fail to set servers from source (e.g., nova)")
+ return False
+
+ self._check_host_updated(_resource)
+
+ self._check_server_placements(_resource)
+
+ return True
+
+ def _check_host_updated(self, _resource):
+ """Check if hosts and their properties are changed."""
+
+ for hk in self.hosts.keys():
+ if hk not in _resource.hosts.keys():
+ _resource.hosts[hk] = Host(hk)
+ _resource.mark_host_updated(hk)
+
+ self.logger.info("new host (" + hk + ") added")
+
+ for rhk in _resource.hosts.keys():
+ if rhk not in self.hosts.keys():
+ _resource.hosts[rhk].status = "disabled"
+ _resource.mark_host_updated(rhk)
+
+ self.logger.info("host (" + rhk + ") disabled")
+
+ for hk in self.hosts.keys():
+ host = self.hosts[hk]
+ rhost = _resource.hosts[hk]
+
+ if self._is_host_resources_updated(host, rhost):
+ _resource.mark_host_updated(hk)
+
+ def _is_host_resources_updated(self, _host, _rhost):
+ """Check the resource amount consistency."""
+
+ resource_updated = False
+
+ if _host.original_vCPUs != _rhost.original_vCPUs:
+ _rhost.original_vCPUs = _host.original_vCPUs
+
+ self.logger.info("host (" + _rhost.name + ") updated (origin CPU updated)")
+ resource_updated = True
+
+ if _host.vCPUs_used != _rhost.vCPUs_used:
+ _rhost.vCPUs_used = _host.vCPUs_used
+
+ self.logger.info("host (" + _rhost.name + ") updated (CPU updated)")
+ resource_updated = True
+
+ if _host.original_mem_cap != _rhost.original_mem_cap:
+ _rhost.original_mem_cap = _host.original_mem_cap
+
+ self.logger.info("host (" + _rhost.name + ") updated (origin mem updated)")
+ resource_updated = True
+
+ if _host.free_mem_mb != _rhost.free_mem_mb:
+ _rhost.free_mem_mb = _host.free_mem_mb
+
+ self.logger.info("host (" + _rhost.name + ") updated (mem updated)")
+ resource_updated = True
+
+ if _host.original_local_disk_cap != _rhost.original_local_disk_cap:
+ _rhost.original_local_disk_cap = _host.original_local_disk_cap
+
+ self.logger.info("host (" + _rhost.name + ") updated (origin disk updated)")
+ resource_updated = True
+
+ if _host.free_disk_gb != _rhost.free_disk_gb:
+ _rhost.free_disk_gb = _host.free_disk_gb
+
+ self.logger.info("host (" + _rhost.name + ") updated (local disk space updated)")
+ resource_updated = True
+
+ if _host.disk_available_least != _rhost.disk_available_least:
+ _rhost.disk_available_least = _host.disk_available_least
+
+ self.logger.info("host (" + _rhost.name + ") updated (least disk space updated)")
+ resource_updated = True
+
+ return resource_updated
+
+ def _check_server_placements(self, _resource):
+ """Check the consistency of server placements with nova."""
+
+ # To keep how server placements changed.
+ # key =
+ # If uuid is available, uuid
+ # Else stack_id:name
+ # value = {new_host, old_host, server_info}
+ # The server's state must be either 'created', 'migrated', or 'rebuilt'.
+ # That is, deal with only the server which placement decision is confirmed.
+ # If value of new_host (from nova) exists but not for old_host (valet),
+ # the server is unknown one to valet.
+ # If value of new_host not exists but exist for old_host,
+ # the server is deleted by nova.
+ # If value exists both in new_host and old_host,
+ # the server is moved from old to new host.
+ # If value not exist neither,
+ # the server is placed as planned.
+ change_of_placements = {}
+
+ for hk, host in self.hosts.iteritems():
+ rhost = _resource.hosts[hk]
+
+ for s_info in host.server_list:
+ if s_info["stack_id"] != "none":
+ sid = s_info["stack_id"] + ":" + s_info["name"]
+ else:
+ sid = s_info["uuid"]
+
+ change_of_placements[sid] = {}
+ change_of_placements[sid]["info"] = s_info
+
+ if not rhost.has_server(s_info):
+ change_of_placements[sid]["new_host"] = hk
+
+ self.logger.info("host (" + hk + ") updated (server added)")
+ else:
+ change_of_placements[sid]["host"] = hk
+
+ for rhk, rhost in _resource.hosts.iteritems():
+ if not rhost.is_available():
+ continue
+
+ host = self.hosts[rhk]
+
+ for s_info in rhost.server_list:
+ # Deal with confirmed placements only.
+ if s_info["state"] not in ("created", "migrated", "rebuilt"):
+ continue
+
+ if s_info["stack_id"] != "none":
+ sid = s_info["stack_id"] + ":" + s_info["name"]
+ else:
+ sid = s_info["uuid"]
+
+ if not host.has_server(s_info):
+ if sid in change_of_placements.keys():
+ change_of_placements[sid]["old_host"] = rhk
+
+ self.logger.info("server (" + sid + ") is migrated`")
+ else:
+ change_of_placements[sid] = {}
+ change_of_placements[sid]["old_host"] = rhk
+ change_of_placements[sid]["info"] = s_info
+
+ self.logger.info("server (" + sid + ") is deleted")
+
+ _resource.change_of_placements = change_of_placements
diff --git a/engine/src/valet/engine/resource_manager/metadata_manager.py b/engine/src/valet/engine/resource_manager/metadata_manager.py
new file mode 100644
index 0000000..f34e3f0
--- /dev/null
+++ b/engine/src/valet/engine/resource_manager/metadata_manager.py
@@ -0,0 +1,424 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+#!/bin/python
+
+
+import json
+
+from copy import deepcopy
+
+
+class MetadataManager(object):
+ """Metadata Manager to maintain flavors and groups."""
+
+ def __init__(self, _source, _logger):
+ self.source = _source
+
+ self.groups = {}
+
+ self.flavors = {}
+
+ self.logger = _logger
+
+ def get_groups(self, _resource):
+ """Set groups (availability-zones, host-aggregates, server groups)
+
+ from platform (e.g., nova).
+ """
+
+ self.logger.info("set metadata (groups)...")
+
+ # Init first
+ self.groups.clear()
+
+ # Get enabled groups only
+ if self.source.get_groups(self.groups) != "ok":
+ self.logger.warning("fail to set groups from source (e.g., nova)")
+ return False
+
+ self._check_group_updated(_resource)
+
+ self._check_host_memberships_updated(_resource)
+
+ return True
+
+ def _check_group_updated(self, _resource):
+ """Check any inconsistency for groups."""
+
+ for gk in self.groups.keys():
+ if gk not in _resource.groups.keys():
+ _resource.groups[gk] = deepcopy(self.groups[gk])
+ _resource.groups[gk].updated = True
+
+ self.logger.info("new group (" + gk + ") added")
+
+ for rgk in _resource.groups.keys():
+ rg = _resource.groups[rgk]
+
+ if rg.factory != "valet":
+ if rgk not in self.groups.keys():
+ rg.status = "disabled"
+ rg.updated = True
+
+ self.logger.info("group (" + rgk + ") disabled")
+
+ for gk in self.groups.keys():
+ g = self.groups[gk]
+ rg = _resource.groups[gk]
+
+ if rg.uuid is None and g.uuid is not None:
+ rg.uuid = g.uuid
+ rg.updated = True
+
+ self.logger.info("group (" + gk + ") uuid updated")
+
+ # TODO: Clean up resource.hosts if each is not in any AZ members.
+
+ if g.group_type == "aggr":
+ if not gk.startswith("valet:"):
+ if self._is_group_metadata_updated(g, rg):
+ rg.updated = True
+
+ self.logger.info("group (" + gk + ") metadata updated")
+
+ if g.group_type == "az" or g.group_type == "aggr":
+ if self._is_member_hosts_updated(g, _resource):
+ rg.updated = True
+
+ self.logger.info("group (" + gk + ") member hosts updated")
+
+ if g.factory == "server-group":
+ if self._is_new_servers(g, rg):
+ rg.updated = True
+
+ self.logger.info("group (" + gk + ") server_list updated")
+
+ def _is_group_metadata_updated(self, _g, _rg):
+ """Check any change in metadata of group."""
+
+ updated = False
+
+ for mdk in _g.metadata.keys():
+ if mdk not in _rg.metadata.keys():
+ _rg.metadata[mdk] = _g.metadata[mdk]
+ updated = True
+
+ for rmdk in _rg.metadata.keys():
+ if rmdk not in _g.metadata.keys():
+ del _rg.metadata[rmdk]
+ updated = True
+
+ for mdk in _g.metadata.keys():
+ mdv = _g.metadata[mdk]
+ rmdv = _rg.metadata[mdk]
+ if mdv != rmdv:
+ _rg.metadata[mdk] = mdv
+ updated = True
+
+ return updated
+
+ def _is_member_hosts_updated(self, _g, _resource):
+ """Check any change in member hosts of group."""
+
+ updated = False
+
+ _rg = _resource.groups[_g.name]
+
+ for hk in _g.member_hosts.keys():
+ if hk not in _rg.member_hosts.keys():
+ if hk in _resource.hosts.keys():
+ if _resource.hosts[hk].is_available():
+ _rg.member_hosts[hk] = deepcopy(_g.member_hosts[hk])
+ updated = True
+ # else not needed
+
+ for rhk in _rg.member_hosts.keys():
+ if rhk not in _resource.hosts.keys() or \
+ not _resource.hosts[rhk].is_available() or \
+ rhk not in _g.member_hosts.keys():
+ del _rg.member_hosts[rhk]
+ updated = True
+
+ return updated
+
+ def _is_new_servers(self, _g, _rg):
+ """Check if there is any new server."""
+
+ updated = False
+
+ for s_info in _g.server_list:
+ exist = False
+ for rs_info in _rg.server_list:
+ if rs_info.get("uuid") == s_info.get("uuid"):
+ exist = True
+ break
+
+ if not exist:
+ _rg.server_list.append(s_info)
+ updated = True
+
+ return updated
+
+ def _check_host_memberships_updated(self, _resource):
+ """Check host memberships consistency."""
+
+ for gk, g in _resource.groups.iteritems():
+ # Other group types will be handled later
+ if g.factory != "valet" and g.status == "enabled":
+ for hk in g.member_hosts.keys():
+ host = _resource.hosts[hk]
+ if gk not in host.memberships.keys() or g.updated:
+ host.memberships[gk] = g
+ _resource.mark_host_updated(hk)
+
+ self.logger.info("host (" + hk + ") updated (update membership - " + gk + ")")
+
+ for hk, host in _resource.hosts.iteritems():
+ if host.is_available():
+ for gk in host.memberships.keys():
+ if gk in _resource.groups.keys():
+ g = _resource.groups[gk]
+ if g.factory != "valet":
+ if g.status == "enabled":
+ if g.updated:
+ host.memberships[gk] = g
+ _resource.mark_host_updated(hk)
+
+ self.logger.info("host (" + hk + ") updated (update membership - " + gk + ")")
+ else:
+ del host.memberships[gk]
+ _resource.mark_host_updated(hk)
+
+ self.logger.info("host (" + hk + ") updated (remove membership - " + gk + ")")
+ else:
+ del host.memberships[gk]
+ _resource.mark_host_updated(hk)
+
+ self.logger.info("host (" + hk + ") updated (remove membership - " + gk + ")")
+
+ def create_exclusive_aggregate(self, _group, _hosts):
+ """Set Host-Aggregate to apply Exclusivity."""
+
+ az = _hosts[0].get_availability_zone()
+
+ # To remove 'az:' header from name
+ az_name_elements = az.name.split(':', 1)
+ if len(az_name_elements) > 1:
+ az_name = az_name_elements[1]
+ else:
+ az_name = az.name
+
+ status = self.source.set_aggregate(_group.name, az_name)
+ if status != "ok":
+ return status
+
+ self.logger.debug("dynamic host-aggregate(" + _group.name + ") created")
+
+ aggregates = {}
+ status = self.source.get_aggregates(aggregates)
+ if status != "ok":
+ return status
+
+ if _group.name in aggregates.keys():
+ _group.uuid = aggregates[_group.name].uuid
+
+ if len(_group.metadata) > 0:
+ metadata = {}
+ for mk, mv in _group.metadata.iteritems():
+ if mk == "prior_metadata":
+ metadata[mk] = json.dumps(mv)
+ else:
+ metadata[mk] = mv
+
+ status = self.source.set_metadata_of_aggregate(_group.uuid, metadata)
+ if status != "ok":
+ return status
+
+ self.logger.debug("dynamic host-aggregate(" + _group.name + ") metadata created")
+
+ for host in _hosts:
+ if host.name in _group.metadata.keys():
+ aggr_uuids = _group.metadata[host.name].split(',')
+
+ for uuid in aggr_uuids:
+ status = self.source.remove_host_from_aggregate(int(uuid), host.name)
+ if status != "ok":
+ return status
+
+ self.logger.debug("host-aggregate(" + uuid + ") host(" + host.name + ") removed")
+
+ status = self.source.add_host_to_aggregate(_group.uuid, host.name)
+ if status != "ok":
+ return status
+
+ self.logger.debug("dynamic host-aggregate(" + _group.name + ") host(" + host.name + ") added")
+ else:
+ status = "dynamic host-aggregate not found"
+ self.logger.error(status)
+ return status
+
+ return "ok"
+
+ def update_exclusive_aggregate(self, _id, _metadata, _host, _old_aggregates):
+ """Update Host-Aggregate to apply Exclusivity."""
+
+ if len(_metadata) > 0:
+ metadata = {}
+ for mk, mv in _metadata.iteritems():
+ if mk == "prior_metadata":
+ metadata[mk] = json.dumps(mv)
+ else:
+ metadata[mk] = mv
+
+ status = self.source.set_metadata_of_aggregate(_id, metadata)
+ if status != "ok":
+ return status
+
+ self.logger.debug("dynamic host-aggregate(" + str(_id) + ") metadata updated")
+
+ for oa in _old_aggregates:
+ status = self.source.remove_host_from_aggregate(oa.uuid, _host)
+ if status != "ok":
+ return status
+
+ self.logger.debug("host-aggregate(" + oa.name + ") host(" + _host + ") removed")
+
+ status = self.source.add_host_to_aggregate(_id, _host)
+ if status != "ok":
+ return status
+
+ self.logger.debug("dynamic host-aggregate(" + str(_id) + ") host(" + _host + ") added")
+
+ return "ok"
+
+ def remove_host_from_exclusive_aggregate(self, _id, _metadata, _host, _old_aggregates):
+ """Remove host from Host-Aggregate to apply Exclusivity."""
+
+ if len(_metadata) > 0:
+ metadata = {}
+ for mk, mv in _metadata.iteritems():
+ if mk == "prior_metadata":
+ metadata[mk] = json.dumps(mv)
+ else:
+ metadata[mk] = mv
+
+ status = self.source.set_metadata_of_aggregate(_id, metadata)
+ if status != "ok":
+ return status
+
+ self.logger.debug("dynamic host-aggregate(" + str(_id) + ") metadata updated")
+
+ status = self.source.remove_host_from_aggregate(_id, _host)
+ if status != "ok":
+ return status
+
+ self.logger.debug("dynamic host-aggregate(" + str(_id) + ") host(" + _host + ") removed")
+
+ for oa in _old_aggregates:
+ status = self.source.add_host_to_aggregate(oa.uuid, _host)
+ if status != "ok":
+ return status
+
+ self.logger.debug("host-aggregate(" + oa.name + ") host(" + _host + ") added")
+
+ return "ok"
+
+ def remove_exclusive_aggregate(self, _id):
+ """Remove Host-Aggregate."""
+
+ status = self.source.delete_aggregate(_id)
+ if status != "ok":
+ return status
+
+ self.logger.debug("dynamic host-aggregate(" + str(_id) + ") removed")
+
+ return "ok"
+
+ def get_flavors(self, _resource):
+ """Set flavors from nova."""
+
+ self.logger.info("set metadata (flavors)...")
+
+ # Init first
+ self.flavors.clear()
+
+ # Get enabled flavors only
+ if self.source.get_flavors(self.flavors, detailed=False) != "ok":
+ return False
+
+ self._check_flavor_update(_resource, False)
+
+ return True
+
+ def _check_flavor_update(self, _resource, _detailed):
+ """Check flavor info consistency."""
+
+ for fk in self.flavors.keys():
+ if fk not in _resource.flavors.keys():
+ _resource.flavors[fk] = deepcopy(self.flavors[fk])
+ _resource.flavors[fk].updated = True
+
+ self.logger.info("new flavor (" + fk + ":" + self.flavors[fk].flavor_id + ") added")
+
+ for rfk in _resource.flavors.keys():
+ rf = _resource.flavors[rfk]
+ if rfk not in self.flavors.keys():
+ rf.status = "disabled"
+ rf.updated = True
+
+ self.logger.info("flavor (" + rfk + ":" + rf.flavor_id + ") removed")
+
+ if _detailed:
+ for fk in self.flavors.keys():
+ f = self.flavors[fk]
+ rf = _resource.flavors[fk]
+ if self._is_flavor_spec_updated(f, rf):
+ rf.updated = True
+
+ self.logger.info("flavor (" + fk + ":" + rf.flavor_id + ") spec updated")
+
+ def _is_flavor_spec_updated(self, _f, _rf):
+ """Check flavor's spec consistency."""
+
+ spec_updated = False
+
+ if _f.vCPUs != _rf.vCPUs or _f.mem_cap != _rf.mem_cap or _f.disk_cap != _rf.disk_cap:
+ _rf.vCPUs = _f.vCPUs
+ _rf.mem_cap = _f.mem_cap
+ _rf.disk_cap = _f.disk_cap
+ spec_updated = True
+
+ for sk in _f.extra_specs.keys():
+ if sk not in _rf.extra_specs.keys():
+ _rf.extra_specs[sk] = _f.extra_specs[sk]
+ spec_updated = True
+
+ for rsk in _rf.extra_specs.keys():
+ if rsk not in _f.extra_specs.keys():
+ del _rf.extra_specs[rsk]
+ spec_updated = True
+
+ for sk in _f.extra_specs.keys():
+ sv = _f.extra_specs[sk]
+ rsv = _rf.extra_specs[sk]
+ if sv != rsv:
+ _rf.extra_specs[sk] = sv
+ spec_updated = True
+
+ return spec_updated
diff --git a/engine/src/valet/engine/resource_manager/naming.py b/engine/src/valet/engine/resource_manager/naming.py
new file mode 100644
index 0000000..bdf5211
--- /dev/null
+++ b/engine/src/valet/engine/resource_manager/naming.py
@@ -0,0 +1,146 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+#!/bin/python
+
+
+import copy
+import re
+
+from sre_parse import isdigit
+from valet.engine.resource_manager.resources.host_group import HostGroup
+
+
+class Naming(object):
+ """Using cannonical naming convention to capture datacenter layout."""
+
+ def __init__(self, _config, _logger):
+ self.logger = _logger
+
+ self.rack_code_list = _config.get("rack_codes")
+ self.host_code_list = _config.get("host_codes")
+
+ def get_topology(self, _datacenter, _host_groups, _hosts, _rhosts):
+ """Set datacenter resource structure (racks, hosts)."""
+
+ status = "ok"
+
+ for rhk, rhost in _rhosts.iteritems():
+ h = copy.deepcopy(rhost)
+
+ (rack_name, parsing_status) = self._set_layout_by_name(rhk)
+ if parsing_status != "ok":
+ self.logger.warning(parsing_status + " in host_name (" + rhk + ")")
+
+ if rack_name == "none":
+ h.host_group = _datacenter
+ _datacenter.resources[rhk] = h
+ else:
+ if rack_name not in _host_groups.keys():
+ host_group = HostGroup(rack_name)
+ host_group.host_type = "rack"
+ _host_groups[host_group.name] = host_group
+ else:
+ host_group = _host_groups[rack_name]
+
+ h.host_group = host_group
+ host_group.child_resources[rhk] = h
+
+ _hosts[h.name] = h
+
+ for hgk, hg in _host_groups.iteritems():
+ hg.parent_resource = _datacenter
+ _datacenter.resources[hgk] = hg
+
+ if "none" in _host_groups.keys():
+ self.logger.warning("some hosts are into unknown rack")
+
+ return status
+
+ def _set_layout_by_name(self, _host_name):
+ """Set the rack-host layout, use host nameing convention.
+
+ Naming convention includes
+ zone name is any word followed by at least one of [0-9]
+ rack name is rack_code followd by at least one of [0-9]
+ host name is host_code followed by at least one of [0-9]
+ an example is
+ 'abcd_001A' (as a zone_name) +
+ 'r' (as a rack_code) + '01A' +
+ 'c' (as a host_code) + '001A'
+ """
+
+ zone_name = None
+ rack_name = None
+ host_name = None
+
+ # To check if zone name follows the rule
+ index = 0
+ for c in _host_name:
+ if isdigit(c):
+ break
+ index += 1
+ zone_indicator = _host_name[index:]
+ if len(zone_indicator) == 0:
+ return 'none', "no numberical digit in name"
+
+ # To extract rack indicator
+ for rack_code in self.rack_code_list:
+ rack_index_list = [rc.start() for rc in re.finditer(rack_code, zone_indicator)]
+
+ start_of_rack_index = -1
+ for rack_index in rack_index_list:
+ rack_prefix = rack_index + len(rack_code)
+ if rack_prefix > len(zone_indicator):
+ continue
+
+ # Once rack name follows the rule
+ if isdigit(zone_indicator[rack_prefix]):
+ rack_indicator = zone_indicator[rack_prefix:]
+
+ # To extract host indicator
+ for host_code in self.host_code_list:
+ host_index_list = [hc.start() for hc in re.finditer(host_code, rack_indicator)]
+
+ start_of_host_index = -1
+ for host_index in host_index_list:
+ host_prefix = host_index + len(host_code)
+ if host_prefix > len(rack_indicator):
+ continue
+
+ if isdigit(rack_indicator[host_prefix]):
+ host_name = rack_indicator[host_index:]
+ start_of_host_index = rack_index + host_index + 1
+ break
+
+ if host_name is not None:
+ rack_name = zone_indicator[rack_index:start_of_host_index]
+ break
+
+ if rack_name is not None:
+ start_of_rack_index = index + rack_index
+ break
+
+ if rack_name is not None:
+ zone_name = _host_name[:start_of_rack_index]
+ break
+
+ if rack_name is None:
+ return 'none', "no host or rack name found in " + _host_name
+ else:
+ return zone_name + rack_name, "ok"
diff --git a/engine/src/valet/engine/resource_manager/nova_compute.py b/engine/src/valet/engine/resource_manager/nova_compute.py
new file mode 100644
index 0000000..6887eb8
--- /dev/null
+++ b/engine/src/valet/engine/resource_manager/nova_compute.py
@@ -0,0 +1,544 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import json
+import time
+import traceback
+
+from novaclient import client as nova_client
+
+from valet.engine.resource_manager.resources.flavor import Flavor
+from valet.engine.resource_manager.resources.group import Group
+from valet.engine.resource_manager.resources.host import Host
+from valet.utils.decryption import decrypt
+
+
+# Nova API version
+VERSION = 2
+
+
+# noinspection PyBroadException
+class NovaCompute(object):
+ """Source to collect resource status (i.e., OpenStack Nova).
+
+ Manupulate Host-Aggregate with Valet placement decisions.
+ """
+
+ def __init__(self, _config, _logger):
+ self.logger = _logger
+
+ self.nova = None
+
+ self.novas = {}
+ self.last_activate_urls = {}
+ self.life_time = 43200 # 12 hours
+
+ # TODO(Gueyoung): handle both admin and admin_view accounts.
+
+ pw = decrypt(_config["engine"]["ek"],
+ _config["logging"]["lk"],
+ _config["db"]["dk"],
+ _config["nova"]["admin_view_password"])
+
+ self.admin_username = _config["nova"]["admin_view_username"]
+ self.admin_password = pw
+ self.project = _config["nova"]["project_name"]
+
+ def set_client(self, _auth_url):
+ """Set nova client."""
+
+ try:
+ # TODO: add timeout=_timeout?
+ self.novas[_auth_url] = nova_client.Client(VERSION,
+ self.admin_username,
+ self.admin_password,
+ self.project,
+ _auth_url)
+
+ self.last_activate_urls[_auth_url] = time.time()
+
+ self.nova = self.novas[_auth_url]
+ return True
+ except Exception:
+ self.logger.error(traceback.format_exc())
+ return False
+
+ def valid_client(self, _auth_url):
+ """Check if nova connection is valid."""
+
+ if _auth_url not in self.novas.keys():
+ return False
+
+ if _auth_url not in self.last_activate_urls.keys():
+ return False
+
+ elapsed_time = time.time() - self.last_activate_urls[_auth_url]
+
+ if elapsed_time > self.life_time:
+ return False
+
+ self.nova = self.novas[_auth_url]
+
+ return True
+
+ def get_groups(self, _groups):
+ """Get server-groups, availability-zones and host-aggregates
+
+ from OpenStack Nova.
+ """
+
+ status = self._get_availability_zones(_groups)
+ if status != "ok":
+ self.logger.error(status)
+ return status
+
+ status = self.get_aggregates(_groups)
+ if status != "ok":
+ self.logger.error(status)
+ return status
+
+ status = self._get_server_groups(_groups)
+ if status != "ok":
+ self.logger.error(status)
+ return status
+
+ return "ok"
+
+    def _get_availability_zones(self, _groups):
+        """Set AZs into _groups as Group objects keyed by 'az:<name>'.
+
+        Returns "ok" or an error string.
+        """
+
+        try:
+            # TODO: try hosts_list = self.nova.hosts.list()?
+
+            az_list = self.nova.availability_zones.list(detailed=True)
+
+            for a in az_list:
+                if a.zoneState["available"]:
+                    # NOTE(Gueyoung): add 'az:' to avoid conflict with
+                    # Host-Aggregate name.
+                    az_id = "az:" + a.zoneName
+
+                    az = Group(az_id)
+
+                    az.group_type = "az"
+                    az.factory = "nova"
+                    az.level = "host"
+
+                    # TODO: Get AZ first with init Compute Hosts?
+
+                    # Keep only hosts whose nova-compute service is both
+                    # active and available.
+                    for hk, h_info in a.hosts.iteritems():
+                        if "nova-compute" in h_info.keys():
+                            if h_info["nova-compute"]["active"] and \
+                               h_info["nova-compute"]["available"]:
+                                az.member_hosts[hk] = []
+
+                    _groups[az_id] = az
+
+        except Exception:
+            self.logger.error(traceback.format_exc())
+            return "error while setting availability-zones from Nova"
+
+        return "ok"
+
+    def get_aggregates(self, _groups):
+        """Set host-aggregates and corresponding hosts.
+
+        Populates _groups with Group objects keyed by aggregate name.
+        Returns "ok" or an error string.
+        """
+
+        try:
+            aggregate_list = self.nova.aggregates.list()
+
+            for a in aggregate_list:
+                if not a.deleted:
+                    aggregate = Group(a.name)
+
+                    aggregate.uuid = a.id
+
+                    aggregate.group_type = "aggr"
+                    aggregate.factory = "nova"
+                    aggregate.level = "host"
+
+                    # 'prior_metadata' is stored JSON-encoded in nova;
+                    # decode it back into a dict here.
+                    metadata = {}
+                    for mk in a.metadata.keys():
+                        if mk == "prior_metadata":
+                            metadata[mk] = json.loads(a.metadata.get(mk))
+                        else:
+                            metadata[mk] = a.metadata.get(mk)
+                    aggregate.metadata = metadata
+
+                    for hn in a.hosts:
+                        aggregate.member_hosts[hn] = []
+
+                    _groups[aggregate.name] = aggregate
+
+        except Exception:
+            self.logger.error(traceback.format_exc())
+            return "error while setting host-aggregates from Nova"
+
+        return "ok"
+
+ def set_aggregate(self, _name, _az):
+ """Create a Host-Aggregate."""
+
+ try:
+ self.nova.aggregates.create(_name, _az)
+ except Exception:
+ self.logger.error(traceback.format_exc())
+ return "error while setting a host-aggregate in Nova"
+
+ return "ok"
+
+ def add_host_to_aggregate(self, _aggr, _host):
+ """Add a Host into the Host-Aggregate."""
+
+ try:
+ self.nova.aggregates.add_host(_aggr, _host)
+ except Exception:
+ self.logger.error(traceback.format_exc())
+ return "error while adding a host into host-aggregate in Nova"
+
+ return "ok"
+
+ def delete_aggregate(self, _aggr):
+ """Delete the Host-Aggregate."""
+
+ try:
+ self.nova.aggregates.delete(_aggr)
+ except Exception:
+ self.logger.error(traceback.format_exc())
+ return "error while deleting host-aggregate from Nova"
+
+ return "ok"
+
+ def remove_host_from_aggregate(self, _aggr, _host):
+ """Remove the Host from the Host-Aggregate."""
+
+ try:
+ self.nova.aggregates.remove_host(_aggr, _host)
+ except Exception:
+ self.logger.error(traceback.format_exc())
+ return "error while removing host from host-aggregate in Nova"
+
+ return "ok"
+
+ def set_metadata_of_aggregate(self, _aggr, _metadata):
+ """Set metadata.
+
+ Note that Nova adds key/value pairs into metadata instead of replacement.
+ """
+
+ try:
+ self.nova.aggregates.set_metadata(_aggr, _metadata)
+ except Exception:
+ self.logger.error(traceback.format_exc())
+ return "error while setting metadata of host-aggregate in Nova"
+
+ return "ok"
+
+    def _get_server_groups(self, _groups):
+        """Set server-groups and their member servers into _groups.
+
+        (The previous docstring incorrectly described host-aggregates.)
+        Returns "ok" or an error string.
+        """
+
+        try:
+            # NOTE(Gueyoung): novaclient v2.18.0 does not have 'all_projects=True' param.
+            server_group_list = self.nova.server_groups.list()
+
+            for g in server_group_list:
+                server_group = Group(g.name)
+
+                server_group.uuid = g.id
+
+                # TODO: Check len(g.policies) == 1
+                # policy is either 'affinity', 'anti-affinity', 'soft-affinity',
+                # or 'soft-anti-affinity'
+                if g.policies[0] == "anti-affinity":
+                    server_group.group_type = "diversity"
+                else:
+                    server_group.group_type = g.policies[0]
+                server_group.factory = "server-group"
+                server_group.level = "host"
+
+                # Members attribute is a list of server uuids
+                # Only the uuid is known here; other fields use sentinel
+                # values ("none" / -1) until filled by later lookups.
+                for s_uuid in g.members:
+                    s_info = {}
+                    s_info["stack_id"] = "none"
+                    s_info["stack_name"] = "none"
+                    s_info["uuid"] = s_uuid
+                    s_info["orch_id"] = "none"
+                    s_info["name"] = "none"
+                    s_info["flavor_id"] = "none"
+                    s_info["vcpus"] = -1
+                    s_info["mem"] = -1
+                    s_info["disk"] = -1
+                    s_info["numa"] = "none"
+                    s_info["image_id"] = "none"
+                    s_info["tenant_id"] = "none"
+                    s_info["state"] = "created"
+                    s_info["status"] = "valid"
+
+                    server_group.server_list.append(s_info)
+
+                # TODO: Check duplicated name as group identifier
+                _groups[server_group.name] = server_group
+
+        except Exception:
+            self.logger.error(traceback.format_exc())
+            return "error while setting server-groups from Nova"
+
+        return "ok"
+
+ def get_hosts(self, _hosts):
+ """Set host resources info."""
+
+ # TODO: Deprecated as of version 2.43
+ status = self._get_hosts(_hosts)
+ if status != "ok":
+ self.logger.error(status)
+ return status
+
+ status = self._get_host_details(_hosts)
+ if status != "ok":
+ self.logger.error(status)
+ return status
+
+ return "ok"
+
+ # TODO: Deprecated as of version 2.43
+ def _get_hosts(self, _hosts):
+ """Init hosts."""
+
+ try:
+ host_list = self.nova.hosts.list()
+
+ for h in host_list:
+ if h.service == "compute":
+ host = Host(h.host_name)
+ _hosts[host.name] = host
+ except Exception:
+ self.logger.error(traceback.format_exc())
+ return "error while setting hosts from Nova"
+
+ return "ok"
+
+    def _get_host_details(self, _hosts):
+        """Get each host's resource status from the hypervisor list.
+
+        Hosts not enabled/up are removed from _hosts; the rest get their
+        capacity fields filled in. Returns "ok" or an error string.
+        """
+
+        try:
+            # TODO: marker: the last UUID of return, limit: the number of hosts returned.
+            # with_servers=True
+            host_list = self.nova.hypervisors.list(detailed=True)
+
+            for hv in host_list:
+                if hv.service['host'] in _hosts.keys():
+                    # Drop hosts that are not usable for placement.
+                    if hv.status != "enabled" or hv.state != "up":
+                        del _hosts[hv.service['host']]
+                    else:
+                        host = _hosts[hv.service['host']]
+
+                        host.uuid = hv.id
+
+                        host.status = hv.status
+                        host.state = hv.state
+                        # Capacities are stored as floats for later math.
+                        host.original_vCPUs = float(hv.vcpus)
+                        host.vCPUs_used = float(hv.vcpus_used)
+                        host.original_mem_cap = float(hv.memory_mb)
+                        host.free_mem_mb = float(hv.free_ram_mb)
+                        host.original_local_disk_cap = float(hv.local_gb)
+                        host.free_disk_gb = float(hv.free_disk_gb)
+                        host.disk_available_least = float(hv.disk_available_least)
+
+                        # TODO: cpu_info:topology:sockets
+
+        except Exception:
+            self.logger.error(traceback.format_exc())
+            return "error while setting host resources from Nova"
+
+        return "ok"
+
+ def get_servers_in_hosts(self, _hosts):
+ """Set servers in hosts.
+
+ Fetches all servers from Nova and appends a normalized server-info
+ dict to the owning Host's server_list. Returns "ok" or the error
+ message from get_server_detail().
+ """
+
+ (status, server_list) = self.get_server_detail()
+ if status != "ok":
+ self.logger.error(status)
+ return status
+
+ for s in server_list:
+ s_info = {}
+
+ if "stack-id" in s.metadata.keys():
+ s_info["stack_id"] = s.metadata["stack-id"]
+ # NOTE(review): "stack_name" is not set in this branch, while the
+ # else-branch sets both keys -- confirm downstream readers use
+ # .get() or tolerate the missing key.
+ else:
+ s_info["stack_id"] = "none"
+ s_info["stack_name"] = "none"
+
+ s_info["uuid"] = s.id
+
+ s_info["orch_id"] = "none"
+ s_info["name"] = s.name
+
+ s_info["flavor_id"] = s.flavor["id"]
+
+ # Embedded flavor details require compute API microversion >= 2.47;
+ # otherwise fall back to -1 sentinels (resolved later via flavor_id).
+ if "vcpus" in s.flavor.keys():
+ s_info["vcpus"] = s.flavor["vcpus"]
+ s_info["mem"] = s.flavor["ram"]
+ s_info["disk"] = s.flavor["disk"]
+ s_info["disk"] += s.flavor["ephemeral"]
+ s_info["disk"] += s.flavor["swap"] / float(1024)
+ else:
+ s_info["vcpus"] = -1
+ s_info["mem"] = -1
+ s_info["disk"] = -1
+
+ s_info["numa"] = "none"
+
+ try:
+ s_info["image_id"] = s.image["id"]
+ except TypeError:
+ # Boot-from-volume servers report image as '' (not a dict).
+ self.logger.warning("In get_servers_in_hosts, expected s.image to have id tag, but it's actually " + s.image)
+ s_info["image_id"] = s.image
+
+ s_info["tenant_id"] = s.tenant_id
+
+ s_info["state"] = "created"
+ s_info["status"] = "valid"
+
+ s_info["host"] = s.__getattr__("OS-EXT-SRV-ATTR:host")
+
+ # s_info["power_state"] = s.__getattr__("OS-EXT-STS:power_state")
+ # s_info["vm_state"] = s.__getattr__("OS-EXT-STS:vm_state")
+ # s_info["task_state"] = s.__getattr__("OS-EXT-STS:task_state")
+
+ # Servers on unknown (e.g. disabled) hosts are dropped silently.
+ if s_info["host"] in _hosts.keys():
+ host = _hosts[s_info["host"]]
+ host.server_list.append(s_info)
+
+ return "ok"
+
+ def get_server_detail(self, project_id=None, host_name=None, server_name=None, uuid=None):
+ """Get the detail of server with search by option.
+
+ All filters are optional; all_tenants=1 is always applied.
+ Returns a 2-tuple: ("ok", server_list) on success, or
+ (error_message, None) on failure.
+ """
+
+ # TODO: Get servers' info in each host
+ # Minimum requirement for server info: s["metadata"]["stack-id"],
+ # More: s["flavor"]["id"], s["tenant_id"]
+ # Maybe: s["image"], server.__getattr__("OS-EXT-AZ:availability_zone"), s["status"]
+ # and scheduler_hints?
+ try:
+ options = {"all_tenants": 1}
+ if project_id is not None:
+ options["project_id"] = project_id
+ if host_name is not None:
+ options["host"] = host_name
+ if server_name is not None:
+ options["name"] = server_name
+ if uuid is not None:
+ options["uuid"] = uuid
+
+ # TODO: search by vm_state?
+
+ # NOTE: options always has at least all_tenants, so the else-branch
+ # below is currently unreachable.
+ if len(options) > 0:
+ server_list = self.nova.servers.list(detailed=True, search_opts=options)
+ else:
+ server_list = self.nova.servers.list(detailed=True)
+
+ except Exception:
+ self.logger.error(traceback.format_exc())
+ return "error while getting server detail from nova", None
+
+ return "ok", server_list
+
+ def get_flavors(self, _flavors, detailed=True):
+ """Get flavors.
+
+ Fills _flavors (dict: flavor name -> Flavor) with public and
+ non-public flavors. Returns "ok" or an error message string.
+ """
+
+ if detailed:
+ result_status = self._get_flavors(_flavors, True)
+ else:
+ result_status = self._get_flavors(_flavors, False)
+
+ if result_status != "ok":
+ self.logger.error(result_status)
+
+ return result_status
+
+ def _get_flavors(self, _flavors, _detailed):
+ """Get a list of all flavors.
+
+ Two passes: public flavors first, then non-public (is_public=False)
+ flavors that were not already seen. Keyed by flavor name, so a
+ duplicate name overwrites the earlier entry in the first pass.
+ Returns "ok" or an error message string.
+ """
+
+ try:
+ flavor_list = self.nova.flavors.list(detailed=_detailed)
+
+ for f in flavor_list:
+ flavor = self._set_flavor(f, _detailed)
+ _flavors[flavor.name] = flavor
+ except Exception:
+ self.logger.error(traceback.format_exc())
+ return "error while getting flavors"
+
+ # To get non-public flavors.
+ try:
+ flavor_list = self.nova.flavors.list(detailed=_detailed, is_public=False)
+
+ for f in flavor_list:
+ if f.name not in _flavors.keys():
+ flavor = self._set_flavor(f, _detailed)
+ _flavors[flavor.name] = flavor
+ except Exception:
+ self.logger.error(traceback.format_exc())
+ return "error while getting flavors"
+
+ return "ok"
+
+ def get_flavor(self, _flavor_id):
+ """Get the flavor.
+
+ Returns a detailed Flavor for _flavor_id, or None on any Nova error
+ (the traceback is logged).
+ """
+
+ try:
+ f = self.nova.flavors.get(_flavor_id)
+ flavor = self._set_flavor(f, True)
+ except Exception:
+ self.logger.error(traceback.format_exc())
+ return None
+
+ return flavor
+
+ def _set_flavor(self, _f, _detailed):
+ """Set flavor with detailed information.
+
+ Builds a Flavor from a novaclient flavor object. When _detailed is
+ True, also fills vCPUs, memory, total disk (root + ephemeral +
+ swap-in-GB) and extra_specs.
+ """
+
+ flavor = Flavor(_f.name)
+
+ flavor.flavor_id = _f.id
+
+ if _detailed:
+ # NOTE(Gueyoung): This is not allowed with current credential.
+ # if getattr(_f, "OS-FLV-DISABLED:disabled"):
+ # flavor.status = "disabled"
+
+ flavor.vCPUs = float(_f.vcpus)
+ flavor.mem_cap = float(_f.ram)
+
+ root_gb = float(_f.disk)
+ ephemeral_gb = 0.0
+ if hasattr(_f, "OS-FLV-EXT-DATA:ephemeral"):
+ ephemeral_gb = float(getattr(_f, "OS-FLV-EXT-DATA:ephemeral"))
+ swap_mb = 0.0
+ if hasattr(_f, "swap"):
+ sw = getattr(_f, "swap")
+ # Nova reports no-swap as the empty string.
+ if sw != '':
+ swap_mb = float(sw)
+ # Swap is reported in MB; convert to GB to match the disk unit.
+ flavor.disk_cap = root_gb + ephemeral_gb + swap_mb / float(1024)
+
+ # NOTE: iteritems() is Python 2 only (consistent with this codebase).
+ extra_specs = _f.get_keys()
+ for sk, sv in extra_specs.iteritems():
+ flavor.extra_specs[sk] = sv
+
+ return flavor
diff --git a/engine/src/valet/engine/resource_manager/resource.py b/engine/src/valet/engine/resource_manager/resource.py
new file mode 100644
index 0000000..0f2b550
--- /dev/null
+++ b/engine/src/valet/engine/resource_manager/resource.py
@@ -0,0 +1,1589 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import json
+import six
+import time
+
+from valet.engine.app_manager.group import LEVEL
+from valet.engine.resource_manager.resources.datacenter import Datacenter
+from valet.engine.resource_manager.resources.flavor import Flavor
+from valet.engine.resource_manager.resources.group import Group
+from valet.engine.resource_manager.resources.host import Host
+from valet.engine.resource_manager.resources.host_group import HostGroup
+from valet.engine.resource_manager.resources.numa import NUMA
+
+
+class Resource(object):
+ """Container for resource status of a datacenter and all metadata."""
+
+ def __init__(self, _datacenter, _dbh, _compute, _metadata, _topology, _logger):
+ """Initialize resource container.
+
+ _datacenter: dict with at least "id" (and optionally "url").
+ _dbh: DB handler; _compute/_metadata/_topology: source adapters.
+ """
+
+ self.dbh = _dbh
+
+ self.compute = _compute
+ self.metadata = _metadata
+ self.topology = _topology
+
+ # rule_id -> group rule; used to attach rules to loaded groups.
+ self.group_rules = {}
+
+ self.datacenter = None
+ self.datacenter_id = _datacenter.get("id")
+ self.datacenter_url = _datacenter.get("url", "none")
+
+ # name -> HostGroup (rack level), name -> Host.
+ self.host_groups = {}
+ self.hosts = {}
+
+ # Pending placement deltas to apply: server id -> change dict.
+ self.change_of_placements = {}
+
+ self.groups = {}
+ self.flavors = {}
+
+ # Datacenter-wide available totals (refreshed by _update_compute_avail).
+ self.CPU_avail = 0
+ self.mem_avail = 0
+ self.local_disk_avail = 0
+
+ self.default_cpu_allocation_ratio = 1.0
+ self.default_ram_allocation_ratio = 1.0
+ self.default_disk_allocation_ratio = 1.0
+
+ self.new = False
+
+ # To keep unconfirmed requests.
+ # If exist, do NOT sync with platform for the next request.
+ self.pending_requests = []
+
+ self.logger = _logger
+
+ def set_config(self, _cpu_ratio, _ram_ratio, _disk_ratio):
+ """Set default oversubscription ratios used when no aggregate provides one."""
+ self.default_cpu_allocation_ratio = _cpu_ratio
+ self.default_ram_allocation_ratio = _ram_ratio
+ self.default_disk_allocation_ratio = _disk_ratio
+
+ def set_group_rules(self, _rules):
+ """Set the rule_id -> rule mapping consulted when loading Valet groups."""
+ self.group_rules = _rules
+
+ def load_resource_from_db(self):
+ """Load datacenter's resource info from DB.
+
+ Note: all resources in DB are enabled ones.
+
+ Return contract (NOTE(review): three distinct types -- callers must
+ check all of them): None on DB access failure, an error message
+ string when no resource record exists, "ok" on success.
+ """
+
+ self.logger.info("load datacenter resource info from DB")
+
+ # Load Valet groups first.
+ valet_group_list = self.dbh.get_valet_groups()
+ if valet_group_list is None:
+ return None
+
+ valet_groups = {}
+ for vg in valet_group_list:
+ # Group ids are "<datacenter_id>:<rest>"; keep only this DC's.
+ vgk = vg.get("id")
+ dc_id = vgk.split(':', 1)
+
+ if dc_id[0] == self.datacenter_id:
+ # Only groups whose rule is known are loaded.
+ if vg["rule_id"] in self.group_rules.keys():
+ vg["metadata"] = json.loads(vg["metadata"])
+ vg["server_list"] = json.loads(vg["server_list"])
+ vg["member_hosts"] = json.loads(vg["member_hosts"])
+ vg["group_type"] = vg["type"]
+
+ valet_groups[vgk] = vg
+
+ self._load_groups(valet_groups)
+
+ dcr = self.dbh.get_resource(self.datacenter_id)
+ if dcr is None:
+ return None
+
+ if len(dcr) == 0:
+ return "no resource found for datacenter = " + self.datacenter_id
+
+ if self.datacenter_url == "none":
+ self.datacenter_url = dcr["url"]
+
+ pending_requests = json.loads(dcr["requests"])
+ for req in pending_requests:
+ self.pending_requests.append(req)
+
+ resource = json.loads(dcr["resource"])
+
+ groups = resource.get("groups")
+ if groups:
+ self._load_groups(groups)
+
+ flavors = resource.get("flavors")
+ if flavors:
+ self._load_flavors(flavors)
+
+ if len(self.flavors) == 0:
+ self.logger.warning("no flavors in db record")
+
+ hosts = resource.get("hosts")
+ if hosts:
+ self._load_hosts(hosts)
+
+ if len(self.hosts) == 0:
+ self.logger.warning("no hosts in db record")
+
+ host_groups = resource.get("host_groups")
+ if host_groups:
+ self._load_host_groups(host_groups)
+
+ if len(self.host_groups) == 0:
+ self.logger.warning("no host_groups (rack)")
+
+ dc = resource.get("datacenter")
+ self._load_datacenter(dc)
+
+ # Re-link the hierarchy: datacenter -> host_groups/hosts.
+ for ck in dc.get("children"):
+ if ck in self.host_groups.keys():
+ self.datacenter.resources[ck] = self.host_groups[ck]
+ elif ck in self.hosts.keys():
+ self.datacenter.resources[ck] = self.hosts[ck]
+
+ hgs = resource.get("host_groups")
+ if hgs:
+ for hgk, hg in hgs.iteritems():
+ host_group = self.host_groups[hgk]
+
+ pk = hg.get("parent")
+ if pk == self.datacenter.name:
+ host_group.parent_resource = self.datacenter
+ elif pk in self.host_groups.keys():
+ host_group.parent_resource = self.host_groups[pk]
+
+ for ck in hg.get("children"):
+ if ck in self.hosts.keys():
+ host_group.child_resources[ck] = self.hosts[ck]
+ elif ck in self.host_groups.keys():
+ host_group.child_resources[ck] = self.host_groups[ck]
+
+ hs = resource.get("hosts")
+ if hs:
+ for hk, h in hs.iteritems():
+ host = self.hosts[hk]
+
+ pk = h.get("parent")
+ if pk == self.datacenter.name:
+ host.host_group = self.datacenter
+ elif pk in self.host_groups.keys():
+ host.host_group = self.host_groups[pk]
+
+ # Drop group memberships that reference unknown hosts/racks.
+ # NOTE: deleting while iterating .keys() is safe only on Python 2,
+ # where keys() returns a list.
+ for _, g in self.groups.iteritems():
+ for hk in g.member_hosts.keys():
+ if hk not in self.hosts.keys() and \
+ hk not in self.host_groups.keys():
+ del g.member_hosts[hk]
+
+ self._update_compute_avail()
+
+ return "ok"
+
+ def _load_groups(self, _groups):
+ """Take JSON group data as defined in /resources/group and
+ create Group instance.
+
+ Existing entries in self.groups with the same key are overwritten.
+ """
+
+ for gk, g in _groups.iteritems():
+ group = Group(gk)
+
+ group.status = g.get("status")
+
+ group.uuid = g.get("uuid")
+
+ group.group_type = g.get("group_type")
+ group.level = g.get("level")
+ group.factory = g.get("factory")
+
+ # Attach the rule object only if it is known to this Resource.
+ rule_id = g.get("rule_id")
+ if rule_id != "none" and rule_id in self.group_rules.keys():
+ group.rule = self.group_rules[rule_id]
+
+ for mk, mv in g["metadata"].iteritems():
+ group.metadata[mk] = mv
+
+ for s_info in g["server_list"]:
+ group.server_list.append(s_info)
+
+ for hk, server_list in g["member_hosts"].iteritems():
+ group.member_hosts[hk] = []
+ for s_info in server_list:
+ group.member_hosts[hk].append(s_info)
+
+ self.groups[gk] = group
+
+ def _load_flavors(self, _flavors):
+ """Take JSON flavor data as defined in /resources/flavor and
+ create Flavor instance.
+ """
+
+ for fk, f in _flavors.iteritems():
+ flavor = Flavor(fk)
+
+ flavor.status = f.get("status")
+
+ flavor.flavor_id = f.get("flavor_id")
+ flavor.vCPUs = f.get("vCPUs")
+ flavor.mem_cap = f.get("mem")
+ flavor.disk_cap = f.get("disk")
+ for k, v in f["extra_specs"].iteritems():
+ flavor.extra_specs[k] = v
+
+ self.flavors[fk] = flavor
+
+ def _load_hosts(self, _hosts):
+ """Take JSON host data as defined in /resources/host and
+ create Host instance.
+
+ Host.memberships is rebuilt against already-loaded self.groups, so
+ _load_groups must have run first.
+ """
+
+ for hk, h in _hosts.iteritems():
+ host = Host(hk)
+
+ host.status = h.get("status")
+ host.state = h.get("state")
+
+ host.uuid = h.get("uuid")
+
+ host.vCPUs = h.get("vCPUs")
+ host.original_vCPUs = h.get("original_vCPUs")
+ host.vCPUs_used = h.get("vCPUs_used")
+ host.avail_vCPUs = h.get("avail_vCPUs")
+
+ host.mem_cap = h.get("mem")
+ host.original_mem_cap = h.get("original_mem")
+ host.free_mem_mb = h.get("free_mem_mb")
+ host.avail_mem_cap = h.get("avail_mem")
+
+ host.local_disk_cap = h.get("local_disk")
+ host.original_local_disk_cap = h.get("original_local_disk")
+ host.free_disk_gb = h.get("free_disk_gb")
+ host.disk_available_least = h.get("disk_available_least")
+ host.avail_local_disk_cap = h.get("avail_local_disk")
+
+ host.NUMA = NUMA(numa=h.get("NUMA"))
+
+ for s_info in h["server_list"]:
+ host.server_list.append(s_info)
+
+ # Only groups known to this Resource are kept as memberships.
+ for gk in h["membership_list"]:
+ if gk in self.groups.keys():
+ host.memberships[gk] = self.groups[gk]
+
+ # Not used by Valet currently, only capacity planning module
+ if "candidate_host_types" in h.keys():
+ for htk, ht in h["candidate_host_types"].iteritems():
+ host.candidate_host_types[htk] = ht
+ else:
+ host.candidate_host_types = {}
+
+ self.hosts[hk] = host
+
+ def _load_host_groups(self, _host_groups):
+ """Take JSON host-group (rack) data and create HostGroup instances.
+
+ Parent/child links are wired later by load_resource_from_db.
+ """
+ for hgk, hg in _host_groups.iteritems():
+ host_group = HostGroup(hgk)
+
+ host_group.status = hg.get("status")
+
+ host_group.host_type = hg.get("host_type")
+
+ host_group.vCPUs = hg.get("vCPUs")
+ host_group.avail_vCPUs = hg.get("avail_vCPUs")
+
+ host_group.mem_cap = hg.get("mem")
+ host_group.avail_mem_cap = hg.get("avail_mem")
+
+ host_group.local_disk_cap = hg.get("local_disk")
+ host_group.avail_local_disk_cap = hg.get("avail_local_disk")
+
+ for s_info in hg["server_list"]:
+ host_group.server_list.append(s_info)
+
+ # Only groups known to this Resource are kept as memberships.
+ for gk in hg.get("membership_list"):
+ if gk in self.groups.keys():
+ host_group.memberships[gk] = self.groups[gk]
+
+ self.host_groups[hgk] = host_group
+
+ def _load_datacenter(self, _dc):
+ """Create the Datacenter instance from its JSON record (_dc)."""
+ self.datacenter = Datacenter(_dc.get("name"))
+
+ self.datacenter.status = _dc.get("status")
+
+ self.datacenter.vCPUs = _dc.get("vCPUs")
+ self.datacenter.avail_vCPUs = _dc.get("avail_vCPUs")
+
+ self.datacenter.mem_cap = _dc.get("mem")
+ self.datacenter.avail_mem_cap = _dc.get("avail_mem")
+
+ self.datacenter.local_disk_cap = _dc.get("local_disk")
+ self.datacenter.avail_local_disk_cap = _dc.get("avail_local_disk")
+
+ for s_info in _dc["server_list"]:
+ self.datacenter.server_list.append(s_info)
+
+ # Only groups known to this Resource are kept as memberships.
+ for gk in _dc.get("membership_list"):
+ if gk in self.groups.keys():
+ self.datacenter.memberships[gk] = self.groups[gk]
+
+ def _update_compute_avail(self):
+ """Update amount of total available resources.
+
+ Mirrors the datacenter's current availability into the flat
+ CPU_avail/mem_avail/local_disk_avail counters.
+ """
+
+ self.CPU_avail = self.datacenter.avail_vCPUs
+ self.mem_avail = self.datacenter.avail_mem_cap
+ self.local_disk_avail = self.datacenter.avail_local_disk_cap
+
+ def update_resource(self):
+ """Update resource status triggered by placements, events, and batch.
+
+ Re-aggregates bottom-up: host groups in LEVEL order first, then the
+ datacenter, then the flat availability counters. Only nodes flagged
+ 'updated' (and available) are recomputed.
+ """
+
+ for level in LEVEL:
+ for _, host_group in self.host_groups.iteritems():
+ if host_group.host_type == level:
+ if host_group.is_available() and host_group.updated:
+ self._update_host_group(host_group)
+
+ if self.datacenter.updated:
+ self._update_datacenter()
+
+ self._update_compute_avail()
+
+ def _update_host_group(self, _host_group):
+ """Update host group (rack) status.
+
+ Rebuilds capacities, server list, and memberships from scratch by
+ summing over available child hosts.
+ """
+
+ _host_group.init_resources()
+ del _host_group.server_list[:]
+ _host_group.init_memberships()
+
+ for _, host in _host_group.child_resources.iteritems():
+ if host.is_available():
+ _host_group.vCPUs += host.vCPUs
+ _host_group.avail_vCPUs += host.avail_vCPUs
+ _host_group.mem_cap += host.mem_cap
+ _host_group.avail_mem_cap += host.avail_mem_cap
+ _host_group.local_disk_cap += host.local_disk_cap
+ _host_group.avail_local_disk_cap += host.avail_local_disk_cap
+
+ for server_info in host.server_list:
+ _host_group.server_list.append(server_info)
+
+ for gk in host.memberships.keys():
+ _host_group.memberships[gk] = host.memberships[gk]
+
+ def _update_datacenter(self):
+ """Update datacenter status.
+
+ Same rebuild-from-children pattern as _update_host_group, but over
+ the datacenter's direct resources (racks and/or hosts).
+ """
+
+ self.datacenter.init_resources()
+ del self.datacenter.server_list[:]
+ self.datacenter.memberships.clear()
+
+ for _, resource in self.datacenter.resources.iteritems():
+ if resource.is_available():
+ self.datacenter.vCPUs += resource.vCPUs
+ self.datacenter.avail_vCPUs += resource.avail_vCPUs
+ self.datacenter.mem_cap += resource.mem_cap
+ self.datacenter.avail_mem_cap += resource.avail_mem_cap
+ self.datacenter.local_disk_cap += resource.local_disk_cap
+ self.datacenter.avail_local_disk_cap += resource.avail_local_disk_cap
+
+ for s in resource.server_list:
+ self.datacenter.server_list.append(s)
+
+ for gk in resource.memberships.keys():
+ self.datacenter.memberships[gk] = resource.memberships[gk]
+
+ def compute_resources(self, host):
+ """Compute the amount of resources with oversubscription ratios.
+
+ Collects ram/cpu/disk allocation ratios from the host's aggregate
+ memberships (valet-created aggregates keep the original ratios under
+ metadata["prior_metadata"]). The most conservative (minimum) ratio
+ wins; otherwise the configured default (if > 0), else 1.0.
+ """
+
+ ram_allocation_ratio_list = []
+ cpu_allocation_ratio_list = []
+ disk_allocation_ratio_list = []
+
+ for _, g in host.memberships.iteritems():
+ if g.group_type == "aggr":
+ if g.name.startswith("valet:"):
+ metadata = g.metadata["prior_metadata"]
+ else:
+ metadata = g.metadata
+
+ if "ram_allocation_ratio" in metadata.keys():
+ if isinstance(metadata["ram_allocation_ratio"], list):
+ for r in metadata["ram_allocation_ratio"]:
+ ram_allocation_ratio_list.append(float(r))
+ else:
+ ram_allocation_ratio_list.append(float(metadata["ram_allocation_ratio"]))
+ if "cpu_allocation_ratio" in metadata.keys():
+ if isinstance(metadata["cpu_allocation_ratio"], list):
+ for r in metadata["cpu_allocation_ratio"]:
+ cpu_allocation_ratio_list.append(float(r))
+ else:
+ cpu_allocation_ratio_list.append(float(metadata["cpu_allocation_ratio"]))
+ if "disk_allocation_ratio" in metadata.keys():
+ if isinstance(metadata["disk_allocation_ratio"], list):
+ for r in metadata["disk_allocation_ratio"]:
+ disk_allocation_ratio_list.append(float(r))
+ else:
+ disk_allocation_ratio_list.append(float(metadata["disk_allocation_ratio"]))
+
+ ram_allocation_ratio = 1.0
+ if len(ram_allocation_ratio_list) > 0:
+ ram_allocation_ratio = min(ram_allocation_ratio_list)
+ else:
+ if self.default_ram_allocation_ratio > 0:
+ ram_allocation_ratio = self.default_ram_allocation_ratio
+
+ host.compute_mem(ram_allocation_ratio)
+
+ cpu_allocation_ratio = 1.0
+ if len(cpu_allocation_ratio_list) > 0:
+ cpu_allocation_ratio = min(cpu_allocation_ratio_list)
+ else:
+ if self.default_cpu_allocation_ratio > 0:
+ cpu_allocation_ratio = self.default_cpu_allocation_ratio
+
+ host.compute_cpus(cpu_allocation_ratio)
+
+ disk_allocation_ratio = 1.0
+ if len(disk_allocation_ratio_list) > 0:
+ disk_allocation_ratio = min(disk_allocation_ratio_list)
+ else:
+ if self.default_disk_allocation_ratio > 0:
+ disk_allocation_ratio = self.default_disk_allocation_ratio
+
+ host.compute_disk(disk_allocation_ratio)
+
+ def compute_avail_resources(self, host):
+ """Compute available amount of resources after placements.
+
+ Delegates to the host; non-"ok" statuses are logged as warnings
+ but not propagated (best-effort).
+ """
+
+ status = host.compute_avail_mem()
+ if status != "ok":
+ self.logger.warning(status)
+
+ status = host.compute_avail_cpus()
+ if status != "ok":
+ self.logger.warning(status)
+
+ status = host.compute_avail_disk()
+ if status != "ok":
+ self.logger.warning(status)
+
+ def mark_host_updated(self, _host_name):
+ """Mark the host updated.
+
+ Propagates the 'updated' flag up the hierarchy (rack, then
+ datacenter) so update_resource() re-aggregates the ancestors.
+ Raises KeyError if _host_name is unknown.
+ """
+
+ host = self.hosts[_host_name]
+ host.updated = True
+
+ if host.host_group is not None:
+ if isinstance(host.host_group, HostGroup):
+ self.mark_host_group_updated(host.host_group.name)
+ else:
+ # Parent is the datacenter itself (no rack level).
+ self.mark_datacenter_updated()
+
+ def mark_host_group_updated(self, _name):
+ """Mark the host_group updated.
+
+ Recurses upward through nested host groups until the datacenter.
+ """
+
+ host_group = self.host_groups[_name]
+ host_group.updated = True
+
+ if host_group.parent_resource is not None:
+ if isinstance(host_group.parent_resource, HostGroup):
+ self.mark_host_group_updated(host_group.parent_resource.name)
+ else:
+ self.mark_datacenter_updated()
+
+ def mark_datacenter_updated(self):
+ """Mark the datacenter updated (no-op if it is not loaded yet)."""
+
+ if self.datacenter is not None:
+ self.datacenter.updated = True
+
+ def get_host_of_server(self, _s_info):
+ """Check and return host that hosts this server.
+
+ Prefers the pending change_of_placements (keyed by
+ "<stack_id>:<name>" or uuid); otherwise scans all hosts.
+ Returns the Host, or None if not found.
+ """
+
+ host = None
+
+ if len(self.change_of_placements) > 0:
+ if _s_info["stack_id"] != "none":
+ sid = _s_info["stack_id"] + ":" + _s_info["name"]
+ else:
+ sid = _s_info["uuid"]
+
+ if sid in self.change_of_placements.keys():
+ host_name = None
+ if "host" in self.change_of_placements[sid].keys():
+ host_name = self.change_of_placements[sid]["host"]
+ elif "new_host" in self.change_of_placements[sid].keys():
+ host_name = self.change_of_placements[sid]["new_host"]
+
+ if host_name is not None:
+ host = self.hosts[host_name]
+ else:
+ # Not a pending placement: fall back to a full host scan.
+ for _, h in self.hosts.iteritems():
+ if h.has_server(_s_info):
+ host = h
+ break
+
+ return host
+
+ def update_server_placements(self, change_of_placements=None, sync=False):
+ """Update hosts with the change of server placements.
+
+ Update the available resources of host and NUMA if sync is True.
+
+ Each change dict is classified by its keys:
+ new_host + old_host -> migration; new_host only -> creation;
+ old_host only -> deletion; otherwise -> in-place update.
+ Always returns True (flavor-lookup failures are deliberately
+ ignored at this time -- see NOTE(Gueyoung) markers).
+ """
+
+ if change_of_placements is None:
+ change_of_placements = self.change_of_placements
+
+ for _, change in change_of_placements.iteritems():
+ if "new_host" in change and "old_host" in change:
+ # Migration case
+
+ old_host = self.hosts[change.get("old_host")]
+ new_host = self.hosts[change.get("new_host")]
+
+ s_info = change.get("info")
+ old_info = old_host.get_server_info(s_info)
+
+ if sync:
+ # Adjust available remaining amount.
+
+ old_flavor = self.get_flavor(old_info.get("flavor_id"))
+ new_flavor = self.get_flavor(s_info.get("flavor_id"))
+
+ if new_flavor is None or old_flavor is None:
+ # NOTE(Gueyoung): ignore at this time.
+ # return False
+ pass
+ else:
+ s_info["vcpus"] = new_flavor.vCPUs
+ s_info["mem"] = new_flavor.mem_cap
+ s_info["disk"] = new_flavor.disk_cap
+
+ new_host.deduct_avail_resources(s_info)
+
+ if new_flavor.need_numa_alignment():
+ cell = new_host.NUMA.deduct_server_resources(s_info)
+ s_info["numa"] = cell
+
+ old_info["vcpus"] = old_flavor.vCPUs
+ old_info["mem"] = old_flavor.mem_cap
+ old_info["disk"] = old_flavor.disk_cap
+
+ # Return the old allocation to the source host.
+ old_host.rollback_avail_resources(old_info)
+
+ if old_flavor.need_numa_alignment():
+ old_host.NUMA.rollback_server_resources(old_info)
+
+ old_host.remove_server(old_info)
+
+ # Carry the old record over, then overlay the new info.
+ new_host.add_server(old_info)
+ new_host.update_server(s_info)
+
+ self.mark_host_updated(change.get("new_host"))
+ self.mark_host_updated(change.get("old_host"))
+
+ elif "new_host" in change and "old_host" not in change:
+ # New server case
+
+ host = self.hosts[change.get("new_host")]
+ s_info = change.get("info")
+
+ flavor = self.get_flavor(s_info.get("flavor_id"))
+
+ if flavor is None:
+ # NOTE(Gueyoung): ignore at this time.
+ # return False
+ pass
+ else:
+ s_info["vcpus"] = flavor.vCPUs
+ s_info["mem"] = flavor.mem_cap
+ s_info["disk"] = flavor.disk_cap
+
+ host.deduct_avail_resources(s_info)
+
+ host.add_server(s_info)
+
+ if sync:
+ if flavor is not None:
+ # Adjust available remaining amount.
+ if flavor.need_numa_alignment():
+ host.NUMA.deduct_server_resources(s_info)
+ else:
+ # Unknown flavor: trust the NUMA cell already recorded.
+ if s_info.get("numa") != "none":
+ host.NUMA.add_server(s_info)
+
+ self.mark_host_updated(change.get("new_host"))
+
+ elif "new_host" not in change and "old_host" in change:
+ # Deletion case
+
+ host = self.hosts[change.get("old_host")]
+ s_info = change.get("info")
+
+ flavor = self.get_flavor(s_info.get("flavor_id"))
+
+ if flavor is None:
+ # NOTE(Gueyoung): ignore at this time.
+ # return False
+ pass
+ else:
+ s_info["vcpus"] = flavor.vCPUs
+ s_info["mem"] = flavor.mem_cap
+ s_info["disk"] = flavor.disk_cap
+
+ host.rollback_avail_resources(s_info)
+
+ if flavor.need_numa_alignment():
+ host.NUMA.rollback_server_resources(s_info)
+
+ host.remove_server(s_info)
+
+ self.mark_host_updated(change.get("old_host"))
+
+ else:
+ # Update case
+
+ host = self.hosts[change.get("host")]
+ s_info = change.get("info")
+
+ if sync:
+ # Adjust available remaining amount.
+
+ old_info = host.get_server_info(s_info)
+
+ # Only a flavor change (e.g. resize) moves resources around.
+ if s_info["flavor_id"] != old_info["flavor_id"]:
+ old_flavor = self.get_flavor(old_info.get("flavor_id"))
+ new_flavor = self.get_flavor(s_info.get("flavor_id"))
+
+ if old_flavor is None or new_flavor is None:
+ # NOTE(Gueyoung): ignore at this time.
+ # return False
+ pass
+ else:
+ host.rollback_avail_resources(old_info)
+
+ if old_flavor.need_numa_alignment():
+ host.NUMA.rollback_server_resources(old_info)
+
+ s_info["vcpus"] = new_flavor.vCPUs
+ s_info["mem"] = new_flavor.mem_cap
+ s_info["disk"] = new_flavor.disk_cap
+
+ host.deduct_avail_resources(s_info)
+
+ if new_flavor.need_numa_alignment():
+ cell = host.NUMA.deduct_server_resources(s_info)
+ s_info["numa"] = cell
+
+ new_info = host.update_server(s_info)
+
+ if new_info is not None:
+ self.mark_host_updated(change.get("host"))
+
+ return True
+
+ def update_server_grouping(self, change_of_placements=None, new_groups=None):
+ """Update group member_hosts and hosts' memberships
+
+ Caused by server addition, deletion, and migration.
+
+ Uses the same key-based classification of each placement change as
+ update_server_placements (migration / new / deleted / updated).
+ """
+
+ if change_of_placements is None:
+ change_of_placements = self.change_of_placements
+
+ if new_groups is None:
+ new_groups = self._get_new_grouping()
+
+ for _, placement in change_of_placements.iteritems():
+ if "new_host" in placement.keys() and "old_host" in placement.keys():
+ # Migrated server. This server can be unknown one previously.
+
+ old_host = self.hosts[placement.get("old_host")]
+ new_host = self.hosts[placement.get("new_host")]
+ s_info = placement.get("info")
+ new_info = new_host.get_server_info(s_info)
+
+ # A list of Valet groups
+ group_list = []
+ self.get_groups_of_server(old_host, new_info, group_list)
+
+ # Merge groups discovered from the new grouping info.
+ _group_list = self._get_groups_of_server(new_info, new_groups)
+ for gk in _group_list:
+ if gk not in group_list:
+ group_list.append(gk)
+
+ self._remove_server_from_groups(old_host, new_info)
+
+ self._add_server_to_groups(new_host, new_info, group_list)
+
+ elif "new_host" in placement.keys() and "old_host" not in placement.keys():
+ # New server
+
+ new_host = self.hosts[placement.get("new_host")]
+ s_info = placement.get("info")
+ new_s_info = new_host.get_server_info(s_info)
+
+ group_list = self._get_groups_of_server(new_s_info, new_groups)
+
+ self._add_server_to_groups(new_host, new_s_info, group_list)
+
+ elif "new_host" not in placement.keys() and "old_host" in placement.keys():
+ # Deleted server. This server can be unknown one previously.
+
+ # Enabled host
+ host = self.hosts[placement["old_host"]]
+
+ self._remove_server_from_groups(host, placement.get("info"))
+
+ else:
+ host_name = placement.get("host")
+ s_info = placement.get("info")
+
+ if host_name in self.hosts.keys():
+ host = self.hosts[host_name]
+ new_info = host.get_server_info(s_info)
+
+ if new_info is not None:
+ self._update_server_in_groups(host, new_info)
+
+ # To create, delete, and update dynamic Host-Aggregates.
+ # TODO(Gueyoung): return error if fail to connect to Nova.
+ self._manage_dynamic_host_aggregates()
+
+ def _get_new_grouping(self, change_of_placements=None):
+ """Verify and get new hosts' memberships.
+
+ Returns {group_key: [server_info, ...]} for enabled server-groups
+ that contain a newly-placed server; also records the group in the
+ new host's memberships and marks the host updated.
+ """
+
+ if change_of_placements is None:
+ change_of_placements = self.change_of_placements
+
+ new_groups = {}
+
+ # TODO: grouping verification for 'new' servers.
+ # by calling verify_pre_valet_placements()
+ # Should add each host's new memberships.
+
+ # Add host's memberships for server-group.
+ # Do not need to verify.
+ for _, placement in change_of_placements.iteritems():
+ if "new_host" in placement.keys():
+ host = self.hosts[placement.get("new_host")]
+ s_info = placement.get("info")
+ new_info = host.get_server_info(s_info)
+
+ for gk, g in self.groups.iteritems():
+ if g.factory == "server-group" and g.status == "enabled":
+ if g.has_server_uuid(new_info.get("uuid")):
+ if gk not in host.memberships.keys():
+ host.memberships[gk] = g
+ self.mark_host_updated(host.name)
+
+ if gk not in new_groups.keys():
+ new_groups[gk] = []
+ new_groups[gk].append(new_info)
+
+ return new_groups
+
+ def _get_groups_of_server(self, _s_info, new_groups):
+ """Check and return group list where server belongs to.
+
+ Matches by uuid first, then by (stack_id, name), then by
+ (stack_name, name). "none" acts as the not-set sentinel throughout.
+ """
+
+ group_list = []
+
+ _stack_id = _s_info.get("stack_id")
+ _stack_name = _s_info.get("stack_name")
+ _uuid = _s_info.get("uuid")
+ _name = _s_info.get("name")
+
+ for gk, server_list in new_groups.iteritems():
+ for s_info in server_list:
+ if s_info["uuid"] != "none":
+ if s_info["uuid"] == _uuid:
+ if gk not in group_list:
+ group_list.append(gk)
+ break
+
+ if s_info["name"] != "none":
+ if s_info["stack_id"] != "none":
+ if s_info["stack_id"] == _stack_id and \
+ s_info["name"] == _name:
+ if gk not in group_list:
+ group_list.append(gk)
+ break
+
+ if s_info["stack_name"] != "none":
+ if s_info["stack_name"] == _stack_name and \
+ s_info["name"] == _name:
+ if gk not in group_list:
+ group_list.append(gk)
+ break
+
+ return group_list
+
+ def get_groups_of_server(self, _host, _s_info, _group_list):
+ """Get groups where the server is assigned.
+
+ Appends matching valet/server-group keys into _group_list (in-place)
+ and recurses up the host hierarchy. Stale memberships (unknown or
+ disabled groups) are pruned as a side effect.
+ """
+
+ # NOTE: deleting during iteration over .keys() is safe only on
+ # Python 2, where keys() returns a list copy.
+ for gk in _host.memberships.keys():
+ if gk not in self.groups.keys() or self.groups[gk].status != "enabled":
+ del _host.memberships[gk]
+ if isinstance(_host, Host):
+ self.mark_host_updated(_host.name)
+ elif isinstance(_host, HostGroup):
+ self.mark_host_group_updated(_host.name)
+ else:
+ self.mark_datacenter_updated()
+ continue
+
+ g = self.groups[gk]
+
+ if g.factory not in ("valet", "server-group"):
+ continue
+
+ # A rack-level group only matches a rack of the same level.
+ if isinstance(_host, HostGroup):
+ if g.level != _host.host_type:
+ continue
+
+ if g.has_server_in_host(_host.name, _s_info):
+ if gk not in _group_list:
+ _group_list.append(gk)
+
+ # Recurse upward (host -> rack -> parent rack); datacenter level
+ # groups are not considered.
+ if isinstance(_host, Host) and _host.host_group is not None:
+ if _host.host_group.is_available():
+ self.get_groups_of_server(_host.host_group, _s_info, _group_list)
+ elif isinstance(_host, HostGroup) and _host.parent_resource is not None:
+ if _host.parent_resource.is_available():
+ if isinstance(_host.parent_resource, HostGroup):
+ self.get_groups_of_server(_host.parent_resource, _s_info, _group_list)
+
+ def _add_server_to_groups(self, _host, _s_info, _groups):
+ """Add new server into groups.
+
+ Only groups the host is already a member of are touched; stale
+ memberships are pruned. Recurses up the host hierarchy.
+ """
+
+ for gk in _groups:
+ # The group must be verified for host membership
+ if gk not in _host.memberships.keys():
+ continue
+
+ if gk not in self.groups.keys() or self.groups[gk].status != "enabled":
+ del _host.memberships[gk]
+ if isinstance(_host, Host):
+ self.mark_host_updated(_host.name)
+ elif isinstance(_host, HostGroup):
+ self.mark_host_group_updated(_host.name)
+ else:
+ self.mark_datacenter_updated()
+ continue
+
+ g = self.groups[gk]
+
+ if g.factory not in ("valet", "server-group"):
+ continue
+
+ if isinstance(_host, HostGroup):
+ if g.level != _host.host_type:
+ continue
+
+ # Drop any stale record of this uuid before re-adding.
+ if g.factory == "server-group":
+ g.clean_server(_s_info["uuid"], _host.name)
+
+ if g.add_server(_s_info, _host.name):
+ g.updated = True
+ else:
+ self.logger.warning("server already exists in group")
+
+ if isinstance(_host, Host) and _host.host_group is not None:
+ if _host.host_group.is_available():
+ self._add_server_to_groups(_host.host_group, _s_info, _groups)
+ elif isinstance(_host, HostGroup) and _host.parent_resource is not None:
+ if _host.parent_resource.is_available():
+ if isinstance(_host.parent_resource, HostGroup):
+ self._add_server_to_groups(_host.parent_resource, _s_info, _groups)
+
+ def _remove_server_from_groups(self, _host, _s_info):
+ """Remove server from related groups."""
+
+ for gk in _host.memberships.keys():
+ if gk not in self.groups.keys() or self.groups[gk].status != "enabled":
+ del _host.memberships[gk]
+
+ if isinstance(_host, Host):
+ self.mark_host_updated(_host.name)
+ elif isinstance(_host, HostGroup):
+ self.mark_host_group_updated(_host.name)
+ else:
+ self.mark_datacenter_updated()
+ continue
+
+ g = self.groups[gk]
+
+ if g.factory not in ("valet", "server-group"):
+ continue
+
+ if isinstance(_host, HostGroup):
+ if g.level != _host.host_type:
+ continue
+
+ if g.remove_server(_s_info):
+ g.updated = True
+
+ if g.remove_server_from_host(_host.name, _s_info):
+ g.updated = True
+
+ # Remove host from group's membership if the host has no servers of the group.
+ if g.remove_member(_host.name):
+ g.updated = True
+
+        # Remove the group from the host's membership if the group no longer
+        # includes the host. Groups at datacenter level are not considered.
+ if isinstance(_host, Host) or isinstance(_host, HostGroup):
+ if _host.remove_membership(g):
+ if isinstance(_host, Host):
+ self.mark_host_updated(_host.name)
+ elif isinstance(_host, HostGroup):
+ self.mark_host_group_updated(_host.name)
+ else:
+ self.mark_datacenter_updated()
+
+ if len(g.server_list) == 0:
+ g.status = "disabled"
+ g.updated = True
+
+ if isinstance(_host, Host) and _host.host_group is not None:
+ if _host.host_group.is_available():
+ self._remove_server_from_groups(_host.host_group, _s_info)
+ elif isinstance(_host, HostGroup) and _host.parent_resource is not None:
+ if _host.parent_resource.is_available():
+ if isinstance(_host.parent_resource, HostGroup):
+ self._remove_server_from_groups(_host.parent_resource, _s_info)
+
+ def _update_server_in_groups(self, _host, _s_info):
+ """Update server info in groups."""
+
+ for gk in _host.memberships.keys():
+ if gk not in self.groups.keys() or self.groups[gk].status != "enabled":
+ del _host.memberships[gk]
+ if isinstance(_host, Host):
+ self.mark_host_updated(_host.name)
+ elif isinstance(_host, HostGroup):
+ self.mark_host_group_updated(_host.name)
+ else:
+ self.mark_datacenter_updated()
+ continue
+
+ g = self.groups[gk]
+
+ if g.factory not in ("valet", "server-group"):
+ continue
+
+ if isinstance(_host, HostGroup):
+ if g.level != _host.host_type:
+ continue
+
+ if g.update_server(_s_info):
+ g.update_server_in_host(_host.name, _s_info)
+ g.updated = True
+
+ if isinstance(_host, Host) and _host.host_group is not None:
+ if _host.host_group.is_available():
+ self._update_server_in_groups(_host.host_group, _s_info)
+ elif isinstance(_host, HostGroup) and _host.parent_resource is not None:
+ if _host.parent_resource.is_available():
+ if isinstance(_host.parent_resource, HostGroup):
+ self._update_server_in_groups(_host.parent_resource, _s_info)
+
+ def add_group(self, _g_name, _g_type, _level, _factory, _host_name):
+        """Add or enable a group unless it already exists or is disabled."""
+
+ if _g_name not in self.groups.keys():
+ group = Group(_g_name)
+ group.group_type = _g_type
+ group.factory = _factory
+ group.level = _level
+ group.rule = self._get_rule_of_group(_g_name)
+ group.new = True
+ group.updated = True
+ self.groups[_g_name] = group
+ elif self.groups[_g_name].status != "enabled":
+ self.groups[_g_name].status = "enabled"
+ self.groups[_g_name].updated = True
+
+ if _host_name in self.hosts.keys():
+ host = self.hosts[_host_name]
+ else:
+ host = self.host_groups[_host_name]
+
+ # Update host memberships.
+ if host is not None:
+ if _g_name not in host.memberships.keys():
+ host.memberships[_g_name] = self.groups[_g_name]
+
+ if isinstance(host, Host):
+ self.mark_host_updated(_host_name)
+ elif isinstance(host, HostGroup):
+ self.mark_host_group_updated(_host_name)
+
+ return True
+
+ def _get_rule_of_group(self, _gk):
+ """Get valet group rule of the given group."""
+
+ rule_name_elements = _gk.split(':')
+ rule_name = rule_name_elements[len(rule_name_elements)-1]
+
+ if rule_name in self.group_rules.keys():
+ return self.group_rules[rule_name]
+
+ return None
+
+ def get_group_by_uuid(self, _uuid):
+ """Check and get the group with its uuid."""
+
+ for _, g in self.groups.iteritems():
+ if g.uuid == _uuid:
+ return g
+
+ return None
+
+ def check_valid_rules(self, _tenant_id, _rule_list, use_ex=True):
+ """Check if given rules are valid to be used."""
+
+ for rk in _rule_list:
+ if rk not in self.group_rules.keys():
+ return "not exist rule (" + rk + ")"
+
+ # TODO(Gueyoung): if disabled,
+ # what to do with placed servers under this rule?
+ if self.group_rules[rk].status != "enabled":
+ return "rule (" + rk + ") is not enabled"
+
+ if not use_ex:
+ if self.group_rules[rk].rule_type == "exclusivity":
+ return "exclusivity not supported"
+
+ rule = self.group_rules[rk]
+ if len(rule.members) > 0 and _tenant_id not in rule.members:
+ return "no valid tenant to use rule (" + rk + ")"
+
+ return "ok"
+
+ def _manage_dynamic_host_aggregates(self):
+ """Create, delete, or update Host-Aggregates after placement decisions."""
+
+ for gk in self.groups.keys():
+ g = self.groups[gk]
+ if g.group_type == "exclusivity" and g.status == "enabled":
+ aggr_name = "valet:" + g.name
+ if aggr_name not in self.groups.keys():
+ # Create Host-Aggregate.
+ status = self._add_exclusivity_aggregate(aggr_name, g)
+ # TODO(Gueyoung): return error
+ if status != "ok":
+ self.logger.warning("error while adding dynamic host-aggregate")
+ else:
+ dha = self.groups[aggr_name]
+ for hk in g.member_hosts.keys():
+ if hk not in dha.member_hosts.keys():
+ # Add new host into Host-Aggregate.
+ status = self._update_exclusivity_aggregate(dha,
+ self.hosts[hk])
+ # TODO(Gueyoung): return error
+ if status != "ok":
+ self.logger.warning("error while updating dynamic host-aggregate")
+
+ for gk in self.groups.keys():
+ g = self.groups[gk]
+ if g.group_type == "aggr" and g.status == "enabled":
+ if g.name.startswith("valet:"):
+ if g.metadata["valet_type"] == "exclusivity":
+ name_elements = g.name.split(':', 1)
+ ex_group_name = name_elements[1]
+ if ex_group_name not in self.groups.keys() or \
+ self.groups[ex_group_name].status != "enabled":
+ # Delete Host-Aggregate
+ status = self._remove_exclusivity_aggregate(g)
+ # TODO(Gueyoung): return error
+ if status != "ok":
+ self.logger.warning("error while removing dynamic host-aggregate")
+ else:
+ ex_group = self.groups[ex_group_name]
+ for hk in g.member_hosts.keys():
+ if hk not in ex_group.member_hosts.keys():
+ # Remove host from Host-Aggregate.
+ status = self._remove_host_from_exclusivity_aggregate(g,
+ self.hosts[hk])
+
+ # TODO(Gueyoung): return error
+ if status != "ok":
+ self.logger.warning("error while removing host from dynamic host-aggregate")
+
+ def _add_exclusivity_aggregate(self, _name, _group):
+ """Create platform Host-Aggregate for Valet rules.
+
+ Exclusivity: create Host-Aggregate, and lock.
+ """
+
+ group = Group(_name)
+ group.group_type = "aggr"
+ group.level = "host"
+ group.factory = "nova"
+
+ metadata = {"valet_type": "exclusivity"}
+
+ new_host_list = []
+ ex_metadata = {}
+
+ for hk in _group.member_hosts.keys():
+ host = self.hosts[hk]
+ aggregates = host.get_aggregates()
+
+ old_aggregates = []
+ for a in aggregates:
+ if a.name.startswith("valet:"):
+ continue
+
+ for mk, mv in a.metadata.iteritems():
+ if mk not in ex_metadata.keys():
+ ex_metadata[mk] = mv
+ else:
+ if isinstance(ex_metadata[mk], list):
+ if mv not in ex_metadata[mk]:
+ ex_metadata[mk].append(mv)
+ self.logger.warning("multiple values of metadata key")
+ else:
+ if mv != ex_metadata[mk]:
+ value_list = [ex_metadata[mk], mv]
+ ex_metadata[mk] = value_list
+ self.logger.warning("multiple values of metadata key")
+
+ old_aggregates.append(a)
+
+ if hk in a.member_hosts.keys():
+ del a.member_hosts[hk]
+ a.updated = True
+
+ if a.name in host.memberships.keys():
+ del host.memberships[a.name]
+
+ if len(old_aggregates) > 0:
+ metadata[hk] = str(old_aggregates[0].uuid)
+ for i in range(1, len(old_aggregates)):
+ metadata[hk] += ("," + str(old_aggregates[i].uuid))
+
+ new_host_list.append(host)
+
+ metadata["prior_metadata"] = ex_metadata
+
+ group.metadata = metadata
+
+ for host in new_host_list:
+ group.member_hosts[host.name] = []
+
+ host.memberships[_name] = group
+ self.mark_host_updated(host.name)
+
+ group.updated = True
+
+ if not self.metadata.source.valid_client(self.datacenter_url):
+ self.metadata.source.set_client(self.datacenter_url)
+
+ status = self.metadata.create_exclusive_aggregate(group,
+ new_host_list)
+
+ self.groups[_name] = group
+
+ return status
+
+ def _update_exclusivity_aggregate(self, _group, _host):
+ """Update platform Host-Aggregate for Valet rules.
+
+ Exclusivity: update Host-Aggregate, and lock.
+ """
+
+ status = "ok"
+
+ aggregates = _host.get_aggregates()
+
+ if _group.group_type == "aggr":
+ if _host.name not in _group.member_hosts.keys():
+ old_aggregates = []
+ ex_metadata = _group.metadata["prior_metadata"]
+
+ for a in aggregates:
+ if a.name.startswith("valet:"):
+ continue
+
+ for mk, mv in a.metadata.iteritems():
+ if mk not in ex_metadata.keys():
+ ex_metadata[mk] = mv
+ else:
+ if isinstance(ex_metadata[mk], list):
+ if mv not in ex_metadata[mk]:
+ ex_metadata[mk].append(mv)
+ self.logger.warning("multiple values of metadata key")
+ else:
+ if mv != ex_metadata[mk]:
+ value_list = [ex_metadata[mk], mv]
+ ex_metadata[mk] = value_list
+ self.logger.warning("multiple values of metadata key")
+
+ old_aggregates.append(a)
+
+ if _host.name in a.member_hosts.keys():
+ del a.member_hosts[_host.name]
+ a.updated = True
+
+ if a.name in _host.memberships.keys():
+ del _host.memberships[a.name]
+
+ if len(old_aggregates) > 0:
+ _group.metadata[_host.name] = str(old_aggregates[0].uuid)
+ for i in range(1, len(old_aggregates)):
+ _group.metadata[_host.name] += ("," + str(old_aggregates[i].uuid))
+
+ _group.metadata["prior_metadata"] = ex_metadata
+
+ _group.member_hosts[_host.name] = []
+ _group.updated = True
+
+ _host.memberships[_group.name] = _group
+ self.mark_host_updated(_host.name)
+
+ if not self.metadata.source.valid_client(self.datacenter_url):
+ self.metadata.source.set_client(self.datacenter_url)
+
+ status = self.metadata.update_exclusive_aggregate(_group.uuid,
+ _group.metadata,
+ _host.name,
+ old_aggregates)
+
+ return status
+
+ def _remove_exclusivity_aggregate(self, _group):
+ """Remove dynamic Host-Aggregate."""
+
+ for hk in _group.member_hosts.keys():
+ host = self.hosts[hk]
+
+ status = self._remove_host_from_exclusivity_aggregate(_group, host)
+ if status != "ok":
+ self.logger.warning("error while removing host from dynamic host-aggregate")
+
+ del self.groups[_group.name]
+
+ if not self.metadata.source.valid_client(self.datacenter_url):
+ self.metadata.source.set_client(self.datacenter_url)
+
+ return self.metadata.remove_exclusive_aggregate(_group.uuid)
+
+ def _remove_host_from_exclusivity_aggregate(self, _group, _host):
+ """Update platform Host-Aggregate for Valet rules.
+
+ Exclusivity: delete host from dynamic Host-Aggregate.
+ """
+
+ status = "ok"
+
+ if _group.group_type == "aggr":
+ if _host.name in _group.member_hosts.keys():
+ old_aggregates = []
+ if _host.name in _group.metadata.keys():
+ aggr_ids = _group.metadata[_host.name].split(',')
+
+ for aid in aggr_ids:
+ aggr = self.get_group_by_uuid(int(aid))
+ if aggr is not None:
+ aggr.member_hosts[_host.name] = []
+ aggr.updated = True
+ old_aggregates.append(aggr)
+
+ if aggr.name not in _host.memberships.keys():
+ _host.memberships[aggr.name] = aggr
+
+ _group.metadata[_host.name] = ""
+
+ del _group.member_hosts[_host.name]
+ _group.updated = True
+
+ del _host.memberships[_group.name]
+ self.mark_host_updated(_host.name)
+
+ if not self.metadata.source.valid_client(self.datacenter_url):
+ self.metadata.source.set_client(self.datacenter_url)
+
+ status = self.metadata.remove_host_from_exclusive_aggregate(_group.uuid,
+ _group.metadata,
+ _host.name,
+ old_aggregates)
+
+ return status
+
+ def sync_with_platform(self, store=False):
+ """Communicate with platform (e.g., nova) to get resource status.
+
+ Due to dependencies between resource types,
+ keep the following order of process.
+ """
+
+ if len(self.pending_requests) > 0:
+ return True
+
+ self.logger.info("load data from platform (e.g., nova)")
+
+        # Set the platform client lib (e.g., novaclient).
+ if not self.metadata.source.valid_client(self.datacenter_url):
+ count = 0
+ while count < 3:
+ if not self.metadata.source.set_client(self.datacenter_url):
+ self.logger.warning("fail to set novaclient: try again")
+ count += 1
+ time.sleep(1)
+ else:
+ break
+ if count == 3:
+ self.logger.error("fail to set novaclient")
+ return False
+
+ count = 0
+ while count < 3:
+ # Set each flavor and its metadata.
+ if not self.metadata.get_flavors(self):
+ self.logger.warning("fail to get flavors: try again")
+ count += 1
+ time.sleep(1)
+ else:
+ break
+ if count == 3:
+ self.logger.error("fail to get flavors")
+ return False
+
+ count = 0
+ while count < 3:
+ # Set each compute host and servers information.
+ if not self.compute.get_hosts(self):
+ self.logger.warning("fail to get hosts: try again")
+ count += 1
+ time.sleep(1)
+ else:
+ break
+ if count == 3:
+ self.logger.error("fail to get hosts")
+ return False
+
+        # TODO(Gueyoung): does this need to run every time?
+ # Set the layout between each compute host and rack.
+ if not self.topology.get_topology(self):
+ return False
+
+ count = 0
+ while count < 3:
+ # Set the availability-zone, host-aggregate, and server-group
+ # of each compute host.
+ if not self.metadata.get_groups(self):
+ self.logger.warning("fail to get groups: try again")
+ count += 1
+ time.sleep(1)
+ else:
+ break
+ if count == 3:
+ self.logger.error("fail to get groups")
+ return False
+
+ # Update total capacities of each host.
+ # Triggered by overcommit ratio update or newly added.
+ for _, host in self.hosts.iteritems():
+ if host.is_available() and host.updated:
+ self.compute_resources(host)
+
+ # Update server placements in hosts
+ # If sync is True, update the available capacities.
+ if not self.update_server_placements(sync=True):
+ return False
+
+ # Update the available capacities of each NUMA and host.
+ # Triggered by unknown server additions and deletions.
+ for _, host in self.hosts.iteritems():
+ if host.is_available() and host.updated:
+ self.compute_avail_resources(host)
+
+ # Update server grouping changed by deletion and migration of servers.
+ # TODO(Gueyoung): return False if fail to connect to Nova.
+ self.update_server_grouping()
+
+ # Update racks (and clusters) and datacenter based on host change.
+ self.update_resource()
+
+        # TODO: If periodic batches to collect data from the platform are activated,
+        # check whether there is any update before storing data into the DB.
+ if store:
+ self.store_resource()
+
+ return True
+
+ def get_flavor(self, _id):
+ """Get a flavor info."""
+
+ if isinstance(_id, six.string_types):
+ flavor_id = _id
+ else:
+ flavor_id = str(_id)
+
+ self.logger.debug("fetching flavor = " + flavor_id)
+
+ flavor = None
+ if flavor_id in self.flavors.keys():
+ flavor = self.flavors[flavor_id]
+ else:
+ for _, f in self.flavors.iteritems():
+ if f.flavor_id == flavor_id:
+ flavor = f
+ break
+
+ if flavor is not None:
+ # Check if detailed information.
+ # TODO(Gueyoung): what if flavor specs changed from platform?
+ if flavor.vCPUs == 0:
+ if not self.metadata.source.valid_client(self.datacenter_url):
+ count = 0
+ while count < 3:
+ if not self.metadata.source.set_client(self.datacenter_url):
+ self.logger.warning("fail to set novaclient: try again")
+ count += 1
+ time.sleep(1)
+ else:
+ break
+ if count == 3:
+ self.logger.error("fail to set novaclient")
+ return None
+
+ f = self.metadata.source.get_flavor(flavor.flavor_id)
+ if f is None:
+ flavor = None
+ else:
+ flavor.set_info(f)
+ flavor.updated = True
+
+ self.logger.debug("flavor (" + flavor.name + ") fetched")
+ else:
+ self.logger.warning("unknown flavor = " + flavor_id)
+
+ return flavor
+
+ def store_resource(self, opt=None, req_id=None):
+ """Store resource status into DB."""
+
+ flavor_updates = {}
+ group_updates = {}
+ host_updates = {}
+ host_group_updates = {}
+
+        # Do not store disabled resources.
+
+ for fk, flavor in self.flavors.iteritems():
+ # TODO(Gueyoung): store disabled flavor?
+ flavor_updates[fk] = flavor.get_json_info()
+
+ for gk, group in self.groups.iteritems():
+ if group.status == "enabled":
+ if group.factory != "valet":
+ group_updates[gk] = group.get_json_info()
+
+ for hk, host in self.hosts.iteritems():
+ if host.is_available():
+ host_updates[hk] = host.get_json_info()
+
+ for hgk, host_group in self.host_groups.iteritems():
+ if host_group.is_available():
+ host_group_updates[hgk] = host_group.get_json_info()
+
+ datacenter_update = self.datacenter.get_json_info()
+
+        # If there are pending requests (i.e., not confirmed nor rolled back),
+ # do NOT sync with platform when dealing with new request.
+ # Here, add/remove request from/to pending list
+ # to track the list of pending requests.
+ if opt is not None and req_id is not None:
+ if opt in ("create", "delete", "update"):
+ self.pending_requests.append(req_id)
+ elif opt in ("confirm", "rollback"):
+ for rid in self.pending_requests:
+ if rid == req_id:
+ self.pending_requests.remove(rid)
+ break
+
+ json_update = {'flavors': flavor_updates, 'groups': group_updates, 'hosts': host_updates,
+ 'host_groups': host_group_updates, 'datacenter': datacenter_update}
+
+ if self.new:
+ if not self.dbh.create_resource(self.datacenter_id,
+ self.datacenter_url,
+ self.pending_requests,
+ json_update):
+ return False
+ else:
+ if not self.dbh.update_resource(self.datacenter_id,
+ self.datacenter_url,
+ self.pending_requests,
+ json_update):
+ return False
+
+ if self.new:
+ self.logger.debug("new datacenter = " + self.datacenter_id)
+ self.logger.debug(" url = " + self.datacenter_url)
+ else:
+ self.logger.debug("updated datacenter = " + self.datacenter_id)
+ self.logger.debug(" url = " + self.datacenter_url)
+ self.logger.debug("region = " + json.dumps(json_update['datacenter'], indent=4))
+ self.logger.debug("racks = " + json.dumps(json_update['host_groups'], indent=4))
+ self.logger.debug("hosts = " + json.dumps(json_update['hosts'], indent=4))
+ self.logger.debug("groups = " + json.dumps(json_update['groups'], indent=4))
+ self.logger.debug("flavors = ")
+ for fk, f_info in json_update['flavors'].iteritems():
+ if f_info["vCPUs"] > 0:
+ self.logger.debug(json.dumps(f_info, indent=4))
+
+ updated_valet_groups = {}
+ new_valet_groups = {}
+ deleted_valet_groups = {}
+ for gk, group in self.groups.iteritems():
+ if group.status == "enabled":
+ if group.factory == "valet":
+ if group.new:
+ new_valet_groups[gk] = group.get_json_info()
+ elif group.updated:
+ updated_valet_groups[gk] = group.get_json_info()
+ else:
+ if group.factory == "valet":
+ deleted_valet_groups[gk] = group.get_json_info()
+
+ for gk, g_info in new_valet_groups.iteritems():
+ if not self.dbh.create_valet_group(gk, g_info):
+ return False
+
+ self.logger.debug("new valet group = " + gk)
+ self.logger.debug("info = " + json.dumps(g_info, indent=4))
+
+ for gk, g_info in updated_valet_groups.iteritems():
+ if not self.dbh.update_valet_group(gk, g_info):
+ return False
+
+ self.logger.debug("updated valet group = " + gk)
+ self.logger.debug("info = " + json.dumps(g_info, indent=4))
+
+ for gk, g_info in deleted_valet_groups.iteritems():
+ if not self.dbh.delete_valet_group(gk):
+ return False
+
+ self.logger.debug("deleted valet group = " + gk)
+ self.logger.debug("info = " + json.dumps(g_info, indent=4))
+
+ return True
diff --git a/engine/src/valet/engine/resource_manager/resource_handler.py b/engine/src/valet/engine/resource_manager/resource_handler.py
new file mode 100644
index 0000000..38868c7
--- /dev/null
+++ b/engine/src/valet/engine/resource_manager/resource_handler.py
@@ -0,0 +1,299 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import json
+
+from valet.engine.resource_manager.resource import Resource
+from valet.engine.resource_manager.resources.group_rule import GroupRule
+from valet.engine.resource_manager.resources.host_group import HostGroup
+
+
+class ResourceHandler:
+ """Handler for dealing with all existing datacenters and their resources."""
+
+ def __init__(self, _tid, _dbh, _compute, _metadata, _topology,
+ _config, _logger):
+ self.end_of_process = False
+
+ self.dbh = _dbh
+
+ self.compute = _compute
+ self.metadata = _metadata
+ self.topology = _topology
+
+ self.default_cpu_allocation_ratio = _config.get("default_cpu_allocation_ratio")
+ self.default_ram_allocation_ratio = _config.get("default_ram_allocation_ratio")
+ self.default_disk_allocation_ratio = _config.get("default_disk_allocation_ratio")
+ self.batch_sync_interval = _config.get("batch_sync_interval")
+
+ self.group_rules = {}
+ self.resource_list = []
+
+ self.logger = _logger
+
+ def load_group_rules_from_db(self):
+ """Get all defined valet group rules from DB.
+
+ Note that rules are applied to all datacenters.
+ """
+
+ # Init first
+ self.group_rules = {}
+
+ rule_list = self.dbh.get_group_rules()
+ if rule_list is None:
+ return None
+
+ for r in rule_list:
+ rule = GroupRule(r.get("id"))
+
+ rule.status = r.get("status")
+
+ rule.app_scope = r.get("app_scope")
+ rule.rule_type = r.get("type")
+ rule.level = r.get("level")
+ rule.members = json.loads(r.get("members"))
+ rule.desc = r.get("description")
+
+ self.group_rules[rule.rule_id] = rule
+
+ return "ok"
+
+ def load_group_rule_from_db(self, _id):
+ """Get valet group rule from DB."""
+
+ # Init first
+ self.group_rules = {}
+
+ r = self.dbh.get_group_rule(_id)
+ if r is None:
+ return None
+ elif len(r) == 0:
+ return "rule not found"
+
+ rule = GroupRule(r.get("id"))
+
+ rule.status = r.get("status")
+
+ rule.app_scope = r.get("app_scope")
+ rule.rule_type = r.get("type")
+ rule.level = r.get("level")
+ rule.members = json.loads(r.get("members"))
+ rule.desc = r.get("description")
+
+ self.group_rules[rule.rule_id] = rule
+
+ return "ok"
+
+ def create_group_rule(self, _name, _scope, _type, _level, _members, _desc):
+ """Create a new group rule in DB."""
+
+ r = self.dbh.get_group_rule(_name)
+ if r is None:
+ return None
+ elif len(r) > 0:
+ return "rule already exists"
+
+ if not self.dbh.create_group_rule(_name, _scope, _type, _level,
+ _members, _desc):
+ return None
+
+ return "ok"
+
+ def get_rules(self):
+ """Return basic info of valet rules."""
+
+ rule_list = []
+
+ valet_group_list = self.dbh.get_valet_groups()
+ if valet_group_list is None:
+ return None
+
+ for rk, rule in self.group_rules.iteritems():
+ rule_info = self._get_rule(rule)
+
+ for vg in valet_group_list:
+ if vg["rule_id"] == rk:
+ gk = vg.get("id")
+ gk_elements = gk.split(":")
+ dc_id = gk_elements[0]
+
+ if dc_id not in rule_info["regions"]:
+ rule_info["regions"].append(dc_id)
+
+ rule_list.append(rule_info)
+
+ return rule_list
+
+ def _get_rule(self, _rule):
+ """Return rule info."""
+
+ rule_info = {}
+
+ rule_info["id"] = _rule.rule_id
+ rule_info["type"] = _rule.rule_type
+ rule_info["app_scope"] = _rule.app_scope
+ rule_info["level"] = _rule.level
+ rule_info["members"] = _rule.members
+ rule_info["description"] = _rule.desc
+ rule_info["status"] = _rule.status
+ rule_info["regions"] = []
+
+ return rule_info
+
+ def get_placements_under_rule(self, _rule_name, _resource):
+ """Get server placements info under given rule in datacenter."""
+
+ placements = {}
+
+ rule = self.group_rules[_rule_name]
+
+ for gk, g in _resource.groups.iteritems():
+ if g.factory == "valet":
+ if g.rule.rule_id == _rule_name:
+ placements[gk] = self._get_placements(g, _resource)
+
+ result = {}
+ result["id"] = rule.rule_id
+ result["type"] = rule.rule_type
+ result["app_scope"] = rule.app_scope
+ result["level"] = rule.level
+ result["members"] = rule.members
+ result["description"] = rule.desc
+ result["status"] = rule.status
+ result["placements"] = placements
+
+ return result
+
+ def _get_placements(self, _g, _resource):
+ """Get placement info of servers in group."""
+
+ placements = {}
+
+ for hk, server_list in _g.member_hosts.iteritems():
+ for s_info in server_list:
+ sid = s_info.get("stack_name") + ":" + s_info.get("name")
+ placements[sid] = {}
+ placements[sid]["region"] = _resource.datacenter_id
+
+ if hk in _resource.hosts.keys():
+ host = _resource.hosts[hk]
+
+ placements[sid]["host"] = host.name
+
+ hg = host.host_group
+ if isinstance(hg, HostGroup) and hg.host_type == "rack":
+ placements[sid]["rack"] = hg.name
+ else:
+ placements[sid]["rack"] = "na"
+
+ az = host.get_availability_zone()
+ az_name_elements = az.name.split(':', 1)
+ if len(az_name_elements) > 1:
+ az_name = az_name_elements[1]
+ else:
+ az_name = az.name
+ placements[sid]["availability-zone"] = az_name
+
+ elif hk in _resource.host_groups.keys():
+ hg = _resource.host_groups[hk]
+
+ if hg.host_type == "rack":
+ placements[sid]["rack"] = hg.name
+
+ for hhk, host in hg.child_resources.iteritems():
+ if host.has_server(s_info):
+ placements[sid]["host"] = host.name
+
+ az = host.get_availability_zone()
+ az_name_elements = az.name.split(':', 1)
+ if len(az_name_elements) > 1:
+ az_name = az_name_elements[1]
+ else:
+ az_name = az.name
+ placements[sid]["availability-zone"] = az_name
+
+ break
+ else:
+ # TODO(Gueyoung): Look for az, rack and host
+ placements[sid]["availability-zone"] = "na"
+ placements[sid]["rack"] = "na"
+ placements[sid]["host"] = "na"
+
+ else:
+ placements[sid]["availability-zone"] = "na"
+ placements[sid]["rack"] = "na"
+ placements[sid]["host"] = "na"
+
+ return placements
+
+ def load_resource(self, _datacenter):
+ """Create a resource for placement decisions
+
+ in a given target datacenter.
+ """
+
+ # Init first
+ del self.resource_list[:]
+
+ resource = Resource(_datacenter, self.dbh,
+ self.compute, self.metadata, self.topology,
+ self.logger)
+
+ resource.set_config(self.default_cpu_allocation_ratio,
+ self.default_ram_allocation_ratio,
+ self.default_disk_allocation_ratio)
+
+ resource.set_group_rules(self.group_rules)
+
+ status = resource.load_resource_from_db()
+ if status is None:
+ return False
+ elif status != "ok":
+ self.logger.warning(status)
+ resource.new = True
+
+ self.resource_list.append(resource)
+
+ return True
+
+ def load_resource_with_rule(self, _datacenter):
+ """Create and return a resource with valet group rule."""
+
+ # Init first
+ del self.resource_list[:]
+
+ resource = Resource(_datacenter, self.dbh,
+ self.compute, self.metadata, self.topology,
+ self.logger)
+
+ resource.set_config(self.default_cpu_allocation_ratio,
+ self.default_ram_allocation_ratio,
+ self.default_disk_allocation_ratio)
+
+ resource.set_group_rules(self.group_rules)
+
+ status = resource.load_resource_from_db()
+ if status is None:
+ return None
+ elif status != "ok":
+ return status
+
+ self.resource_list.append(resource)
+
+ return "ok"
diff --git a/engine/src/valet/engine/resource_manager/resources/__init__.py b/engine/src/valet/engine/resource_manager/resources/__init__.py
new file mode 100644
index 0000000..bd50995
--- /dev/null
+++ b/engine/src/valet/engine/resource_manager/resources/__init__.py
@@ -0,0 +1,18 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
diff --git a/engine/src/valet/engine/resource_manager/resources/datacenter.py b/engine/src/valet/engine/resource_manager/resources/datacenter.py
new file mode 100644
index 0000000..6f03bae
--- /dev/null
+++ b/engine/src/valet/engine/resource_manager/resources/datacenter.py
@@ -0,0 +1,85 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+#!/bin/python
+
+
+class Datacenter(object):
+ """Container for datacenter resource."""
+
+ def __init__(self, _name):
+ self.name = _name
+
+ self.status = "enabled"
+
+ # Enabled group objects (e.g., aggregate)
+ self.memberships = {}
+
+ self.vCPUs = 0
+ self.avail_vCPUs = 0
+
+ self.mem_cap = 0 # MB
+ self.avail_mem_cap = 0
+
+ self.local_disk_cap = 0 # GB, ephemeral
+ self.avail_local_disk_cap = 0
+
+ # Enabled host_group (rack) or host objects
+ self.resources = {}
+
+ # A list of placed servers
+ self.server_list = []
+
+ self.updated = False
+
+ def is_available(self):
+        """Check if this datacenter is available (status is enabled)."""
+
+ if self.status == "enabled":
+ return True
+ else:
+ return False
+
+ def init_resources(self):
+ self.vCPUs = 0
+ self.avail_vCPUs = 0
+ self.mem_cap = 0
+ self.avail_mem_cap = 0
+ self.local_disk_cap = 0
+ self.avail_local_disk_cap = 0
+
+ def get_json_info(self):
+ membership_list = []
+ for gk in self.memberships.keys():
+ membership_list.append(gk)
+
+ child_list = []
+ for ck in self.resources.keys():
+ child_list.append(ck)
+
+ return {'status': self.status,
+ 'name': self.name,
+ 'membership_list': membership_list,
+ 'vCPUs': self.vCPUs,
+ 'avail_vCPUs': self.avail_vCPUs,
+ 'mem': self.mem_cap,
+ 'avail_mem': self.avail_mem_cap,
+ 'local_disk': self.local_disk_cap,
+ 'avail_local_disk': self.avail_local_disk_cap,
+ 'children': child_list,
+ 'server_list': self.server_list}
diff --git a/engine/src/valet/engine/resource_manager/resources/flavor.py b/engine/src/valet/engine/resource_manager/resources/flavor.py
new file mode 100644
index 0000000..ef8b13e
--- /dev/null
+++ b/engine/src/valet/engine/resource_manager/resources/flavor.py
@@ -0,0 +1,67 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import six
+
+
+class Flavor(object):
+ """Container for flavor resource."""
+
+ def __init__(self, _name):
+ self.name = _name
+
+ self.flavor_id = None
+
+ self.status = "enabled"
+
+ self.vCPUs = 0
+ self.mem_cap = 0 # MB
+ self.disk_cap = 0 # including ephemeral (GB) and swap (MB)
+
+ self.extra_specs = {}
+
+ self.updated = False
+
+ def set_info(self, _f):
+ """Copy detailed flavor information."""
+
+ self.status = _f.status
+
+ self.vCPUs = _f.vCPUs
+ self.mem_cap = _f.mem_cap
+ self.disk_cap = _f.disk_cap
+
+ for ek, ev in _f.extra_specs.iteritems():
+ self.extra_specs[ek] = ev
+
+ def need_numa_alignment(self):
+ """Check if this flavor requires NUMA alignment."""
+
+ for key, req in six.iteritems(self.extra_specs):
+ if key == "hw:numa_nodes" and int(req) == 1:
+ return True
+
+ return False
+
+ def get_json_info(self):
+ return {'status': self.status,
+ 'flavor_id': self.flavor_id,
+ 'vCPUs': self.vCPUs,
+ 'mem': self.mem_cap,
+ 'disk': self.disk_cap,
+ 'extra_specs': self.extra_specs}
diff --git a/engine/src/valet/engine/resource_manager/resources/group.py b/engine/src/valet/engine/resource_manager/resources/group.py
new file mode 100644
index 0000000..eef9771
--- /dev/null
+++ b/engine/src/valet/engine/resource_manager/resources/group.py
@@ -0,0 +1,401 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
class Group(object):
    """Container for groups.

    A group is a set of compute hosts plus the servers placed on them,
    e.g., a host-aggregate, availability-zone, or an affinity-style group.
    """

    # Server-info fields merged by update_server*, paired with the
    # sentinel value that means "not provided".
    _MERGE_KEYS = (("stack_id", "none"), ("uuid", "none"),
                   ("flavor_id", "none"), ("vcpus", -1), ("mem", -1),
                   ("disk", -1), ("image_id", "none"), ("state", "none"),
                   ("status", "none"), ("numa", "none"))

    def __init__(self, _name):
        """Define logical group of compute hosts."""

        self.name = _name

        self.uuid = None

        # Group includes
        # - host-aggregate, availability-zone,
        # - server groups: affinity, diversity, soft-affinity, soft-diversity,
        # - affinity, diversity, quorum-diversity, exclusivity
        self.group_type = None

        self.level = None

        # Where the group is originated
        # - 'valet' or 'nova' or 'server-group' or other cloud platform
        self.factory = None

        self.status = "enabled"

        # Mapping of host_name to the list of server infos placed there.
        self.member_hosts = {}

        # For Host-Aggregate group
        self.metadata = {}

        # Group rule object for valet groups
        self.rule = None

        # A list of placed servers' (e.g., VMs) infos.
        self.server_list = []

        self.updated = False

        self.new = False

    @staticmethod
    def _is_same_server(s_info, _s_info):
        """Check if two server infos identify the same server.

        Match by uuid, or by (stack_id, name), or by (stack_name, name).
        The value "none" means the field is unknown and never matches.
        """

        if _s_info["uuid"] != "none":
            if s_info["uuid"] != "none" and \
                    s_info["uuid"] == _s_info["uuid"]:
                return True

        if _s_info["stack_id"] != "none":
            if (s_info["stack_id"] != "none" and
                    s_info["stack_id"] == _s_info["stack_id"]) and \
                    s_info["name"] == _s_info["name"]:
                return True

        if _s_info["stack_name"] != "none":
            if (s_info["stack_name"] != "none" and
                    s_info["stack_name"] == _s_info["stack_name"]) and \
                    s_info["name"] == _s_info["name"]:
                return True

        return False

    @classmethod
    def _merge_server_info(cls, s_info, _s_info):
        """Copy every provided field of _s_info into s_info.

        Return True if any field actually changed.
        """

        updated = False

        for key, unknown in cls._MERGE_KEYS:
            if _s_info[key] != unknown and _s_info[key] != s_info[key]:
                s_info[key] = _s_info[key]
                updated = True

        return updated

    def has_server(self, _s_info):
        """Check if the server exists in this group."""

        return self.get_server_info(_s_info) is not None

    def has_server_uuid(self, _uuid):
        """Check if the server exists in this group with uuid."""

        for s_info in self.server_list:
            if s_info["uuid"] == _uuid:
                return True

        return False

    def has_server_in_host(self, _host_name, _s_info):
        """Check if the server exists in the host in this group."""

        return self.get_server_info_in_host(_host_name, _s_info) is not None

    def get_server_info(self, _s_info):
        """Get the matching server info in this group, or None."""

        for s_info in self.server_list:
            if self._is_same_server(s_info, _s_info):
                return s_info

        return None

    def get_server_info_in_host(self, _host_name, _s_info):
        """Get the matching server info in the given host, or None."""

        for s_info in self.member_hosts.get(_host_name, []):
            if self._is_same_server(s_info, _s_info):
                return s_info

        return None

    def add_server(self, _s_info, _host_name):
        """Add server to this group.

        Return False if the server is already known to this group.
        """

        if self.has_server(_s_info):
            return False

        if self.has_server_in_host(_host_name, _s_info):
            return False

        self.server_list.append(_s_info)

        # Only valet and server-group factories track per-host membership.
        if self.factory in ("valet", "server-group"):
            if _host_name not in self.member_hosts.keys():
                self.member_hosts[_host_name] = []

            self.member_hosts[_host_name].append(_s_info)

        return True

    def remove_server(self, _s_info):
        """Remove server from this group's server_list."""

        for s_info in self.server_list:
            if self._is_same_server(s_info, _s_info):
                self.server_list.remove(s_info)
                return True

        return False

    def remove_server_from_host(self, _host_name, _s_info):
        """Remove server from the host of this group."""

        for s_info in self.member_hosts.get(_host_name, []):
            if self._is_same_server(s_info, _s_info):
                self.member_hosts[_host_name].remove(s_info)
                return True

        return False

    def remove_member(self, _host_name):
        """Remove the host from this group's memberships if it is empty.

        To return the host to pool for other placements.
        """

        if self.factory in ("valet", "server-group"):
            if _host_name in self.member_hosts.keys() and \
                    len(self.member_hosts[_host_name]) == 0:
                del self.member_hosts[_host_name]

            # NOTE(review): True is returned even when no host was deleted,
            # mirroring the original behavior callers may rely on.
            return True

        return False

    def clean_server(self, _uuid, _host_name):
        """Clean the server that does not have enriched info.

        A record whose name is "none" was never enriched with platform data.
        """

        if _uuid == "none":
            return

        for s_info in self.server_list:
            if s_info["uuid"] == _uuid and s_info["name"] == "none":
                self.server_list.remove(s_info)
                break

        if _host_name in self.member_hosts.keys():
            for s_info in self.member_hosts[_host_name]:
                if s_info["uuid"] == _uuid and s_info["name"] == "none":
                    self.member_hosts[_host_name].remove(s_info)
                    break

            # Drop the host entry entirely once it holds no servers.
            if len(self.member_hosts[_host_name]) == 0:
                del self.member_hosts[_host_name]

    def update_server(self, _s_info):
        """Update server with info from given info.

        The info comes from platform or request (e.g., Heat stack).
        Return True if any field of the stored server info changed.
        """

        s_info = self.get_server_info(_s_info)

        if s_info is None:
            return False

        return self._merge_server_info(s_info, _s_info)

    def update_server_in_host(self, _host_name, _s_info):
        """Update server in the host of this group."""

        s_info = self.get_server_info_in_host(_host_name, _s_info)

        if s_info is not None:
            self._merge_server_info(s_info, _s_info)

    def get_json_info(self):
        """Get group info as a JSON-serializable dict."""

        rule_id = "none"
        if self.rule is not None:
            rule_id = self.rule.rule_id

        return {'status': self.status,
                'uuid': self.uuid,
                'group_type': self.group_type,
                'level': self.level,
                'factory': self.factory,
                'rule_id': rule_id,
                'metadata': self.metadata,
                'server_list': self.server_list,
                'member_hosts': self.member_hosts}
diff --git a/engine/src/valet/engine/resource_manager/resources/group_rule.py b/engine/src/valet/engine/resource_manager/resources/group_rule.py
new file mode 100644
index 0000000..d43dc12
--- /dev/null
+++ b/engine/src/valet/engine/resource_manager/resources/group_rule.py
@@ -0,0 +1,52 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+#!/bin/python
+
+
class GroupRule(object):
    """Container for a valet group rule."""

    def __init__(self, _id):
        """Set rule defaults; scope, type, and level may be overridden later."""

        self.rule_id = _id

        self.status = "enabled"

        self.app_scope = "lcp"
        self.rule_type = "affinity"
        self.level = "host"

        # Tenant ids allowed to use this rule.
        self.members = []

        self.desc = None

        # self.groups = []    # group ids generated under this rule

        self.updated = False

    def get_json_info(self):
        """Get rule info as a JSON-serializable dict."""

        info = {'status': self.status,
                'app_scope': self.app_scope,
                'rule_type': self.rule_type,
                'level': self.level,
                'members': self.members,
                'desc': self.desc}
        # 'groups': self.groups

        return info
diff --git a/engine/src/valet/engine/resource_manager/resources/host.py b/engine/src/valet/engine/resource_manager/resources/host.py
new file mode 100644
index 0000000..a4d92ba
--- /dev/null
+++ b/engine/src/valet/engine/resource_manager/resources/host.py
@@ -0,0 +1,428 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+from valet.engine.resource_manager.resources.numa import NUMA
+
+
class Host(object):
    """Container for compute host."""

    # Server-info fields merged by update_server, paired with the
    # sentinel value that means "not provided".
    _MERGE_KEYS = (("stack_id", "none"), ("uuid", "none"),
                   ("flavor_id", "none"), ("vcpus", -1), ("mem", -1),
                   ("disk", -1), ("image_id", "none"), ("state", "none"),
                   ("status", "none"), ("numa", "none"))

    def __init__(self, _name):
        """Define compute host."""

        self.name = _name

        self.uuid = None

        self.status = "enabled"
        self.state = "up"

        # Enabled group objects (e.g., aggregate) this hosting server is in
        self.memberships = {}

        self.vCPUs = 0
        self.original_vCPUs = 0
        self.avail_vCPUs = 0

        self.mem_cap = 0    # MB
        self.original_mem_cap = 0
        self.avail_mem_cap = 0

        self.local_disk_cap = 0    # GB, ephemeral
        self.original_local_disk_cap = 0
        self.avail_local_disk_cap = 0

        # Raw hypervisor stats as reported by the platform.
        self.vCPUs_used = 0
        self.free_mem_mb = 0
        self.free_disk_gb = 0
        self.disk_available_least = 0

        # To track available cores and memory per NUMA cell
        self.NUMA = NUMA()

        self.host_group = None    # host_group object (e.g., rack)

        # Keep a list of placed servers' information
        # Here, server_info including {uuid, orch_id, name,
        #                              stack_id, stack_name,
        #                              flavor_id, image_id, tenant_id,
        #                              vcpus, mem, disk, numa,
        #                              state, status}
        self.server_list = []

        # If this host is not defined yet (unknown host).
        self.candidate_host_types = {}

        self.updated = False

    @staticmethod
    def _is_same_server(s_info, _s_info):
        """Check if two server infos identify the same server.

        Match by uuid, or by (stack_id, name), or by (stack_name, name).
        The value "none" means the field is unknown and never matches.
        """

        if _s_info["uuid"] != "none":
            if s_info["uuid"] != "none" and \
                    s_info["uuid"] == _s_info["uuid"]:
                return True

        if _s_info["stack_id"] != "none":
            if (s_info["stack_id"] != "none" and
                    s_info["stack_id"] == _s_info["stack_id"]) and \
                    s_info["name"] == _s_info["name"]:
                return True

        if _s_info["stack_name"] != "none":
            if (s_info["stack_name"] != "none" and
                    s_info["stack_name"] == _s_info["stack_name"]) and \
                    s_info["name"] == _s_info["name"]:
                return True

        return False

    @classmethod
    def _merge_server_info(cls, s_info, _s_info):
        """Copy every provided field of _s_info into s_info.

        Return True if any field actually changed.
        """

        updated = False

        for key, unknown in cls._MERGE_KEYS:
            if _s_info[key] != unknown and _s_info[key] != s_info[key]:
                s_info[key] = _s_info[key]
                updated = True

        return updated

    def is_available(self):
        """Check if host is available (enabled and up)."""

        return self.status == "enabled" and self.state == "up"

    def has_server(self, _s_info):
        """Check if server is located in this host."""

        return self.get_server_info(_s_info) is not None

    def get_server_info(self, _s_info):
        """Get the matching placed server info, or None."""

        for s_info in self.server_list:
            if self._is_same_server(s_info, _s_info):
                return s_info

        return None

    def add_server(self, _s_info):
        """Add new server to this host."""

        self.server_list.append(_s_info)

    def remove_server(self, _s_info):
        """Remove server from this host. Return True if removed."""

        for s_info in self.server_list:
            if self._is_same_server(s_info, _s_info):
                self.server_list.remove(s_info)
                return True

        return False

    def update_server(self, _s_info):
        """Update server with info from given info.

        The info comes from platform or request (e.g., Heat stack).
        Return the updated server info, or None if nothing changed.
        """

        s_info = self.get_server_info(_s_info)

        if s_info is None:
            return None

        if not self._merge_server_info(s_info, _s_info):
            return None

        # Re-place the server into its NUMA cell with the fresh info.
        cell = self.NUMA.pop_cell_of_server(s_info)

        if s_info["numa"] == "none" and cell != "none":
            s_info["numa"] = cell

        self.NUMA.add_server(s_info)

        return s_info

    def remove_membership(self, _g):
        """Remove a membership.

        To return to the resource pool for other placements.
        """

        if _g.factory in ("valet", "server-group"):
            if self.name not in _g.member_hosts.keys():
                del self.memberships[_g.name]

            # NOTE(review): True is returned even when nothing was deleted,
            # mirroring the original behavior.
            return True

        return False

    def compute_cpus(self, _overcommit_ratio):
        """Compute and init oversubscribed CPUs."""

        if self.vCPUs == 0:
            # New host case

            self.vCPUs = self.original_vCPUs * _overcommit_ratio
            self.avail_vCPUs = self.vCPUs
            self.NUMA.init_cpus(self.vCPUs)
        else:
            vcpus = self.original_vCPUs * _overcommit_ratio

            if vcpus != self.vCPUs:
                # Change of overcommit_ratio

                self.NUMA.adjust_cpus(self.vCPUs, vcpus)

                used = self.vCPUs - self.avail_vCPUs

                self.vCPUs = vcpus
                self.avail_vCPUs = self.vCPUs - used

    def compute_avail_cpus(self):
        """Compute available CPUs after placements.

        Return "ok" or a message describing the correction applied.
        """

        avail_vcpus = self.vCPUs - self.vCPUs_used

        if avail_vcpus != self.avail_vCPUs:
            # Incurred due to unknown server placement.

            diff = self.avail_vCPUs - avail_vcpus
            self.NUMA.apply_unknown_cpus(diff)

            self.avail_vCPUs = avail_vcpus

            return "avail cpus changed (" + str(diff) + ") in " + self.name

        return "ok"

    def compute_mem(self, _overcommit_ratio):
        """Compute and init oversubscribed mem capacity."""

        if self.mem_cap == 0:
            # New host case

            self.mem_cap = self.original_mem_cap * _overcommit_ratio

            self.avail_mem_cap = self.mem_cap

            self.NUMA.init_mem(self.mem_cap)
        else:
            mem_cap = self.original_mem_cap * _overcommit_ratio

            if mem_cap != self.mem_cap:
                # Change of overcommit_ratio

                self.NUMA.adjust_mem(self.mem_cap, mem_cap)

                used = self.mem_cap - self.avail_mem_cap

                self.mem_cap = mem_cap
                self.avail_mem_cap = self.mem_cap - used

    def compute_avail_mem(self):
        """Compute available mem capacity after placements.

        Return "ok" or a message describing the correction applied.
        """

        used_mem_mb = self.original_mem_cap - self.free_mem_mb

        avail_mem_cap = self.mem_cap - used_mem_mb

        if avail_mem_cap != self.avail_mem_cap:
            # Incurred due to unknown server placement.

            diff = self.avail_mem_cap - avail_mem_cap
            self.NUMA.apply_unknown_mem(diff)

            self.avail_mem_cap = avail_mem_cap

            return "avail mem changed(" + str(diff) + ") in " + self.name

        return "ok"

    def compute_disk(self, _overcommit_ratio):
        """Compute and init oversubscribed disk capacity."""

        if self.local_disk_cap == 0:
            # New host case

            self.local_disk_cap = self.original_local_disk_cap * _overcommit_ratio

            self.avail_local_disk_cap = self.local_disk_cap
        else:
            local_disk_cap = self.original_local_disk_cap * _overcommit_ratio

            if local_disk_cap != self.local_disk_cap:
                # Change of overcommit_ratio

                used = self.local_disk_cap - self.avail_local_disk_cap

                self.local_disk_cap = local_disk_cap
                self.avail_local_disk_cap = self.local_disk_cap - used

    def compute_avail_disk(self):
        """Compute available disk capacity after placements.

        Return "ok" or a message describing the correction applied.
        """

        free_disk_cap = self.free_disk_gb
        if self.disk_available_least > 0:
            free_disk_cap = min(self.free_disk_gb, self.disk_available_least)

        used_disk_cap = self.original_local_disk_cap - free_disk_cap

        avail_local_disk_cap = self.local_disk_cap - used_disk_cap

        if avail_local_disk_cap != self.avail_local_disk_cap:
            diff = self.avail_local_disk_cap - avail_local_disk_cap

            self.avail_local_disk_cap = avail_local_disk_cap

            return "avail disk changed(" + str(diff) + ") in " + self.name

        return "ok"

    def deduct_avail_resources(self, _s_info):
        """Deduct available amount of resources of this host."""

        # When vcpus is unknown (-1), mem and disk are presumed unknown
        # too, so nothing is deducted — TODO confirm against callers.
        if _s_info.get("vcpus") != -1:
            self.avail_vCPUs -= _s_info.get("vcpus")
            self.avail_mem_cap -= _s_info.get("mem")
            self.avail_local_disk_cap -= _s_info.get("disk")

    def rollback_avail_resources(self, _s_info):
        """Rollback available amount of resources of this host."""

        if _s_info.get("vcpus") != -1:
            self.avail_vCPUs += _s_info.get("vcpus")
            self.avail_mem_cap += _s_info.get("mem")
            self.avail_local_disk_cap += _s_info.get("disk")

    def get_availability_zone(self):
        """Get the availability-zone of this host, or None."""

        # NOTE: was self.memberships.iteritems(), which breaks on Python 3.
        for g in self.memberships.values():
            if g.group_type == "az":
                return g

        return None

    def get_aggregates(self):
        """Get the list of Host-Aggregates of this host."""

        return [g for g in self.memberships.values()
                if g.group_type == "aggr"]

    def get_json_info(self):
        """Get compute host info as a JSON-serializable dict."""

        membership_list = list(self.memberships.keys())

        return {'status': self.status, 'state': self.state,
                'uuid': self.uuid,
                'membership_list': membership_list,
                'vCPUs': self.vCPUs,
                'original_vCPUs': self.original_vCPUs,
                'avail_vCPUs': self.avail_vCPUs,
                'mem': self.mem_cap,
                'original_mem': self.original_mem_cap,
                'avail_mem': self.avail_mem_cap,
                'local_disk': self.local_disk_cap,
                'original_local_disk': self.original_local_disk_cap,
                'avail_local_disk': self.avail_local_disk_cap,
                'vCPUs_used': self.vCPUs_used,
                'free_mem_mb': self.free_mem_mb,
                'free_disk_gb': self.free_disk_gb,
                'disk_available_least': self.disk_available_least,
                'NUMA': self.NUMA.get_json_info(),
                'parent': self.host_group.name,
                'server_list': self.server_list,
                'candidate_host_types': self.candidate_host_types}
diff --git a/engine/src/valet/engine/resource_manager/resources/host_group.py b/engine/src/valet/engine/resource_manager/resources/host_group.py
new file mode 100644
index 0000000..ebf9a15
--- /dev/null
+++ b/engine/src/valet/engine/resource_manager/resources/host_group.py
@@ -0,0 +1,108 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+from valet.engine.app_manager.group import LEVEL
+
+
class HostGroup(object):
    """Container for host group (rack)."""

    def __init__(self, _id):
        """Define host group (e.g., rack) resource."""

        self.name = _id

        self.status = "enabled"
        self.host_group = None

        # 'rack' or 'cluster' (e.g., power domain, zone)
        self.host_type = "rack"

        self.parent_resource = None    # e.g., datacenter object
        self.child_resources = {}      # e.g., hosting server objects

        # Enabled group objects (e.g., aggregate) in this group
        self.memberships = {}

        self.vCPUs = 0
        self.avail_vCPUs = 0

        self.mem_cap = 0    # MB
        self.avail_mem_cap = 0

        self.local_disk_cap = 0    # GB, ephemeral
        self.avail_local_disk_cap = 0

        # A list of placed servers' info
        self.server_list = []

        self.updated = False

    def is_available(self):
        """Check if this host group is available (enabled)."""

        return self.status == "enabled"

    def init_resources(self):
        """Reset all capacity counters to zero."""

        self.vCPUs = 0
        self.avail_vCPUs = 0
        self.mem_cap = 0    # MB
        self.avail_mem_cap = 0
        self.local_disk_cap = 0    # GB, ephemeral
        self.avail_local_disk_cap = 0

    def init_memberships(self):
        """Drop memberships that do not apply at this level.

        Valet groups scoped below this host_type are removed, as are all
        non-valet groups.
        """

        # NOTE: iterate over a copy of the keys -- deleting from a dict
        # while iterating its view raises RuntimeError on Python 3.
        for gk in list(self.memberships.keys()):
            g = self.memberships[gk]

            if g.factory == "valet":
                if LEVEL.index(g.level) < LEVEL.index(self.host_type):
                    del self.memberships[gk]
            else:
                del self.memberships[gk]

    def remove_membership(self, _g):
        """Remove a membership. Return True if removed."""

        if _g.factory == "valet":
            if self.name not in _g.member_hosts.keys():
                del self.memberships[_g.name]
                return True

        return False

    def get_json_info(self):
        """Get host group info as a JSON-serializable dict."""

        membership_list = list(self.memberships.keys())

        child_list = list(self.child_resources.keys())

        return {'status': self.status,
                'host_type': self.host_type,
                'membership_list': membership_list,
                'vCPUs': self.vCPUs,
                'avail_vCPUs': self.avail_vCPUs,
                'mem': self.mem_cap,
                'avail_mem': self.avail_mem_cap,
                'local_disk': self.local_disk_cap,
                'avail_local_disk': self.avail_local_disk_cap,
                'parent': self.parent_resource.name,
                'children': child_list,
                'server_list': self.server_list}
diff --git a/engine/src/valet/engine/resource_manager/resources/numa.py b/engine/src/valet/engine/resource_manager/resources/numa.py
new file mode 100644
index 0000000..c6c9542
--- /dev/null
+++ b/engine/src/valet/engine/resource_manager/resources/numa.py
@@ -0,0 +1,264 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
class NUMA(object):
    """Container for NUMA cells."""

    def __init__(self, numa=None):
        """Init NUMA cells, optionally from a serialized dict.

        Assume 2 NUMA cells of each compute host.
        """

        # Each cell tracks available resources and placed server infos.
        self.cell_0 = {"cpus": 0, "mem": 0, "server_list": []}
        self.cell_1 = {"cpus": 0, "mem": 0, "server_list": []}

        if numa is not None:
            self.cell_0["cpus"] = numa["cell_0"]["cpus"]
            self.cell_0["mem"] = numa["cell_0"]["mem"]
            self.cell_0["server_list"] = numa["cell_0"]["server_list"]

            self.cell_1["cpus"] = numa["cell_1"]["cpus"]
            self.cell_1["mem"] = numa["cell_1"]["mem"]
            self.cell_1["server_list"] = numa["cell_1"]["server_list"]

    @staticmethod
    def _is_same_server(s_info, _s_info):
        """Check if two server infos identify the same server.

        Match by uuid, or by (stack_id, name), or by (stack_name, name).
        The value "none" means the field is unknown and never matches.
        """

        if _s_info["uuid"] != "none":
            if s_info["uuid"] != "none" and \
                    s_info["uuid"] == _s_info["uuid"]:
                return True

        if _s_info["stack_id"] != "none":
            if (s_info["stack_id"] != "none" and
                    s_info["stack_id"] == _s_info["stack_id"]) and \
                    s_info["name"] == _s_info["name"]:
                return True

        if _s_info["stack_name"] != "none":
            if (s_info["stack_name"] != "none" and
                    s_info["stack_name"] == _s_info["stack_name"]) and \
                    s_info["name"] == _s_info["name"]:
                return True

        return False

    def init_cpus(self, _cpus):
        """Apply CPU capacity fairly across NUMA cells.

        Caused by new compute host.
        """

        div = int(float(_cpus) / 2.0)

        self.cell_0["cpus"] = div
        self.cell_1["cpus"] = (_cpus - div)

    def init_mem(self, _mem):
        """Apply mem capacity fairly across NUMA cells.

        Caused by new compute host.
        """

        div = int(float(_mem) / 2.0)

        self.cell_0["mem"] = div
        self.cell_1["mem"] = (_mem - div)

    def adjust_cpus(self, _old_cpus, _new_cpus):
        """Adjust CPU capacity across NUMA cells.

        Caused by change in compute host (e.g., overcommit ratio).
        """

        div = int(float(_old_cpus) / 2.0)

        old_cpus_0 = div
        old_cpus_1 = (_old_cpus - div)

        # Preserve per-cell usage while re-splitting the new capacity.
        used_0 = old_cpus_0 - self.cell_0["cpus"]
        used_1 = old_cpus_1 - self.cell_1["cpus"]

        div = int(float(_new_cpus) / 2.0)

        self.cell_0["cpus"] = div - used_0
        self.cell_1["cpus"] = _new_cpus - div - used_1

    def adjust_mem(self, _old_mem, _new_mem):
        """Adjust mem capacity across NUMA cells.

        Caused by change in compute host (e.g., overcommit ratio).
        """

        div = int(float(_old_mem) / 2.0)

        old_mem_0 = div
        old_mem_1 = (_old_mem - div)

        used_0 = old_mem_0 - self.cell_0["mem"]
        used_1 = old_mem_1 - self.cell_1["mem"]

        div = int(float(_new_mem) / 2.0)

        self.cell_0["mem"] = div - used_0
        self.cell_1["mem"] = _new_mem - div - used_1

    def has_enough_resources(self, _vcpus, _mem):
        """Check if any single cell has enough resources."""

        if _vcpus <= self.cell_0["cpus"] and _mem <= self.cell_0["mem"]:
            return True

        if _vcpus <= self.cell_1["cpus"] and _mem <= self.cell_1["mem"]:
            return True

        return False

    def pop_cell_of_server(self, _s_info):
        """Remove the server from whichever cell holds it.

        Return the cell name ("cell_0"/"cell_1"), or "none" if not found.
        """

        for cell_name, cell in (("cell_0", self.cell_0),
                                ("cell_1", self.cell_1)):
            for s_info in cell["server_list"]:
                if self._is_same_server(s_info, _s_info):
                    cell["server_list"].remove(s_info)
                    return cell_name

        return "none"

    def deduct_server_resources(self, _s_info):
        """Reduce the available resources in a cell by adding a server.

        The server goes to the cell with more available vCPUs.
        Return the chosen cell name.
        """

        # Drop any stale placement of the same server first.
        self.pop_cell_of_server(_s_info)

        if self.cell_0["cpus"] > self.cell_1["cpus"]:
            self.cell_0["cpus"] -= _s_info.get("vcpus")
            self.cell_0["mem"] -= _s_info.get("mem")
            self.cell_0["server_list"].append(_s_info)
            return "cell_0"
        else:
            self.cell_1["cpus"] -= _s_info.get("vcpus")
            self.cell_1["mem"] -= _s_info.get("mem")
            self.cell_1["server_list"].append(_s_info)
            return "cell_1"

    def rollback_server_resources(self, _s_info):
        """Rollback the server placement in cell by removing server."""

        cell = self.pop_cell_of_server(_s_info)

        if cell == "cell_0":
            self.cell_0["cpus"] += _s_info.get("vcpus")
            self.cell_0["mem"] += _s_info.get("mem")
        elif cell == "cell_1":
            self.cell_1["cpus"] += _s_info.get("vcpus")
            self.cell_1["mem"] += _s_info.get("mem")

        # TODO: need to non-NUMA server?
        # else:
        #     self.apply_cpus_fairly(-1.0*_cpus)
        #     self.apply_mem_fairly(-1.0*_mem)

    def add_server(self, _s_info):
        """Add the server info into the cell named by its "numa" field."""

        if _s_info["numa"] == "cell_0":
            self.cell_0["server_list"].append(_s_info)
        elif _s_info["numa"] == "cell_1":
            self.cell_1["server_list"].append(_s_info)

    def apply_unknown_cpus(self, _diff):
        """Apply unknown cpus fairly across cells."""

        if _diff > 0:
            # Deduct

            div = int(float(_diff) / 2.0)
            self.cell_0["cpus"] -= div
            self.cell_1["cpus"] -= (_diff - div)
        elif _diff < 0:
            # Rollback
            _diff *= -1

            div = int(float(_diff) / 2.0)
            self.cell_0["cpus"] += div
            self.cell_1["cpus"] += (_diff - div)

    def apply_unknown_mem(self, _diff):
        """Apply unknown mem capacity fairly across cells."""

        if _diff > 0:
            # Deduct

            div = int(float(_diff) / 2.0)
            self.cell_0["mem"] -= div
            self.cell_1["mem"] -= (_diff - div)
        elif _diff < 0:
            # Rollback
            _diff *= -1

            div = int(float(_diff) / 2.0)
            self.cell_0["mem"] += div
            self.cell_1["mem"] += (_diff - div)

    def get_json_info(self):
        """Get NUMA info as a JSON-serializable dict."""

        return {'cell_0': self.cell_0,
                'cell_1': self.cell_1}
diff --git a/engine/src/valet/engine/resource_manager/topology_manager.py b/engine/src/valet/engine/resource_manager/topology_manager.py
new file mode 100644
index 0000000..f8422d3
--- /dev/null
+++ b/engine/src/valet/engine/resource_manager/topology_manager.py
@@ -0,0 +1,237 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+#!/bin/python
+
+
+from valet.engine.resource_manager.resources.datacenter import Datacenter
+from valet.engine.resource_manager.resources.host_group import HostGroup
+
+
+class TopologyManager(object):
+ """Manager to maintain the layout of datacenter."""
+
+ def __init__(self, _source, _logger):
+ self.source = _source
+
+ self.datacenter = None
+ self.host_groups = {}
+ self.hosts = {}
+
+ self.logger = _logger
+
+ def get_topology(self, _resource):
+ """Set datacenter layout into resource."""
+
+ self.logger.info("set datacenter layout...")
+
+ # Init first
+ self.datacenter = Datacenter(_resource.datacenter_id)
+ self.host_groups.clear()
+ self.hosts.clear()
+
+ if self.source.get_topology(self.datacenter, self.host_groups, self.hosts,
+ _resource.hosts) != "ok":
+ return False
+
+ self._check_updated(_resource)
+
+ return True
+
+ def _check_updated(self, _resource):
+ """Check if the layout is changed."""
+
+ if _resource.datacenter is None:
+ _resource.datacenter = Datacenter(_resource.datacenter_id)
+ _resource.datacenter.updated = True
+
+ self.logger.info("new datacenter (" + _resource.datacenter_id + ") added")
+
+ for hgk in self.host_groups.keys():
+ if hgk not in _resource.host_groups.keys():
+ new_host_group = HostGroup(hgk)
+ new_host_group.host_type = self.host_groups[hgk].host_type
+
+ _resource.host_groups[new_host_group.name] = new_host_group
+ _resource.mark_host_group_updated(hgk)
+
+ self.logger.info("new host_group (" + hgk + ") added")
+
+ for rhgk in _resource.host_groups.keys():
+ if rhgk not in self.host_groups.keys():
+ host_group = _resource.host_groups[rhgk]
+ host_group.status = "disabled"
+ host_group.mark_host_group_updated(rhgk)
+
+ self.logger.info("host_group (" + rhgk + ") disabled")
+
+ # TODO(Gueyoung): what if host exists in topology,
+ # but does not in resource (DB or platform)?
+
+ for rhk in _resource.hosts.keys():
+ if not _resource.hosts[rhk].is_available():
+ continue
+
+ if rhk not in self.hosts.keys():
+ _resource.hosts[rhk].status = "disabled"
+ _resource.mark_host_updated(rhk)
+
+ self.logger.info("host (" + rhk + ") removed from topology")
+
+ if self._is_datacenter_updated(_resource):
+ _resource.datacenter.updated = True
+
+ for hgk in self.host_groups.keys():
+ hg = self.host_groups[hgk]
+
+ if self._is_host_group_updated(hg, _resource):
+ _resource.mark_host_group_updated(hgk)
+
+ for hk in self.hosts.keys():
+ if hk in _resource.hosts.keys():
+ if not _resource.hosts[hk].is_available():
+ continue
+
+ host = self.hosts[hk]
+
+ if self._is_host_updated(host, _resource):
+ _resource.mark_host_updated(hk)
+
+ # TODO(Gueyoung): Hierachical failure propagation
+
+ def _is_datacenter_updated(self, _resource):
+ """Check if datacenter's resources are changed."""
+
+ updated = False
+
+ _rdatacenter = _resource.datacenter
+
+ for rk in self.datacenter.resources.keys():
+
+ h = None
+ if rk in _resource.host_groups.keys():
+ h = _resource.host_groups[rk]
+ elif rk in _resource.hosts.keys():
+ h = _resource.hosts[rk]
+
+ if h is not None and h.is_available():
+ if rk not in _rdatacenter.resources.keys() or h.updated:
+ _rdatacenter.resources[rk] = h
+ updated = True
+
+ self.logger.info("datacenter updated (new resource)")
+
+ for rk in _rdatacenter.resources.keys():
+
+ h = None
+ if rk in _resource.host_groups.keys():
+ h = _resource.host_groups[rk]
+ elif rk in _resource.hosts.keys():
+ h = _resource.hosts[rk]
+
+ if h is None or \
+ not h.is_available() or \
+ rk not in self.datacenter.resources.keys():
+ del _rdatacenter.resources[rk]
+ updated = True
+
+ self.logger.info("datacenter updated (resource removed)")
+
+ return updated
+
+ def _is_host_group_updated(self, _hg, _resource):
+ """Check if host_group's parent or children are changed."""
+
+ updated = False
+
+ _rhg = _resource.host_groups[_hg.name]
+
+ if _hg.host_type != _rhg.host_type:
+ _rhg.host_type = _hg.host_type
+ updated = True
+ self.logger.info("host_group (" + _rhg.name + ") updated (hosting type)")
+
+ if _rhg.parent_resource is None or \
+ _rhg.parent_resource.name != _hg.parent_resource.name:
+ if _hg.parent_resource.name in _resource.host_groups.keys():
+ hg = _resource.host_groups[_hg.parent_resource.name]
+ if hg.is_available():
+ _rhg.parent_resource = hg
+ updated = True
+ elif _hg.parent_resource.name == _resource.datacenter.name:
+ _rhg.parent_resource = _resource.datacenter
+ updated = True
+
+ if updated:
+ self.logger.info("host_group (" + _rhg.name + ") updated (parent host_group)")
+
+ for rk in _hg.child_resources.keys():
+
+ h = None
+ if rk in _resource.hosts.keys():
+ h = _resource.hosts[rk]
+ elif rk in _resource.host_groups.keys():
+ h = _resource.host_groups[rk]
+
+ if h is not None and h.is_available():
+ if rk not in _rhg.child_resources.keys() or h.updated:
+ _rhg.child_resources[rk] = h
+ updated = True
+
+ self.logger.info("host_group (" + _rhg.name + ") updated (new child host)")
+
+ for rk in _rhg.child_resources.keys():
+
+ h = None
+ if rk in _resource.hosts.keys():
+ h = _resource.hosts[rk]
+ elif rk in _resource.host_groups.keys():
+ h = _resource.host_groups[rk]
+
+ if h is None or \
+ not h.is_available() or \
+ rk not in _hg.child_resources.keys():
+ del _rhg.child_resources[rk]
+ updated = True
+
+ self.logger.info("host_group (" + _rhg.name + ") updated (child host removed)")
+
+ return updated
+
+ def _is_host_updated(self, _host, _resource):
+ """Check if host's parent (e.g., rack) is changed."""
+
+ updated = False
+
+ _rhost = _resource.hosts[_host.name]
+
+ if _rhost.host_group is None or \
+ _rhost.host_group.name != _host.host_group.name:
+ if _host.host_group.name in _resource.host_groups.keys():
+ rhost_group = _resource.host_groups[_host.host_group.name]
+ if rhost_group.is_available():
+ _rhost.host_group = rhost_group
+ updated = True
+ elif _host.host_group.name == _resource.datacenter.name:
+ _rhost.host_group = _resource.datacenter
+ updated = True
+
+ if updated:
+ self.logger.info("host (" + _rhost.name + ") updated (host_group)")
+
+ return False
diff --git a/engine/src/valet/engine/search/__init__.py b/engine/src/valet/engine/search/__init__.py
new file mode 100644
index 0000000..bd50995
--- /dev/null
+++ b/engine/src/valet/engine/search/__init__.py
@@ -0,0 +1,18 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
diff --git a/engine/src/valet/engine/search/avail_resources.py b/engine/src/valet/engine/search/avail_resources.py
new file mode 100644
index 0000000..c4484b8
--- /dev/null
+++ b/engine/src/valet/engine/search/avail_resources.py
@@ -0,0 +1,76 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+#!/bin/python
+
+
+from valet.engine.app_manager.group import LEVEL
+
+
+class AvailResources(object):
+ """Container to keep hosting resources and candidate resources
+
+ of each level (host or rack) for search.
+ """
+
+ def __init__(self, _level):
+ self.level = _level
+ self.avail_hosts = {}
+ self.candidates = {}
+
+ def set_next_level(self):
+ """Get the next level to search."""
+
+ current_level_index = LEVEL.index(self.level)
+ next_level_index = current_level_index - 1
+
+ if next_level_index < 0:
+ self.level = LEVEL[0]
+ else:
+ self.level = LEVEL[next_level_index]
+
+ def set_next_avail_hosts(self, _avail_hosts, _resource_of_level):
+ """Set the next level of available hosting resources."""
+
+ for hk, h in _avail_hosts.iteritems():
+ if self.level == "rack":
+ if h.rack_name == _resource_of_level:
+ self.avail_hosts[hk] = h
+ elif self.level == "host":
+ if h.host_name == _resource_of_level:
+ self.avail_hosts[hk] = h
+
+ def set_candidates(self):
+ if self.level == "rack":
+ for _, h in self.avail_hosts.iteritems():
+ self.candidates[h.rack_name] = h
+ elif self.level == "host":
+ self.candidates = self.avail_hosts
+
+ def get_candidate(self, _resource):
+ candidate = None
+
+ if self.level == "rack":
+ for _, h in self.avail_hosts.iteritems():
+ if h.rack_name == _resource.rack_name:
+ candidate = h
+ elif self.level == "host":
+ if _resource.host_name in self.avail_hosts.keys():
+ candidate = self.avail_hosts[_resource.host_name]
+
+ return candidate
diff --git a/engine/src/valet/engine/search/constraint_solver.py b/engine/src/valet/engine/search/constraint_solver.py
new file mode 100644
index 0000000..2593db8
--- /dev/null
+++ b/engine/src/valet/engine/search/constraint_solver.py
@@ -0,0 +1,117 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+from valet.engine.search.filters.affinity_filter import AffinityFilter
+from valet.engine.search.filters.aggregate_instance_filter import AggregateInstanceExtraSpecsFilter
+from valet.engine.search.filters.az_filter import AvailabilityZoneFilter
+from valet.engine.search.filters.cpu_filter import CPUFilter
+from valet.engine.search.filters.disk_filter import DiskFilter
+from valet.engine.search.filters.diversity_filter import DiversityFilter
+from valet.engine.search.filters.dynamic_aggregate_filter import DynamicAggregateFilter
+from valet.engine.search.filters.exclusivity_filter import ExclusivityFilter
+from valet.engine.search.filters.mem_filter import MemFilter
+from valet.engine.search.filters.no_exclusivity_filter import NoExclusivityFilter
+from valet.engine.search.filters.numa_filter import NUMAFilter
+from valet.engine.search.filters.quorum_diversity_filter import QuorumDiversityFilter
+
+
+class ConstraintSolver(object):
+    """Constraint solver to filter out candidate hosts.
+
+    Applies a fixed, ordered list of filters; each filter narrows the
+    candidate list produced by the previous one.
+    """
+
+    def __init__(self, _logger):
+        """Define filters and their application order."""
+
+        self.logger = _logger
+
+        self.filter_list = []
+
+        # TODO(Gueyoung): add soft-affinity and soft-diversity filters
+
+        # TODO(Gueyoung): the order of applying filters?
+
+        # Apply platform filters first
+        self.filter_list.append(AvailabilityZoneFilter())
+        self.filter_list.append(AggregateInstanceExtraSpecsFilter())
+        self.filter_list.append(CPUFilter())
+        self.filter_list.append(MemFilter())
+        self.filter_list.append(DiskFilter())
+        self.filter_list.append(NUMAFilter())
+
+        # Apply Valet filters next
+        self.filter_list.append(DiversityFilter())
+        self.filter_list.append(QuorumDiversityFilter())
+        self.filter_list.append(ExclusivityFilter())
+        self.filter_list.append(NoExclusivityFilter())
+        self.filter_list.append(AffinityFilter())
+
+        # Apply dynamic aggregate filter to determine the host's aggregate
+        # in a lazy way.
+        self.filter_list.append(DynamicAggregateFilter())
+
+        # Status side-channel read by callers; "ok" means no error so far.
+        self.status = "ok"
+
+    def get_candidate_list(self, _n, _avail_resources, _avail_hosts, _avail_groups):
+        """Filter candidate hosts using a list of filters.
+
+        Returns the filtered candidate list, or an empty list on failure
+        (in which case self.status describes the reason).
+        """
+
+        level = _avail_resources.level
+
+        candidate_list = []
+
+        # This is the resource which name is 'any'
+        ghost_candidate = None
+
+        for _, r in _avail_resources.candidates.iteritems():
+            candidate_list.append(r)
+
+            if r.get_resource_name(level) == "any":
+                ghost_candidate = r
+
+        if len(candidate_list) == 0:
+            self.status = "no candidate for node = " + _n.vid
+            self.logger.warning(self.status)
+            return []
+
+        for f in self.filter_list:
+            # Each filter resets its own per-request state first.
+            f.init_condition()
+
+            if not f.check_pre_condition(level, _n, _avail_hosts, _avail_groups):
+                if f.status is not None:
+                    # Pre-condition failed with an error: abort the search.
+                    self.status = f.status
+                    self.logger.error(self.status)
+                    return []
+                else:
+                    # Filter does not apply to this node: skip it.
+                    self.logger.debug("skip " + f.name + " constraint for node = " + _n.vid)
+
+                continue
+
+            candidate_list = f.filter_candidates(level, _n, candidate_list)
+
+            # The 'any' ghost resource always survives filtering.
+            if ghost_candidate and ghost_candidate not in candidate_list:
+                candidate_list.append(ghost_candidate)
+
+            if len(candidate_list) == 0:
+                self.status = "violate " + level + " " + f.name + " constraint for node = " + _n.vid
+                if f.status is not None:
+                    self.status += " detail: " + f.status
+                self.logger.debug(self.status)
+                return []
+            elif len(candidate_list) > 0:
+                str_num = str(len(candidate_list))
+                self.logger.debug("pass " + f.name + " constraint for node = " + _n.vid + " with " + str_num)
+
+        return candidate_list
diff --git a/engine/src/valet/engine/search/filters/__init__.py b/engine/src/valet/engine/search/filters/__init__.py
new file mode 100644
index 0000000..bd50995
--- /dev/null
+++ b/engine/src/valet/engine/search/filters/__init__.py
@@ -0,0 +1,18 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
diff --git a/engine/src/valet/engine/search/filters/affinity_filter.py b/engine/src/valet/engine/search/filters/affinity_filter.py
new file mode 100644
index 0000000..fb9aadc
--- /dev/null
+++ b/engine/src/valet/engine/search/filters/affinity_filter.py
@@ -0,0 +1,69 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+from valet.engine.app_manager.group import Group
+
+
+class AffinityFilter(object):
+
+ def __init__(self):
+ self.name = "affinity"
+
+ self.affinity_id = None
+ self.is_first = True
+
+ self.status = None
+
+ def init_condition(self):
+ self.affinity_id = None
+ self.is_first = True
+ self.status = None
+
+ def check_pre_condition(self, _level, _v, _avail_hosts, _avail_groups):
+ if isinstance(_v, Group):
+ self.affinity_id = _v.vid
+
+ if self.affinity_id in _avail_groups.keys():
+ self.is_first = False
+
+ if self.affinity_id is not None:
+ return True
+ else:
+ return False
+
+ def filter_candidates(self, _level, _v, _candidate_list):
+ if self.is_first:
+ return _candidate_list
+
+ candidate_list = []
+
+ for c in _candidate_list:
+ if self._check_candidate(_level, c):
+ candidate_list.append(c)
+
+ return candidate_list
+
+ def _check_candidate(self, _level, _candidate):
+ """Filter based on named affinity group."""
+
+ memberships = _candidate.get_all_memberships(_level)
+ for gk, gr in memberships.iteritems():
+ if gr.group_type == "affinity" and gk == self.affinity_id:
+ return True
+
+ return False
diff --git a/engine/src/valet/engine/search/filters/aggregate_instance_filter.py b/engine/src/valet/engine/search/filters/aggregate_instance_filter.py
new file mode 100644
index 0000000..316388e
--- /dev/null
+++ b/engine/src/valet/engine/search/filters/aggregate_instance_filter.py
@@ -0,0 +1,106 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import filter_utils
+import six
+
+
+_SCOPE = 'aggregate_instance_extra_specs'
+
+
+class AggregateInstanceExtraSpecsFilter(object):
+    """AggregateInstanceExtraSpecsFilter works with InstanceType records.
+
+    Matches each candidate's aggregate metadata against the node's
+    extra_specs (optionally scoped with 'aggregate_instance_extra_specs:').
+    """
+
+    def __init__(self):
+        self.name = "aggregate-instance-extra-specs"
+        self.avail_hosts = {}
+        self.status = None
+
+    def init_condition(self):
+        # Reset per-request state.
+        self.avail_hosts = {}
+        self.status = None
+
+    def check_pre_condition(self, _level, _v, _avail_hosts, _avail_groups):
+        # Only applies when the node requests extra specs.
+        if len(_v.extra_specs_list) > 0:
+            self.avail_hosts = _avail_hosts
+            return True
+        else:
+            return False
+
+    def filter_candidates(self, _level, _v, _candidate_list):
+        candidate_list = []
+
+        for c in _candidate_list:
+            if self._check_candidate(_level, _v, c):
+                candidate_list.append(c)
+
+        return candidate_list
+
+    def _check_candidate(self, _level, _v, _candidate):
+        """Check given candidate host if instance's extra specs matches to metadata."""
+
+        # If the candidate's host_type is not determined, skip the filter.
+        if _level == "host":
+            if len(_candidate.candidate_host_types) > 0:
+                return True
+        else:
+            # In rack level, if any host's host_type in the rack is not determined,
+            # skip the filter
+            for _, rh in self.avail_hosts.iteritems():
+                if rh.rack_name == _candidate.rack_name:
+                    if len(rh.candidate_host_types) > 0:
+                        return True
+
+        metadatas = filter_utils.aggregate_metadata_get_by_host(_level, _candidate)
+
+        # Every extra_specs group must be satisfied by at least one
+        # aggregate's metadata (for/else: no break means no match found).
+        for extra_specs in _v.extra_specs_list:
+            for gk, metadata in metadatas.iteritems():
+                if self._match_metadata(gk, extra_specs, metadata):
+                    break
+            else:
+                return False
+
+        return True
+
+    def _match_metadata(self, _g_name, _extra_specs, _metadata):
+        """Match conditions
+        - No extra_specs
+        - Different SCOPE of extra_specs keys
+        - key of extra_specs exists in metadata & any value matches
+        """
+
+        for key, req in six.iteritems(_extra_specs):
+            # Strip the optional scope prefix; keys scoped to a different
+            # namespace are ignored entirely.
+            scope = key.split(':', 1)
+            if len(scope) > 1:
+                if scope[0] != _SCOPE:
+                    continue
+                else:
+                    del scope[0]
+            key = scope[0]
+
+            aggregate_vals = _metadata.get(key, None)
+            if not aggregate_vals:
+                return False
+
+            # for/else: no aggregate value matched the requirement.
+            for aggregate_val in aggregate_vals:
+                if filter_utils.match(aggregate_val, req):
+                    break
+            else:
+                return False
+
+        return True
diff --git a/engine/src/valet/engine/search/filters/az_filter.py b/engine/src/valet/engine/search/filters/az_filter.py
new file mode 100644
index 0000000..8902893
--- /dev/null
+++ b/engine/src/valet/engine/search/filters/az_filter.py
@@ -0,0 +1,74 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import filter_utils
+
+from valet.engine.app_manager.group import Group
+from valet.engine.app_manager.server import Server
+
+
+class AvailabilityZoneFilter(object):
+ """Filters Hosts by availability zone.
+
+ Works with aggregate metadata availability zones, using the key 'availability_zone'.
+
+ Note: in theory a compute node can be part of multiple availability_zones
+ """
+
+ def __init__(self):
+ self.name = "availability-zone"
+
+ self.status = None
+
+ def init_condition(self):
+ self.status = None
+
+ def check_pre_condition(self, _level, _v, _avail_hosts, _avail_groups):
+ if (isinstance(_v, Server) and _v.availability_zone is not None) or \
+ (isinstance(_v, Group) and len(_v.availability_zone_list) > 0):
+ return True
+ else:
+ return False
+
+ def filter_candidates(self, _level, _v, _candidate_list):
+ candidate_list = []
+
+ for c in _candidate_list:
+ if self._check_candidate(_level, _v, c):
+ candidate_list.append(c)
+
+ return candidate_list
+
+ def _check_candidate(self, _level, _v, _candidate):
+ az_request_list = []
+ if isinstance(_v, Server):
+ az_request_list.append(_v.availability_zone)
+ else:
+ for az in _v.availability_zone_list:
+ az_request_list.append(az)
+
+ if len(az_request_list) == 0:
+ return True
+
+ availability_zone_list = filter_utils.availability_zone_get_by_host(_level, _candidate)
+
+ for azr in az_request_list:
+ if azr not in availability_zone_list:
+ return False
+
+ return True
diff --git a/engine/src/valet/engine/search/filters/cpu_filter.py b/engine/src/valet/engine/search/filters/cpu_filter.py
new file mode 100644
index 0000000..95dba8c
--- /dev/null
+++ b/engine/src/valet/engine/search/filters/cpu_filter.py
@@ -0,0 +1,57 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+class CPUFilter(object):
+
+ def __init__(self):
+ self.name = "cpu"
+
+ self.status = None
+
+ def init_condition(self):
+ self.status = None
+
+ def check_pre_condition(self, _level, _v, _avail_hosts, _avail_groups):
+ return True
+
+ def filter_candidates(self, _level, _v, _candidate_list):
+ candidate_list = []
+
+ for c in _candidate_list:
+ if self._check_candidate(_level, _v, c):
+ candidate_list.append(c)
+
+ return candidate_list
+
+ def _check_candidate(self, _level, _v, _candidate):
+ """Return True if host has sufficient CPU cores."""
+
+ avail_vcpus = _candidate.get_vcpus(_level)
+
+ instance_vcpus = _v.vCPUs
+
+ # TODO: need to check against original CPUs?
+ # Do not allow an instance to overcommit against itself,
+ # only against other instances.
+ # if instance_vcpus > vCPUs:
+ # return False
+
+ if avail_vcpus < instance_vcpus:
+ return False
+
+ return True
diff --git a/engine/src/valet/engine/search/filters/disk_filter.py b/engine/src/valet/engine/search/filters/disk_filter.py
new file mode 100644
index 0000000..00fa93e
--- /dev/null
+++ b/engine/src/valet/engine/search/filters/disk_filter.py
@@ -0,0 +1,50 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+class DiskFilter(object):
+
+ def __init__(self):
+ self.name = "disk"
+
+ self.status = None
+
+ def init_condition(self):
+ self.status = None
+
+ def check_pre_condition(self, _level, _v, _avail_hosts, _avail_groups):
+ return True
+
+ def filter_candidates(self, _level, _v, _candidate_list):
+ candidate_list = []
+
+ for c in _candidate_list:
+ if self._check_candidate(_level, _v, c):
+ candidate_list.append(c)
+
+ return candidate_list
+
+ def _check_candidate(self, _level, _v, _candidate):
+ """Filter based on disk usage."""
+
+ requested_disk = _v.local_volume_size
+ usable_disk = _candidate.get_local_disk(_level)
+
+ if not usable_disk >= requested_disk:
+ return False
+
+ return True
diff --git a/engine/src/valet/engine/search/filters/diversity_filter.py b/engine/src/valet/engine/search/filters/diversity_filter.py
new file mode 100644
index 0000000..882e11a
--- /dev/null
+++ b/engine/src/valet/engine/search/filters/diversity_filter.py
@@ -0,0 +1,62 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+class DiversityFilter(object):
+
+ def __init__(self):
+ self.name = "diversity"
+
+ self.diversity_list = []
+
+ self.status = None
+
+ def init_condition(self):
+ self.diversity_list = []
+ self.status = None
+
+ def check_pre_condition(self, _level, _v, _avail_hosts, _avail_groups):
+ if len(_v.diversity_groups) > 0:
+ for _, div_group in _v.diversity_groups.iteritems():
+ if div_group.level == _level:
+ self.diversity_list.append(div_group.vid)
+
+ if len(self.diversity_list) > 0:
+ return True
+ else:
+ return False
+
+ def filter_candidates(self, _level, _v, _candidate_list):
+ candidate_list = []
+
+ for c in _candidate_list:
+ if self._check_candidate(_level, c):
+ candidate_list.append(c)
+
+ return candidate_list
+
+ def _check_candidate(self, _level, _candidate):
+ """Filter based on named diversity groups."""
+
+ memberships = _candidate.get_memberships(_level)
+
+ for diversity_id in self.diversity_list:
+ for gk, gr in memberships.iteritems():
+ if gr.group_type == "diversity" and gk == diversity_id:
+ return False
+
+ return True
diff --git a/engine/src/valet/engine/search/filters/dynamic_aggregate_filter.py b/engine/src/valet/engine/search/filters/dynamic_aggregate_filter.py
new file mode 100644
index 0000000..709a9b9
--- /dev/null
+++ b/engine/src/valet/engine/search/filters/dynamic_aggregate_filter.py
@@ -0,0 +1,141 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import copy
+
+from valet.engine.app_manager.server import Server
+from valet.engine.search.filters.aggregate_instance_filter import AggregateInstanceExtraSpecsFilter
+from valet.engine.search.filters.cpu_filter import CPUFilter
+from valet.engine.search.filters.disk_filter import DiskFilter
+from valet.engine.search.filters.mem_filter import MemFilter
+from valet.engine.search.filters.numa_filter import NUMAFilter
+
+
class DynamicAggregateFilter(object):
    """Assign a concrete host type (host-aggregate) to an undetermined host.

    If no candidate already has the flavor's host type, pick one candidate,
    tentatively apply the host-aggregate's resources to it (and adjust its
    rack peers), then re-check the basic filters. On any violation the
    tentative conversion is rolled back and no candidate is returned.
    """

    def __init__(self):
        self.name = "dynamic-aggregate"

        # Snapshot of hosts/groups for the current request.
        self.avail_hosts = {}
        self.avail_groups = {}

        # Basic filters re-applied after the tentative conversion.
        self.aggr_filter = AggregateInstanceExtraSpecsFilter()
        self.cpu_filter = CPUFilter()
        self.mem_filter = MemFilter()
        self.disk_filter = DiskFilter()
        self.numa_filter = NUMAFilter()

        self.status = None

    def init_condition(self):
        """Reset per-request state before a new filtering round."""
        self.avail_hosts = {}
        self.avail_groups = {}
        self.status = None

    def check_pre_condition(self, _level, _v, _avail_hosts, _avail_groups):
        """Apply only at host level when placing a single server."""
        if _level == "host" and isinstance(_v, Server):
            self.avail_hosts = _avail_hosts
            self.avail_groups = _avail_groups
            return True
        else:
            return False

    def filter_candidates(self, _level, _v, _candidate_list):
        """Return hosts of a fixed type, or tentatively convert one candidate."""
        specified_candidate_list = []    # candidates having specific host type
        unspecified_candidate_list = []  # candidates not having specific host type

        for c in _candidate_list:
            # An empty candidate_host_types means the type is already fixed.
            if len(c.candidate_host_types) == 0:
                specified_candidate_list.append(c)
            else:
                unspecified_candidate_list.append(c)

        # Try to use existing hosts that have specific host type
        if len(specified_candidate_list) > 0:
            return specified_candidate_list

        # Guard: nothing left to convert (was an unchecked IndexError).
        if len(unspecified_candidate_list) == 0:
            return []

        # Take just one candidate
        candidate = unspecified_candidate_list[0]

        # Get the host-aggregate of _v
        flavor_type_list = _v.get_flavor_types()
        if len(flavor_type_list) > 1:
            self.status = "have more than one flavor type"
            return []

        ha = self.avail_groups[flavor_type_list[0]]

        # Add the host-aggregate into host and rack memberships.
        # Adjust host with avail cpus, mem, disk, and numa
        candidate.adjust_avail_resources(ha)

        # Change all others in the same rack.
        # NOTE: items() instead of the Python2-only iteritems().
        for hrk, hr in self.avail_hosts.items():
            if hrk != candidate.host_name:
                if hr.rack_name == candidate.rack_name:
                    hr.adjust_avail_rack_resources(ha,
                                                   candidate.rack_avail_vCPUs,
                                                   candidate.rack_avail_mem,
                                                   candidate.rack_avail_local_disk)

        # Once the host type (ha) is determined, remove candidate_host_types
        # (keep a deep copy around so a violation below can be rolled back).
        candidate.old_candidate_host_types = copy.deepcopy(candidate.candidate_host_types)
        candidate.candidate_host_types.clear()

        # Re-filter the converted candidate against host-aggregate,
        # cpu, mem, disk and numa constraints.
        self.aggr_filter.init_condition()
        if self.aggr_filter.check_pre_condition(_level, _v, self.avail_hosts, self.avail_groups):
            if not self.aggr_filter._check_candidate(_level, _v, candidate):
                self.status = "host-aggregate violation"

        self.cpu_filter.init_condition()
        if not self.cpu_filter._check_candidate(_level, _v, candidate):
            self.status = "cpu violation"

        self.mem_filter.init_condition()
        if not self.mem_filter._check_candidate(_level, _v, candidate):
            self.status = "mem violation"

        self.disk_filter.init_condition()
        if not self.disk_filter._check_candidate(_level, _v, candidate):
            self.status = "disk violation"

        self.numa_filter.init_condition()
        if self.numa_filter.check_pre_condition(_level, _v, self.avail_hosts, self.avail_groups):
            if not self.numa_filter._check_candidate(_level, _v, candidate):
                self.status = "numa violation"

        if self.status is None:
            # Candidate not filtered.
            return [candidate]
        else:
            # Rollback the tentative conversion.
            candidate.rollback_avail_resources(ha)
            candidate.candidate_host_types = copy.deepcopy(candidate.old_candidate_host_types)
            candidate.old_candidate_host_types.clear()

            for hrk, hr in self.avail_hosts.items():
                if hrk != candidate.host_name:
                    if hr.rack_name == candidate.rack_name:
                        hr.rollback_avail_rack_resources(ha,
                                                         candidate.rack_avail_vCPUs,
                                                         candidate.rack_avail_mem,
                                                         candidate.rack_avail_local_disk)

            return []
diff --git a/engine/src/valet/engine/search/filters/exclusivity_filter.py b/engine/src/valet/engine/search/filters/exclusivity_filter.py
new file mode 100644
index 0000000..77efd97
--- /dev/null
+++ b/engine/src/valet/engine/search/filters/exclusivity_filter.py
@@ -0,0 +1,81 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
class ExclusivityFilter(object):
    """Filter candidates for a named exclusivity group.

    Members of an exclusivity group may only be placed on hosts already
    dedicated to that group or on completely empty hosts.
    """

    def __init__(self):
        self.name = "exclusivity"

        # Id of the exclusivity group of the current server, if any.
        self.exclusivity_id = None

        # Error message; None means no error.
        self.status = None

    def init_condition(self):
        """Reset per-request state before a new filtering round."""
        self.exclusivity_id = None
        self.status = None

    def check_pre_condition(self, _level, _v, _avail_hosts, _avail_groups):
        """Return True if _v has exactly one exclusivity group at _level."""
        exclusivities = _v.get_exclusivities(_level)

        if len(exclusivities) > 1:
            self.status = "multiple exclusivities for node = " + _v.vid
            return False

        if len(exclusivities) == 1:
            # NOTE: next(iter(...)) instead of keys()[0], which raises
            # a TypeError on Python 3 (dict views are not subscriptable).
            ex_group = exclusivities[next(iter(exclusivities))]

            if ex_group.level == _level:
                self.exclusivity_id = ex_group.vid

        return self.exclusivity_id is not None

    def filter_candidates(self, _level, _v, _candidate_list):
        """Return candidates usable by the exclusivity group."""
        return self._get_candidates(_level, _candidate_list)

    def _get_candidates(self, _level, _candidate_list):
        """Keep candidates already dedicated to the group or still empty."""
        candidate_list = []

        for c in _candidate_list:
            if self._check_exclusive_candidate(_level, c) or \
                    self._check_empty(_level, c):
                candidate_list.append(c)

        return candidate_list

    def _check_exclusive_candidate(self, _level, _candidate):
        """Return True if the candidate already belongs to the group."""
        memberships = _candidate.get_memberships(_level)

        # NOTE: items() instead of the Python2-only iteritems().
        for gk, gr in memberships.items():
            if gr.group_type == "exclusivity" and gk == self.exclusivity_id:
                return True

        return False

    def _check_empty(self, _level, _candidate):
        """Return True if the candidate hosts no servers at all."""
        return _candidate.get_num_of_placed_servers(_level) == 0
diff --git a/engine/src/valet/engine/search/filters/filter_utils.py b/engine/src/valet/engine/search/filters/filter_utils.py
new file mode 100644
index 0000000..1005263
--- /dev/null
+++ b/engine/src/valet/engine/search/filters/filter_utils.py
@@ -0,0 +1,117 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import collections
+import operator
+
+
# 1. The following operations are supported:
#    =, s==, s!=, s>=, s>, s<=, s<, <in>, <all-in>, <or>, ==, !=, >=, <=
# 2. Note that <or> is handled in a different way below.
# 3. If the first word in the extra_specs is not one of the operators,
#    it is ignored.
# NOTE: per the Nova extra-specs convention, a bare '=' means
# "greater than or equal" on numeric values.
op_methods = {'=': lambda x, y: float(x) >= float(y),
              '<in>': lambda x, y: y in x,
              '<all-in>': lambda x, y: all(val in x for val in y),
              '==': lambda x, y: float(x) == float(y),
              '!=': lambda x, y: float(x) != float(y),
              '>=': lambda x, y: float(x) >= float(y),
              '<=': lambda x, y: float(x) <= float(y),
              's==': operator.eq,
              's!=': operator.ne,
              's<': operator.lt,
              's<=': operator.le,
              's>': operator.gt,
              's>=': operator.ge}


def match(value, req):
    """Return True if value satisfies the requirement expression req.

    req has the form "<op> operand(s)"; if the first word is not a known
    operator, fall back to plain string equality of value and req.
    """
    words = req.split()

    op = method = None
    if words:
        op = words.pop(0)
        method = op_methods.get(op)

    if op != '<or>' and not method:
        return value == req

    if value is None:
        return False

    if op == '<or>':  # Ex: <or> v1 <or> v2 <or> v3
        # Compare value against each alternative, skipping the '<or>'
        # separators. Guard on emptiness: a bare "<or>" request used to
        # raise IndexError from pop() on an empty list.
        while words:
            if words.pop(0) == value:
                return True
            if not words:
                break
            words.pop(0)  # remove a keyword <or>
        return False

    if words:
        if op == '<all-in>':  # requires a list not a string
            return method(value, words)
        return method(value, words[0])
    return False
+
+
def aggregate_metadata_get_by_host(_level, _host, _key=None):
    """Returns a dict of all metadata based on a metadata key for a specific host.

    If the key is not provided, returns a dict of all metadata.

    Result is keyed by aggregate group key; each value maps metadata keys
    to sets of comma-separated values. Only groups of type "aggr" at
    _level are considered, and with _key given, only aggregates whose
    metadata contains _key.
    """

    metadatas = {}

    groups = _host.get_memberships(_level)

    # NOTE: items() instead of the Python2-only iteritems().
    for gk, g in groups.items():
        if g.group_type == "aggr":
            if _key is None or _key in g.metadata:
                metadata = collections.defaultdict(set)
                for k, v in g.metadata.items():
                    if k != "prior_metadata":
                        metadata[k].update(x.strip() for x in v.split(','))
                    else:
                        # Flatten the prior metadata snapshot when it is
                        # a dict; other shapes are intentionally skipped.
                        if isinstance(g.metadata["prior_metadata"], dict):
                            for ik, iv in g.metadata["prior_metadata"].items():
                                metadata[ik].update(y.strip() for y in iv.split(','))
                metadatas[gk] = metadata

    return metadatas
+
+
def availability_zone_get_by_host(_level, _host):
    """Return the availability-zone names the host belongs to.

    A group key of the form "prefix:zone_name" yields "zone_name";
    otherwise the whole group key is used as the zone name.
    """

    availability_zone_list = []

    groups = _host.get_memberships(_level)

    # NOTE: items() instead of the Python2-only iteritems().
    for gk, g in groups.items():
        if g.group_type == "az":
            g_name_elements = gk.split(':', 1)
            if len(g_name_elements) > 1:
                g_name = g_name_elements[1]
            else:
                g_name = gk

            availability_zone_list.append(g_name)

    return availability_zone_list
diff --git a/engine/src/valet/engine/search/filters/mem_filter.py b/engine/src/valet/engine/search/filters/mem_filter.py
new file mode 100644
index 0000000..1b494c2
--- /dev/null
+++ b/engine/src/valet/engine/search/filters/mem_filter.py
@@ -0,0 +1,56 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
class MemFilter(object):
    """Reject hosts whose available RAM cannot hold the requested server."""

    def __init__(self):
        # Filter identifier used by the search engine.
        self.name = "mem"

        # Error message; None means no error.
        self.status = None

    def init_condition(self):
        """Reset filter state before a new filtering round."""
        self.status = None

    def check_pre_condition(self, _level, _v, _avail_hosts, _avail_groups):
        """This filter always applies."""
        return True

    def filter_candidates(self, _level, _v, _candidate_list):
        """Return the candidates with enough available RAM for _v."""
        return [c for c in _candidate_list
                if self._check_candidate(_level, _v, c)]

    def _check_candidate(self, _level, _v, _candidate):
        """Only return hosts with sufficient available RAM."""

        requested_ram = _v.mem  # MB
        usable_ram = _candidate.get_mem(_level)

        # TODO: need to check against original mem_cap?
        # Do not allow an instance to overcommit against itself,
        # only against other instances.

        return usable_ram >= requested_ram
diff --git a/engine/src/valet/engine/search/filters/no_exclusivity_filter.py b/engine/src/valet/engine/search/filters/no_exclusivity_filter.py
new file mode 100644
index 0000000..43516fe
--- /dev/null
+++ b/engine/src/valet/engine/search/filters/no_exclusivity_filter.py
@@ -0,0 +1,53 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
class NoExclusivityFilter(object):
    """Keep only hosts not claimed by any exclusivity group.

    Applied when the server itself has no exclusivity requirement, so it
    must not land on a host dedicated to another group.
    """

    def __init__(self):
        self.name = "no-exclusivity"

        # Error message; None means no error.
        self.status = None

    def init_condition(self):
        """Reset filter state before a new filtering round."""
        self.status = None

    def check_pre_condition(self, _level, _v, _avail_hosts, _avail_groups):
        """Apply only when _v has no exclusivity groups at _level."""
        return len(_v.get_exclusivities(_level)) == 0

    def filter_candidates(self, _level, _v, _candidate_list):
        """Return candidates free of exclusivity claims at _level."""
        return [c for c in _candidate_list if self._check_candidate(_level, c)]

    def _check_candidate(self, _level, _candidate):
        """Return False if the candidate belongs to an exclusivity group."""
        memberships = _candidate.get_memberships(_level)

        # NOTE: items() instead of the Python2-only iteritems().
        for _, g in memberships.items():
            if g.group_type == "exclusivity" and g.level == _level:
                return False

        return True
diff --git a/engine/src/valet/engine/search/filters/numa_filter.py b/engine/src/valet/engine/search/filters/numa_filter.py
new file mode 100644
index 0000000..3e095ec
--- /dev/null
+++ b/engine/src/valet/engine/search/filters/numa_filter.py
@@ -0,0 +1,84 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+from valet.engine.app_manager.server import Server
+
+
+_SCOPE = 'hw'
+
+
class NUMAFilter(object):
    """Check NUMA alignment request in Flavor."""

    def __init__(self):
        """Define filter name and status."""

        self.name = "numa"

        self.status = None

    def init_condition(self):
        """Init variable."""

        self.status = None

    def check_pre_condition(self, _level, _v, _avail_hosts, _avail_groups):
        """Check if given server needs to check this filter."""

        return bool(_level == "host"
                    and isinstance(_v, Server)
                    and _v.need_numa_alignment())

    def filter_candidates(self, _level, _v, _candidate_list):
        """Check and filter one candidate at a time."""

        return [c for c in _candidate_list
                if self._check_candidate(_level, _v, c)]

    def _check_candidate(self, _level, _v, _candidate):
        """Check given candidate host if it meets numa requirement."""

        return _candidate.NUMA.has_enough_resources(_v.vCPUs, _v.mem)

    def _get_demand_with_numa(self, _servers):
        """Check numa and compute the amount of vCPUs and memory."""

        numa_servers = [s for s in _servers if s.need_numa_alignment()]

        vcpus = sum(s.vCPUs for s in numa_servers)
        mem = sum(s.mem for s in numa_servers)

        return vcpus, mem
diff --git a/engine/src/valet/engine/search/filters/quorum_diversity_filter.py b/engine/src/valet/engine/search/filters/quorum_diversity_filter.py
new file mode 100644
index 0000000..6388ebc
--- /dev/null
+++ b/engine/src/valet/engine/search/filters/quorum_diversity_filter.py
@@ -0,0 +1,106 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import math
+
+
class QuorumDiversityFilter(object):
    """Filter candidates by quorum-diversity rules.

    First try strict diversity (no two members of a rule on the same
    hosting resource); if no candidate survives, fall back to allowing
    fewer than a quorum of a rule's servers per candidate.
    """

    def __init__(self):
        self.name = "quorum-diversity"

        # Quorum-diversity groups of the current server at the given level.
        self.quorum_diversity_group_list = []

        # Error message; None means no error.
        self.status = None

    def init_condition(self):
        """Reset per-request state before a new filtering round."""
        self.quorum_diversity_group_list = []
        self.status = None

    def check_pre_condition(self, _level, _v, _avail_hosts, _avail_groups):
        """Collect _v's quorum-diversity groups at _level; True if any exist."""
        # NOTE: items() instead of the Python2-only iteritems().
        for _, qdiv_group in _v.quorum_diversity_groups.items():
            if qdiv_group.level == _level:
                self.quorum_diversity_group_list.append(qdiv_group)

        return len(self.quorum_diversity_group_list) > 0

    def filter_candidates(self, _level, _v, _candidate_list):
        """Return candidates passing diversity, else those passing quorum."""
        candidate_list = []

        # First, try diversity rule.
        for c in _candidate_list:
            if self._check_diversity_candidate(_level, c):
                candidate_list.append(c)

        if len(candidate_list) > 0:
            return candidate_list

        # Second, if no available hosts for diversity rule, try quorum rule.
        for c in _candidate_list:
            if self._check_quorum_candidate(_level, c):
                candidate_list.append(c)

        return candidate_list

    def _check_diversity_candidate(self, _level, _candidate):
        """Filter based on named diversity groups."""

        memberships = _candidate.get_memberships(_level)

        for qdiv in self.quorum_diversity_group_list:
            for gk, gr in memberships.items():
                if gr.group_type == "quorum-diversity" and gk == qdiv.vid:
                    return False

        return True

    def _check_quorum_candidate(self, _level, _candidate):
        """Filter based on quorum-diversity rule."""

        memberships = _candidate.get_memberships(_level)
        hk = _candidate.get_resource_name(_level)

        for qdiv in self.quorum_diversity_group_list:
            # Requested num of servers under this rule
            total_num_of_servers = len(qdiv.server_list)

            # -1 means the candidate hosts no member of this rule yet.
            num_of_placed_servers_in_candidate = -1

            for gk, gr in memberships.items():
                if gr.group_type == "quorum-diversity" and gk == qdiv.vid:
                    # Total num of servers under this rule
                    total_num_of_servers += gr.original_num_of_placed_servers

                    if hk in gr.num_of_placed_servers_of_host:
                        num_of_placed_servers_in_candidate = gr.num_of_placed_servers_of_host[hk]

                    break

            # Allowed maximum num of servers per host
            quorum = max(math.ceil(float(total_num_of_servers) / 2.0 - 1.0), 1.0)

            if num_of_placed_servers_in_candidate >= quorum:
                return False

        return True
diff --git a/engine/src/valet/engine/search/optimizer.py b/engine/src/valet/engine/search/optimizer.py
new file mode 100644
index 0000000..de45cee
--- /dev/null
+++ b/engine/src/valet/engine/search/optimizer.py
@@ -0,0 +1,494 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+from valet.engine.app_manager.group import Group
+from valet.engine.app_manager.server import Server
+from valet.engine.search.search import Search
+
+
class Optimizer(object):
    """Optimizer to compute the optimal placements.

    Drives the Search engine, then applies (or rolls back) the resulting
    placements on the app and resource objects.

    NOTE(review): dict.iteritems() used throughout is Python 2-only.
    """

    def __init__(self, _logger):
        # Shared logger instance.
        self.logger = _logger

        # Search engine performing the actual placement computation.
        self.search = Search(self.logger)

    def place(self, _app):
        """Scheduling placements given app."""

        _app.set_weight()
        _app.set_optimization_priority()

        if self.search.place(_app) is True:
            if _app.status == "ok":
                # Record assigned hosts and NUMA cells on the app's servers.
                self._set_app(_app, "create")

                # Commit the placements into the resource status.
                self._set_resource(_app)
                if _app.status != "ok":
                    return
        else:
            if _app.status == "ok":
                _app.status = "failed"

            # Undo any partial NUMA claims made during the search.
            self._rollback_placements(_app)

    def update(self, _app):
        """Update state of current placements."""

        if _app.state == "delete":
            self._update_app_for_delete(_app)

            self._update_resource(_app)
            if _app.status != "ok":
                return
        else:
            _app.status = "unknown state while updating app"

    def confirm(self, _app):
        """Confirm prior request."""

        if _app.state == "created":
            self._update_app(_app)
            if _app.status != "ok":
                return

            self._update_resource(_app)
            if _app.status != "ok":
                return
        elif _app.state == "deleted":
            self._remove_resource(_app)
            if _app.status != "ok":
                return
        else:
            _app.status = "unknown state while updating app"
            return

    def rollback(self, _app):
        """Rollback prior decision."""

        if _app.state == "created":
            self._update_app(_app)
            if _app.status != "ok":
                return

            self._update_resource(_app)
            if _app.status != "ok":
                return
        elif _app.state == "deleted":
            self._remove_resource(_app)
            if _app.status != "ok":
                return
        else:
            _app.status = "unknown state while updating app"

    def _set_app(self, _app, _state):
        """Update with assigned hosts."""

        for v, p in self.search.node_placements.iteritems():
            if isinstance(v, Server):
                v.host = p.host_name
                if p.rack_name != "any":
                    v.host_group = p.rack_name

                host = self.search.avail_hosts[p.host_name]

                # Minimal server info used to claim a NUMA cell.
                s_info = {}
                if _app.app_id is None or _app.app_id == "none":
                    s_info["stack_id"] = "none"
                else:
                    s_info["stack_id"] = _app.app_id
                s_info["stack_name"] = _app.app_name
                s_info["uuid"] = "none"
                s_info["name"] = v.name

                v.numa = host.NUMA.pop_cell_of_server(s_info)

            # NOTE(review): state is set on every placed node — the source
            # rendering makes the nesting ambiguous; confirm upstream.
            v.state = _state

        # Put back servers from groups.
        _app.reset_servers()

    def _update_app(self, _app):
        """Update state of servers."""

        for sk, s in _app.servers.iteritems():
            if s["host"] == "none":
                continue

            s["state"] = _app.state

            host_name = s.get("host")

            host = None
            if host_name in _app.resource.hosts.keys():
                host = _app.resource.hosts[host_name]

            # Identity info used to locate the server on its host.
            s_info = {}
            if _app.app_id is None or _app.app_id == "none":
                s_info["stack_id"] = "none"
            else:
                s_info["stack_id"] = _app.app_id
            s_info["stack_name"] = _app.app_name
            s_info["uuid"] = "none"
            s_info["name"] = s.get("name")

            # Check if the prior placements changed.
            if host is None or \
                    not host.is_available() or \
                    not host.has_server(s_info):
                _app.status = "server (" + sk + ") placement has been changed"
                self.logger.error(_app.status)

    def _update_app_for_delete(self, _app):
        """Check the prior placements and update state

        And update placements if they have been changed.
        """

        for sk, s in _app.servers.iteritems():
            if s["host"] == "none":
                continue

            s["state"] = _app.state

            host_name = s.get("host")

            host = None
            if host_name in _app.resource.hosts.keys():
                host = _app.resource.hosts[host_name]

            # Identity info used to locate the server on its host.
            s_info = {}
            if _app.app_id is None or _app.app_id == "none":
                s_info["stack_id"] = "none"
            else:
                s_info["stack_id"] = _app.app_id
            s_info["stack_name"] = _app.app_name
            s_info["uuid"] = "none"
            s_info["name"] = s.get("name")

            # Check if the prior placements changed.
            if host is None or \
                    not host.is_available() or \
                    not host.has_server(s_info):
                self.logger.warning("server (" + sk + ") placement has been changed")

                # Try to find where the server actually lives now.
                new_host = _app.resource.get_host_of_server(s_info)

                if new_host is not None:
                    s["host"] = new_host.name
                else:
                    s["host"] = "none"
                    self.logger.warning("server (" + sk + ") not exists")

    def _set_resource(self, _app):
        """Update resource status based on new placements."""

        # If host's type (i.e., host-aggregate) is not determined before,
        # Convert/set host's type to one as specified in VM.
        for v, p in self.search.node_placements.iteritems():
            if isinstance(v, Server):
                # The host object p was deep copied, so use original object.
                rh = self.search.avail_hosts[p.host_name]

                if rh.old_candidate_host_types is not None and len(rh.old_candidate_host_types) > 0:
                    flavor_type_list = v.get_flavor_types()
                    ha = self.search.avail_groups[flavor_type_list[0]]

                    self._convert_host(rh,
                                       ha.name,
                                       rh.get_host_type(ha, rh.old_candidate_host_types),
                                       _app.resource)

        placements = {}

        # Build the full server-info record for each new placement.
        for v, p in self.search.node_placements.iteritems():
            if isinstance(v, Server):
                s_info = {}

                if _app.app_id is None or _app.app_id == "none":
                    s_info["stack_id"] = "none"
                else:
                    s_info["stack_id"] = _app.app_id
                s_info["stack_name"] = _app.app_name

                s_info["uuid"] = "none"
                s_info["orch_id"] = v.orch_id
                s_info["name"] = v.name

                s_info["flavor_id"] = v.flavor
                s_info["vcpus"] = v.vCPUs
                s_info["mem"] = v.mem
                s_info["disk"] = v.local_volume_size
                s_info["numa"] = v.numa

                s_info["image_id"] = v.image
                s_info["tenant_id"] = _app.tenant_id

                s_info["state"] = v.state
                s_info["status"] = "valid"

                placements[v.vid] = {}
                placements[v.vid]["new_host"] = p.host_name
                placements[v.vid]["info"] = s_info

        # Update compute host with new servers
        if not _app.resource.update_server_placements(change_of_placements=placements):
            _app.status = "fail while updating server placements"
            return

        groups = {}

        # Register valet/server-group memberships of each placed host.
        for v, p in self.search.node_placements.iteritems():
            if isinstance(v, Server):
                rh = self.search.avail_hosts[p.host_name]

                for gk, g in rh.host_memberships.iteritems():
                    if g.factory in ("valet", "server-group"):
                        if g.level == "host":
                            _app.resource.add_group(gk,
                                                    g.group_type,
                                                    g.level,
                                                    g.factory,
                                                    rh.host_name)

                if rh.rack_name != "any":
                    for gk, g in rh.rack_memberships.iteritems():
                        if g.factory in ("valet", "server-group"):
                            if g.level == "rack":
                                _app.resource.add_group(gk,
                                                        g.group_type,
                                                        g.level,
                                                        g.factory,
                                                        rh.rack_name)

                s_info = placements[v.vid].get("info")

                self._collect_groups_of_server(v, s_info, groups)

        # Update groups with new servers
        _app.resource.update_server_grouping(change_of_placements=placements,
                                             new_groups=groups)

        _app.resource.update_resource()

    def _convert_host(self, _rhost, _ha_name, _host_type, _resource):
        """Convert host's type into the specific type as given."""

        host = _resource.hosts[_rhost.host_name]

        # Already converted (or never undetermined): nothing to do.
        if host.candidate_host_types is None or len(host.candidate_host_types) == 0:
            return

        # Copy capacity figures of the chosen host type onto the host.
        host.vCPUs = _host_type["vCPUs"]
        host.original_vCPUs = _host_type["original_vCPUs"]
        host.avail_vCPUs = _host_type["avail_vCPUs"]
        host.mem_cap = _host_type["mem"]
        host.original_mem_cap = _host_type["original_mem"]
        host.avail_mem_cap = _host_type["avail_mem"]
        host.local_disk_cap = _host_type["local_disk"]
        host.original_local_disk_cap = _host_type["original_local_disk"]
        host.avail_local_disk_cap = _host_type["avail_local_disk"]
        host.vCPUs_used = _host_type["vCPUs_used"]
        host.free_mem_mb = _host_type["free_mem_mb"]
        host.free_disk_gb = _host_type["free_disk_gb"]
        host.disk_available_least = _host_type["disk_available_least"]

        host.NUMA = _rhost.NUMA

        # Bind the host to its host-aggregate group.
        ha = _resource.groups[_ha_name]
        host.memberships[ha.name] = ha
        ha.member_hosts[host.name] = []
        ha.updated = True

        _resource.mark_host_updated(host.name)

        _resource.update_resource()

        if host.candidate_host_types is not None:
            host.candidate_host_types.clear()

    def _rollback_placements(self, _app):
        """Remove placements when they fail.

        Remove placements from NUMA cells of resource object.
        """

        for v, p in self.search.node_placements.iteritems():
            if isinstance(v, Server):
                s_info = {}

                if _app.app_id is None or _app.app_id == "none":
                    s_info["stack_id"] = "none"
                else:
                    s_info["stack_id"] = _app.app_id
                s_info["stack_name"] = _app.app_name

                s_info["uuid"] = "none"
                s_info["orch_id"] = v.orch_id
                s_info["name"] = v.name

                s_info["flavor_id"] = v.flavor
                s_info["vcpus"] = v.vCPUs
                s_info["mem"] = v.mem
                s_info["disk"] = v.local_volume_size
                s_info["numa"] = v.numa

                s_info["image_id"] = v.image
                s_info["tenant_id"] = _app.tenant_id

                s_info["state"] = v.state
                s_info["status"] = "valid"

                # Release the NUMA cell claimed during the search.
                host = _app.resource.hosts[p.host_name]
                host.NUMA.rollback_server_resources(s_info)

    def _collect_groups_of_server(self, _v, _s_info, _groups):
        """Collect all groups of the server and its parent (affinity)."""

        # TODO(Gueyoung): track host-aggregates and availability-zone?

        for gk in _v.exclusivity_groups.keys():
            if gk not in _groups.keys():
                _groups[gk] = []
            _groups[gk].append(_s_info)

        for gk in _v.diversity_groups.keys():
            if gk not in _groups.keys():
                _groups[gk] = []
            _groups[gk].append(_s_info)

        for gk in _v.quorum_diversity_groups.keys():
            if gk not in _groups.keys():
                _groups[gk] = []
            _groups[gk].append(_s_info)

        if isinstance(_v, Group):
            if _v.vid not in _groups.keys():
                _groups[_v.vid] = []
            _groups[_v.vid].append(_s_info)

        # Recursively check server or its affinity group.
        if _v.surgroup is not None:
            self._collect_groups_of_server(_v.surgroup, _s_info, _groups)

    def _remove_resource(self, _app):
        """Remove servers from resources.

        Resources: NUMA, host, host_group, datacenter,
        valet groups and server-groups.
        """

        placements = {}

        for sk, s in _app.servers.iteritems():
            if s["host"] == "none":
                continue

            s_info = {}

            if _app.app_id is None or _app.app_id == "none":
                s_info["stack_id"] = "none"
            else:
                s_info["stack_id"] = _app.app_id
            s_info["stack_name"] = _app.app_name

            s_info["uuid"] = "none"
            s_info["name"] = s.get("name")

            s_info["flavor_id"] = s.get("flavor")
            s_info["vcpus"] = s.get("cpus")
            s_info["mem"] = s.get("mem")
            s_info["disk"] = s.get("local_volume")
            s_info["numa"] = s.get("numa")

            s_info["image_id"] = s.get("image")
            s_info["tenant_id"] = _app.tenant_id

            s_info["state"] = "deleted"

            # Placement key: "<stack id or name>:<server name>".
            if s_info["stack_id"] != "none":
                sid = s_info["stack_id"] + ":" + s_info["name"]
            else:
                sid = s_info["stack_name"] + ":" + s_info["name"]

            placements[sid] = {}
            placements[sid]["old_host"] = s.get("host")
            placements[sid]["info"] = s_info

        if not _app.resource.update_server_placements(change_of_placements=placements):
            _app.status = "fail while updating server placements"
            return

        _app.resource.update_server_grouping(change_of_placements=placements,
                                             new_groups={})

        _app.resource.update_resource()

    def _update_resource(self, _app):
        """Update state of servers in resources.

        Resources: NUMA, host, host_group, datacenter,
        valet groups and server-groups.
        """

        placements = {}

        for sk, s in _app.servers.iteritems():
            if s["host"] == "none":
                continue

            s_info = {}

            if _app.app_id is None or _app.app_id == "none":
                s_info["stack_id"] = "none"
            else:
                s_info["stack_id"] = _app.app_id
            s_info["stack_name"] = _app.app_name

            s_info["uuid"] = "none"
            s_info["name"] = s.get("name")

            # Capacity fields are unknown here; placeholders only.
            s_info["flavor_id"] = "none"
            s_info["vcpus"] = -1
            s_info["mem"] = -1
            s_info["disk"] = -1
            s_info["numa"] = "none"

            s_info["image_id"] = "none"

            s_info["state"] = s.get("state")
            s_info["status"] = "none"

            # Placement key: "<stack id or name>:<server name>".
            if s_info["stack_id"] != "none":
                sid = s_info["stack_id"] + ":" + s_info["name"]
            else:
                sid = s_info["stack_name"] + ":" + s_info["name"]

            placements[sid] = {}
            placements[sid]["host"] = s.get("host")
            placements[sid]["info"] = s_info

        if not _app.resource.update_server_placements(change_of_placements=placements):
            _app.status = "fail while updating server placements"
            return

        _app.resource.update_server_grouping(change_of_placements=placements,
                                             new_groups={})

        _app.resource.update_resource()
diff --git a/engine/src/valet/engine/search/resource.py b/engine/src/valet/engine/search/resource.py
new file mode 100644
index 0000000..18c8ca7
--- /dev/null
+++ b/engine/src/valet/engine/search/resource.py
@@ -0,0 +1,264 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+from valet.engine.resource_manager.resources.numa import NUMA
+
+
+class GroupResource(object):
+ """Container for all resource groups, including
+
+ affinity, diversity, quorum-diversity, exclusivity, host-aggregate and availability.
+ """
+
+ def __init__(self):
+ self.name = None
+
+ self.group_type = "aggr"
+ self.factory = "nova"
+ self.level = "host"
+
+ self.metadata = {}
+
+ self.original_num_of_placed_servers = 0
+ self.num_of_placed_servers = 0
+
+ # key = host (host or rack), value = num_of_placed_servers
+ self.num_of_placed_servers_of_host = {}
+
+
+class HostResource(object):
+ """Container for hosting resource (host, rack)."""
+
+ def __init__(self):
+ # Host info
+ self.host_name = None
+
+ self.host_memberships = {} # all mapped groups to host
+
+ self.host_avail_vCPUs = 0 # remaining vCPUs after overcommit
+ self.host_avail_mem = 0 # remaining mem cap after overcommit
+ self.host_avail_local_disk = 0 # remaining local disk cap after overcommit
+
+ self.NUMA = None
+
+ self.host_num_of_placed_servers = 0 # the number of vms currently placed in this host
+
+ # If the host type is not determined yet,
+ # provide possible host types.
+ self.candidate_host_types = {}
+ self.old_candidate_host_types = {} # For rollback
+
+ # To track newly added host types.
+ self.new_host_aggregate_list = []
+
+ # Rack info
+ self.rack_name = None # where this host is located
+
+ self.rack_memberships = {}
+
+ self.rack_avail_vCPUs = 0
+ self.rack_avail_mem = 0
+ self.rack_avail_local_disk = 0
+
+ self.rack_num_of_placed_servers = 0
+
+ # To track newly added host types.
+ self.new_rack_aggregate_list = []
+
+ self.level = None # level of placement
+
+ self.sort_base = 0 # order to place
+
+ def get_host_type(self, _ha, _host_types):
+ """Take host-aggregate group and
+ return default host type of the host-aggregate.
+ """
+
+ host_type = None
+
+ if _host_types is None:
+ return host_type
+
+ host_type_list = _host_types[_ha.name]
+ for ht in host_type_list:
+ if "default" in ht.keys():
+ host_type = ht
+ break
+
+ return host_type
+
+ def adjust_avail_resources(self, _ha):
+ """Take host-aggregate group and
+ add it to host/rack memberships and
+ adjust the amount of available resources based on
+ the corresponding host type.
+ """
+
+ if _ha.name not in self.host_memberships.keys():
+ self.host_memberships[_ha.name] = _ha
+ self.new_host_aggregate_list.append(_ha.name)
+ if _ha.name not in self.rack_memberships.keys():
+ self.rack_memberships[_ha.name] = _ha
+ self.new_rack_aggregate_list.append(_ha.name)
+
+ host_type = self.get_host_type(_ha, self.candidate_host_types)
+
+ self.host_avail_vCPUs = host_type["avail_vCPUs"]
+ self.host_avail_mem = host_type["avail_mem"]
+ self.host_avail_local_disk = host_type["avail_local_disk"]
+
+ self.NUMA = NUMA(numa=host_type["NUMA"])
+
+ if self.candidate_host_types is not None:
+ for htk, htl in self.candidate_host_types.iteritems():
+ if htk == "mockup":
+ self.rack_avail_vCPUs -= htl[0]["avail_vCPUs"]
+ self.rack_avail_mem -= htl[0]["avail_mem"]
+ self.rack_avail_local_disk -= htl[0]["avail_local_disk"]
+
+ self.rack_avail_vCPUs += self.host_avail_vCPUs
+ self.rack_avail_mem += self.host_avail_mem
+ self.rack_avail_local_disk += self.host_avail_local_disk
+
+ break
+
+ def adjust_avail_rack_resources(self, _ha, _cpus, _mem, _disk):
+ """Take host-aggregate group and the amount of available resources
+ add the group into rack membership and
+ adjust the amount of available rack resources.
+ """
+
+ if _ha.name not in self.rack_memberships.keys():
+ self.rack_memberships[_ha.name] = _ha
+ self.new_rack_aggregate_list.append(_ha.name)
+
+ self.rack_avail_vCPUs = _cpus
+ self.rack_avail_mem = _mem
+ self.rack_avail_local_disk = _disk
+
+ def rollback_avail_resources(self, _ha):
+ if _ha.name in self.new_host_aggregate_list:
+ del self.host_memberships[_ha.name]
+ self.new_host_aggregate_list.remove(_ha.name)
+ if _ha.name in self.new_rack_aggregate_list:
+ del self.rack_memberships[_ha.name]
+ self.new_rack_aggregate_list.remove(_ha.name)
+
+ host_type = self.get_host_type(_ha, self.old_candidate_host_types)
+
+ if self.old_candidate_host_types is not None:
+ for htk, htl in self.old_candidate_host_types.iteritems():
+ if htk == "mockup":
+ self.host_avail_vCPUs = htl[0]["avail_vCPUs"]
+ self.host_avail_mem = htl[0]["avail_mem"]
+ self.host_avail_local_disk = htl[0]["avail_local_disk"]
+
+ self.NUMA = NUMA(numa=htl[0]["NUMA"])
+
+ self.rack_avail_vCPUs -= host_type["avail_vCPUs"]
+ self.rack_avail_mem -= host_type["avail_mem"]
+ self.rack_avail_local_disk -= host_type["avail_local_disk"]
+
+ self.rack_avail_vCPUs += self.host_avail_vCPUs
+ self.rack_avail_mem += self.host_avail_mem
+ self.rack_avail_local_disk += self.host_avail_local_disk
+
+ break
+
+ def rollback_avail_rack_resources(self, _ha, _cpus, _mem, _disk):
+ if _ha.name in self.new_rack_aggregate_list:
+ del self.rack_memberships[_ha.name]
+ self.new_rack_aggregate_list.remove(_ha.name)
+
+ self.rack_avail_vCPUs = _cpus
+ self.rack_avail_mem = _mem
+ self.rack_avail_local_disk = _disk
+
+ def get_resource_name(self, _level):
+ name = "unknown"
+
+ if _level == "rack":
+ name = self.rack_name
+ elif _level == "host":
+ name = self.host_name
+
+ return name
+
+ def get_vcpus(self, _level):
+ avail_vcpus = 0
+
+ if _level == "rack":
+ avail_vcpus = self.rack_avail_vCPUs
+ elif _level == "host":
+ avail_vcpus = self.host_avail_vCPUs
+
+ return avail_vcpus
+
+ def get_mem(self, _level):
+ avail_mem = 0
+
+ if _level == "rack":
+ avail_mem = self.rack_avail_mem
+ elif _level == "host":
+ avail_mem = self.host_avail_mem
+
+ return avail_mem
+
+ def get_local_disk(self, _level):
+ avail_local_disk = 0
+
+ if _level == "rack":
+ avail_local_disk = self.rack_avail_local_disk
+ elif _level == "host":
+ avail_local_disk = self.host_avail_local_disk
+
+ return avail_local_disk
+
+ def get_memberships(self, _level):
+ memberships = None
+
+ if _level == "rack":
+ memberships = self.rack_memberships
+ elif _level == "host":
+ memberships = self.host_memberships
+
+ return memberships
+
+ def get_all_memberships(self, _level):
+ memberships = {}
+
+ if _level == "rack":
+ for mk, m in self.rack_memberships.iteritems():
+ memberships[mk] = m
+ for mk, m in self.host_memberships.iteritems():
+ memberships[mk] = m
+ elif _level == "host":
+ for mk, m in self.host_memberships.iteritems():
+ memberships[mk] = m
+
+ return memberships
+
+ def get_num_of_placed_servers(self, _level):
+ num_of_servers = 0
+
+ if _level == "rack":
+ num_of_servers = self.rack_num_of_placed_servers
+ elif _level == "host":
+ num_of_servers = self.host_num_of_placed_servers
+
+ return num_of_servers
diff --git a/engine/src/valet/engine/search/search.py b/engine/src/valet/engine/search/search.py
new file mode 100644
index 0000000..40added
--- /dev/null
+++ b/engine/src/valet/engine/search/search.py
@@ -0,0 +1,708 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import copy
+import operator
+
+from valet.engine.app_manager.server import Server
+from valet.engine.resource_manager.resources.datacenter import Datacenter
+from valet.engine.search.avail_resources import AvailResources
+from valet.engine.search.constraint_solver import ConstraintSolver
+from valet.engine.search.resource import GroupResource, HostResource
+from valet.engine.search.search_helper import *
+
+
+class Search(object):
+ """Bin-packing approach in the hierarchical datacenter layout."""
+
+ def __init__(self, _logger):
+ self.logger = _logger
+
+ # Search inputs
+ self.app = None
+ self.resource = None
+
+ # Snapshot of current resource status
+ self.avail_hosts = {}
+ self.avail_groups = {}
+
+ # Search results
+ self.node_placements = {}
+ self.prior_placements = {} # TODO
+ self.num_of_hosts = 0
+
+ # Optimization criteria
+ self.CPU_weight = -1
+ self.mem_weight = -1
+ self.local_disk_weight = -1
+
+ self.constraint_solver = None
+
+ def _init_search(self, _app):
+ """Init the search information and the output results."""
+
+ self.app = _app
+ self.resource = _app.resource
+
+ self.avail_hosts.clear()
+ self.avail_groups.clear()
+
+ self.node_placements.clear()
+ self.prior_placements.clear() # TODO
+ self.num_of_hosts = 0
+
+ self.CPU_weight = -1
+ self.mem_weight = -1
+ self.local_disk_weight = -1
+
+ self.constraint_solver = ConstraintSolver(self.logger)
+
+ self._create_avail_groups()
+ self._create_avail_hosts()
+
+ # TODO
+ # if len(self.app.old_vm_map) > 0:
+ # self._adjust_resources()
+
+ self._set_resource_weights()
+
+ def _create_avail_groups(self):
+ """Collect all available resource groups.
+
+ Group type is affinity, diversity, quorum-diversity, exclusivity,
+ availability-zone, host-aggregate, server-group.
+ """
+
+ for gk, g in self.resource.groups.iteritems():
+ if g.status != "enabled":
+ self.logger.debug("group (" + g.name + ") disabled")
+ continue
+
+ gr = GroupResource()
+ gr.name = gk
+
+ gr.group_type = g.group_type
+ gr.factory = g.factory
+
+ if g.level is not None:
+ gr.level = g.level
+ else:
+ gr.level = "host"
+
+ for mk, mv in g.metadata.iteritems():
+ gr.metadata[mk] = mv
+
+ gr.original_num_of_placed_servers = len(g.server_list)
+ gr.num_of_placed_servers = len(g.server_list)
+
+ for hk in g.member_hosts.keys():
+ gr.num_of_placed_servers_of_host[hk] = len(g.member_hosts[hk])
+
+ self.avail_groups[gk] = gr
+
+ def _create_avail_hosts(self):
+ """Create all available hosts."""
+
+ for hk, host in self.resource.hosts.iteritems():
+ if not host.is_available():
+ self.logger.warning("host (" + host.name + ") not available at this time")
+ continue
+
+ hr = HostResource()
+ hr.host_name = hk
+
+ for mk in host.memberships.keys():
+ if mk in self.avail_groups.keys():
+ hr.host_memberships[mk] = self.avail_groups[mk]
+
+ # Not used by Valet, only capacity planning
+ try:
+ for htk, ht in host.candidate_host_types.iteritems():
+ hr.candidate_host_types[htk] = copy.deepcopy(ht)
+ except AttributeError:
+ hr.candidate_host_types = {}
+
+ hr.host_avail_vCPUs = host.avail_vCPUs
+ hr.host_avail_mem = host.avail_mem_cap
+ hr.host_avail_local_disk = host.avail_local_disk_cap
+
+ hr.NUMA = host.NUMA # NOTE: refer to host's NUMA, instead of deepcopy.
+
+ hr.host_num_of_placed_servers = len(host.server_list)
+
+ rack = host.host_group
+ if isinstance(rack, Datacenter):
+ hr.rack_name = "any"
+ else:
+ if not rack.is_available():
+ continue
+
+ hr.rack_name = rack.name
+
+ for mk in rack.memberships.keys():
+ if mk in self.avail_groups.keys():
+ hr.rack_memberships[mk] = self.avail_groups[mk]
+
+ hr.rack_avail_vCPUs = rack.avail_vCPUs
+ hr.rack_avail_mem = rack.avail_mem_cap
+ hr.rack_avail_local_disk = rack.avail_local_disk_cap
+
+ hr.rack_num_of_placed_servers = len(rack.server_list)
+
+ if hr.host_num_of_placed_servers > 0:
+ self.num_of_hosts += 1
+
+ self.avail_hosts[hk] = hr
+
+ def _set_resource_weights(self):
+ """Compute weight of each resource type.
+
+ The larger the weight, the more important the resource is to be considered.
+ """
+
+ denominator = 0.0
+ for _, w in self.app.optimization_priority:
+ denominator += w
+
+ for t, w in self.app.optimization_priority:
+ if t == "cpu":
+ self.CPU_weight = float(w / denominator)
+ elif t == "mem":
+ self.mem_weight = float(w / denominator)
+ elif t == "lvol":
+ self.local_disk_weight = float(w / denominator)
+
+ def place(self, _app):
+ """Determine placements of new app creation."""
+
+ self._init_search(_app)
+
+ self.logger.info("search......")
+
+ open_node_list = self._open_list(self.app.servers, self.app.groups)
+
+ avail_resources = AvailResources(LEVEL[len(LEVEL) - 1])
+ avail_resources.avail_hosts = self.avail_hosts
+ avail_resources.set_next_level() # NOTE(Gueyoung): skip 'cluster' level
+
+ return self._run_greedy(open_node_list, avail_resources, "new")
+
+ # TODO: for update opt.
+ def re_place(self, _app_topology):
+ pass
+
+ def _re_place(self):
+ pass
+
+ def _open_list(self, _servers, _groups):
+ """Extract all servers and groups of each level (rack, host)."""
+
+ open_node_list = []
+
+ for _, s in _servers.iteritems():
+ self._set_node_weight(s)
+ open_node_list.append(s)
+
+ for _, g in _groups.iteritems():
+ self._set_node_weight(g)
+ open_node_list.append(g)
+
+ return open_node_list
+
+ def _set_node_weight(self, _v):
+ """Compute each server's or group's weight.
+
+ The larger the weight, the more important the node is to be considered.
+ """
+
+ _v.sort_base = -1
+ _v.sort_base = self.CPU_weight * _v.vCPU_weight
+ _v.sort_base += self.mem_weight * _v.mem_weight
+ _v.sort_base += self.local_disk_weight * _v.local_volume_weight
+
+ # TODO: for update opt.
+ def _open_prior_list(self, _vms, _groups):
+ pass
+
+ def _adjust_resources(self):
+ pass
+
+ def _run_greedy(self, _open_node_list, _avail_resources, _mode):
+ """Search placements with greedy algorithm."""
+
+ _open_node_list.sort(key=operator.attrgetter("sort_base"), reverse=True)
+
+ for n in _open_node_list:
+ self.logger.debug("open node = " + n.vid + " cpus = " + str(n.vCPUs) + " sort = " + str(n.sort_base))
+
+ while len(_open_node_list) > 0:
+ n = _open_node_list.pop(0)
+
+ # TODO
+ # if _mode == "new":
+ best_resource = self._get_best_resource(n, _avail_resources, _mode)
+ # else:
+ # best_resource = self._get_best_resource_for_prior(n, _avail_resources, _mode)
+
+ if best_resource is None:
+ self.logger.error(self.app.status)
+ return False
+ else:
+ self._deduct_resources(_avail_resources.level, best_resource, n, _mode)
+ # TODO
+ # if _mode == "new":
+ self._close_node_placement(_avail_resources.level, best_resource, n)
+ # else:
+ # self._close_prior_placement(_avail_resources.level, best_resource, n)
+
+ return True
+
+ def _get_best_resource(self, _n, _avail_resources, _mode):
+ """Determine the best placement for a given server or affinity group."""
+
+ candidate_list = []
+ prior_resource = None
+
+ # If this is already placed one
+ if _n in self.prior_placements.keys():
+ prior_resource = _avail_resources.get_candidate(self.prior_placements[_n])
+ candidate_list.append(prior_resource)
+ else:
+ # TODO: need a list of candidates given as input?
+
+ _avail_resources.set_candidates()
+
+ candidate_list = self.constraint_solver.get_candidate_list(_n,
+ _avail_resources,
+ self.avail_hosts,
+ self.avail_groups)
+
+ if len(candidate_list) == 0:
+ if self.app.status == "ok":
+ if self.constraint_solver.status != "ok":
+ self.app.status = self.constraint_solver.status
+ else:
+ self.app.status = "fail while getting candidate hosts"
+ return None
+
+ if len(candidate_list) > 1:
+ self._set_compute_sort_base(_avail_resources.level, candidate_list)
+ candidate_list.sort(key=operator.attrgetter("sort_base"))
+
+ for c in candidate_list:
+ rn = c.get_resource_name(_avail_resources.level)
+ avail_cpus = c.get_vcpus(_avail_resources.level)
+ self.logger.debug("candidate = " + rn + " cpus = " + str(avail_cpus) + " sort = " + str(c.sort_base))
+
+ best_resource = None
+ if _avail_resources.level == "host" and isinstance(_n, Server):
+ best_resource = copy.deepcopy(candidate_list[0])
+ best_resource.level = "host"
+ else:
+ while len(candidate_list) > 0:
+ cr = candidate_list.pop(0)
+
+ (servers, groups) = get_next_placements(_n, _avail_resources.level)
+ open_node_list = self._open_list(servers, groups)
+
+ avail_resources = AvailResources(_avail_resources.level)
+ resource_name = cr.get_resource_name(_avail_resources.level)
+
+ avail_resources.set_next_avail_hosts(_avail_resources.avail_hosts, resource_name)
+ avail_resources.set_next_level()
+
+ # Recursive call
+ if self._run_greedy(open_node_list, avail_resources, _mode):
+ best_resource = copy.deepcopy(cr)
+ best_resource.level = _avail_resources.level
+ break
+ else:
+ if prior_resource is None:
+ self.logger.warning("rollback candidate = " + resource_name)
+
+ self._rollback_resources(_n)
+ self._rollback_node_placement(_n)
+
+ # TODO(Gueyoung): how to track the original error status?
+ if len(candidate_list) > 0 and self.app.status != "ok":
+ self.app.status = "ok"
+ else:
+ break
+
+ if best_resource is None and len(candidate_list) == 0:
+ if self.app.status == "ok":
+ self.app.status = "no available hosts"
+ self.logger.warning(self.app.status)
+
+ return best_resource
+
+ # TODO: for update opt.
+ def _get_best_resource_for_prior(self, _n, _avail_resources, _mode):
+ pass
+
+ def _set_compute_sort_base(self, _level, _candidate_list):
+ """Compute the weight of each candidate host."""
+
+ for c in _candidate_list:
+ cpu_ratio = -1
+ mem_ratio = -1
+ local_disk_ratio = -1
+
+ if _level == "rack":
+ cpu_ratio = float(c.rack_avail_vCPUs) / float(self.resource.CPU_avail)
+ mem_ratio = float(c.rack_avail_mem) / float(self.resource.mem_avail)
+ local_disk_ratio = float(c.rack_avail_local_disk) / float(self.resource.local_disk_avail)
+ elif _level == "host":
+ cpu_ratio = float(c.host_avail_vCPUs) / float(self.resource.CPU_avail)
+ mem_ratio = float(c.host_avail_mem) / float(self.resource.mem_avail)
+ local_disk_ratio = float(c.host_avail_local_disk) / float(self.resource.local_disk_avail)
+
+ c.sort_base = (1.0 - self.CPU_weight) * cpu_ratio + \
+ (1.0 - self.mem_weight) * mem_ratio + \
+ (1.0 - self.local_disk_weight) * local_disk_ratio
+
+ def _deduct_resources(self, _level, _best, _n, _mode):
+ """Apply new placement to hosting resources and groups."""
+
+ # Check if the chosen host is already applied.
+ if _mode == "new":
+ if _n in self.prior_placements.keys():
+ return
+ else:
+ if _n in self.node_placements.keys():
+ if _best.level == self.node_placements[_n].level:
+ return
+ else:
+ if _n in self.prior_placements.keys():
+ if _best.level == self.prior_placements[_n].level:
+ return
+
+ # Apply this placement to valet groups.
+
+ exclusivities = _n.get_exclusivities(_level)
+ if len(exclusivities) == 1:
+ exclusivity_group = exclusivities[exclusivities.keys()[0]]
+ self._add_exclusivity(_best, exclusivity_group)
+
+ if isinstance(_n, Group):
+ self._add_group(_level, _best, _n)
+
+ if len(_n.diversity_groups) > 0:
+ for _, div_group in _n.diversity_groups.iteritems():
+ self._add_group(_level, _best, div_group)
+
+ if len(_n.quorum_diversity_groups) > 0:
+ for _, div_group in _n.quorum_diversity_groups.iteritems():
+ self._add_group(_level, _best, div_group)
+
+ # Apply this placement to hosting resources.
+ if isinstance(_n, Server) and _level == "host":
+ self._deduct_server_resources(_best, _n)
+
+ def _add_exclusivity(self, _best, _group):
+ """Add new exclusivity group."""
+
+ if _group.vid not in self.avail_groups.keys():
+ gr = GroupResource()
+ gr.name = _group.vid
+ gr.group_type = "exclusivity"
+ gr.factory = "valet"
+ gr.level = _group.level
+ self.avail_groups[gr.name] = gr
+
+ self.logger.info("find exclusivity (" + _group.vid + ")")
+ else:
+ gr = self.avail_groups[_group.vid]
+
+ gr.num_of_placed_servers += 1
+
+ host_name = _best.get_resource_name(_group.level)
+ if host_name not in gr.num_of_placed_servers_of_host.keys():
+ gr.num_of_placed_servers_of_host[host_name] = 0
+ gr.num_of_placed_servers_of_host[host_name] += 1
+
+ chosen_host = self.avail_hosts[_best.host_name]
+ if _group.level == "host":
+ if _group.vid not in chosen_host.host_memberships.keys():
+ chosen_host.host_memberships[_group.vid] = gr
+ for _, np in self.avail_hosts.iteritems():
+ if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name:
+ if _group.vid not in np.rack_memberships.keys():
+ np.rack_memberships[_group.vid] = gr
+ else: # Rack level
+ for _, np in self.avail_hosts.iteritems():
+ if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name:
+ if _group.vid not in np.rack_memberships.keys():
+ np.rack_memberships[_group.vid] = gr
+
+ def _add_group(self, _level, _best, _group):
+ """Add new valet group."""
+
+ if _group.vid not in self.avail_groups.keys():
+ gr = GroupResource()
+ gr.name = _group.vid
+ gr.group_type = _group.group_type
+ gr.factory = _group.factory
+ gr.level = _group.level
+ self.avail_groups[gr.name] = gr
+
+ self.logger.info("find " + _group.group_type + " (" + _group.vid + ")")
+ else:
+ gr = self.avail_groups[_group.vid]
+
+ if _group.level == _level:
+ gr.num_of_placed_servers += 1
+
+ host_name = _best.get_resource_name(_level)
+ if host_name not in gr.num_of_placed_servers_of_host.keys():
+ gr.num_of_placed_servers_of_host[host_name] = 0
+ gr.num_of_placed_servers_of_host[host_name] += 1
+
+ chosen_host = self.avail_hosts[_best.host_name]
+ if _level == "host":
+ if _group.vid not in chosen_host.host_memberships.keys():
+ chosen_host.host_memberships[_group.vid] = gr
+ for _, np in self.avail_hosts.iteritems():
+ if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name:
+ if _group.vid not in np.rack_memberships.keys():
+ np.rack_memberships[_group.vid] = gr
+ else: # Rack level
+ for _, np in self.avail_hosts.iteritems():
+ if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name:
+ if _group.vid not in np.rack_memberships.keys():
+ np.rack_memberships[_group.vid] = gr
+
+ def _deduct_server_resources(self, _best, _n):
+ """Apply the reduced amount of resources to the chosen host.
+
+ _n is a server and _best is a compute host.
+ """
+
+ chosen_host = self.avail_hosts[_best.host_name]
+
+ chosen_host.host_avail_vCPUs -= _n.vCPUs
+ chosen_host.host_avail_mem -= _n.mem
+ chosen_host.host_avail_local_disk -= _n.local_volume_size
+
+ # Apply placement decision into NUMA
+ if _n.need_numa_alignment():
+ s_info = {}
+ s_info["stack_id"] = "none"
+ s_info["stack_name"] = self.app.app_name
+ s_info["uuid"] = "none"
+ s_info["name"] = _n.name
+ s_info["vcpus"] = _n.vCPUs
+ s_info["mem"] = _n.mem
+
+ chosen_host.NUMA.deduct_server_resources(s_info)
+
+ # TODO: need non_NUMA server?
+ # else:
+ # chosen_host.NUMA.apply_cpus_fairly(_n.vCPUs)
+ # chosen_host.NUMA.apply_mem_fairly(_n.mem)
+
+ if chosen_host.host_num_of_placed_servers == 0:
+ self.num_of_hosts += 1
+
+ chosen_host.host_num_of_placed_servers += 1
+
+ for _, np in self.avail_hosts.iteritems():
+ if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name:
+ np.rack_avail_vCPUs -= _n.vCPUs
+ np.rack_avail_mem -= _n.mem
+ np.rack_avail_local_disk -= _n.local_volume_size
+
+ np.rack_num_of_placed_servers += 1
+
+ def _close_node_placement(self, _level, _best, _v):
+ """Record the final placement decision."""
+
+ if _v not in self.node_placements.keys() and _v not in self.prior_placements.keys():
+ if _level == "host" or isinstance(_v, Group):
+ self.node_placements[_v] = _best
+
+ def _close_prior_placement(self, _level, _best, _v):
+ """Set the decision for placed server or group."""
+
+ if _v not in self.prior_placements.keys():
+ if _level == "host" or isinstance(_v, Group):
+ self.prior_placements[_v] = _best
+
+ def _rollback_resources(self, _v):
+ """Rollback the placement."""
+
+ if isinstance(_v, Server):
+ self._rollback_server_resources(_v)
+ elif isinstance(_v, Group):
+ for _, v in _v.subgroups.iteritems():
+ self._rollback_resources(v)
+
+ if _v in self.node_placements.keys():
+ chosen_host = self.avail_hosts[self.node_placements[_v].host_name]
+ level = self.node_placements[_v].level
+
+ if isinstance(_v, Group):
+ self._remove_group(chosen_host, _v, level)
+
+ exclusivities = _v.get_exclusivities(level)
+ if len(exclusivities) == 1:
+ ex_group = exclusivities[exclusivities.keys()[0]]
+ self._remove_exclusivity(chosen_host, ex_group)
+
+ if len(_v.diversity_groups) > 0:
+ for _, div_group in _v.diversity_groups.iteritems():
+ self._remove_group(chosen_host, div_group, level)
+
+ if len(_v.quorum_diversity_groups) > 0:
+ for _, div_group in _v.quorum_diversity_groups.iteritems():
+ self._remove_group(chosen_host, div_group, level)
+
+ def _remove_exclusivity(self, _chosen_host, _group):
+ """Remove the exclusivity group."""
+
+ gr = self.avail_groups[_group.vid]
+
+ host_name = _chosen_host.get_resource_name(_group.level)
+
+ gr.num_of_placed_servers -= 1
+ gr.num_of_placed_servers_of_host[host_name] -= 1
+
+ if gr.num_of_placed_servers_of_host[host_name] == 0:
+ del gr.num_of_placed_servers_of_host[host_name]
+
+ if gr.num_of_placed_servers == 0:
+ del self.avail_groups[_group.vid]
+
+ if _group.level == "host":
+ if _chosen_host.host_num_of_placed_servers == 0 and \
+ _group.vid in _chosen_host.host_memberships.keys():
+ del _chosen_host.host_memberships[_group.vid]
+
+ for _, np in self.avail_hosts.iteritems():
+ if _chosen_host.rack_name != "any" and np.rack_name == _chosen_host.rack_name:
+ if _group.vid in np.rack_memberships.keys():
+ del np.rack_memberships[_group.vid]
+ else: # Rack level
+ if _chosen_host.rack_num_of_placed_servers == 0:
+ for _, np in self.avail_hosts.iteritems():
+ if _chosen_host.rack_name != "any" and np.rack_name == _chosen_host.rack_name:
+ if _group.vid in np.rack_memberships.keys():
+ del np.rack_memberships[_group.vid]
+
+ def _remove_group(self, _chosen_host, _group, _level):
+ """Remove valet group."""
+
+ if _group.level == _level:
+ gr = self.avail_groups[_group.vid]
+
+ host_name = _chosen_host.get_resource_name(_level)
+
+ gr.num_of_placed_servers -= 1
+ gr.num_of_placed_servers_of_host[host_name] -= 1
+
+ if gr.num_of_placed_servers_of_host[host_name] == 0:
+ del gr.num_of_placed_servers_of_host[host_name]
+
+ if gr.num_of_placed_servers == 0:
+ del self.avail_groups[_group.vid]
+
+ exist_group = True
+ if _group.vid not in self.avail_groups.keys():
+ exist_group = False
+ else:
+ if host_name not in gr.num_of_placed_servers_of_host.keys():
+ exist_group = False
+
+ if _level == "host":
+ if not exist_group and _group.vid in _chosen_host.host_memberships.keys():
+ del _chosen_host.host_memberships[_group.vid]
+
+ for _, np in self.avail_hosts.iteritems():
+ if _chosen_host.rack_name != "any" and np.rack_name == _chosen_host.rack_name:
+ if _group.vid in np.rack_memberships.keys():
+ del np.rack_memberships[_group.vid]
+ else: # Rack level
+ if not exist_group:
+ for _, np in self.avail_hosts.iteritems():
+ if _chosen_host.rack_name != "any" and np.rack_name == _chosen_host.rack_name:
+ if _group.vid in np.rack_memberships.keys():
+ del np.rack_memberships[_group.vid]
+
+ def _rollback_server_resources(self, _v):
+ """Return back the amount of resources to host."""
+
+ if _v in self.node_placements.keys():
+ chosen_host = self.avail_hosts[self.node_placements[_v].host_name]
+
+ chosen_host.host_avail_vCPUs += _v.vCPUs
+ chosen_host.host_avail_mem += _v.mem
+ chosen_host.host_avail_local_disk += _v.local_volume_size
+
+ # Apply rollback into NUMA
+ if _v.need_numa_alignment():
+ s_info = {}
+ s_info["stack_id"] = "none"
+ s_info["stack_name"] = self.app.app_name
+ s_info["uuid"] = "none"
+ s_info["name"] = _v.name
+ s_info["vcpus"] = _v.vCPUs
+ s_info["mem"] = _v.mem
+
+ chosen_host.NUMA.rollback_server_resources(s_info)
+
+ chosen_host.host_num_of_placed_servers -= 1
+
+ if chosen_host.host_num_of_placed_servers == 0:
+ self.num_of_hosts -= 1
+
+ for _, np in self.avail_hosts.iteritems():
+ if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name:
+ np.rack_avail_vCPUs += _v.vCPUs
+ np.rack_avail_mem += _v.mem
+ np.rack_avail_local_disk += _v.local_volume_size
+
+ np.rack_num_of_placed_servers -= 1
+
+ # If the chosen host was a new host and its host type was unknown,
+ # rollback to the original unknown state.
+ if chosen_host.host_num_of_placed_servers == 0:
+ if chosen_host.old_candidate_host_types is not None and len(chosen_host.old_candidate_host_types) > 0:
+ flavor_type_list = _v.get_flavor_types()
+ ha = self.avail_groups[flavor_type_list[0]]
+
+ chosen_host.rollback_avail_resources(ha)
+ chosen_host.candidate_host_types = copy.deepcopy(chosen_host.old_candidate_host_types)
+ chosen_host.old_candidate_host_types.clear()
+
+ for hrk, hr in self.avail_hosts.iteritems():
+ if hrk != chosen_host.host_name:
+ if hr.rack_name == chosen_host.rack_name:
+ hr.rollback_avail_rack_resources(ha,
+ chosen_host.rack_avail_vCPUs,
+ chosen_host.rack_avail_mem,
+ chosen_host.rack_avail_local_disk)
+
+ def _rollback_node_placement(self, _v):
+ """Remove placement decisions."""
+
+ if _v in self.node_placements.keys():
+ del self.node_placements[_v]
+
+ if isinstance(_v, Group):
+ for _, sg in _v.subgroups.iteritems():
+ self._rollback_node_placement(sg)
diff --git a/engine/src/valet/engine/search/search_helper.py b/engine/src/valet/engine/search/search_helper.py
new file mode 100644
index 0000000..0e64ef7
--- /dev/null
+++ b/engine/src/valet/engine/search/search_helper.py
@@ -0,0 +1,43 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+#!/bin/python
+
+
+from valet.engine.app_manager.group import Group, LEVEL
+
+
+def get_next_placements(_n, _level):
+ """Get servers and groups to be handled in the next level of search."""
+
+ servers = {}
+ groups = {}
+
+ if isinstance(_n, Group):
+ if LEVEL.index(_n.level) < LEVEL.index(_level):
+ groups[_n.vid] = _n
+ else:
+ for _, sg in _n.subgroups.iteritems():
+ if isinstance(sg, Group):
+ groups[sg.vid] = sg
+ else:
+ servers[sg.vid] = sg
+ else:
+ servers[_n.vid] = _n
+
+ return servers, groups
diff --git a/engine/src/valet/rules/VNF_Rack_Diversity_RDN.json b/engine/src/valet/rules/VNF_Rack_Diversity_RDN.json
new file mode 100755
index 0000000..02c6afc
--- /dev/null
+++ b/engine/src/valet/rules/VNF_Rack_Diversity_RDN.json
@@ -0,0 +1,8 @@
+{
+ "name": "VNF_Rack_Diversity_RDN",
+ "type": "diversity",
+ "level": "rack",
+ "app_scope": "vnf",
+ "members": [],
+ "description": "Rack level diversity for vUSP RDN VMs"
+}
diff --git a/engine/src/valet/rules/VNF_Rack_Quorum_RDN.json b/engine/src/valet/rules/VNF_Rack_Quorum_RDN.json
new file mode 100755
index 0000000..c977ed1
--- /dev/null
+++ b/engine/src/valet/rules/VNF_Rack_Quorum_RDN.json
@@ -0,0 +1,8 @@
+{
+ "name": "VNF_Rack_Quorum_RDN",
+ "type": "quorum-diversity",
+ "level": "rack",
+ "app_scope": "vnf",
+ "members": [],
+ "description": "Rack level quorum for vUSP RDN VMs"
+}
diff --git a/engine/src/valet/rules/VNF_host_diversity_RDN.json b/engine/src/valet/rules/VNF_host_diversity_RDN.json
new file mode 100755
index 0000000..5da2b0a
--- /dev/null
+++ b/engine/src/valet/rules/VNF_host_diversity_RDN.json
@@ -0,0 +1,8 @@
+{
+ "name": "VNF_host_diversity_RDN",
+ "type": "diversity",
+ "level": "host",
+ "app_scope": "vnf",
+ "members": [],
+ "description": "Host level diversity for vUSP RDN VMs"
+}
diff --git a/engine/src/valet/rules/test_host_affinity_rule.json b/engine/src/valet/rules/test_host_affinity_rule.json
new file mode 100755
index 0000000..bd01da0
--- /dev/null
+++ b/engine/src/valet/rules/test_host_affinity_rule.json
@@ -0,0 +1,8 @@
+{
+ "name": "VALET_HOST_AFFINITY_RULE",
+ "type": "affinity",
+ "level": "host",
+ "app_scope": "vnf",
+ "members": [],
+ "description": "for test"
+}
diff --git a/engine/src/valet/rules/test_host_diveristy_rule0.json b/engine/src/valet/rules/test_host_diveristy_rule0.json
new file mode 100755
index 0000000..fbbc41b
--- /dev/null
+++ b/engine/src/valet/rules/test_host_diveristy_rule0.json
@@ -0,0 +1,8 @@
+{
+ "name": "VALET_HOST_DIVERSITY_RULE_0",
+ "type": "diversity",
+ "level": "host",
+ "app_scope": "lcp",
+ "members": [],
+ "description": "for test"
+}
diff --git a/engine/src/valet/rules/test_host_diveristy_rule1.json b/engine/src/valet/rules/test_host_diveristy_rule1.json
new file mode 100755
index 0000000..4a55515
--- /dev/null
+++ b/engine/src/valet/rules/test_host_diveristy_rule1.json
@@ -0,0 +1,8 @@
+{
+ "name": "VALET_HOST_DIVERSITY_RULE_1",
+ "type": "diversity",
+ "level": "host",
+ "app_scope": "lcp",
+ "members": [],
+ "description": "for test"
+}
diff --git a/engine/src/valet/rules/test_host_diveristy_rule2.json b/engine/src/valet/rules/test_host_diveristy_rule2.json
new file mode 100755
index 0000000..7ad81af
--- /dev/null
+++ b/engine/src/valet/rules/test_host_diveristy_rule2.json
@@ -0,0 +1,8 @@
+{
+ "name": "VALET_HOST_DIVERSITY_RULE_2",
+ "type": "diversity",
+ "level": "host",
+ "app_scope": "lcp",
+ "members": [],
+ "description": "for test"
+}
diff --git a/engine/src/valet/rules/test_host_exclusivity.json b/engine/src/valet/rules/test_host_exclusivity.json
new file mode 100755
index 0000000..d777b82
--- /dev/null
+++ b/engine/src/valet/rules/test_host_exclusivity.json
@@ -0,0 +1,8 @@
+{
+ "name": "VALET_HOST_EXCLUSIVITY",
+ "type": "exclusivity",
+ "level": "host",
+ "app_scope": "lcp",
+ "members": [],
+ "description": "for test"
+}
diff --git a/engine/src/valet/rules/test_host_exclusivity2.json b/engine/src/valet/rules/test_host_exclusivity2.json
new file mode 100755
index 0000000..2eff2a3
--- /dev/null
+++ b/engine/src/valet/rules/test_host_exclusivity2.json
@@ -0,0 +1,8 @@
+{
+ "name": "VALET_HOST_EXCLUSIVITY2",
+ "type": "exclusivity",
+ "level": "host",
+ "app_scope": "lcp",
+ "members": [],
+ "description": "for test"
+}
diff --git a/engine/src/valet/solver/__init__.py b/engine/src/valet/solver/__init__.py
new file mode 100644
index 0000000..bd50995
--- /dev/null
+++ b/engine/src/valet/solver/__init__.py
@@ -0,0 +1,18 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
diff --git a/engine/src/valet/solver/ostro.py b/engine/src/valet/solver/ostro.py
new file mode 100644
index 0000000..67ba5df
--- /dev/null
+++ b/engine/src/valet/solver/ostro.py
@@ -0,0 +1,529 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+
+import json
+import time, traceback
+from datetime import datetime
+
+from valet.engine.db_connect.locks import *
+
+
+# noinspection PyBroadException
+class Ostro(object):
+ """Main class for scheduling and query."""
+
+ def __init__(self, _bootstrapper):
+ self.valet_id = _bootstrapper.valet_id
+
+ self.dbh = _bootstrapper.dbh
+
+ self.rh = _bootstrapper.rh
+ self.ahandler = _bootstrapper.ah
+
+ self.optimizer = _bootstrapper.optimizer
+
+ self.logger = _bootstrapper.logger
+
+ # To lock valet-engine per datacenter.
+ self.lock = _bootstrapper.lock
+
+ self.end_of_process = False
+
+ def run_ostro(self):
+ """Run main valet-engine loop."""
+
+ self.logger.info("*** start valet-engine main loop")
+
+ # TODO(Gueyoung): Run resource handler thread.
+
+ try:
+ # NOTE(Gueyoung): if DB causes any error, Valet-Engine exits.
+
+ while self.end_of_process is False:
+
+ if not self.lock.set_regions():
+ break
+
+ request_list = self.dbh.get_requests()
+ if len(request_list) > 0:
+ rc = self._handle_requests(request_list)
+ Logger.set_req_id(None)
+ if not rc:
+ break
+
+ time.sleep(1)
+ except KeyboardInterrupt:
+ self.logger.error("keyboard interrupt")
+ except Exception:
+ self.logger.error(traceback.format_exc())
+
+ self.lock.done_with_my_turn()
+
+ self.logger.info("*** exit valet-engine")
+
+ def plan(self):
+ """Handle planning requests.
+
+ This is only for capacity planning.
+ """
+
+ self.logger.info("*** start planning......")
+
+ request_list = self.dbh.get_requests()
+
+ if len(request_list) > 0:
+ if not self._handle_requests(request_list):
+ self.logger.error("while planning")
+ return False
+ else:
+ self.logger.error("while reading plan")
+ return False
+
+ return True
+
+ def _handle_requests(self, _req_list):
+ """Deal with all requests.
+
+ Request types (operations) are
+ Group rule management: 'group_query', 'group_create'.
+ Placement management: 'create', 'delete', 'update', 'confirm', 'rollback'.
+ Engine management: 'ping'.
+ """
+
+ for req in _req_list:
+ req_id_elements = req["request_id"].split("-", 1)
+ opt = req_id_elements[0]
+ req_id = req_id_elements[1]
+ Logger.set_req_id(req_id)
+ begin_time = datetime.now()
+
+ req_body = json.loads(req["request"])
+
+ self.logger.debug("input request_type = " + opt)
+ self.logger.debug("request = " + json.dumps(req_body, indent=4))
+
+ # Check if the same request with prior request.
+ (status, result) = self.ahandler.check_history(req["request_id"])
+
+ if result is None:
+ if opt in ("create", "delete", "update", "confirm", "rollback"):
+ app = self._handle_app(opt, req_id, req_body)
+
+ if app is None:
+ errstr = "valet-engine exits due to " + opt + " error"
+ Logger.get_logger('audit').error(errstr, beginTimestamp=begin_time, elapsedTime=datetime.now() - begin_time, statusCode=False)
+ self.logger.error(errstr)
+ return False
+
+ if app.status == "locked":
+ errstr = "datacenter is being serviced by another valet"
+ Logger.get_logger('audit').error(errstr, beginTimestamp=begin_time, elapsedTime=datetime.now() - begin_time, statusCode=False)
+ self.logger.info(errstr)
+ continue
+
+ (status, result) = self._get_json_result(app)
+
+ elif opt in ("group_query", "group_create"):
+ # TODO(Gueyoung): group_delete and group_update
+
+ (status, result) = self._handle_rule(opt, req_body)
+
+ if result is None:
+ errstr = "valet-engine exits due to " + opt + " error"
+ Logger.get_logger('audit').error(errstr, beginTimestamp=begin_time, elapsedTime=datetime.now() - begin_time, statusCode=False)
+ self.logger.info(errstr)
+ return False
+
+ if status["status"] == "locked":
+ errstr = "datacenter is locked by the other valet"
+ Logger.get_logger('audit').error(errstr, beginTimestamp=begin_time, elapsedTime=datetime.now() - begin_time, statusCode=False)
+ self.logger.info(errstr)
+ continue
+
+ elif opt == "ping":
+ # To check if the local valet-engine is alive.
+
+ if req_body["id"] == self.valet_id:
+ self.logger.debug("got ping")
+
+ status = {"status": "ok", "message": ""}
+ result = {}
+ else:
+ continue
+
+ else:
+ status = {"status": "failed", "message": "unknown operation = " + opt}
+ result = {}
+
+ self.logger.error(status["message"])
+
+ else:
+ self.logger.info("decision already made")
+
+ # Store final result in memory cache.
+ if status["message"] != "timeout":
+ self.ahandler.record_history(req["request_id"], status, result)
+
+ # Return result
+ if not self.dbh.return_request(req["request_id"], status, result):
+ return False
+
+ self.logger.debug("output status = " + json.dumps(status, indent=4))
+ self.logger.debug(" result = " + json.dumps(result, indent=4))
+
+ Logger.get_logger('audit').info("done request = " + req["request_id"], beginTimestamp=begin_time, elapsedTime=datetime.now() - begin_time)
+ self.logger.info("done request = " + req["request_id"] + ' ----')
+
+ # this should be handled by exceptions so we can log the audit correctly
+ if self.lock.done_with_my_turn() is None:
+ return False
+
+ return True
+
+ def _handle_app(self, _opt, _req_id, _req_body):
+ """Deal with placement request.
+
+ Placement management: 'create', 'delete', 'update', 'confirm', 'rollback'.
+
+ Validate the request, extract info, search placements, and store results.
+ """
+
+ resource = None
+ app = None
+
+ # Validate request.
+ if _opt == "create":
+ app = self.ahandler.validate_for_create(_req_id, _req_body)
+ elif _opt == "update":
+ app = self.ahandler.validate_for_update(_req_id, _req_body)
+ elif _opt == "delete":
+ app = self.ahandler.validate_for_delete(_req_id, _req_body)
+ elif _opt == "confirm":
+ app = self.ahandler.validate_for_confirm(_req_id, _req_body)
+ elif _opt == "rollback":
+ app = self.ahandler.validate_for_rollback(_req_id, _req_body)
+
+ if app is None:
+ return None
+ elif app.status != "ok":
+ return app
+
+ # Check if datacenter is locked.
+ # Set the expired time of current lock.
+ lock_status = self.lock.is_my_turn(app.datacenter_id)
+ if lock_status is None:
+ return None
+ elif lock_status == "no":
+ app.status = "locked"
+ return app
+
+ # Load valet rules.
+ if self.rh.load_group_rules_from_db() is None:
+ return None
+
+ if _opt == "create":
+ # Make placement decisions for newly created servers in stack.
+
+ # Load resource (hosts, racks, metadata, groups) from DB.
+ if not self.rh.load_resource(_req_body.get("datacenter")):
+ return None
+
+ resource = self.rh.resource_list[0]
+
+            # Sync resource status with platform (OpenStack Nova).
+ if not resource.sync_with_platform():
+ self.logger.error("fail to sync resource status")
+ app.status = "fail to sync resource status"
+ return app
+
+ app.set_resource(resource)
+
+            app = self.ahandler.set_for_create(app)  # NOTE(review): assign return so the None/status checks below can trigger -- confirm set_for_create returns the app
+ if app is None:
+ return None
+ elif app.status != "ok":
+ return app
+
+ self.optimizer.place(app)
+ if app.status != "ok":
+ return app
+
+ elif _opt == "update":
+ # TODO(Gueyoung): assume only image update and
+ # Valet does not deal with this update.
+
+            app = self.ahandler.set_for_update(app)  # NOTE(review): assign return so the None/status checks below can trigger -- confirm set_for_update returns the app
+ if app is None:
+ return None
+ elif app.status != "ok":
+ return app
+
+ return app
+
+ elif _opt == "delete":
+ # Mark delete state in stack and servers.
+
+ # Load resource (hosts, racks, metadata, groups) from DB
+ if not self.rh.load_resource(_req_body.get("datacenter")):
+ return None
+
+ resource = self.rh.resource_list[0]
+
+            # Sync resource status with platform
+ if not resource.sync_with_platform():
+ self.logger.error("fail to sync resource status")
+ app.status = "fail to sync resource status"
+ return app
+
+ app.set_resource(resource)
+
+ self.optimizer.update(app)
+ if app.status != "ok":
+ return app
+
+ elif _opt == "confirm":
+ # Confirm prior create, delete, or update request.
+
+ datacenter_info = {"id": app.datacenter_id, "url": "none"}
+
+ # Load resource (hosts, racks, metadata, groups) from DB
+ # No sync with platform.
+ if not self.rh.load_resource(datacenter_info):
+ return None
+
+ resource = self.rh.resource_list[0]
+
+ app.set_resource(resource)
+
+ self.optimizer.confirm(app)
+ if app.status != "ok":
+ return app
+
+ elif _opt == "rollback":
+ # Rollback prior create, delete, or update request.
+
+ datacenter_info = {"id": app.datacenter_id, "url": "none"}
+
+ # Load resource (hosts, racks, metadata, groups) from DB
+ # No sync with platform.
+ if not self.rh.load_resource(datacenter_info):
+ return None
+
+ resource = self.rh.resource_list[0]
+
+ app.set_resource(resource)
+
+ self.optimizer.rollback(app)
+ if app.status != "ok":
+ return app
+
+ # Check timeout before store data.
+ if self.lock.expired < now():
+ app.status = "timeout"
+ return app
+
+ # Store app info into DB.
+ if not self.ahandler.store_app(app):
+ return None
+ self.logger.info("requested app(" + app.app_name + ") is stored")
+
+ # Store resource into DB.
+ if not resource.store_resource(opt=_opt, req_id=_req_id):
+ return None
+ self.logger.info("resource status(" + resource.datacenter_id + ") is stored")
+
+ # TODO(Gueyoung): if timeout happened at this moment,
+ # Rollback data change.
+
+ return app
+
+ def _handle_rule(self, _opt, _req_body):
+ """Deal with valet rule and groups request.
+
+ Group rule management: 'group_query', 'group_create'.
+ """
+
+ status = {}
+
+ result = None
+
+ if _opt == "group_query":
+ # Query valet group rules and server placements under rules.
+
+ rule_name = _req_body.get("name", None)
+ datacenter_id = _req_body.get("datacenter_id", None)
+
+ if rule_name is None or rule_name == "":
+ # Return basic info of all rules.
+
+ # Load valet rules.
+ if self.rh.load_group_rules_from_db() is None:
+ status["status"] = "failed"
+ status["message"] = "DB error"
+ return status, []
+
+ result = self.rh.get_rules()
+ if result is None:
+ status["status"] = "failed"
+ status["message"] = "DB error"
+ return status, []
+
+ else:
+ # Return rule info with server placements under this rule.
+
+ if datacenter_id is None:
+ status["status"] = "failed"
+ status["message"] = "no region id given"
+ return status, {}
+
+ # Check if datacenter is locked.
+ lock_status = self.lock.is_my_turn(datacenter_id)
+ if lock_status is None:
+ status["status"] = "failed"
+ status["message"] = "DB error"
+ return status, []
+ elif lock_status == "no":
+ status["status"] = "locked"
+ status["message"] = ""
+ return status, {}
+
+ message = self.rh.load_group_rule_from_db(rule_name)
+ if message is None:
+ status["status"] = "failed"
+ status["message"] = "DB error while loading rule"
+ return status, {}
+ elif message != "ok":
+ status["status"] = "failed"
+ status["message"] = message
+ self.logger.error(status["message"])
+ return status, {}
+
+ datacenter_info = {"id": datacenter_id, "url": "none"}
+
+ # Load resource data from DB.
+ message = self.rh.load_resource_with_rule(datacenter_info)
+ if message is None:
+ status["status"] = "failed"
+ status["message"] = "DB error while loading resource"
+ return status, {}
+ elif message != "ok":
+ status["status"] = "failed"
+ status["message"] = message
+ self.logger.error(status["message"])
+ return status, {}
+
+ resource = self.rh.resource_list[0]
+
+                # Sync resource status with platform
+ if not resource.sync_with_platform():
+ status["status"] = "failed"
+ status["message"] = "Platform delay"
+ return status, {}
+
+ result = self.rh.get_placements_under_rule(rule_name, resource)
+
+ # Check timeout before store data.
+ if self.lock.expired < now():
+ status["status"] = "failed"
+ status["message"] = "timeout"
+ return status, {}
+
+ # Store resource into DB.
+ if not resource.store_resource():
+ status["status"] = "failed"
+ status["message"] = "DB error while storing resource"
+ return status, {}
+ self.logger.info("resource status(" + datacenter_id + ") is stored")
+
+ # TODO(Gueyoung): If timeout happened here, Rollback stored data.
+
+ elif _opt == "group_create":
+ result = {}
+
+ rule_name = _req_body.get("name")
+ app_scope = _req_body.get("app_scope")
+ rule_type = _req_body.get("type")
+ level = _req_body.get("level")
+ members = _req_body.get("members", [])
+ desc = _req_body.get("desc", "none")
+
+ message = self.rh.create_group_rule(rule_name, app_scope,
+ rule_type, level,
+ members, desc)
+ if message is None:
+ status["status"] = "failed"
+ status["message"] = "DB error while creating rule"
+ return status, {}
+ elif message != "ok":
+ status["status"] = "failed"
+ status["message"] = message
+ return status, result
+
+ elif _opt == "group_delete":
+ pass
+ elif _opt == "group_update":
+ pass
+
+ status["status"] = "ok"
+ status["message"] = ""
+
+ return status, result
+
+ def _get_json_result(self, _app):
+ """Set request result format as JSON."""
+
+ status = {"status": "ok", "message": ""}
+
+ result = {}
+
+ if _app.status != "ok":
+ if _app.status.startswith("na:"):
+ status_elements = _app.status.split(':')
+ if status_elements[1].strip() != "update":
+ status["message"] = status_elements[1].strip()
+
+ return status, {}
+ else:
+ status["status"] = "failed"
+ status["message"] = _app.status
+ return status, {}
+
+ if _app.state == "create":
+            for sk, s in _app.servers.items():
+ if s.host_assignment_inx == -1:
+ result[s.host_assignment_variable] = '::' + s.host
+ else:
+ p = '::' + s.host
+
+ if s.host_assignment_variable not in result.keys():
+ result[s.host_assignment_variable] = []
+ result[s.host_assignment_variable].insert(s.host_assignment_inx, p)
+ elif _app.state == "update":
+            for sk, s in _app.servers.items():
+ if s.host_assignment_inx == -1:
+ result[s.host_assignment_variable] = ""
+ else:
+ p = ""
+
+ if s.host_assignment_variable not in result.keys():
+ result[s.host_assignment_variable] = []
+ result[s.host_assignment_variable].insert(s.host_assignment_inx, p)
+
+ return status, result
diff --git a/engine/src/valet/utils/__init__.py b/engine/src/valet/utils/__init__.py
new file mode 100644
index 0000000..bd50995
--- /dev/null
+++ b/engine/src/valet/utils/__init__.py
@@ -0,0 +1,18 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
diff --git a/engine/src/valet/utils/decryption.py b/engine/src/valet/utils/decryption.py
new file mode 100644
index 0000000..c523edc
--- /dev/null
+++ b/engine/src/valet/utils/decryption.py
@@ -0,0 +1,44 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+from base64 import b64decode
+from Crypto.Cipher import AES
+from hashlib import md5
+
+
+UNPAD = lambda s: s[:-ord(s[len(s) - 1:])]
+
+
+def decrypt(_k1, _k2, _k3, _pw):
+ code_list = ['g', 'E', 't', 'a', 'W', 'i', 'Y', 'H', '2', 'L']
+
+ code = int(_k1) + int(_k2) * int(_k3)
+ str_code = str(code)
+
+ key = ""
+ for i in range(0, len(str_code)):
+ c_code = code_list[int(str_code[i])]
+ key += c_code
+
+ enc_key = md5(key.encode('utf8')).hexdigest()
+
+ enc = b64decode(_pw)
+ iv = enc[:16]
+    cipher = AES.new(enc_key.encode('utf8'), AES.MODE_CBC, iv)  # PyCryptodome requires a bytes key; no-op for the ascii hexdigest on Py2
+
+ return UNPAD(cipher.decrypt(enc[16:])).decode('utf8')
diff --git a/engine/src/valet/utils/logger.py b/engine/src/valet/utils/logger.py
new file mode 100644
index 0000000..9a5fca0
--- /dev/null
+++ b/engine/src/valet/utils/logger.py
@@ -0,0 +1,349 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+
+"""Setup logging.
+from valet.utils.logger import Logger
+ Logger.get_logger('metric').info('bootstrap STUFF')
+"""
+
+import json
+import logging
+import socket
+from datetime import datetime, timedelta
+from logging.handlers import RotatingFileHandler
+
+
+class Logger(object):
+ logs = None
+
+ def __init__(self, _config=None, console=False):
+ if _config is None:
+ Logger.logs = {"console": Console()}
+
+ if Logger.logs is None:
+ Logger.logs = {"audit": Audit(_config), "metric": Metric(_config), "debug": Debug(_config)}
+ Logger.logs["error"] = Error(_config, Logger.logs["debug"])
+ if console:
+ Logger.logs["console"] = Console(Logger.logs["debug"])
+
+ @classmethod
+ def get_logger(cls, name):
+ return cls.logs[name].adapter
+
+ @classmethod
+ def set_req_id(cls, uuid):
+ EcompLogger._set_request_id(uuid)
+
+
+class EcompLogger(object):
+ """Parent class for all logs."""
+ logging.getLogger().setLevel(logging.DEBUG) # set root
+ _lvl = logging.INFO
+ _size = 10000000
+ datefmt = '%d/%m/%Y %H:%M:%S'
+
+ _requestID = None
+
+ def __init__(self):
+ self.fh = None
+ self.logger = None
+
+ def set_fh(self, name, fmt, _config, lvl=_lvl, size=_size):
+ logfile = _config.get("path") + name + ".log"
+ self.fh = RotatingFileHandler(logfile, mode='a', maxBytes=size, backupCount=2, encoding=None, delay=0)
+ self.fh.setLevel(lvl)
+ self.fh.setFormatter(fmt)
+
+ self.logger = logging.getLogger(name)
+ self.logger.addHandler(self.fh)
+ self.fh.addFilter(LoggerFilter())
+
+ def add_filter(self, fltr):
+ self.fh.addFilter(fltr())
+
+ @classmethod
+ def get_request_id(cls): return EcompLogger._requestID
+
+ @classmethod
+ def _set_request_id(cls, uuid): EcompLogger._requestID = uuid
+
+ @staticmethod
+ def format_str(fmt, sep="|"):
+ fmt = sep + sep.join(map(lambda x: '' if x.startswith('X') else '%(' + str(x) + ')s', fmt)) + sep
+ return fmt.replace('%(asctime)s', '%(asctime)s.%(msecs)03d')
+
+
+class LoggerFilter(logging.Filter):
+ def filter(self, record):
+ record.requestId = EcompLogger.get_request_id() or ''
+ return True
+
+
+class Audit(EcompLogger):
+ """A summary view of the processing of a requests.
+ It captures activity requests and includes time initiated, finished, and the API who invoked it
+ """
+ fmt = ['beginTimestamp', 'asctime', 'requestId', 'XserviceInstanceID', 'XthreadId', 'vmName', 'XserviceName', 'XpartnerName', 'statusCode', 'responseCode', 'responseDescription', 'XinstanceUUID', 'levelname', 'Xseverity', 'XserverIP', 'elapsedTime', 'server', 'XclientIP', 'XclassName', 'Xunused', 'XprocessKey', 'message', 'XcustomField2', 'XcustomField3', 'XcustomField4', 'XdetailMessage']
+
+ def __init__(self, _config):
+ EcompLogger.__init__(self)
+ fmt = logging.Formatter(self.format_str(Audit.fmt), EcompLogger.datefmt)
+ self.set_fh("audit", fmt, _config)
+ self.add_filter(AuditFilter)
+
+ # use value from kwargs in adapter process or the default given here
+ instantiation = {
+ 'beginTimestamp' : '',
+ 'statusCode' : True,
+ 'responseCode' : '900',
+ 'responseDescription' : '',
+ 'elapsedTime' : '',
+ }
+ self.adapter = AuditAdapter(self.logger, instantiation)
+
+
+# noinspection PyProtectedMember
+class AuditFilter(logging.Filter):
+ vmName = socket.gethostname()
+ vmFqdn = socket.getfqdn()
+ responseDecode = {
+ 'permission' : 100,
+ 'availabilty' : 200, # Availability/Timeouts
+ 'data' : 300,
+ 'schema' : 400,
+ 'process' : 500 # Business process errors
+ } # 900 # unknown
+
+ def filter(self, record):
+ record.beginTimestamp = AuditAdapter._beginTimestamp.strftime(EcompLogger.datefmt + ".%f")[:-3] if AuditAdapter._beginTimestamp else ""
+ record.vmName = AuditFilter.vmName
+ record.statusCode = "ERROR" if AuditAdapter._statusCode is False else "COMPLETE"
+ record.responseCode = AuditFilter.responseDecode.get(AuditAdapter._responseCode, AuditAdapter._responseCode)
+ record.responseDescription = AuditAdapter._responseDescription
+ record.elapsedTime = AuditAdapter._elapsedTime
+ record.server = AuditFilter.vmFqdn
+ return True
+
+
+class AuditAdapter(logging.LoggerAdapter):
+ _beginTimestamp = None
+ _elapsedTime = None
+ _responseDescription = None
+ _statusCode = None
+ _responseCode = ''
+
+ def process(self, msg, kwargs):
+ AuditAdapter._beginTimestamp = kwargs.pop('beginTimestamp', self.extra['beginTimestamp'])
+ AuditAdapter._elapsedTime = kwargs.pop('elapsedTime', self.extra['elapsedTime'])
+ AuditAdapter._responseCode = kwargs.pop('responseCode', self.extra['responseCode'])
+ AuditAdapter._responseDescription = kwargs.pop('responseDescription', self.extra['responseDescription'])
+ AuditAdapter._statusCode = kwargs.pop('statusCode', self.extra['statusCode'])
+ return msg, kwargs
+
+
+class Metric(EcompLogger):
+ """A detailed view into the processing of a transaction.
+ It captures the start and end of calls/interactions with other entities
+ """
+ fmt = ['beginTimestamp', 'targetEntity', 'asctime', 'requestId', 'XserviceInstanceID', 'XthreadId', 'vmName', 'XserviceName', 'XpartnerName', 'statusCode', 'XresponseCode', 'XresponseDescription', 'XinstanceUUID', 'levelname', 'Xseverity', 'XserverIP', 'elapsedTime', 'server', 'XclientIP', 'XclassName', 'Xunused', 'XprocessKey', 'message', 'XcustomField2', 'XcustomField3', 'XcustomField4', 'XdetailMessage']
+
+ def __init__(self, _config):
+ EcompLogger.__init__(self)
+ fmt = logging.Formatter(self.format_str(Metric.fmt), EcompLogger.datefmt)
+ self.set_fh("metric", fmt, _config)
+ self.add_filter(MetricFilter)
+
+ # use value from kwargs in adapter process or the default given here
+ instantiation = {
+ 'beginTimestamp' : '',
+ 'targetEntity' : '',
+ 'statusCode' : True,
+ 'elapsedTime' : '',
+ }
+ self.adapter = MetricAdapter(self.logger, instantiation)
+
+
+# noinspection PyProtectedMember
+class MetricFilter(logging.Filter):
+ vmName = socket.gethostname()
+ vmFqdn = socket.getfqdn()
+
+ def filter(self, record):
+ record.beginTimestamp = MetricAdapter._beginTimestamp.strftime(EcompLogger.datefmt + ".%f")[:-3] if MetricAdapter._beginTimestamp else ""
+ record.targetEntity = MetricAdapter._targetEntity
+ record.vmName = MetricFilter.vmName
+ record.statusCode = "ERROR" if MetricAdapter._statusCode is False else "COMPLETE"
+ record.elapsedTime = MetricAdapter._elapsedTime
+ record.server = MetricFilter.vmFqdn
+ return True
+
+
+class MetricAdapter(logging.LoggerAdapter):
+ _beginTimestamp = None
+ _elapsedTime = None
+ _targetEntity = None
+ _statusCode = None
+
+ def process(self, msg, kwargs):
+ MetricAdapter._beginTimestamp = kwargs.pop('beginTimestamp', self.extra['beginTimestamp'])
+ MetricAdapter._targetEntity = kwargs.pop('targetEntity', self.extra['targetEntity'])
+ MetricAdapter._elapsedTime = kwargs.pop('elapsedTime', self.extra['elapsedTime'])
+ MetricAdapter._statusCode = kwargs.pop('statusCode', self.extra['statusCode'])
+ return msg, kwargs
+
+
+class Error(EcompLogger):
+ """capture info, warn, error and fatal conditions"""
+ fmt = ['asctime', 'requestId', 'XthreadId', 'XserviceName', 'XpartnerName', 'targetEntity', 'targetServiceName', 'levelname', 'errorCode', 'errorDescription', 'filename)s:%(lineno)s - %(message']
+
+ def __init__(self, _config, logdebug):
+ EcompLogger.__init__(self)
+ fmt = logging.Formatter(self.format_str(Error.fmt) + '^', EcompLogger.datefmt)
+ self.set_fh("error", fmt, _config, lvl=logging.WARN)
+ # add my handler to the debug logger
+ logdebug.logger.addHandler(self.fh)
+ self.add_filter(ErrorFilter)
+
+
+# noinspection PyProtectedMember
+class ErrorFilter(logging.Filter):
+ errorDecode = {
+ 'permission' : 100,
+ 'availabilty' : 200, # Availability/Timeouts
+ 'data' : 300,
+ 'schema' : 400,
+ 'process' : 500 # Business process errors
+ } # 900 # unknown
+
+ def filter(self, record):
+ record.targetEntity = DebugAdapter._targetEntity
+ record.targetServiceName = DebugAdapter._targetServiceName
+ record.errorCode = ErrorFilter.errorDecode.get(DebugAdapter._errorCode, DebugAdapter._errorCode)
+ record.errorDescription = DebugAdapter._errorDescription
+ return True
+
+
+class Debug(EcompLogger):
+ """capture whatever data may be needed to debug and correct abnormal conditions"""
+ fmt = ['asctime', 'requestId', 'levelname', 'filename)s:%(lineno)s - %(message']
+
+ # use value from kwargs in adapter process or the default given here
+ instantiation = {
+ 'targetEntity' : '',
+ 'targetServiceName' : '',
+ 'errorCode' : '900',
+ 'errorDescription' : ''
+ }
+
+ def __init__(self, _config):
+ EcompLogger.__init__(self)
+ fmt = logging.Formatter(self.format_str(Debug.fmt) + '^', EcompLogger.datefmt)
+ self.set_fh("debug", fmt, _config, lvl=logging.DEBUG)
+
+ self.adapter = DebugAdapter(self.logger, Debug.instantiation)
+
+
+class DebugAdapter(logging.LoggerAdapter):
+ _targetEntity = ''
+ _targetServiceName = ''
+ _errorCode = ''
+ _errorDescription = ''
+
+ def process(self, msg, kwargs):
+ DebugAdapter._targetEntity = kwargs.pop('targetEntity', self.extra['targetEntity'])
+ DebugAdapter._targetServiceName = kwargs.pop('targetServiceName', self.extra['targetServiceName'])
+ DebugAdapter._errorCode = kwargs.pop('errorCode', self.extra['errorCode'])
+ DebugAdapter._errorDescription = kwargs.pop('errorDescription', self.extra['errorDescription'])
+ return msg, kwargs
+
+
+class Console(EcompLogger):
+ """ set logger to point to stderr."""
+ fmt = ['asctime', 'levelname', 'filename)s:%(lineno)s - %(message']
+
+ def __init__(self, logdebug=None):
+ EcompLogger.__init__(self)
+ ch = logging.StreamHandler()
+ ch.setLevel(logging.DEBUG)
+ fmt = logging.Formatter(self.format_str(Console.fmt, sep=" "), EcompLogger.datefmt)
+ ch.setFormatter(fmt)
+
+ # console can be written to when Debug is, or...
+ if logdebug is not None:
+ logdebug.logger.addHandler(ch)
+ return
+
+ # ...console is written to as a stand alone (ex. for tools using valet libs)
+ self.logger = logging.getLogger('console')
+ self.adapter = DebugAdapter(self.logger, Debug.instantiation)
+ self.logger.addHandler(ch)
+ ch.addFilter(LoggerFilter())
+
+
+def every_log(name):
+ log = Logger.get_logger(name)
+ log.info("so this happened")
+ log.debug("check out what happened")
+ log.warning("something bad happened")
+ log.error("something bad happened to me")
+
+
+""" MAIN """
+if __name__ == "__main__":
+ import argparse
+
+ parser = argparse.ArgumentParser(description='Test Logging', add_help=False) # <<<2
+ parser.add_argument('-pc', action='store_true', help='where to write log files')
+ parser.add_argument("-?", "--help", action="help", help="show this help message and exit")
+ opts = parser.parse_args()
+
+ path = "/opt/pc/" if opts.pc else "/tmp/"
+ config = json.loads('{ "path": "' + path + '" }')
+
+ now = datetime.now()
+    then = now + timedelta(hours=1)  # replace(hour=now.hour + 1) raises ValueError when run between 23:00 and 24:00
+ differ = now - then
+
+ # write to console
+ logger = Logger().get_logger('console')
+ logger.info('Log files are written to ' + path)
+ Logger.logs = None # this reset is only needed cuz of console test, never for prod
+
+ # create all loggers and save an instance of debug logger
+ logger = Logger(config, console=True).get_logger('debug')
+
+ metric = Logger.get_logger('metric')
+ metric.info('METRIC STUFF')
+ metric = Logger.get_logger('metric')
+ Logger.set_req_id('1235-123-1234-1234')
+ metric.info(' -- METRIC NOW THEN -- ', beginTimestamp=then, elapsedTime=differ, statusCode=False)
+ every_log('metric')
+ every_log('debug')
+ Logger.get_logger('audit').info('AUDIT STUFF', responseCode=100, responseDescription="you shoulda seen it", elapsedTime=differ, statusCode=False, beginTimestamp=now)
+ every_log('audit')
+ logger.error("--------------------------------")
+ logger.error('EC:100 TE:OS', errorCode='100', targetEntity='target entity')
+ logger.error('EC:schema TSN:IDK', errorCode='schema', targetServiceName='target service name')
+ logger.error('EC:393 ED:bt', errorCode='393', errorDescription='this is an error')
+ logger.error("--------------------------------")
+ try:
+ assert False # ("Now test logging an exception")
+ except AssertionError:
+ logger.exception('This is a log of an exception', errorDescription='EXMAN')
diff --git a/engine/src/valet/valet_main.py b/engine/src/valet/valet_main.py
new file mode 100644
index 0000000..ba91f97
--- /dev/null
+++ b/engine/src/valet/valet_main.py
@@ -0,0 +1,88 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2019 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+#!/usr/bin/env python2.7
+
+
+import argparse
+import json
+import os.path
+import sys
+import traceback
+
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from valet.bootstrapper import Bootstrapper
+from valet.solver.ostro import Ostro
+from valet.utils.logger import Logger
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description='Test Logging', add_help=False) # NOTE(review): description looks copy-pasted from the logger test and should describe Valet; add_help=False so the custom "-?" help flag can be registered below
+ parser.add_argument('config', help='config file path (required)')
+ parser.add_argument('-db', metavar='keyspace_string', help='override keyspace with typed in value')
+ parser.add_argument('-stdout', action='store_true', help='also print debugging log to stdout')
+ parser.add_argument("-?", "--help", action="help", help="show this help message and exit")
+ opts = parser.parse_args()
+
+ # Prepare configuration and logging.
+ # noinspection PyBroadException
+ try:
+ config_file_d = open(opts.config, 'r')
+ config_file = config_file_d.read()
+ config = json.loads(config_file)
+ config_file_d.close()
+
+ logger_config = config.get("logging")
+
+ if not os.path.exists(logger_config.get("path")):
+ os.makedirs(logger_config.get("path"))
+
+ # create all loggers and save an instance of the debug logger
+ logger = Logger(logger_config, console=opts.stdout).get_logger('debug')
+ except Exception:
+ print("error while configuring: " + traceback.format_exc())
+ sys.exit(2)
+
+ try:
+ config_file_dir = os.path.dirname(opts.config)
+ version_file_name = config_file_dir + "/version.json"
+ version_file_d = open(version_file_name, 'r')
+ version_json = json.dumps(json.loads(version_file_d.read()))
+ logger.info("Starting Valet with version: " + version_json)
+ version_file_d.close()
+ except Exception:
+ logger.warning("Warning! Error while printing version: " + traceback.format_exc())
+
+
+ # Bootstrap all components and configure them.
+ # noinspection PyBroadException
+ try:
+ if opts.db:
+ config['db']['keyspace'] = opts.db
+
+ bootstrapper = Bootstrapper(config, logger)
+ if not bootstrapper.config_valet():
+ print("error while configurating")
+ except Exception:
+ print("error while bootstrapping: " + traceback.format_exc())
+ sys.exit(2)
+
+ # Start valet-engine (aka. Ostro).
+ ostro = Ostro(bootstrapper)
+ ostro.run_ostro()
diff --git a/valetapi/Jenkinsfile b/valetapi/Jenkinsfile
new file mode 100644
index 0000000..cc778d7
--- /dev/null
+++ b/valetapi/Jenkinsfile
@@ -0,0 +1,276 @@
+#!/usr/bin/env groovy
+
+
+properties([[$class: 'ParametersDefinitionProperty', parameterDefinitions: [
+[$class: 'hudson.model.StringParameterDefinition', name: 'PHASE', defaultValue: "BUILD_DEPLOY"],
+[$class: 'hudson.model.StringParameterDefinition', name: 'TARGET_ENV', defaultValue: "DEV"],
+[$class: 'hudson.model.StringParameterDefinition', name: 'K8S_CLUSTER_URL',defaultValue: "https://k8s.onap.org"],
+[$class: 'hudson.model.StringParameterDefinition', name: 'K8S_CONTEXT',defaultValue: "default"],
+[$class: 'hudson.model.StringParameterDefinition', name: 'K8S_USERNAME',defaultValue: "root"],
+[$class: 'hudson.model.PasswordParameterDefinition', name: 'K8S_PASSWORD',defaultValue: "k8s_password"],
+[$class: 'hudson.model.PasswordParameterDefinition', name: 'K8S_TOKEN',defaultValue: "k8suser@fgps.onap.org:enc:dfxissWKLGRuldTwXuAuK-WAT-b-f-wS"],
+[$class: 'hudson.model.StringParameterDefinition', name: 'K8S_NAME',defaultValue: "DEV"],
+[$class: 'hudson.model.StringParameterDefinition', name: 'K8S_PODS_REPLICAS',defaultValue: "1"],
+[$class: 'hudson.model.StringParameterDefinition', name: 'K8S_SERVICE_ACCOUNT',defaultValue: "default"],
+[$class: 'hudson.model.BooleanParameterDefinition', name: 'USE_ROOT_NS',defaultValue: false],
+[$class: 'hudson.model.StringParameterDefinition', name: 'BROKER_URL',defaultValue: "http://broker.onap.org:30120"],
+[$class: 'hudson.model.StringParameterDefinition', name: 'PACT_USERNAME', defaultValue: "pactadmin"],
+[$class: 'hudson.model.PasswordParameterDefinition', name: 'PACT_PASSWORD', defaultValue: "pactadmin"],
+[$class: 'hudson.model.StringParameterDefinition', name: 'CONSUMER', defaultValue: ""],
+[$class: 'hudson.model.StringParameterDefinition', name: 'GIT_REPO_FOLDER',defaultValue: ""],
+[$class: 'hudson.model.StringParameterDefinition', name: 'DEV_TEST_GIT_APP',defaultValue: ""],
+[$class: 'hudson.model.StringParameterDefinition', name: 'DEV_TEST_SERVER_URL',defaultValue: ""],
+[$class: 'hudson.model.StringParameterDefinition', name: 'LISA_PATH', defaultValue: "/opt/app/workload/tools/itko/server/9.5.1/Projects/"],
+[$class: 'hudson.model.StringParameterDefinition', name: 'TEST_CASE_PATH',defaultValue: "/Tests/HelloTest.tst"],
+[$class: 'hudson.model.StringParameterDefinition', name: 'SUITE_CASE_PATH', defaultValue: "/Tests/Suites/AllTestsSuite.ste"],
+[$class: 'hudson.model.StringParameterDefinition', name: 'CONFIG_PATH', defaultValue: "/Configs/project.config"],
+[$class: 'hudson.model.StringParameterDefinition', name: 'STAGING_DOC_PATH',defaultValue: "/Tests/StagingDocs/Run1User1Cycle.stg"],
+
+[$class: 'hudson.model.StringParameterDefinition', name: 'ANS_ROLE',defaultValue: ""],
+[$class: 'hudson.model.StringParameterDefinition', name: 'ANS_INVENTORY',defaultValue: "inventory/dev/hosts"],
+[$class: 'hudson.model.StringParameterDefinition', name: 'GITPlaybookPATH', defaultValue: ""],
+[$class: 'hudson.model.StringParameterDefinition', name: 'GITConfigRolePATH', defaultValue: ""],
+[$class: 'hudson.model.StringParameterDefinition', name: 'SONAR_BREAKER_SKIP',defaultValue: "True"],
+[$class: 'hudson.model.StringParameterDefinition', name: 'SONAR_BREAKER_QRY_INTERVAL',defaultValue: "7000"],
+[$class: 'hudson.model.StringParameterDefinition', name: 'SONAR_BREAKER_QRY_MAXATTEMPTS',defaultValue: "100"],
+[$class: 'hudson.model.StringParameterDefinition', name: 'ECO_PIPELINE_ID',defaultValue: ""],
+[$class: 'hudson.model.StringParameterDefinition', name: 'BUILD_VERSION',defaultValue: ""]
+]]])
+
+
+/**
+ jdk1.8 = fixed name for java
+ M3 = fixed name for maven
+ general_maven_settings = fixed name for maven settings Jenkins managed file
+*/
+
+echo "Build branch: ${env.BRANCH_NAME}"
+
+node("docker") {
+ stage 'Checkout'
+ checkout scm
+
+ pom = readMavenPom file: 'pom.xml'
+ PROJECT_NAME = pom.properties['namespace'] + ":" + pom.artifactId;
+// env.SERVICE_NAME=pom.artifactId;
+ env.APP_NAME=pom.artifactId;
+
+ env.SERVICE_NAME=pom.properties['serviceArtifactName']
+ env.VERSION=pom.version;
+
+ env.ANS_ROLE=SERVICE_NAME+"_configrole"
+ env.BUILDNUMBER_TIMESTAMP="${BUILD_NUMBER}"+"-"+"${currentBuild.timeInMillis}"
+
+ echo "ANS_ROLE: ${ANS_ROLE}"
+
+ LABEL_VERSION=pom.version.replaceAll(".", "-");
+ echo "LabelVerion: " + LABEL_VERSION
+ NAMESPACE=pom.properties['namespace']
+
+ TARGET_ENV=TARGET_ENV.toLowerCase()
+ if(params.USE_ROOT_NS)
+ {
+ env.KUBE_NAMESPACE=pom.properties['kube.namespace']
+ }else{
+ env.KUBE_NAMESPACE=pom.properties['kube.namespace']+"-"+TARGET_ENV
+ }
+
+ if(TARGET_ENV!="dev"){
+ env.ANS_INVENTORY="inventory/"+TARGET_ENV+"/hosts"
+ echo "ANS_INVENTORY: ${ANS_INVENTORY}"
+ }
+
+ env.TARGET_ENV=TARGET_ENV
+ REPLICA_COUNT="${params.K8S_PODS_REPLICAS}"
+ env.IMAGE_NAME=pom.properties['docker.registry']+"/"+NAMESPACE+"/"+SERVICE_NAME+":latest"
+ if (env.TARGET_ENV == "prod") {
+ env.IMAGE_NAME=pom.properties['docker.registry']+"/"+NAMESPACE+"/"+SERVICE_NAME+":release"
+ }
+ echo "Artifact: " + PROJECT_NAME
+ env.DOCKER_HOST="tcp://localhost:4243"
+ env.DOCKER_CONFIG="${WORKSPACE}/.docker"
+ def branchName
+ //This value can ideally come from pom as in IMAGE_NAME
+ def dockerRegistry = "nexus.onap.org:5100"
+
+ if(params.BUILD_VERSION != "")
+ {
+ echo "BUILD VERSION Set : " + BUILD_VERSION
+ currentBuild.displayName = "VERSION-${BUILD_VERSION}"
+ currentBuild.description = "${BUILD_VERSION} ${ECO_PIPELINE_ID} ${PHASE}"
+ }else{
+ echo "BUILD VERSION Not Set, Use pom version: " + VERSION
+ currentBuild.displayName = "VERSION-" +VERSION
+ currentBuild.description = "${VERSION} ${ECO_PIPELINE_ID} ${PHASE}"
+ }
+
+ env.KUBECTL_OPTS="--server=${K8S_CLUSTER_URL} --insecure-skip-tls-verify=true --password=${K8S_PASSWORD} --username=${K8S_USERNAME}"
+ env.K8S_SERVER_CREDENTIALS = " k8server=${K8S_CLUSTER_URL}:6443 k8susername=${K8S_USERNAME} k8spassword=${K8S_PASSWORD} k8stoken=${K8S_TOKEN}"
+ if ("${K8S_TOKEN}" != "" ) {
+ env.KUBECTL_OPTS = "--server=${K8S_CLUSTER_URL} --insecure-skip-tls-verify=true --token=${K8S_TOKEN}"
+ env.K8S_SERVER_CREDENTIALS =" k8server=${K8S_CLUSTER_URL}:6443 k8susername= k8spassword= k8stoken=${K8S_TOKEN}"
+ }
+
+ //echo "env.KUBECTL_OPTS=${KUBECTL_OPTS}"
+ //echo "K8S_SERVER_CREDENTIALS=${K8S_SERVER_CREDENTIALS}"
+
+ //IST Variable
+ LISA_PATH_NEW="${LISA_PATH}"+SERVICE_NAME
+
+ // Create kubectl.conf file here from Pipeline properties provided.
+
+ withEnv(["PATH=${env.PATH}:${tool 'M3'}/bin:${tool 'jdk1.8'}/bin", "JAVA_HOME=${tool 'jdk1.8'}", "MAVEN_HOME=${tool 'M3'}"]) {
+
+ echo "JAVA_HOME=${env.JAVA_HOME}"
+ echo "MAVEN_HOME=${env.MAVEN_HOME}"
+ echo "PATH=${env.PATH}"
+
+ wrap([$class: 'ConfigFileBuildWrapper', managedFiles: [
+ [fileId: 'maven-settings.xml', variable: 'MAVEN_SETTINGS'],
+ [fileId: 'sonar-secret.txt', variable: 'SONAR_SECRET'],
+ [fileId: 'sonar.properties', variable: 'SONAR_PROPERTIES']
+ ]]) {
+
+ branchName = (env.BRANCH_NAME ?: "master").replaceAll(/[^0-9a-zA-Z_]/, "-")
+
+
+ if ("${PHASE}" == "BUILD" || "${PHASE}" == "BUILD_DEPLOY" ) {
+
+ stage 'Compile'
+ sh 'mvn -DskipTests -Dmaven.test.skip=true -s $MAVEN_SETTINGS -Ddummy.prop=$SONAR_PROPERTIES clean compile'
+
+ stage 'Unit Test'
+ sh 'mvn -s $MAVEN_SETTINGS verify -P all-tests'
+
+ stage 'Package'
+ sh 'mvn -DskipTests -Dmaven.test.skip=true -s $MAVEN_SETTINGS package'
+
+ stage 'Verify'
+ sh 'mvn -DskipTests -Dmaven.test.skip=true -s $MAVEN_SETTINGS verify'
+
+
+ if ("${DEV_TEST_GIT_APP}" != "") {
+
+ stage("CADev Repo Update") {
+ def GitinvokeURL = "${DEV_TEST_GIT_APP}/gitapp/service/git/push?gitUrl=${GIT_REPO_FOLDER}/${SERVICE_NAME}"
+ sh "curl -i -X GET \'${GitinvokeURL}\'"
+ }
+
+ stage("Component Test Using CA DEV TEST") {
+
+ def invokeURL = "${DEV_TEST_SERVER_URL}/lisa-invoke/runTest?testCasePath=${LISA_PATH_NEW}${TEST_CASE_PATH}&stagingDocPath=${LISA_PATH_NEW}${STAGING_DOC_PATH}"
+ sh "curl -i \'${invokeURL}\'"
+ invokeURL = "${DEV_TEST_SERVER_URL}/lisa-invoke/runSuite?suitePath=${LISA_PATH_NEW}${SUITE_CASE_PATH}&configPath=${LISA_PATH_NEW}${CONFIG_PATH}"
+ sh "curl -i \'${invokeURL}\'"
+ }
+ }
+
+ stage 'Component Test Using Mockito'
+ sh 'mvn -s $MAVEN_SETTINGS -Dtest=ITComponentTest test'
+
+ stage 'Publish Artifact'
+ //sh 'docker ps'
+ sh 'mvn -DskipTests -Dmaven.test.skip=true -Dhttps.protocols="TLSv1" -Djavax.net.ssl.trustStore="/opt/app/etc/cacerts.jks" -Djavax.net.ssl.trustStorePassword="password" -Djavax.net.ssl.keyStore="/opt/app/etc/cacerts.jks" -Djavax.net.ssl.keyStorePassword="password" -s $MAVEN_SETTINGS -U docker:build docker:push'
+
+ }
+
+ if ("${PHASE}" == "BUILD_DEPLOY" || "${PHASE}" == "DEPLOY" || "${PHASE}" == "CONFIG") {
+ // deploy to k8s
+
+ stage ('Clone playbook and configrole') {
+
+ // read values captured at the generate time
+ env.REPO_PROJECT = readFile './repoproject.txt'
+ env.REPO_PROJECT= "${REPO_PROJECT.trim()}"
+ // put some if conditions to use below values if empty above.
+ if(params.GITPlaybookPATH==""){
+ GITPlaybookPATH="https://git.onap.org/scm/"+"${REPO_PROJECT}"+"/"+SERVICE_NAME+"_playbook.git"
+ }
+ echo "GITPlaybookPATH: ${GITPlaybookPATH}"
+ if(params.GITConfigRolePATH==""){
+ GITConfigRolePATH="https://git.onap.org/scm/"+"${REPO_PROJECT}"+"/"+SERVICE_NAME+"_configrole.git"
+ }
+ echo "GITConfigRolePATH: ${GITConfigRolePATH}"
+
+ env.dockermechid = readFile './dockerbuilduser.txt'
+ env.dockermechid = "${dockermechid.trim()}"
+ env.repogitid = readFile './repogitid.txt'
+ env.repogitid = "${repogitid.trim()}"
+ echo "repogitid : ${repogitid}"
+
+ withCredentials([usernamePassword(credentialsId: env.repogitid, usernameVariable: 'ITS_SECRET_USERNAME', passwordVariable: 'ITS_SECRET_PASSWORD')]) {
+ env.DOCKER_HOST='unix:///var/run/docker.sock'
+ env.ANS_HOST = params.ANS_HOST ?: new URL("${params.K8S_CLUSTER_URL}").getHost()
+
+ sh 'rm -rf playbook'
+
+ dir('playbook') {
+ git url: "${GITPlaybookPATH}", credentialsId: "${repogitid}"
+ }
+
+
+ dir("playbook/roles/${ANS_ROLE}") {
+ if (branchName != 'master'){
+ echo "branchName: ${branchName}"
+ git url: "${GITConfigRolePATH}", credentialsId: "${repogitid}", branch: "${branchName}"
+ }
+ else{
+ echo "else branchName: ${branchName}"
+ git url: "${GITConfigRolePATH}", credentialsId: "${repogitid}"
+ }
+ }
+ }
+ }
+
+ }
+
+ if ( "${PHASE}" == "CONTRACT_GENERATE") {
+ stage 'Contract Generate Publish and QG2'
+ sh 'mvn -s $MAVEN_SETTINGS -Dtest=PactTestSuit test -DBROKER_URL=${BROKER_URL} -DuserName=${PACT_USERNAME} -Dpassword=${PACT_PASSWORD}'
+ sh 'mvn -s $MAVEN_SETTINGS pact:publish -DBROKER_URL=${BROKER_URL} -DuserName=${PACT_USERNAME} -Dpassword=${PACT_PASSWORD}'
+ }
+
+ if ("${PHASE}" == "CONTRACT_VERIFY" ) {
+ stage ('Contract Verify' ){
+ withEnv([
+ "APP_NAME=${SERVICE_NAME}",
+ "K8S_CTX=${K8S_CONTEXT}",
+ "APP_NS=${KUBE_NAMESPACE}",
+ "KUBECTL=/opt/app/kubernetes/v1.5.2/bin/kubectl",
+ ]) {
+ def CLUSTER_URL=K8S_CLUSTER_URL.substring(8)
+
+ NODE_PORT = sh (
+ script: '${KUBECTL} get service ${APP_NAME} --namespace ${APP_NS} --context ${K8S_CTX} ${KUBECTL_OPTS} --output jsonpath={.spec.ports[*].nodePort}',
+ returnStdout: true
+ ).trim()
+ if( "${CONSUMER}" == "") {
+ sh "mvn -s $MAVEN_SETTINGS pact:verify -DBROKER_URL=${BROKER_URL} -DuserName=${PACT_USERNAME} -Dpassword=${PACT_PASSWORD} -DAPP_URL=${CLUSTER_URL} -DAPP_PORT=${NODE_PORT}"
+ } else {
+ sh "mvn -s $MAVEN_SETTINGS pact:verify -Dpact.filter.consumers=${CONSUMER} -DBROKER_URL=${BROKER_URL} -DuserName=${PACT_USERNAME} -Dpassword=${PACT_PASSWORD} -DAPP_URL=${CLUSTER_URL} -DAPP_PORT=${NODE_PORT}"
+ }
+ }
+ }
+ }
+
+ if ("${PHASE}" == "INTEGRATION_TEST" || "${PHASE}" == "IST" || "${PHASE}" == "DEPLOY_IST" ) {
+
+ stage("CADev Repo Update") {
+ def GitinvokeURL = "${DEV_TEST_GIT_APP}/gitapp/service/git/push?gitUrl=${GIT_REPO_FOLDER}/${SERVICE_NAME}"
+ sh "curl -i -X GET \'${GitinvokeURL}\'"
+ }
+
+ stage("Functional Test") {
+
+ def invokeURL = "${DEV_TEST_SERVER_URL}/lisa-invoke/runTest?testCasePath=${LISA_PATH_NEW}${TEST_CASE_PATH}&stagingDocPath=${LISA_PATH_NEW}${STAGING_DOC_PATH}"
+ sh "curl -i \'${invokeURL}\'"
+ invokeURL = "${DEV_TEST_SERVER_URL}/lisa-invoke/runSuite?suitePath=${LISA_PATH_NEW}${SUITE_CASE_PATH}&configPath=${LISA_PATH_NEW}${CONFIG_PATH}"
+ sh "curl -i \'${invokeURL}\'"
+
+
+ }
+ }
+
+ }
+}
+} \ No newline at end of file
diff --git a/valetapi/LICENSE.txt b/valetapi/LICENSE.txt
new file mode 100644
index 0000000..2bc081c
--- /dev/null
+++ b/valetapi/LICENSE.txt
@@ -0,0 +1,45 @@
+
+The following licence applies to all files in this and subdirectories. Licences
+are included in individual source files where appropriate, and if it differs
+from this text, it supersedes this. Any file that does not have licence text
+defaults to being covered by this text; not all files support the addition of
+licenses.
+
+/*
+* ============LICENSE_START==========================================
+* ONAP - F-GPS API
+* ===================================================================
+* Copyright (c) 2019 AT&T Intellectual Property. All rights reserved.
+* ===================================================================
+*
+* Unless otherwise specified, all software contained herein is licensed
+* under the Apache License, Version 2.0 (the "License");
+* you may not use this software except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+*
+*
+* Unless otherwise specified, all documentation contained herein is licensed
+* under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+* you may not use this documentation except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* https://creativecommons.org/licenses/by/4.0/
+*
+* Unless required by applicable law or agreed to in writing, documentation
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+* ============LICENSE_END============================================
+*
+*/
diff --git a/valetapi/kubernetes/org-onap-fgps-dev/Chart.yaml b/valetapi/kubernetes/org-onap-fgps-dev/Chart.yaml
new file mode 100755
index 0000000..c9cd331
--- /dev/null
+++ b/valetapi/kubernetes/org-onap-fgps-dev/Chart.yaml
@@ -0,0 +1,18 @@
+# Copyright © 2019 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+description: Helm charts for valet api
+name: valetapi
+version: 1.0.0
diff --git a/valetapi/kubernetes/org-onap-fgps-dev/resources/config/application.properties b/valetapi/kubernetes/org-onap-fgps-dev/resources/config/application.properties
new file mode 100755
index 0000000..36eea73
--- /dev/null
+++ b/valetapi/kubernetes/org-onap-fgps-dev/resources/config/application.properties
@@ -0,0 +1,33 @@
+
+# ============LICENSE_START=======================================================
+# ONAP - F-GPS
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+#
+###
+
+server.contextPath={{.Values.appProperties.server.contextpath}}
+server.port={{.Values.appProperties.server.port}}
+server.ssl.enabled={{.Values.appProperties.server.sslenabled}}
+server.ssl.key-store={{.Values.appProperties.server.sslkeystore}}
+server.ssl.key-store-type={{.Values.appProperties.server.sslkeystoretype}}
+server.ssl.key-store-password={{.Values.appProperties.server.sslkeystorepassword}}
+server.ssl.key-alias={{.Values.appProperties.server.sslkeyalias}}
+server.http.port={{.Values.appProperties.server.httpport}}
+valet.dark={{.Values.appProperties.valetdark}}
+aaf.url.base={{.Values.appProperties.aafurl}}
diff --git a/valetapi/kubernetes/org-onap-fgps-dev/resources/config/auth.properties b/valetapi/kubernetes/org-onap-fgps-dev/resources/config/auth.properties
new file mode 100755
index 0000000..14385d9
--- /dev/null
+++ b/valetapi/kubernetes/org-onap-fgps-dev/resources/config/auth.properties
@@ -0,0 +1,28 @@
+
+# ============LICENSE_START=======================================================
+# ONAP - F-GPS
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+#
+###
+
+mso.name={{.Values.auth.msoName}}
+mso.pass={{.Values.auth.msoPass}}
+valet.aaf.name={{.Values.auth.aafName}}
+valet.aaf.pass={{.Values.auth.aasPassword}}
+portal.admin.role={{.Values.auth.portalRole}}
diff --git a/valetapi/kubernetes/org-onap-fgps-dev/resources/config/logback.xml b/valetapi/kubernetes/org-onap-fgps-dev/resources/config/logback.xml
new file mode 100755
index 0000000..71f466f
--- /dev/null
+++ b/valetapi/kubernetes/org-onap-fgps-dev/resources/config/logback.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+ <appender name="FILE-AUDIT"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>/api/valet/debug123.log</file>
+ <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
+ <Pattern>
+ %d{yyyy-MM-dd HH:mm:ss} - %msg%n
+ </Pattern>
+ </encoder>
+
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <!-- rollover daily -->
+ <fileNamePattern>/api/valet/archived/api123.%d{yyyy-MM-dd}.%i.log
+ </fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>10MB</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ </rollingPolicy>
+ </appender>
+ <logger name="com.valet" level="info" additivity="false">
+ <appender-ref ref="FILE-AUDIT" />
+ </logger>
+ <root level="INFO">
+ <appender-ref ref="FILE-AUDIT" />
+ </root>
+</configuration>
diff --git a/valetapi/kubernetes/org-onap-fgps-dev/resources/config/logmessages.properties b/valetapi/kubernetes/org-onap-fgps-dev/resources/config/logmessages.properties
new file mode 100755
index 0000000..b3b163d
--- /dev/null
+++ b/valetapi/kubernetes/org-onap-fgps-dev/resources/config/logmessages.properties
@@ -0,0 +1,29 @@
+
+# ============LICENSE_START=======================================================
+# ONAP - F-GPS
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+#
+###
+
+RESTSERVICE_HELLO=SERVICE0001I|Get a quick hello|No resolution needed|No action is required
+RESTSERVICE_HELLO_NAME=SERVICE0002I|Get a quick hello for {0}|No resolution needed|No action is required
+SPRINSERVICE_HELLO=SERVICE0003I|Say a quick hello|No resolution needed|No action is required
+SPRINSERVICE_HELLO_NAME=SERVICE0004I|Say a quick hello for {0}|No resolution needed|No action is required
+SPRINSERVICE_HELLO_MESSAGE=SERVICE0005I|Say hello message: {0}|No resolution needed|No action is required
+SPRINSERVICE_HELLO_MESSAGE_NAME=SERVICE0006I|Say hello message object:{0}|No resolution needed|No action is required
diff --git a/valetapi/kubernetes/org-onap-fgps-dev/resources/config/resources.properties b/valetapi/kubernetes/org-onap-fgps-dev/resources/config/resources.properties
new file mode 100755
index 0000000..7959caa
--- /dev/null
+++ b/valetapi/kubernetes/org-onap-fgps-dev/resources/config/resources.properties
@@ -0,0 +1,34 @@
+
+# ============LICENSE_START=======================================================
+# ONAP - F-GPS
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+#
+###
+
+musicdb.ip.1={{.Values.music.ipOne}}
+musicdb.ip.2={{.Values.music.ipTwo}}
+musicdb.ip.3={{.Values.music.ipThree}}
+db.create={{.Values.music.dbCreate}}
+music.MUSIC_DB_PORT={{.Values.music.port}}
+music.MUSIC_DB_URL={{.Values.music.url}}
+music.Keyspace={{.Values.music.Keyspace}}
+musicdb.namespace={{.Values.music.namespace}}
+musicdb.userId={{.Values.music.userid}}
+musicdb.password={{.Values.music.password}}
+instanceId={{.Values.music.instanceid}}
diff --git a/valetapi/kubernetes/org-onap-fgps-dev/resources/config/system.properties b/valetapi/kubernetes/org-onap-fgps-dev/resources/config/system.properties
new file mode 100755
index 0000000..43e497b
--- /dev/null
+++ b/valetapi/kubernetes/org-onap-fgps-dev/resources/config/system.properties
@@ -0,0 +1,44 @@
+
+# ============LICENSE_START=======================================================
+# ONAP - F-GPS
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+#
+###
+
+# If the environment property system_properties_path contains a path to a file, system properties are created from that file. If the environment variable is not present, the system.properties file on the class path is used for system property creation.
+
+org.onap.eelf.logging.file={{.Values.sysProperties.loggingFile}}
+
+# change as per logback.xml path
+org.onap.eelf.logging.path={{.Values.sysProperties.loggingPath}}
+logging.config={{.Values.sysProperties.loggingConfig}}
+
+
+# Default parameters during application startup.
+info.build.artifact={{.Values.sysProperties.buildInfo.artifact}}
+info.build.name={{.Values.sysProperties.buildInfo.name}}
+info.build.version={{.Values.sysProperties.buildInfo.version}}
+
+# Spring configuration files
+spring.config.location={{.Values.sysProperties.springConfig.location}}
+spring.config.name={{.Values.sysProperties.springConfig.name}}
+
+kubernetes.namespace={{.Values.sysProperties.kubeNamespace}}
+
+routeoffer={{.Values.sysProperties.routeOffer}}
diff --git a/valetapi/kubernetes/org-onap-fgps-dev/resources/config/version.properties b/valetapi/kubernetes/org-onap-fgps-dev/resources/config/version.properties
new file mode 100755
index 0000000..4f81f19
--- /dev/null
+++ b/valetapi/kubernetes/org-onap-fgps-dev/resources/config/version.properties
@@ -0,0 +1,27 @@
+
+# ============LICENSE_START=======================================================
+# ONAP - F-GPS
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+#
+###
+
+version.major={{.Values.version.major}}
+version.minor={{.Values.version.minor}}
+version.patch={{.Values.version.patch}}
+version.full={{.Values.version.full}}
diff --git a/valetapi/kubernetes/org-onap-fgps-dev/templates/configmap.yaml b/valetapi/kubernetes/org-onap-fgps-dev/templates/configmap.yaml
new file mode 100755
index 0000000..bc0aeec
--- /dev/null
+++ b/valetapi/kubernetes/org-onap-fgps-dev/templates/configmap.yaml
@@ -0,0 +1,21 @@
+# Copyright © 2019 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: valetapi-configs
+ namespace: org-onap-fgps-dev
+data:
+{{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }}
diff --git a/valetapi/kubernetes/org-onap-fgps-dev/templates/deployment.yaml b/valetapi/kubernetes/org-onap-fgps-dev/templates/deployment.yaml
new file mode 100755
index 0000000..b806f58
--- /dev/null
+++ b/valetapi/kubernetes/org-onap-fgps-dev/templates/deployment.yaml
@@ -0,0 +1,104 @@
+# Copyright © 2019 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: {{ .Chart.Name }}
+ namespace: {{ .Values.global.ns }}
+ labels:
+ app: {{ .Chart.Name }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ template:
+ metadata:
+ labels:
+ app: {{ .Chart.Name }}
+ release: {{ .Release.Name }}
+ spec:
+ serviceAccount: "default"
+ volumes:
+ - name: valetapi-volume
+ configMap:
+ name: valetapi-configs
+ items:
+ - key: application.properties
+ path: application.properties
+ - key: version.properties
+ path: version.properties
+ - key: system.properties
+ path: system.properties
+ - key: logback.xml
+ path: logback.xml
+ - key: resources.properties
+ path: resources.properties
+ - name: valetapi-logs
+ hostPath:
+ path: /opt/logs/fgps/api
+ imagePullSecrets:
+ - name: pullsecret
+ initContainers:
+ - name: init-cont
+ image: ubuntu:16.04
+ command: ['bash', '-c', 'useradd --uid 825 valetu && chown -R valetu /api']
+ volumeMounts:
+ - mountPath: /api
+ name: valetapi-logs
+ containers:
+ - env:
+ - name: systempropertiespath
+ value: system.properties
+ - name: applicationpropertiespath
+ value: application.properties
+ - name: logbackpath
+ value: logback.xml
+ - name: resourcespropertiespath
+ value: resources.properties
+ - name: java_runtime_arguments
+ value: "-Xms75m -Xmx250m -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode"
+ image: "nexus.onap.org:5100/org.onap.fgps/valetapi:latest"
+ imagePullPolicy: Always
+ name: "valetapi"
+ volumeMounts:
+ - name: valetapi-volume
+ mountPath: /opt/etc/config/application.properties
+ subPath: application.properties
+ - name: valetapi-volume
+ mountPath: /opt/etc/config/version.properties
+ subPath: version.properties
+ - name: valetapi-volume
+ mountPath: /opt/etc/config/system.properties
+ subPath: system.properties
+ - name: valetapi-volume
+ mountPath: /opt/etc/config/logback.xml
+ subPath: logback.xml
+ - name: valetapi-volume
+ mountPath: /opt/etc/config/resources.properties
+ subPath: resources.properties
+ - name: valetapi-logs
+ mountPath: /api
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ resources:
+ requests:
+ memory: "500Mi"
+ cpu: "250m"
+ limits:
+ memory: "1Gi"
+ cpu: "500m"
+ restartPolicy: Always
diff --git a/valetapi/kubernetes/org-onap-fgps-dev/templates/service.yaml b/valetapi/kubernetes/org-onap-fgps-dev/templates/service.yaml
new file mode 100755
index 0000000..2090a59
--- /dev/null
+++ b/valetapi/kubernetes/org-onap-fgps-dev/templates/service.yaml
@@ -0,0 +1,40 @@
+# Copyright © 2019 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ namespace: {{ .Values.global.ns }}
+ labels:
+ app: {{ .Chart.Name }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+ annotations:
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ {{if eq .Values.service.type "NodePort" -}}
+ - port: {{ .Values.service.externalPort }}
+ nodePort: {{ .Values.service.nodePort }}
+ name: {{ .Values.service.portName }}
+ {{- else -}}
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ name: {{ .Values.service.portName }}
+ {{- end}}
+ selector:
+ app: {{ .Chart.Name }}
+ release: {{ .Release.Name }}
diff --git a/valetapi/kubernetes/org-onap-fgps-dev/values.yaml b/valetapi/kubernetes/org-onap-fgps-dev/values.yaml
new file mode 100755
index 0000000..664fa01
--- /dev/null
+++ b/valetapi/kubernetes/org-onap-fgps-dev/values.yaml
@@ -0,0 +1,104 @@
+# Copyright © 2019 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+global:
+ loggingRepository: docker.elastic.co
+ loggingImage: beats/filebeat:5.5.0
+ repositoryCred:
+ user: repouser@fgps.onap.org
+ password: repo_password
+ repository: nexus.onap.org:5100
+ commonConfigPrefix: valetapi
+ ns: org-onap-fgps-dev
+ image:
+ filebeat: docker.elastic.co/beats/filebeat:5.5.0
+ repository: nexus.onap.org:5100
+ name: org.onap.fgps/valetapi:latest
+
+pullPolicy: Always
+nodePortPrefix: 300
+dataRootDir: /dockerdata-nfs
+config:
+ aai:
+ serviceName: aai-ext1.test.onap.org
+ port: 8443
+ msb:
+ serviceName: msb-iag
+ port: 80
+
+service:
+ type: NodePort
+ name: valetapi
+ externalPort: 8080
+ internalPort: 8080
+ nodePort: 30808
+ portName: valetapi
+ingress:
+ enabled: false
+replicaCount: 2
+
+music:
+ ipOne: music_host_1.onap.org
+ ipTwo: music_host_1.onap.org
+ ipThree: music_host_1.onap.org
+ dbCreate: true
+ port: 8080
+ url: /MUSIC/rest/v2/
+ keyspace: pn2
+ namespace: org.onap.fgps.dev.music
+ userid: musicuser@onap.org
+ password: zev1w/9GdTYf92pTUQ9DhabHbEfUFcF4+kLjwLdA2as=
+ instanceid: valet01
+
+auth:
+ msoName: so_user
+ msoPass: IvuHSsIVfVkcy9QWoVhjAlh5Fi9Rg5myLmqvZEYhChE=
+ aafName: userid@fgps.onap.org
+ aasPassword: XuhhetzEGCh8O7Fm9bLF38LNsLvZEg3zvHzmFTgijlKcsC2hgfNJ21ojMkIZI5HG
+ portalRole: org.onap.portal.valet.admin
+
+appProperties:
+ server:
+ contextpath: /api/valet/
+ port: 8443
+ sslenabled: true
+ sslkeystore: classpath:keystore.p12
+ sslkeystoretype: PKCS12
+ sslkeystorepassword: password
+ sslkeyalias: tomcat
+ httpport: 8080
+ valetdark: false
+ aafurl: https://aaf.onap.org:8095/proxy
+
+sysProperties:
+ loggingFile: logback.xml
+ loggingPath: opt/etc/config
+ loggingConfig: opt/etc/config/logback.xml
+ buildInfo:
+ artifact: ValetApi
+ name: org.onap.fgps
+ version: 0.0.1-SNAPSHOT
+ springConfig:
+ location: opt/etc/config/
+ name: application
+ kubeNamespace: org-onap-fgps
+ routeOffer: DEFAULT
+
+version:
+ major: 0
+ minor: 4
+ patch: 8
+ full: 0.4.8
+
+ \ No newline at end of file
diff --git a/valetapi/opt/etc/config/application.properties b/valetapi/opt/etc/config/application.properties
new file mode 100644
index 0000000..b13b815
--- /dev/null
+++ b/valetapi/opt/etc/config/application.properties
@@ -0,0 +1,50 @@
+
+# ============LICENSE_START=======================================================
+# ONAP - F-GPS
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+#
+###
+
+#Valet service
+server.contextPath=/api/valet/
+logging.pattern.console=
+logging.path=logs
+logging.file=${logging.path}/log.log
+logging.pattern.file=%d{dd-MM-yyyy HH:mm:ss.SSS} [%thread] %-5level %logger{36}.%M - %msg%n
+# If it is true it will print all logs for ping or else it will print only error logs in api.log file
+#logging.ping=true
+# To enable SSL, uncomment the following lines:
+server.port=8443
+server.ssl.enabled=true
+server.ssl.key-store=classpath:keystore.p12
+server.ssl.key-store-type=PKCS12
+server.ssl.key-store-password=password
+server.ssl.key-alias=tomcat
+
+# To enable HTTP while SSL is enabled, uncomment the following line:
+server.http.port=8080
+
+valet.dark=false
+
+aaf.url.base=https://aaf.onap.org:8095/proxy
+
+#If authentication flags are false, then credentials are not required. Otherwise, they are required.
+#authentication.aaf=false
+#authentication.basic=false
+
diff --git a/valetapi/opt/etc/config/logback.xml b/valetapi/opt/etc/config/logback.xml
new file mode 100644
index 0000000..44cf2f8
--- /dev/null
+++ b/valetapi/opt/etc/config/logback.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+ <appender name="FILE-AUDIT"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>/api/debug.log</file>
+ <encoder
+ class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
+ <Pattern> %d{yyyy-MM-dd HH:mm:ss} - %msg%n </Pattern>
+ </encoder>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy"> <!-- rollover daily -->
+ <fileNamePattern>/api/archived/api.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>10MB</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ </rollingPolicy>
+ </appender>
+ <logger name="com.valet" level="info" additivity="false">
+ <appender-ref ref="FILE-AUDIT" />
+ </logger>
+ <root level="INFO">
+ <appender-ref ref="FILE-AUDIT" />
+ </root>
+</configuration>
diff --git a/valetapi/opt/etc/config/system.properties b/valetapi/opt/etc/config/system.properties
new file mode 100644
index 0000000..0cb3218
--- /dev/null
+++ b/valetapi/opt/etc/config/system.properties
@@ -0,0 +1,48 @@
+
+# ============LICENSE_START=======================================================
+# ONAP - F-GPS
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+#
+###
+
+# If the environment property system_properties_path contains a path to a file , System properties created using the file. If the environment variable not present, system.properties in the class path is used for system property creation
+
+org.onap.eelf.logging.file=logback.xml
+
+# change as per logback.xml path
+org.onap.eelf.logging.path=opt/etc/config
+logging.config=opt/etc/config/logback.xml
+
+
+# Default parameters during application startup.
+info.build.artifact= ValetApi
+info.build.name=org.onap.fgps
+info.build.version=0.0.1-SNAPSHOT
+
+# Spring configuration files
+spring.config.location=opt/etc/config/
+spring.config.name=application
+
+kubernetes.namespace=org-onap-fgps
+
+routeoffer=DEFAULT
+
+app_display_name = Valet API
+instance_uuid=2768f67b-fdd6-4dc6-9ecb-8325d214a36e
+application_name = ValetApi
diff --git a/valetapi/pom.xml b/valetapi/pom.xml
new file mode 100644
index 0000000..8b86c35
--- /dev/null
+++ b/valetapi/pom.xml
@@ -0,0 +1,515 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <groupId>org.onap.fgps</groupId>
+ <artifactId>ValetApi</artifactId>
+ <version>0.0.1-SNAPSHOT</version>
+
+ <properties>
+ <swagger.directory>${basedir}/target/classes/META-INF/resources/swagger</swagger.directory>
+ <java.version>1.8</java.version>
+ <docker.registry>nexus.onap.org:5100</docker.registry>
+ <build.number>local</build.number>
+ <kube.namespace>org-onap-fgps</kube.namespace>
+ <service.account>svc-account</service.account>
+ <namespace>org.onap.fgps</namespace>
+ <maven.compiler.source>1.8</maven.compiler.source>
+ <maven.compiler.target>1.8</maven.compiler.target>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <sdk.java.rest>6.2.0.11</sdk.java.rest>
+ <sonar.java.coveragePlugin>jacoco</sonar.java.coveragePlugin>
+ <sonar.dynamicAnalysis>reuseReports</sonar.dynamicAnalysis>
+ <sonar.surefire.reportsPath>${basedir}/target/surefire-reports</sonar.surefire.reportsPath>
+ <sonar.failsafe.reportsPath>${basedir}/target/failsafe-reports</sonar.failsafe.reportsPath>
+ <jacoco.path>${basedir}/target/jacoco_report</jacoco.path>
+ <jacoco.itPath>${basedir}/target/jacoco_itReport</jacoco.itPath>
+ <sonar.jacoco.reportPath>${basedir}/target/jacoco-ut.exec</sonar.jacoco.reportPath>
+ <sonar.jacoco.itReportPath>${basedir}/target/jacoco-it.exec</sonar.jacoco.itReportPath>
+ <sonar.language>java</sonar.language>
+ <serviceArtifactName>valetapi</serviceArtifactName>
+
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
+ <java.version>1.8</java.version>
+ </properties>
+ <!-- Used during verify stage in Jenkins -->
+ <profiles>
+ <profile>
+ <id>all-tests</id>
+ <properties>
+ <build.profile.id>all-tests</build.profile.id>
+
+ <!-- All tests are run. -->
+ <skip.integration.tests>false</skip.integration.tests>
+ <skip.unit.tests>false</skip.unit.tests>
+ </properties>
+ </profile>
+ <profile>
+ <id>dev</id>
+ </profile>
+ <profile>
+ <id>integration-test</id>
+ <properties>
+ <!-- Only integration tests are run. -->
+ <build.profile.id>integration-test</build.profile.id>
+ <skip.integration.tests>false</skip.integration.tests>
+ <skip.unit.tests>true</skip.unit.tests>
+ </properties>
+ </profile>
+ </profiles>
+
+ <repositories>
+ <repository>
+ <id>central</id>
+ <name>nexus central repo</name>
+ <url>${nexusurl}/${centralPath}</url>
+ </repository>
+ <repository>
+ <id>atlassian</id>
+ <name>nexus atlassian repo</name>
+ <url>${nexusurl}/${atlassianPath}</url>
+ </repository>
+ </repositories>
+
+ <pluginRepositories>
+ <pluginRepository>
+ <id>central</id>
+ <name>nexus central plugin repo</name>
+ <url>${nexusurl}/${pluginPath}</url>
+ </pluginRepository>
+ <pluginRepository>
+ <id>atlassian</id>
+ <name>nexus central atlassian plugin repo</name>
+ <url>${nexusurl}/${atlassianPluginPath}</url>
+ </pluginRepository>
+ </pluginRepositories>
+
+ <parent>
+ <groupId>org.springframework.boot</groupId>
+ <artifactId>spring-boot-starter-parent</artifactId>
+ <version>2.1.2.RELEASE</version>
+ <relativePath /> <!-- lookup parent from repository -->
+ </parent>
+
+ <developers>
+ <developer>
+ <id>${userId}</id>
+ </developer>
+ </developers>
+
+ <dependencies>
+ <dependency>
+ <groupId>xerces</groupId>
+ <artifactId>xercesImpl</artifactId>
+ <version>2.12.0</version>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-codec-http</artifactId>
+ <version>4.0.40.Final</version>
+ </dependency>
+ <dependency>
+ <groupId>commons-collections</groupId>
+ <artifactId>commons-collections</artifactId>
+ <version>3.2.2</version>
+ </dependency>
+ <dependency>
+ <groupId>org.codehaus.groovy</groupId>
+ <artifactId>groovy-all</artifactId>
+ <version>2.4.13</version>
+ </dependency>
+ <dependency>
+ <groupId>au.com.dius</groupId>
+ <artifactId>pact-jvm-consumer-junit_2.11</artifactId>
+ <version>3.3.9</version>
+ <exclusions>
+ <exclusion>
+ <groupId>org.codehaus.groovy</groupId>
+ <artifactId>groovy-all</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>au.com.dius</groupId>
+ <artifactId>pact-jvm-provider-junit_2.11</artifactId>
+ <version>3.3.9</version>
+ </dependency>
+ <dependency>
+ <groupId>org.jmockit</groupId>
+ <artifactId>jmockit</artifactId>
+ <version>1.38</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.springframework.boot</groupId>
+ <artifactId>spring-boot-starter-test</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.fasterxml.jackson.dataformat</groupId>
+ <artifactId>jackson-dataformat-yaml</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.fasterxml.uuid</groupId>
+ <artifactId>java-uuid-generator</artifactId>
+ <version>3.1.4</version>
+ </dependency>
+ <dependency>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>jackson-databind</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.commons</groupId>
+ <artifactId>commons-lang3</artifactId>
+ <version>3.4</version>
+ </dependency>
+ <dependency>
+ <groupId>com.googlecode.json-simple</groupId>
+ <artifactId> json-simple</artifactId>
+ <version>1.1.1</version>
+ </dependency>
+ <dependency>
+ <groupId>ch.qos.logback</groupId>
+ <artifactId>logback-core</artifactId>
+ <version>1.2.3</version><!--$NO-MVN-MAN-VER$-->
+ <exclusions>
+ <exclusion>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-log4j12</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>ch.qos.logback</groupId>
+ <artifactId>logback-classic</artifactId>
+ <version>1.2.3</version>
+ <exclusions>
+ <exclusion>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-log4j12</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>commons-codec</groupId>
+ <artifactId>commons-codec</artifactId>
+ <version>1.10</version>
+ </dependency>
+ <dependency>
+ <groupId>commons-lang</groupId>
+ <artifactId>commons-lang</artifactId>
+ <version>2.6</version>
+ </dependency>
+<!--
+ <dependency>
+ <groupId>org.jboss.resteasy</groupId>
+ <artifactId>resteasy-client</artifactId>
+ <version>4.0.0.Beta3</version>
+ </dependency>
+-->
+ <dependency>
+ <groupId>io.swagger</groupId>
+ <artifactId>swagger-core</artifactId>
+ <version>1.5.20</version>
+ </dependency>
+ <dependency>
+ <groupId>io.swagger</groupId>
+ <artifactId>swagger-annotations</artifactId>
+ <version>1.5.20</version>
+ </dependency>
+ <!-- <dependency>
+ <groupId>org.glassfish.jersey.core</groupId>
+ <artifactId>jersey-client</artifactId>
+ <version>2.24</version>
+ </dependency> -->
+ <!-- <dependency>
+ <groupId>org.glassfish.jersey.core</groupId>
+ <artifactId>jersey-common</artifactId>
+ <version>2.24</version>
+ </dependency> -->
+ <!-- https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-web -->
+ <dependency>
+ <groupId>org.springframework.boot</groupId>
+ <artifactId>spring-boot-starter-web</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>org.springframework.boot</groupId>
+ <artifactId>spring-boot-starter-test</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.springframework.boot</groupId>
+ <artifactId>spring-boot-starter-actuator</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.springframework.boot</groupId>
+ <artifactId>spring-boot-starter-tomcat</artifactId>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.springframework</groupId>
+ <artifactId>spring-aop</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.aspectj</groupId>
+ <artifactId>aspectjrt</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>org.aspectj</groupId>
+ <artifactId>aspectjweaver</artifactId>
+ </dependency>
+ <!-- csi-logging-dependencies goes here -->
+ <!-- csi-logging-dependency-addon -->
+ </dependencies>
+ <build>
+ <finalName>ValetApi</finalName>
+ <plugins>
+ <plugin>
+ <groupId>org.jacoco</groupId>
+ <artifactId>jacoco-maven-plugin</artifactId>
+ <version>0.7.5.201505241946</version>
+ <executions>
+ <!-- Prepares the property pointing to the JaCoCo runtime agent which
+ is passed as VM argument when Maven the Surefire plugin is executed. -->
+ <execution>
+ <id>pre-unit-test</id>
+ <goals>
+ <goal>prepare-agent</goal>
+ </goals>
+ <configuration>
+ <!-- Sets the path to the file which contains the execution data. -->
+ <destFile>${sonar.jacoco.reportPath}</destFile>
+ <propertyName>surefireArgLine</propertyName>
+ </configuration>
+ </execution>
+ <!-- Ensures that the code coverage report for unit tests is created
+ after unit tests have been run. -->
+ <execution>
+ <id>post-unit-test</id>
+ <phase>test</phase>
+ <goals>
+ <goal>report</goal>
+ </goals>
+ <configuration>
+ <!-- Sets the path to the file which contains the execution data. -->
+ <dataFile>${sonar.jacoco.reportPath}</dataFile>
+ <!-- Sets the output directory for the code coverage report. -->
+ <outputDirectory>${jacoco.path}</outputDirectory>
+ </configuration>
+ </execution>
+ <!-- Prepares the property pointing to the JaCoCo runtime agent which
+ is passed as VM argument when Maven the Failsafe plugin is executed. -->
+ <execution>
+ <id>pre-integration-test</id>
+ <phase>pre-integration-test</phase>
+ <goals>
+ <goal>prepare-agent</goal>
+ </goals>
+ <configuration>
+ <!-- Sets the path to the file which contains the execution data. -->
+ <destFile>${sonar.jacoco.itReportPath}</destFile>
+ <!-- Sets the name of the property containing the settings for JaCoCo
+ runtime agent. -->
+ <propertyName>failsafeArgLine</propertyName>
+ </configuration>
+ </execution>
+ <!-- Ensures that the code coverage report for integration tests after
+ integration tests have been run. -->
+ <execution>
+ <id>post-integration-test</id>
+ <phase>post-integration-test</phase>
+ <goals>
+ <goal>report</goal>
+ </goals>
+ <configuration>
+ <!-- Sets the path to the file which contains the execution data. -->
+ <dataFile>${sonar.jacoco.itReportPath}/</dataFile>
+ <!-- Sets the output directory for the code coverage report. -->
+ <outputDirectory>${jacoco.itPath}</outputDirectory>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <!-- Used for unit tests -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <version>2.15</version>
+ <configuration>
+ <!-- Sets the VM argument line used when unit tests are run. -->
+ <argLine>${surefireArgLine}</argLine>
+
+ <!-- Skips unit tests if the value of skip.unit.tests property is true -->
+ <skipTests>${skip.unit.tests}</skipTests>
+ <!-- Excludes integration tests when unit tests are run. -->
+ <excludes>
+
+ <exclude>**/IT*.java</exclude>
+
+ </excludes>
+ </configuration>
+ </plugin>
+ <!-- Used for integration tests -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-failsafe-plugin</artifactId>
+ <version>2.15</version>
+ <executions>
+ <!-- Ensures that both integration-test and verify goals of the Failsafe
+ Maven plugin are executed. -->
+ <execution>
+ <id>integration-tests</id>
+ <goals>
+ <goal>integration-test</goal>
+ <goal>verify</goal>
+ </goals>
+ <configuration>
+ <!-- Sets the VM argument line used when integration tests are run. -->
+ <argLine>${failsafeArgLine}</argLine>
+
+ <!-- Skips integration tests if the value of skip.integration.tests
+ property is true -->
+ <skipTests>${skip.integration.tests}</skipTests>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>au.com.dius</groupId>
+ <artifactId>pact-jvm-provider-maven_2.11</artifactId>
+ <version>3.3.9</version>
+ <configuration>
+ <!--pactBrokerUrl,user name,password and project version required only
+ for consumer -->
+ <pactBrokerUrl>${BROKER_URL}</pactBrokerUrl>
+ <pactBrokerUsername>${userName}</pactBrokerUsername>
+ <pactBrokerPassword>${password}</pactBrokerPassword>
+ <projectVersion>0.0.1</projectVersion>
+ <!-- service provider required only for producer -->
+ <serviceProviders>
+ <serviceProvider>
+ <name>ValetApi</name>
+ <protocol>http</protocol>
+ <host>${APP_URL}</host>
+ <port>${APP_PORT}</port>
+ <path>/</path>
+ <pactBroker>
+ <url>${BROKER_URL}</url>
+ <authentication>
+ <username>${userName}</username>
+ <password>${password}</password>
+ </authentication>
+ </pactBroker>
+ <!-- If you want to test specific PACT and specific port uncomment
+ the consumers section and comment the pact broker in the service provider -->
+ <!-- <consumers> <consumer> <name>pactconsumer</name> <pactUrl>${brokerUrl}/pacts/provider/pactdemo/consumer/pactdemo/latest</pactUrl>
+ <pactUrl>${brokerUrl}/pacts/provider/pactdemo/consumer/pactconsumer2/version/0.0.1</pactUrl>
+ </consumer> </consumers> -->
+ </serviceProvider>
+ </serviceProviders>
+ </configuration>
+ </plugin>
+<plugin>
+ <artifactId>maven-dependency-plugin</artifactId>
+ </plugin>
+ <plugin>
+ <groupId>com.github.kongchen</groupId>
+ <artifactId>swagger-maven-plugin</artifactId>
+ <version>3.1.3</version>
+ <configuration>
+ <apiSources>
+ <apiSource>
+ <locations>org.onap.fgps.api.service.rs</locations>
+ <basePath>/api</basePath>
+ <info>
+ <title>${project.artifactId} Service</title>
+ <version>${project.version}</version>
+ </info>
+ <swaggerDirectory>${swagger.directory}</swaggerDirectory>
+ </apiSource>
+ </apiSources>
+ </configuration>
+ <executions>
+ <execution>
+ <!-- <phase>compile</phase> -->
+ <phase>package</phase>
+ <goals>
+ <goal>generate</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>exec-maven-plugin</artifactId>
+ <groupId>org.codehaus.mojo</groupId>
+ </plugin>
+ <plugin>
+ <groupId>com.spotify</groupId>
+ <artifactId>docker-maven-plugin</artifactId>
+ <version>0.4.11</version>
+ <configuration>
+ <imageName>${docker.registry}/${namespace}/${serviceArtifactName}</imageName>
+ <dockerDirectory>src/main/docker</dockerDirectory>
+ <serverId>docker-hub</serverId>
+ <registryUrl>https://${docker.registry}</registryUrl>
+ <imageTags>
+ <imageTag>${project.version}</imageTag>
+ <imageTag>latest</imageTag>
+ </imageTags>
+ <forceTags>true</forceTags>
+ <resources>
+ <resource>
+ <targetPath>/</targetPath>
+ <directory>${project.build.directory}</directory>
+ <include>${project.build.finalName}.jar</include>
+ </resource>
+ <resource>
+ <targetPath>/</targetPath>
+ <directory>${project.build.directory}</directory>
+ <include>opt/etc/config/*</include>
+ </resource>
+ </resources>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.springframework.boot</groupId>
+ <artifactId>spring-boot-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <goals>
+ <goal>repackage</goal>
+ </goals>
+ </execution>
+ </executions>
+ <configuration>
+ <layout>ZIP</layout>
+ </configuration>
+ </plugin>
+ </plugins>
+ <!-- mention the logback.xml location through system property or environment
+ variable to edit logback.xml at run time -->
+ <resources>
+ <!-- <resource>
+ <directory>src/main/resources</directory>
+ <filtering>true</filtering>
+ <includes>
+ <include>**/*</include>
+ </includes>
+ </resource> -->
+ <!-- <resource>
+ <directory>opt/etc/config</directory>
+ <targetPath>../opt/etc/config</targetPath>
+ <filtering>true</filtering>
+ <includes>
+ <include>**/*</include>
+ </includes>
+ </resource> -->
+ <!-- csi-logging-target-resources-goes here -->
+ <!-- csi-logging-target-resources-addon -->
+ </resources>
+ </build>
+ </project>
diff --git a/valetapi/src/main/docker/Dockerfile b/valetapi/src/main/docker/Dockerfile
new file mode 100755
index 0000000..4632d3b
--- /dev/null
+++ b/valetapi/src/main/docker/Dockerfile
@@ -0,0 +1,18 @@
+FROM openjdk:8-jre-alpine
+
+RUN addgroup -g 825 -S valetg && adduser -u 825 -S valetu -G valetg
+RUN mkdir -p /opt/logs/valet/api && chown -R valetu:valetg /opt/logs
+
+VOLUME /tmp
+VOLUME /opt/etc
+VOLUME /opt/bin
+VOLUME /opt/logs
+
+ADD ValetApi.jar /opt/bin/app.jar
+ADD startService.sh /startService.sh
+
+RUN chown valetu:valetg /startService.sh
+USER valetu:valetg
+RUN chmod 700 /startService.sh && date > /home/valetu/imagedate.txt
+
+ENTRYPOINT ./startService.sh
diff --git a/valetapi/src/main/docker/startService.sh b/valetapi/src/main/docker/startService.sh
new file mode 100755
index 0000000..e682425
--- /dev/null
+++ b/valetapi/src/main/docker/startService.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+touch /app.jar
+
+java \
+ -Dloader.path=/opt/etc/config/,/opt/bin/app.jar \
+ -Dlogging.config=file:/opt/etc/config/logback.xml \
+ -Dspring.config.location=file:/opt/etc/config/application.properties,/opt/etc/config/auth.properties,/opt/etc/config/key.properties,/opt/etc/config/resources.properties,/opt/etc/config/version.properties \
+ $java_runtime_arguments \
+ -jar /opt/bin/app.jar
diff --git a/valetapi/src/main/java/org/onap/fgps/api/ApplicationStartup.java b/valetapi/src/main/java/org/onap/fgps/api/ApplicationStartup.java
new file mode 100644
index 0000000..a0f9a95
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/ApplicationStartup.java
@@ -0,0 +1,92 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api;
+
+import java.io.InputStream;
+import java.util.Properties;
+
+import org.onap.fgps.api.dao.SchemaDAO;
+import org.onap.fgps.api.logging.EELFLoggerDelegate;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.context.event.ApplicationReadyEvent;
+import org.springframework.context.ApplicationListener;
+import org.springframework.stereotype.Component;
+import org.onap.fgps.api.utils.UserUtils;
+
+@Component
+public class ApplicationStartup implements ApplicationListener<ApplicationReadyEvent> {
+
+ private SchemaDAO schemaDAO;
+ //private static final Logger LOGGER = LoggerFactory.getLogger(ValetServiceApplication.class);
+ private EELFLoggerDelegate LOGGER = EELFLoggerDelegate.getLogger(ApplicationStartup.class);
+
+
+ @Autowired
+ public ApplicationStartup(SchemaDAO schemaDAO) {
+ super();
+ this.schemaDAO = schemaDAO;
+ }
+
+ /**
+ * This event is executed as late as conceivably possible to indicate that
+ * the application is ready to service requests.
+ */
+ @Override
+ public void onApplicationEvent(final ApplicationReadyEvent event) {
+ Properties props = new Properties();
+ String propFileName = "resources.properties";
+ InputStream inputStream = getClass().getClassLoader().getResourceAsStream(propFileName);
+ try {
+ if (inputStream != null) {
+ props.load(inputStream);
+ } else {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"DBProxy : inputstream is not");
+ }
+ String dbCreate = UserUtils.htmlEscape(props.getProperty("db.create"));
+ System.out.println( dbCreate );
+ if( dbCreate!=null && dbCreate.equals( "true")) {
+ schemaDAO.initializeDatabase();
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ LOGGER.error(EELFLoggerDelegate.applicationLogger,"onApplicationEvent : Error details : "+ e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"onApplicationEvent : Error details : "+ e.getMessage());
+ }
+ return;
+ }
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/ValetServiceApplication.java b/valetapi/src/main/java/org/onap/fgps/api/ValetServiceApplication.java
new file mode 100644
index 0000000..0ec614d
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/ValetServiceApplication.java
@@ -0,0 +1,49 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api;
+
+import org.springframework.boot.SpringApplication;
+import org.springframework.boot.autoconfigure.SpringBootApplication;
+
+@SpringBootApplication
+public class ValetServiceApplication {
+
+ public static void main(String[] args) {
+ SpringApplication.run(ValetServiceApplication.class, args);
+ }
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/WebConfiguration.java b/valetapi/src/main/java/org/onap/fgps/api/WebConfiguration.java
new file mode 100644
index 0000000..cdd103e
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/WebConfiguration.java
@@ -0,0 +1,66 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api;
+
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.web.servlet.config.annotation.ViewControllerRegistry;
+import org.springframework.web.servlet.config.annotation.WebMvcConfigurerAdapter;
+
+@Configuration
+public class WebConfiguration {
+
+ @Bean
+ public WebMvcConfigurerAdapter forwardToIndex() {
+ return new WebMvcConfigurerAdapter() {
+ @Override
+ public void addViewControllers(ViewControllerRegistry registry) {
+ /*
+ registry.addViewController("/swagger").setViewName(
+ "redirect:/swagger/index.html");
+ registry.addViewController("/swagger/").setViewName(
+ "redirect:/swagger/index.html");
+ registry.addViewController("/docs").setViewName(
+ "redirect:/docs/html/index.html");
+ registry.addViewController("/docs/").setViewName(
+ "redirect:/docs/html/index.html");
+ */
+ }
+ };
+ }
+} \ No newline at end of file
diff --git a/valetapi/src/main/java/org/onap/fgps/api/annotation/AafRoleRequired.java b/valetapi/src/main/java/org/onap/fgps/api/annotation/AafRoleRequired.java
new file mode 100644
index 0000000..49398c3
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/annotation/AafRoleRequired.java
@@ -0,0 +1,74 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.annotation;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@Target(ElementType.METHOD)
+@Retention(RetentionPolicy.RUNTIME)
+public @interface AafRoleRequired {
+ /**
+ * Annotates a method to indicate that AAF authorization is required to execute.
+ *
+ * If AAF authentication is used, auth.properties must contain valet.aaf.name and valet.aaf.pass, which contains this application's
+ * AAF credentials.
+ *
+ * See also @PropertyBasedAuthorization.
+ */
+
+ /**
+ * Marks the role required in AAF.
+ * For example, @AafRoleRequired(roleRequired="portal.admin") will check AAF for the "portal.admin" role.
+ */
+ String roleRequired() default "";
+
+ /**
+ * If roleRequired is null or blank, marks the property in auth.properties which contains the role required by AAF.
+ * The property in auth.properties must end with ".role". The property specified in the annotation may omit that suffix.
+ *
+ * For example, if auth.properties contains "portal.admin.role=portal.admin", then either @AafRoleRequired(roleProperty="portal.admin.role")
+ * or @AafRoleRequired(roleProperty="portal.admin") will check AAF for the "portal.admin" role.
+ *
+ * If a roleProperty is specified in an AafRoleRequired annotation and the corresponding role (with or without ".role") is not
+ * present in auth.properties, a MissingRoleException will be thrown.
+ */
+ String roleProperty() default "";
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/annotation/BasicAuthRequired.java b/valetapi/src/main/java/org/onap/fgps/api/annotation/BasicAuthRequired.java
new file mode 100644
index 0000000..a10fd61
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/annotation/BasicAuthRequired.java
@@ -0,0 +1,59 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.annotation;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@Target(ElementType.METHOD)
+@Retention(RetentionPolicy.RUNTIME)
+public @interface BasicAuthRequired {
+ /**
+ * Annotates a method that BasicAuth is required to execute.
+ *
+ * If a method is annotated with @BasicAuth(authRequired="appname"), then auth.properties must include properties named
+ * appname.name and appname.pass . If appname.name is x and appname.pass is the encrypted value of y, then the headers
+ * for calls to this method must contain header "Authentication: Basic [base64 encoded x:y]".
+ *
+ * See also @PropertyBasedAuthorization.
+ */
+
+ String authRequired() default "";
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/annotation/PropertyBasedAuthorization.java b/valetapi/src/main/java/org/onap/fgps/api/annotation/PropertyBasedAuthorization.java
new file mode 100644
index 0000000..9b77c0d
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/annotation/PropertyBasedAuthorization.java
@@ -0,0 +1,64 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.annotation;
+
+import static java.lang.annotation.ElementType.METHOD;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+@Retention(RUNTIME)
+@Target(METHOD)
+public @interface PropertyBasedAuthorization {
+ /**
+ * Annotates a method whose authorization requirements are defined in a property file.
+ *
+ * The auth.properties file should contain one or more lines for each method annotated with this annotation.
+ * If the annotation value is "x", auth.properties should contain either "x.aaf" or "x.basic".
+ * If "x.aaf" is present, the user will be authenticated using AAF and authorized if they have the role which is value of x.aaf.
+ * (See also @AafRoleRequired .)
+ * If "x.basic" is present, with a value of "y", the user will be authorized using Basicauth with username of "y.name" and (encrypted) password of "y.pass".
+ * (See also @BasicAuthRequired .)
+ *
+ * If AAF authentication is used, auth.properties must contain valet.aaf.name and valet.aaf.pass, which contains this application's
+ * AAF credentials.
+ *
+ */
+ String value();
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/beans/KeySpaceRequest.java b/valetapi/src/main/java/org/onap/fgps/api/beans/KeySpaceRequest.java
new file mode 100644
index 0000000..7fc0bc6
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/beans/KeySpaceRequest.java
@@ -0,0 +1,44 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.beans;
+
+public class KeySpaceRequest {
+ private String durabilityOfWrites;
+}
+
+
diff --git a/valetapi/src/main/java/org/onap/fgps/api/beans/Status.java b/valetapi/src/main/java/org/onap/fgps/api/beans/Status.java
new file mode 100644
index 0000000..50a1484
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/beans/Status.java
@@ -0,0 +1,65 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.beans;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonRootName;
+
+@JsonRootName(value="status")
+public class Status {
+ @JsonProperty(value="status_code")
+ private String statusCode;
+
+ @JsonProperty(value="status_message")
+ private String statusMessage;
+
+ public Status(String statusCode, String statusMessage) {
+ super();
+ this.statusCode = statusCode;
+ this.statusMessage = statusMessage;
+ }
+
+ public String getStatusCode() {
+ return statusCode;
+ }
+
+ public String getStatusMessage() {
+ return statusMessage;
+ }
+
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/beans/schema/Schema.java b/valetapi/src/main/java/org/onap/fgps/api/beans/schema/Schema.java
new file mode 100644
index 0000000..a9f1fbb
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/beans/schema/Schema.java
@@ -0,0 +1,262 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 AT&T Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.beans.schema;
+
+import org.json.simple.JSONObject;
+import org.onap.fgps.api.utils.Constants;
+import org.springframework.stereotype.Component;
+
+import com.fasterxml.uuid.Generators;
+
+@Component
+public class Schema {
+ @SuppressWarnings("unchecked")
+ public static JSONObject getCommonTableSchema() {
+ JSONObject jsonRequest = new JSONObject();
+ JSONObject properties = new JSONObject();
+ JSONObject compression = new JSONObject();
+ JSONObject compaction = new JSONObject();
+ JSONObject consistencyInfo = new JSONObject();
+
+ compression.put("sstable_compression", "DeflateCompressor");
+ compression.put("chunk_length_kb", 64);
+
+ compaction.put("class", "SizeTieredCompactionStrategy");
+ compaction.put("min_threshold", 6);
+
+ properties.put("compression", compression);
+ properties.put("compaction", compaction);
+
+ consistencyInfo.put("type", "eventual");
+
+ jsonRequest.put("properties", properties);
+ jsonRequest.put("consistencyInfo", consistencyInfo);
+ return jsonRequest;
+ }
+
+ @SuppressWarnings("unchecked")
+ public static String getRequestTableSchema() {
+ JSONObject fields = new JSONObject();
+
+ fields.put("request_id", "varchar");
+ fields.put("timestamp", "varchar");
+ fields.put("request", "varchar");
+ fields.put("PRIMARY KEY", "(request_id)");
+
+ JSONObject jsonRequest = getCommonTableSchema();
+ jsonRequest.put("fields", fields);
+ return jsonRequest.toJSONString();
+ }
+
+ @SuppressWarnings("unchecked")
+ public static String getResultsTableSchema() {
+ JSONObject fields = new JSONObject();
+
+ fields.put("request_id", "varchar");
+ fields.put("status", "varchar");
+ fields.put("timestamp", "varchar");
+ fields.put("result", "varchar");
+ fields.put("PRIMARY KEY", "(request_id)");
+
+ JSONObject jsonRequest = getCommonTableSchema();
+ jsonRequest.put("fields", fields);
+ return jsonRequest.toJSONString();
+ }
+
+ @SuppressWarnings("unchecked")
+ public static String getGroupsRulesTableSchema() {
+ JSONObject fields = new JSONObject();
+
+ fields.put("id", "varchar");
+ fields.put("app_scope", "varchar");
+ fields.put("type", "varchar");
+ fields.put("level", "varchar");
+ fields.put("members", "varchar");
+ fields.put("description", "varchar");
+ fields.put("groups", "varchar");
+ fields.put("status", "varchar");
+ fields.put("timestamp", "varchar");
+ fields.put("PRIMARY KEY", "(id)");
+
+ JSONObject jsonRequest = getCommonTableSchema();
+ jsonRequest.put("fields", fields);
+ return jsonRequest.toJSONString();
+
+ }
+
+ @SuppressWarnings("unchecked")
+ public static String getStacksTableSchema() {
+ JSONObject fields = new JSONObject();
+
+ fields.put("id", "varchar");
+ fields.put("last_status", "varchar");
+ fields.put("datacenter", "varchar");
+ fields.put("stack_name", "varchar");
+ fields.put("uuid", "varchar");
+ fields.put("tenant_id", "varchar");
+ fields.put("metadata", "varchar");
+ fields.put("servers", "varchar");
+ fields.put("prior_servers", "varchar");
+ fields.put("state", "varchar");
+ fields.put("prior_State", "varchar");
+ fields.put("timestamp", "varchar");
+ fields.put("PRIMARY KEY", "(id)");
+
+ JSONObject jsonRequest = getCommonTableSchema();
+ jsonRequest.put("fields", fields);
+ return jsonRequest.toJSONString();
+ }
+
+ @SuppressWarnings("unchecked")
+ public static String getStacksIdMapTableSchema() {
+ JSONObject fields = new JSONObject();
+
+ fields.put("request_id", "varchar");
+ fields.put("stack_id", "varchar");
+ fields.put("timestamp", "varchar");
+ fields.put("PRIMARY KEY", "(request_id)");
+ JSONObject jsonRequest = getCommonTableSchema();
+ jsonRequest.put("fields", fields);
+ return jsonRequest.toJSONString();
+ }
+
+ @SuppressWarnings("unchecked")
+ public static String getResourcesTableSchema() {
+ JSONObject fields = new JSONObject();
+
+ fields.put("id", "varchar");
+ fields.put("url", "varchar");
+ fields.put("resource", "varchar");
+ fields.put("timestamp", "varchar");
+ fields.put("PRIMARY KEY", "(id)");
+ fields.put("requests", "varchar");
+
+ JSONObject jsonRequest = getCommonTableSchema();
+ jsonRequest.put("fields", fields);
+ return jsonRequest.toJSONString();
+ }
+ @SuppressWarnings("unchecked")
+ public static String getRegionsTableSchema() {
+ JSONObject fields = new JSONObject();
+
+ fields.put("region_id ", "varchar");
+ fields.put("PRIMARY KEY", "(region_id)");
+ fields.put("timestamp", "varchar");
+ fields.put("last_updated ", "varchar");
+ fields.put("keystone_url", "varchar");
+ fields.put("locked_by", "varchar");
+ fields.put("locked_time ", "varchar");
+ //fields.put("locked_time ", "varchar");
+ fields.put("expire_time", "varchar");
+
+ JSONObject jsonRequest = getCommonTableSchema();
+ jsonRequest.put("fields", fields);
+ return jsonRequest.toJSONString();
+ }
+
+ @SuppressWarnings("unchecked")
+ public static String getGroupsTableSchema() {
+ JSONObject fields = new JSONObject();
+
+ fields.put("id ", "varchar");
+ fields.put("PRIMARY KEY", "id");
+ fields.put("uuid", "varchar");
+ fields.put("type ", "varchar");
+ fields.put("level", "varchar");
+ fields.put("factory", "varchar");
+ fields.put("rule_id ", "varchar");
+ fields.put("metadata ", "varchar");
+ fields.put("server_list", "varchar");
+ fields.put("member_hosts", "varchar");
+ fields.put("status", "varchar");
+
+
+ JSONObject jsonRequest = getCommonTableSchema();
+ jsonRequest.put("fields", fields);
+ return jsonRequest.toJSONString();
+ }
+ @SuppressWarnings("unchecked")
+ public String formMsoInsertUpdateRequest(String requestId, String operation, String request) {
+ JSONObject jsonRequest = new JSONObject();
+ JSONObject values = new JSONObject();
+ JSONObject consistencyInfo = new JSONObject();
+ String request_id = requestId == null ? Generators.timeBasedGenerator().generate().toString()
+ : operation + '-' + requestId;
+
+ values.put(Constants.HEAT_REQUEST_REQUEST_ID, request_id);
+ values.put(Constants.HEAT_REQUEST_TIMESTAMP, System.currentTimeMillis());
+ values.put(Constants.HEAT_REQUEST_REQUEST, request);
+ consistencyInfo.put("type", "eventual");
+
+ jsonRequest.put("values", values);
+ jsonRequest.put("consistencyInfo", consistencyInfo);
+
+ return jsonRequest.toJSONString();
+ }
+ @SuppressWarnings("unchecked")
+ public String formHealthCheckRequest(String requestId, String operation, String request) {
+ JSONObject jsonRequest = new JSONObject();
+ JSONObject values = new JSONObject();
+ JSONObject consistencyInfo = new JSONObject();
+ String request_id = requestId == null ? Generators.timeBasedGenerator().generate().toString()
+ : operation + '-' + requestId;
+
+ values.put(Constants.HEAT_REQUEST_REQUEST_ID, request_id);
+ values.put(Constants.HEAT_REQUEST_TIMESTAMP, -1);
+ values.put(Constants.HEAT_REQUEST_REQUEST, request);
+ consistencyInfo.put("type", "eventual");
+
+ jsonRequest.put("values", values);
+ jsonRequest.put("consistencyInfo", consistencyInfo);
+
+ return jsonRequest.toJSONString();
+ }
+
+
+ @SuppressWarnings("unchecked")
+ public String formMsoDeleteRequest() {
+ JSONObject jsonRequest = new JSONObject();
+ JSONObject consistencyInfo = new JSONObject();
+
+ consistencyInfo.put("type", "eventual");
+ jsonRequest.put("consistencyInfo", consistencyInfo);
+
+ return jsonRequest.toJSONString();
+ }
+}
+
diff --git a/valetapi/src/main/java/org/onap/fgps/api/config/HttpConfig.java b/valetapi/src/main/java/org/onap/fgps/api/config/HttpConfig.java
new file mode 100644
index 0000000..83fe632
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/config/HttpConfig.java
@@ -0,0 +1,82 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.config;
+
+import org.apache.catalina.connector.Connector;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.web.embedded.tomcat.TomcatServletWebServerFactory;
+import org.springframework.boot.web.server.WebServerFactoryCustomizer;
+import org.springframework.boot.web.servlet.server.ConfigurableServletWebServerFactory;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+@Configuration
+public class HttpConfig {
+
+    // Optional plain-HTTP port; -1 (the default) means no extra connector is added.
+    @Value("${server.http.port:-1}")
+    private int httpPort;
+
+    /**
+     * Registers a second, plain-HTTP Tomcat connector on {@code server.http.port}
+     * alongside the primary connector. When the property is unset (-1) the
+     * returned customizer is a no-op.
+     *
+     * Fixed: removed a leftover {@code System.out.println} debug print that
+     * bypassed the logging framework; anonymous inner classes replaced with
+     * lambdas (WebServerFactoryCustomizer is a functional interface).
+     *
+     * @return customizer applied by Spring Boot to the embedded servlet container
+     */
+    @Bean
+    public WebServerFactoryCustomizer<ConfigurableServletWebServerFactory> containerCustomizer() {
+        if (httpPort == -1) {
+            // No HTTP port configured: leave the container untouched.
+            return factory -> { };
+        }
+
+        return factory -> {
+            // Only Tomcat supports additional connectors via this API.
+            if (factory instanceof TomcatServletWebServerFactory) {
+                TomcatServletWebServerFactory containerFactory =
+                        (TomcatServletWebServerFactory) factory;
+
+                Connector connector = new Connector(TomcatServletWebServerFactory.DEFAULT_PROTOCOL);
+                connector.setPort(httpPort);
+                containerFactory.addAdditionalTomcatConnectors(connector);
+            }
+        };
+    }
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/config/SpringServletConfig.java b/valetapi/src/main/java/org/onap/fgps/api/config/SpringServletConfig.java
new file mode 100644
index 0000000..4fd84df
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/config/SpringServletConfig.java
@@ -0,0 +1,71 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.config;
+
+import org.onap.fgps.api.interceptor.AuthorizationInterceptor;
+import org.onap.fgps.api.interceptor.DarknessInterceptor;
+import org.onap.fgps.api.interceptor.VersioningInterceptor;
+import org.onap.fgps.api.proxy.AAFProxy;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.web.servlet.config.annotation.InterceptorRegistry;
+import org.springframework.web.servlet.config.annotation.WebMvcConfigurerAdapter;
+
+@Configuration
+public class SpringServletConfig extends WebMvcConfigurerAdapter {
+ private final boolean valetDark;
+ private final String aafUrl;
+ private final boolean aafAuthFlag;
+ private final boolean basicAuthFlag;
+
+ @Autowired
+ public SpringServletConfig(@Value("${valet.dark:false}") boolean valetDark, @Value("${aaf.url.base:}") String aafUrl, @Value("${authentication.aaf:true}") boolean aafAuthFlag, @Value("${authentication.basic:true}") boolean basicAuthFlag) {
+ this.valetDark = valetDark;
+ this.aafUrl = aafUrl;
+ this.aafAuthFlag = aafAuthFlag;
+ this.basicAuthFlag = basicAuthFlag;
+ }
+
+ @Override
+ public void addInterceptors(InterceptorRegistry registry) {
+ if (valetDark) registry.addInterceptor(new DarknessInterceptor());
+ if (aafUrl!=null && aafUrl.length()>0) registry.addInterceptor(new AuthorizationInterceptor(new AAFProxy(aafUrl), aafAuthFlag, basicAuthFlag));
+ registry.addInterceptor(new VersioningInterceptor());
+ }
+} \ No newline at end of file
diff --git a/valetapi/src/main/java/org/onap/fgps/api/controller/AdminController.java b/valetapi/src/main/java/org/onap/fgps/api/controller/AdminController.java
new file mode 100644
index 0000000..0f49ae4
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/controller/AdminController.java
@@ -0,0 +1,66 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.controller;
+
+import org.onap.fgps.api.dao.SchemaDAO;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.web.bind.annotation.CrossOrigin;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RequestMethod;
+import org.springframework.web.bind.annotation.RestController;
+
+@CrossOrigin(origins = "*")
+@RestController
+@EnableAutoConfiguration
+@RequestMapping("/admin/")
+public class AdminController {
+
+    private final SchemaDAO schemaDAO;
+
+    /** Injects the DAO used to (re)create the backing database schema. */
+    @Autowired
+    public AdminController(SchemaDAO schemaDAO) {
+        this.schemaDAO = schemaDAO;
+    }
+
+    /**
+     * POST /admin/create — initializes the backing database schema and returns
+     * the DAO's status string.
+     * NOTE(review): the method name suggests a VM query but the endpoint
+     * creates the schema; presumably a leftover name — confirm before renaming.
+     */
+    @RequestMapping(value = "create", method = RequestMethod.POST)
+    public String getVmDetails() {
+        return schemaDAO.initializeDatabase();
+    }
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/controller/ValetGroupsController.java b/valetapi/src/main/java/org/onap/fgps/api/controller/ValetGroupsController.java
new file mode 100644
index 0000000..fc8d402
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/controller/ValetGroupsController.java
@@ -0,0 +1,144 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.controller;
+
+import javax.servlet.http.HttpServletRequest;
+
+import org.json.simple.JSONObject;
+import org.onap.fgps.api.annotation.PropertyBasedAuthorization;
+import org.onap.fgps.api.logging.EELFLoggerDelegate;
+import org.onap.fgps.api.logging.aspect.AuditLog;
+import org.onap.fgps.api.logging.aspect.MetricsLog;
+import org.onap.fgps.api.service.ValetGroupsService;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.context.annotation.EnableAspectJAutoProxy;
+import org.springframework.http.ResponseEntity;
+import org.springframework.web.bind.annotation.CrossOrigin;
+import org.springframework.web.bind.annotation.RequestBody;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RequestMethod;
+import org.springframework.web.bind.annotation.RequestParam;
+import org.springframework.web.bind.annotation.RestController;
+import org.onap.fgps.api.utils.UserUtils;
+
+@CrossOrigin(origins = "*")
+@RestController
+@EnableAutoConfiguration
+@RequestMapping("/groups/v1/")
+@EnableAspectJAutoProxy
+@AuditLog
+@MetricsLog
+public class ValetGroupsController {
+ private ValetGroupsService valetGroupsService;
+ private EELFLoggerDelegate LOGGER = EELFLoggerDelegate.getLogger(ValetGroupsController.class);
+ @Autowired
+ public ValetGroupsController(ValetGroupsService valetGroupsService) {
+ super();
+ this.valetGroupsService = valetGroupsService;
+ }
+
+
+ @PropertyBasedAuthorization("groups.query")
+ @SuppressWarnings("unchecked")
+ @RequestMapping(consumes = "application/json", method = RequestMethod.GET)
+ public ResponseEntity<String> queryGroups(@RequestParam("requestId") String requestId,
+ @RequestParam(value = "name", required = false) String name,
+ @RequestParam(value = "datacenter_id", required = false) String datacenterId,
+ @RequestParam(value = "host", required = false) String host) {
+ requestId = UserUtils.htmlEscape(requestId);
+ name = UserUtils.htmlEscape(name);
+ datacenterId = UserUtils.htmlEscape(datacenterId);
+ host = UserUtils.htmlEscape(host);
+
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"queryGroups controller - ", requestId);
+ JSONObject requestJson = new JSONObject();
+ if(name !=null && datacenterId !=null) {
+ requestJson.put("name", name);
+ requestJson.put("datacenter_id", datacenterId);
+ }
+ LOGGER.debug(EELFLoggerDelegate.debugLogger,"queryGroups: Initiating request to query groups for requestJson: {}, requestId: {}", requestJson, requestId);
+ return valetGroupsService.saveGroupsRequest(requestJson, "group_query", requestId);
+ }
+ @PropertyBasedAuthorization("groups.create")
+ @RequestMapping(consumes = "application/json", method = RequestMethod.POST)
+ public ResponseEntity<String> getCreateDetails(HttpServletRequest httpRequest, @RequestBody JSONObject request, @RequestParam("requestId") String requestId) {
+ requestId = UserUtils.htmlEscape(requestId);
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"getCreateDetails controller - ", requestId);
+ LOGGER.debug(EELFLoggerDelegate.debugLogger,"getCreateDetails: Initiating request to get create groups details for requestJson: {}, requestId: {}", request, requestId);
+ return valetGroupsService.saveGroupsRequest(request, "group_create", requestId);
+ }
+ @PropertyBasedAuthorization("groups.update")
+ @RequestMapping(consumes = "application/json", method = RequestMethod.PUT)
+ public ResponseEntity<String> getUpdateDetails(HttpServletRequest httpRequest, @RequestBody JSONObject request, @RequestParam("requestId") String requestId) {
+ requestId = UserUtils.htmlEscape(requestId);
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"getUpdateDetails controller - ", requestId);
+ LOGGER.debug(EELFLoggerDelegate.debugLogger,"getCreateDetails: Initiating request to get update groups details for requestJson: {}, requestId: {}", request, requestId);
+ return valetGroupsService.saveGroupsRequest(request, "group_update", requestId);
+ }
+ @PropertyBasedAuthorization("groups.delete")
+ @RequestMapping(consumes = "application/json", method = RequestMethod.DELETE)
+ public ResponseEntity<String> getDeleteDetails(HttpServletRequest httpRequest, @RequestBody JSONObject request, @RequestParam("requestId") String requestId) {
+ requestId = UserUtils.htmlEscape(requestId);
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"getDeleteDetails controller - ", requestId);
+ LOGGER.debug(EELFLoggerDelegate.debugLogger,"getDeleteDetails: Initiating request to get delete groups details for requestJson: {}, requestId: {}", request, requestId);
+ return valetGroupsService.saveGroupsRequest(request, "group_delete", requestId);
+ }
+
+ //j unit test cases controller
+ @SuppressWarnings("unchecked")
+ @RequestMapping(value = "portal", method = RequestMethod.GET)
+ public ResponseEntity<String> queryGroups1(@RequestParam("requestId") String requestId,
+ @RequestParam(value = "name", required = false) String name,
+ @RequestParam(value = "datacenter_id", required = false) String datacenterId,
+ @RequestParam(value = "host", required = false) String host) {
+ requestId = UserUtils.htmlEscape(requestId);
+ name = UserUtils.htmlEscape(name);
+ datacenterId = UserUtils.htmlEscape(datacenterId);
+ host = UserUtils.htmlEscape(host);
+
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"queryGroups controller - ", requestId);
+ JSONObject requestJson = new JSONObject();
+ if(name !=null && datacenterId !=null) {
+ requestJson.put("name", name);
+ requestJson.put("datacenter_id", datacenterId);
+ }
+
+ return valetGroupsService.saveGroupsRequest1(requestJson, "group_query", requestId);
+ }
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/controller/ValetServicePlacementController.java b/valetapi/src/main/java/org/onap/fgps/api/controller/ValetServicePlacementController.java
new file mode 100644
index 0000000..465fdce
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/controller/ValetServicePlacementController.java
@@ -0,0 +1,150 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.controller;
+
+import javax.servlet.http.HttpServletRequest;
+
+import org.json.simple.JSONObject;
+import org.onap.fgps.api.annotation.PropertyBasedAuthorization;
+import org.onap.fgps.api.helpers.Helper;
+import org.onap.fgps.api.logging.EELFLoggerDelegate;
+import org.onap.fgps.api.logging.aspect.AuditLog;
+import org.onap.fgps.api.logging.aspect.MetricsLog;
+import org.onap.fgps.api.service.ValetPlacementService;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.context.annotation.EnableAspectJAutoProxy;
+import org.springframework.http.ResponseEntity;
+import org.springframework.web.bind.annotation.CrossOrigin;
+import org.springframework.web.bind.annotation.PathVariable;
+import org.springframework.web.bind.annotation.RequestBody;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RequestMethod;
+import org.springframework.web.bind.annotation.RequestParam;
+import org.springframework.web.bind.annotation.RestController;
+import org.onap.fgps.api.utils.UserUtils;
+
+@CrossOrigin(origins = "*")
+@RestController
+@EnableAutoConfiguration
+@RequestMapping("/placement/v1/")
+@EnableAspectJAutoProxy
+@AuditLog
+@MetricsLog
+public class ValetServicePlacementController {
+
+    private final ValetPlacementService valetPlacementService;
+    private static final EELFLoggerDelegate LOGGER = EELFLoggerDelegate.getLogger(ValetServicePlacementController.class);
+
+    @Autowired
+    public ValetServicePlacementController(ValetPlacementService valetPlacementService) {
+        super();
+        this.valetPlacementService = valetPlacementService;
+    }
+
+    /** POST /placement/v1/ — requests placement for a new VM. */
+    @PropertyBasedAuthorization("placement.create")
+    @RequestMapping(consumes = "application/json", method = RequestMethod.POST)
+    public ResponseEntity<String> createVm(HttpServletRequest httpRequest, @RequestBody JSONObject request, @RequestParam("requestId") String requestId) {
+        requestId = UserUtils.htmlEscape(requestId);
+        LOGGER.debug(EELFLoggerDelegate.debugLogger,"createVm: Initiating request to create VM for request: {}, requestId: {}", request, requestId);
+        return valetPlacementService.processMSORequest1(request, requestId, "create");
+    }
+
+    /** PUT /placement/v1/ — requests placement for a VM update. */
+    @PropertyBasedAuthorization("placement.update")
+    @RequestMapping( consumes = "application/json", method = RequestMethod.PUT)
+    public ResponseEntity<String> updateVm(HttpServletRequest httpRequest, @RequestBody JSONObject request, @RequestParam("requestId") String requestId) {
+        requestId = UserUtils.htmlEscape(requestId);
+        LOGGER.debug(EELFLoggerDelegate.debugLogger,"updateVm: Initiating request to update VM for request: {}, requestId: {}", request, requestId);
+        return valetPlacementService.processMSORequest1(request,requestId,"update");
+    }
+
+    /** DELETE /placement/v1/ — records a VM delete request (reformatted via Helper). */
+    @PropertyBasedAuthorization("placement.delete")
+    @RequestMapping(consumes = "application/json", method = RequestMethod.DELETE)
+    public ResponseEntity<String> deleteVm(HttpServletRequest httpRequest, @RequestBody JSONObject request, @RequestParam("requestId") String requestId) {
+        requestId = UserUtils.htmlEscape(requestId);
+        LOGGER.debug(EELFLoggerDelegate.debugLogger,"deleteVm: Initiating request to delete VM for request: {}, requestId: {}", request, requestId);
+        return valetPlacementService.saveRequest(Helper.formatDeleteRequest(request), "delete", requestId);
+    }
+
+    /** PUT /placement/v1/{priorRequestId}/confirm — confirms a prior placement. */
+    @PropertyBasedAuthorization("placement.confirm")
+    @RequestMapping(value = "/{priorRequestId}/confirm", consumes = "application/json", method = RequestMethod.PUT)
+    public ResponseEntity<String> confirm(HttpServletRequest httpRequest, @PathVariable("priorRequestId") String priorRequestId, @RequestBody JSONObject request){
+        priorRequestId = UserUtils.htmlEscape(priorRequestId);
+        LOGGER.debug(EELFLoggerDelegate.debugLogger,"confirm: Initiating request to confirm VM for request: {}, priorRequestId: {}", request, priorRequestId);
+        return valetPlacementService.saveRequest(request, "confirm", priorRequestId);
+    }
+
+    /** PUT /placement/v1/{priorRequestId}/rollback — rolls back a prior placement. */
+    @PropertyBasedAuthorization("placement.rollback")
+    @RequestMapping(value = "/{priorRequestId}/rollback", consumes = "application/json", method = RequestMethod.PUT)
+    public ResponseEntity<String> rollback(HttpServletRequest httpRequest, @PathVariable("priorRequestId") String priorRequestId, @RequestBody JSONObject request) {
+        priorRequestId = UserUtils.htmlEscape(priorRequestId);
+        LOGGER.debug(EELFLoggerDelegate.debugLogger,"rollback: Initiating request to rollback VM for request: {}, priorRequestId: {}", request, priorRequestId);
+        return valetPlacementService.saveRequest(request, "rollback", priorRequestId);
+    }
+
+    // Unit-test mocked controllers below.
+
+    @RequestMapping(value = "/createVM2", consumes = "application/json", method = RequestMethod.POST)
+    public ResponseEntity<String> createVm2(@RequestBody JSONObject request, @RequestParam("requestId") String requestId) {
+        requestId = UserUtils.htmlEscape(requestId);
+        // Fixed defect: processMSORequest2 was invoked twice, with the first
+        // ResponseEntity assigned to an unused local and discarded.
+        return valetPlacementService.processMSORequest2(request, requestId);
+    }
+
+    @RequestMapping(value = "/updateVm1", consumes = "application/json", method = RequestMethod.PUT)
+    public ResponseEntity<String> updateVm1(@RequestBody JSONObject request, @RequestParam("requestId") String requestId) {
+        requestId = UserUtils.htmlEscape(requestId);
+        return valetPlacementService.processMSORequest2(request,requestId);
+    }
+
+    @RequestMapping(value = "/deleteVm1",consumes = "application/json", method = RequestMethod.DELETE)
+    public ResponseEntity<String> deleteVm1(@RequestBody JSONObject request, @RequestParam("requestId") String requestId) {
+        requestId = UserUtils.htmlEscape(requestId);
+        return valetPlacementService.saveRequesttest(Helper.formatDeleteRequest(request), "delete", requestId);
+    }
+
+    @RequestMapping(value = "/{priorRequestId}/confirm1", consumes = "application/json", method = RequestMethod.PUT)
+    public ResponseEntity<String> confirm1(@PathVariable("priorRequestId") String priorRequestId, @RequestBody JSONObject request) {
+        priorRequestId = UserUtils.htmlEscape(priorRequestId);
+        return valetPlacementService.saveRequesttest(request, "confirm", priorRequestId);
+    }
+
+    @RequestMapping(value = "/{priorRequestId}/rollback1", consumes = "application/json", method = RequestMethod.PUT)
+    public ResponseEntity<String> rollback1(@PathVariable("priorRequestId") String priorRequestId, @RequestBody JSONObject request) {
+        priorRequestId = UserUtils.htmlEscape(priorRequestId);
+        return valetPlacementService.saveRequesttest(request, "rollback", priorRequestId);
+    }
+
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/controller/ValetUtilityController.java b/valetapi/src/main/java/org/onap/fgps/api/controller/ValetUtilityController.java
new file mode 100644
index 0000000..ef5d673
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/controller/ValetUtilityController.java
@@ -0,0 +1,298 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.controller;
+
+import java.io.InputStream;
+import java.util.Properties;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.json.simple.JSONObject;
+import org.json.simple.parser.JSONParser;
+import org.json.simple.parser.ParseException;
+import org.onap.fgps.api.annotation.AafRoleRequired;
+import org.onap.fgps.api.annotation.BasicAuthRequired;
+import org.onap.fgps.api.annotation.PropertyBasedAuthorization;
+import org.onap.fgps.api.beans.Status;
+import org.onap.fgps.api.beans.schema.Schema;
+import org.onap.fgps.api.dao.ValetServicePlacementDAO;
+import org.onap.fgps.api.logging.EELFLoggerDelegate;
+import org.onap.fgps.api.utils.Constants;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.http.ResponseEntity;
+import org.springframework.web.bind.annotation.CrossOrigin;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RequestMethod;
+import org.springframework.web.bind.annotation.RestController;
+import org.onap.fgps.api.utils.UserUtils;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
+
+@CrossOrigin(origins = "*")
+@RestController
+@EnableAutoConfiguration
+@RequestMapping("/")
+public class ValetUtilityController {
+
+ //static final Logger LOGGER = LoggerFactory.getLogger(ValetServiceApplication.class);
+ private static EELFLoggerDelegate LOGGER = EELFLoggerDelegate.getLogger(ValetGroupsController.class);
+
+ @Value("${logging.ping:false}")
+ private boolean pingLogFlag;
+
+ @RequestMapping(value = "/alive", produces = "text/plain")
+ public String alive() {
+ return "ok";
+ }
+
+ @SuppressWarnings("unchecked")
+ @RequestMapping(value = "/ping", method = RequestMethod.GET)
+ public ResponseEntity<String> ping() {
+ JSONObject pingResponse = new JSONObject();
+ JSONObject valetStatus = new JSONObject();
+ boolean allOk = true;
+
+ valetStatus.put("valet_service", "ok");
+ try {
+ ValetServicePlacementDAO valetServicePlacementDAO = new ValetServicePlacementDAO(pingLogFlag);
+ String response = valetServicePlacementDAO.getRow("pingRequest");
+ if(response.contains("DBRequest Failed"))
+ valetStatus.put("db_service", "Failed");
+ else
+ valetStatus.put("db_service", "OK");
+ } catch (Exception e) {
+ valetStatus.put("DB_Service", "failed");
+ allOk = false;
+ LOGGER.error(EELFLoggerDelegate.applicationLogger,"Ping failed!, Error details : "+e.getMessage());
+ }
+
+ pingResponse.put("status", valetStatus);
+ if (allOk) {
+ if(pingLogFlag) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger, "Ping ok");
+ }
+ return ResponseEntity.ok(pingResponse.toJSONString());
+ }
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"Ping failed!");
+ return ResponseEntity.status(503).body(pingResponse.toJSONString());
+
+ }
+
+ @SuppressWarnings("unchecked")
+
+ @RequestMapping(value = "/healthcheck", method = RequestMethod.GET)
+
+ public ResponseEntity<String> healthcheck() {
+
+ ValetServicePlacementDAO valetServicePlacementDAO = new ValetServicePlacementDAO();
+
+ Schema schema = new Schema();
+
+ JSONObject pingResponse = new JSONObject();
+
+ JSONObject valetStatus = new JSONObject();
+
+ valetStatus.put("valet_service", "ok");
+ boolean allOk = true;
+
+ try {
+
+ JSONObject jObj = new JSONObject();
+
+ Properties props = new Properties();
+
+ String propFileName = "resources.properties";
+
+ InputStream inputStream = getClass().getClassLoader().getResourceAsStream(propFileName);
+ if (inputStream != null) {
+
+ props.load(inputStream);
+
+ } else {
+
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"DBProxy : inputstream is not");
+
+ }
+ String timeStamp = System.currentTimeMillis() + "";
+ jObj.put("id", UserUtils.htmlEscape(props.getProperty("instanceId")) );
+ String dbRequest = schema.formHealthCheckRequest(timeStamp, "ping", jObj.toJSONString());
+
+ String insertRow = valetServicePlacementDAO.insertRow(dbRequest);
+ boolean status = pollForResult(jObj, "ping-" + timeStamp, Constants.WAIT_UNITL_SECONDS,
+
+ Constants.POLL_EVERY_SECONDS);
+ if (!status) allOk = false;
+
+ valetStatus.put("DB_Service", "ok");
+
+ valetStatus.put("valet_engine", status ? "ok" : "not ok");
+
+ } catch (Exception e) {
+
+ valetStatus.put("DB_Service", "not ok");
+ allOk = false;
+ }
+
+ pingResponse.put("status", valetStatus);
+
+ if (allOk) return ResponseEntity.ok(pingResponse.toJSONString());
+ return ResponseEntity.status(503).body(pingResponse.toJSONString());
+ }
+
+ public static JSONObject parseToJSON(String jsonString) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"parseToJSON : parsing json");
+ JSONParser parser = new JSONParser();
+ try {
+ JSONObject json = (JSONObject) parser.parse(jsonString);
+ return json;
+ } catch (ParseException e) {
+ e.printStackTrace();
+ LOGGER.error(EELFLoggerDelegate.applicationLogger,"parseToJSON: Error details: "+ e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"parseToJSON: Error details: "+ e.getMessage());
+ return null;
+ }
+ }
+
+ public boolean pollForResult(JSONObject values, String requestId, int waitUntilSeconds, int pollEverySeconds) {
+ LOGGER.info("pollForResult : called", requestId);
+ ValetServicePlacementDAO valetServicePlacementDAO = new ValetServicePlacementDAO();
+ Schema schema = new Schema();
+
+ String result = null;
+ long waitUntil = System.currentTimeMillis() + (1000 * waitUntilSeconds);
+ int counter = 1;
+
+ JSONObject response = new JSONObject();
+ while (true) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"pollForResult : polling database - ", counter++);
+
+ result = valetServicePlacementDAO.getRowFromResults(requestId);
+ System.out.println("getRowFromResults called count:" + counter);
+ response = result != null ? parseToJSON(result) : null;
+
+ if (response != null && ((JSONObject) response.get("result")).get("row 0") != null) {
+ LOGGER.debug(EELFLoggerDelegate.debugLogger,"pollForResult : response recieved", result);
+ System.out.println("deleteRowFromResults called");
+ valetServicePlacementDAO.deleteRowFromResults(requestId, schema.formMsoDeleteRequest());
+
+ }
+ if (System.currentTimeMillis() < waitUntil && response == null
+ || ((JSONObject) response.get("result")).get("row 0") == null) {
+ try {
+ Thread.sleep(1000 * pollEverySeconds);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"pollForResult: Error details: "+ e.getMessage());
+ }
+ } else {
+ break;
+ }
+ }
+ if (System.currentTimeMillis() > waitUntil) {
+ return false;
+ }
+
+ return true;
+ }
+
+ @AafRoleRequired(roleRequired = "org.onap.portal.valet.admin")
+ @RequestMapping(value = "/sample", produces = "application/json")
+ public String sample(HttpServletRequest request, HttpServletResponse response) {
+ return okMessage("Sample page doesn't do anything.");
+ }
+
+ @AafRoleRequired(roleProperty = "portal.admin.role")
+ @RequestMapping(value = "/sample1", produces = "application/json")
+ public String sample1(HttpServletRequest request, HttpServletResponse response) {
+ return okMessage("Sample page does not do anything.");
+ }
+
+ @BasicAuthRequired(authRequired = "portal")
+ @RequestMapping(value = "/sample2", produces = "application/json")
+ public String sample2(HttpServletRequest request, HttpServletResponse response) {
+ return okMessage("Sample page doesn't do a thing.");
+ }
+
+ @PropertyBasedAuthorization("sample3")
+ @RequestMapping(value = "/sample3", produces = "application/json")
+ public String sample3(HttpServletRequest request, HttpServletResponse response) {
+ return okMessage("Sample page does nothing.");
+ }
+
+ @RequestMapping(value = "/dark", produces = "application/json")
+ public String darkMessage(HttpServletRequest request, HttpServletResponse response) {
+ response.setStatus(400);
+ return failureMessage("Valet is running dark.");
+ }
+
+ @RequestMapping(value = "/authfail", produces = "application/json")
+ public String authFail(HttpServletRequest request, HttpServletResponse response) {
+ response.setStatus(401);
+ return failureMessage(request.getAttribute("fail"));
+ }
+
+ private String okMessage(Object message) {
+ return returnMessage("ok", message);
+ }
+
+ private String failureMessage(Object message) {
+ return returnMessage("failed", message);
+ }
+
+ private String returnMessage(String status, Object message) {
+ Status s = null;
+ if (message == null) {
+ s = new Status(status, "No failure message.");
+ } else {
+ s = new Status(status, message.toString());
+ }
+
+ ObjectMapper mapper = new ObjectMapper();
+ mapper.configure(SerializationFeature.WRAP_ROOT_VALUE, true);
+ try {
+ return mapper.writeValueAsString(s);
+ } catch (JsonProcessingException e) {
+ return "{\"status\": {\"status_code\": \"failed\", \"status_message\": \"Failed to generate failure string?!?\"} }";
+ }
+ }
+
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/dao/SchemaDAO.java b/valetapi/src/main/java/org/onap/fgps/api/dao/SchemaDAO.java
new file mode 100644
index 0000000..23ed463
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/dao/SchemaDAO.java
@@ -0,0 +1,177 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.dao;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.text.MessageFormat;
+import java.util.Properties;
+
+import org.json.simple.JSONObject;
+import org.json.simple.parser.JSONParser;
+import org.json.simple.parser.ParseException;
+import org.onap.fgps.api.beans.schema.Schema;
+import org.onap.fgps.api.logging.EELFLoggerDelegate;
+import org.onap.fgps.api.proxy.DBProxy;
+import org.onap.fgps.api.utils.Constants;
+import org.onap.fgps.api.utils.DBInitializationRequests;
+import org.onap.fgps.api.utils.MusicDBConstants;
+import org.springframework.stereotype.Component;
+import org.onap.fgps.api.utils.UserUtils;
+
+@Component
+public class SchemaDAO {
+ private static EELFLoggerDelegate LOGGER = EELFLoggerDelegate.getLogger(SchemaDAO.class);
+ InputStream inputStream;
+
+ public String initializeDatabase() {
+ Properties props = new Properties();
+ String propFileName = "resources.properties";
+ InputStream inputStream = getClass().getClassLoader().getResourceAsStream(propFileName);
+ try {
+ if (inputStream != null) {
+ props.load(inputStream);
+ } else {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"DBProxy : inputstream is not");
+ }
+ } catch (IOException e) {
+ e.printStackTrace();
+ LOGGER.error(EELFLoggerDelegate.applicationLogger,"initializeDatabase : Error while loading "+propFileName+", Error details : "+ e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"initializeDatabase : Error while loading "+propFileName+", Error details : "+ e.getMessage());
+ }
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"SchemaDAO : initializeDatabase called");
+
+ String keyspace = UserUtils.htmlEscape(props.getProperty("music.Keyspace"));
+ LOGGER.info(EELFLoggerDelegate.applicationLogger, "keyspace: " + keyspace);
+ if (keyspace!=null && keyspace.length()>0 && !keyspace.equalsIgnoreCase("false")) {
+ String dataCenterList = UserUtils.htmlEscape(props.getProperty("music.keyspace.data.centers"));
+ if (dataCenterList==null || dataCenterList.length()==0) {
+ // If music.keyspace.data.centers is not specified, use old behavior: data.center.one, .two, and .three get initialized with replication factor 3
+ createKeySpace(keyspace, UserUtils.htmlEscape(props.getProperty("data.center.one")), UserUtils.htmlEscape(props.getProperty("data.center.two")), UserUtils.htmlEscape(props.getProperty("data.center.three")) );
+ } else {
+ // If music.keyspace.data.centers is specified, it must be a pipe separated list, and music.keyspace.replication.factor must be an integer which is the replication factor
+ int replicationFactor = Integer.parseInt(props.getProperty("music.keyspace.replication.factor"));
+ createKeyspaceWithReplicationFactor(keyspace, dataCenterList, replicationFactor);
+ }
+ }
+
+ createTable(keyspace, Constants.SERVICE_PLACEMENTS_REQUEST_TABLE, Schema.getRequestTableSchema());
+ createTable(keyspace, Constants.TABLE_RESULT, Schema.getResultsTableSchema());
+ createTable(keyspace, Constants.TABLE_GROUP_RULES, Schema.getGroupsRulesTableSchema());
+ createTable(keyspace, Constants.TABLE_STACKS, Schema.getStacksTableSchema());
+ createTable(keyspace, Constants.TABLE_STACKS_ID_MAP, Schema.getStacksIdMapTableSchema());
+ createTable(keyspace, Constants.TABLE_RESOURCES, Schema.getResourcesTableSchema());
+ createTable(keyspace, Constants.TABLE_REGIONS, Schema.getRegionsTableSchema());
+ createTable(keyspace, Constants.TABLE_Groups, Schema.getGroupsTableSchema());
+
+
+ System.out.println("Tables created");
+ return "";
+ }
+
+ /**
+ *
+ * @param keyspace - music.Keyspace - name of the music keyspace
+ * @param dataCenterList - music.keyspace.data.centers - pipe separated list of data center names, e.g., "DC1|DC2|DC3|DC4"
+ * @param replicationFactor - music.keyspace.replication.factor - replication factor for each keyspace
+ * @return a String representing the response from Music, or an error string if it fails
+ */
+ private String createKeyspaceWithReplicationFactor(String keyspace, String dataCenterList, int replicationFactor) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger, "SchemaDAO.createKeyspaceWithReplicationFactor");
+ MessageFormat uri = new MessageFormat(MusicDBConstants.CREATE_KEYSPACE);
+
+ Object data[] = { keyspace };
+ String keyUrl = uri.format(data);
+ DBProxy dbProxy = new DBProxy();
+
+ String targetString = DBInitializationRequests.KEYSPACE_WITH_RF;
+ StringBuffer sb = new StringBuffer();
+ String sep = "";
+ java.util.StringTokenizer st = new java.util.StringTokenizer(dataCenterList, "|");
+ while (st.hasMoreTokens()) {
+ String token = st.nextToken();
+ sb.append(sep + "\"" + token + "\":" + replicationFactor);
+ sep = ",";
+ }
+ targetString = targetString.replaceAll("DATA_CENTER_INFO", sb.toString());
+
+ LOGGER.info(EELFLoggerDelegate.applicationLogger, "keyspace string = " + targetString);
+ return dbProxy.post(keyUrl, targetString);
+ }
+
+ /**
+ *
+ * @param keySpaceName
+ * @param DC1 Data Center Name
+ * @param DC2 Data Center Name
+ * @param DC3 Data Center Name
+ * @return
+ */
+ public String createKeySpace(String keySpaceName,String DC1,String DC2,String DC3) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"SchemaDAO : createKeySpace called");
+ MessageFormat uri = new MessageFormat(MusicDBConstants.CREATE_KEYSPACE);
+ JSONParser parser = new JSONParser();
+ JSONObject jsonRequest = null;
+ try {
+ jsonRequest = (JSONObject) parser.parse(DBInitializationRequests.KEYSPACE_REQUEST);
+ Object data[] = { keySpaceName };
+ String keyUrl = uri.format(data);
+ DBProxy dbProxy = new DBProxy();
+ System.out.println(jsonRequest.toJSONString().replace("DC1", DC1).replace("DC2", DC2).replace("DC3", DC3));
+ return dbProxy.post(keyUrl, jsonRequest.toJSONString().replace("DC1", DC1).replace("DC2", DC2).replace("DC3", DC3));
+ } catch (ParseException e) {
+ e.printStackTrace();
+ LOGGER.error(EELFLoggerDelegate.applicationLogger,"createKeySpace : Error details : "+ e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"createKeySpace : Error details : "+ e.getMessage());
+ return "Error parsing the request.Refer logs for more insight";
+ }
+ }
+
+ public String createTable(String keySpaceName, String tableName, String jsonRequest) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"SchemaDAO : createTable called");
+
+ MessageFormat uri = new MessageFormat(MusicDBConstants.CREATE_TABLE);
+ Object data[] = { keySpaceName, tableName };
+
+ DBProxy dbProxy = new DBProxy();
+ System.out.println(jsonRequest);
+ System.out.println(uri.format(data));
+
+ return dbProxy.post(uri.format(data), jsonRequest);
+ }
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/dao/ValetServicePlacementDAO.java b/valetapi/src/main/java/org/onap/fgps/api/dao/ValetServicePlacementDAO.java
new file mode 100644
index 0000000..5d9d166
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/dao/ValetServicePlacementDAO.java
@@ -0,0 +1,155 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.dao;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Properties;
+
+import org.json.simple.JSONObject;
+import org.onap.fgps.api.logging.EELFLoggerDelegate;
+import org.onap.fgps.api.proxy.DBProxy;
+import org.onap.fgps.api.utils.Constants;
+import org.onap.fgps.api.utils.Helper;
+import org.onap.fgps.api.utils.MusicDBConstants;
+import org.springframework.stereotype.Component;
+import org.onap.fgps.api.utils.UserUtils;
+
+@Component
+public class ValetServicePlacementDAO {
+ //private static final Logger LOGGER = LoggerFactory.getLogger(ValetServiceApplication.class);
+ private static EELFLoggerDelegate LOGGER = EELFLoggerDelegate.getLogger(SchemaDAO.class);
+ private String keySpace;
+ private boolean pingLogFlag;
+
+ public ValetServicePlacementDAO(boolean pingFlag) {
+ this.pingLogFlag = pingFlag;
+ Properties props = new Properties();
+ String propFileName = "resources.properties";
+ InputStream inputStream = getClass().getClassLoader().getResourceAsStream(propFileName);
+ try {
+ if (inputStream != null) {
+ props.load(inputStream);
+ } else {
+ if(pingLogFlag) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"DBProxy : inputstream is not");
+ }
+ }
+ } catch (IOException e) {
+ e.printStackTrace();
+ LOGGER.error(EELFLoggerDelegate.applicationLogger,"ValetServicePlacementDAO : Error details : "+ e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"ValetServicePlacementDAO : Error details : "+ e.getMessage());
+ }
+ if(pingLogFlag) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"SchemaDAO : initializeDatabase called");
+ }
+ this.keySpace = UserUtils.htmlEscape(props.getProperty("music.Keyspace"));
+ }
+
+ public ValetServicePlacementDAO() {
+ this(true);
+ }
+
+ public String insertRow(String request) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"ValetServicePlacementDAO : insertRow : inserting the row");
+
+ DBProxy dbProxy = new DBProxy();
+ Object[] params = { this.keySpace, Constants.SERVICE_PLACEMENTS_REQUEST_TABLE };
+ String url = Helper.getURI(MusicDBConstants.INSERT_ROWS, params);
+ return dbProxy.post(url, request);
+ }
+
+ public String deleteRow(String request_id, String json) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"ValetServicePlacementDAO : deleteRow : deleting the row");
+ DBProxy dbProxy = new DBProxy();
+ Object[] params = { this.keySpace, Constants.SERVICE_PLACEMENTS_REQUEST_TABLE };
+ String url = Helper.getURI(MusicDBConstants.INSERT_ROWS, params);
+ return dbProxy.delete(url + "?request_id=" + request_id, json);
+ }
+
+ @SuppressWarnings("unchecked")
+ public String updateRow(JSONObject values) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"ValetServicePlacementDAO : updateRow : update the row");
+
+ JSONObject request = new JSONObject();
+ JSONObject consistencyInfo = new JSONObject();
+ consistencyInfo.put("type", "eventual");
+ request.put("values", values);
+ request.put("consistencyInfo", consistencyInfo);
+ DBProxy dbProxy = new DBProxy();
+ Object[] params = { this.keySpace, Constants.SERVICE_PLACEMENTS_REQUEST_TABLE };
+ String url = Helper.getURI(MusicDBConstants.INSERT_ROWS, params);
+ return dbProxy.put(url, request.toJSONString());
+ }
+
+ public String getRow(String request_id) {
+ if(pingLogFlag) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"ValetServicePlacementDAO : getRow : geting the row");
+ }
+
+ DBProxy dbProxy = new DBProxy(pingLogFlag);
+ Object[] params = { this.keySpace, Constants.SERVICE_PLACEMENTS_REQUEST_TABLE };
+ String url = Helper.getURI(MusicDBConstants.INSERT_ROWS, params);
+ return dbProxy.get(url + "?request_id=" + request_id);
+ }
+
+ public String getRowFromResults(String request_id) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"ValetServicePlacementDAO : getRowFromResults : geting the row");
+
+ DBProxy dbProxy = new DBProxy();
+ Object[] params = { this.keySpace, Constants.SERVICE_PLACEMENTS_RESULTS_TABLE };
+ String url = Helper.getURI(MusicDBConstants.INSERT_ROWS, params);
+ System.out.println(url + "?request_id=" + request_id);
+ String result = dbProxy.get(url + "?request_id=" + request_id);
+ System.out.println(result);
+ return result;
+
+ }
+
+ public String deleteRowFromResults(String request_id, String request) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"ValetServicePlacementDAO : deleteRowFromResults : deleting the row");
+
+ DBProxy dbProxy = new DBProxy();
+ Object[] params = { this.keySpace, Constants.SERVICE_PLACEMENTS_RESULTS_TABLE };
+ String url = Helper.getURI(MusicDBConstants.INSERT_ROWS, params);
+ System.out.println(url + "?request_id=" + request_id);
+ String result = dbProxy.delete(url + "?request_id=" + request_id, request);
+ System.out.println(result);
+ return result;
+ }
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/eelf/configuration/Configuration.java b/valetapi/src/main/java/org/onap/fgps/api/eelf/configuration/Configuration.java
new file mode 100644
index 0000000..68c45ed
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/eelf/configuration/Configuration.java
@@ -0,0 +1,159 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.eelf.configuration;
+
+/**
+ * Defines the EELF logger names and the MDC keys used throughout the
+ * logging configuration support.
+ */
+public interface Configuration {
+
+	/** Property naming the logback configuration file. */
+	String PROPERTY_LOGGING_FILE_NAME = "org.onap.eelf.logging.file";
+
+	/** Property naming the directory that holds the logback configuration file. */
+	String PROPERTY_LOGGING_FILE_PATH = "org.onap.eelf.logging.path";
+
+	/** Logger name for application general logging. */
+	String GENERAL_LOGGER_NAME = "org.onap.eelf";
+
+	/** Logger name for application metrics logging. */
+	String METRICS_LOGGER_NAME = "org.onap.eelf.metrics";
+
+	/** Logger name for application performance metrics. */
+	String PERF_LOGGER_NAME = "org.onap.eelf.perf";
+
+	/** Logger name for application policy logging. */
+	String POLICY_LOGGER_NAME = "org.onap.eelf.policy";
+
+	/** Logger name for application security logging. */
+	String SECURITY_LOGGER_NAME = "org.onap.eelf.security";
+
+	/** Logger name for application server logging. */
+	String SERVER_LOGGER_NAME = "org.onap.eelf.server";
+
+	/** Logger name for application audit logging. */
+	String AUDIT_LOGGER_NAME = "org.onap.eelf.audit";
+
+	/** Logger name for error logging. */
+	String ERROR_LOGGER_NAME = "org.onap.eelf.error";
+
+	/** Logger name for debug logging. */
+	String DEBUG_LOGGER_NAME = "org.onap.eelf.debug";
+
+	/**
+	 * MDC key tracking one request across the sub-components that process a
+	 * single service instance.
+	 */
+	String MDC_KEY_REQUEST_ID = "RequestId";
+
+	/** MDC key uniquely identifying a service instance. */
+	String MDC_SERVICE_INSTANCE_ID = "ServiceInstanceId";
+
+	/** MDC key carrying the name of the service. */
+	String MDC_SERVICE_NAME = "ServiceName";
+
+	/**
+	 * MDC key differentiating multiple instances of the same named
+	 * log-writing service or application.
+	 */
+	String MDC_INSTANCE_UUID = "InstanceUUID";
+
+	/**
+	 * MDC key for the host server's IP address (e.g. the Jetty container's
+	 * listening IP address).
+	 */
+	String MDC_SERVER_IP_ADDRESS = "ServerIPAddress";
+
+	/** MDC key for the host server's FQDN. */
+	String MDC_SERVER_FQDN = "ServerFQDN";
+
+	/** MDC key for the remote host name/ip address making the request. */
+	String MDC_REMOTE_HOST = "RemoteHost";
+
+	/**
+	 * MDC key mapping to severity in alert messages, e.g. nagios alerts.
+	 */
+	String MDC_ALERT_SEVERITY = "AlertSeverity";
+
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/eelf/configuration/EELFLogger.java b/valetapi/src/main/java/org/onap/fgps/api/eelf/configuration/EELFLogger.java
new file mode 100644
index 0000000..df7fecf
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/eelf/configuration/EELFLogger.java
@@ -0,0 +1,634 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.eelf.configuration;
+
+import java.util.Locale;
+
+import org.onap.fgps.api.eelf.i18n.EELFResolvableErrorEnum;
+
+/**
+ * The EELFLogger is the main interface to access loggers in EELF.
+ * <p>
+ * It defines the convenience methods that are available to the application to log messages based
+ * on the string or a key in the resource bundle(s).
+ * </p>
+ *
+ */
+public interface EELFLogger {
+
+ public enum Level {
+ TRACE, DEBUG, INFO, WARN, ERROR, OFF
+ }
+
+
+ /**
+ * Log a warn message, with no arguments.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ */
+ public void warn(String msg);
+
+
+ /**
+ * Log a warn message, with arguments.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ public void warn(String msg, Object... arguments);
+
+ /**
+ * Log a exception at warn level.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ * @param th
+ * The exception object
+ */
+ public void warn(String msg, Throwable th);
+
+ /**
+ * Log a debug message, with no arguments.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ */
+ public void debug(String msg);
+
+ /**
+ * Log a debug message, with arguments.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ public void debug(String msg, Object... arguments);
+
+ /**
+ * Log a exception at debug level.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ * @param th
+ * The exception object
+ */
+ public void debug(String msg, Throwable th);
+
+
+ /**
+ * Log a info message, with no arguments.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ */
+ public void info(String msg);
+
+ /**
+ * Log a info message, with arguments.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ public void info(String msg, Object... arguments);
+
+
+
+ /**
+ * Log a trace message, with no arguments.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ */
+ public void trace(String msg);
+
+
+ /**
+ * Log a trace message, with arguments.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ public void trace(String msg, Object... arguments);
+
+ /**
+ * Log a exception at trace level.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ * @param th
+ * The exception object
+ */
+ public void trace(String msg, Throwable th);
+
+ /**
+ * Log a error message, with no arguments.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ */
+ public void error(String msg);
+
+ /**
+ * Log a error message, with arguments.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ public void error(String msg, Object... arguments);
+
+ /**
+ * Log a exception at error level.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ * @param th
+ * The exception object
+ */
+ public void error(String msg, Throwable th);
+
+ /**
+ * Checks if the trace is enabled for the logger
+ */
+ public boolean isTraceEnabled();
+
+ /**
+ * Checks if the info is enabled for the logger
+ */
+ public boolean isInfoEnabled();
+
+ /**
+ * Checks if the error is enabled for the logger
+ */
+ public boolean isErrorEnabled();
+
+ /**
+ * Checks if the warn is enabled for the logger
+ */
+ public boolean isWarnEnabled();
+
+ /**
+ * Checks if the debug is enabled for the logger
+ */
+ public boolean isDebugEnabled();
+
+ /**
+ * Log a message or exception with arguments if the argument list is provided
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param level
+ * One of the message level identifiers, e.g., SEVERE
+ * @param msg
+ * The string message
+ * @param th
+ * The exception object
+ * @param arguments
+ * The list of arguments
+ */
+ public void log(Level level, String msg, Throwable th, Object... arguments);
+
+
+ /**
+ * Log a audit event using audit logger at info level.
+ *
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ public void auditEvent(String msg, Object... arguments);
+
+ /**
+ * Log a audit event using audit logger at given level.
+ *
+ * @param level
+ * One of the message level identifiers, e.g., WARN
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ public void auditEvent(Level level, String msg, Object... arguments);
+
+
+
+ /**
+ * Log a metrics event using metrics logger at info level.
+ *
+ * @param msg
+ * @param arguments
+ */
+ public void metricsEvent(String msg, Object... arguments);
+
+ /**
+ * Log a metrics event using metrics logger at info level at given level.
+ *
+ * @param level
+ * One of the message level identifiers, e.g., WARN
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+
+ public void metricsEvent(Level level, String msg, Object... arguments);
+
+
+
+
+ /**
+ * Log a security event using security logger at info level.
+ *
+ * @param msg
+ * @param arguments
+ */
+ public void securityEvent(String msg, Object... arguments);
+
+ /**
+ * Log a security event using security logger at given level.
+ *
+ * @param level
+ * @param msg
+ * @param arguments
+ */
+ public void securityEvent(Level level, String msg, Object... arguments);
+
+
+
+ /**
+ * Log a performance event using performance logger at info level.
+ *
+ * @param msg
+ * @param arguments
+ */
+ public void performanceEvent(String msg, Object... arguments);
+
+ /**
+ * Log a performance event using performance logger at a given level.
+ *
+ * @param level
+ * @param msg
+ * @param arguments
+ */
+ public void performanceEvent(Level level, String msg, Object... arguments);
+
+
+
+ /**
+ * Log an application event using application logger at info.
+ *
+ * @param msg
+ * @param arguments
+ */
+ public void applicationEvent(String msg, Object... arguments);
+
+ /**
+ * Log an application event using application logger at a given level.
+ *
+ * @param level
+ * @param msg
+ * @param arguments
+ */
+ public void applicationEvent(Level level, String msg, Object... arguments);
+
+
+ /**
+ * Log a server event using server logger at info level.
+ *
+ * @param msg
+ * @param arguments
+ */
+ public void serverEvent(String msg, Object... arguments);
+
+ /**
+ * Log a server event using server logger at a given level.
+ *
+ * @param level
+ * @param arguments
+ */
+ public void serverEvent(Level level, String msg, Object... arguments);
+
+
+
+ /**
+ * Log a policy event using policy logger at info level.
+ *
+ * @param msg
+ * @param arguments
+ */
+ public void policyEvent(String msg, Object... arguments);
+
+ /**
+ * Log a policy event using policy logger at a given level.
+ *
+ * @param level
+ * @param msg
+ * @param arguments
+ */
+ public void policyEvent(Level level, String msg, Object... arguments);
+
+ /**
+ * Log a warn message based on message key as defined in resource bundle for the given locale
+ * along with exception.
+ *
+ * @param locale
+ * @param errorCode
+ * @param th
+ * @param args
+ */
+ public void warn(Locale locale,EELFResolvableErrorEnum errorCode, Throwable th, String... args);
+
+ /**
+ * Log a info message based on message key as defined in resource bundle for the given locale
+ * along with exception.
+ *
+ * @param locale
+ * @param errorCode
+ * @param th
+ * @param args
+ */
+ public void info(Locale locale, EELFResolvableErrorEnum errorCode, Throwable th, String... args);
+
+ /**
+ * Log a debug message based on message key as defined in resource bundle for the given locale
+ * along with exception.
+ *
+ * @param locale
+ * @param errorCode
+ * @param th
+ * @param args
+ */
+ public void debug(Locale locale, EELFResolvableErrorEnum errorCode, Throwable th, String... args);
+
+ /**
+ * Log a error message based on message key as defined in resource bundle for the given locale
+ * along with exception.
+ *
+ * @param locale
+ * @param errorCode
+ * @param th
+ * @param args
+ */
+ public void error(Locale locale, EELFResolvableErrorEnum errorCode, Throwable th, String... args);
+
+ /**
+ * Log a trace message based on message key as defined in resource bundle for the given locale
+ * along with exception.
+ *
+ * @param locale
+ * @param errorCode
+ * @param th
+ * @param args
+ */
+ public void trace(Locale locale,EELFResolvableErrorEnum errorCode, Throwable th, String... args);
+
+ /**
+ * Log a warn message based on message key as defined in resource bundle for the given locale.
+ *
+ * @param locale
+ * @param errorCode
+ * @param args
+ */
+ public void warn(Locale locale, EELFResolvableErrorEnum errorCode, String... args);
+
+ /**
+ * Log a info message based on message key as defined in resource bundle for the given locale.
+ *
+ * @param locale
+ * @param errorCode
+ * @param args
+ */
+ public void info(Locale locale, EELFResolvableErrorEnum errorCode, String... args);
+
+ /**
+ * Log a debug message based on message key as defined in resource bundle for the given locale.
+ *
+ * @param locale
+ * @param errorCode
+ * @param args
+ */
+ public void debug(Locale locale, EELFResolvableErrorEnum errorCode, String... args);
+
+ /**
+ * Log a error message based on message key as defined in resource bundle for the given locale.
+ *
+ * @param locale
+ * @param errorCode
+ * @param args
+ */
+ public void error(Locale locale, EELFResolvableErrorEnum errorCode, String... args);
+
+ /**
+ * Log a trace message based on message key as defined in resource bundle for the given locale.
+ *
+ * @param locale
+ * @param errorCode
+ * @param args
+ */
+ public void trace(Locale locale, EELFResolvableErrorEnum errorCode, String... args);
+
+ /**
+ * Log a warn message based on message key as defined in resource bundle with arguments.
+ *
+ * @param errorCode
+ * @param args
+ */
+ public void warn(EELFResolvableErrorEnum errorCode, String... args);
+
+ /**
+ * Log a info message based on message key as defined in resource bundle with arguments.
+ *
+ * @param errorCode
+ * @param args
+ */
+ public void info(EELFResolvableErrorEnum errorCode, String... args);
+
+ /**
+ * Log a debug message based on message key as defined in resource bundle with arguments.
+ *
+ * @param errorCode
+ * @param args
+ */
+ public void debug(EELFResolvableErrorEnum errorCode, String... args);
+
+ /**
+ * Log a error message based on message key as defined in resource bundle with arguments.
+ *
+ * @param errorCode
+ * @param args
+ */
+
+ public void error(EELFResolvableErrorEnum errorCode, String... args);
+
+ /**
+ * Log a trace message based on message key as defined in resource bundle with arguments.
+ *
+ * @param errorCode
+ * @param args
+ */
+ public void trace(EELFResolvableErrorEnum errorCode, String... args);
+
+ /**
+ * Log a warn message based on message key as defined in resource bundle along with exception.
+ *
+ * @param errorCode
+ * @param th
+ * @param args
+ */
+ public void warn(EELFResolvableErrorEnum errorCode, Throwable th, String... args);
+
+
+ /**
+ * Log a info message based on message key as defined in resource bundle along with exception.
+ *
+ * @param errorCode
+ * @param th
+ * @param args
+ */
+ public void info(EELFResolvableErrorEnum errorCode, Throwable th, String... args);
+
+ /**
+ * Log a debug message based on message key as defined in resource bundle along with exception.
+ *
+ * @param errorCode
+ * @param th
+ * @param args
+ */
+ public void debug(EELFResolvableErrorEnum errorCode, Throwable th, String... args);
+
+ /**
+ * Log a error message based on message key as defined in resource bundle along with exception.
+ *
+ * @param errorCode
+ * @param th
+ * @param args
+ */
+ public void error(EELFResolvableErrorEnum errorCode, Throwable th, String... args);
+
+ /**
+ * Log a trace message based on message key as defined in resource bundle along with exception.
+ *
+ * @param errorCode
+ * @param th
+ * @param args
+ */
+ public void trace(EELFResolvableErrorEnum errorCode, Throwable th, String... args);
+
+ /**
+ * Change the logging level for the logger
+ *
+ * @param level
+ */
+ public void setLevel(Level level);
+
+ /**
+ * Turn off the logging for the logger
+ */
+ public void disableLogging();
+
+
+
+ }
diff --git a/valetapi/src/main/java/org/onap/fgps/api/eelf/configuration/EELFManager.java b/valetapi/src/main/java/org/onap/fgps/api/eelf/configuration/EELFManager.java
new file mode 100644
index 0000000..feb98f9
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/eelf/configuration/EELFManager.java
@@ -0,0 +1,502 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.eelf.configuration;
+
+import static org.onap.fgps.api.eelf.configuration.Configuration.AUDIT_LOGGER_NAME;
+import static org.onap.fgps.api.eelf.configuration.Configuration.DEBUG_LOGGER_NAME;
+import static org.onap.fgps.api.eelf.configuration.Configuration.ERROR_LOGGER_NAME;
+import static org.onap.fgps.api.eelf.configuration.Configuration.GENERAL_LOGGER_NAME;
+import static org.onap.fgps.api.eelf.configuration.Configuration.METRICS_LOGGER_NAME;
+import static org.onap.fgps.api.eelf.configuration.Configuration.PERF_LOGGER_NAME;
+import static org.onap.fgps.api.eelf.configuration.Configuration.POLICY_LOGGER_NAME;
+import static org.onap.fgps.api.eelf.configuration.Configuration.PROPERTY_LOGGING_FILE_NAME;
+import static org.onap.fgps.api.eelf.configuration.Configuration.PROPERTY_LOGGING_FILE_PATH;
+import static org.onap.fgps.api.eelf.configuration.Configuration.SECURITY_LOGGER_NAME;
+import static org.onap.fgps.api.eelf.configuration.Configuration.SERVER_LOGGER_NAME;
+
+import java.io.BufferedInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.onap.fgps.api.eelf.i18n.EELFMsgs;
+import org.onap.fgps.api.eelf.i18n.EELFResourceManager;
+import org.slf4j.ILoggerFactory;
+import org.slf4j.LoggerFactory;
+
+import ch.qos.logback.classic.LoggerContext;
+import ch.qos.logback.classic.joran.JoranConfigurator;
+import ch.qos.logback.core.joran.spi.JoranException;
+
+/**
+ * This is a singleton class used to obtain a named Logger instance.
+ * The EELFManager object can be retrieved using EELFManager.getInstance(). It is created during class initialization and cannot subsequently be changed.
+ * At startup the EELFManager loads the logging configuration file.
+ * If no external logging configuration file is found, it will load the default logging configuration available at org/onap/eelf/logback.xml
+ */
+
+public final class EELFManager {
+
+ /**
+ * This is a string constant for the comma character. It's intended to be used a common string delimiter.
+ */
+ private static final String COMMA = ",";
+
+ /**
+ * The logger to be used to record general application log events
+ */
+ private EELFLogger applicationLogger;
+
+ /**
+ * The logger to be used to record audit events
+ */
+ private EELFLogger auditLogger;
+
+ /**
+ * The logger to be used to record metric events
+ */
+ private EELFLogger metricsLogger;
+
+
+ /**
+ * The logger to be used to record performance events
+ */
+ private EELFLogger performanceLogger;
+
+ /**
+ * The logger to be used to record policy manager application events
+ */
+ private EELFLogger policyLogger;
+
+ /**
+ * The logger to be used to record security events
+ */
+ private EELFLogger securityLogger;
+
+ /**
+ * The logger to be used to record server events
+ */
+ private EELFLogger serverLogger;
+
+ /**
+ * The logger to be used to record error only
+ */
+ private EELFLogger errorLogger;
+
+ /**
+ * The logger to be used to record debug logs
+ */
+ private EELFLogger debugLogger;
+
+ /**
+ * Cache of all other loggers used in application
+ */
+ private Map<String,EELFLogger> loggerCache = new ConcurrentHashMap<String,EELFLogger>();
+
+ /**
+ * This lock is used to serialize access to create the loggers
+ */
+ private Object loggerLock = new Object();
+
+ /**
+ * This lock is used to serialize access to create the loggers
+ */
+ private static final EELFManager logManager = new EELFManager();
+
+ private EELFManager() {
+ ArrayList<String> delayedLogging = new ArrayList<String>();
+ /*
+ * Now, we are ready to initialize logging. Check to see if logging has already been initialized and that the
+ * application logger exists already. If it does, then skip the logging configuration because it was already set
+ * up in the container that is calling us. If not, then we need to set it up.
+ */
+ ILoggerFactory factory = LoggerFactory.getILoggerFactory();
+ if (factory instanceof LoggerContext) {
+ LoggerContext loggerContext = (LoggerContext) factory;
+ if (loggerContext.exists(GENERAL_LOGGER_NAME) == null) {
+ initializeLogging(delayedLogging);
+ } else {
+ delayedLogging.add(EELFResourceManager.getMessage(EELFMsgs.LOGGING_ALREADY_INITIALIZED));
+ }
+ }
+
+ /*
+ * Copy all delayed logging messages to the logger
+ */
+ for (String message : delayedLogging) {
+ // All messages are prefixed with a message code of the form EELF####S
+ // Where:
+ // EELF --- is the product code
+ // #### -- Is the message number
+ // S ----- Is the severity code (I=INFO, D=DEBUG, W=WARN, E=ERROR)
+ char severity = message.charAt(8);
+ switch (severity) {
+ case 'D':
+ getApplicationLogger().debug(message);
+ break;
+ case 'I':
+ getApplicationLogger().info(message);
+ break;
+ case 'W':
+ getApplicationLogger().warn(message);
+ break;
+ case 'E':
+ getApplicationLogger().error(message);
+ }
+ }
+
+ delayedLogging.clear();
+
+ }
+
+ /**
+ * Initialize the logging environment, record all logging messages to the provided list for delayed processing.
+ *
+ * @param delayedLogging
+ * The list to record logging messages to for delayed processing after the logging environment is
+ * created.
+ */
+ private static void initializeLogging(final ArrayList<String> delayedLogging) {
+
+ /*
+ * See if we can find logback-test.xml first, unless a specific file has been provided
+ */
+ String filename = System.getProperty(PROPERTY_LOGGING_FILE_NAME, "logback-test.xml");
+
+ String path = System.getProperty(PROPERTY_LOGGING_FILE_PATH, "${user.home};etc;../etc");
+
+ String msg = EELFResourceManager.format(EELFMsgs.SEARCHING_LOG_CONFIGURATION,path, filename);
+ delayedLogging.add(msg);
+
+ if (scanAndLoadLoggingConfiguration(path, filename, delayedLogging)) {
+ return;
+ }
+
+ /*
+ * If the first attempt was for logback-test.xml and it failed to find it, look again for logback.xml
+ */
+ if (filename.equals("logback-test.xml")) {
+ filename = System.getProperty(PROPERTY_LOGGING_FILE_NAME, "logback.xml");
+
+ if (scanAndLoadLoggingConfiguration(path, filename, delayedLogging)) {
+ return;
+ }
+ }
+
+
+ /*
+ * If we reach here, then no external logging configurations were defined or found. In that case, we need to
+ * initialize the logging framework from hard-coded default values we load from resources.
+ */
+ InputStream stream = EELFManager.class.getClassLoader().getResourceAsStream("org/onap/eelf/logback.xml");
+ try {
+ if (stream != null) {
+ delayedLogging.add(EELFResourceManager.getMessage(EELFMsgs.LOADING_DEFAULT_LOG_CONFIGURATION,"org/onap/eelf/logback.xml"));
+ loadLoggingConfiguration(stream, delayedLogging);
+ } else {
+ delayedLogging.add(EELFResourceManager.format(EELFMsgs.NO_LOG_CONFIGURATION));
+ }
+ } finally {
+ if (stream != null) {
+ try {
+ stream.close();
+ } catch (IOException e) {
+ // not much we can do since logger may not be configured yet
+ e.printStackTrace(System.out);
+ }
+ }
+ }
+
+ }
+
+
+ /**
+ * Loads the logging configuration from the specified stream.
+ *
+ * @param stream
+ * The stream that contains the logging configuration document.
+ * @param delayedLogging
+ */
+ private static void loadLoggingConfiguration(final InputStream stream, final ArrayList<String> delayedLogging) {
+ ILoggerFactory loggerFactory = LoggerFactory.getILoggerFactory();
+ if (loggerFactory instanceof LoggerContext) {
+ configureLogback((LoggerContext) loggerFactory, stream);
+ } else {
+ delayedLogging.add((EELFResourceManager.format(EELFMsgs.UNSUPPORTED_LOGGING_FRAMEWORK)));
+ }
+
+ }
+
+
+
+ /**
+ * @param loggerFactory
+ * the logger factory context
+ * @param stream
+ * The input stream to be configured
+ */
+ private static void configureLogback(final LoggerContext context, final InputStream stream) {
+ JoranConfigurator configurator = new JoranConfigurator();
+ configurator.setContext(context);
+
+ try {
+ configurator.doConfigure(stream);
+ } catch (JoranException e) {
+ // not much we can do since logger may not be configured yet
+ e.printStackTrace(System.out);
+ }
+
+
+ }
+
+
+ /**
+ * This method scans a set of directories specified by the path for an occurrence of a file of the specified
+ * filename, and when found, loads that file as a logging configuration file.
+ *
+ * @param path
+ * The path to be scanned. This can be one or more directories, separated by the platform specific path
+ * separator character.
+ * @param filename
+ * The file name to be located. The file name examined within each element of the path for the first
+ * occurrence of the file that exists and which can be read and processed.
+ * @param delayedLogging
+ * @return True if a file was found and loaded, false if no files were found, or none were readable.
+ */
+ private static boolean scanAndLoadLoggingConfiguration(final String path, final String filename,
+ final ArrayList<String> delayedLogging) {
+ String[] pathElements = path.split(COMMA);
+ for (String pathElement : pathElements) {
+ File file = new File(pathElement, filename);
+ if (file.exists() && file.canRead() && !file.isDirectory()) {
+ String msg = EELFResourceManager.getMessage(EELFMsgs.LOADING_LOG_CONFIGURATION,file.getAbsolutePath());
+ delayedLogging.add(msg);
+
+ BufferedInputStream stream = null;
+ try {
+ stream = new BufferedInputStream(new FileInputStream(file));
+ delayedLogging.add(String.format("EELF000I Loading logging configuration from %s",
+ file.getAbsolutePath()));
+ loadLoggingConfiguration(stream, delayedLogging);
+ } catch (FileNotFoundException e) {
+ delayedLogging.add(EELFResourceManager.format(e));
+ } finally {
+ if (stream != null) {
+ try {
+ stream.close();
+ } catch (IOException e) {
+ // not much we can do since logger may not be configured yet
+ e.printStackTrace(System.out);
+ }
+ }
+ }
+
+ return true;
+ }
+ }
+ return false;
+ }
+
+
+ /**
+ * This method is used to obtain the EELFManager (as well as set it up if not already).
+ *
+ * @return The EELFManager object
+ */
+ public static EELFManager getInstance() {
+
+ return logManager;
+ }
+
+ /**
+ * Returns the logger associated with the name
+ * @return EELFLogger
+ */
+ public EELFLogger getLogger(String name) {
+ synchronized (loggerLock) {
+ if (!loggerCache.containsKey(name)) {
+ loggerCache.put(name,new SLF4jWrapper(name));
+ }
+ }
+ return loggerCache.get(name);
+
+ }
+
+ /**
+ * Returns the logger associated with the clazz
+ *
+ * @param clazz
+ * The class that we are obtaining the logger for
+ * @return EELFLogger The logger
+ */
+ public EELFLogger getLogger(Class<?> clazz) {
+ synchronized (loggerLock) {
+ if (!loggerCache.containsKey(clazz.getName())) {
+ loggerCache.put(clazz.getName(), new SLF4jWrapper(clazz.getName()));
+ }
+ }
+ return loggerCache.get(clazz.getName());
+
+ }
+
+ /**
+ * Returns the application logger
+ * @return EELFLogger
+ */
+ public EELFLogger getApplicationLogger() {
+ synchronized (loggerLock) {
+ if (applicationLogger == null) {
+ applicationLogger = new SLF4jWrapper(GENERAL_LOGGER_NAME);
+ }
+ }
+ return applicationLogger;
+ }
+
+ /**
+ * Returns the metrics logger
+ * @return EELFLogger
+ */
+ public EELFLogger getMetricsLogger() {
+ synchronized (loggerLock) {
+ if (metricsLogger == null) {
+ metricsLogger = new SLF4jWrapper(METRICS_LOGGER_NAME);
+ }
+ }
+ return metricsLogger;
+ }
+
+
+ /**
+ * Returns the audit logger
+ * @return EELFLogger
+ */
+ public EELFLogger getAuditLogger() {
+ synchronized (loggerLock) {
+ if (auditLogger == null) {
+ auditLogger = new SLF4jWrapper(AUDIT_LOGGER_NAME);
+ }
+ }
+ return auditLogger;
+ }
+
+ /**
+ * Returns the performance logger
+ * @return EELFLogger
+ */
+ public EELFLogger getPerformanceLogger() {
+ synchronized (loggerLock) {
+ if (performanceLogger == null) {
+ performanceLogger = new SLF4jWrapper(PERF_LOGGER_NAME);
+ }
+ }
+ return performanceLogger;
+ }
+
+ /**
+ * Returns the server logger
+ * @return EELFLogger
+ */
+ public EELFLogger getServerLogger() {
+ synchronized (loggerLock) {
+ if (serverLogger == null) {
+ serverLogger = new SLF4jWrapper(SERVER_LOGGER_NAME);
+ }
+ }
+ return serverLogger;
+ }
+
+
+ /**
+ * Returns the security logger
+ * @return EELFLogger
+ */
+ public EELFLogger getSecurityLogger() {
+ synchronized (loggerLock) {
+ if (securityLogger == null) {
+ securityLogger = new SLF4jWrapper(SECURITY_LOGGER_NAME);
+ }
+ }
+ return securityLogger;
+ }
+
+ /**
+ * Returns the policy logger
+ * @return EELFLogger
+ */
+ public EELFLogger getPolicyLogger() {
+ synchronized (loggerLock) {
+ if (policyLogger == null) {
+ policyLogger = new SLF4jWrapper(POLICY_LOGGER_NAME);
+ }
+ }
+ return policyLogger;
+ }
+
+
+ /**
+ * Returns the error logger
+ * @return EELFLogger
+ */
+ public EELFLogger getErrorLogger() {
+ synchronized (loggerLock) {
+ if (errorLogger == null) {
+ errorLogger = new SLF4jWrapper(ERROR_LOGGER_NAME);
+ }
+ }
+ return errorLogger;
+ }
+
+ /**
+ * Returns the error logger
+ * @return EELFLogger
+ */
+ public EELFLogger getDebugLogger() {
+ synchronized (loggerLock) {
+ if (debugLogger == null) {
+ debugLogger = new SLF4jWrapper(DEBUG_LOGGER_NAME);
+ }
+ }
+ return debugLogger;
+ }
+
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/eelf/configuration/SLF4jWrapper.java b/valetapi/src/main/java/org/onap/fgps/api/eelf/configuration/SLF4jWrapper.java
new file mode 100644
index 0000000..e9dc30a
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/eelf/configuration/SLF4jWrapper.java
@@ -0,0 +1,1043 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.eelf.configuration;
+
+
+import static org.onap.fgps.api.eelf.configuration.Configuration.AUDIT_LOGGER_NAME;
+import static org.onap.fgps.api.eelf.configuration.Configuration.GENERAL_LOGGER_NAME;
+import static org.onap.fgps.api.eelf.configuration.Configuration.METRICS_LOGGER_NAME;
+import static org.onap.fgps.api.eelf.configuration.Configuration.PERF_LOGGER_NAME;
+import static org.onap.fgps.api.eelf.configuration.Configuration.POLICY_LOGGER_NAME;
+import static org.onap.fgps.api.eelf.configuration.Configuration.SECURITY_LOGGER_NAME;
+import static org.onap.fgps.api.eelf.configuration.Configuration.SERVER_LOGGER_NAME;
+
+import java.util.Locale;
+
+import org.onap.fgps.api.eelf.i18n.EELFResolvableErrorEnum;
+import org.onap.fgps.api.eelf.i18n.EELFResourceManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class provides the implementation of <code>EELFLogger</code> interface.
+ * <p>
+ * It is a wrapper around an SLF4J logger (backed by a logback implementation) that redirects logging calls to SLF4J.
+ *</p>
+ * @since Sept 8, 2015
+ */
+
+public class SLF4jWrapper implements EELFLogger {
+ private Logger logger;
+
+
+ /**
+ * Create the wrapper around the SLF4J logger
+ *
+ * @param name
+ * The name of the underlying SLF4J logger to be wrapped
+ */
+ public SLF4jWrapper(String name) {
+ this.logger = LoggerFactory.getLogger(name);
+ }
+
+
+ /**
+ * Log a warn message, with no arguments.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ */
+ @Override
+ public void warn(String msg) {
+ writeToLog(Level.WARN, msg);
+ }
+
+ /**
+ * Log a warn message, with arguments.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ @Override
+ public void warn(String msg, Object... arguments) {
+ writeToLog(Level.WARN, msg, null, arguments);
+ }
+
+ /**
+ * Log an exception at warn level.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ * @param th
+ * The exception object
+ */
+ @Override
+ public void warn(String msg, Throwable th) {
+ writeToLog(Level.WARN, msg, th);
+ }
+
+ /**
+ * Log a debug message, with no arguments.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ */
+ @Override
+ public void debug(String msg) {
+ writeToLog(Level.DEBUG, msg);
+ }
+
+ /**
+ * Log a debug message, with arguments.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ @Override
+ public void debug(String msg, Object... arguments) {
+ writeToLog(Level.DEBUG, msg, null, arguments);
+ }
+
+ /**
+ * Log an exception at debug level.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ * @param th
+ * The exception object
+ */
+ @Override
+ public void debug(String msg, Throwable th) {
+ writeToLog(Level.DEBUG, msg, th);
+ }
+
+ /**
+ * Log an info message, with no arguments.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ */
+ @Override
+ public void info(String msg) {
+ writeToLog(Level.INFO, msg);
+ }
+
+ /**
+ * Log an info message, with arguments.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ @Override
+ public void info(String msg, Object... arguments) {
+ writeToLog(Level.INFO, msg, null, arguments);
+ }
+
+ /**
+ * Log a trace message, with no arguments.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ */
+ @Override
+ public void trace(String msg) {
+ writeToLog(Level.TRACE, msg);
+ }
+
+ /**
+ * Log a trace message, with arguments.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ @Override
+ public void trace(String msg, Object... arguments) {
+ writeToLog(Level.TRACE, msg, null, arguments);
+ }
+
+
+ /**
+ * Log an exception at trace level.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ * @param th
+ * The exception object
+ */
+ @Override
+ public void trace(String msg, Throwable th) {
+ writeToLog(Level.TRACE, msg, th);
+ }
+
+ /**
+ * Log an error message, with no arguments.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ */
+ @Override
+ public void error(String msg) {
+ writeToLog(Level.ERROR, msg);
+ }
+
+ /**
+ * Log an error message, with arguments.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ @Override
+ public void error(String msg, Object... arguments) {
+ writeToLog(Level.ERROR, msg, null, arguments);
+ }
+
+
+ /**
+ * Log an exception at error level.
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param msg
+ * The string message
+ * @param th
+ * The exception object
+ */
+ @Override
+ public void error(String msg, Throwable th) {
+ writeToLog(Level.ERROR, msg, th);
+ }
+
+
+ /**
+ * Checks if the trace is enabled for the logger
+ */
+ @Override
+ public boolean isTraceEnabled() {
+ return logger.isTraceEnabled();
+ }
+
+ /**
+ * Checks if the info is enabled for the logger
+ */
+ @Override
+ public boolean isInfoEnabled() {
+ return logger.isInfoEnabled();
+ }
+
+
+ /**
+ * Checks if the error is enabled for the logger
+ */
+ @Override
+ public boolean isErrorEnabled() {
+ return logger.isErrorEnabled();
+ }
+
+ /**
+ * Checks if the warn is enabled for the logger
+ */
+ @Override
+ public boolean isWarnEnabled() {
+ return logger.isWarnEnabled();
+ }
+
+
+ /**
+ * Checks if the debug is enabled for the logger
+ */
+ @Override
+ public boolean isDebugEnabled() {
+ return logger.isDebugEnabled();
+ }
+
+
+
+ /**
+ * Log a message or exception with arguments if the argument list is provided
+ * <P>
+ * If the logger is currently enabled for the given message level then the given message is forwarded to all the
+ * registered output Handler objects.
+ * </p>
+ *
+ * @param level
+ * One of the message level identifiers, e.g., SEVERE
+ * @param msg
+ * The string message
+ * @param th
+ * The exception object
+ * @param arguments
+ * The list of arguments
+ */
+ @Override
+ public void log(Level level, String msg, Throwable th, Object... arguments) {
+ writeToLog(level, msg, th, arguments);
+
+ }
+
+
+ /**
+ * Used by audit logger to record audit event with arguments at info level
+ *
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ @Override
+ public void auditEvent(String msg, Object... arguments) {
+ if (checkLoggerExists(AUDIT_LOGGER_NAME)) {
+ writeToLog(Level.INFO, msg, null, arguments);
+ }
+ }
+
+ /**
+ * Used by audit logger to record audit event with arguments at given level
+ *
+ * @param level
+ * One of the message level identifiers, e.g., SEVERE
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ @Override
+ public void auditEvent(Level level, String msg, Object... arguments) {
+ if (checkLoggerExists(AUDIT_LOGGER_NAME)) {
+ writeToLog(level, msg, null, arguments);
+ }
+
+ }
+
+
+ /**
+ * Used by metrics logger to record metrics event with arguments at info level
+ *
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ @Override
+ public void metricsEvent(String msg, Object... arguments) {
+ if (checkLoggerExists(METRICS_LOGGER_NAME)) {
+ writeToLog(Level.INFO, msg, null, arguments);
+ }
+
+ }
+
+ /**
+ * Used by metrics logger to record metrics event with arguments at given level
+ *
+ * @param level
+ * One of the message level identifiers, e.g., SEVERE
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ @Override
+ public void metricsEvent(Level level, String msg, Object... arguments) {
+ if (checkLoggerExists(METRICS_LOGGER_NAME)) {
+ writeToLog(level, msg, null, arguments);
+ }
+
+ }
+
+
+
+ /**
+ * Used by security logger to record security event with arguments at info level
+ *
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ @Override
+ public void securityEvent(String msg, Object... arguments) {
+ if (checkLoggerExists(SECURITY_LOGGER_NAME)) {
+ writeToLog(Level.INFO, msg, null, arguments);
+ }
+
+ }
+
+ /**
+ * Used by security logger to record security event with arguments at given level
+ *
+ * @param level
+ * One of the message level identifiers, e.g., SEVERE
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ @Override
+ public void securityEvent(Level level, String msg, Object... arguments) {
+ if (checkLoggerExists(SECURITY_LOGGER_NAME)) {
+ writeToLog(level, msg, null, arguments);
+ }
+
+ }
+
+
+ @Override
+ public void performanceEvent(String msg, Object... arguments) {
+ if (checkLoggerExists(PERF_LOGGER_NAME)) {
+ writeToLog(Level.INFO, msg, null, arguments);
+ }
+
+ }
+
+ /**
+ * Used by performance logger to record performance event with arguments at given level
+ *
+ * @param level
+ * One of the message level identifiers, e.g., SEVERE
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ @Override
+ public void performanceEvent(Level level, String msg, Object... arguments) {
+ if (checkLoggerExists(PERF_LOGGER_NAME)) {
+ writeToLog(level, msg, null, arguments);
+ }
+
+ }
+
+
+ @Override
+ public void applicationEvent(String msg, Object... arguments) {
+ if (checkLoggerExists(GENERAL_LOGGER_NAME)) {
+ writeToLog(Level.INFO, msg, null, arguments);
+ }
+
+ }
+
+ /**
+ * Used by application logger to record application generic event with arguments at given level
+ *
+ * @param level
+ * One of the message level identifiers, e.g., SEVERE
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ @Override
+ public void applicationEvent(Level level, String msg, Object... arguments) {
+ if (checkLoggerExists(GENERAL_LOGGER_NAME)) {
+ writeToLog(level, msg, null, arguments);
+
+ }
+ }
+
+
+ @Override
+ public void serverEvent(String msg, Object... arguments) {
+ if (checkLoggerExists(SERVER_LOGGER_NAME)) {
+ writeToLog(Level.INFO, msg, null, arguments);
+ }
+
+ }
+
+ /**
+ * Used by server logger to record server event with arguments at given level
+ *
+ * @param level
+ * One of the message level identifiers, e.g., SEVERE
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ public void serverEvent(Level level, String msg, Object... arguments) {
+ if (checkLoggerExists(SERVER_LOGGER_NAME)) {
+ writeToLog(level, msg, null, arguments);
+ }
+
+ }
+
+
+ @Override
+ public void policyEvent(String msg, Object... arguments) {
+ if (checkLoggerExists(POLICY_LOGGER_NAME)) {
+ writeToLog(Level.INFO, msg, null, arguments);
+ }
+
+ }
+
+ /**
+ * Used by policy logger to record policy event with arguments at given level
+ *
+ * @param level
+ * One of the message level identifiers, e.g., SEVERE
+ * @param msg
+ * The string message
+ * @param arguments
+ * The list of arguments
+ */
+ public void policyEvent(Level level, String msg, Object... arguments) {
+ if (checkLoggerExists(POLICY_LOGGER_NAME)) {
+ writeToLog(level, msg, null, arguments);
+ }
+
+ }
+
+ /**
+ * This method is called by each logging method to determine if the specified level is active, format the message,
+ * and write it to the slf4j logger.
+ *
+ * @param level
+ * The level as defined by EELFLogger.
+ * @param msg
+ * The message to be written, possibly formatted with parameters
+ * @param th
+ * Any throwable to be recorded as part of the logging event, or null
+ * @param arguments
+ * The optional format parameters for the message
+ */
+
+ private void writeToLog(Level level, String msg, Throwable th, Object... arguments) {
+ if (level.equals(Level.TRACE)) {
+ if (logger.isTraceEnabled()) {
+ if (th != null) {
+ if (arguments == null || arguments.length == 0) {
+ logger.trace(msg, th);
+ } else {
+ logger.trace(msg, arguments, th);
+ }
+ } else {
+ if (arguments == null || arguments.length == 0) {
+ logger.trace(msg);
+ } else {
+ logger.trace(msg, arguments);
+ }
+ }
+ }
+ } else if (level.equals(Level.INFO)) {
+ if (logger.isInfoEnabled()) {
+ if (th != null) {
+ if (arguments == null || arguments.length == 0) {
+ logger.info(msg, th);
+ } else {
+ logger.info(msg, arguments, th);
+ }
+ } else {
+ if (arguments == null || arguments.length == 0) {
+ logger.info(msg);
+ } else {
+ logger.info(msg, arguments);
+ }
+ }
+ }
+ } else if (level.equals(Level.WARN)) {
+ if (logger.isWarnEnabled()) {
+ if (th != null) {
+ if (arguments == null || arguments.length == 0) {
+ logger.warn(msg, th);
+ } else {
+ logger.warn(msg, arguments, th);
+ }
+ } else {
+ if (arguments == null || arguments.length == 0) {
+ logger.warn(msg);
+ } else {
+ logger.warn(msg, arguments);
+ }
+ }
+ }
+ } else if (level.equals(Level.ERROR)) {
+ if (logger.isErrorEnabled()) {
+ if (th != null) {
+ if (arguments == null || arguments.length == 0) {
+ logger.error(msg, th);
+ } else {
+ logger.error(msg, arguments, th);
+ }
+ } else {
+ if (arguments == null || arguments.length == 0) {
+ logger.error(msg);
+ } else {
+ logger.error(msg, arguments);
+ }
+ }
+ }
+ } else if (level.equals(Level.DEBUG)) {
+ if (logger.isDebugEnabled()) {
+ if (th != null) {
+ if (arguments == null || arguments.length == 0) {
+ logger.debug(msg, th);
+ } else {
+ logger.debug(msg, arguments, th);
+ }
+ } else {
+ if (arguments == null || arguments.length == 0) {
+ logger.debug(msg);
+ } else {
+ logger.debug(msg, arguments);
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * This method is called by each logging method to determine if the specified level is active, format the message,
+ * and write it to the slf4j logger.
+ *
+ * @param level
+ * The level as defined by EELFLogger.
+ * @param msg
+ * The message to be written, possibly formatted with parameters
+ * @param arguments
+ * The optional format parameters for the message
+ */
+
+ private void writeToLog(Level level, String msg, Object... arguments) {
+ if (level.equals(Level.TRACE)) {
+ if (logger.isTraceEnabled()) {
+ if (arguments == null || arguments.length == 0) {
+ logger.trace(msg);
+ } else {
+ logger.trace(msg, arguments);
+ }
+ }
+ } else if (level.equals(Level.INFO)) {
+ if (logger.isInfoEnabled()) {
+
+ if (arguments == null || arguments.length == 0) {
+ logger.info(msg);
+ } else {
+ logger.info(msg, arguments);
+ }
+ }
+ } else if (level.equals(Level.WARN)) {
+ if (logger.isWarnEnabled()) {
+
+ if (arguments == null || arguments.length == 0) {
+ logger.warn(msg);
+ } else {
+ logger.warn(msg, arguments);
+ }
+
+ }
+ } else if (level.equals(Level.ERROR)) {
+ if (logger.isErrorEnabled()) {
+
+ if (arguments == null || arguments.length == 0) {
+ logger.error(msg);
+ } else {
+ logger.error(msg, arguments);
+ }
+
+ }
+ } else if (level.equals(Level.DEBUG)) {
+ if (logger.isDebugEnabled()) {
+
+ if (arguments == null || arguments.length == 0) {
+ logger.debug(msg);
+ } else {
+ logger.debug(msg, arguments);
+ }
+
+ }
+ }
+ }
+
+ @Override
+ public void warn(Locale locale,EELFResolvableErrorEnum errorCode, Throwable th, String... args) {
+ writeToLog(Level.WARN, errorCode, locale, th, args);
+
+ }
+
+ @Override
+ public void info(Locale locale, EELFResolvableErrorEnum errorCode, Throwable th, String... args) {
+ writeToLog(Level.INFO, errorCode, locale, th, args);
+
+ }
+
+ @Override
+ public void debug(Locale locale, EELFResolvableErrorEnum errorCode, Throwable th, String... args) {
+ writeToLog(Level.DEBUG, errorCode, locale, th, args);
+
+ }
+
+ @Override
+ public void trace(Locale locale, EELFResolvableErrorEnum errorCode, Throwable th, String... args) {
+ writeToLog(Level.TRACE, errorCode, locale, th, args);
+
+ }
+
+ @Override
+ public void error(Locale locale, EELFResolvableErrorEnum errorCode, Throwable th, String... args) {
+ writeToLog(Level.ERROR, errorCode, locale, th, args);
+
+ }
+
+
+ /**
+ * This method is called by each logging method to determine if the specified level is active, format the message,
+ * and write it to the slf4j logger.
+ *
+ * @param level
+ * The level as defined by EELFLogger.
+ * @param resource
+ * Retrieves the error code from the loaded bundle(s)
+ * @param locale
+ * The locale to use when selecting and formatting the message
+ * @param th
+ * Any throwable to be recorded as part of the logging event, or null
+ * @param arguments
+ * The optional format parameters for the message
+ */
+
+
+ private void writeToLog(Level level, EELFResolvableErrorEnum resource, Locale locale, Throwable th, String... arguments) {
+ if (level.equals(Level.TRACE)) {
+ if (logger.isTraceEnabled()) {
+ if (th != null) {
+ if (locale == null) {
+ logger.trace(EELFResourceManager.format(resource, th, arguments));
+ } else {
+ logger.trace(EELFResourceManager.format(locale, resource, th, arguments));
+ }
+ } else {
+ if (locale == null) {
+ logger.trace(EELFResourceManager.format(resource, arguments));
+ } else {
+ logger.trace(EELFResourceManager.format(locale, resource, arguments));
+ }
+ }
+ }
+ } else if (level.equals(Level.INFO)) {
+ if (logger.isInfoEnabled()) {
+ if (th != null) {
+ if (locale == null) {
+ logger.info(EELFResourceManager.format(resource, th, arguments));
+ } else {
+ logger.info(EELFResourceManager.format(locale, resource, th, arguments));
+ }
+ } else {
+ if (locale == null) {
+ logger.info(EELFResourceManager.format(resource, arguments));
+ } else {
+ logger.info(EELFResourceManager.format(locale, resource, arguments));
+ }
+ }
+ }
+ } else if (level.equals(Level.WARN)) {
+ if (logger.isWarnEnabled()) {
+ if (th != null) {
+ if (locale == null) {
+ logger.warn(EELFResourceManager.format(resource, th, arguments));
+ } else {
+ logger.warn(EELFResourceManager.format(locale, resource, th, arguments));
+ }
+ } else {
+ if (locale == null) {
+ logger.warn(EELFResourceManager.format(resource, arguments));
+ } else {
+ logger.warn(EELFResourceManager.format(locale, resource, arguments));
+ }
+ }
+ }
+ } else if (level.equals(Level.ERROR)) {
+ if (logger.isErrorEnabled()) {
+ if (th != null) {
+ if (locale == null) {
+ logger.error(EELFResourceManager.format(resource, th, arguments));
+ } else {
+ logger.error(EELFResourceManager.format(locale, resource, th, arguments));
+ }
+ } else {
+ if (locale == null) {
+ logger.error(EELFResourceManager.format(resource, arguments));
+ } else {
+ logger.error(EELFResourceManager.format(locale, resource, arguments));
+ }
+ }
+ }
+ } else if (level.equals(Level.DEBUG)) {
+ if (logger.isDebugEnabled()) {
+ if (th != null) {
+ if (locale == null) {
+ logger.debug(EELFResourceManager.format(resource, th, arguments));
+ } else {
+ logger.debug(EELFResourceManager.format(locale, resource, th, arguments));
+ }
+ } else {
+ if (locale == null) {
+ logger.debug(EELFResourceManager.format(resource, arguments));
+ } else {
+ logger.debug(EELFResourceManager.format(locale, resource, arguments));
+ }
+ }
+ }
+ }
+
+ }
+
+
+
+ @Override
+ public void warn(Locale locale, EELFResolvableErrorEnum errorCode, String... args) {
+ writeToLog(Level.WARN, errorCode, locale, null, args);
+
+ }
+
+
+
+ @Override
+ public void info(Locale locale, EELFResolvableErrorEnum errorCode, String... args) {
+ writeToLog(Level.INFO, errorCode, locale, null, args);
+
+ }
+
+
+
+ @Override
+ public void debug(Locale locale, EELFResolvableErrorEnum errorCode, String... args) {
+ writeToLog(Level.DEBUG, errorCode, locale, null, args);
+
+ }
+
+
+
+ @Override
+ public void error(Locale locale, EELFResolvableErrorEnum errorCode, String... args) {
+ writeToLog(Level.ERROR, errorCode, locale, null, args);
+ }
+
+
+
+ @Override
+ public void trace( Locale locale, EELFResolvableErrorEnum errorCode,String... args) {
+ writeToLog(Level.TRACE, errorCode, locale, null, args);
+
+ }
+
+
+
+ @Override
+ public void warn(EELFResolvableErrorEnum errorCode, String... args) {
+ writeToLog(Level.WARN, errorCode, null, null, args);
+
+ }
+
+
+
+
+ public void info(EELFResolvableErrorEnum errorCode, String... args) {
+ writeToLog(Level.INFO, errorCode, null, null, args);
+
+ }
+
+
+
+
+ public void debug(EELFResolvableErrorEnum errorCode, String... args) {
+ writeToLog(Level.DEBUG, errorCode, null, null, args);
+
+ }
+
+
+
+
+ public void error(EELFResolvableErrorEnum errorCode, String... args) {
+ writeToLog(Level.ERROR, errorCode, null, null, args);
+
+ }
+
+
+
+
+ public void trace(EELFResolvableErrorEnum errorCode, String... args) {
+ writeToLog(Level.TRACE, errorCode, null, null, args);
+
+ }
+
+
+
+
+ public void warn(EELFResolvableErrorEnum errorCode, Throwable th,
+ String... args) {
+ writeToLog(Level.WARN, errorCode, null, th, args);
+
+ }
+
+
+
+
+ public void info(EELFResolvableErrorEnum errorCode, Throwable th,
+ String... args) {
+ writeToLog(Level.INFO, errorCode, null, th, args);
+
+ }
+
+
+
+
+ public void debug(EELFResolvableErrorEnum errorCode, Throwable th,
+ String... args) {
+ writeToLog(Level.DEBUG, errorCode, null, th, args);
+
+ }
+
+
+
+
+ public void error(EELFResolvableErrorEnum errorCode, Throwable th,
+ String... args) {
+ writeToLog(Level.ERROR, errorCode, null, th, args);
+
+ }
+
+
+
+
+ public void trace(EELFResolvableErrorEnum errorCode, Throwable th,
+ String... args) {
+ writeToLog(Level.TRACE, errorCode, null, th, args);
+
+ }
+
+
+ @Override
+ public void setLevel(Level level) {
+ if (logger instanceof ch.qos.logback.classic.Logger) {
+ switch (level) {
+ case INFO: ((ch.qos.logback.classic.Logger)logger).setLevel(ch.qos.logback.classic.Level.INFO); break;
+ case ERROR: ((ch.qos.logback.classic.Logger)logger).setLevel(ch.qos.logback.classic.Level.ERROR); break;
+ case DEBUG: ((ch.qos.logback.classic.Logger)logger).setLevel(ch.qos.logback.classic.Level.DEBUG); break;
+ case TRACE: ((ch.qos.logback.classic.Logger)logger).setLevel(ch.qos.logback.classic.Level.TRACE); break;
+ case WARN: ((ch.qos.logback.classic.Logger)logger).setLevel(ch.qos.logback.classic.Level.WARN); break;
+ case OFF: ((ch.qos.logback.classic.Logger)logger).setLevel(ch.qos.logback.classic.Level.OFF); break;
+
+ }
+
+ }
+
+ }
+
+
+ @Override
+ public void disableLogging() {
+ if (logger instanceof ch.qos.logback.classic.Logger) {
+ ((ch.qos.logback.classic.Logger)logger).setLevel(ch.qos.logback.classic.Level.OFF);
+ }
+
+ }
+
+
+ private boolean checkLoggerExists(String name) {
+ if (logger.getName().equals(name)) {
+ return true;
+
+ } else {
+ return false;
+ }
+ }
+
+
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/eelf/exception/EELFException.java b/valetapi/src/main/java/org/onap/fgps/api/eelf/exception/EELFException.java
new file mode 100644
index 0000000..4a18796
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/eelf/exception/EELFException.java
@@ -0,0 +1,78 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.eelf.exception;
+
+public class EELFException extends Exception {
+ /**
+ * The serial version number for this class
+ */
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * Create the exception, detailing the reason with a message
+ *
+ * @param message
+ * The message that details the reason for the exception
+ */
+ public EELFException(String message) {
+ super(message);
+ }
+
+ /**
+ * Create an exception by wrapping another exception
+ *
+ * @param message
+ * The message that details the exception
+ * @param cause
+ * Any exception that was caught and was the reason for this failure
+ */
+ public EELFException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+ /**
+ * Create the exception by wrapping another exception
+ *
+ * @param cause
+ * The cause of this exception
+ */
+ public EELFException(Throwable cause) {
+ super(cause);
+ }
+
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/eelf/i18n/EELFMsgs.java b/valetapi/src/main/java/org/onap/fgps/api/eelf/i18n/EELFMsgs.java
new file mode 100644
index 0000000..0866f2c
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/eelf/i18n/EELFMsgs.java
@@ -0,0 +1,85 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.eelf.i18n;
+
+public enum EELFMsgs implements EELFResolvableErrorEnum {
+
+ /**
+ * Loading default logging configuration from system resource file "{0}"
+ */
+ LOADING_DEFAULT_LOG_CONFIGURATION,
+
+ /**
+ * No log configuration could be found or defaulted!
+ */
+ NO_LOG_CONFIGURATION,
+
+ /**
+ * Logging has already been initialized, check the container logging definitions to ensure they represent your
+ * desired logging configuration.
+ */
+ LOGGING_ALREADY_INITIALIZED,
+
+ /**
+ * Searching path "{0}" for log configuration file "{1}"
+ */
+ SEARCHING_LOG_CONFIGURATION,
+
+ /**
+ * Loading logging configuration from file "{0}"
+ */
+ LOADING_LOG_CONFIGURATION,
+
+
+ /**
+ * An unsupported logging framework is bound to SLF4J.
+ */
+ UNSUPPORTED_LOGGING_FRAMEWORK;
+
+
+
+ /**
+ * Static initializer to ensure the resource bundles for this class are loaded...
+ */
+ static {
+ EELFResourceManager.loadMessageBundle("org/onap/eelf/Resources");
+ }
+
+
+
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/eelf/i18n/EELFResolvableErrorEnum.java b/valetapi/src/main/java/org/onap/fgps/api/eelf/i18n/EELFResolvableErrorEnum.java
new file mode 100644
index 0000000..bd50bd7
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/eelf/i18n/EELFResolvableErrorEnum.java
@@ -0,0 +1,52 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.eelf.i18n;
+
+
/**
 * Marker interface that makes an error-code enum accessible to the Logger class.
 * An enum must implement this interface to be enabled as a set of resolvable
 * error codes. Every error code must also have a matching resolution and
 * description entry, keyed by the enumerated value, in a resource file under
 * the resources package directory.
 */
public interface EELFResolvableErrorEnum {
}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/eelf/i18n/EELFResourceManager.java b/valetapi/src/main/java/org/onap/fgps/api/eelf/i18n/EELFResourceManager.java
new file mode 100644
index 0000000..4f9a72d
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/eelf/i18n/EELFResourceManager.java
@@ -0,0 +1,715 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.eelf.i18n;
+
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.MissingResourceException;
+import java.util.ResourceBundle;
+
+/**
+ * The resource manager is used to retrieve and format resources from pluggable resource bundles and is capable of
+ * managing sets of bundles over multiple locales for message skeletons, message descriptions, message resolutions, and
+ * just basic resources.
+ * <p>
+ * Resource Bundles are pluggable definitions of any type of resource that may be needed by the application and can be
+ * externalized. In a typical GUI application, for example, resources may be things like icons, bitmaps, cursors, or
+ * other graphic character definitions. In many cases, resources are also the messages that are issued by an
+ * application.
+ * </p>
+ * <p>
+ * By externalizing the resources, and then providing a mechanism to load different versions of the resources, the
+ * application can be made to support multiple different languages and formatting customs. This is the essential
+ * mechanism in java to support internationalization (i18n). The key concept in the java support is that of a
+ * <em>resource bundle</em>. A resource bundle is a set, or collection, of resources identified by a key, and where each
+ * bundle represents a set of the same resource keys but with values specific to a particular language or region. For
+ * example, a message with the resource key of "ERROR_123" could have an entry in a US-English bundle as well as a
+ * France-French bundle. Bundles are usually identified by both a language and region code, although that is not
+ * required. For example, a bundle specifying only the French language would be used for Canada French as well as
+ * France.
+ * </p>
+ * <p>
+ * This class supports the loading of multiple types of resource bundles and the formatting of messages from those
+ * bundles. A default bundle is loaded based on the server locale for use by server-side implementation. However, for
+ * client-side implementations, this resource manager also allows the locale to be specified and a resource obtained
+ * from a locale-specific bundle. This allows the code to format messages and resources appropriate to the client, and
+ * not just the server.
+ * </p>
+ * <p>
+ * This ResourceManager also allows for additional bundles to be loaded to support bundles that may be needed for
+ * specific providers or extensions that may be loaded dynamically. In these cases, these provider implementations can
+ * request that their bundles be loaded and managed as part of the overall resources. It is important to have each
+ * provider or extension identify their resources using unique keys, because the resource manager will format the first
+ * resource it finds using the specified key by scanning the bundles. If two (or more) bundles define resources with the
+ * same key, the first occurrence found will be used.
+ * </p>
+ * <p>
+ * lastly, this resource manager has the ability to manage multiple different sets of resource bundles, segregated from
+ * each other to support different uses. for example, a set of bundles can be loaded for the messages, while another set
+ * can be loaded for the resolutions, and yet another set loaded for the descriptions. this support can be extended to
+ * support any number of different sets of resource bundles in the future if needed.
+ * </p>
+ *
+ * @since Sept 10, 2015
+ * @version $Id$
+ */
+public final class EELFResourceManager {
+
+ /**
+ * Error message to be generated if a resource is requested that cannot be formatted because of invalid/illegal
+ * arguments
+ */
+ private static final String BAD_ARGUMENTS = "EELF9997E Invalid arguments to format resource id [%s]!\n";
+
+ /**
+ * The message to be generated if a resource is requested that does not exist in the bundle
+ */
+ private static final String BAD_RESOURCE =
+ "EELF9998E Resource id [%s] cannot be formatted - no resource with that id exists!\n";
+
+ /**
+ * Error message that prefixes the partial stack trace when a resource cannot be formatted
+ */
+ private static final String CALLED_FROM = "Request for resource was made from:\n";
+
+
+
+ /**
+ * The set of all description bundle base names that have been requested to be loaded
+ */
+ private static List<String> descriptionBaseNames = new ArrayList<String>();
+
+ /**
+ * The set of all description resource bundles loaded for specific locales. The key of the map is a locale that has
+ * been requested. The first time a locale is requested, each bundle based on the set of base names, if they can be
+ * found, is loaded and inserted into the map. Each subsequent call will then return resources from the first bundle
+ * that contains the resource. If a different locale is requested that has not yet been loaded, then it is also
+ * loaded on first use. If no bundle can be found for the locale, the default bundle is loaded and used.
+ */
+ private static Map<String, Map<String, ResourceBundle>> descriptionBundles =
+ new HashMap<String, Map<String, ResourceBundle>>();
+
+ /**
+ * The set of all messages bundle base names that have been requested to be loaded
+ */
+ private static List<String> messageBaseNames = new ArrayList<String>();
+
+ /**
+ * The set of all message resource bundles loaded for specific locales. The key of the map is a locale that has been
+ * requested. The first time a locale is requested, each bundle based on the set of base names, if they can be
+ * found, is loaded and inserted into the map. Each subsequent call will then return resources from the first bundle
+ * that contains the resource. If a different locale is requested that has not yet been loaded, then it is also
+ * loaded on first use. If no bundle can be found for the locale, the default bundle is loaded and used.
+ */
+ private static Map<String, Map<String, ResourceBundle>> messageBundles =
+ new HashMap<String, Map<String, ResourceBundle>>();
+
+ /**
+ * The message to be generated if a format is requested and no resource bundle was loaded
+ */
+ private static final String NO_BUNDLE = "EELF9999E Resource id [%s] cannot be formatted - no bundle loaded!\n";
+
+ /**
+ * The set of all resolution bundle base names that have been requested to be loaded
+ */
+ private static List<String> resolutionBaseNames = new ArrayList<String>();
+
+
+
+ /**
+ * The set of all resolution resource bundles loaded for specific locales. The key of the map is a locale that has
+ * been requested. The first time a locale is requested, each bundle based on the set of base names, if they can be
+ * found, is loaded and inserted into the map. Each subsequent call will then return resources from the first bundle
+ * that contains the resource. If a different locale is requested that has not yet been loaded, then it is also
+ * loaded on first use. If no bundle can be found for the locale, the default bundle is loaded and used.
+ */
+ private static Map<String, Map<String, ResourceBundle>> resolutionBundles =
+ new HashMap<String, Map<String, ResourceBundle>>();
+
+ private static String delimiReqularExp = "\\|";
+
+ private static enum RESOURCE_TYPES{
+ code,
+ msg,
+ desc,
+ resolution;
+ }
+
+ /**
+ * This is the actual formatter used to format the resource with the supplied arguments, if any. The name of the
+ * method has a leading underscore to indicate that it is an internal method not to be directly called by any client
+ * code.
+ *
+ * @param locale
+ * The locale that we want to load the resource for
+ * @param identifier
+ * The message identifier, if one is to be assigned to the message, or null.
+ * @param resourceId
+ * The resource id to be formatted
+ * @param exception
+ * an optional exception to be formatted
+ * @param arguments
+ * The arguments inserted into the resource, if any
+ * @return The formatted resource
+ */
+ private static String format(Locale locale, String identifier, String resourceId, Throwable exception,
+ String... arguments) {
+ String skeleton = getMessage(locale, resourceId);
+
+ StringBuffer buffer = new StringBuffer();
+ try {
+ if (identifier != null) {
+ buffer.append(identifier.toUpperCase());
+ buffer.append(" ");
+ }
+ buffer.append(MessageFormat.format(skeleton, (Object[]) arguments));
+
+ if (exception != null) {
+ buffer.append(String.format("\nException %s: %s", exception.getClass().getSimpleName(),
+ exception.getMessage()));
+ StackTraceElement[] stack = exception.getStackTrace();
+ buffer.append("\n" + formatPartialStackTrace(stack, 0, 5));
+ }
+
+ } catch (IllegalArgumentException e) {
+ return String.format(BAD_ARGUMENTS, resourceId);
+ }
+ return buffer.toString();
+ }
+
+ /**
+ * Format a cause (nested exception). The formatted cause is appended to the current buffer.
+ *
+ * @param t
+ * The cause to be formatted.
+ * @param buffer
+ * The buffer to append the formatted output to.
+ */
+ private static void formatCause(Throwable t, StringBuffer buffer) {
+ buffer.append("Caused by: " + t.getClass().getName() + ": " + t.getMessage() + "\n");
+ formatStackFrames(t, buffer);
+ Throwable cause = t.getCause();
+ if (cause != null) {
+ formatCause(cause, buffer);
+ }
+ }
+
+ /**
+ * Format a stack trace
+ *
+ * @param t
+ * The throwable containing the stack trace to be formatted
+ * @param buffer
+ * The buffer to append the stack trace output to
+ */
+ private static void formatStackFrames(Throwable t, StringBuffer buffer) {
+ StackTraceElement[] stack = t.getStackTrace();
+ buffer.append(formatPartialStackTrace(stack, 0, 12));
+ }
+
+ /**
+ * This is a private method used to actually return a resource from a map of bundles loaded for a specific purpose.
+ *
+ * @param resourceId
+ * The resource ID that we are looking for
+ * @param bundles
+ * A map of the bundles loaded for the specific locale
+ * @return the specified resource, or if no bundles are loaded, a default message. If the resource cannot be found,
+ * then a diagnostic message is returned indicating the failure to load the resource and where it was called
+ * from.
+ */
+ private static String getResourceTemplateFromBundle(String resourceId, Map<String, ResourceBundle> bundles, RESOURCE_TYPES type) {
+ String output = null;
+ if (bundles == null) {
+ StackTraceElement[] stack = Thread.currentThread().getStackTrace();
+ return String.format(NO_BUNDLE, resourceId) + CALLED_FROM + formatPartialStackTrace(stack, 3, 5);
+ } else {
+ for (Map.Entry<String, ResourceBundle> entry : bundles.entrySet()) {
+ try {
+ String str = entry.getValue().getString(resourceId);
+ if (str != null) {
+ String[] values= str.split(delimiReqularExp);
+ switch (type) {
+ case code: if (values.length >= 1 ) output = values[0]; break;
+ case msg: if (values.length >= 2 ) output = values[1]; break;
+ case resolution: if (values.length >= 3 ) output = values[2]; break;
+ case desc: if (values.length >= 4 ) output = values[3]; break;
+ default: output = entry.getValue().getString(resourceId);
+ }
+
+ }
+ return output;
+ } catch (MissingResourceException e) {
+ continue;
+ }
+ }
+ }
+
+ StackTraceElement[] stack = Thread.currentThread().getStackTrace();
+ return String.format(BAD_RESOURCE, resourceId) + CALLED_FROM + formatPartialStackTrace(stack, 4, 9);
+ }
+
+ /**
+ * This method is a helper that allows a caller to format a variable set of strings as a list of the form
+ * "[value,...,value]"
+ *
+ * @param values
+ * The variable argument list of string values to be formatted as a list
+ * @return The list of values
+ */
+ public static String asList(String... values) {
+ StringBuffer buffer = new StringBuffer();
+ buffer.append("[");
+ for (String value : values) {
+ buffer.append(value);
+ buffer.append(",");
+ }
+ if (buffer.length() > 1) {
+ buffer.delete(buffer.length() - 1, buffer.length());
+ }
+ buffer.append("]");
+ return buffer.toString();
+ }
+
+ /**
+ * Formats a message identified by the application msg enumeration
+ *
+ * @param locale
+ * The locale that we want to load the resource for
+ * @param resourceId
+ * The resource to be formatted
+ * @param arguments
+ * The arguments to the message
+ * @return The formatted message
+ */
+ public static String format(Locale locale, EELFResolvableErrorEnum resourceId, String... arguments) {
+ return format(locale, getIdentifier(resourceId),resourceId.toString(), null, arguments);
+ }
+
+
+
+ /**
+ * Formats a message identified by the application msg enumeration
+ *
+ * @param locale
+ * The locale that we want to load the resource for
+ * @param resourceId
+ * The resource to be formatted
+ * @param exception
+ * The exception to be formatted
+ * @param arguments
+ * The arguments to the message
+ * @return The formatted message
+ */
+ public static String
+ format(Locale locale, EELFResolvableErrorEnum resourceId, Throwable exception, String... arguments) {
+ return format(locale, getIdentifier(resourceId),resourceId.toString(), exception, arguments);
+ }
+
+
+
+ /**
+ * Formats a message identified by the application msg enumeration
+ *
+ * @param resourceId
+ * The resource to be formatted
+ * @param arguments
+ * The arguments to the message
+ * @return The formatted message
+ */
+ public static String format(EELFResolvableErrorEnum resourceId, String... arguments) {
+ Locale locale = Locale.getDefault();
+ return format(locale, getIdentifier(resourceId),resourceId.toString(), null, arguments);
+ }
+
+ /**
+ * Formats a message identified by the application msg enumeration
+ *
+ * @param resourceId
+ * The resource to be formatted
+ * @param exception
+ * the exception to be formatted
+ * @param arguments
+ * The arguments to the message
+ * @return The formatted message
+ */
+ public static String format(EELFResolvableErrorEnum resourceId, Throwable exception, String... arguments) {
+ Locale locale = Locale.getDefault();
+ return format(locale, resourceId, exception, arguments);
+ }
+
+
+
+ /**
+ * This method formats an exception object (throwable) in a standard way and returns the result as a string. This
+ * enables the exception to be written to a log file easily.
+ *
+ * @param t
+ * The throwable to be formatted
+ * @return The formatted exception
+ */
+ public static String format(Throwable t) {
+ StringBuffer buffer = new StringBuffer();
+ Thread currentThread = Thread.currentThread();
+
+ buffer.append("Exception in thread " + currentThread.getName() + " " + t.getClass().getName() + ": "
+ + t.getMessage() + "\n");
+ formatStackFrames(t, buffer);
+ Throwable cause = t.getCause();
+ if (cause != null) {
+ formatCause(cause, buffer);
+ }
+
+ return buffer.toString();
+ }
+
+ // TODO The format(throwable) currently only supports formatting of exceptions in en_US (the default based on the
+ // encoding used to create the source file. In the future, this could/should be changed to format exceptions using a
+ // specified locale if needed. This is a low priority, as exception formatting is likely a server-side output only,
+ // and the end user would not be aware of these outputs.
+
+ /**
+ * This method formats an exception object (throwable) in a standard way and returns the result as a string. This
+ * enables the exception to be written to a log file easily.
+ *
+ * @param t
+ * The throwable to be formatted
+ * @param contextMsg
+ * Message to provide application context of this exception
+ * @return The formatted exception
+ */
+ public static String format(Throwable t, String contextMsg) {
+ StringBuffer buffer = new StringBuffer();
+ Thread currentThread = Thread.currentThread();
+
+ buffer.append("Exception in thread " + currentThread.getName() + " " + t.getClass().getName() + ": "
+ + t.getMessage() + " [contextMsg:" + contextMsg + "]\n");
+ formatStackFrames(t, buffer);
+ Throwable cause = t.getCause();
+ if (cause != null) {
+ formatCause(cause, buffer);
+ }
+
+ return buffer.toString();
+ }
+
+ /**
+ * Generates a partial stack trace showing the path taken to get to the caller. This method is not shown in the
+ * stack trace and does not count toward the limit number.
+ *
+ * @trace The stack trace array
+ * @param start
+ * the index into the array where the stack trace is to be started...
+ * @param limit
+ * The maximum number of entries to display in the stack trace...
+ * @return A string formatted result of the partial stack trace
+ */
+ private static String formatPartialStackTrace(StackTraceElement[] trace, int start, int limit) {
+ StringBuffer buffer = new StringBuffer();
+
+ if (trace != null && trace.length > start) {
+ for (int i = start; i < trace.length && i <= limit; i++) {
+ StackTraceElement frame = trace[i];
+ buffer.append(" at " + frame.getClassName() + "." + frame.getMethodName() + "("
+ + frame.getFileName() + ":" + frame.getLineNumber() + ")\n");
+ }
+ if (limit < trace.length) {
+ buffer.append(" ... " + Integer.toString(trace.length - limit) + " more frames\n");
+ }
+ }
+ return buffer.toString();
+ }
+
+ /**
+ * Obtains the specified description from description resource bundles that are loaded based on the resource id.
+ *
+ * @param locale
+ * The locale that we want to load the resource for
+ * @param resourceId
+ * The EELFResolvableErrorEnum to be located
+ * @return The requested resource, or a default message indicating that the resource id could not be found, or a
+ * default error message indicating that no bundles could be loaded.
+ */
+ public static String getDescription(Locale locale, EELFResolvableErrorEnum resourceId) {
+ Map<String, ResourceBundle> localBundles = getMessageBundle(locale, descriptionBundles, descriptionBaseNames);
+ return getResourceTemplateFromBundle(resourceId.toString(), localBundles,RESOURCE_TYPES.desc);
+ }
+
+ /**
+ * This is a convenience method that returns the message description indicated by the specified id, or key, in the
+ * resource bundle.
+ *
+ * @param resourceId
+ * The EELFResolvableErrorEnum to be located
+ * @return The requested description, or a default message indicating that the resource id could not be found.
+ */
+ public static String getDescription(EELFResolvableErrorEnum resourceId) {
+ return getDescription(Locale.getDefault(), resourceId);
+ }
+
+ /**
+ * Returns the message resource indicated by the specified id, or key, in the first message resource bundle loaded
+ * for the locale that contains the resource. If no bundle can be found that contains the resource, a "BAD_RESOURCE"
+ * resource is returned. If no bundles can be found for the specified locale, including the default bundle, then a
+ * "NO_BUNDLE" error resource is returned.
+ *
+ * @param locale
+ * The locale that we want to load the resource for
+ * @param resourceId
+ * The resource to be located
+ * @return The requested resource, or a default message indicating that the resource id could not be found, or a
+ * default error message indicating that no bundles could be loaded.
+ */
+ public static String getMessage(Locale locale, String resourceId) {
+ Map<String, ResourceBundle> localBundles = getMessageBundle(locale, messageBundles, messageBaseNames);
+ return getResourceTemplateFromBundle(resourceId, localBundles,RESOURCE_TYPES.msg);
+ }
+
+ /**
+ * This is a convenience method that returns the resource indicated by the specified id, or key, in the resource
+ * bundle.
+ *
+ * @param resourceId
+ * The resource to be located
+ * @return The requested resource, or a default message indicating that the resource id could not be found.
+ */
+ public static String getMessage(String resourceId) {
+ return getMessage(Locale.getDefault(), resourceId);
+ }
+
+ /**
+ * This is a method that returns the resource indicated by the specified id, or key, in the resource
+ * bundle.
+ *
+ * @param resourceId
+ * The resource to be located
+ * @return The requested resource, or a default message indicating that the resource id could not be found.
+ */
+ public static String getMessage(EELFResolvableErrorEnum resourceId) {
+ return getMessage(Locale.getDefault(), resourceId.toString());
+ }
+
+ /**
+ * This is a method that returns the resource indicated by the specified id, or key, in the resource
+ * bundle.
+ *
+ * @param resourceId
+ * The resource to be located
+ * @return The requested resource, or a default message indicating that the resource id could not be found.
+ */
+ public static String getMessage(EELFResolvableErrorEnum resourceId, String... args) {
+ return format(Locale.getDefault(), resourceId, args);
+ }
+ /**
+ * This method is used to load resource bundles by locale and cache them. It can be called any number of times, but
+ * will load a specific resource bundle for a specific locale only once.
+ *
+ * @param locale
+ * The locale to be used to locate the appropriate bundle
+ * @param bundles
+ * The map of maps of bundles by locales. The outer map is keyed by locale. The inner map is keyed by
+ * bundle base names, which allows multiple bundles to be loaded and searched for the same locale,
+ * allowing bundles to be segregated by functional group or other uses.
+ * @return The ResourceBundle that applies to the given locale
+ */
+ private static Map<String, ResourceBundle> getMessageBundle(Locale locale,
+ Map<String, Map<String, ResourceBundle>> bundles, List<String> baseNames) {
+ synchronized (messageBundles) {
+ List<String> failed = new ArrayList<String>();
+ Map<String, ResourceBundle> bundleMap = bundles.get(locale.toLanguageTag());
+ if (bundleMap == null) {
+ bundleMap = new HashMap<String, ResourceBundle>();
+ String languageTag = locale.toLanguageTag();
+ messageBundles.put(languageTag, bundleMap);
+ for (String baseName : baseNames) {
+ if (!loadResourceBundle(bundleMap, baseName, locale)) {
+ failed.add(baseName);
+ }
+ }
+ } else {
+ if (!bundleMap.keySet().containsAll(baseNames)) {
+ HashSet<String> differences = new HashSet<String>(baseNames);
+ differences.removeAll(bundleMap.keySet());
+ for (String baseName : differences) {
+ if (!loadResourceBundle(bundleMap, baseName, locale)) {
+ failed.add(baseName);
+ }
+ }
+ }
+ }
+
+ if (!failed.isEmpty()) {
+ for (String baseName : failed) {
+ messageBaseNames.remove(baseName);
+ }
+ }
+ return bundleMap;
+ }
+ }
+
+ /**
+ * Obtains the specified resolution from the resource bundles that are loaded based on the resource id.
+ *
+ * @param locale
+ * The locale that we want to load the resource for
+ * @param resource
+ * The resource to be located
+ * @return The requested resource, or a default message indicating that the resource id could not be found, or a
+ * default error message indicating that no bundles could be loaded.
+ */
+ public static String getResolution(Locale locale, EELFResolvableErrorEnum resource) {
+ Map<String, ResourceBundle> localBundles = getMessageBundle(locale, resolutionBundles, resolutionBaseNames);
+ return getResourceTemplateFromBundle(resource.toString(), localBundles,RESOURCE_TYPES.resolution);
+ }
+
+ /**
+ * This is a convenience method that returns the message resolution indicated by the specified id, or key, in the
+ * resource bundle.
+ *
+ * @param resource
+ * The resource to be located
+ * @return The requested resolution, or a default message indicating that the resource id could not be found.
+ */
+ public static String getResolution(EELFResolvableErrorEnum resource) {
+ return getResolution(Locale.getDefault(), resource);
+ }
+
+ /**
+ * Called to request that a specified description bundle base name be added to the set of resources managed by the
+ * resource manager.
+ *
+ * @param baseName
+ * The bundle base name to be added to the set of bundle base names that are being managed.
+ */
+ public static void loadDescriptionBundle(String baseName) {
+ if (!descriptionBaseNames.contains(baseName)) {
+ descriptionBaseNames.add(baseName);
+ }
+ }
+
+ /**
+ * Called to request that a specified message bundle base name be added to the set of resources managed by the
+ * resource manager.
+ *
+ * @param baseName
+ * The bundle base name to be added to the set of bundle base names that are being managed.
+ */
+ public static void loadMessageBundle(String baseName) {
+ if (!messageBaseNames.contains(baseName)) {
+ messageBaseNames.add(baseName);
+ }
+ loadDescriptionBundle(baseName);
+ loadResolutionBundle(baseName);
+ }
+
+ /**
+ * Called to request that a specified resolution bundle base name be added to the set of resources managed by the
+ * resource manager.
+ *
+ * @param baseName
+ * The bundle base name to be added to the set of bundle base names that are being managed.
+ */
+ public static void loadResolutionBundle(String baseName) {
+ if (!resolutionBaseNames.contains(baseName)) {
+ resolutionBaseNames.add(baseName);
+ }
+ }
+
+ /**
+ * Load the specified resource bundle identified by the base name and locale, and insert it into the provided map,
+ * where the key is the bundle base name. The map provided will be specific to the indicated locale.
+ *
+ * @param bundleMap
+ * the locale-specific HashMap to contain all loaded resource bundles for that locale
+ * @param baseName
+ * The base name of the bundle to be loaded
+ * @param locale
+ * The locale to load the bundle for
+ * @return True if the bundle was loaded, false if it failed
+ */
+ private static boolean loadResourceBundle(Map<String, ResourceBundle> bundleMap, String baseName, Locale locale) {
+ try {
+ ResourceBundle bundle = ResourceBundle.getBundle(baseName, locale);
+ bundleMap.put(baseName, bundle);
+ return true;
+ } catch (MissingResourceException e) {
+ System.err.println(String.format("Unable to load resource bundle %s for locale %s", baseName,
+ locale.toLanguageTag()));
+ }
+ return false;
+ }
+
+ /**
+ * A private default constructor to prevent anyone else from instantiating the object. We always load the default
+ * bundle identified by the default base name. We can optionally load and manage additional bundles as well.
+ */
+ private EELFResourceManager() {
+ }
+
+ /**
+ * This is a convenience method that returns the message error code indicated by the specified id, or key, in the
+ * resource bundle.
+ *
+ * @param resourceId
+ * The EELFResolvableErrorEnum to be located
+ * @return The requested description, or a default message indicating that the resource id could not be found.
+ */
+ public static String getIdentifier(EELFResolvableErrorEnum resourceId) {
+ return getIdentifier(Locale.getDefault(), resourceId);
+ }
+
+ /**
+ * Obtains the specified description from description resource bundles that are loaded based on the resource id.
+ *
+ * @param locale
+ * The locale that we want to load the resource for
+ * @param resourceId
+ * The EELFResolvableErrorEnum to be located
+ * @return The requested resource, or a default message indicating that the resource id could not be found, or a
+ * default error message indicating that no bundles could be loaded.
+ */
+ public static String getIdentifier(Locale locale, EELFResolvableErrorEnum resourceId) {
+ Map<String, ResourceBundle> localBundles = getMessageBundle(locale, messageBundles, messageBaseNames);
+ return getResourceTemplateFromBundle(resourceId.toString(), localBundles,RESOURCE_TYPES.code);
+ }
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/exception/CipherUtilException.java b/valetapi/src/main/java/org/onap/fgps/api/exception/CipherUtilException.java
new file mode 100644
index 0000000..5e37e4d
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/exception/CipherUtilException.java
@@ -0,0 +1,67 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.exception;
+
+public class CipherUtilException extends RuntimeException {
+
+ public CipherUtilException() {
+ super();
+ // TODO Auto-generated constructor stub
+ }
+
+ public CipherUtilException(String arg0, Throwable arg1, boolean arg2, boolean arg3) {
+ super(arg0, arg1, arg2, arg3);
+ // TODO Auto-generated constructor stub
+ }
+
+ public CipherUtilException(String arg0, Throwable arg1) {
+ super(arg0, arg1);
+ // TODO Auto-generated constructor stub
+ }
+
+ public CipherUtilException(String arg0) {
+ super(arg0);
+ // TODO Auto-generated constructor stub
+ }
+
+ public CipherUtilException(Throwable arg0) {
+ super(arg0);
+ // TODO Auto-generated constructor stub
+ }
+
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/exception/MissingRoleException.java b/valetapi/src/main/java/org/onap/fgps/api/exception/MissingRoleException.java
new file mode 100644
index 0000000..b83f8fb
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/exception/MissingRoleException.java
@@ -0,0 +1,62 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.exception;
+
+public class MissingRoleException extends RuntimeException {
+
+ public MissingRoleException() {
+ super();
+ }
+
+ public MissingRoleException(String arg0) {
+ super(arg0);
+ }
+
+ public MissingRoleException(Throwable arg0) {
+ super(arg0);
+ }
+
+ public MissingRoleException(String arg0, Throwable arg1) {
+ super(arg0, arg1);
+ }
+
+ public MissingRoleException(String arg0, Throwable arg1, boolean arg2, boolean arg3) {
+ super(arg0, arg1, arg2, arg3);
+ }
+
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/helpers/Helper.java b/valetapi/src/main/java/org/onap/fgps/api/helpers/Helper.java
new file mode 100644
index 0000000..6016d7a
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/helpers/Helper.java
@@ -0,0 +1,58 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.helpers;
+
+import org.json.simple.JSONObject;
+
+public class Helper {
+ @SuppressWarnings("unchecked")
+ public static JSONObject formatDeleteRequest(JSONObject request) {
+ JSONObject req = new JSONObject(), datacenter = new JSONObject() ;
+ datacenter.put("id", request.get("region_id"));
+ //datacenter.put("url", request.get("keystone_url"));
+ req.put("datacenter", datacenter);
+ if (request.get("stack_name") != null){
+ req.put("stack_name", request.get("stack_name"));
+ }else {
+ req.put("stack_name", request.get("vf_module_name"));
+ }
+ if(request.get("tenant_id") != null) { req.put("tenant_id", request.get("tenant_id"));}
+ return req;
+
+ }
+} \ No newline at end of file
diff --git a/valetapi/src/main/java/org/onap/fgps/api/interceptor/AuthorizationInterceptor.java b/valetapi/src/main/java/org/onap/fgps/api/interceptor/AuthorizationInterceptor.java
new file mode 100644
index 0000000..19bc9d3
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/interceptor/AuthorizationInterceptor.java
@@ -0,0 +1,203 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.interceptor;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.tomcat.util.codec.binary.Base64;
+import org.onap.fgps.api.annotation.AafRoleRequired;
+import org.onap.fgps.api.annotation.BasicAuthRequired;
+import org.onap.fgps.api.annotation.PropertyBasedAuthorization;
+import org.onap.fgps.api.exception.MissingRoleException;
+import org.onap.fgps.api.logging.EELFLoggerDelegate;
+import org.onap.fgps.api.proxy.AAFProxy;
+import org.onap.fgps.api.utils.CipherUtil;
+import org.springframework.core.io.ClassPathResource;
+import org.springframework.core.io.Resource;
+import org.springframework.web.method.HandlerMethod;
+import org.springframework.web.servlet.HandlerInterceptor;
+import org.springframework.web.servlet.ModelAndView;
+
+public class AuthorizationInterceptor implements HandlerInterceptor {
+ private static final EELFLoggerDelegate LOGGER = EELFLoggerDelegate.getLogger(AuthorizationInterceptor.class);
+ private boolean aafAuthFlag;
+ private boolean basicAuthFlag;
+ private AAFProxy aafProxy;
+ private Map<String, String> credentials = new HashMap<String, String>();
+ private Map<String, String> roles = new HashMap<String, String>();
+ private Map<String, String> aafTags = new HashMap<String, String>();
+ private Map<String, String> basicTags = new HashMap<String, String>();
+
+ public AuthorizationInterceptor(AAFProxy aafProxy, boolean aafAuthFlag, boolean basicAuthFlag) {
+ this.aafProxy = aafProxy;
+ this.aafAuthFlag = aafAuthFlag;
+ this.basicAuthFlag = basicAuthFlag;
+ Properties authProperties = new Properties();
+ try {
+ Resource fileResource = new ClassPathResource("auth.properties");
+ authProperties.load(fileResource.getInputStream());
+ } catch (IOException e) {
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"Couldn't load auth.properties!");
+ }
+
+ for (Object o : authProperties.keySet()) {
+ String key = (String)o;
+ if (key.endsWith(".name")) {
+ String propname = key.substring(0, key.length()-5);
+ String user = authProperties.getProperty(key);
+ String encpass = authProperties.getProperty(propname + ".pass");
+ String pass = CipherUtil.decryptPKC(encpass);
+ String plainCredentials = user + ":" + pass;
+ String base64Credentials = new String(Base64.encodeBase64(plainCredentials.getBytes()));
+ if (key.equals("valet.aaf.name")) {
+ aafProxy.setCredentials(base64Credentials);
+ } else {
+ credentials.put(user, base64Credentials);
+ }
+ } else if (key.endsWith(".role")) {
+ roles.put(key, authProperties.getProperty(key));
+ } else if (key.endsWith(".aaf")) {
+ aafTags.put(key, authProperties.getProperty(key));
+ } else if (key.endsWith(".basic")) {
+ basicTags.put(key, authProperties.getProperty(key));
+ }
+ }
+ }
+
+ private boolean aafOk(HttpServletRequest request, Object handler) throws Exception {
+ String roleRequired = null;
+ try {
+ AafRoleRequired aafAnnotation = null;
+ HandlerMethod hm = (HandlerMethod)handler;
+ aafAnnotation = hm.getMethodAnnotation(AafRoleRequired.class);
+ roleRequired = getRole(aafAnnotation);
+ } catch (RuntimeException e) {
+ try {
+ PropertyBasedAuthorization pba = null;
+ HandlerMethod hm = (HandlerMethod)handler;
+ pba = hm.getMethodAnnotation(PropertyBasedAuthorization.class);
+ roleRequired = aafTags.get(pba.value() + ".aaf");
+ } catch (RuntimeException e2) {
+ // noop
+ }
+ }
+
+ if (roleRequired!=null && aafAuthFlag) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"AAF required: " + roleRequired);
+
+ if (!aafProxy.userAuthenticated(request)) return false;
+ if (!aafProxy.userAuthorized(request, roleRequired)) return false;
+ }
+
+ return true;
+ }
+
+ private String getRole(AafRoleRequired aafAnnotation) {
+ if (aafAnnotation.roleRequired()!=null && aafAnnotation.roleRequired().length()>0) return aafAnnotation.roleRequired();
+ if (roles.containsKey(aafAnnotation.roleProperty())) return roles.get(aafAnnotation.roleProperty());
+ if (roles.containsKey(aafAnnotation.roleProperty() + ".role")) return roles.get(aafAnnotation.roleProperty() + ".role");
+ throw new MissingRoleException("No role found for annotation: roleRequired = " + aafAnnotation.roleRequired() + ", roleProperty = " + aafAnnotation.roleProperty());
+ }
+
+ private boolean basicAuthOk(HttpServletRequest request, Object handler) {
+ String authRequired = null;
+ try {
+ BasicAuthRequired basicAuthAnnotation = null;
+ HandlerMethod hm = (HandlerMethod)handler;
+ basicAuthAnnotation = hm.getMethodAnnotation(BasicAuthRequired.class);
+ authRequired = basicAuthAnnotation.authRequired();
+ } catch (RuntimeException e) {
+ try {
+ PropertyBasedAuthorization pba = null;
+ HandlerMethod hm = (HandlerMethod)handler;
+ pba = hm.getMethodAnnotation(PropertyBasedAuthorization.class);
+ authRequired = basicTags.get(pba.value() + ".basic");
+ } catch (RuntimeException e2) {
+ // noop
+ }
+ }
+
+ if (authRequired!=null && basicAuthFlag) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"Basic auth required: " + authRequired);
+
+ if (credentials.containsKey(authRequired)) {
+ if (request.getHeader("Authorization")!=null && request.getHeader("Authorization").equals("Basic " + credentials.get(authRequired))) return true;
+ request.setAttribute("fail", "Basic auth failed Auth for " + authRequired + ".");
+ } else {
+ request.setAttribute("fail", "Basic auth not enabled for " + authRequired);
+ }
+
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) throws Exception {
+ if (!aafOk(request, handler)) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"AAF isn't ok, reason: " + request.getAttribute("fail"));
+ request.getRequestDispatcher("/authfail").forward(request, response);
+ return false;
+ }
+ if (!basicAuthOk(request, handler)) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"Basic auth isn't ok, reason: " + request.getAttribute("fail"));
+ request.getRequestDispatcher("/authfail").forward(request, response);
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public void postHandle(HttpServletRequest request, HttpServletResponse response, Object handler, ModelAndView modelAndView) throws Exception {
+ // noop
+ }
+
+ @Override
+ public void afterCompletion(HttpServletRequest request, HttpServletResponse response, Object handler, Exception ex) throws Exception {
+ // noop
+ }
+
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/interceptor/DarknessInterceptor.java b/valetapi/src/main/java/org/onap/fgps/api/interceptor/DarknessInterceptor.java
new file mode 100644
index 0000000..3efb3e0
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/interceptor/DarknessInterceptor.java
@@ -0,0 +1,65 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.interceptor;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.springframework.web.servlet.HandlerInterceptor;
+import org.springframework.web.servlet.ModelAndView;
+
+public class DarknessInterceptor implements HandlerInterceptor {
+ @Override
+ public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) throws Exception {
+ if (request.getRequestURL().toString().endsWith("/dark")) return true;
+
+ request.getRequestDispatcher("/dark").forward(request, response);
+ return false;
+ }
+
+ @Override
+ public void postHandle(HttpServletRequest request, HttpServletResponse response, Object handler, ModelAndView modelAndView) throws Exception {
+ // noop
+ }
+
+ @Override
+ public void afterCompletion(HttpServletRequest request, HttpServletResponse response, Object handler, Exception ex) throws Exception {
+ // noop
+ }
+
+} \ No newline at end of file
diff --git a/valetapi/src/main/java/org/onap/fgps/api/interceptor/VersioningInterceptor.java b/valetapi/src/main/java/org/onap/fgps/api/interceptor/VersioningInterceptor.java
new file mode 100644
index 0000000..a3193ce
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/interceptor/VersioningInterceptor.java
@@ -0,0 +1,88 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.interceptor;
+
+import java.util.Properties;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.onap.fgps.api.logging.EELFLoggerDelegate;
+import org.springframework.core.io.ClassPathResource;
+import org.springframework.core.io.Resource;
+import org.springframework.web.servlet.HandlerInterceptor;
+import org.springframework.web.servlet.ModelAndView;
+
+
+public class VersioningInterceptor implements HandlerInterceptor {
+
+ private static final EELFLoggerDelegate LOGGER = EELFLoggerDelegate.getLogger(VersioningInterceptor.class);
+ private Properties versionProperties = null;
+
+ @Override
+ public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) throws Exception {
+ if (versionProperties==null) {
+ versionProperties = new Properties();
+ try {
+ Resource fileResource = new ClassPathResource("version.properties");
+ versionProperties.load(fileResource.getInputStream());
+			} catch (Exception e) {
+ e.printStackTrace();
+ LOGGER.error(EELFLoggerDelegate.applicationLogger," preHandle : Error details : "+ e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger," preHandle : Error details : "+ e.getMessage());
+ }
+ }
+
+ response.addHeader("X-MinorVersion", versionProperties.getProperty("version.minor"));
+ response.addHeader("X-PatchVersion", versionProperties.getProperty("version.patch"));
+ response.addHeader("X-LatestVersion", versionProperties.getProperty("version.full"));
+
+ return true;
+ }
+
+ @Override
+ public void postHandle(HttpServletRequest request, HttpServletResponse response, Object handler, ModelAndView modelAndView) throws Exception {
+ // noop
+ }
+
+ @Override
+ public void afterCompletion(HttpServletRequest request, HttpServletResponse response, Object handler, Exception ex) throws Exception {
+ // noop
+ }
+
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/logging/EELFLoggerDelegate.java b/valetapi/src/main/java/org/onap/fgps/api/logging/EELFLoggerDelegate.java
new file mode 100644
index 0000000..4208334
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/logging/EELFLoggerDelegate.java
@@ -0,0 +1,447 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.logging;
+
+import static org.onap.fgps.api.eelf.configuration.Configuration.MDC_ALERT_SEVERITY;
+import static org.onap.fgps.api.eelf.configuration.Configuration.MDC_INSTANCE_UUID;
+import static org.onap.fgps.api.eelf.configuration.Configuration.MDC_SERVER_FQDN;
+import static org.onap.fgps.api.eelf.configuration.Configuration.MDC_SERVER_IP_ADDRESS;
+import static org.onap.fgps.api.eelf.configuration.Configuration.MDC_SERVICE_INSTANCE_ID;
+import static org.onap.fgps.api.eelf.configuration.Configuration.MDC_SERVICE_NAME;
+
+import java.net.InetAddress;
+import java.text.MessageFormat;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import javax.servlet.http.HttpServletRequest;
+
+import org.onap.fgps.api.eelf.configuration.EELFLogger;
+import org.onap.fgps.api.eelf.configuration.EELFManager;
+import org.onap.fgps.api.eelf.configuration.SLF4jWrapper;
+import org.onap.fgps.api.logging.aspect.EELFLoggerAdvice;
+import org.onap.fgps.api.logging.format.AlarmSeverityEnum;
+//import org.onap.portalsdk.core.domain.User;
+import org.onap.fgps.api.utils.SystemProperties;
+import org.onap.fgps.api.utils.UserUtils;
+import org.slf4j.MDC;
+
+
+public class EELFLoggerDelegate extends SLF4jWrapper implements EELFLogger {
+
+
+ public static final EELFLogger errorLogger = EELFManager.getInstance().getErrorLogger();
+ public static final EELFLogger applicationLogger = EELFManager.getInstance().getApplicationLogger();
+ public static final EELFLogger auditLogger = EELFManager.getInstance().getAuditLogger();
+ public static final EELFLogger metricsLogger = EELFManager.getInstance().getMetricsLogger();
+ public static final EELFLogger debugLogger = EELFManager.getInstance().getDebugLogger();
+
+ private String className;
+ private static ConcurrentMap<String, EELFLoggerDelegate> classMap = new ConcurrentHashMap<>();
+
+ public EELFLoggerDelegate(final String className) {
+ super(className);
+ this.className = className;
+ }
+
+ /**
+ * Convenience method that gets a logger for the specified class.
+ *
+ * @see #getLogger(String)
+ *
+ * @param clazz
+ * @return Instance of EELFLoggerDelegate
+ */
+ public static EELFLoggerDelegate getLogger(Class<?> clazz) {
+ return getLogger(clazz.getName());
+ }
+
+ /**
+ * Gets a logger for the specified class name. If the logger does not already
+ * exist in the map, this creates a new logger.
+ *
+ * @param className
+ * If null or empty, uses EELFLoggerDelegate as the class name.
+ * @return Instance of EELFLoggerDelegate
+ */
+ public static EELFLoggerDelegate getLogger(final String className) {
+ String classNameNeverNull = className == null || "".equals(className) ? EELFLoggerDelegate.class.getName()
+ : className;
+ EELFLoggerDelegate delegate = classMap.get(classNameNeverNull);
+ if (delegate == null) {
+			delegate = new EELFLoggerDelegate(classNameNeverNull);
+			classMap.put(classNameNeverNull, delegate);
+ }
+ return delegate;
+ }
+
+ /**
+ * Logs a message at the lowest level: trace.
+ *
+ * @param logger
+ * @param msg
+ */
+ public void trace(EELFLogger logger, String msg) {
+ if (logger.isTraceEnabled()) {
+ MDC.put(SystemProperties.MDC_CLASS_NAME, className);
+ logger.trace(msg);
+ MDC.remove(SystemProperties.MDC_CLASS_NAME);
+ }
+ }
+
+ /**
+ * Logs a message with parameters at the lowest level: trace.
+ *
+ * @param logger
+ * @param msg
+ * @param arguments
+ */
+ public void trace(EELFLogger logger, String msg, Object... arguments) {
+ if (logger.isTraceEnabled()) {
+ MDC.put(SystemProperties.MDC_CLASS_NAME, className);
+ logger.trace(msg, arguments);
+ MDC.remove(SystemProperties.MDC_CLASS_NAME);
+ }
+ }
+
+ /**
+ * Logs a message and throwable at the lowest level: trace.
+ *
+ * @param logger
+ * @param msg
+ * @param th
+ */
+ public void trace(EELFLogger logger, String msg, Throwable th) {
+ if (logger.isTraceEnabled()) {
+ MDC.put(SystemProperties.MDC_CLASS_NAME, className);
+ logger.trace(msg, th);
+ MDC.remove(SystemProperties.MDC_CLASS_NAME);
+ }
+ }
+
+ /**
+ * Logs a message at the second-lowest level: debug.
+ *
+ * @param logger
+ * @param msg
+ */
+ public void debug(EELFLogger logger, String msg) {
+ if (logger.isDebugEnabled()) {
+ MDC.put(SystemProperties.MDC_CLASS_NAME, className);
+ logger.debug(msg);
+ MDC.remove(SystemProperties.MDC_CLASS_NAME);
+ }
+ }
+
+ /**
+ * Logs a message with parameters at the second-lowest level: debug.
+ *
+ * @param logger
+ * @param msg
+ * @param arguments
+ */
+ public void debug(EELFLogger logger, String msg, Object... arguments) {
+ if (logger.isDebugEnabled()) {
+ MDC.put(SystemProperties.MDC_CLASS_NAME, className);
+ logger.debug(msg, arguments);
+ MDC.remove(SystemProperties.MDC_CLASS_NAME);
+ }
+ }
+
+ /**
+ * Logs a message and throwable at the second-lowest level: debug.
+ *
+ * @param logger
+ * @param msg
+ * @param th
+ */
+ public void debug(EELFLogger logger, String msg, Throwable th) {
+ if (logger.isDebugEnabled()) {
+ MDC.put(SystemProperties.MDC_CLASS_NAME, className);
+ logger.debug(msg, th);
+ MDC.remove(SystemProperties.MDC_CLASS_NAME);
+ }
+ }
+
+ /**
+ * Logs a message at info level.
+ *
+ * @param logger
+ * @param msg
+ */
+ public void info(EELFLogger logger, String msg) {
+ MDC.put(SystemProperties.MDC_CLASS_NAME, className);
+ logger.info(msg);
+ MDC.remove(SystemProperties.MDC_CLASS_NAME);
+ }
+
+ /**
+ * Logs a message with parameters at info level.
+ *
+ * @param logger
+ * @param msg
+ * @param arguments
+ */
+ public void info(EELFLogger logger, String msg, Object... arguments) {
+ MDC.put(SystemProperties.MDC_CLASS_NAME, className);
+ logger.info(msg, arguments);
+ MDC.remove(SystemProperties.MDC_CLASS_NAME);
+ }
+
+ /**
+ * Logs a message and throwable at info level.
+ *
+ * @param logger
+ * @param msg
+ * @param th
+ */
+ public void info(EELFLogger logger, String msg, Throwable th) {
+ MDC.put(SystemProperties.MDC_CLASS_NAME, className);
+ logger.info(msg, th);
+ MDC.remove(SystemProperties.MDC_CLASS_NAME);
+ }
+
+ /**
+ * Logs a message at warn level.
+ *
+ * @param logger
+ * @param msg
+ */
+ public void warn(EELFLogger logger, String msg) {
+ MDC.put(SystemProperties.MDC_CLASS_NAME, className);
+ logger.warn(msg);
+ MDC.remove(SystemProperties.MDC_CLASS_NAME);
+ }
+
+ /**
+ * Logs a message with parameters at warn level.
+ *
+ * @param logger
+ * @param msg
+ * @param arguments
+ */
+ public void warn(EELFLogger logger, String msg, Object... arguments) {
+ MDC.put(SystemProperties.MDC_CLASS_NAME, className);
+ logger.warn(msg, arguments);
+ MDC.remove(SystemProperties.MDC_CLASS_NAME);
+ }
+
+ /**
+ * Logs a message and throwable at warn level.
+ *
+ * @param logger
+ * @param msg
+ * @param th
+ */
+ public void warn(EELFLogger logger, String msg, Throwable th) {
+ MDC.put(SystemProperties.MDC_CLASS_NAME, className);
+ logger.warn(msg, th);
+ MDC.remove(SystemProperties.MDC_CLASS_NAME);
+ }
+
+ /**
+ * Logs a message at error level.
+ *
+ * @param logger
+ * @param msg
+ */
+ public void error(EELFLogger logger, String msg) {
+ MDC.put(SystemProperties.MDC_CLASS_NAME, className);
+ logger.error(msg);
+ MDC.remove(SystemProperties.MDC_CLASS_NAME);
+ }
+
+ /**
+ * Logs a message with parameters at error level.
+ *
+ * @param logger
+ * @param msg
+ * @param arguments
+ */
+ public void error(EELFLogger logger, String msg, Object... arguments) {
+ MDC.put(SystemProperties.MDC_CLASS_NAME, className);
+ logger.error(msg, arguments);
+ MDC.remove(SystemProperties.MDC_CLASS_NAME);
+ }
+
+ /**
+ * Logs a message and throwable at error level.
+ *
+ * @param logger
+ * @param msg
+ * @param th
+ */
+ public void error(EELFLogger logger, String msg, Throwable th) {
+ MDC.put(SystemProperties.MDC_CLASS_NAME, className);
+ logger.error(msg, th);
+ MDC.remove(SystemProperties.MDC_CLASS_NAME);
+ }
+
+ /**
+ * Logs a message with the associated alarm severity at error level.
+ *
+ * @param logger
+ * @param msg
+ * @param severity
+ */
+ public void error(EELFLogger logger, String msg, AlarmSeverityEnum severity) {
+ MDC.put(MDC_ALERT_SEVERITY, severity.name());
+ MDC.put(SystemProperties.MDC_CLASS_NAME, className);
+ logger.error(msg);
+ MDC.remove(MDC_ALERT_SEVERITY);
+ MDC.remove(SystemProperties.MDC_CLASS_NAME);
+ }
+
+ /**
+ * Initializes the logger context.
+ */
+ public void init() {
+ setGlobalLoggingContext();
+ final String msg = "############################ Logging is started. ############################";
+ // These loggers emit the current date-time without being told.
+ info(applicationLogger, msg);
+ error(errorLogger, msg);
+ debug(debugLogger, msg);
+ // Audit and metrics logger must be told start AND stop times
+ final String currentDateTime = EELFLoggerAdvice.getCurrentDateTimeUTC();
+ // Set the MDC with audit properties
+ MDC.put(SystemProperties.AUDITLOG_BEGIN_TIMESTAMP, currentDateTime);
+ MDC.put(SystemProperties.AUDITLOG_END_TIMESTAMP, currentDateTime);
+ info(auditLogger, msg);
+ MDC.remove(SystemProperties.AUDITLOG_BEGIN_TIMESTAMP);
+ MDC.remove(SystemProperties.AUDITLOG_END_TIMESTAMP);
+ // Set the MDC with metrics properties
+ MDC.put(SystemProperties.METRICSLOG_BEGIN_TIMESTAMP, currentDateTime);
+ MDC.put(SystemProperties.METRICSLOG_END_TIMESTAMP, currentDateTime);
+ info(metricsLogger, msg);
+ MDC.remove(SystemProperties.METRICSLOG_BEGIN_TIMESTAMP);
+ MDC.remove(SystemProperties.METRICSLOG_END_TIMESTAMP);
+ }
+
+ /**
+ * Builds a message using a template string and the arguments.
+ *
+ * @param message
+ * @param args
+ * @return
+ */
+ private String formatMessage(String message, Object... args) {
+ StringBuilder sbFormattedMessage = new StringBuilder();
+ if (args != null && args.length > 0 && message != null && !message.isEmpty()) {
+ MessageFormat mf = new MessageFormat(message);
+ sbFormattedMessage.append(mf.format(args));
+ } else {
+ sbFormattedMessage.append(message);
+ }
+
+ return sbFormattedMessage.toString();
+ }
+
+ /**
+ * Loads all the default logging fields into the MDC context.
+ */
+ private void setGlobalLoggingContext() {
+ MDC.put(MDC_SERVICE_INSTANCE_ID, "");
+ MDC.put(MDC_ALERT_SEVERITY, AlarmSeverityEnum.INFORMATIONAL.toString());
+ try {
+ MDC.put(MDC_SERVER_FQDN, InetAddress.getLocalHost().getHostName());
+ MDC.put(MDC_SERVER_IP_ADDRESS, InetAddress.getLocalHost().getHostAddress());
+ MDC.put(MDC_INSTANCE_UUID, SystemProperties.getProperty(SystemProperties.INSTANCE_UUID));
+ } catch (Exception e) {
+ errorLogger.error("setGlobalLoggingContext failed", e);
+ }
+ }
+
+ public static void mdcPut(String key, String value) {
+ MDC.put(key, value);
+ }
+
+ public static String mdcGet(String key) {
+ return MDC.get(key);
+ }
+
+ public static void mdcRemove(String key) {
+ MDC.remove(key);
+ }
+
+ /**
+ * Loads the RequestId/TransactionId into the MDC which it should be receiving
+ * with each incoming REST API request. Also, configures a few other request
+ * based logging fields into the MDC context.
+ *
+ * @param req
+ * @param appName
+ */
+ public void setRequestBasedDefaultsIntoGlobalLoggingContext(HttpServletRequest req, String appName) {
+ // Load the default fields
+ setGlobalLoggingContext();
+
+ // Load the request based fields
+ if (req != null) {
+ // Load user agent into MDC context, if available.
+ String accessingClient = req.getHeader(SystemProperties.USERAGENT_NAME);
+ if (accessingClient != null && !"".equals(accessingClient) && (accessingClient.contains("Mozilla")
+ || accessingClient.contains("Chrome") || accessingClient.contains("Safari"))) {
+ accessingClient = appName + "_FE";
+ }
+ MDC.put(SystemProperties.PARTNER_NAME, accessingClient);
+
+ // Protocol, Rest URL & Rest Path
+ MDC.put(SystemProperties.FULL_URL, SystemProperties.UNKNOWN);
+ MDC.put(SystemProperties.PROTOCOL, SystemProperties.HTTP);
+ String restURL = UserUtils.getFullURL(req);
+ if (restURL != null && !restURL.isEmpty()) {
+ MDC.put(SystemProperties.FULL_URL, restURL);
+ if (restURL.toLowerCase().contains("https")) {
+ MDC.put(SystemProperties.PROTOCOL, SystemProperties.HTTPS);
+ }
+ }
+
+ // Rest Path
+ MDC.put(MDC_SERVICE_NAME, req.getServletPath());
+
+ // Client IPAddress i.e. IPAddress of the remote host who is making
+ // this request.
+ String clientIPAddress = req.getHeader("X-FORWARDED-FOR");
+ if (clientIPAddress == null) {
+ clientIPAddress = req.getRemoteAddr();
+ }
+ MDC.put(SystemProperties.CLIENT_IP_ADDRESS, clientIPAddress);
+ }
+ }
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/logging/aspect/AuditLog.java b/valetapi/src/main/java/org/onap/fgps/api/logging/aspect/AuditLog.java
new file mode 100644
index 0000000..85a7cc3
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/logging/aspect/AuditLog.java
@@ -0,0 +1,49 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.logging.aspect;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@Target({ ElementType.METHOD, ElementType.TYPE })
+@Retention(RetentionPolicy.RUNTIME)
+public @interface AuditLog {
+ String value() default "";
+} \ No newline at end of file
diff --git a/valetapi/src/main/java/org/onap/fgps/api/logging/aspect/EELFLoggerAdvice.java b/valetapi/src/main/java/org/onap/fgps/api/logging/aspect/EELFLoggerAdvice.java
new file mode 100644
index 0000000..ef0349a
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/logging/aspect/EELFLoggerAdvice.java
@@ -0,0 +1,247 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.logging.aspect;
+
+import static org.onap.fgps.api.eelf.configuration.Configuration.MDC_KEY_REQUEST_ID;
+
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
+import javax.servlet.http.HttpServletRequest;
+
+import org.onap.fgps.api.eelf.configuration.Configuration;
+import org.onap.fgps.api.logging.EELFLoggerDelegate;
+import org.onap.fgps.api.logging.format.AuditLogFormatter;
+import org.onap.fgps.api.utils.SystemProperties;
+import org.onap.fgps.api.utils.SystemProperties.SecurityEventTypeEnum;
+import org.slf4j.MDC;
+
+@org.springframework.context.annotation.Configuration
+public class EELFLoggerAdvice {
+
+ private static final EELFLoggerDelegate adviceLogger = EELFLoggerDelegate.getLogger(EELFLoggerAdvice.class);
+
+ // DateTime Format according to the Application Logging Guidelines.
+ private static final SimpleDateFormat logDateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSXXX");
+
+ /**
+ * Gets the current date and time in expected log format.
+ *
+ * @return Current date and time
+ */
+ public static String getCurrentDateTimeUTC() {
+ String currentDateTime = logDateFormat.format(new Date());
+ return currentDateTime;
+ }
+
+ /**
+ *
+ * @param securityEventType
+ * @param args
+ * @param passOnArgs
+ * @return One-element array containing an empty String object.
+ */
+ public Object[] before(SecurityEventTypeEnum securityEventType, Object[] args, Object[] passOnArgs) {
+ try {
+ String className = "";
+ if (passOnArgs[0] != null) {
+ className = passOnArgs[0].toString();
+ }
+
+ String methodName = "";
+ if (passOnArgs[1] != null) {
+ methodName = passOnArgs[1].toString();
+ }
+ String appName = SystemProperties.APP_NAME;
+
+ EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(className);
+
+ // Initialize Request defaults only for controller methods.
+ MDC.put(className + methodName + SystemProperties.METRICSLOG_BEGIN_TIMESTAMP, getCurrentDateTimeUTC());
+ MDC.put(SystemProperties.TARGET_ENTITY, appName);
+ MDC.put(SystemProperties.TARGET_SERVICE_NAME, methodName);
+ if (securityEventType != null) {
+ MDC.put(className + methodName + SystemProperties.AUDITLOG_BEGIN_TIMESTAMP, getCurrentDateTimeUTC());
+ for(Object obj : args) {
+ if (obj != null && obj instanceof HttpServletRequest) {
+ HttpServletRequest req = (HttpServletRequest) obj;
+ logger.setRequestBasedDefaultsIntoGlobalLoggingContext(req, appName);
+ }else if (obj!= null && obj instanceof String) {
+ MDC.put(MDC_KEY_REQUEST_ID, "requestId : "+(String)obj);
+ }
+ }
+ }
+ logger.debug(EELFLoggerDelegate.debugLogger, "{} was invoked.", methodName);
+ } catch (Exception e) {
+ adviceLogger.error(EELFLoggerDelegate.errorLogger, "before failed", e);
+ }
+
+ return new Object[] { "" };
+ }
+
+ /**
+ *
+ * @param securityEventType
+ * @param result
+ * @param args
+ * @param returnArgs
+ * @param passOnArgs
+ */
+ public void after(SecurityEventTypeEnum securityEventType, String result, Object[] args, Object[] returnArgs,
+ Object[] passOnArgs) {
+ try {
+ String className = "";
+ if (passOnArgs[0] != null) {
+ className = passOnArgs[0].toString();
+ }
+
+ String methodName = "";
+ if (passOnArgs[1] != null) {
+ methodName = passOnArgs[1].toString();
+ }
+
+ EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(className);
+
+ String appName = SystemProperties.APP_NAME;
+
+ if (MDC.get(SystemProperties.TARGET_SERVICE_NAME) == null
+ || "".equals(MDC.get(SystemProperties.TARGET_SERVICE_NAME))) {
+ MDC.put(SystemProperties.TARGET_SERVICE_NAME, methodName);
+ }
+
+ if (MDC.get(SystemProperties.TARGET_ENTITY) == null || "".equals(MDC.get(SystemProperties.TARGET_ENTITY))) {
+ MDC.put(SystemProperties.TARGET_ENTITY, appName);
+ }
+
+ MDC.put(SystemProperties.STATUS_CODE, result);
+
+
+ MDC.put(SystemProperties.METRICSLOG_BEGIN_TIMESTAMP,
+ MDC.get(className + methodName + SystemProperties.METRICSLOG_BEGIN_TIMESTAMP));
+ MDC.put(SystemProperties.METRICSLOG_END_TIMESTAMP, getCurrentDateTimeUTC());
+
+ this.calculateDateTimeDifference(MDC.get(SystemProperties.METRICSLOG_BEGIN_TIMESTAMP),
+ MDC.get(SystemProperties.METRICSLOG_END_TIMESTAMP));
+
+ logger.info(EELFLoggerDelegate.metricsLogger, methodName + " operation is completed.");
+ logger.debug(EELFLoggerDelegate.debugLogger, "Finished executing " + methodName + ".");
+
+ if (securityEventType != null) {
+
+ MDC.put(SystemProperties.AUDITLOG_BEGIN_TIMESTAMP,
+ MDC.get(className + methodName + SystemProperties.AUDITLOG_BEGIN_TIMESTAMP));
+ MDC.put(SystemProperties.AUDITLOG_END_TIMESTAMP, getCurrentDateTimeUTC());
+ this.calculateDateTimeDifference(MDC.get(SystemProperties.AUDITLOG_BEGIN_TIMESTAMP),
+ MDC.get(SystemProperties.AUDITLOG_END_TIMESTAMP));
+
+ this.logSecurityMessage(logger, securityEventType, result, methodName);
+
+ // clear when finishes audit logging
+ MDC.remove(Configuration.MDC_KEY_REQUEST_ID);
+ MDC.remove(SystemProperties.PARTNER_NAME);
+ MDC.remove(SystemProperties.MDC_LOGIN_ID);
+ MDC.remove(SystemProperties.PROTOCOL);
+ MDC.remove(SystemProperties.FULL_URL);
+ MDC.remove(Configuration.MDC_SERVICE_NAME);
+ MDC.remove(SystemProperties.RESPONSE_CODE);
+ MDC.remove(SystemProperties.STATUS_CODE);
+ MDC.remove(className + methodName + SystemProperties.AUDITLOG_BEGIN_TIMESTAMP);
+ MDC.remove(SystemProperties.AUDITLOG_BEGIN_TIMESTAMP);
+ MDC.remove(SystemProperties.AUDITLOG_END_TIMESTAMP);
+ }else{
+ MDC.put(SystemProperties.STATUS_CODE, "COMPLETE");
+ }
+
+ MDC.remove(className + methodName + SystemProperties.METRICSLOG_BEGIN_TIMESTAMP);
+ MDC.remove(SystemProperties.METRICSLOG_BEGIN_TIMESTAMP);
+ MDC.remove(SystemProperties.METRICSLOG_END_TIMESTAMP);
+ MDC.remove(SystemProperties.MDC_TIMER);
+ MDC.remove(SystemProperties.TARGET_ENTITY);
+ MDC.remove(SystemProperties.TARGET_SERVICE_NAME);
+ MDC.remove(SystemProperties.STATUS_CODE);
+
+ } catch (Exception e) {
+ adviceLogger.error(EELFLoggerDelegate.errorLogger, "after failed", e);
+ }
+ }
+
+ /**
+ *
+ * @param logger
+ * @param securityEventType
+ * @param result
+ * @param restMethod
+ */
+ private void logSecurityMessage(EELFLoggerDelegate logger, SecurityEventTypeEnum securityEventType, String result,
+ String restMethod) {
+ StringBuilder additionalInfoAppender = new StringBuilder();
+
+ additionalInfoAppender.append(String.format("%s request was received.", restMethod));
+
+ // Status code
+ MDC.put(SystemProperties.STATUS_CODE, result);
+
+ String fullURL = MDC.get(SystemProperties.FULL_URL);
+ if (fullURL != null && !fullURL.isEmpty()) {
+ additionalInfoAppender.append(" Request-URL:" + MDC.get(SystemProperties.FULL_URL));
+ }
+
+ String auditMessage = AuditLogFormatter.getInstance().createMessage(MDC.get(SystemProperties.PROTOCOL),
+ securityEventType.name(), MDC.get(SystemProperties.MDC_LOGIN_ID), additionalInfoAppender.toString());
+
+ logger.info(EELFLoggerDelegate.auditLogger, auditMessage);
+ }
+
+ /**
+ *
+ * @param beginDateTime
+ * @param endDateTime
+ */
+ private void calculateDateTimeDifference(String beginDateTime, String endDateTime) {
+ if (beginDateTime != null && endDateTime != null) {
+ try {
+ Date beginDate = logDateFormat.parse(beginDateTime);
+ Date endDate = logDateFormat.parse(endDateTime);
+ String timeDifference = String.format("%d", endDate.getTime() - beginDate.getTime());
+ MDC.put(SystemProperties.MDC_TIMER, timeDifference);
+ } catch (Exception e) {
+ adviceLogger.error(EELFLoggerDelegate.errorLogger, "calculateDateTimeDifference failed", e);
+ }
+ }
+ }
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/logging/aspect/EELFLoggerAspect.java b/valetapi/src/main/java/org/onap/fgps/api/logging/aspect/EELFLoggerAspect.java
new file mode 100644
index 0000000..a1055af
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/logging/aspect/EELFLoggerAspect.java
@@ -0,0 +1,105 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.logging.aspect;
+
+import org.aspectj.lang.ProceedingJoinPoint;
+import org.aspectj.lang.annotation.Around;
+import org.aspectj.lang.annotation.Aspect;
+import org.aspectj.lang.annotation.Pointcut;
+import org.onap.fgps.api.utils.SystemProperties.SecurityEventTypeEnum;
+import org.springframework.beans.factory.annotation.Autowired;
+
+@Aspect
+@org.springframework.context.annotation.Configuration
+public class EELFLoggerAspect {
+
+ @Autowired
+ private EELFLoggerAdvice advice;
+
+ /*
+ * Point-cut expression to handle all INCOMING_REST_MESSAGES
+ */
+ @Pointcut("execution(public * org.onap.fgps.api.controller.*.*(..))")
+ public void incomingAuditMessages() {
+ // Nothing is logged on incoming message
+ }
+
+ @Around("incomingAuditMessages() && @annotation(AuditLog)")
+ public Object logAuditMethodAround(ProceedingJoinPoint joinPoint) throws Throwable {
+ return this.logAroundMethod(joinPoint, SecurityEventTypeEnum.INCOMING_REST_MESSAGE);
+ }
+
+ @Around("incomingAuditMessages() && @within(AuditLog)")
+ public Object logAuditMethodClassAround(ProceedingJoinPoint joinPoint) throws Throwable {
+ return this.logAroundMethod(joinPoint, SecurityEventTypeEnum.INCOMING_REST_MESSAGE);
+ }
+
+ private Object logAroundMethod(ProceedingJoinPoint joinPoint, SecurityEventTypeEnum securityEventType)
+ throws Throwable {
+ // Before
+ Object[] passOnArgs = new Object[] { joinPoint.getSignature().getDeclaringType().getName(),
+ joinPoint.getSignature().getName() };
+ Object[] returnArgs = advice.before(securityEventType, joinPoint.getArgs(), passOnArgs);
+
+ // Execute the actual method
+ Object result = null;
+ String restStatus = "COMPLETE";
+ try {
+ result = joinPoint.proceed();
+ } catch (Exception e) {
+ restStatus = "ERROR";
+ }
+ // After
+ advice.after(securityEventType, restStatus, joinPoint.getArgs(), returnArgs, passOnArgs);
+ return result;
+ }
+
+ //Metrics Logging
+ @Pointcut("execution(* *(..))")
+ public void performMetricsLogging() {}
+
+ @Around("performMetricsLogging() && @within(MetricsLog)")
+ public Object metricsLoggingAroundClass(ProceedingJoinPoint joinPoint, MetricsLog MetricsLog) throws Throwable {
+ return this.logAroundMethod(joinPoint, null);
+ }
+
+ @Around("performMetricsLogging() && @annotation(MetricsLog)")
+ public Object metricsLoggingAroundMethod(ProceedingJoinPoint joinPoint, MetricsLog MetricsLog) throws Throwable {
+ return this.logAroundMethod(joinPoint, null);
+ }
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/logging/aspect/MetricsLog.java b/valetapi/src/main/java/org/onap/fgps/api/logging/aspect/MetricsLog.java
new file mode 100644
index 0000000..7374bd8
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/logging/aspect/MetricsLog.java
@@ -0,0 +1,49 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.logging.aspect;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@Target({ ElementType.METHOD, ElementType.TYPE })
+@Retention(RetentionPolicy.RUNTIME)
+public @interface MetricsLog {
+ String value() default "";
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/logging/format/AlarmSeverityEnum.java b/valetapi/src/main/java/org/onap/fgps/api/logging/format/AlarmSeverityEnum.java
new file mode 100644
index 0000000..e23936f
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/logging/format/AlarmSeverityEnum.java
@@ -0,0 +1,56 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.logging.format;
+
+public enum AlarmSeverityEnum {
+ CRITICAL("1"),
+ MAJOR("2"),
+ MINOR("3"),
+ INFORMATIONAL("4"),
+ NONE("0");
+
+ private final String severity;
+
+ AlarmSeverityEnum(String severity) {
+ this.severity = severity;
+ }
+
+ public String severity() {
+ return severity;
+ }
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/logging/format/AuditLogFormatter.java b/valetapi/src/main/java/org/onap/fgps/api/logging/format/AuditLogFormatter.java
new file mode 100644
index 0000000..b17c683
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/logging/format/AuditLogFormatter.java
@@ -0,0 +1,111 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.logging.format;
+
+import java.text.MessageFormat;
+import java.util.Map;
+
+import org.onap.fgps.api.utils.SystemProperties;
+
+public class AuditLogFormatter {
+ // Singleton
+ private static AuditLogFormatter instance = new AuditLogFormatter();
+
+ public static AuditLogFormatter getInstance() {
+
+ return instance;
+ }
+
+ public String createMessage(String protocol, String set, String loginId, String message) {
+
+ Object[] securityMessageArgs = prepareFormatArgs(protocol, set, loginId, message);
+
+ return MessageFormat.format(SystemProperties.SECURITY_LOG_TEMPLATE, securityMessageArgs);
+ }
+
+ /**
+ * A method for normalizing the security log field - returns the @Param
+ * defaultValue in case the entry is null or empty. If the @param entry is not
+ * empty, a single quotation is added to it.
+ *
+ * @param entry
+ * the entry
+ * @param defaultValue
+ * The default value in case the entry is empty
+ * @return String (formatted)
+ */
+ private String formatEntry(Object entry, String defaultValue) {
+ return (entry != null && !entry.toString().isEmpty()) ? addSingleQuotes(entry.toString()) : defaultValue;
+
+ }
+
+ private String addSingleQuotes(String s) {
+ if (null != s && !s.isEmpty()) {
+ s = SystemProperties.SINGLE_QUOTE + s + SystemProperties.SINGLE_QUOTE;
+ }
+ return s;
+ }
+
+ /**
+ * This method prepares an Object array of arguments that would be passed to the
+ * MessageFormat.format() method, to format the security log.
+ *
+ * @param protocol
+ * @param set
+ * @param loginId
+ * @param message
+ *            additional information appended to the log entry
+ * @return Object array of formatted arguments for
+ *         MessageFormat.format on SECURITY_LOG_TEMPLATE
+ */
+ private Object[] prepareFormatArgs(String protocol, String set, String loginId, String message) {
+
+ Object[] messageFormatArgs = { formatEntry(protocol, SystemProperties.NA),
+ formatEntry(set, SystemProperties.NA), formatEntry(loginId, SystemProperties.UNKNOWN), message };
+ return messageFormatArgs;
+ }
+
+ public String createMessage(Map<String, String> logArgsMap) {
+
+ Object[] securityMessageArgs = prepareFormatArgs(logArgsMap.get(SystemProperties.PROTOCOL),
+ logArgsMap.get(SystemProperties.SECURIRY_EVENT_TYPE), logArgsMap.get(SystemProperties.LOGIN_ID),
+ logArgsMap.get(SystemProperties.ADDITIONAL_INFO));
+
+ return MessageFormat.format(SystemProperties.SECURITY_LOG_TEMPLATE, securityMessageArgs);
+ }
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/proxy/AAFProxy.java b/valetapi/src/main/java/org/onap/fgps/api/proxy/AAFProxy.java
new file mode 100644
index 0000000..43ecaf1
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/proxy/AAFProxy.java
@@ -0,0 +1,232 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.proxy;
+
+import java.io.IOException;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+
+import javax.servlet.http.HttpServletRequest;
+
+import org.onap.fgps.api.logging.EELFLoggerDelegate;
+import org.onap.fgps.api.utils.CipherUtil;
+import org.springframework.http.HttpEntity;
+import org.springframework.http.HttpHeaders;
+import org.springframework.http.HttpMethod;
+import org.springframework.http.HttpStatus;
+import org.springframework.http.ResponseEntity;
+import org.springframework.web.client.HttpClientErrorException;
+import org.springframework.web.client.RestClientException;
+import org.springframework.web.client.RestTemplate;
+
+import com.fasterxml.jackson.core.JsonParseException;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.JsonMappingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+public class AAFProxy {
+ public static class Auth {
+ public String id;
+ public String password;
+ }
+
+ public static class Item {
+ public String id;
+ public String expires;
+ }
+
+ public static class User {
+ public List<Item> user;
+ }
+
+ private static final EELFLoggerDelegate LOGGER = EELFLoggerDelegate.getLogger(AAFProxy.class);
+ private static final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSXXX");
+
+ private String aafUrlBase;
+ private String encodedPassword = null;
+
+ public AAFProxy(String aafUrlBase) {
+ this.aafUrlBase = aafUrlBase;
+ }
+
+ public boolean userAuthenticated(HttpServletRequest request) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"In AAFProxy.userAuthenticated");
+ String user = request.getHeader("mechId");
+ if (user==null || user.length()==0) {
+ request.setAttribute("fail", "AAF failed: mechId header not present.");
+ return false;
+ }
+
+ String encpassword = request.getHeader("password");
+ if (encpassword==null || encpassword.length()==0) {
+ request.setAttribute("fail", "AAF failed: password header not present.");
+ return false;
+ }
+ String password = CipherUtil.decryptPKC(encpassword);
+
+ String urlStr = aafUrlBase + "/authn/validate";
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"Going to call AAF, url = " + urlStr);
+
+ try {
+ RestTemplate restTemplate = new RestTemplate();
+
+ HttpHeaders headers = new HttpHeaders();
+ headers.add("Authorization", "Basic " + encodedPassword);
+ headers.add("Accept", "application/Users+json;q=1.0;charset=utf-8;version=2.0,application/json;q=1.0;version=2.0,*/*;q=1.0");
+ headers.add("Content-Type", "application/json");
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"Headers = " + headers);
+
+ Auth auth = new Auth();
+ auth.id = user;
+ auth.password = password;
+ ObjectMapper mapper = new ObjectMapper();
+ String body = mapper.writeValueAsString(auth);
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"Call body = " + body.replaceAll("\"password\": ?\".*?\"", "\"password\": \"*****\""));
+
+ HttpEntity<String> aafRequest = new HttpEntity<String>(body, headers);
+
+ ResponseEntity<String> response = restTemplate.exchange(urlStr, HttpMethod.POST, aafRequest, String.class);
+ if (response.getStatusCode().equals(HttpStatus.OK)) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"AAF returned 200 OK");
+ return true;
+ } else {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"Unexpected status code returned from AAF? " + response.getStatusCode());
+ if (response.getStatusCodeValue()>=200 && response.getStatusCodeValue()<=299) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"Status code is 2XX, assuming OK");
+ return true;
+ } else {
+ LOGGER.warn(EELFLoggerDelegate.applicationLogger,"Status code is not 2XX (" + response.getStatusCode() + "), assuming failed.");
+ request.setAttribute("fail", "AAF failed: AAF returned status code " + response.getStatusCode());
+ return false;
+ }
+ }
+
+ } catch (HttpClientErrorException e) {
+ if (e.getStatusCode().equals(HttpStatus.FORBIDDEN)) {
+ LOGGER.warn(EELFLoggerDelegate.applicationLogger,"AAF returned 403 Forbidden");
+ request.setAttribute("fail", "AAF failed: user not authenticated.");
+ return false;
+ } else {
+ LOGGER.warn(EELFLoggerDelegate.applicationLogger,"Status code is not 2XX (" + e.getStatusCode() + "), assuming failed.");
+ request.setAttribute("fail", "AAF failed: AAF returned status code " + e.getStatusCode());
+ return false;
+ }
+ } catch (JsonProcessingException e) {
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"Call to AAF threw an exception: " + e);
+ request.setAttribute("fail", "AAF failed: Couldn't convert call body? " + e);
+ return false;
+ } catch (RestClientException e) {
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"Call to AAF threw an exception: " + e);
+ request.setAttribute("fail", "AAF failed: call to AAF threw an exception " + e);
+ return false;
+ }
+ }
+
+ public boolean userAuthorized(HttpServletRequest request, String roleRequired) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"In AAFProxy.userAuthorized");
+ String user = request.getHeader("mechId");
+ if (user==null || user.length()==0) {
+ request.setAttribute("fail", "AAF failed: mechId header not present.");
+ return false;
+ }
+ String urlStr = aafUrlBase + "/authz/users/" + user + "/" + roleRequired;
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"Going to call AAF, url = " + urlStr);
+
+ try {
+ RestTemplate restTemplate = new RestTemplate();
+
+ HttpHeaders headers = new HttpHeaders();
+ headers.add("Authorization", "Basic " + encodedPassword);
+ headers.add("Accept", "application/Users+json;q=1.0;charset=utf-8;version=2.0,application/json;q=1.0;version=2.0,*/*;q=1.0");
+
+ HttpEntity<String> aafRequest = new HttpEntity<String>(null, headers);
+
+ ResponseEntity<String> response = restTemplate.exchange(urlStr, HttpMethod.GET, aafRequest, String.class);
+
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"Received from AAF: " + response.getBody());
+ ObjectMapper mapper = new ObjectMapper();
+ User aafuser;
+ aafuser = mapper.readValue(response.getBody(), User.class);
+ if (aafuser.user==null) aafuser.user = new ArrayList<Item>();
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"User is a " + aafuser + " with " + aafuser.user.size() + (aafuser.user.size()==0?"":(" items; " + aafuser.user.get(0).id + ", " + aafuser.user.get(0).expires)) );
+
+ Date now = new Date();
+ for (Item item : aafuser.user) {
+ Date expiryDate = sdf.parse(item.expires);
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"Comparing " + item.id +" to " + user);
+ if (!item.id.equals(user)) continue;
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"Comparing " + now + " to " + expiryDate);
+ if (now.compareTo(expiryDate)>0) {
+ request.setAttribute("fail", "AAF failed: user role is expired.");
+ return false;
+ }
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"Success! User authorized.");
+ return true;
+ }
+ } catch (JsonParseException | JsonMappingException e) {
+ e.printStackTrace();
+ LOGGER.error(EELFLoggerDelegate.applicationLogger,"Exception while calling AAF: " + e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"Exception while calling AAF: " + e);
+ request.setAttribute("fail", "AAF failed: invalid JSON returned from AAF? (" + e + ")");
+ return false;
+ } catch (RuntimeException | IOException e) {
+ e.printStackTrace();
+ LOGGER.error(EELFLoggerDelegate.applicationLogger,"Exception while calling AAF: " + e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"Exception while calling AAF: " + e);
+ request.setAttribute("fail", "AAF failed: exception in call to AAF (" + e + ")");
+ return false;
+ } catch (ParseException e) {
+ LOGGER.error(EELFLoggerDelegate.applicationLogger,"Exception while calling AAF: " + e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"Exception while calling AAF: " + e);
+ e.printStackTrace();
+ request.setAttribute("fail", "AAF failed: invalid date format returned from AAF? (" + e + ")");
+ return false;
+ }
+
+ request.setAttribute("fail", "AAF failed: user does not have role.");
+ return false;
+ }
+
+ public void setCredentials(String encodedPassword) {
+ if (encodedPassword!=null) this.encodedPassword = encodedPassword;
+ }
+
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/proxy/DBProxy.java b/valetapi/src/main/java/org/onap/fgps/api/proxy/DBProxy.java
new file mode 100644
index 0000000..d6753ad
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/proxy/DBProxy.java
@@ -0,0 +1,215 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.proxy;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Properties;
+
+import org.onap.fgps.api.logging.EELFLoggerDelegate;
+import org.onap.fgps.api.utils.CipherUtil;
+import org.springframework.http.HttpEntity;
+import org.springframework.http.HttpHeaders;
+import org.springframework.http.HttpMethod;
+import org.springframework.http.HttpStatus;
+import org.springframework.http.ResponseEntity;
+import org.springframework.web.client.HttpClientErrorException;
+import org.springframework.web.client.RestTemplate;
+import org.onap.fgps.api.utils.UserUtils;
+
+public class DBProxy {
+ private String[] server;
+ private RestTemplate rest;
+ private HttpHeaders headers;
+ private HttpStatus status;
+ private String ipAddress;
+ private boolean pingLogFlag;
+
+ private static final EELFLoggerDelegate LOGGER = EELFLoggerDelegate.getLogger(DBProxy.class);
+ InputStream inputStream;
+
+
+
+ public DBProxy(boolean pingFlag) {
+ this.pingLogFlag = pingFlag;
+ this.rest = new RestTemplate();
+ this.headers = new HttpHeaders();
+ headers.add("Content-Type", "application/json");
+ Properties props = new Properties();
+ String propFileName = "resources.properties";
+ inputStream = getClass().getClassLoader().getResourceAsStream(propFileName);
+ try {
+ if (inputStream != null) {
+ props.load(inputStream);
+ } else {
+ if(pingLogFlag) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"DBProxy : input stream is null; resources.properties not found");
+ }
+ }
+ } catch (IOException e) {
+ e.printStackTrace();
+ LOGGER.error(EELFLoggerDelegate.applicationLogger,"DBProxy : Error details : "+ e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"DBProxy : Error details : "+ e.getMessage());
+ }
+ server = new String[3];
+ server[0] = "http://" + UserUtils.htmlEscape(props.getProperty("musicdb.ip.1")) + ":" + UserUtils.htmlEscape(props.getProperty("music.MUSIC_DB_PORT"))
+ + UserUtils.htmlEscape(props.getProperty("music.MUSIC_DB_URL"));
+ server[1] = "http://" + UserUtils.htmlEscape(props.getProperty("musicdb.ip.2")) + ":" + UserUtils.htmlEscape(props.getProperty("music.MUSIC_DB_PORT"))
+ + UserUtils.htmlEscape(props.getProperty("music.MUSIC_DB_URL"));
+ server[2] = "http://" + UserUtils.htmlEscape(props.getProperty("musicdb.ip.3")) + ":" + UserUtils.htmlEscape(props.getProperty("music.MUSIC_DB_PORT"))
+ + UserUtils.htmlEscape(props.getProperty("music.MUSIC_DB_URL"));
+ headers.add("ns", UserUtils.htmlEscape(props.getProperty("musicdb.namespace")));
+ headers.add("userId", UserUtils.htmlEscape(props.getProperty("musicdb.userId")));
+ headers.add("password", CipherUtil.decryptPKC(props.getProperty("musicdb.password")));
+ //keep headers userID and password for now, it works with old version
+ //new version of music(1810) needs userID and password as basic auth
+ headers.add("Authorization", CipherUtil.encodeBasicAuth(props.getProperty("musicdb.userId"), CipherUtil.decryptPKC(props.getProperty("musicdb.password"))));
+
+
+ }
+
+ public DBProxy() {
+ this(true);
+ }
+
+ public String retryRequest(String uri, HttpMethod operation, HttpEntity<String> requestEntity, int n) {
+ try {
+ StringBuffer headerOut = new StringBuffer();
+ HttpHeaders httpHeaders = requestEntity.getHeaders();
+ for (String key: httpHeaders.keySet()) {
+ headerOut.append(", ");
+ if (key.toLowerCase().contains("pass") || key.equals("Authorization")) {
+ headerOut.append(key + ": PASSWORD CENSORED");
+ } else {
+ headerOut.append(key + ": [" + httpHeaders.get(key) + "]");
+ }
+ }
+
+ String finalUri = this.server[n] + uri;
+ finalUri = finalUri.replaceAll("//", "/").replaceFirst("/", "//");
+ if(pingLogFlag) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger, "DBProxy: Sending request to DB : "+ finalUri + ", " + operation + headerOut.toString() );
+ }
+ // System.out.println();
+ ResponseEntity<String> responseEntity = rest.exchange( finalUri, operation, requestEntity, String.class);
+ this.setStatus(responseEntity.getStatusCode());
+ if(pingLogFlag) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger, "DBProxy: Received response from DB: " + responseEntity.toString() + ", " + responseEntity.getStatusCode());
+ }
+ return responseEntity.getBody();
+ } catch (HttpClientErrorException e) {
+ if(pingLogFlag) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"DBProxy : HttpClientErrorException in received response: " + e.getResponseBodyAsString() + ", " + e.getStatusCode());
+ LOGGER.debug(EELFLoggerDelegate.applicationLogger, " Response headers: " + e.getResponseHeaders());
+ }
+ LOGGER.info(EELFLoggerDelegate.errorLogger,"DBProxy : HttpClientErrorException in received response: " + e.getResponseBodyAsString() + ", " + e.getStatusCode());
+ LOGGER.debug(EELFLoggerDelegate.errorLogger, " Response headers: " + e.getResponseHeaders());
+ if (n < 2) {
+ if(pingLogFlag) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger, "Trying again");
+ }
+ return retryRequest(uri, operation, requestEntity, n + 1);
+ } else {
+ LOGGER.error(EELFLoggerDelegate.applicationLogger,"Error while accessing MUSIC: "+ e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"Error while accessing MUSIC: ");
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"Error details: "+ e.getMessage());
+ return "DBRequest Failed";
+ }
+ } catch (Exception e) {
+ if (n < 2) {
+ if(pingLogFlag) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"DBProxy : Exception in received response : "+ this.server[n] + ", " + e + ", retrying ...");
+ }
+ LOGGER.info(EELFLoggerDelegate.errorLogger,"DBProxy : Exception in received response : "+ this.server[n] + ", " + e + ", retrying ...");
+ return retryRequest(uri, operation, requestEntity, n + 1);
+ } else {
+ LOGGER.error(EELFLoggerDelegate.applicationLogger,"Error while accessing MUSIC: "+ e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"Error while accessing MUSIC: ");
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"Error details: "+ e.getMessage());
+ return "DBRequest Failed";
+ }
+ }
+ }
+
+ public String post(String uri, String json) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"DBProxy : Post request sent");
+
+ HttpEntity<String> requestEntity = new HttpEntity<String>(json, headers);
+ // System.out.println(headers);
+ System.out.println("In DBProxy.post, Headers: " + requestEntity.getHeaders().toString().replaceAll("password=\\[.*?\\]", "password=CENSORED").replaceAll("Authorization=\\[Basic.*?\\]", "Authorization=Basic CENSORED"));
+ System.out.println("In DBProxy.post, Body: " + requestEntity.getBody().toString());
+ return retryRequest(uri, HttpMethod.POST, requestEntity, 0);
+ }
+
+ public String get(String uri) {
+ if(pingLogFlag) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"DBProxy : Get request sent");
+ }
+
+ HttpEntity<String> requestEntity = new HttpEntity<String>("", headers);
+ if(pingLogFlag) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"Requesting : "+server + uri);
+ }
+
+ return retryRequest(uri, HttpMethod.GET, requestEntity, 0);
+ }
+
+ public String put(String uri, String json) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"DBProxy : Put request sent");
+
+ HttpEntity<String> requestEntity = new HttpEntity<String>(json, headers);
+
+ return retryRequest(uri, HttpMethod.PUT, requestEntity, 0);
+ }
+
+ public String delete(String uri, String json) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"DBProxy : Delete request sent");
+
+ HttpEntity<String> requestEntity = new HttpEntity<String>(json, headers);
+
+ return retryRequest(uri, HttpMethod.DELETE, requestEntity, 0);
+ }
+
+ public HttpStatus getStatus() {
+ return status;
+ }
+
+ public void setStatus(HttpStatus status) {
+ this.status = status;
+ }
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/service/ValetGroupsService.java b/valetapi/src/main/java/org/onap/fgps/api/service/ValetGroupsService.java
new file mode 100644
index 0000000..90c6613
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/service/ValetGroupsService.java
@@ -0,0 +1,141 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.service;
+
+import org.apache.catalina.connector.Response;
+import org.json.simple.JSONObject;
+import org.json.simple.parser.JSONParser;
+import org.json.simple.parser.ParseException;
+import org.onap.fgps.api.beans.schema.Schema;
+import org.onap.fgps.api.dao.ValetServicePlacementDAO;
+import org.onap.fgps.api.logging.EELFLoggerDelegate;
+import org.onap.fgps.api.utils.Constants;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.ResponseEntity;
+import org.springframework.stereotype.Service;
+
+@Service
+public class ValetGroupsService {
+ private ValetServicePlacementDAO valetServicePlacementDAO;
+ private Schema schema;
+ private static final EELFLoggerDelegate LOGGER = EELFLoggerDelegate.getLogger(ValetGroupsService.class);
+
+ @Autowired
+ public ValetGroupsService(ValetServicePlacementDAO valetServicePlacementDAO, Schema schema) {
+ super();
+ this.valetServicePlacementDAO = valetServicePlacementDAO;
+ this.schema = schema;
+ }
+
+ public static JSONObject parseToJSON(String jsonString) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"parseToJSON : parsing json");
+ JSONParser parser = new JSONParser();
+ try {
+ JSONObject json = (JSONObject) parser.parse(jsonString);
+ return json;
+ } catch (ParseException e) {
+ e.printStackTrace();
+ LOGGER.error(EELFLoggerDelegate.applicationLogger,"parseToJSON : Error in parsing JSON : "+ e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"parseToJSON : Error in parsing JSON : "+ e.getMessage());
+ return null;
+ }
+ }
+
+ public String authorizeAAF() {
+
+ return "";
+ }
+
+ public ResponseEntity<String> saveGroupsRequest(JSONObject request, String operation, String requestId) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"SaveGroupRequest : request passed", requestId);
+
+ String dbRequest = schema.formMsoInsertUpdateRequest(requestId, operation, request.toJSONString());
+ String insertRow = valetServicePlacementDAO.insertRow(dbRequest);
+ return pollForResult(request, operation + "-" + requestId, Constants.WAIT_UNITL_SECONDS,
+ Constants.POLL_EVERY_SECONDS);
+
+ }
+ //Junit integration test method
+ public ResponseEntity<String> saveGroupsRequest1(JSONObject request, String operation, String requestId) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"SaveGroupRequest : request passed", requestId);
+
+ String dbRequest = schema.formMsoInsertUpdateRequest(requestId, operation, request.toJSONString());
+ return ResponseEntity.ok(dbRequest);
+
+ }
+
+ public ResponseEntity<String> pollForResult(JSONObject values, String requestId, int waitUntilSeconds,
+ int pollEverySeconds) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"pollForResult : called", requestId);
+
+ String result = null;
+ long waitUntil = System.currentTimeMillis() + (1000 * waitUntilSeconds);
+ int counter = 1;
+
+ JSONObject response = new JSONObject();
+ boolean isTimedOut = false;
+ while (true) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"pollForResult : polling database - ", counter++);
+ result = valetServicePlacementDAO.getRowFromResults(requestId);
+ response = result != null ? parseToJSON(result) : null;
+
+ if (response != null && ((JSONObject) response.get("result")).get("row 0") != null) {
+ LOGGER.debug(EELFLoggerDelegate.debugLogger,"pollForResult : response received", result);
+ valetServicePlacementDAO.deleteRowFromResults(requestId, schema.formMsoDeleteRequest());
+ }
+ if (System.currentTimeMillis() < waitUntil
+ &&( response == null || ((JSONObject) response.get("result")).get("row 0") == null)) {
+ try {
+ Thread.sleep(1000 * pollEverySeconds);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ } else {
+ break;
+ }
+ }
+ if (System.currentTimeMillis()>waitUntil) {
+ return ResponseEntity.status(Response.SC_GATEWAY_TIMEOUT).body("Request timedout");
+ }
+ System.out.println("Response"+ ((JSONObject)((JSONObject) response.get("result")).get("row 0")).toJSONString());
+ JSONObject obj = ((JSONObject)((JSONObject) response.get("result")).get("row 0"));
+ obj.put("result",( (String) obj.get("result")));
+ return ResponseEntity.ok(obj.toJSONString());
+ }
+
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/service/ValetPlacementService.java b/valetapi/src/main/java/org/onap/fgps/api/service/ValetPlacementService.java
new file mode 100644
index 0000000..0e09c5a
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/service/ValetPlacementService.java
@@ -0,0 +1,1017 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.service;
+
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Set;
+
+import org.json.simple.JSONArray;
+import org.json.simple.JSONObject;
+import org.json.simple.parser.JSONParser;
+import org.json.simple.parser.ParseException;
+import org.onap.fgps.api.beans.schema.Schema;
+import org.onap.fgps.api.dao.ValetServicePlacementDAO;
+import org.onap.fgps.api.logging.EELFLoggerDelegate;
+import org.onap.fgps.api.utils.Constants;
+import org.onap.fgps.api.utils.YamlToJsonConverter;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.HttpStatus;
+import org.springframework.http.ResponseEntity;
+import org.springframework.stereotype.Service;
+
+@Service
+public class ValetPlacementService {
+ // Field names that every incoming placement request must carry (validated in isValidateRequest).
+ String[] requiredFields = {"region_id","keystone_url","tenant_id","service_instance_id","vnf_id","vnf_name","vf_module_id","vf_module_name"};
+ // DAO used to persist requests and poll/delete result rows.
+ private ValetServicePlacementDAO valetServicePlacementDAO;
+ // Builder for the DB request bodies (e.g. formMsoDeleteRequest in pollForResult).
+ private Schema schema;
+ private static final EELFLoggerDelegate LOGGER = EELFLoggerDelegate.getLogger(ValetPlacementService.class);
+
+
+    /**
+     * Spring-injected constructor.
+     *
+     * @param valetServicePlacementDAO DAO used for request persistence and result polling
+     * @param schema                   helper that builds DB request bodies
+     */
+    @Autowired
+    public ValetPlacementService(ValetServicePlacementDAO valetServicePlacementDAO, Schema schema) {
+        this.valetServicePlacementDAO = valetServicePlacementDAO;
+        this.schema = schema;
+    }
+
+    /**
+     * Returns true only when the given object is an ArrayList whose elements all
+     * stringify to the empty string (an empty list also counts as blank).
+     * Anything that is not an ArrayList yields false.
+     */
+    private boolean isBlankArray(Object object) {
+        if (!(object instanceof ArrayList)) {
+            return false;
+        }
+        for (Object element : (ArrayList) object) {
+            if (!"".equals(element.toString())) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+ public Object getParam(String key, JSONObject envParams, JSONObject parentProperties, LinkedHashMap requestParameters, String rootKey,Integer paramIndex) {
+ if ("flavor".equals(key)) {
+ System.out.println("");
+ }
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"getParam : {}", key);
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"envParams : {}", envParams);
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"parentProperties : {}", parentProperties);
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"requestParameters : {}", requestParameters);
+ if (rootKey.equals(Constants.HEAT_REQUEST_VALET_HOST_ASSIGNMENT)) {
+ return key;
+ }
+ else if (parentProperties != null && parentProperties.get(key) != null && !"".equals(parentProperties.get(key))) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"Found in parent properties : {}", parentProperties.get(key));
+ if (parentProperties.get(key) instanceof JSONObject && ((JSONObject)parentProperties.get(key)).containsKey("get_param") && ((JSONObject)parentProperties.get(key)).get("get_param").equals(key)) {
+ return getParam(key, envParams, null, requestParameters, rootKey,paramIndex);
+ }
+ if (paramIndex!=null && paramIndex>=0) {
+ JSONArray toRet = (JSONArray)(parentProperties.get(key));
+ return toRet.get(paramIndex);
+ }
+ return parentProperties.get(key);
+ }else if (requestParameters != null && requestParameters.get(key) != null && !"".equals(requestParameters.get(key)) && !isBlankArray(requestParameters.get(key))) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"Found in request params : {}", requestParameters.get(key));
+ if (paramIndex!=null && paramIndex>=0) {
+ List toRet = (List)requestParameters.get(key);
+ return toRet.get(paramIndex);
+ }
+ return requestParameters.get(key);
+ }else if (envParams != null && envParams.get(key) != null && !"".equals(envParams.get(key))) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"Found in environment params : {}", envParams.get(key));
+ if (paramIndex!=null && paramIndex>=0) {
+ JSONArray toRet = (JSONArray)(envParams.get(key));
+ return toRet.get(paramIndex);
+ }
+ return envParams.get(key);
+ } else if (rootKey.equals(Constants.HEAT_REQUEST_AZ)) {
+ return "None";
+ }
+ /*commented as part of code merge start
+ else {
+ if (rootKey.equals(Constants.HEAT_REQUEST_AZ)) {
+ return "None";
+ }else if(rootKey.equals(Constants.HEAT_REQUEST_VALET_HOST_ASSIGNMENT)) {
+ return key;
+ }
+ } commented as part of code merge end*/
+ /*else { if (parentProperties.containsKey(Constants.HEAT_RESOURCE_PROPERTIES)) {// Sometimes there may be a inner properties which contains the key
+ return getParam(key, (JSONObject)parentProperties.get(Constants.HEAT_RESOURCE_PROPERTIES), envParams, requestParameters, rootKey);
+ } else if (!rootKey.equals(Constants.HEAT_REQUEST_AZ) && !rootKey.equals(Constants.HEAT_REQUEST_VALET_HOST_ASSIGNMENT) && (envParams == null || envParams.get(key) == null || ((String) envParams.get(key)).length() == 0)) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"Taking from requestParameters : {}", requestParameters.get(key));
+ return requestParameters.get(key);
+ }
+ if (rootKey.equals(Constants.HEAT_REQUEST_AZ) && (envParams.get(key) == null || "".equals(envParams.get(key).toString().trim()))) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"sending None");
+ return "None";
+ }else if(rootKey.equals(Constants.HEAT_REQUEST_VALET_HOST_ASSIGNMENT) && (envParams.get(key) == null || "".equals(envParams.get(key).toString().trim()))) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"sending same key : {}", key);
+ return key;
+ }
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"sending from envparams : {}", envParams.get(key));
+ return envParams.get(key);*/
+ return null;
+ }
+ public Object getParam(String key, JSONObject envParams, JSONObject parentProperties, LinkedHashMap requestParameters, String rootKey) {
+ return getParam(key, envParams, parentProperties, requestParameters, rootKey, null);
+ }
+
+    /**
+     * Resolves a parameter using the key itself as the root key; see the
+     * six-argument overload for the full lookup order.
+     * (The leftover debug hook on the "names" key has been removed.)
+     */
+    public Object getParam(String key, JSONObject envParams, JSONObject parentProperties, LinkedHashMap requestParameters) {
+        return getParam(key, envParams, parentProperties, requestParameters, key);
+    }
+
+    /**
+     * Resolves a get_attr reference by searching the stack of per-template
+     * resource maps (resourcesList) from the innermost template outwards.
+     * NOTE(review): the "resources" argument is not consulted by this overload;
+     * it is kept only for interface compatibility.
+     *
+     * @return a two-element JSONArray of [resolved resource, attribute name
+     *         (key.get(1))], or null when no template on the stack defines it
+     */
+    public Object getAttr(JSONArray key, JSONObject resources, int index) {
+        LOGGER.info(EELFLoggerDelegate.applicationLogger, "key : {}", key);
+        String resourceName = (String) key.get(index);
+        for (int pos = resourcesList.size() - 1; pos >= 0; pos--) {
+            Object match = getAttr(resourceName, resourcesList.get(pos));
+            if (match == null) {
+                continue;
+            }
+            JSONArray pair = new JSONArray();
+            pair.add(match);
+            pair.add((String) key.get(1));
+            return pair;
+        }
+        return null;
+    }
+
+    /** Returns the named entry of the resource map, or null when the map itself is null. */
+    public Object getAttr(String key, JSONObject resources) {
+        return resources == null ? null : resources.get(key);
+    }
+ // Index of the instance currently being expanded inside a counted resource
+ // group; -1 means the base (non-nested) template is being processed.
+ private int resourceIndex = -1;
+ /**
+ * Recursively expands one heat resource into the flat JSON shape sent to the
+ * valet engine. Three cases: (1) a ResourceGroup/server group whose
+ * resource_def points at a nested .yaml template is expanded "count" times;
+ * (2) a resource whose type itself is a .yaml file is converted and returned
+ * as-is; (3) an ordinary resource has its properties resolved key by key
+ * (count, resource_def, availability_zone, scheduler_hints, metadata, and
+ * generic get_param/get_attr/get_resource/str_replace references).
+ * NOTE(review): mutates resourceObject ("properties", sometimes "type") and
+ * returnObject in place, and relies on the resourceIndex field as shared state.
+ */
+ @SuppressWarnings({ "rawtypes", "unchecked", "unused" })
+ public JSONObject parseResourceObject(JSONObject parent, JSONObject returnObject, JSONObject resourceObject,
+ JSONObject envParams, LinkedHashMap files, JSONObject parentProperties, LinkedHashMap requestParameters) {
+ if (returnObject == null) {
+ returnObject = new JSONObject();
+ }
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"resourceObject : {}", resourceObject);
+ JSONObject properties = (JSONObject) resourceObject.get(Constants.HEAT_RESOURCE_PROPERTIES);
+ String resourceType = (String) resourceObject.get("type");
+ JSONObject propertiesTemp = new JSONObject();
+ // Case 1: resource group wrapping a nested yaml template via resource_def.
+ if (resourceType != null && (resourceType.equals(Constants.OS_HEAT_RESOURCEGROUP) || resourceType.equals(Constants.OS_NOVA_SERVERGROUP_ROOT)) && properties.containsKey("resource_def")) {
+ // Check for a yaml file
+ JSONObject resourceDef = (JSONObject)properties.get("resource_def");
+ if (resourceDef != null && resourceDef.containsKey("type") && resourceDef.get("type").toString().endsWith(".yaml")) {
+ // Get the count
+ String count = "1";
+ if (properties.get("count") != null) {
+ if (properties.get("count") instanceof String) {
+ count = properties.get("count").toString();
+ }else {
+ // count given as {get_param: ...}; resolve it, defaulting to 1.
+ Object countObj = getParam(((JSONObject)properties.get("count")).get("get_param").toString(), envParams, parentProperties, requestParameters);
+ if (countObj instanceof String) {
+ count = (String)countObj;
+ }else if(countObj instanceof Integer) {
+ count = ((Integer)countObj).toString();
+ }
+ if (count == null) {
+ count = "1";
+ }
+ }
+ }
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"count : {}", count);
+ // Expand the nested template once per group member; resourceIndex is
+ // read by nested calls to pick per-member values out of list params.
+ for (resourceIndex = 0; resourceIndex < Integer.parseInt(count); resourceIndex ++) {
+ // Get the file
+ String nestedYamlFile = (String) files.get(resourceDef.get("type").toString());
+ //LOGGER.info(EELFLoggerDelegate.applicationLogger,"nestedYamlFile : {}", nestedYamlFile);
+ JSONObject resourceProperties = processProperties(
+ (JSONObject) resourceDef.get(Constants.HEAT_RESOURCE_PROPERTIES), parentProperties, parent,
+ envParams, requestParameters);
+ JSONObject nestedYamlJSON = null;
+ try {
+ if (nestedYamlFile != null) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"nestedYamlFile : {}", nestedYamlFile);
+ nestedYamlJSON = processTemplate(convertToJson(nestedYamlFile), files, envParams,
+ resourceProperties, requestParameters);
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"nestedYamlJSON : {}", nestedYamlJSON);
+ String resourceName = nestedYamlJSON.keySet().toArray()[0].toString();
+ JSONObject obj = (JSONObject)nestedYamlJSON.get(resourceName);
+ JSONObject objReturn = new JSONObject();
+ Set<String> objKeys = ((JSONObject)obj.get("properties")).keySet();
+ // Keep only the properties the engine cares about (names, flavor,
+ // image, metadata, availability zone, scheduler hints).
+ for(String objKey : objKeys) {
+ if (!Constants.HEAT_REQUEST_NAMES.equals(objKey) &&
+ !Constants.HEAT_REQUEST_FLAVOR.equals(objKey) && !Constants.HEAT_REQUEST_IMAGE.equals(objKey) && !Constants.HEAT_REQUEST_METADATA.equals(objKey) && !Constants.HEAT_REQUEST_AVAILABILITY_ZONE.equals(objKey) && !Constants.HEAT_REQUEST_SCHEDULER_HINTS.equals(objKey)){
+
+ }else {
+ objReturn.put(objKey, ((JSONObject)obj.get(Constants.HEAT_REQUEST_PROPERTIES)).get(objKey));
+ }
+ }
+ obj.put(Constants.HEAT_REQUEST_PROPERTIES , objReturn);
+ // One entry per group member, suffixed with the member index.
+ returnObject.put(resourceName + "_" + resourceIndex, obj);
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ LOGGER.error(EELFLoggerDelegate.applicationLogger," : parseResourceObject : Error details : "+ e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger," : parseResourceObject : Error details : "+ e.getMessage());
+ }
+ resourceObject.put(Constants.HEAT_RESOURCE_PROPERTIES, resourceProperties);
+ resourceObject.put("type", nestedYamlJSON);
+ }
+ }
+ // NO YAML file, no processing
+ return returnObject;
+ } else if (resourceType != null && resourceType.toString().endsWith(".yaml")) {
+ // Case 2: the resource type is itself a nested yaml file.
+ // Generally comes in availability zone
+ // Get the template
+ JSONObject nestedYaml = convertToJson(files.get(resourceType.toString()).toString());
+ return nestedYaml;
+ } else {
+ // Case 3: plain resource - resolve each property individually.
+
+ if (properties != null) {
+ Set<String> propertiesKeySet = properties.keySet();
+ for (String propertiesKey : propertiesKeySet) {
+ Long get_param_count = (long) 1;
+ if (propertiesKey.equals(Constants.HEAT_REQUEST_KEYSTONE_NETWORKS)) {
+ continue;
+ }
+ if (propertiesKey.equals(Constants.HEAT_REQUEST_PROPERTIES_COUNT)) {
+ JSONObject count = (JSONObject) properties.get(propertiesKey);
+ String countParameter = (String) count.get("get_param");
+ get_param_count = Long.parseLong(getParam(countParameter,envParams, null, requestParameters).toString());
+ propertiesTemp.put(propertiesKey, get_param_count);
+
+ } else if (propertiesKey.equals(Constants.HEAT_REQUEST_RESOURCES_DEF)) {
+ // Inline resource_def: resolve its properties and expand its template.
+ JSONObject resource_def = (JSONObject) properties.get(propertiesKey);
+ String nestedTemplateName = (String) resource_def.get(Constants.HEAT_REQUEST_RESOURCES_TYPE);
+ JSONObject nestedYaml = null;
+
+ JSONObject resourceProperties = processProperties(
+ (JSONObject) resource_def.get(Constants.HEAT_RESOURCE_PROPERTIES), parentProperties,
+ parent, envParams, requestParameters);
+
+ if (nestedTemplateName != null) {
+ if (files.get(nestedTemplateName) != null) {
+ nestedYaml = processTemplate(convertToJson((String) files.get(nestedTemplateName)),
+ files, envParams, resourceProperties, requestParameters);
+ }
+ }
+ resource_def.put(Constants.HEAT_REQUEST_RESOURCES_TYPE, nestedYaml);
+ resource_def.put(Constants.HEAT_RESOURCE_PROPERTIES, resourceProperties);
+ propertiesTemp.put(propertiesKey, resource_def);
+ resourceObject.put(Constants.HEAT_RESOURCE_PROPERTIES, propertiesTemp);
+
+ } else if (propertiesKey.equals(Constants.HEAT_REQUEST_AVAILABILITY_ZONE)) {
+ // availability_zone: either a direct get_param or a str_replace
+ // combining an AZ with a valet host assignment.
+ JSONObject azJSON = (JSONObject)properties.get(propertiesKey);
+ if (azJSON.get("get_param") != null) {
+ propertiesTemp.put(propertiesKey, getParam((String)azJSON.get("get_param"), envParams, parentProperties, requestParameters,Constants.HEAT_REQUEST_AZ));
+ }else {
+ JSONObject str_replace = (JSONObject)azJSON.get("str_replace");
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"str_replace : {}", str_replace);
+ JSONObject params = (JSONObject)str_replace.get("params");
+ JSONObject az = (JSONObject)params.get(Constants.HEAT_REQUEST_AZ);
+ Object az_value = null;
+ if (az.containsKey("get_param")) {
+ if (az.get("get_param") instanceof String) {
+ az_value = getParam(az.get("get_param").toString(), envParams, parentProperties, requestParameters,Constants.HEAT_REQUEST_AZ);
+ }else {
+ // It can be a JSONArray
+ JSONArray az_getparam_array = (JSONArray)az.get("get_param");
+ JSONArray az_resources = (JSONArray)getParam(az_getparam_array.get(0).toString(), envParams, parentProperties, requestParameters,Constants.HEAT_REQUEST_AZ, ((Long)az_getparam_array.get(1)).intValue());
+ JSONObject az_resource = (JSONObject)az_resources.get(0);
+ JSONObject nestedYaml = parseResourceObject(parent, returnObject, az_resource, envParams, files, parentProperties, requestParameters);
+ JSONObject outputs = (JSONObject)nestedYaml.get("outputs");
+ JSONObject requiredJSON = (JSONObject)outputs.get(az_resources.get(1).toString());
+ JSONArray values = (JSONArray)requiredJSON.get("value");
+ // Pick the output value for the group member being expanded.
+ JSONObject valueJSON = (JSONObject)values.get(resourceIndex);
+ if (valueJSON.containsKey("get_param")) {
+ az_value = getParam(valueJSON.get("get_param").toString(), envParams, null, requestParameters,Constants.HEAT_REQUEST_AZ);
+ }else {
+ az_value = valueJSON.toString();
+ }
+ }
+ }
+ JSONObject valet_host_assignment = (JSONObject)params.get(Constants.HEAT_REQUEST_VALET_HOST_ASSIGNMENT);
+ Object valet_host_assignment_value = null;
+ Object valet_host_assignment_second_arg = null;
+ if (valet_host_assignment != null && valet_host_assignment.containsKey("get_param")) {
+ // This can be either a string OR an array
+ if (valet_host_assignment.get("get_param") instanceof String) {
+ valet_host_assignment_value = getParam(valet_host_assignment.get("get_param").toString(), envParams, parentProperties, requestParameters,Constants.HEAT_REQUEST_VALET_HOST_ASSIGNMENT);
+ }else {
+ // Array
+ JSONArray valet_host_assignment_arr = (JSONArray)valet_host_assignment.get("get_param");
+ valet_host_assignment_value = getParam((String)valet_host_assignment_arr.get(0), envParams, parentProperties, requestParameters,Constants.HEAT_REQUEST_VALET_HOST_ASSIGNMENT,((Long)valet_host_assignment_arr.get(1)).intValue() );
+ if (valet_host_assignment_arr.get(1) instanceof JSONObject) {
+ JSONObject obj = (JSONObject)valet_host_assignment_arr.get(1);
+ String val = obj.get("get_param").toString();
+ if ("index".equals(val)) {
+ // {get_param: index} refers to the current group member.
+ valet_host_assignment_second_arg = String.valueOf(resourceIndex);
+ }
+ }else {
+ valet_host_assignment_second_arg = valet_host_assignment_arr.get(1).toString();
+ }
+ }
+ }
+ // Create JSONArray
+ // Emit [az, host assignment(, member index)] for the engine.
+ JSONArray arr = new JSONArray();
+ arr.add(az_value);
+ arr.add(valet_host_assignment_value);
+ if (valet_host_assignment_second_arg != null) {
+ arr.add(Integer.parseInt(valet_host_assignment_second_arg.toString()));
+ }
+ propertiesTemp.put(propertiesKey, arr);
+ }
+ } else if (propertiesKey.equals(Constants.HEAT_REQUEST_SCHEDULER_HINTS)) {
+ // scheduler_hints: { group: { get_resource: rdn_server_group } }
+ JSONObject group = ((JSONObject)((JSONObject)properties.get(Constants.HEAT_REQUEST_SCHEDULER_HINTS)).get("group"));
+ if (group.containsKey("get_resource")) {
+ // Expand the referenced server group in place.
+ JSONObject resObj = new JSONObject();
+ resObj.put(group.get("get_resource").toString(), parseResourceObject(parent, returnObject, (JSONObject)((JSONObject)parent.get("resources")).get(group.get("get_resource")), envParams, files, parentProperties, requestParameters));
+ group.clear();
+ group.put("group", resObj);
+ propertiesTemp.put(propertiesKey, group);
+ }else if (group.containsKey("get_param")) {
+ JSONObject resObj = new JSONObject();
+ // Check if this exist in resources list
+ boolean resourceFound = false;
+ for (int resourcesListIndex = resourcesList.size() -1 ; resourcesListIndex >= 0; resourcesListIndex --) {
+ if (resourcesList.get(resourcesListIndex).containsKey(group.get("get_param").toString())) {
+ resourceFound = true;
+ resObj.put(group.get("get_param").toString(), parseResourceObject(parent, returnObject, (JSONObject)resourcesList.get(resourcesListIndex).get(group.get("get_param")), envParams, files, parentProperties, requestParameters));
+ group.clear();
+ group.put("group", resObj);
+ propertiesTemp.put(propertiesKey, group);
+ break;
+ }
+ }
+ if (!resourceFound) {
+ // Not a resource on the stack: treat it as a plain parameter.
+ resObj.put(group.get("get_param").toString(), getParam(group.get("get_param").toString(), envParams, parentProperties, requestParameters));
+ group.clear();
+ group.put("group", resObj);
+ propertiesTemp.put(propertiesKey, group);
+ }
+ }else {
+ JSONObject resObj = new JSONObject();
+ resObj.put("group", group);
+ propertiesTemp.put(propertiesKey, resObj);
+ }
+
+ } else if (propertiesKey.equals(Constants.HEAT_REQUEST_METADATA)) {
+ // Only the valet_groups entry of metadata is carried forward; it is
+ // flattened into a single comma-separated string.
+ JSONObject metadata = (JSONObject)properties.get(propertiesKey);
+ JSONObject metadataTemp = new JSONObject();
+ boolean valetGroupsExist = false;
+
+ for(Object key : metadata.keySet()) {
+ if (!"valet_groups".equals(key)) {
+ continue;
+ }
+ valetGroupsExist = true;
+ String valet_groups = "";
+ if (metadata.get(key) instanceof String) {
+ valet_groups = metadata.get(key).toString();
+ }else if (metadata.get(key) instanceof JSONArray) {
+ /** Code to support array **/
+ JSONArray valetGroupsArr = (JSONArray)metadata.get(key);
+ for(int valetGroupIndex = 0; valetGroupIndex < valetGroupsArr.size(); valetGroupIndex ++) {
+// Get the key value
+ if (valetGroupsArr.get(valetGroupIndex) instanceof String) {
+ valet_groups += "," + valetGroupsArr.get(valetGroupIndex).toString();
+ continue;
+ }
+ JSONObject keyValue = (JSONObject)valetGroupsArr.get(valetGroupIndex);
+ // If the value contains get_param key, call getParam
+ if (keyValue.containsKey("get_param")) {
+ valet_groups += "," + getParam((String)keyValue.get("get_param"), envParams, parentProperties, requestParameters).toString();
+ }else {
+ // Else use the same key
+ valet_groups += "," + keyValue.toJSONString();
+ }
+ }
+ // Drop the leading comma introduced by the loop above.
+ valet_groups = valet_groups.trim().substring(1);
+ }else {
+ // Get the key value
+ JSONObject keyValue = (JSONObject)metadata.get(key);
+ // If the value contains get_param key, call getParam
+ if (keyValue.containsKey("get_param")) {
+ valet_groups = getParam((String)keyValue.get("get_param"), envParams, parentProperties, requestParameters).toString();
+ }else {
+ // Else use the same key
+ valet_groups = keyValue.toJSONString();
+ }
+ }
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"valet_groups : {}", valet_groups);
+ valet_groups = valet_groups.replaceAll("\"\\s?,\\s?\"", ", ");
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"valet_groups : {}", valet_groups);
+ metadataTemp.put("valet_groups", valet_groups);
+ }
+ if (!valetGroupsExist) {
+ //metadataTemp.put("valet_groups", "");
+ }
+ propertiesTemp.put(Constants.HEAT_REQUEST_METADATA, metadataTemp);
+ }else {
+ // Generic property: resolve get_param / get_attr / get_resource /
+ // str_replace, or copy literals through.
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"propertiesKey : {}", propertiesKey);
+ if (properties.get(propertiesKey) instanceof JSONArray) {
+ JSONArray property = (JSONArray) properties.get(propertiesKey);
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"property.get(0) : {}", property.get(0));
+ if (property.get(0) instanceof JSONObject) {
+ propertiesTemp.put(propertiesKey, property);
+ }else {
+ String get_param = (String) property.get(0);
+ if (get_param != null && "get_param".equals(get_param)) {
+ propertiesTemp.put(propertiesKey,
+ getParam(get_param, envParams, parentProperties, requestParameters));
+ }else {
+ propertiesTemp.put(propertiesKey, property);
+ }
+ }
+
+ } else if (properties.get(propertiesKey) instanceof String) {
+ propertiesTemp.put(propertiesKey, properties.get(propertiesKey));
+ } else if (properties.get(propertiesKey) instanceof ArrayList) {
+ // Per-member list value: pick the entry for the current member.
+ propertiesTemp.put(propertiesKey, ((ArrayList)properties.get(propertiesKey)).get(resourceIndex));
+ }else {
+ JSONObject property = (JSONObject) properties.get(propertiesKey);
+ if (property != null) {
+ String get_param = null;
+ Integer param_index = null;
+ if (property.get("get_param") instanceof JSONArray) {
+ get_param = ((JSONArray)property.get("get_param")).get(0).toString();
+ try {
+ param_index = ((Long)((JSONArray)property.get("get_param")).get(1)).intValue();
+ } catch (Exception e) {
+ LOGGER.warn("Couldn't parse get_param index as in integer! " + property);
+ param_index = null;
+ }
+ }else if (property.get("get_param") != null){
+ get_param = (String) property.get("get_param");
+ }
+ JSONArray get_attr = (JSONArray) property.get("get_attr");
+ JSONObject str_replace = (JSONObject) property.get("str_replace");
+ String getResource = (String) property.get("get_resource");
+ if (get_param != null) {
+ if (resourceIndex == -1) {// Base template
+ propertiesTemp.put(propertiesKey,
+ getParam(get_param, envParams, parentProperties, requestParameters,get_param, param_index));
+ }else {// Nested template.
+ // Inside a group expansion, list-valued params are
+ // indexed by the current member.
+ Object paramValue = getParam(get_param, envParams, parentProperties, requestParameters);
+ if (paramValue instanceof String) {
+ propertiesTemp.put(propertiesKey,
+ paramValue);
+ }else if (paramValue instanceof JSONArray) {
+ JSONArray arr = (JSONArray)paramValue;
+ propertiesTemp.put(propertiesKey,
+ arr.get(resourceIndex));
+ }else if (paramValue instanceof ArrayList) {
+ ArrayList arr = (ArrayList)paramValue;
+ propertiesTemp.put(propertiesKey,
+ arr.get(resourceIndex));
+ }else {
+ propertiesTemp.put(propertiesKey,
+ paramValue);
+ }
+ }
+ }
+
+ if (get_attr != null) {
+ propertiesTemp.put(propertiesKey, getAttr(get_attr, parent, 0));
+ }
+
+ if (getResource != null) {
+ propertiesTemp.put(propertiesKey, parent.get(getResource));
+ }
+
+ if (str_replace != null) {
+ JSONObject str_replace_params = (JSONObject) str_replace.get("params");
+ if ("name".equals(propertiesKey)) {
+ //if (str_replace_params != null && str_replace_params.containsKey("$vnf_name")) {
+ if (str_replace_params != null) {
+ //Object vnf_name = str_replace_params.get("$vnf_name");
+ // Takes the FIRST str_replace param as the name source,
+ // whatever it is called.
+ Object vnf_name = str_replace_params.get(str_replace_params.keySet().toArray()[0]);
+ if (vnf_name instanceof String) {
+ propertiesTemp.put(propertiesKey, vnf_name);
+ }else if (vnf_name instanceof JSONObject){
+ JSONObject json_vnf_name = (JSONObject)vnf_name;
+ if (json_vnf_name.containsKey("get_param")) {
+ propertiesTemp.put(propertiesKey, getParam((String)json_vnf_name.get("get_param"), envParams, parentProperties, requestParameters));
+ }
+ }
+ }
+ }
+ // propertiesTemp.put(propertiesKey, getAttr(str_replace, parent));
+ }
+ }
+ }
+ resourceObject.put(Constants.HEAT_RESOURCE_PROPERTIES, propertiesTemp);
+
+ }
+
+ }
+
+ }
+ }
+
+ return resourceObject;
+ }
+
+ public JSONObject processProperties(JSONObject properties, JSONObject parentProperties, JSONObject parent,
+ JSONObject parameters, LinkedHashMap requestParameters) {
+ Set<String> propertiesKeySet = properties.keySet();
+ JSONObject propertiesTemp = new JSONObject();
+
+ for (String propertiesKey : propertiesKeySet) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"propertiesKey : {}", propertiesKey);
+ if (properties.get(propertiesKey) instanceof String) {
+ propertiesTemp.put(propertiesKey, properties.get(propertiesKey));
+
+ } else if (properties.get(propertiesKey) instanceof JSONArray) {
+ propertiesTemp.put(propertiesKey, properties.get(propertiesKey));
+
+ } else if (properties.get(propertiesKey) instanceof Long) {
+ propertiesTemp.put(propertiesKey, properties.get(propertiesKey));
+ } else {
+ JSONObject property = (JSONObject) properties.get(propertiesKey);
+ JSONArray get_attr = (JSONArray) property.get("get_attr"), get_param_arr;
+ JSONObject str_replace = (JSONObject) property.get("str_replace");
+ String getResource = (String) property.get("get_resource");
+ String get_param_str = "";
+ if (property.get("get_param") instanceof String) {
+ get_param_str = (String) property.get("get_param");
+ } else {
+ get_param_arr = (JSONArray) property.get("get_param");
+ }
+
+ if (get_param_str != null) {
+ propertiesTemp.put(propertiesKey,
+ getParam(get_param_str, parameters, parentProperties, requestParameters));
+ }
+
+ if (get_attr != null) {
+ propertiesTemp.put(propertiesKey, getAttr(get_attr, (JSONObject)parent.get("resources"), 0));
+ }
+
+ if (getResource != null) {
+ propertiesTemp.put(propertiesKey, parent.get(getResource));
+ }
+ if (str_replace != null) {
+ // propertiesTemp.put(propertiesKey, str_replace(get_param,
+ // parent));
+ }
+ }
+ }
+ return propertiesTemp;
+ }
+ // Stack of per-template "resources" maps: pushed on entry to processTemplate,
+ // popped on exit (except the base template's), and searched outwards by
+ // getAttr and the scheduler_hints handling.
+ List<JSONObject> resourcesList = new ArrayList<>();
+ /**
+ * Walks one heat template's resources and expands every OS::Nova::Server,
+ * server-group and OS::Heat::ResourceGroup resource via parseResourceObject,
+ * returning a flat map of expanded resources. parentProperties is non-null
+ * when this template is nested inside a resource group; those properties are
+ * merged into each resource before expansion.
+ */
+ @SuppressWarnings({ "unchecked", "rawtypes" })
+ public JSONObject processTemplate(JSONObject template, LinkedHashMap files, JSONObject environment,
+ JSONObject parentProperties, LinkedHashMap requestParameters) {
+ JSONObject resources = (JSONObject) template.get(Constants.VALET_REQUEST_RESOURCES);
+ resourcesList.add(resources);
+ JSONObject returnObject = new JSONObject();
+ Set<String> resourceKeySet = resources.keySet();
+ for (String key : resourceKeySet) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"resource key : {}", key);
+ JSONObject resourceObject = (JSONObject) resources.get(key);
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"resource resourceObject : {}", resourceObject);
+ // Consider only OS::Nova::Server resources
+ if (!resourceObject.containsKey("type") || (!Constants.OS_NOVA_SERVER_ROOT.equals((String)resourceObject.get("type"))
+ && !Constants.OS_NOVA_SERVERGROUP_ROOT.equals((String)resourceObject.get("type"))
+ && !Constants.OS_HEAT_RESOURCEGROUP.equals((String)resourceObject.get("type")))) {
+ continue;
+ }
+ // Server groups in the base template are only expanded when referenced.
+ if (parentProperties == null && Constants.OS_NOVA_SERVERGROUP_ROOT.equals((String)resourceObject.get("type"))){
+ continue;
+ }
+ JSONObject resourceProperties = (JSONObject) resourceObject.get(Constants.HEAT_RESOURCE_PROPERTIES);
+ if (parentProperties != null) {
+ // Take parent properties into the nested template
+ Set<Object> parentPropertyKeySet = parentProperties.keySet();
+ for(Object parentProperty : parentPropertyKeySet) {
+ if (!resourceProperties.containsKey(parentProperty)) {
+ resourceProperties.put(parentProperty, parentProperties.get(parentProperty));
+ }else {
+ // If resource property value is get param and the param value is a parent propery
+ if (resourceProperties.get(parentProperty) instanceof JSONObject) {
+ // Check if it is get_param
+ JSONObject jo = (JSONObject)resourceProperties.get(parentProperty);
+ if (jo.containsKey("get_param") && jo.get("get_param").toString().equals(parentProperty)) {
+ resourceProperties.put(parentProperty, parentProperties.get(parentProperty));
+ }
+ }
+ }
+ }
+ }
+ JSONObject envParams = (JSONObject) environment.get("parameters");
+
+
+ // A resource literally named "metadata" gets its properties resolved in place.
+ if(key.equals("metadata")) {
+ JSONObject metadata = (JSONObject) resources.get("metadata");
+ if(metadata !=null) {
+ resources.put("metadata", processProperties(
+ metadata, parentProperties, resources,
+ envParams, requestParameters));
+ }
+ }
+ JSONObject resourceObj = parseResourceObject(template, null, resourceObject, envParams, files, resourceProperties,
+ requestParameters);
+ // If the expansion returned the resource itself, keep it under its key;
+ // otherwise unwrap the nested map of expanded server resources.
+ if (resourceObj.containsKey("type") && (Constants.OS_NOVA_SERVER_ROOT.equals((String)resourceObj.get("type"))
+ || Constants.OS_NOVA_SERVERGROUP_ROOT.equals((String)resourceObj.get("type"))
+ || Constants.OS_HEAT_RESOURCEGROUP.equals((String)resourceObj.get("type")))) {
+ returnObject.put(key, resourceObj);
+ }else {
+ Set<String> resourceKeys = resourceObj.keySet();
+ for(String resourceKey : resourceKeys) {
+ if (resourceObj.get(resourceKey) instanceof JSONObject) {
+ JSONObject obj = (JSONObject)resourceObj.get(resourceKey);
+ if (obj.containsKey("type") && (Constants.OS_NOVA_SERVER_ROOT.equals((String)obj.get("type"))
+ || Constants.OS_NOVA_SERVERGROUP_ROOT.equals((String)obj.get("type"))
+ || Constants.OS_HEAT_RESOURCEGROUP.equals((String)obj.get("type")))) {
+ returnObject.put(resourceKey, obj);
+ }
+ }else {
+ break;
+ }
+ }
+ }
+ }
+ // Pop this template's resources off the stack (the base entry stays).
+ if (resourcesList.size() > 1) {
+ resourcesList.remove(resources);
+ }
+ return returnObject;
+ }
+
+ /** Unimplemented placeholder; currently always returns null. */
+ public JSONObject parseLogicFinal() {
+
+ return null;
+ }
+
+ public ResponseEntity<String> processResponse(JSONObject response, LinkedHashMap requestParameters) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"response : "+response);
+ JSONObject statusObj = null;
+ String status = (String)response.get("status");
+ statusObj = parseToJSON(status);
+ status = (String) statusObj.get("status");
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"response : "+status);
+
+ JSONObject statusObjReturn = new JSONObject();
+ if (!"failed".equals(status)) {
+ //if ("".equals(statusObj.get("message"))) {//Success
+ // Process the result object
+ JSONObject result = null;
+ try {
+ result = (JSONObject)new JSONParser().parse((String)response.get("result"));
+ } catch (ParseException e) {
+ e.printStackTrace();
+ LOGGER.error(EELFLoggerDelegate.applicationLogger,"processResponse : Error details : "+ e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"processResponse : Error details : "+ e.getMessage());
+ return ResponseEntity.ok("Invalid response : " + response.toJSONString());
+ }
+ Set<Object> resultKeys = result.keySet();
+ JSONObject result_to_return = new JSONObject();
+ for(Object resultKey : resultKeys) {
+ requestParameters.put(resultKey, result.get(resultKey));
+ }
+ response.put("parameters", requestParameters);
+ response.remove("timestamp");
+ response.remove("result");
+ response.remove("request_id");
+ response.remove("status");
+ JSONObject status_obj = new JSONObject();
+ response.put("status", statusObj);
+ return ResponseEntity.ok(response.toJSONString());
+ // }
+ }
+ JSONObject status_obj = new JSONObject();
+ /*statusObjReturn.put("status", statusObj.get("status").toString());
+ statusObjReturn.put("message", statusObj.get("message").toString());*/
+ status_obj.put("status", statusObj);
+ return ResponseEntity.ok(status_obj.toJSONString());
+ }
+
+ @SuppressWarnings("unchecked")
+ public ResponseEntity<String> processMSORequest1(JSONObject request, String requestId , String operation) {
+ try {
+ ArrayList<String> missingfields = isValidateRequest(request);
+ if(missingfields.size() >0) {
+ return ResponseEntity.status(HttpStatus.UNPROCESSABLE_ENTITY).body("RequiredFields : "+ missingfields.toString());
+ }
+
+ @SuppressWarnings("rawtypes")
+ LinkedHashMap heat_request = (LinkedHashMap) request.get(Constants.HEAT_REQUEST);
+ JSONObject template = convertToJson(heat_request.get(Constants.HEAT_REQUEST_TEMPLATE).toString());
+ @SuppressWarnings("rawtypes")
+ LinkedHashMap files = (LinkedHashMap) heat_request.get(Constants.HEAT_REQUEST_FILES);
+ JSONObject environment = convertToJson(heat_request.get(Constants.HEAT_REQUEST_ENVIRONMENT).toString());
+ LinkedHashMap requestParameters = (LinkedHashMap) heat_request.get(Constants.HEAT_REQUEST_PARAMETERS);
+ if (requestParameters == null) {
+ requestParameters = new LinkedHashMap();
+ }
+ JSONObject response = processTemplate(template, files, environment, null, requestParameters);
+ return saveRequest(request, response, operation, requestId, requestParameters);
+ //return response.toJSONString();
+ } catch (Exception e) {
+ e.printStackTrace();
+ LOGGER.error(EELFLoggerDelegate.applicationLogger,"processMSORequest1: Error details : "+ e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"processMSORequest1: Error details : "+ e.getMessage());
+ return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).build();
+ }
+ }
+
+ //java junit test case purpose
+ @SuppressWarnings("unchecked")
+ public ResponseEntity<String> processMSORequest2(JSONObject request, String requestId) {
+ try {
+ LOGGER.debug(EELFLoggerDelegate.debugLogger,"in processMSORequest2");
+ @SuppressWarnings("rawtypes")
+ LinkedHashMap heat_request = (LinkedHashMap) request.get(Constants.HEAT_REQUEST);
+ JSONObject template = convertToJson(heat_request.get(Constants.HEAT_REQUEST_TEMPLATE).toString());
+ @SuppressWarnings("rawtypes")
+ LinkedHashMap files = (LinkedHashMap) heat_request.get(Constants.HEAT_REQUEST_FILES);
+ JSONObject environment = convertToJson(heat_request.get(Constants.HEAT_REQUEST_ENVIRONMENT).toString());
+ LinkedHashMap requestParameters = (LinkedHashMap) heat_request.get(Constants.HEAT_REQUEST_PARAMETERS);
+ if (requestParameters == null) {
+ requestParameters = new LinkedHashMap();
+ }
+ JSONObject response = processTemplate(template, files, environment, null, requestParameters);
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"Result : {}", response.toJSONString());
+ return saveRequest2(request, response, "create", requestId, requestParameters);
+ } catch (Exception e) {
+ e.printStackTrace();
+ LOGGER.error(EELFLoggerDelegate.applicationLogger,"processMSORequest2 : Error while processing MSO request for requestId : "+requestId+", Error details : "+ e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"processMSORequest2 : Error while processing MSO request for requestId : "+requestId+", Error details : "+ e.getMessage());
+ return null;
+ }
+ }
+
+ private ArrayList<String> isValidateRequest(JSONObject request) {
+ // TODO Auto-generated method stub
+ ArrayList<String> missingRequest = new ArrayList<>();
+ int i=0;
+ for(String field : requiredFields) {
+ if(request.get(field)==null || (request.get(field) instanceof String && "".equals(request.get(field)) )) {
+ missingRequest.add(field) ;
+ i++;
+ }
+ }
+ return missingRequest;
+ }
+
+ public String processMSORequest(JSONObject request) {
+ try {
+ @SuppressWarnings("rawtypes")
+ LinkedHashMap heat_request = (LinkedHashMap) request.get(Constants.HEAT_REQUEST);
+ JSONObject template = convertToJson(heat_request.get(Constants.HEAT_REQUEST_TEMPLATE).toString());
+ JSONObject resources = (JSONObject) template.get(Constants.VALET_REQUEST_RESOURCES);
+ @SuppressWarnings("rawtypes")
+ LinkedHashMap files = (LinkedHashMap) heat_request.get(Constants.HEAT_REQUEST_FILES);
+ JSONObject environment = convertToJson(heat_request.get(Constants.HEAT_REQUEST_ENVIRONMENT).toString());
+ JSONObject parameters = null;
+ if (environment != null) {
+ parameters = (JSONObject) environment.get("parameters");
+ }
+ LinkedHashMap requestParameters = (LinkedHashMap<?, ?>) heat_request.get("parameters");
+ if (requestParameters == null) {
+ requestParameters = new LinkedHashMap();
+ }
+
+ Set<String> resourceKeySet = resources.keySet();
+ for (String key : resourceKeySet) {
+ JSONObject resourceObject = (JSONObject) resources.get(key);
+ JSONObject properties = (JSONObject) resourceObject.get(Constants.HEAT_RESOURCE_PROPERTIES);
+ if (properties != null) {
+ JSONObject resource_def = (JSONObject) properties.get(Constants.HEAT_REQUEST_RESOURCES_DEF);
+ JSONObject count = (JSONObject) properties.get(Constants.HEAT_REQUEST_PROPERTIES_COUNT);
+ Long get_param = (long) 1;
+ if (count != null) {
+ String countParameter = (String) count.get("get_param");
+ get_param = (Long) parameters.get(countParameter);
+ }
+ if (resource_def != null) {
+ String nestedTemplateName = (String) resource_def.get(Constants.HEAT_REQUEST_RESOURCES_TYPE);
+
+ if (nestedTemplateName != null) {
+ if (files.get(nestedTemplateName) != null) {
+ JSONObject nestedYaml = convertToJson(files.get(nestedTemplateName).toString());
+ JSONArray nestedArray = new JSONArray();
+ System.out.println(get_param);
+ for (int i = 0; i < get_param; i++) {
+ nestedArray.add(nestedYaml);
+ }
+ resource_def.put(Constants.HEAT_REQUEST_RESOURCES_TYPE, nestedArray);
+ properties.put(Constants.HEAT_REQUEST_RESOURCES_DEF, resource_def);
+ resourceObject.put(Constants.HEAT_RESOURCE_PROPERTIES, properties);
+ resources.put(key, resourceObject);
+ }
+ }
+ }
+ }
+ }
+ String region_id = (String) request.get(Constants.HEAT_REQUEST_REGION_ID);
+ String keystone_url = (String) request.get(Constants.HEAT_REQUEST_KEYSTONE_ID);
+ JSONObject datacenter = new JSONObject();
+
+ if (region_id != null && keystone_url != null) {
+ datacenter.put("id", region_id);
+ datacenter.put("url", keystone_url);
+ }
+ request.put(Constants.HEAT_REQUEST_DATACENTER, datacenter);
+
+ template.put(Constants.VALET_REQUEST_RESOURCES, resources);
+ request.remove(Constants.HEAT_REQUEST);
+ request.put(Constants.VALET_ENGINE_KEY, template);
+ String dbRequest = schema.formMsoInsertUpdateRequest(null, "create", request.toJSONString());
+ // return valetServicePlacementDAO.insertRow(dbRequest);
+ return dbRequest;
+
+ } catch (Exception e) {
+ e.printStackTrace();
+ LOGGER.error(EELFLoggerDelegate.applicationLogger,"processMSORequest : Error details : "+ e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"processMSORequest : Error details : "+ e.getMessage());
+ return "bad request";
+ }
+ }
+
+ public JSONObject convertToJson(String data) {
+ String jsonString = YamlToJsonConverter.convertToJson(data);
+ JSONParser parser = new JSONParser();
+ try {
+ JSONObject json = (JSONObject) parser.parse(jsonString);
+ return json;
+ } catch (ParseException e) {
+ e.printStackTrace();
+ LOGGER.error(EELFLoggerDelegate.applicationLogger,"convertToJson : Error details : "+ e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"convertToJson : Error details : "+ e.getMessage());
+ return null;
+ }
+ }
+
+ public static JSONObject parseToJSON(String jsonString) {
+ JSONParser parser = new JSONParser();
+ try {
+ JSONObject json = (JSONObject) parser.parse(jsonString);
+ return json;
+ } catch (ParseException e) {
+ e.printStackTrace();
+
+ return null;
+ }
+ }
+
+ public String processDeleteRequest(JSONObject request, String requestId) {
+ String dbRequest = schema.formMsoInsertUpdateRequest(requestId, "delete", request.toJSONString());
+ return valetServicePlacementDAO.insertRow(dbRequest);
+ }
+
+
+
+ public ResponseEntity<String> saveRequest(JSONObject request, JSONObject response, String operation, String requestId, LinkedHashMap requestParameters){
+ JSONObject dataCenterObj = new JSONObject();
+ dataCenterObj.put("id", request.get("region_id"));
+ dataCenterObj.put("url", request.get("keystone_url"));
+ JSONObject dbJSON = new JSONObject();
+ dbJSON.put("datacenter", dataCenterObj);
+ dbJSON.put("tenant_id", request.get("tenant_id"));
+ dbJSON.put("service_instance_id", request.get("service_instance_id"));
+ dbJSON.put("vnf_instance_id", request.get("vnf_id"));
+ dbJSON.put("vnf_instance_name", request.get("vnf_name"));
+ dbJSON.put("vf_module_id", request.get("vf_module_id"));
+ dbJSON.put("vf_module_name", request.get("vf_module_name"));
+ JSONObject resources= new JSONObject();
+ resources.put("resources", response);
+ dbJSON.put("stack", resources);
+ dbJSON.put("stack_name", ((LinkedHashMap)request.get("heat_request")).get("stack_name"));
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"dbJSON : {}", dbJSON);
+ return saveCreateRequest(dbJSON, operation, requestId, requestParameters);
+ }
+	//java junit test case
+ public ResponseEntity<String> saveRequest2(JSONObject request, JSONObject response, String operation, String requestId, LinkedHashMap requestParameters){
+ JSONObject dataCenterObj = new JSONObject();
+ dataCenterObj.put("id", request.get("region_id"));
+ dataCenterObj.put("url", request.get("keystone_url"));
+ JSONObject dbJSON = new JSONObject();
+ dbJSON.put("datacenter", dataCenterObj);
+ dbJSON.put("tenant_id", request.get("tenant_id"));
+ dbJSON.put("service_instance_id", request.get("service_instance_id"));
+ dbJSON.put("vnf_instance_id", request.get("vnf_id"));
+ dbJSON.put("vnf_instance_name", request.get("vnf_name"));
+ dbJSON.put("vf_module_id", request.get("vf_module_id"));
+ dbJSON.put("vf_module_name", request.get("vf_module_name"));
+ JSONObject resources= new JSONObject();
+ resources.put("resources", response);
+ dbJSON.put("stack", resources);
+ dbJSON.put("stack_name", ((LinkedHashMap)request.get("heat_request")).get("stack_name"));
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"dbJSON : {}", dbJSON);
+ return ResponseEntity.ok(dbJSON.toJSONString());
+ }
+ public ResponseEntity<String> saveCreateRequest(JSONObject request, String operation, String requestId, LinkedHashMap requestParameters) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"saveRequest : the request - ", requestId);
+ String dbRequest = schema.formMsoInsertUpdateRequest(requestId, operation, request.toJSONString());
+ String insertRow = valetServicePlacementDAO.insertRow(dbRequest);
+ return pollForResult(request, operation + "-" + requestId, Constants.WAIT_UNITL_SECONDS,
+ Constants.POLL_EVERY_SECONDS, requestParameters);
+
+ }
+//java test case request
+ public ResponseEntity<String> saveRequesttest(JSONObject request, String operation, String requestId) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"saveRequest : the request - ", requestId);
+ String dbRequest = schema.formMsoInsertUpdateRequest(requestId, operation, request.toJSONString());
+ return ResponseEntity.ok(dbRequest);
+
+ }
+
+
+ public ResponseEntity<String> saveRequest(JSONObject request, String operation, String requestId) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"saveRequest : the request - ", requestId);
+ String dbRequest = schema.formMsoInsertUpdateRequest(requestId, operation, request.toJSONString());
+ String insertRow = valetServicePlacementDAO.insertRow(dbRequest);
+ return pollForResult(request, operation + "-" + requestId, Constants.WAIT_UNITL_SECONDS,
+ Constants.POLL_EVERY_SECONDS, null);
+
+ }
+
+ public ResponseEntity<String> pollForResult(JSONObject values, String requestId, int waitUntilSeconds,
+ int pollEverySeconds, LinkedHashMap requestParameters) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"pollForResult : called", requestId);
+
+ String result = null;
+ long waitUntil = System.currentTimeMillis() + (1000 * waitUntilSeconds);
+ int counter = 1;
+
+ JSONObject response = new JSONObject();
+ boolean isTimedOut = false;
+ while (true) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"pollForResult : polling database - ", counter++);
+
+ result = valetServicePlacementDAO.getRowFromResults(requestId);
+ System.out.println("getRowFromResults called count:" + counter);
+ response = result != null ? parseToJSON(result) : null;
+
+ if (response != null && ((JSONObject) response.get("result")).get("row 0") != null) {
+				LOGGER.debug(EELFLoggerDelegate.debugLogger,"pollForResult : response received", result);
+ System.out.println("deleteRowFromResults called");
+ valetServicePlacementDAO.deleteRowFromResults(requestId, schema.formMsoDeleteRequest());
+
+ }
+ if (System.currentTimeMillis() < waitUntil && (response == null
+ || ((JSONObject) response.get("result")).get("row 0") == null)) {
+ try {
+ Thread.sleep(1000 * pollEverySeconds);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ LOGGER.error(EELFLoggerDelegate.applicationLogger,"pollForResult : Error while processing request with requestId : "+requestId+", Error details : "+ e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"pollForResult : Error while processing request with requestId : "+requestId+", Error details : "+ e.getMessage());
+ }
+ } else {
+ break;
+ }
+ }
+ if (System.currentTimeMillis() > waitUntil) {
+ return ResponseEntity.status(HttpStatus.GATEWAY_TIMEOUT).build();
+ }
+ if (requestParameters != null) {
+ LOGGER.info(EELFLoggerDelegate.applicationLogger,"Result from DB : {}", ((JSONObject) ((JSONObject) response.get("result")).get("row 0")));
+ return processResponse(((JSONObject) ((JSONObject) response.get("result")).get("row 0")), requestParameters);
+ }else{
+ //System.out.println("Response"+ ((JSONObject)((JSONObject) response.get("result")).get("row 0")).toJSONString());
+ JSONObject obj = ((JSONObject)((JSONObject) response.get("result")).get("row 0"));
+ /*obj.put("result",parseToJSON( (String) obj.get("result")));
+ response.put("Status", parseToJSON(result));
+ System.out.println("mso"+result);*/
+ JSONObject res = new JSONObject();
+ res.put("status",parseToJSON( (String) obj.get("status")));
+ return ResponseEntity.ok(res.toJSONString());
+ }
+ }
+
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/service/ValetUtilityService.java b/valetapi/src/main/java/org/onap/fgps/api/service/ValetUtilityService.java
new file mode 100644
index 0000000..67c7859
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/service/ValetUtilityService.java
@@ -0,0 +1,44 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.service;
+
+public class ValetUtilityService {
+ public static String health() {
+ return "";
+ }
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/utils/CipherUtil.java b/valetapi/src/main/java/org/onap/fgps/api/utils/CipherUtil.java
new file mode 100644
index 0000000..6f8f595
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/utils/CipherUtil.java
@@ -0,0 +1,214 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.utils;
+
+import java.security.Provider;
+import java.security.SecureRandom;
+import java.security.Security;
+
+import javax.crypto.BadPaddingException;
+import javax.crypto.Cipher;
+import javax.crypto.spec.IvParameterSpec;
+import javax.crypto.spec.SecretKeySpec;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.lang.ArrayUtils;
+import org.onap.fgps.api.exception.CipherUtilException;
+import org.onap.fgps.api.logging.EELFLoggerDelegate;
+
+public class CipherUtil {
+
+ private static final EELFLoggerDelegate LOGGER = EELFLoggerDelegate.getLogger(CipherUtil.class);
+
+ /**
+ * Default key.
+ */
+ private static final String keyString = KeyProperties.getProperty("cipher.enc.key");
+
+ private static final String ALGORITHM = "AES";
+ private static final String ALGORYTHM_DETAILS = ALGORITHM + "/CBC/PKCS5PADDING";
+ private static final int BLOCK_SIZE = 128;
+ @SuppressWarnings("unused")
+ private static SecretKeySpec secretKeySpec;
+ private static IvParameterSpec ivspec;
+
+ /**
+ * Encrypts the text using a secret key.
+ *
+ * @param plainText
+ * Text to encrypt
+ * @return Encrypted Text
+ * @throws CipherUtilException
+	 *             if any encryption step fails
+ */
+ public static String encryptPKC(String plainText) throws CipherUtilException {
+ return CipherUtil.encryptPKC(plainText, keyString);
+ }
+
+ private static SecretKeySpec getSecretKeySpec() {
+ byte[] key = Base64.decodeBase64(keyString);
+ return new SecretKeySpec(key, ALGORITHM);
+ }
+
+ private static SecretKeySpec getSecretKeySpec(String keyString) {
+ byte[] key = Base64.decodeBase64(keyString);
+ return new SecretKeySpec(key, ALGORITHM);
+ }
+
+ /**
+ * Encrypt the text using the secret key in key.properties file
+ *
+ * @param value
+ * @return The encrypted string
+ * @throws BadPaddingException
+ * @throws CipherUtilException
+ * In case of issue with the encryption
+ */
+ public static String encryptPKC(String value, String skey) throws CipherUtilException {
+ Cipher cipher = null;
+ byte[] iv = null, finalByte = null;
+
+ try {
+ cipher = Cipher.getInstance(ALGORYTHM_DETAILS, "SunJCE");
+
+ SecureRandom r = SecureRandom.getInstance("SHA1PRNG");
+ iv = new byte[BLOCK_SIZE / 8];
+ r.nextBytes(iv);
+ ivspec = new IvParameterSpec(iv);
+ cipher.init(Cipher.ENCRYPT_MODE, getSecretKeySpec(skey), ivspec);
+ finalByte = cipher.doFinal(value.getBytes());
+
+ } catch (Exception ex) {
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"encrypt failed", ex);
+ throw new CipherUtilException(ex);
+ }
+ return Base64.encodeBase64String(ArrayUtils.addAll(iv, finalByte));
+ }
+
+ /**
+ * Decrypts the text using the secret key in key.properties file.
+ *
+ * @param message
+ * The encrypted string that must be decrypted using the ecomp
+ * Encryption Key
+ * @return The String decrypted
+ * @throws CipherUtilException
+ * if any decryption step fails
+ */
+ public static String decryptPKC(String message, String skey) throws CipherUtilException {
+ byte[] encryptedMessage = Base64.decodeBase64(message);
+ Cipher cipher;
+ byte[] decrypted = null;
+ try {
+ cipher = Cipher.getInstance(ALGORYTHM_DETAILS, "SunJCE");
+ ivspec = new IvParameterSpec(ArrayUtils.subarray(encryptedMessage, 0, BLOCK_SIZE / 8));
+ byte[] realData = ArrayUtils.subarray(encryptedMessage, BLOCK_SIZE / 8, encryptedMessage.length);
+ cipher.init(Cipher.DECRYPT_MODE, getSecretKeySpec(skey), ivspec);
+ decrypted = cipher.doFinal(realData);
+
+ } catch (Exception ex) {
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"decrypt failed", ex);
+ throw new CipherUtilException(ex);
+ }
+
+ return new String(decrypted);
+ }
+
+ /**
+ *
+ * Decrypts the text using the secret key in key.properties file.
+ *
+ * @param encryptedText
+ * Text to decrypt
+ * @return Decrypted text
+ * @throws CipherUtilException
+ * if any decryption step fails
+ */
+ public static String decryptPKC(String encryptedText) throws CipherUtilException {
+ return CipherUtil.decryptPKC(encryptedText, keyString);
+ }
+
+ public static void maine(String[] args) throws CipherUtilException {
+ String testValue = "vmNKzC1wHH7w8PiZf7iPTTwq4iaAJn3dRlVK1YLvwgFESCqNPj3azGvRgNpR8tx+2p+o346C9PMip8SJyle/rw==";
+ String encrypted;
+
+ String decrypted;
+
+ encrypted=encryptPKC(testValue);
+ System.out.println("encrypted"+encrypted);
+
+ decrypted=decryptPKC(testValue);
+ System.out.println("decrypted"+ decrypted);
+
+ }
+
+ public static void main (String[] args) {
+ Provider[] pa = Security.getProviders();
+ for (Provider p : pa) {
+ System.out.println("Provider: " + p + ", " + p.getInfo() );
+ }
+
+ /*
+ String encoded = "vmNKzC1wHH7w8PiZf7iPTTwq4iaAJn3dRlVK1YLvwgFESCqNPj3azGvRgNpR8tx+2p+o346C9PMip8SJyle/rw==";
+ String decoded = decryptPKC(encoded);
+ String reencoded = encryptPKC(decoded);
+ System.out.println(encoded);
+ System.out.println(decoded);
+ System.out.println(reencoded);
+ */
+
+ String plainText = "Jackdaws love my big sphinx of quartz.";
+ String encoded = encryptPKC(plainText);
+ String decoded = decryptPKC(encoded);
+ System.out.println(plainText);
+ System.out.println(encoded);
+ System.out.println(decoded);
+
+ }
+
+ public static String encodeBasicAuth(String userId, String password) {
+
+ String plainCreds = userId + ":" + password;
+ byte[] plainCredsBytes = plainCreds.getBytes();
+ byte[] base64CredsBytes = Base64.encodeBase64(plainCredsBytes);
+ String base64Creds = new String(base64CredsBytes);
+
+ return "Basic " +base64Creds;
+ }
+
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/utils/Constants.java b/valetapi/src/main/java/org/onap/fgps/api/utils/Constants.java
new file mode 100644
index 0000000..71fe8b8
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/utils/Constants.java
@@ -0,0 +1,100 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.utils;
+
+public class Constants {
+ public static final String HEAT_REQUEST = "heat_request";
+ public static final String HEAT_REQUEST_TEMPLATE = "template";
+ public static final String VALET_ENGINE_KEY = "stack";
+ public static final String VALET_REQUEST_RESOURCES = "resources";
+ public static final String HEAT_REQUEST_FILES = "files";
+ public static final String HEAT_RESOURCE_PROPERTIES = "properties";
+ public static final String HEAT_REQUEST_RESOURCES_DEF = "resource_def";
+ public static final String HEAT_REQUEST_RESOURCES_TYPE = "type";
+ public static final String HEAT_REQUEST_ENVIRONMENT = "environment";
+ public static final String HEAT_REQUEST_PARAMETERS = "parameters";
+ public static final String HEAT_REQUEST_PROPERTIES_COUNT = "count";
+ public static final String HEAT_REQUEST_DATACENTER = "datacenter";
+ public static final String HEAT_REQUEST_REGION_ID = "region_id";
+ public static final String HEAT_REQUEST_KEYSTONE_ID = "keystone_url";
+ public static final String HEAT_REQUEST_KEYSTONE_NETWORKS = "networks";
+ public static final String HEAT_REQUEST_AVAILABILITY_ZONE = "availability_zone";
+ public static final String HEAT_REQUEST_SCHEDULER_HINTS = "scheduler_hints";
+ public static final String HEAT_REQUEST_METADATA = "metadata";
+ public static final String HEAT_REQUEST_VALET_HOST_ASSIGNMENT = "$VALET_HOST_ASSIGNMENT";
+ public static final String HEAT_REQUEST_AZ = "$AZ";
+ public static final String HEAT_REQUEST_PROPERTIES = "properties";
+ public static final String HEAT_REQUEST_NAMES = "name";
+ public static final String HEAT_REQUEST_IMAGE = "image";
+ public static final String HEAT_REQUEST_FLAVOR = "flavor";
+
+
+ // MSO Request constants
+ public static final String HEAT_REQUEST_REQUEST_ID = "request_id";
+ public static final String HEAT_REQUEST_TIMESTAMP = "timestamp";
+ public static final String HEAT_REQUEST_OPERATION = "operation";
+ public static final String HEAT_REQUEST_STATUS = "status";
+
+ // tables names
+ public static final String HEAT_REQUEST_REQUEST = "request";
+
+ // tables names
+ public static final String SERVICE_PLACEMENTS_REQUEST_TABLE = "requests";
+ public static final String SERVICE_PLACEMENTS_RESULTS_TABLE = "results";
+ public static final String TABLE_RESULT = "results";
+ public static final String TABLE_GROUP_RULES = "group_rules";
+ public static final String TABLE_STACKS = "stacks";
+ public static final String TABLE_STACKS_ID_MAP = "stack_id_map";
+ public static final String TABLE_RESOURCES = "resources";
+ public static final String TABLE_REGIONS = "regions";
+ public static final String TABLE_Groups = "groups";
+
+ public static final String GROUPS_TABLE = "groups";
+ public static final String GROUP_PLACEMENTS_TABLE = "group_placements";
+
+ // parsing constants
+ public static final String OS_NOVA_SERVER_ROOT = "OS::Nova::Server";
+ public static final String OS_NOVA_SERVERGROUP_ROOT = "OS::Nova::ServerGroup";
+ public static final String OS_HEAT_RESOURCEGROUP = "OS::Heat::ResourceGroup";
+
+ public static final int WAIT_UNITL_SECONDS = 300;
+
+ public static final int POLL_EVERY_SECONDS = 5;
+
+
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/utils/DBInitializationRequests.java b/valetapi/src/main/java/org/onap/fgps/api/utils/DBInitializationRequests.java
new file mode 100644
index 0000000..691546c
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/utils/DBInitializationRequests.java
@@ -0,0 +1,50 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.utils;
+
+public class DBInitializationRequests {
+ public static final String KEYSPACE_REQUEST = "{\"replicationInfo\":{\"class\":\"NetworkTopologyStrategy\",\"DC1\":3,\"DC2\":3,\"DC3\":3},\"durabilityOfWrites\":\"true\",\"consistencyInfo\":{\"type\":\"eventual\"}}";
+ public static final String KEYSPACE_WITH_RF = "{\"replicationInfo\":{\"class\":\"NetworkTopologyStrategy\",DATA_CENTER_INFO},\"durabilityOfWrites\":\"true\",\"consistencyInfo\":{\"type\":\"eventual\"}}";
+
+
+}
+
+
+
+
+ \ No newline at end of file
diff --git a/valetapi/src/main/java/org/onap/fgps/api/utils/Helper.java b/valetapi/src/main/java/org/onap/fgps/api/utils/Helper.java
new file mode 100644
index 0000000..ef1eec1
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/utils/Helper.java
@@ -0,0 +1,48 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.utils;
+
+import java.text.MessageFormat;
+
+public class Helper {
+
+ public static String getURI(String macro, Object[] params) {
+ MessageFormat uri = new MessageFormat(macro);
+ return uri.format(params);
+ }
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/utils/KeyProperties.java b/valetapi/src/main/java/org/onap/fgps/api/utils/KeyProperties.java
new file mode 100644
index 0000000..90b7ad0
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/utils/KeyProperties.java
@@ -0,0 +1,122 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.utils;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Properties;
+
+import org.onap.fgps.api.logging.EELFLoggerDelegate;
+
+/**
+ * Searches the classpath for the file "key.properties".
+ *
+ * To put the file "key.properties" on the classpath, it can be in the same
+ * directory where the first package folder is - 'myClasses' folder in the
+ * following case as an example:
+ *
+ */
+public class KeyProperties {
+
+	private static final EELFLoggerDelegate LOGGER = EELFLoggerDelegate.getLogger(KeyProperties.class);
+
+ private static Properties properties;
+ private static String propertyFileName = "key.properties";
+
+ private static final Object lockObject = new Object();
+
+ /**
+ * Constructor is private.
+ */
+ private KeyProperties() {
+ }
+
+ /**
+ * Gets the property value for the specified key. If a value is found, leading
+ * and trailing space is trimmed.
+ *
+ * @param property
+ * Property key
+ * @return Value for the named property; null if the property file was not
+ * loaded or the key was not found.
+ */
+ public static String getProperty(String property) {
+ if (properties == null) {
+ synchronized (lockObject) {
+ try {
+ if (!initialize()) {
+ LOGGER.error(EELFLoggerDelegate.errorLogger, "Failed to read property file " + propertyFileName);
+ return null;
+ }
+ } catch (IOException e) {
+ LOGGER.error(EELFLoggerDelegate.errorLogger, "Failed to read property file " + propertyFileName, e);
+ return null;
+ }
+ }
+ }
+ String value = properties.getProperty(property);
+ if (value != null)
+ value = value.trim();
+ return value;
+ }
+
+ /**
+ * Reads properties from a key.properties file on the classpath.
+ *
+ * Clients do NOT need to call this method. Clients MAY call this method to test
+ * whether the properties file can be loaded successfully.
+ *
+ * @return True if properties were successfully loaded, else false.
+ * @throws IOException
+ * On failure
+ */
+ private static boolean initialize() throws IOException {
+ if (properties != null)
+ return true;
+ InputStream in = KeyProperties.class.getClassLoader().getResourceAsStream(propertyFileName);
+ if (in == null)
+ return false;
+ properties = new Properties();
+ try {
+ properties.load(in);
+ } finally {
+ in.close();
+ }
+ return true;
+ }
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/utils/MusicDBConstants.java b/valetapi/src/main/java/org/onap/fgps/api/utils/MusicDBConstants.java
new file mode 100644
index 0000000..4c7a937
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/utils/MusicDBConstants.java
@@ -0,0 +1,48 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.utils;
+
+public class MusicDBConstants {
+ public static final String CREATE_ONBOARDING = "/admin/onboardAppWithMusic";
+ public static final String CREATE_KEYSPACE = "/keyspaces/{0}";// {0} is name
+ public static final String CREATE_TABLE = "/keyspaces/{0}/tables/{1}"; //{1} is table name
+ public static final String INSERT_ROWS = "/keyspaces/{0}/tables/{1}/rows";
+ public static final String INDEX = "/keyspaces/{0}/tables/{1}/index/{2}"; //{2} fieldname
+ public static final String ONBOARDING = "/keyspaces/onboardAppWithMusic";
+ public static final String MUSIC_DB_URL = "/MUSIC/rest/v2";
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/utils/SystemProperties.java b/valetapi/src/main/java/org/onap/fgps/api/utils/SystemProperties.java
new file mode 100644
index 0000000..5ca4dd1
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/utils/SystemProperties.java
@@ -0,0 +1,193 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.utils;
+
+import javax.servlet.ServletContext;
+
+import org.onap.fgps.api.logging.EELFLoggerDelegate;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.PropertySource;
+import org.springframework.core.env.Environment;
+
+/**
+ * SystemProperties contains a list of constants used throughout portions of the
+ * application. Populated by Spring from multiple configuration files.
+ *
+ * Should be used like this:
+ *
+ * <pre>
+ *
+ * &#64;Autowired
+ * SystemProperties systemProperties;
+ * </pre>
+ */
+@Configuration
+//@PropertySource(value = { "${container.classpath:}/system.properties" })
+@PropertySource(value = { "file:opt/etc/config/system.properties" })
+public class SystemProperties {
+
+ private static final EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(SystemProperties.class);
+
+ private static Environment environment;
+
+ private ServletContext servletContext;
+
+ public static final String APP_DISPLAY_NAME = "app_display_name";
+ public static final String APPLICATION_NAME = "application_name";
+ public static final String INSTANCE_UUID = "instance_uuid";
+ public static final String MDC_CLASS_NAME = "ClassName";
+ public static final String MDC_LOGIN_ID = "LoginId";
+ public static final String MDC_TIMER = "Timer";
+ public static final String APP_NAME = "VALET_API";
+ public static final String VALET_REQUEST_ID = "X-VALET-API-RequestID";
+ public static final String PARTNER_NAME = "PartnerName";
+ public static final String FULL_URL = "Full-URL";
+ public static final String AUDITLOG_BEGIN_TIMESTAMP = "AuditLogBeginTimestamp";
+ public static final String AUDITLOG_END_TIMESTAMP = "AuditLogEndTimestamp";
+ public static final String METRICSLOG_BEGIN_TIMESTAMP = "MetricsLogBeginTimestamp";
+ public static final String METRICSLOG_END_TIMESTAMP = "MetricsLogEndTimestamp";
+ public static final String CLIENT_IP_ADDRESS = "ClientIPAddress";
+ public static final String STATUS_CODE = "StatusCode";
+ public static final String RESPONSE_CODE = "ResponseCode";
+ // Component or sub component name
+ public static final String TARGET_ENTITY = "TargetEntity";
+ // API or operation name
+ public static final String TARGET_SERVICE_NAME = "TargetServiceName";
+
+ // Logging Compliance
+ public static final String SINGLE_QUOTE = "'";
+ public static final String NA = "N/A";
+ public static final String UNKNOWN = "Unknown";
+ public static final String SECURITY_LOG_TEMPLATE = "Protocol:{0} Security-Event-Type:{1} Login-ID:{2} {3}";
+ public static final String PROTOCOL = "PROTOCOL";
+ public static final String SECURIRY_EVENT_TYPE = "SECURIRY_EVENT_TYPE";
+ public static final String LOGIN_ID = "LOGIN_ID";
+ public static final String ADDITIONAL_INFO = "ADDITIONAL_INFO";
+ public static final String USERAGENT_NAME = "user-agent";
+
+ // Protocols
+ public static final String HTTP = "HTTP";
+ public static final String HTTPS = "HTTPS";
+
+ public enum RESULT_ENUM {
+ SUCCESS, FAILURE
+ }
+
+ public enum SecurityEventTypeEnum {
+ INCOMING_REST_MESSAGE, OUTGOING_REST_MESSAGE, REST_AUTHORIZATION_CREDENTIALS_MODIFIED
+ }
+
+ public SystemProperties() {
+ super();
+ }
+
+ protected Environment getEnvironment() {
+ return environment;
+ }
+
+ @Autowired
+ public void setEnvironment(Environment environment) {
+ SystemProperties.environment = environment;
+ }
+
+ public ServletContext getServletContext() {
+ return servletContext;
+ }
+
+ public void setServletContext(ServletContext servletContext) {
+ this.servletContext = servletContext;
+ }
+
+ /**
+ * Tests whether a property value is available for the specified key.
+ *
+ * @param key
+ * Property key
+ * @return True if the key is known, otherwise false.
+ */
+ public static boolean containsProperty(String key) {
+ return environment.containsProperty(key);
+ }
+
+ /**
+ * Returns the property value associated with the given key (never
+ * {@code null}), after trimming any trailing space.
+ *
+ * @param key
+ * Property key
+ * @return Property value; the empty string if the environment was not
+ * autowired, which should never happen.
+ * @throws IllegalStateException
+ * if the key is not found
+ */
+ public static String getProperty(String key) {
+ String value = "";
+ if (environment == null) {
+ logger.error(EELFLoggerDelegate.errorLogger, "getProperty: environment is null, should never happen!");
+ } else {
+ value = environment.getRequiredProperty(key);
+ // java.util.Properties preserves trailing space
+ if (value != null)
+ value = value.trim();
+ }
+ return value;
+ }
+
+ /**
+ * Gets the property value for the key {@link #APPLICATION_NAME}.
+ *
+ * method created to get around JSTL 1.0 limitation of not being able to access
+ * a static method of a bean
+ *
+ * @return Application name
+ */
+ public String getApplicationName() {
+ return getProperty(APPLICATION_NAME);
+ }
+
+ /**
+ * Gets the property value for the key {@link #APP_DISPLAY_NAME}.
+ *
+ * @return Application display name
+ */
+ public String getAppDisplayName() {
+ return getProperty(APP_DISPLAY_NAME);
+ }
+
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/utils/UserUtils.java b/valetapi/src/main/java/org/onap/fgps/api/utils/UserUtils.java
new file mode 100644
index 0000000..be46ad2
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/utils/UserUtils.java
@@ -0,0 +1,76 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.utils;
+
+import javax.servlet.http.HttpServletRequest;
+
+import org.onap.fgps.api.logging.EELFLoggerDelegate;
+import org.springframework.web.util.HtmlUtils;
+
+@SuppressWarnings("rawtypes")
+public class UserUtils {
+
+ private static final EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(UserUtils.class);
+
+ public static final String KEY_USER_ROLES_CACHE = "userRoles";
+
+ /**
+ * Gets the full URL of the request by joining the request and any query string.
+ *
+ * @param request
+ * @return Full URL of the request including query parameters
+ */
+ public static String getFullURL(HttpServletRequest request) {
+ if (request != null) {
+ StringBuffer requestURL = request.getRequestURL();
+ String queryString = request.getQueryString();
+
+ if (queryString == null) {
+ return requestURL.toString();
+ } else {
+ return requestURL.append('?').append(queryString).toString();
+ }
+ }
+ return "";
+ }
+
+ public static String htmlEscape(String input) {
+ if (input==null) return null;
+ return HtmlUtils.htmlEscape(input);
+ }
+}
diff --git a/valetapi/src/main/java/org/onap/fgps/api/utils/YamlToJsonConverter.java b/valetapi/src/main/java/org/onap/fgps/api/utils/YamlToJsonConverter.java
new file mode 100644
index 0000000..78c929c
--- /dev/null
+++ b/valetapi/src/main/java/org/onap/fgps/api/utils/YamlToJsonConverter.java
@@ -0,0 +1,60 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.utils;
+
+import org.onap.fgps.api.logging.EELFLoggerDelegate;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+
+public class YamlToJsonConverter {
+ private static final EELFLoggerDelegate LOGGER = EELFLoggerDelegate.getLogger(YamlToJsonConverter.class);
+ public static String convertToJson(String data) {
+ ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
+ try {
+ Object obj = mapper.readValue(data, Object.class);
+ ObjectMapper jsonWriter = new ObjectMapper();
+ return jsonWriter.writeValueAsString(obj);
+ } catch (Exception e) {
+ e.printStackTrace();
+ LOGGER.error(EELFLoggerDelegate.applicationLogger,"convertToJson : Error details : "+ e.getMessage());
+ LOGGER.error(EELFLoggerDelegate.errorLogger,"convertToJson : Error details : "+ e.getMessage());
+ }
+ return "";
+ }
+}
diff --git a/valetapi/src/main/jenkins/versioning.groovy b/valetapi/src/main/jenkins/versioning.groovy
new file mode 100755
index 0000000..61b21b0
--- /dev/null
+++ b/valetapi/src/main/jenkins/versioning.groovy
@@ -0,0 +1,66 @@
+#!/usr/bin/env groovy
+// Construct tag version from pom version and pipelineId input param
+def set(String version, String pipelineId) {
+ if (pipelineId == "") {
+ //id empty, default to jenkins build info
+ echo "pipelineId is empty, defaulting to ${currentBuild.startTimeInMillis}-${currentBuild.number}"
+ pipelineId = "${currentBuild.startTimeInMillis}-${currentBuild.number}"
+ } else {
+ echo "pipelineId is ${pipelineId}"
+ }
+
+ TAG_VERSION = VERSION.replace("-SNAPSHOT", "") + "-" + pipelineId
+ currentBuild.displayName = "${TAG_VERSION}"
+ def previousDesc = currentBuild.description
+ currentBuild.description = "${previousDesc} TAG_VERSION=${TAG_VERSION}"
+ stage "SetVersion|" + TAG_VERSION
+}
+
+// Uses Maven Release Plugin
+// Creates SCM tag of format <artifact>-<tagVersion>
+// Retains POM version of branch as <devVersion>
+// <credentialId> should be Jenkins SSH credential with permissions to write to Repo
+// WARNING: when implementing auto-tagging, update CodeCloud Web Hook to filter out tag updates (i.e. add ^$ to tag filter)
+def tagScm(String artifact, String devVersion, String tagVersion, String credentialId) {
+
+ if (env.BRANCH_NAME == 'master') {
+ stage 'Tag SCM'
+ sh "git clean -f && git reset --hard origin/${env.BRANCH_NAME}"
+
+ //TODO - NEED TO INCREMENT VERSION SOMEWHERE OR ADD SOMETHING ABOUT BRANCH IN TAGVERSION
+ // MASTER AND RELEASE BRANCHES COULD STEP ON EACH OTHER IF TEAMS DON'T MANAGE THE POM
+ // VERSION ADEQUATELY
+
+ //TODO - evaluate if we want to edit the version in the pom or just use in the tag name?
+ // need to take into account how a branch will be created from the tag and what the
+ // versioning of that branch should be, and what the auto process does with it
+ // How to handle modification of snapshot (1.0.0-SNAPSHOT) vs. release (1.0.0) versions
+
+
+
+
+ // Run the maven build this is a release that keeps the development version
+ // unchanged and uses Jenkins to provide the version number uniqueness
+ sh "mvn -s ${MAVEN_SETTINGS} -DreleaseVersion=${tagVersion} -DdevelopmentVersion=${devVersion} -DpushChanges=false -DlocalCheckout=true -DpreparationGoals=initialize release:prepare release:perform -B"
+
+ // push the tags (alternatively we could have pushed them to a separate
+ // git repo that we then pull from and repush... the latter can be
+ // helpful in the case where you run the publish on a different node
+
+ //TODO logic needed to get credentialId and determine if https or ssh is use, to use credentials differently on push
+ //withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'git_m09262', usernameVariable: 'GIT_USERNAME', passwordVariable: 'GIT_PASSWORD']]) {
+ sshagent([credentialId]) {
+ sh "git remote -v"
+ sh "git push origin ${artifact}-${tagVersion}"
+ }
+
+
+ // we should also release the staging repo, if we had stashed the
+ //details of the staging repository identifier it would be easy
+
+ } else {
+ echo "version.setTag() not in branch 'master', no action performed"
+ }
+}
+
+return this; \ No newline at end of file
diff --git a/valetapi/src/main/resources/application.properties b/valetapi/src/main/resources/application.properties
new file mode 100644
index 0000000..6fbed45
--- /dev/null
+++ b/valetapi/src/main/resources/application.properties
@@ -0,0 +1,52 @@
+
+# ============LICENSE_START=======================================================
+# ONAP - F-GPS
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+#
+###
+
+#Valet service
+#server.contextPath=/api/valet/
+server.servlet.context-path=/api/valet/
+logging.pattern.console=
+logging.path=logs
+logging.file=${logging.path}/log.log
+logging.pattern.file=%d{dd-MM-yyyy HH:mm:ss.SSS} [%thread] %-5level %logger{36}.%M - %msg%n
+# If it is true it will print all logs for ping or else it will print only error logs in api.log file
+#logging.ping=true
+# To enable SSL, uncomment the following lines:
+server.port=8443
+server.ssl.enabled=true
+server.ssl.key-store=classpath:keystore.p12
+server.ssl.key-store-type=PKCS12
+server.ssl.key-store-password=password
+server.ssl.key-alias=tomcat
+
+# To enable HTTP while SSL is enabled, uncomment the following line:
+server.http.port=8080
+
+
+valet.dark=false
+
+aaf.url.base=https://aaf.onap.org:8095/proxy
+
+#If authentication flags are false, then credentials are not required. Otherwise, they are required.
+#authentication.aaf=false
+#authentication.basic=false
+
diff --git a/valetapi/src/main/resources/auth.properties b/valetapi/src/main/resources/auth.properties
new file mode 100644
index 0000000..3bba95a
--- /dev/null
+++ b/valetapi/src/main/resources/auth.properties
@@ -0,0 +1,48 @@
+
+# ============LICENSE_START=======================================================
+# ONAP - F-GPS
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+#
+###
+
+#Portal basic auth credentials. Used on methods with @BasicAuthRequired(authRequired = "portal") or @PropertyBasedAuthorization("x"), where x.basic=portal
+portal.name=portal
+portal.pass=j0mE4ZVIZjq5XO8dMjt4oeEWgm0lUdjo9Zl6M5guP7M=
+
+#Portal AAF role. Used on methods with @AafRoleRequired(roleProperty="portal.admin") or @AafRoleRequired(roleProperty="portal.admin.role")
+portal.admin.role=org.onap.portal.valet.admin
+
+#Credentials used to authenticate this application with AAF on calls which require AAF
+valet.aaf.name=userid@fgps.onap.org
+valet.aaf.pass=XuhhetzEGCh8O7Fm9bLF38LNsLvZEg3zvHzmFTgijlKcsC2hgfNJ21ojMkIZI5HG
+
+#Property based authentication using AAF role
+groups.query.aaf=org.onap.portal.valet.user
+groups.create.aaf=org.onap.portal.valet.admin
+groups.update.aaf=org.onap.portal.valet.admin
+groups.delete.aaf=org.onap.portal.valet.admin
+
+#Property based authentication using basic auth
+placement.create.basic=so
+placement.update.basic=so
+placement.delete.basic=so
+placement.confirm.basic=so
+placement.rollback.basic=so
+so.name=so_user
+so.pass=IvuHSsIVfVkcy9QWoVhjAlh5Fi9Rg5myLmqvZEYhChE=
diff --git a/valetapi/src/main/resources/banner.txt b/valetapi/src/main/resources/banner.txt
new file mode 100755
index 0000000..a883d26
--- /dev/null
+++ b/valetapi/src/main/resources/banner.txt
@@ -0,0 +1,13 @@
+ ___ ____________________
+ / . \\ /__ __/ ____// ____/
+ / /_\\ \\___/ / (____ )/ /___
+/_/ \\/____/ /_____/(_____/
+
+${archetype.name}-Version:${archetype.version}
+
+This project is generated from ECO seed template '${eco.seed.name}' on '${eco.seed.created.datetime}'.
+
+The details of the core maven project is :
+ 1. mvn.archetype.name=${mvn.archetype.name}
+ 2. mvn.archetype.groupName=${mvn.archetype.groupName}
+ 3. mvn.archetype.version=${mvn.archetype.version}
diff --git a/valetapi/src/main/resources/key.properties b/valetapi/src/main/resources/key.properties
new file mode 100644
index 0000000..e9f19b4
--- /dev/null
+++ b/valetapi/src/main/resources/key.properties
@@ -0,0 +1,24 @@
+
+# ============LICENSE_START=======================================================
+# ONAP - F-GPS
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+#
+###
+
+cipher.enc.key=AAECAwQFBgcICQoLDA0ODw== \ No newline at end of file
diff --git a/valetapi/src/main/resources/keystore.p12 b/valetapi/src/main/resources/keystore.p12
new file mode 100644
index 0000000..fa4112e
--- /dev/null
+++ b/valetapi/src/main/resources/keystore.p12
Binary files differ
diff --git a/valetapi/src/main/resources/logback.xml b/valetapi/src/main/resources/logback.xml
new file mode 100644
index 0000000..52d3007
--- /dev/null
+++ b/valetapi/src/main/resources/logback.xml
@@ -0,0 +1,212 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration scan="true" scanPeriod="3 seconds">
+ <!--<jmxConfigurator /> -->
+ <!-- directory path for all other type logs -->
+ <property name="logDir" value="/api" />
+
+ <!-- directory path for debugging type logs -->
+ <property name="debugDir" value="/api" />
+
+ <!-- specify the component name -->
+ <!-- <property name="componentName" value="EELF"></property> -->
+ <property name="componentName" value="VALET-API"></property>
+
+ <!-- log file names -->
+ <property name="generalLogName" value="api" />
+ <property name="errorLogName" value="error" />
+ <property name="metricsLogName" value="metrics" />
+ <property name="auditLogName" value="audit" />
+ <property name="debugLogName" value="debug" />
+ <property name="defaultPattern" value="%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{36} - %msg%n" />
+ <property name="applicationLoggerPattern" value="%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %msg%n" />
+ <!-- Logging Fields Format Revisions -->
+ <property name="auditLoggerPattern" value="begintimestamp:%X{AuditLogBeginTimestamp}|endtimestamp:%X{AuditLogEndTimestamp}|%X{RequestId}|%X{ServiceInstanceId}|thread:%thread|%X{VirtualServerName}|ServiceName:%X{ServiceName}|%X{PartnerName}|%X{StatusCode}|%X{ResponseCode}|%X{ResponseDescription}|InstanceUUID:%X{InstanceUUID}|Log Level:%.-5level|AlarmSeverity:%X{AlertSeverity}|ServerIPAddress:%X{ServerIPAddress}|Timer:%X{Timer}|ServerFQDN:%X{ServerFQDN}|%X{ClientIPAddress}|Class Name:%X{ClassName}|%X{Unused}|%X{ProcessKey}|%X{CustomField1}|%X{CustomField2}|%X{CustomField3}|%X{CustomField4}| Detailed Message:%msg%n" />
+ <property name="metricsLoggerPattern" value="begintimestamp:%X{MetricsLogBeginTimestamp}|endtimestamp:%X{MetricsLogEndTimestamp}|%X{RequestId}|%X{ServiceInstanceId}|thread:%thread|%X{VirtualServerName}|ServiceName:%X{ServiceName}|%X{PartnerName}|%X{TargetEntity}|%X{TargetServiceName}|%X{StatusCode}|%X{ResponseCode}|%X{ResponseDescription}|InstanceUUID:%X{InstanceUUID}|Log Level:%.-5level|AlarmSeverity:%X{AlertSeverity}|ServerIPAddress:%X{ServerIPAddress}|Timer:%X{Timer}|ServerFQDN:%X{ServerFQDN}|%X{ClientIPAddress}|Class Name:%X{ClassName}|%X{Unused}|%X{ProcessKey}|%X{TargetVisualEntity}|%X{CustomField1}|%X{CustomField2}|%X{CustomField3}|%X{CustomField4}| Detailed Message:%msg%n" />
+ <property name="errorLoggerPattern" value= "timestamp:%d{yyyy-MM-dd HH:mm:ss} [%thread]|Log Level:%-5level|Detailed Message:%msg%n " />
+ <property name="debugLoggerPattern" value="timestamp:%date{ISO8601,UTC}|%X{RequestId}|Log Level:%-5level|Detailed Message:%msg%n" ></property>
+ <property name="logDirectory" value="${logDir}" />
+ <property name="debugLogDirectory" value="${logDir}" />
+ <!-- Example evaluator filter applied against console appender -->
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+ <!-- <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder> -->
+ <layout class="ch.qos.logback.classic.PatternLayout">
+ <pattern>
+ %d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{36} - %msg%n
+ </pattern>
+ </layout>
+ </appender>
+
+ <!-- ============================================================================ -->
+ <!-- EELF Appenders -->
+ <!-- ============================================================================ -->
+ <appender name="EELF"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${generalLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${generalLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${applicationLoggerPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELF" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <includeCallerData>true</includeCallerData>
+ <appender-ref ref="EELF" />
+ </appender>
+
+
+ <!-- EELF Audit Appender. This appender is used to record audit engine
+ related logging events. The audit logger and appender are specializations
+ of the EELF application root logger and appender. This can be used to segregate
+ Policy engine events from other components, or it can be eliminated to record
+ these events as part of the application root log. -->
+
+ <appender name="EELFAudit"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${auditLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${auditLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${auditLoggerPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFAudit" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFAudit" />
+ </appender>
+
+<appender name="EELFMetrics"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${metricsLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${metricsLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <!-- <pattern>"%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} -
+ %msg%n"</pattern> -->
+ <pattern>${metricsLoggerPattern}</pattern>
+ </encoder>
+ </appender>
+
+
+ <appender name="asyncEELFMetrics" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFMetrics"/>
+ </appender>
+
+ <appender name="EELFError"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${errorLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${errorLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${errorLoggerPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFError" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFError"/>
+ </appender>
+
+ <appender name="EELFDebug"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${debugLogDirectory}/${debugLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${debugLogDirectory}/${debugLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${debugLoggerPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFDebug" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFDebug" />
+ <includeCallerData>true</includeCallerData>
+ </appender>
+
+
+ <!-- ============================================================================ -->
+ <!-- EELF loggers -->
+ <!-- ============================================================================ -->
+ <!--
+ <logger name="org.onap.eelf" level="info" additivity="false">
+ <appender-ref ref="asyncEELF" />
+
+ </logger>
+
+ <logger name="org.onap.eelf.audit" level="info" additivity="false">
+ <appender-ref ref="asyncEELFAudit" />
+
+ </logger>
+
+ <logger name="org.onap.eelf.metrics" level="info" additivity="false">
+ <appender-ref ref="asyncEELFMetrics" />
+
+ </logger>
+
+
+ <logger name="org.onap.eelf.error" level="error" additivity="false">
+ <appender-ref ref="asyncEELFError" />
+
+ </logger>
+
+ <logger name="org.onap.eelf.debug" level="debug" additivity="false">
+ <appender-ref ref="asyncEELFDebug" />
+
+ </logger>
+ -->
+
+ <root level="INFO">
+ <appender-ref ref="asyncEELF" />
+ <appender-ref ref="STDOUT" />
+ </root>
+
+</configuration>
diff --git a/valetapi/src/main/resources/logmessages.properties b/valetapi/src/main/resources/logmessages.properties
new file mode 100755
index 0000000..b3b163d
--- /dev/null
+++ b/valetapi/src/main/resources/logmessages.properties
@@ -0,0 +1,29 @@
+
+# ============LICENSE_START=======================================================
+# ONAP - F-GPS
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+#
+###
+
+RESTSERVICE_HELLO=SERVICE0001I|Get a quick hello|No resolution needed|No action is required
+RESTSERVICE_HELLO_NAME=SERVICE0002I|Get a quick hello for {0}|No resolution needed|No action is required
+SPRINSERVICE_HELLO=SERVICE0003I|Say a quick hello|No resolution needed|No action is required
+SPRINSERVICE_HELLO_NAME=SERVICE0004I|Say a quick hello for {0}|No resolution needed|No action is required
+SPRINSERVICE_HELLO_MESSAGE=SERVICE0005I|Say hello message: {0}|No resolution needed|No action is required
+SPRINSERVICE_HELLO_MESSAGE_NAME=SERVICE0006I|Say hello message object:{0}|No resolution needed|No action is required
diff --git a/valetapi/src/main/resources/resources.properties b/valetapi/src/main/resources/resources.properties
new file mode 100644
index 0000000..cd82323
--- /dev/null
+++ b/valetapi/src/main/resources/resources.properties
@@ -0,0 +1,39 @@
+
+# ============LICENSE_START=======================================================
+# ONAP - F-GPS
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+#
+###
+
+musicdb.ip.1=music_host_1.onap.org
+musicdb.ip.2=music_host_2.onap.org
+musicdb.ip.3=music_host_3.onap.org
+
+
+db.create=true
+music.MUSIC_DB_PORT=8080
+music.MUSIC_DB_URL=/MUSIC/rest/v2/
+music.Keyspace=pn2
+musicdb.namespace=org.onap.dev.music
+musicdb.userId=musicuser@onap.org
+musicdb.password=zev1w/9GdTYf92pTUQ9DhabHbEfUFcF4+kLjwLdA2as=
+instanceId=valet01
+data.center.one=DC1
+data.center.two=DC2
+data.center.three=DC3
diff --git a/valetapi/src/main/resources/version.properties b/valetapi/src/main/resources/version.properties
new file mode 100644
index 0000000..8cca034
--- /dev/null
+++ b/valetapi/src/main/resources/version.properties
@@ -0,0 +1,28 @@
+
+# ============LICENSE_START=======================================================
+# ONAP - F-GPS
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+#
+###
+
+version.major=0
+version.minor=4
+version.patch=23
+version.full=0.4.23
+version.type=C
diff --git a/valetapi/src/test/java/org/onap/fgps/api/componenttest/mockito/ITComponentTest.java b/valetapi/src/test/java/org/onap/fgps/api/componenttest/mockito/ITComponentTest.java
new file mode 100644
index 0000000..c6eee45
--- /dev/null
+++ b/valetapi/src/test/java/org/onap/fgps/api/componenttest/mockito/ITComponentTest.java
@@ -0,0 +1,54 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.api.componenttest.mockito;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import org.junit.Test;
+
+public class ITComponentTest {
+
+ @Test
+ public void greets() {
+ String msg = "Hello User!";
+ assertEquals(msg, "Hello User!");
+ }
+
+} \ No newline at end of file
diff --git a/valetapi/src/test/java/org/onap/fgps/controller/ValetGroupsControllerTest.java b/valetapi/src/test/java/org/onap/fgps/controller/ValetGroupsControllerTest.java
new file mode 100644
index 0000000..3f92566
--- /dev/null
+++ b/valetapi/src/test/java/org/onap/fgps/controller/ValetGroupsControllerTest.java
@@ -0,0 +1,107 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright - 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.controller;
+/*package com.valet.controller;
+
+import static org.assertj.core.api.Assertions.fail;
+import static org.junit.Assert.assertEquals;
+
+import java.io.BufferedReader;
+import java.io.FileInputStream;
+import java.io.InputStreamReader;
+
+import javax.ws.rs.client.Entity;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+
+import org.jboss.resteasy.client.jaxrs.ResteasyClient;
+import org.jboss.resteasy.client.jaxrs.ResteasyClientBuilder;
+import org.jboss.resteasy.client.jaxrs.ResteasyWebTarget;
+import org.json.simple.JSONObject;
+import org.json.simple.parser.JSONParser;
+import org.junit.Test;
+import org.skyscreamer.jsonassert.JSONAssert;
+
+
+public class ValetGroupsControllerTest {
+
+
+ @Test
+ public void testPotal1() throws Exception {
+ String expectedOutput = "{}";
+ // Do a REST call
+ ResteasyClient client = new ResteasyClientBuilder().build();
+ ResteasyWebTarget target = client.target("http://localhost:8080/api/valet/groups/v1/portal?requestId=12345");
+ Response response = target.request().get();
+ String respnseData = response.readEntity(String.class);
+ response.close();
+ JSONObject responseDataJSON = (JSONObject)new JSONParser().parse(respnseData);
+ respnseData = ((JSONObject)responseDataJSON.get("values")).get("request").toString();
+ System.out.println("Expected :");
+ System.out.println(expectedOutput);
+ System.out.println("Actual :");
+ System.out.println(respnseData);
+ // Compare the outputs
+ assertEquals(expectedOutput, respnseData);
+ }
+ @Test
+ public void testPortal2() throws Exception {
+ try(BufferedReader brOutput = new BufferedReader(new InputStreamReader(new FileInputStream("\\testing\\portal\\Advancedsearch\\advancedsearchoutput.txt")))) {
+ String expectedOutput = brOutput.readLine();
+ JSONObject expectedOuptutJSON = (JSONObject)new JSONParser().parse(expectedOutput);
+ // Do a REST call
+ ResteasyClient client = new ResteasyClientBuilder().build();
+ ResteasyWebTarget target = client.target("http://localhost:8080/api/valet/groups/v1/portal?requestId=12345&name=VALET_HOST_DIVERSITY_RULE&datacenter_id=mtn6");
+ Response response = target.request().get();
+ String respnseData = response.readEntity(String.class);
+ response.close();
+ expectedOutput = ((JSONObject)((JSONObject)expectedOuptutJSON.get("values")).get("request")).toJSONString();
+ JSONObject responseDataJSON = (JSONObject)new JSONParser().parse(respnseData);
+ respnseData = ((JSONObject)responseDataJSON.get("values")).get("request").toString();
+ System.out.println("Expected :");
+ System.out.println(expectedOutput);
+ System.out.println("Actual :");
+ System.out.println(respnseData);
+ // Compare the outputs
+ assertEquals(expectedOutput, respnseData);
+ }catch(Exception e) {
+ e.printStackTrace();
+ fail(e.getMessage());
+ }
+ }
+}*/ \ No newline at end of file
diff --git a/valetapi/src/test/java/org/onap/fgps/controller/ValetServicePlacementControllerTest.java b/valetapi/src/test/java/org/onap/fgps/controller/ValetServicePlacementControllerTest.java
new file mode 100644
index 0000000..7762283
--- /dev/null
+++ b/valetapi/src/test/java/org/onap/fgps/controller/ValetServicePlacementControllerTest.java
@@ -0,0 +1,221 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright - 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.controller;
+/*package com.valet.controller;
+
+import static org.assertj.core.api.Assertions.fail;
+import static org.junit.Assert.assertEquals;
+
+import java.io.BufferedReader;
+import java.io.FileInputStream;
+import java.io.InputStreamReader;
+
+import javax.ws.rs.client.Entity;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+
+import org.jboss.resteasy.client.jaxrs.ResteasyClient;
+import org.jboss.resteasy.client.jaxrs.ResteasyClientBuilder;
+import org.jboss.resteasy.client.jaxrs.ResteasyWebTarget;
+import org.json.simple.JSONObject;
+import org.json.simple.parser.JSONParser;
+import org.junit.Test;
+import org.skyscreamer.jsonassert.JSONAssert;
+
+
+public class ValetServicePlacementControllerTest {
+
+ @Test
+ public void testCreateVM2() throws Exception {
+ try(BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream("\\testing\\createoperationandupdate\\diversitynested\\create_req_ValetHostDiversityNest_20180509e.txt")))) {
+ try(BufferedReader brOutput = new BufferedReader(new InputStreamReader(new FileInputStream("\\testing\\createoperationandupdate\\diversitynested\\diversitynestedoutput.txt")))) {
+ JSONObject input = (JSONObject)new JSONParser().parse(br);
+ String expectedOutput = brOutput.readLine();
+ JSONObject expectedOuptutJSON = (JSONObject)new JSONParser().parse(expectedOutput);
+ // Do a REST call
+ ResteasyClient client = new ResteasyClientBuilder().build();
+ ResteasyWebTarget target = client.target("http://localhost:8080/api/valet/placement/v1/createVM2?requestId=create-123");
+ Response response = target.request().post(Entity.entity(input.toJSONString(), MediaType.APPLICATION_JSON));
+ String respnseData = response.readEntity(String.class);
+ response.close();
+ expectedOutput = ((JSONObject)((JSONObject)expectedOuptutJSON.get("values")).get("request")).toJSONString();
+ System.out.println("Expected :");
+ System.out.println(expectedOutput);
+ System.out.println("Actual :");
+ System.out.println(respnseData);
+ // Compare the outputs
+ JSONAssert.assertEquals(expectedOutput, respnseData, false);
+ }
+ }catch(Exception e) {
+ e.printStackTrace();
+ fail(e.getMessage());
+ }
+ }
+ @Test
+ public void testupdateVm1() throws Exception {
+ try(BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream("\\\\testing\\\\createoperationandupdate\\\\diversitynested\\\\create_req_ValetHostDiversityNest_20180509e.txt")))) {
+ try(BufferedReader brOutput = new BufferedReader(new InputStreamReader(new FileInputStream("\\\\testing\\\\createoperationandupdate\\\\diversitynested\\\\diversitynestedoutput.txt")))) {
+ JSONObject input = (JSONObject)new JSONParser().parse(br);
+ String expectedOutput = brOutput.readLine();
+ JSONObject expectedOuptutJSON = (JSONObject)new JSONParser().parse(expectedOutput);
+ // Do a REST call
+ ResteasyClient client = new ResteasyClientBuilder().build();
+ ResteasyWebTarget target = client.target("http://localhost:8080/api/valet/placement/v1/updateVm1?requestId=update-123");
+ Response response = target.request().put(Entity.entity(input.toJSONString(), MediaType.APPLICATION_JSON));
+ String respnseData = response.readEntity(String.class);
+ response.close();
+ expectedOutput = ((JSONObject)((JSONObject)expectedOuptutJSON.get("values")).get("request")).toJSONString();
+ System.out.println("Expected :");
+ System.out.println(expectedOutput);
+ System.out.println("Actual :");
+ System.out.println(respnseData);
+ // Compare the outputs
+ JSONAssert.assertEquals(expectedOutput, respnseData, false);
+ }
+ }catch(Exception e) {
+ e.printStackTrace();
+ fail(e.getMessage());
+ }
+ }
+ @Test
+ public void testdeleteVm1() throws Exception {
+ try(BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream("\\testing\\Deleteoperation\\deleteinput.txt")))) {
+ try(BufferedReader brOutput = new BufferedReader(new InputStreamReader(new FileInputStream("\\\\testing\\\\Deleteoperation\\\\deleteoutput.txt")))) {
+ JSONObject input = (JSONObject)new JSONParser().parse(br);
+ String expectedOutput = brOutput.readLine();
+ JSONObject expectedOuptutJSON = (JSONObject)new JSONParser().parse(expectedOutput);
+ // Do a REST call
+ ResteasyClient client = new ResteasyClientBuilder().build();
+ ResteasyWebTarget target = client.target("http://localhost:8080/api/valet/placement/v1/deleteVm1?requestId=delete-123");
+ Response response = target.request().build("DELETE", Entity.entity(input.toJSONString(), MediaType.APPLICATION_JSON)).invoke();
+ String respnseData = response.readEntity(String.class);
+ response.close();
+ expectedOutput = ((JSONObject)((JSONObject)expectedOuptutJSON.get("values")).get("request")).toJSONString();
+ JSONObject responseDataJSON = (JSONObject)new JSONParser().parse(respnseData);
+ respnseData = ((JSONObject)responseDataJSON.get("values")).get("request").toString();
+ System.out.println("Expected :");
+ System.out.println(expectedOutput);
+ System.out.println("Actual :");
+ System.out.println(respnseData);
+ // Compare the outputs
+ JSONAssert.assertEquals(expectedOutput, respnseData, false);
+ }
+ }catch(Exception e) {
+ e.printStackTrace();
+ fail(e.getMessage());
+ }
+ }
+ @Test
+ public void testconfirm1() throws Exception {
+ try(BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream("\\testing\\Confirm\\confriminput.txt")))) {
+ try(BufferedReader brOutput = new BufferedReader(new InputStreamReader(new FileInputStream("\\testing\\Confirm\\confrimoutput.txt")))) {
+ JSONObject input = (JSONObject)new JSONParser().parse(br);
+ String expectedOutput = brOutput.readLine();
+ JSONObject expectedOuptutJSON = (JSONObject)new JSONParser().parse(expectedOutput);
+ // Do a REST call
+ ResteasyClient client = new ResteasyClientBuilder().build();
+ ResteasyWebTarget target = client.target("http://localhost:8080/api/valet/placement/v1/confirm-123/confirm1");
+ Response response = target.request().put(Entity.entity(input.toJSONString(), MediaType.APPLICATION_JSON));
+ String respnseData = response.readEntity(String.class);
+ response.close();
+ expectedOutput = ((JSONObject)((JSONObject)expectedOuptutJSON.get("values")).get("request")).toJSONString();
+ JSONObject responseDataJSON = (JSONObject)new JSONParser().parse(respnseData);
+ respnseData = ((JSONObject)responseDataJSON.get("values")).get("request").toString();
+ System.out.println("Expected :");
+ System.out.println(expectedOutput);
+ System.out.println("Actual :");
+ System.out.println(respnseData);
+ // Compare the outputs
+ JSONAssert.assertEquals(expectedOutput, respnseData, false);
+ }
+ }catch(Exception e) {
+ e.printStackTrace();
+ fail(e.getMessage());
+ }
+ }
+ @Test
+ public void testrollback1() throws Exception {
+ try(BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream("\\testing\\Rollback\\rollbackinput.txt")))) {
+ try(BufferedReader brOutput = new BufferedReader(new InputStreamReader(new FileInputStream("\\testing\\Rollback\\rollbackoutput.txt")))) {
+ JSONObject input = (JSONObject)new JSONParser().parse(br);
+ String expectedOutput = brOutput.readLine();
+ JSONObject expectedOuptutJSON = (JSONObject)new JSONParser().parse(expectedOutput);
+ // Do a REST call
+ ResteasyClient client = new ResteasyClientBuilder().build();
+ ResteasyWebTarget target = client.target("http://localhost:8080/api/valet/placement/v1/rollback-123/rollback1");
+ Response response = target.request().put(Entity.entity(input.toJSONString(), MediaType.APPLICATION_JSON));
+ String respnseData = response.readEntity(String.class);
+ response.close();
+ expectedOutput = ((JSONObject)((JSONObject)expectedOuptutJSON.get("values")).get("request")).toJSONString();
+ JSONObject responseDataJSON = (JSONObject)new JSONParser().parse(respnseData);
+ respnseData = ((JSONObject)responseDataJSON.get("values")).get("request").toString();
+ System.out.println("Expected :");
+ System.out.println(expectedOutput);
+ System.out.println("Actual :");
+ System.out.println(respnseData);
+ // Compare the outputs
+ JSONAssert.assertEquals(expectedOutput, respnseData, false);
+ }
+ }catch(Exception e) {
+ e.printStackTrace();
+ fail(e.getMessage());
+ }
+ }
+ @Test
+ public void testBodyVM2() throws Exception {
+ try(BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream("")))) {
+ JSONObject input = (JSONObject)new JSONParser().parse(br);
+ String expectedOutput = "[Expected output]";
+ // Do a REST call
+ ResteasyClient client = new ResteasyClientBuilder().build();
+ ResteasyWebTarget target = client.target("http://localhost:8080/api/valet/placement/v1/createVM2?requestId=12345");
+ Response response = target.request().post(Entity.entity(input.toJSONString(), MediaType.APPLICATION_JSON));
+ String respnseData = response.readEntity(String.class);
+ response.close();
+ System.out.println("Expected :");
+ System.out.println(expectedOutput);
+ System.out.println("Actual :");
+ System.out.println(respnseData);
+ // Compare the outputs
+ assertEquals(expectedOutput, respnseData);
+ }catch(Exception e) {
+ e.printStackTrace();
+ fail(e.getMessage());
+ }
+ }
+}*/ \ No newline at end of file
diff --git a/valetapi/src/test/java/org/onap/fgps/controller/ValetServicePlacementControllerTest1.java b/valetapi/src/test/java/org/onap/fgps/controller/ValetServicePlacementControllerTest1.java
new file mode 100644
index 0000000..91412bc
--- /dev/null
+++ b/valetapi/src/test/java/org/onap/fgps/controller/ValetServicePlacementControllerTest1.java
@@ -0,0 +1,88 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright - 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.controller;
+/*package com.valet.controller;
+
+import static org.junit.Assert.*;
+
+import org.junit.Test;
+
+public class ValetServicePlacementControllerTest1 {
+
+ @Test
+ public void testValetServicePlacementController() {
+ fail("Not yet implemented");
+ }
+
+ @Test
+ public void testCreateVm() {
+ fail("Not yet implemented");
+ }
+
+ @Test
+ public void testUpdateVm() {
+ fail("Not yet implemented");
+ }
+
+ @Test
+ public void testDeleteVm() {
+ fail("Not yet implemented");
+ }
+
+ @Test
+ public void testConfirm() {
+ fail("Not yet implemented");
+ }
+
+ @Test
+ public void testRollback() {
+ fail("Not yet implemented");
+ }
+
+ @Test
+ public void testDummy() {
+ fail("Not yet implemented");
+ }
+
+ @Test
+ public void testDummy1() {
+ fail("Not yet implemented");
+ }
+
+}
+*/ \ No newline at end of file
diff --git a/valetapi/src/test/java/org/onap/fgps/service/ValetGroupsServiceTest.java b/valetapi/src/test/java/org/onap/fgps/service/ValetGroupsServiceTest.java
new file mode 100644
index 0000000..29e02b3
--- /dev/null
+++ b/valetapi/src/test/java/org/onap/fgps/service/ValetGroupsServiceTest.java
@@ -0,0 +1,103 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.service;
+/*package com.valet.service;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.when;
+
+import org.json.simple.JSONObject;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.InjectMocks;
+import org.mockito.Mock;
+import org.mockito.runners.MockitoJUnitRunner;
+
+import com.valet.beans.schema.Schema;
+import com.valet.dao.ValetServicePlacementDAO;
+
+@RunWith(MockitoJUnitRunner.class)
+public class ValetGroupsServiceTest {
+ @Mock
+ ValetServicePlacementDAO valetServicePlacementDAO;
+
+ @Mock
+ Schema schema;
+
+ @InjectMocks
+ ValetGroupsService valetGroupsService;
+
+ int WAIT_UNITL_SECONDS = 10;
+ int POLL_EVERY_SECONDS = 1;
+
+ @Test
+ public void saveGroupsRequest() {
+ JSONObject request = new JSONObject();
+ String operation = "test_operation";
+ String requestId = "req_id";
+ String expected = "test1";
+ String dbRequest = "dbRequest";
+ when(schema.formMsoInsertUpdateRequest(requestId, operation, operation + "-" + request.toJSONString()))
+ .thenReturn(dbRequest);
+ when(valetServicePlacementDAO.insertRow(dbRequest)).thenReturn(expected);
+ when(valetServicePlacementDAO.getRowFromResults(requestId)).thenReturn("");
+ when(valetServicePlacementDAO.deleteRowFromResults(requestId, dbRequest)).thenReturn("");
+
+ String actual = valetGroupsService.saveGroupsRequest(request, operation, requestId);
+ assertEquals(expected, actual);
+ }
+
+ @Test
+ public void pollForResult_result_at_first() {
+ JSONObject values = new JSONObject();
+ String requestId = "req_id";
+ when(valetServicePlacementDAO.getRowFromResults(requestId)).thenReturn("");
+ when(valetServicePlacementDAO.deleteRowFromResults(requestId, requestId)).thenReturn("");
+ valetGroupsService.pollForResult(values, requestId, WAIT_UNITL_SECONDS, POLL_EVERY_SECONDS);
+ }
+
+ @Test
+ public void pollForResult_result_not_got() {
+ JSONObject values = new JSONObject();
+ String requestId = "req_id";
+ when(valetServicePlacementDAO.getRowFromResults(requestId)).thenReturn(null);
+ valetGroupsService.pollForResult(values, requestId, WAIT_UNITL_SECONDS, POLL_EVERY_SECONDS);
+ }
+
+}
+*/ \ No newline at end of file
diff --git a/valetapi/src/test/java/org/onap/fgps/service/ValetPlacementServiceTest.java b/valetapi/src/test/java/org/onap/fgps/service/ValetPlacementServiceTest.java
new file mode 100644
index 0000000..d595de1
--- /dev/null
+++ b/valetapi/src/test/java/org/onap/fgps/service/ValetPlacementServiceTest.java
@@ -0,0 +1,335 @@
+/*
+ * ============LICENSE_START==========================================
+ * ONAP - F-GPS API
+ * ===================================================================
+ * Copyright © 2019 ATT Intellectual Property. All rights reserved.
+ * ===================================================================
+ *
+ * Unless otherwise specified, all software contained herein is licensed
+ * under the Apache License, Version 2.0 (the "License");
+ * you may not use this software except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Unless otherwise specified, all documentation contained herein is licensed
+ * under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ * you may not use this documentation except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://creativecommons.org/licenses/by/4.0/
+ *
+ * Unless required by applicable law or agreed to in writing, documentation
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END============================================
+ *
+ *
+ */
+package org.onap.fgps.service;
+/*package com.valet.service;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.LinkedHashMap;
+
+import org.json.simple.JSONArray;
+import org.json.simple.JSONObject;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.InjectMocks;
+import org.mockito.Mock;
+import org.mockito.runners.MockitoJUnitRunner;
+
+import com.valet.beans.schema.Schema;
+import com.valet.dao.ValetServicePlacementDAO;
+import com.valet.utils.Constants;
+
+@RunWith(MockitoJUnitRunner.class)
+public class ValetPlacementServiceTest {
+
+ @Mock
+ ValetServicePlacementDAO valetServicePlacementDAO;
+ @Mock
+ Schema schema;
+
+ @InjectMocks
+ ValetPlacementService valetPlacementService;
+
+ @Test
+ public void getParam_params_get_key_notnull() {
+ String key = "key";
+ JSONObject parameters = new JSONObject();
+ Object expected = new String("test1");
+ parameters.put("key", expected);
+ JSONObject envPrams = new JSONObject();
+ LinkedHashMap requestParameters = new LinkedHashMap<>();
+ Object actual = valetPlacementService.getParam(key, parameters, envPrams, requestParameters);
+ assertEquals(expected, actual);
+ }
+
+ @Test
+ public void getParam_env_get_key_null() {
+ String key = "key";
+ JSONObject parameters = new JSONObject();
+ Object expected = new String("test2");
+ parameters.put("key1", "val1");
+ JSONObject envPrams = new JSONObject();
+ envPrams.put("diffKey", "diffVal");
+ LinkedHashMap requestParameters = new LinkedHashMap<>();
+ requestParameters.put("key", expected);
+ Object actual = valetPlacementService.getParam(key, parameters, envPrams, requestParameters);
+ assertEquals(expected, actual);
+ }
+
+ @Test
+ public void getParam_env_get_key_notnull_length_0() {
+ String key = "key";
+ JSONObject parameters = new JSONObject();
+ Object expected = new String("test3");
+ parameters.put("key1", "val1");
+ JSONObject envPrams = new JSONObject();
+ envPrams.put("", "");
+ LinkedHashMap requestParameters = new LinkedHashMap<>();
+ requestParameters.put("key", expected);
+ Object actual = valetPlacementService.getParam(key, parameters, envPrams, requestParameters);
+ assertEquals(expected, actual);
+ }
+
+ @Test
+ public void getParam_env_get_key_notnull_length_not_0() {
+ String key = "key";
+ JSONObject parameters = new JSONObject();
+ Object expected = new String("test4");
+ parameters.put("key1", "val1");
+ JSONObject envPrams = new JSONObject();
+ envPrams.put("key", expected);
+ LinkedHashMap requestParameters = new LinkedHashMap<>();
+ Object actual = valetPlacementService.getParam(key, parameters, envPrams, requestParameters);
+ assertEquals(expected, actual);
+ }
+
+ @Test
+ public void getAttr() {
+ JSONArray key = new JSONArray();
+ key.add("jsonArray");
+ JSONObject resources = new JSONObject();
+ Object expected = new String("test5");
+ resources.put(key, expected);
+ Object actual = valetPlacementService.getAttr(key, resources);
+ assertEquals(expected, actual);
+ }
+
+ @Test
+ public void processTemplate_env_notnull() {
+ ValetPlacementService mockedValetPlacementService = mock(ValetPlacementService.class);
+ JSONObject template = new JSONObject();
+ JSONObject resources = new JSONObject();
+ JSONObject resourceObject = new JSONObject();
+ JSONObject properties = new JSONObject();
+ resourceObject.put(Constants.HEAT_REQUEST_PROPERTIES, properties);
+ resources.put("resourceObject", resourceObject);
+
+ template.put(Constants.VALET_REQUEST_RESOURCES, resources);
+ LinkedHashMap files = new LinkedHashMap<>();
+ JSONObject environment = new JSONObject();
+ JSONObject parameters = new JSONObject();
+ environment.put("parameters", parameters);
+ JSONObject parentProperties = new JSONObject();
+ LinkedHashMap requestParameters = new LinkedHashMap<>();
+ JSONObject expected = new JSONObject();
+ when(mockedValetPlacementService.parseResourceObject(template, null, resourceObject, parameters, files,
+ parentProperties, requestParameters)).thenReturn(expected);
+ when(mockedValetPlacementService.processTemplate(template, files, environment, parentProperties,
+ requestParameters)).thenCallRealMethod();
+
+ JSONObject actual = mockedValetPlacementService.processTemplate(template, files, environment, parentProperties,
+ requestParameters);
+ assertEquals(expected, actual.get("resourceObject"));
+ }
+
+ @Test
+ public void processTemplate_env_null() {
+ ValetPlacementService mockedValetPlacementService = mock(ValetPlacementService.class);
+ JSONObject template = new JSONObject();
+ JSONObject resources = new JSONObject();
+ JSONObject resourceObject = new JSONObject();
+ JSONObject properties = new JSONObject();
+ resourceObject.put(Constants.HEAT_REQUEST_PROPERTIES, properties);
+ resources.put("resourceObject", resourceObject);
+
+ template.put(Constants.VALET_REQUEST_RESOURCES, resources);
+ JSONObject parameters = new JSONObject();
+ template.put("parameters", parameters);
+ LinkedHashMap files = new LinkedHashMap<>();
+ JSONObject environment = null;
+ JSONObject parentProperties = new JSONObject();
+ LinkedHashMap requestParameters = new LinkedHashMap<>();
+ JSONObject expected = new JSONObject();
+ when(mockedValetPlacementService.parseResourceObject(template, null, resourceObject, parameters, files,
+ parentProperties, requestParameters)).thenReturn(expected);
+ when(mockedValetPlacementService.processTemplate(template, files, environment, parentProperties,
+ requestParameters)).thenCallRealMethod();
+
+ JSONObject actual = mockedValetPlacementService.processTemplate(template, files, environment, parentProperties,
+ requestParameters);
+ assertEquals(expected, actual.get("resourceObject"));
+ }
+
+ @Test
+ public void parseLogicFinal() {
+ assertNull(valetPlacementService.parseLogicFinal());
+ }
+
+ @Test
+ public void processMSORequest1_requestParameters_notnull() {
+ ValetPlacementService mockService = mock(ValetPlacementService.class);
+ JSONObject request = new JSONObject();
+ LinkedHashMap heat_request = new LinkedHashMap<>();
+ JSONObject template = new JSONObject();
+ template.put("templatekey", "templatevalue");
+ heat_request.put(Constants.HEAT_REQUEST_TEMPLATE, template);
+ LinkedHashMap files = new LinkedHashMap<>();
+ heat_request.put(Constants.HEAT_REQUEST_FILES, files);
+ JSONObject environment = new JSONObject();
+ heat_request.put(Constants.HEAT_REQUEST_ENVIRONMENT, environment);
+ LinkedHashMap requestParameters = new LinkedHashMap<>();
+ heat_request.put("parameters", requestParameters);
+ when(mockService.processMSORequest1(request)).thenCallRealMethod();
+ when(mockService.convertToJson(template.toString())).thenCallRealMethod();
+ when(mockService.convertToJson(environment.toString())).thenCallRealMethod();
+ JSONObject expected = new JSONObject();
+ expected.put("key", "value");
+ when(mockService.processTemplate(template, files, environment, null, requestParameters)).thenReturn(expected);
+
+ request.put(Constants.HEAT_REQUEST, heat_request);
+
+ String actual = mockService.processMSORequest1(request);
+ assertEquals(expected.toJSONString(), actual);
+ }
+
+ @Test
+ public void processMSORequest1_requestParameters_null() {
+ ValetPlacementService mockService = mock(ValetPlacementService.class);
+ JSONObject request = new JSONObject();
+ LinkedHashMap heat_request = new LinkedHashMap<>();
+ JSONObject template = new JSONObject();
+ template.put("templatekey", "templatevalue");
+ heat_request.put(Constants.HEAT_REQUEST_TEMPLATE, template);
+ LinkedHashMap files = new LinkedHashMap<>();
+ heat_request.put(Constants.HEAT_REQUEST_FILES, files);
+ JSONObject environment = new JSONObject();
+ heat_request.put(Constants.HEAT_REQUEST_ENVIRONMENT, environment);
+ LinkedHashMap requestParameters = null;
+ heat_request.put("parameters", requestParameters);
+ when(mockService.processMSORequest1(request)).thenCallRealMethod();
+ when(mockService.convertToJson(template.toString())).thenCallRealMethod();
+ when(mockService.convertToJson(environment.toString())).thenCallRealMethod();
+ JSONObject expected = new JSONObject();
+ expected.put("key", "value");
+ org.mockito.Mockito.when(mockService.processTemplate(any(), any(), any(), any(), any())).thenReturn(expected);
+
+ request.put(Constants.HEAT_REQUEST, heat_request);
+
+ String actual = mockService.processMSORequest1(request);
+ assertEquals(expected.toJSONString(), actual);
+ }
+
+ @Test
+ public void processMSORequest1_exception() {
+ ValetPlacementService mockService = mock(ValetPlacementService.class);
+ JSONObject request = new JSONObject();
+ LinkedHashMap heat_request = new LinkedHashMap<>();
+ JSONObject template = new JSONObject();
+ template.put("templatekey", "templatevalue");
+ heat_request.put(Constants.HEAT_REQUEST_TEMPLATE, template);
+ LinkedHashMap files = new LinkedHashMap<>();
+ heat_request.put(Constants.HEAT_REQUEST_FILES, files);
+ JSONObject environment = new JSONObject();
+ heat_request.put(Constants.HEAT_REQUEST_ENVIRONMENT, environment);
+ LinkedHashMap requestParameters = null;
+ heat_request.put("parameters", requestParameters);
+ when(mockService.processMSORequest1(request)).thenCallRealMethod();
+ when(mockService.convertToJson(template.toString())).thenCallRealMethod();
+ when(mockService.convertToJson(environment.toString())).thenCallRealMethod();
+ JSONObject expected = new JSONObject();
+ expected.put("key", "value");
+ org.mockito.Mockito.when(mockService.processTemplate(any(), any(), any(), any(), any()))
+ .thenThrow(Exception.class);
+
+ request.put(Constants.HEAT_REQUEST, heat_request);
+
+ String actual = mockService.processMSORequest1(request);
+ assertNull(actual);
+ }
+
+ @Test
+ public void convertToJson() {
+ String data = "{\"key\":\"value\"}";
+ JSONObject actual = valetPlacementService.convertToJson(data);
+ assertEquals("value", actual.get("key"));
+ }
+
+ @Test
+ public void convertToJson_exception() {
+ String data = "{\"key\"::\"value\"}";
+ JSONObject actual = valetPlacementService.convertToJson(data);
+ assertNull(actual);
+ }
+
+ @Test
+ public void processDeleteRequest() {
+ String requestId = "req_id";
+ JSONObject request = new JSONObject();
+ String dbRequest = "dbRequest";
+ when(schema.formMsoInsertUpdateRequest(requestId, "delete", request.toJSONString())).thenReturn(dbRequest);
+ when(valetServicePlacementDAO.insertRow(dbRequest)).thenReturn(requestId);
+ String actual = valetPlacementService.processDeleteRequest(request, requestId);
+ assertEquals(requestId, actual);
+ }
+
+ @Test
+ public void saveRequest() {
+ String requestId = "req_id";
+ JSONObject request = new JSONObject();
+ String dbRequest = "dbRequest";
+ String operation = "operation";
+ when(schema.formMsoInsertUpdateRequest(requestId, operation, request.toJSONString())).thenReturn(dbRequest);
+ when(valetServicePlacementDAO.insertRow(dbRequest)).thenReturn(requestId);
+ when(valetServicePlacementDAO.getRowFromResults(requestId)).thenReturn("");
+ when(valetServicePlacementDAO.deleteRowFromResults(requestId, operation)).thenReturn("");
+ String actual = valetPlacementService.saveRequest(request, operation, requestId);
+ assertEquals(requestId, actual);
+ }
+
+ @Test
+ public void pollForResult_result_at_first() {
+ JSONObject values = new JSONObject();
+ String requestId = "req_id";
+ when(valetServicePlacementDAO.getRowFromResults(requestId)).thenReturn("");
+ when(valetServicePlacementDAO.deleteRowFromResults(requestId, requestId)).thenReturn("");
+ valetPlacementService.pollForResult(values, requestId, 10, 1);
+ }
+
+ @Test
+ public void pollForResult_result_not_got() {
+ JSONObject values = new JSONObject();
+ String requestId = "req_id";
+ when(valetServicePlacementDAO.getRowFromResults(requestId)).thenReturn(null);
+ valetPlacementService.pollForResult(values, requestId, 5, 1);
+ }
+}
+*/ \ No newline at end of file
diff --git a/valetapi/src/test/resources/application-test.properties b/valetapi/src/test/resources/application-test.properties
new file mode 100755
index 0000000..35c9fa2
--- /dev/null
+++ b/valetapi/src/test/resources/application-test.properties
@@ -0,0 +1,37 @@
+
+# ============LICENSE_START=======================================================
+# ONAP - F-GPS
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+#
+###
+
+info.build.artifact=@project.artifactId@
+info.build.name=@project.name@
+info.build.description=@project.description@
+info.build.version=@project.version@
+
+spring.jersey.type=filter
+
+logging.level.root=info
+logging.level.org.glassfish=info
+logging.level.org.glassfish.jersey=info
+
+spring.autoconfigure.exclude=org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration,org.springframework.boot.autoconfigure.orm.jpa.HibernateJpaAutoConfiguration
+
+logging.pattern.console=%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(%5p) %clr($ threadId: {PID:- }){magenta} %clr(---){faint} %clr([ hostname: %X{hostname} serviceName: %X{serviceName} version: %X{version} transactionId: %X{transactionId} requestTimeStamp: %X{requestTimestamp} responseTimeStamp: %X{responseTimestamp} duration: %X{duration}]){yellow} %clr([%15.15t]){faint} %clr(%-40.40logger{39}){cyan} %clr(:){faint} %m%n%wex \ No newline at end of file