author     Lucas, John (jl1315) <jflucas@research.att.com>  2017-08-23 18:58:33 +0000
committer  J. F. Lucas <jflucas@research.att.com>  2017-08-23 19:05:41 +0000
commit     3c1527544df835a831898edbd74a473f027bb855 (patch)
tree       2e2cf0c7060b39aec4829dc866fcb2468fa16b77
parent     bacbd4c6349f9de9c597d309a6c23eced2058a15 (diff)
deployment handler initial seed code
Change-Id: I0bfc86d17edead0114ea0012fb469014e978cd15
Issue-Id: DCAEGEN2-43
Signed-off-by: J. F. Lucas <jflucas@research.att.com>
-rw-r--r--  .gitignore  8
-rw-r--r--  .gitreview  4
-rw-r--r--  Dockerfile  18
-rw-r--r--  LICENSE.txt  22
-rw-r--r--  README.md  15
-rw-r--r--  deployment-handler.js  144
-rw-r--r--  dh.png  bin 0 -> 12222 bytes
-rw-r--r--  dispatcherAPI.yaml  382
-rw-r--r--  etc/log4js.json  52
-rw-r--r--  lib/auth.js  65
-rw-r--r--  lib/cloudify.js  312
-rw-r--r--  lib/config.js  213
-rw-r--r--  lib/consul.js  66
-rw-r--r--  lib/dcae-deployments.js  234
-rw-r--r--  lib/deploy.js  306
-rw-r--r--  lib/dispatcher-error.js  53
-rw-r--r--  lib/info.js  38
-rw-r--r--  lib/inventory.js  169
-rw-r--r--  lib/logging.js  266
-rw-r--r--  lib/middleware.js  77
-rw-r--r--  lib/promise_request.js  114
-rw-r--r--  lib/repeat.js  54
-rw-r--r--  lib/utils.js  39
-rwxr-xr-x  mvn-phase-script.sh  155
-rw-r--r--  package.json  19
-rw-r--r--  pom.xml  279
-rwxr-xr-x  set_version.sh  2
-rw-r--r--  version.js  1
28 files changed, 3107 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..8804e84
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,8 @@
+.jshintrc
+.project
+.settings/
+.sublime-project
+.tern-project
+node_modules/
+work/
+log/
diff --git a/.gitreview b/.gitreview
new file mode 100644
index 0000000..7ce5836
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=gerrit.onap.org
+port=29418
+project=dcaegen2/platform/deployment-handler.git
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..0daecfc
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,18 @@
+FROM node:6.10.3
+MAINTAINER maintainer
+ENV INSROOT /opt/app
+ENV APPUSER dh
+RUN mkdir -p ${INSROOT}/${APPUSER}/lib \
+ && mkdir -p ${INSROOT}/${APPUSER}/etc \
+ && mkdir -p ${INSROOT}/${APPUSER}/log \
+ && useradd -d ${INSROOT}/${APPUSER} ${APPUSER}
+COPY *.js ${INSROOT}/${APPUSER}/
+COPY *.json ${INSROOT}/${APPUSER}/
+COPY lib ${INSROOT}/${APPUSER}/lib/
+COPY etc/log4js.json ${INSROOT}/${APPUSER}/etc/log4js.json
+WORKDIR ${INSROOT}/${APPUSER}
+RUN npm install --only=production && chown -R ${APPUSER}:${APPUSER} ${INSROOT}/${APPUSER} && npm remove -g npm
+USER ${APPUSER}
+VOLUME ${INSROOT}/${APPUSER}/log
+EXPOSE 8443
+ENTRYPOINT ["/usr/local/bin/node", "deployment-handler.js"]
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 0000000..ae12da2
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,22 @@
+/*
+ * ============LICENSE_START==========================================
+ * ===================================================================
+ * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END============================================
+ *
+ * ECOMP and OpenECOMP are trademarks
+ * and service marks of AT&T Intellectual Property.
+ *
+ */
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..cb7978c
--- /dev/null
+++ b/README.md
@@ -0,0 +1,15 @@
+### DCAE Deployment Handler
+
+The DCAE Deployment Handler (DH) is the interface to DCAE's orchestration system. The DH exposes an HTTP-based API that allows client systems to request the deployment of DCAE services based on service blueprints stored in the DCAE inventory, as well as to check the status of deployments and to request undeployment of a previously deployed service. The API is documented [here](./dispatcherAPI.yaml).
+
+The diagram below shows the DH's place in the DCAE orchestration system.
+
+![Dh diagram](./dh.png)
+
+
+#### Building the Deployment Handler
+The Deployment Handler is packaged as a Docker image. The [Dockerfile](./Dockerfile) can drive a Docker build (using the `docker build` command) from the top-level directory of this repository.
+
+To incorporate git information about the current tag and commit into the code, run the `set_version.sh` script to generate a new `version.js` file before doing the Docker build. (The script uses `git describe --long --always` to get the version information.)
+
+When building an image intended as a release candidate or a released product, it's important to freeze all of the dependencies. Use the `npm shrinkwrap` command to freeze the dependencies, and commit the resulting `npm-shrinkwrap.json` file. \ No newline at end of file
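
A minimal sketch of that build sequence, run from the top-level directory of the repository (the image tag used here is illustrative, not something the repo prescribes):

    ./set_version.sh      # regenerate version.js from "git describe --long --always"
    npm shrinkwrap        # freeze dependencies; commit the resulting npm-shrinkwrap.json
    docker build -t dcae-deployment-handler .
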
diff --git a/deployment-handler.js b/deployment-handler.js
new file mode 100644
index 0000000..35744ae
--- /dev/null
+++ b/deployment-handler.js
@@ -0,0 +1,144 @@
+/*
+Copyright(c) 2017 AT&T Intellectual Property. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and limitations under the License.
+ */
+
+/* Deployment handler main */
+
+"use strict";
+
+const API_VERSION = "4.0.0";
+
+const fs = require('fs');
+const util = require('util');
+const http = require('http');
+const https = require('https');
+const express = require('express');
+const conf = require('./lib/config');
+const createError = require('./lib/dispatcher-error').createDispatcherError;
+
+/* Paths for API routes */
+const INFO_PATH = "/";
+const DEPLOYMENTS_PATH = "/dcae-deployments";
+
+const start = function(config) {
+
+ const startTime = new Date();
+
+ /*
+ * Set log level--config will supply a default of "INFO" if not explicitly
+ * set in config.json
+ */
+ logging.setLevel(config.logLevel);
+
+ /* Set up exported configuration */
+ config.version = require('./version').version;
+ config.apiVersion = API_VERSION;
+ config.apiLinks = {
+ info : INFO_PATH,
+ deployments: DEPLOYMENTS_PATH
+ };
+ exports.config = config;
+
+ log.debug(null, "Configuration: " + JSON.stringify(config));
+
+ /* Set up the application */
+ const app = express();
+ app.set('x-powered-by', false);
+ app.set('etag', false);
+
+ /* Give each request a unique request ID */
+ app.use(require('./lib/middleware').assignId);
+
+ /* If authentication is set up, check it */
+ app.use(require('./lib/auth').checkAuth);
+
+ /* Set up API routes */
+ app.use(INFO_PATH, require('./lib/info'));
+ app.use(DEPLOYMENTS_PATH, require('./lib/dcae-deployments'));
+
+ /* Set up error handling */
+ app.use(require('./lib/middleware').handleErrors);
+
+ /* Start the server */
+ var server = null;
+ var usingTLS = false;
+ try {
+ if (config.ssl && config.ssl.pfx && config.ssl.passphrase
+ && config.ssl.pfx.length > 0) {
+ /*
+ * Check for non-zero pfx length--DCAE config will deliver an empty
+ * pfx if no cert available for the host.
+ */
+ server = https.createServer({
+ pfx : config.ssl.pfx,
+ passphrase : config.ssl.passphrase
+ }, app);
+ usingTLS = true;
+ }
+ else {
+ server = http.createServer(app);
+ }
+ }
+ catch (e) {
+ throw (createError('Could not create http(s) server--exiting: '
+ + e.message, 500, 'system', 551));
+ }
+
+ server.setTimeout(0);
+
+ server.listen(config.listenPort, config.listenHost, function(err) {
+ var addr = server.address();
+ var msg = ("Dispatcher version " + config.version + " listening on "
+ + addr.address + ":" + addr.port + " pid: " + process.pid
+ + (usingTLS ? " " : " not ") + "using TLS (HTTPS)");
+ log.metrics(null, {startTime: startTime, complete: true}, msg);
+ });
+
+ /* Set up handling for terminate signal */
+ process.on('SIGTERM', function() {
+ var startTime = new Date();
+ log.metrics(null, {startTime: startTime, complete: true}, "Deployment Handler API shutting down.")
+ server.close(function() {
+ log.metrics(null, {startTime: startTime, complete: true}, "Deployment Handler API server shut down.")
+ });
+ });
+
+ /* Log actual exit */
+ /*
+ * logging is asynchronous, so we will see another beforeExit event
+ * after it completes.
+ */
+ var loggedExit = false;
+ process.on('beforeExit', function() {
+ if (!loggedExit) {
+ loggedExit = true;
+ log.metrics(null, {startTime: startTime, complete: true}, "Deployment Handler process exiting.")
+ }
+ });
+};
+
+/* Set up logging */
+const logging = require('./lib/logging');
+const log = logging.getLogger();
+
+/* Get configuration and start */
+conf.configure(process.env.CONSUL_HOST)
+.then(start)
+.catch(function(e) {
+ log.error(e.logCode ? e : createError(
+ 'Dispatcher exiting due to start-up problem: ' + e.message, 500,
+ 'system', 552));
+ console.log("Dispatcher exiting due to startup problem: " + e.message);
+}); \ No newline at end of file
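
The main module reads the Consul address from the CONSUL_HOST environment variable and serves HTTPS only when a certificate and passphrase are supplied through the configuration; port 8443 matches the Dockerfile's EXPOSE and the default listenPort. A hedged sketch of running the image built above (image tag, Consul hostname, and host paths are illustrative; the cert mount is optional):

    docker run -d -p 8443:8443 \
        -e CONSUL_HOST=consul.example.org \
        -v /opt/dh/log:/opt/app/dh/log \
        -v /opt/dh/cert:/opt/app/dh/etc/cert \
        dcae-deployment-handler
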
diff --git a/dh.png b/dh.png
new file mode 100644
index 0000000..a57be65
--- /dev/null
+++ b/dh.png
Binary files differ
diff --git a/dispatcherAPI.yaml b/dispatcherAPI.yaml
new file mode 100644
index 0000000..5bd0d4f
--- /dev/null
+++ b/dispatcherAPI.yaml
@@ -0,0 +1,382 @@
+
+swagger: '2.0'
+
+info:
+ version: "4.0.0"
+ title: Dispatcher API
+ description: |
+    High-level API for deploying/undeploying composed services using Cloudify Manager.
+
+# Paths
+paths:
+ /:
+ get:
+ description: |
+ Get API version information and links to API operations
+
+ responses:
+
+ 200:
+ description: Success
+ schema:
+ title: DispatcherInfo
+ type: object
+ properties:
+ apiVersion:
+ type: string
+ description: |
+ version of API supported by this server
+ serverVersion:
+ type: string
+ description: |
+ version of software running on this server
+ links:
+ type: object
+ description: |
+ Links to API resources
+ properties:
+ info:
+ type: string
+ description: |
+ path for the server information endpoint
+ events:
+ type: string
+ description: |
+ path for the events endpoint
+
+ /dcae-deployments:
+ get:
+ description: |
+ List service deployments known to the orchestrator, optionally restricted to a single service type
+
+ parameters:
+ - name: serviceTypeId
+ description: |
+ Service type identifier for the type whose deployments are to be listed
+ type: string
+ in: query
+ required: false
+
+ responses:
+
+ 200:
+ description: |
+ Success. (Note that if no matching deployments are found, the request is still a success; the
+ deployments array is empty in that case.)
+ schema:
+ $ref: "#/definitions/DCAEDeploymentsListResponse"
+
+ 500:
+ description: |
+ Problem on the server side. See the message
+ in the response for more details.
+ schema:
+ $ref: "#/definitions/DCAEErrorResponse"
+ 502:
+ description: |
+ Error reported to the dispatcher by a downstream system. See the message
+ in the response for more details.
+ schema:
+ $ref: "#/definitions/DCAEErrorResponse"
+
+ 504:
+ description: |
+ Error communicating with a downstream system. See the message
+ in the response for more details.
+ schema:
+ $ref: "#/definitions/DCAEErrorResponse"
+
+ /dcae-deployments/{deploymentId}:
+ put:
+ description: |
+ Request deployment of a DCAE service
+
+ consumes:
+ - application/json
+ produces:
+ - application/json
+
+ parameters:
+ - name: deploymentId
+ description: |
+ Unique deployment identifier assigned by the API client.
+ in: path
+ type: string
+ required: true
+
+ - name: body
+ in: body
+ schema:
+ $ref: "#/definitions/DCAEDeploymentRequest"
+ required: true
+
+ responses:
+
+ 202:
+ description: |
+ Success: The content that was posted is valid, the dispatcher has
+ found the needed blueprint, created an instance of the topology in the orchestrator,
+ and started an installation workflow.
+ schema:
+ $ref: "#/definitions/DCAEDeploymentResponse"
+
+ 400:
+ description: |
+ Bad request: See the message in the response for details.
+ schema:
+ $ref: "#/definitions/DCAEErrorResponse"
+
+ 409:
+ description: |
+ A service with the specified deployment Id already exists. Using PUT to update the service is not a supported operation.
+ schema:
+ $ref: "#/definitions/DCAEErrorResponse"
+
+ 415:
+ description: |
+ Bad request: The Content-Type header does not indicate that the content is
+ 'application/json'
+ schema:
+ $ref: "#/definitions/DCAEErrorResponse"
+
+ 500:
+ description: |
+ Problem on the server side. See the message
+ in the response for more details.
+ schema:
+ $ref: "#/definitions/DCAEErrorResponse"
+
+ 502:
+ description: |
+ Error reported to the dispatcher by a downstream system. See the message
+ in the response for more details.
+ schema:
+ $ref: "#/definitions/DCAEErrorResponse"
+
+ 504:
+ description: |
+ Error communicating with a downstream system. See the message
+ in the response for more details.
+ schema:
+ $ref: "#/definitions/DCAEErrorResponse"
+
+ delete:
+ description: |
+ Uninstall the DCAE service and remove all associated data from the orchestrator.
+
+ parameters:
+ - name: deploymentId
+ description: |
+ Deployment identifier for the service to be uninstalled.
+ in: path
+ type: string
+ required: true
+
+ responses:
+
+ 202:
+ description: |
+ Success: The dispatcher has initiated the uninstall operation.
+ schema:
+ $ref: "#/definitions/DCAEDeploymentResponse"
+
+ 400:
+ description: |
+ Bad request: See the message in the response for details.
+ schema:
+ $ref: "#/definitions/DCAEErrorResponse"
+
+ 500:
+ description: |
+ Problem on the server side. See the message
+ in the response for more details.
+ schema:
+ $ref: "#/definitions/DCAEErrorResponse"
+
+ 502:
+ description: |
+ Error reported to the dispatcher by a downstream system. See the message
+ in the response for more details.
+ schema:
+ $ref: "#/definitions/DCAEErrorResponse"
+
+ 504:
+ description: |
+ Error communicating with a downstream system. See the message
+ in the response for more details.
+ schema:
+ $ref: "#/definitions/DCAEErrorResponse"
+
+ /dcae-deployments/{deploymentId}/operation/{operationId}:
+ get:
+ description: |
+ Get status of a deployment operation
+ parameters:
+ - name: deploymentId
+ in: path
+ type: string
+ required: true
+ - name: operationId
+ in: path
+ type: string
+ required: true
+
+ responses:
+
+ 200:
+ description: Status information retrieved successfully
+ schema:
+ $ref: "#/definitions/DCAEOperationStatusResponse"
+
+ 404:
+ description: The operation information does not exist (possibly because the service has been uninstalled and deleted).
+ schema:
+ $ref: "#/definitions/DCAEErrorResponse"
+
+ 500:
+ description: |
+ Problem on the server side. See the message
+ in the response for more details.
+ schema:
+ $ref: "#/definitions/DCAEErrorResponse"
+
+ 502:
+ description: |
+ Error reported to the dispatcher by a downstream system. See the message
+ in the response for more details.
+ schema:
+ $ref: "#/definitions/DCAEErrorResponse"
+
+ 504:
+ description: |
+ Error communicating with a downstream system. See the message
+ in the response for more details.
+ schema:
+ $ref: "#/definitions/DCAEErrorResponse"
+
+
+
+
+definitions:
+
+ DCAEDeploymentRequest:
+ description: |
+ Request for deploying a DCAE service.
+ type:
+ object
+ required: [serviceTypeId]
+
+ properties:
+
+ serviceTypeId:
+ description: |
+ The service type identifier (a unique ID assigned by DCAE inventory) for the service to be deployed.
+ type: string
+
+ inputs:
+ description: |
+ Object containing inputs needed by the service blueprint to create an instance of the service.
+ Content of the object depends on the service being deployed.
+ type: object
+
+ DCAEDeploymentResponse:
+ description: |
+ Response body for a PUT or DELETE to /dcae-deployments/{deploymentId}
+ type: object
+
+ required: [requestId, links]
+
+ properties:
+ requestId:
+ type: string
+ description: |
+ Unique identifier for the request
+ links:
+ description: |
+ Links that the API client can access.
+ type: object
+ properties:
+ self:
+ type: string
+ description: |
+ Link used to retrieve information about the service being deployed
+ status:
+ type: string
+ description:
+ Link used to retrieve information about the status of the installation workflow
+
+ DCAEOperationStatusResponse:
+ description: |
+ Response body for a request for status of an installation or uninstallation operation.
+ type: object
+
+ required: [requestId, operationType, status]
+
+ properties:
+ requestId:
+ type: string
+ description: |
+ A unique identifier assigned to the request. Useful for tracing a request through logs.
+ operationType:
+ description: |
+ Type of operation being reported on. ("install" or "uninstall")
+ type: string
+ status:
+ description: |
+ Status of the installation or uninstallation operation. Possible values are "processing",
+ "succeeded", and "failed"
+ type: string
+ error:
+ description: |
+ If status is "failed", this field will be present and contain additional information about the reason the operation failed.
+ type: string
+ links:
+ description: |
+ If the operation succeeded, links that the client can follow to take further action. Note that a successful "uninstall" operation removes the DCAE service instance completely, so there are no possible further actions, and no links.
+ type: object
+ properties:
+ self:
+ type: string
+ description: |
+ Link used to retrieve information about the service.
+ uninstall:
+ type: string
+ description:
+ Link used to trigger an "uninstall" operation for the service. (Use the DELETE method.)
+
+ DCAEErrorResponse:
+ description: |
+ Object reporting an error.
+ type:
+ object
+ required: [status]
+
+ properties:
+ status:
+ description: HTTP status code for the response
+ type: integer
+
+ message:
+ description: Human-readable description of the reason for the error
+ type: string
+
+ DCAEDeploymentsListResponse:
+ description: |
+ Object providing a list of deployments
+ type: object
+ required: [requestId, deployments]
+
+ properties:
+ requestId:
+ type: string
+ description: |
+ Unique identifier for the request
+ deployments:
+ type: array
+ items:
+ type: object
+ properties:
+ href:
+ type: string
+ description: |
+ URL for the service deployment
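
As a hedged illustration of the request flow this spec describes (host, credentials, deployment ID, service type ID, and the input value are all placeholders; the -u option is needed only when Basic authentication is configured, and -k only when the server's certificate is not trusted):

    # Request a deployment of a service type known to DCAE inventory
    curl -k -u admin:admin123 -X PUT \
        -H 'Content-Type: application/json' \
        -d '{"serviceTypeId": "<serviceTypeId>", "inputs": {"location": "site-a"}}' \
        https://dh-host:8443/dcae-deployments/demo-deployment-1

    # Poll the "status" link returned in the 202 response
    curl -k -u admin:admin123 \
        https://dh-host:8443/dcae-deployments/demo-deployment-1/operation/<operationId>

    # Undeploy
    curl -k -u admin:admin123 -X DELETE \
        https://dh-host:8443/dcae-deployments/demo-deployment-1
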
diff --git a/etc/log4js.json b/etc/log4js.json
new file mode 100644
index 0000000..5916344
--- /dev/null
+++ b/etc/log4js.json
@@ -0,0 +1,52 @@
+{
+ "appenders":
+ [
+ {
+ "type": "file",
+ "filename": "log/audit.log",
+ "maxLogSize": 10240000,
+ "backups": 10,
+ "category": ["audit"],
+ "layout":
+ {
+ "type": "messagePassThrough"
+ }
+ },
+
+ {
+ "type": "file",
+ "filename": "log/metrics.log",
+ "maxLogSize": 10240000,
+ "backups": 10,
+ "category": ["metrics"],
+ "layout":
+ {
+ "type": "messagePassThrough"
+ }
+ },
+
+ {
+ "type": "file",
+ "filename": "log/error.log",
+ "maxLogSize": 10240000,
+ "backups": 10,
+ "category": ["error"],
+ "layout":
+ {
+ "type": "messagePassThrough"
+ }
+ },
+
+ {
+ "type": "file",
+ "filename": "log/debug.log",
+ "maxLogSize": 10240000,
+ "backups": 10,
+ "category": ["debug"],
+ "layout":
+ {
+ "type": "messagePassThrough"
+ }
+ }
+ ]
+}
diff --git a/lib/auth.js b/lib/auth.js
new file mode 100644
index 0000000..9ddd7b3
--- /dev/null
+++ b/lib/auth.js
@@ -0,0 +1,65 @@
+/*
+Copyright(c) 2017 AT&T Intellectual Property. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and limitations under the License.
+*/
+
+/* HTTP Basic Authentication */
+
+"use strict";
+
+/* Extract user name and password from the 'Authorization' header */
+const parseAuthHeader = function(authHeader){
+
+ var parsedHeader = {};
+
+ const authItems = authHeader.split(/\s+/); // Split on the white space between Basic and the base64 encoded user:password
+
+ if (authItems[0].toLowerCase() === 'basic') {
+ if (authItems[1]) {
+ const authString = (new Buffer(authItems[1], 'base64')).toString();
+ const userpass = authString.split(':');
+ if (userpass.length > 1) {
+ parsedHeader = {user: userpass[0], password: userpass[1]};
+ }
+ }
+ }
+ return parsedHeader;
+};
+
+/* Middleware function to check authentication */
+exports.checkAuth = function(req, res, next) {
+ const auth = process.mainModule.exports.config.auth;
+ if (auth) {
+ /* Authentication is configured */
+ if (req.headers.authorization) {
+ const creds = parseAuthHeader(req.headers.authorization);
+ if (creds.user && creds.password && (creds.user in auth) && (auth[creds.user] === creds.password)) {
+ next();
+ }
+ else {
+ var err = new Error('Authentication required');
+ err.status = 403;
+ next(err);
+ }
+ }
+ else {
+ var errx = new Error ('Authentication required');
+ errx.status = 403;
+ next(errx);
+ }
+ }
+ else {
+ next(); // Nothing to do, no authentication required
+ }
+}; \ No newline at end of file
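
For illustration, if the exported config's auth property were {"admin": "admin123"} (the example credentials used in the lib/config.js comments below, not real ones), checkAuth would accept a request carrying the standard Basic header, which curl's -u option generates automatically:

    # curl -u admin:admin123 https://... sends:
    Authorization: Basic YWRtaW46YWRtaW4xMjM=    # base64 of "admin:admin123"
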
diff --git a/lib/cloudify.js b/lib/cloudify.js
new file mode 100644
index 0000000..150f1c4
--- /dev/null
+++ b/lib/cloudify.js
@@ -0,0 +1,312 @@
+/*
+Copyright(c) 2017 AT&T Intellectual Property. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and limitations under the License.
+*/
+
+/* Low-level routines for using the Cloudify Manager REST API */
+
+"use strict";
+
+const admzip = require('adm-zip');
+
+const repeat = require('./repeat');
+const req = require('./promise_request');
+const doRequest = req.doRequest;
+
+var cfyAPI = null;
+var cfyAuth = null;
+var logger = null;
+
+
+// Delay function--returns a promise that's resolved after 'dtime'
+// milliseconds.
+var delay = function(dtime) {
+ return new Promise(function(resolve, reject) {
+ setTimeout(resolve, dtime);
+ });
+};
+
+// Get current status of a workflow execution
+// Function for getting execution info
+const getExecutionStatus = function(executionId) {
+ var reqOptions = {
+ method : "GET",
+ uri : cfyAPI + "/executions/" + executionId
+ };
+ if (cfyAuth) {
+ reqOptions.auth = cfyAuth;
+ }
+ return doRequest(reqOptions);
+};
+
+// Poll for the result of a workflow execution
+var getWorkflowResult = function(execution_id) {
+ var finished = [ "terminated", "cancelled", "failed" ];
+ var retryInterval = 15000; // Every 15 seconds
+ var maxTries = 240; // Up to an hour
+
+ logger.debug(null, "Getting workflow result for execution id: " + execution_id);
+
+ // Function for testing if workflow is finished
+ // Expects the result of getExecStatus
+ var checkStatus = function(res) {
+ logger.debug(null, "Checking result: " + JSON.stringify(res) + " ==> " + (res.json && res.json.status && finished.indexOf(res.json.status) < 0));
+ return res.json && res.json.status && finished.indexOf(res.json.status) < 0;
+ };
+
+ // Create execution status checker function
+ var getExecStatus = function() { return getExecutionStatus(execution_id);};
+
+ return repeat.repeatWhile(getExecStatus, checkStatus, maxTries, retryInterval)
+ .then(
+
+ /* Handle fulfilled promise from repeatWhile */
+ function(res) {
+
+ logger.debug(null, 'workflow result: ' + JSON.stringify(res));
+
+ /* Successful completion */
+ if (res.json && res.json.status && res.json.status === 'terminated') {
+ return res;
+ }
+
+ /* If we get here, we don't have a success and we're going to throw something */
+
+ var error = {};
+
+ /* We expect a JSON object with a status */
+ if (res.json && res.json.status) {
+
+ /* Failure -- we need to return something that looks like the CM API failures */
+ if (res.json.status === 'failed') {
+ error.body = 'workflow failed: ' + execution_id + ' -- ' + (res.json.error ? JSON.stringify(res.json.error) : 'no error information');
+ }
+
+ /* Cancellation -- don't really expect this */
+ else if (res.json.status === 'canceled' || res.json.status === 'cancelled') {
+ error.body = 'workflow canceled: ' + execution_id;
+ }
+
+ /* Don't expect anything else -- but if we get it, it's not a success! */
+ else {
+ error.body = 'workflow--unexpected status ' + res.json.status + ' for ' + execution_id;
+ }
+ }
+
+ /* The body of the response from the API call to get execution status is not what we expect at all */
+ else {
+ error.body = 'workflow--unexpected result body getting execution status from CM for ' + execution_id;
+ }
+
+ throw error;
+ },
+
+ /* Handle rejection of promise from repeatWhile--don't use a catch because it would catch the error thrown above */
+ function(err) {
+ /* repeatWhile could fail and we get here because:
+ * -- repeatWhile explicitly rejects the promise because it has exhausted the retries
+ * -- repeatWhile propagates a system error (e.g., network problem) trying to access the API
+ * -- repeatWhile propagates a rejected promise due to a bad HTTP response status
+ * These should all get normalized in deploy.js--so we just rethrow the error.
+ */
+
+ throw err;
+
+ });
+};
+
+//Initiate a workflow execution against a deployment
+const initiateWorkflowExecution = function(dpid, workflow) {
+ // Set up the HTTP POST request
+ var reqOptions = {
+ method : "POST",
+ uri : cfyAPI + "/executions",
+ headers : {
+ "Content-Type" : "application/json",
+ "Accept" : "*/*"
+ }
+ };
+ if (cfyAuth) {
+ reqOptions.auth = cfyAuth;
+ }
+ var body = {
+ deployment_id : dpid,
+ workflow_id : workflow
+ };
+
+ // Make the POST request
+ return doRequest(reqOptions, JSON.stringify(body))
+ .then(function(result) {
+ logger.debug(null, "Result from POSTing workflow execution start: " + JSON.stringify(result));
+ if (result.json && result.json.id) {
+ return {deploymentId: dpid, workflowType: workflow, executionId: result.json.id};
+ }
+ else {
+ logger.debug(null,"Did not get expected JSON body from POST to start workflow");
+ var err = new Error("POST to start workflow got success response but no body");
+            err.status = err.code = 502;
+            throw err;
+ }
+ });
+};
+
+// Uploads a blueprint via the Cloudify API
+exports.uploadBlueprint = function(bpid, blueprint) {
+
+	// Cloudify API wants an archive of a directory containing the blueprint, not the raw blueprint text
+ var zip = new admzip();
+ zip.addFile('work/', new Buffer(0));
+ zip.addFile('work/blueprint.yaml', new Buffer(blueprint, 'utf8'));
+ var src = (zip.toBuffer());
+
+ // Set up the HTTP PUT request
+ var reqOptions = {
+ method : "PUT",
+ uri : cfyAPI + "/blueprints/" + bpid,
+ headers : {
+ "Content-Type" : "application/octet-stream",
+ "Accept" : "*/*"
+ }
+ };
+
+ if (cfyAuth) {
+ reqOptions.auth = cfyAuth;
+ }
+ // Initiate PUT request and return the promise for a result
+ return doRequest(reqOptions, src);
+};
+
+// Creates a deployment from a blueprint
+exports.createDeployment = function(dpid, bpid, inputs) {
+
+ // Set up the HTTP PUT request
+ var reqOptions = {
+ method : "PUT",
+ uri : cfyAPI + "/deployments/" + dpid,
+ headers : {
+ "Content-Type" : "application/json",
+ "Accept" : "*/*"
+ }
+ };
+
+ if (cfyAuth) {
+ reqOptions.auth = cfyAuth;
+ }
+ var body = {
+ blueprint_id : bpid
+ };
+ if (inputs) {
+ body.inputs = inputs;
+ }
+
+ // Make the PUT request to create the deployment
+ return doRequest(reqOptions, JSON.stringify(body));
+};
+
+// Initiate a workflow execution against a deployment
+exports.initiateWorkflowExecution = initiateWorkflowExecution;
+
+// Get the status of a workflow execution
+exports.getWorkflowExecutionStatus = getExecutionStatus;
+
+// Return a promise for the final result of a workflow execution
+exports.getWorkflowResult = getWorkflowResult;
+
+// Executes a workflow against a deployment and returns a promise for final result
+exports.executeWorkflow = function(dpid, workflow) {
+
+ // Initiate the workflow
+ return initiateWorkflowExecution(dpid, workflow)
+
+ // Wait for the result
+ .then (function(result) {
+ logger.debug(null, "Result from initiating workflow: " + JSON.stringify(result));
+ return getWorkflowResult(result.executionId);
+ });
+};
+
+// Wait for workflow to complete and get result
+exports.getWorkflowResult = getWorkflowResult;
+
+// Retrieves outputs for a deployment
+exports.getOutputs = function(dpid) {
+ var reqOptions = {
+ method : "GET",
+ uri : cfyAPI + "/deployments/" + dpid + "/outputs",
+ headers : {
+ "Accept" : "*/*"
+ }
+ };
+ if (cfyAuth) {
+ reqOptions.auth = cfyAuth;
+ }
+
+ return doRequest(reqOptions);
+};
+
+// Get the output descriptions for a deployment
+exports.getOutputDescriptions = function(dpid) {
+ var reqOptions = {
+ method : "GET",
+ uri : cfyAPI + "/deployments/" + dpid + "?include=outputs",
+ headers : {
+ "Accept" : "*/*"
+ }
+ };
+ if (cfyAuth) {
+ reqOptions.auth = cfyAuth;
+ }
+
+ return doRequest(reqOptions);
+};
+
+// Deletes a deployment
+exports.deleteDeployment = function(dpid) {
+ var reqOptions = {
+ method : "DELETE",
+ uri : cfyAPI + "/deployments/" + dpid
+ };
+ if (cfyAuth) {
+ reqOptions.auth = cfyAuth;
+ }
+
+ return doRequest(reqOptions);
+};
+
+// Deletes a blueprint
+exports.deleteBlueprint = function(bpid) {
+ var reqOptions = {
+ method : "DELETE",
+ uri : cfyAPI + "/blueprints/" + bpid
+ };
+ if (cfyAuth) {
+ reqOptions.auth = cfyAuth;
+ }
+
+ return doRequest(reqOptions);
+};
+
+// Allow client to set the Cloudify API root address
+exports.setAPIAddress = function(addr) {
+ cfyAPI = addr;
+};
+
+// Allow client to set Cloudify credentials
+exports.setCredentials = function(user, password) {
+ cfyAuth = user + ':' + password;
+};
+
+// Set a logger
+exports.setLogger = function(log) {
+ logger = log;
+};
diff --git a/lib/config.js b/lib/config.js
new file mode 100644
index 0000000..4429247
--- /dev/null
+++ b/lib/config.js
@@ -0,0 +1,213 @@
+/*
+Copyright(c) 2017 AT&T Intellectual Property. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and limitations under the License.
+*/
+
+/*
+ * Dispatcher configuration
+ * Configuration may come from environment variables, a value in a Consul key-value store, or defaults,
+ * in that order of precedence.
+ *
+ * The address of the Consul host is passed in an environment variable called CONSUL_HOST.
+ * If present, the configuration value in the key-value store is a UTF-8 serialization of a JSON object.
+ *
+ *
+ * --------------------------------------------------------------------------------------
+ * | JSON property | Environment variable | Required? | Default |
+ * --------------------------------------------------------------------------------------
+ * | logLevel | LOG_LEVEL | Yes | "INFO" |
+ * --------------------------------------------------------------------------------------
+ * | listenHost | LISTEN_HOST | Yes | "0.0.0.0" |
+ * --------------------------------------------------------------------------------------
+ * | listenPort | LISTEN_PORT | Yes | 8443 |
+ * --------------------------------------------------------------------------------------
+ * | cloudify.user | CLOUDIFY_USER | No | none |
+ * --------------------------------------------------------------------------------------
+ * | cloudify.password | CLOUDIFY_PASSWORD | No | none |
+ * --------------------------------------------------------------------------------------
+ * | cloudify.protocol | CLOUDIFY_PROTOCOL | No | "https" |
+ * --------------------------------------------------------------------------------------
+ * | inventory.user | INVENTORY_USER | No | none |
+ * --------------------------------------------------------------------------------------
+ * | inventory.password | INVENTORY_PASSWORD | No | none |
+ * --------------------------------------------------------------------------------------
+ * | inventory.protocol | INVENTORY_PROTOCOL | No | "https" |
+ * --------------------------------------------------------------------------------------
+ * | auth | (no environment var) | No | none |
+ * --------------------------------------------------------------------------------------
+ * auth, if present, is a JSON object, with property names corresponding to user names and
+ * property values corresponding to passwords. If the auth property has the value:
+ * {"admin" : "admin123", "other" : "other123"}, then any incoming HTTP requests must use
+ * Basic authentication and supply "admin" as a user name with "admin123" as the password or
+ * supply "other" as the user name with "other123" as the password.
+ *
+ * The dispatcher will attempt to run using TLS (i.e., as an HTTPS server) if a certificate
+ * file in pkcs12 format is stored at etc/cert/cert and a file containing the corresponding
+ * passphrase is stored at etc/cert/pass. These files can be made available to the container
+ * running the dispatcher by mounting a volume to the container.
+ */
+"use strict";
+
+const fs = require("fs");
+const utils = require("./utils");
+const consul = require("./consul");
+
+const SSL_CERT_FILE = "etc/cert/cert";
+const SSL_PASS_FILE = "etc/cert/pass";
+
+const CONFIG_KEY = "deployment_handler"; /* Configuration is stored under the name "deployment_handler" */
+const CM_NAME = "cloudify_manager";
+const INV_NAME = "inventory";
+
+const CM_API_PATH = "/api/v2.1";
+const INV_API_PATH = "";
+
+const DEFAULT_CLOUDIFY_PROTOCOL = "https";
+const DEFAULT_INVENTORY_PROTOCOL = "https";
+const DEFAULT_LISTEN_PORT = 8443;
+const DEFAULT_LISTEN_HOST = "0.0.0.0";
+const DEFAULT_LOG_LEVEL = "INFO";
+
+/* Check configuration for completeness */
+const findMissingConfig = function(cfg) {
+ const requiredProps = ['logLevel', 'listenHost', 'listenPort', 'cloudify.url', 'inventory.url'];
+ return requiredProps.filter(function(p){return !utils.hasProperty(cfg,p);});
+};
+
+/* Fetch configuration */
+const getConfig = function (configStoreAddress) {
+ const ch = consul({url: configStoreAddress});
+ return ch.getKey(CONFIG_KEY)
+ .then(function(res) {
+ return res || {};
+ })
+ .catch(function(err) {
+ throw err;
+ });
+};
+
+/* Get a service host:port */
+const getService = function (configStoreAddress, serviceName) {
+ const ch = consul({url: configStoreAddress});
+ return ch.getService(serviceName)
+ .then(function(res) {
+ if (res.length > 0) {
+ return res[0];
+ }
+ else {
+ throw new Error("No service address found for " + serviceName);
+ }
+ })
+};
+
+/* Get the content of a file */
+const getFileContents = function(path) {
+ return new Promise(function(resolve, reject) {
+ fs.readFile(path, function(err, data) {
+ if (err) {
+ reject(err);
+ }
+ else {
+ resolve(data);
+ }
+ })
+ })
+};
+
+/* Check for a TLS cert file and passphrase */
+const getTLSCredentials = function() {
+ var ssl = {};
+
+ /* Get the passphrase */
+ return getFileContents(SSL_PASS_FILE)
+ .then(function(phrase) {
+ ssl.passphrase = phrase.toString('utf8').trim();
+
+ /* Get the cert */
+ return getFileContents(SSL_CERT_FILE);
+ })
+
+ .then(function(cert) {
+ ssl.pfx = cert; /* Keep cert contents as a Buffer */
+ return ssl;
+ })
+
+ .catch(function(err) {
+ return {};
+ });
+}
+
+exports.configure = function(configStoreAddress) {
+
+ var config = {};
+
+ /* Construct a URL for Consul, assuming HTTP and the default Consul port */
+ const configStoreURL = 'http://' + configStoreAddress + ':8500';
+
+ /* Get configuration from configuration store */
+ return getConfig(configStoreURL)
+ .then (function(cfg) {
+
+ config = cfg;
+
+ /* Override values with environment variables and set defaults as needed */
+ config.listenPort = process.env.LISTEN_PORT || cfg.listenPort || DEFAULT_LISTEN_PORT;
+ config.listenHost = process.env.LISTEN_HOST || cfg.listenHost || DEFAULT_LISTEN_HOST;
+ config.logLevel = process.env.LOG_LEVEL || cfg.logLevel || DEFAULT_LOG_LEVEL;
+
+ config.cloudify = config.cloudify || {};
+ config.cloudify.protocol = process.env.CLOUDIFY_PROTOCOL || (cfg.cloudify && cfg.cloudify.protocol) || DEFAULT_CLOUDIFY_PROTOCOL;
+ if ((cfg.cloudify && cfg.cloudify.user) || process.env.CLOUDIFY_USER) {
+ config.cloudify.user = process.env.CLOUDIFY_USER || cfg.cloudify.user;
+ config.cloudify.password = process.env.CLOUDIFY_PASSWORD || cfg.cloudify.password || "";
+ }
+
+ config.inventory = config.inventory || {};
+ config.inventory.protocol = process.env.INVENTORY_PROTOCOL || (cfg.inventory && cfg.inventory.protocol) || DEFAULT_INVENTORY_PROTOCOL;
+ if ((cfg.inventory && cfg.inventory.user)|| process.env.INVENTORY_USER) {
+ config.inventory.user = process.env.INVENTORY_USER || cfg.inventory.user;
+ config.inventory.password = process.env.INVENTORY_PASSWORD || cfg.inventory.password || "";
+ }
+
+ /* Get service information for Cloudify Manager */
+ return getService(configStoreURL, CM_NAME);
+ })
+
+ .then(function(cmService) {
+
+ config.cloudify.url = config.cloudify.protocol +"://" + cmService.address + ":" + cmService.port + CM_API_PATH;
+
+ /* Get service information for inventory */
+ return getService(configStoreURL, INV_NAME);
+ })
+
+ .then(function(invService) {
+ config.inventory.url = config.inventory.protocol + "://" + invService.address + ":" + invService.port + INV_API_PATH;
+
+ /* Get TLS credentials, if they exist */
+ return getTLSCredentials();
+ })
+ .then(function(tls) {
+ config.ssl = tls;
+
+ /* Check for missing required configuration parameters */
+ const missing = findMissingConfig(config);
+ if (missing.length > 0) {
+ throw new Error ("Required configuration elements missing: " + missing.join(','));
+ config = null;
+ }
+
+ return config;
+ });
+};
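
As a hedged example (all values illustrative), the JSON stored under the Consul key "deployment_handler" might be seeded as shown below; cloudify.url and inventory.url are intentionally absent because configure() builds them from the "cloudify_manager" and "inventory" entries in the Consul service catalog:

    curl -X PUT http://consul.example.org:8500/v1/kv/deployment_handler -d '{
      "logLevel": "DEBUG",
      "listenPort": 8443,
      "cloudify": {"protocol": "http", "user": "cfyuser", "password": "cfypass"},
      "inventory": {"protocol": "https"},
      "auth": {"admin": "admin123"}
    }'
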
diff --git a/lib/consul.js b/lib/consul.js
new file mode 100644
index 0000000..5b76ffc
--- /dev/null
+++ b/lib/consul.js
@@ -0,0 +1,66 @@
+/*
+Copyright(c) 2017 AT&T Intellectual Property. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and limitations under the License.
+*/
+
+/* Low-level routines for using the Consul REST API */
+
+const KEY = '/v1/kv/';
+const SERVICE = '/v1/catalog/service/';
+const DEFAULT_URL = 'http://localhost:8500';
+
+const doRequest = require('./promise_request').doRequest;
+
+module.exports = function(options) {
+ const url = options.url || DEFAULT_URL;
+
+ return {
+
+ /* Fetch (a promise for) the decoded value of a single key from Consul KV store.
+ * If the value is a string representation of a JSON object, return as an object.
+ * If there is no such key, resolve to null.
+ */
+ getKey: function(key) {
+ return doRequest({method: 'GET', uri: url + KEY + key + '?raw'})
+ .then(function(res) {
+ return res.json || res.body;
+ })
+ .catch(function(err) {
+ if (err.status === 404) {
+ /* Key wasn't found */
+ return null;
+ }
+ else {
+ /* Some other error, rethrow it */
+ throw err;
+ }
+ });
+ },
+
+ /* Retrieve (a promise for) address:port information for a named service from the Consul service catalog.
+     * If the service has tag(s), return the first one. (It should be the full URL of the service, if it exists.)
+ * Since a service can be registered at multiple nodes, the result is an array.
+ * If the service is not found, returns a zero-length array.
+ */
+ getService: function(serviceId) {
+ return doRequest({method: 'GET', uri: url + SERVICE + serviceId})
+ .then(function(res){
+ return res.json.map(function(r) {
+ /* Address for external service is in r.Address with r.ServiceAddress empty */
+ return {address: r.ServiceAddress || r.Address, port: r.ServicePort, url: r.ServiceTags ? r.ServiceTags[0] : ""};
+ });
+ });
+ }
+ }
+}; \ No newline at end of file
diff --git a/lib/dcae-deployments.js b/lib/dcae-deployments.js
new file mode 100644
index 0000000..bcec0e5
--- /dev/null
+++ b/lib/dcae-deployments.js
@@ -0,0 +1,234 @@
+/*
+Copyright(c) 2017 AT&T Intellectual Property. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and limitations under the License.
+*/
+
+/* Handle the /dcae-deployments API */
+
+"use strict";
+
+/* Set this code up as a "sub-app"--lets us get the mountpoint for creating links */
+const app = require('express')();
+app.set('x-powered-by', false);
+app.set('etag', false);
+
+const bodyParser = require('body-parser');
+const deploy = require('./deploy');
+const middleware = require('./middleware');
+const inv = require('./inventory');
+const log = require('./logging').getLogger();
+
+/* Pick up config exported by main */
+const config = process.mainModule.exports.config;
+const inventory = inv({url: config.inventory.url});
+
+/* Set up middleware stack for initial processing of request */
+app.use(middleware.checkType('application/json')); // Validate type
+app.use(bodyParser.json({strict: true})); // Parse body as JSON
+
+
+/* Return a promise for a blueprint for the given service type ID */
+const getBlueprint = function(serviceTypeId) {
+ return inventory.getBlueprintByType(serviceTypeId)
+ .then(function (blueprintInfo) {
+ if (!blueprintInfo.blueprint) {
+ var e = new Error("No service type with ID " + serviceTypeId);
+ e.status = 404;
+ throw e;
+ }
+ return blueprintInfo;
+ })
+};
+
+/* Generate self and status links object for responses */
+const createLinks = function(req, deploymentId, executionId) {
+ var baseURL = req.protocol + '://' + req.get('Host') + req.app.mountpath + '/' + deploymentId;
+ return {
+ self: baseURL,
+ status: baseURL + '/operation/' + executionId
+ };
+};
+
+/* Generate a success response body for PUT and DELETE operations */
+const createResponse = function(req, result) {
+ return {
+ requestId: req.dcaeReqId,
+ links: createLinks(req, result.deploymentId, result.executionId)
+ };
+};
+
+/* Look up running (or in process of deploying) instances of the given service type */
+app.get('/', function (req, res, next) {
+ var services = []
+
+
+ var searchTerm = {};
+
+ req.query['serviceTypeId'] && (searchTerm = {typeId: req.query['serviceTypeId']});
+
+ inventory.getServicesByType(searchTerm)
+ .then(function (result) {
+ var deployments = result.map(function(service){
+ return {
+ href: req.protocol + '://' + req.get('Host') + req.app.mountpath + '/' + service.deploymentId
+ };
+ })
+ res.status(200).json({requestId: req.dcaeReqId, deployments: deployments});
+ log.audit(req, 200);
+ })
+ .catch(next); /* Let the error handler send response and log the error */
+});
+
+/* Accept an incoming deployment request */
+app.put('/:deploymentId', function(req, res, next) {
+
+	log.debug(req.dcaeReqId, "body: " + JSON.stringify(req.body));
+
+ /* Make sure there's a serviceTypeId in the body */
+ if (!req.body['serviceTypeId']) {
+ var e = new Error ('Missing required parameter serviceTypeId');
+ e.status = 400;
+ throw e;
+ }
+
+ /* Make sure the deploymentId doesn't already exist */
+ inventory.verifyUniqueDeploymentId(req.params['deploymentId'])
+
+ /* Get the blueprint for this service type */
+ .then(function(res) {
+ return getBlueprint(req.body['serviceTypeId']);
+ })
+
+ /* Add this new service instance to inventory
+ * Easier to remove from inventory if deployment fails than vice versa
+ * Also lets client check for deployed/deploying instances if client wants to limit number of instances
+ */
+ .then(function (blueprintInfo) {
+ req.dcaeBlueprint = blueprintInfo.blueprint;
+ return inventory.addService(req.params['deploymentId'], blueprintInfo.typeId, "dummyVnfId", "dummyVnfType", "dummyLocation");
+ })
+
+	/* Upload blueprint, create deployment and start install workflow (but don't wait for completion) */
+ .then (function() {
+ req.dcaeAddedToInventory = true;
+ return deploy.launchBlueprint(req.params['deploymentId'], req.dcaeBlueprint, req.body['inputs']);
+ })
+
+ /* Send the HTTP response indicating workflow has started */
+ .then(function(result) {
+ res.status(202).json(createResponse(req, result));
+ log.audit(req, 202, "Execution ID: " + result.executionId);
+ return result;
+ })
+
+ /* Finish deployment--wait for the install workflow to complete, retrieve and annotate outputs */
+ .then(function(result) {
+ return deploy.finishInstallation(result.deploymentId, result.executionId);
+ })
+
+ /* Log completion in audit log */
+ .then (function(result) {
+ log.audit(req, 200, "Deployed id: " + req.params['deploymentId']);
+ })
+
+ /* All errors show up here */
+ .catch(function(error) {
+
+ /* If we haven't already sent a response, let the error handler send response and log the error */
+ if (!res.headersSent) {
+
+ /* If we made an inventory entry, remove it */
+ if (req.dcaeAddedToInventory) {
+ inventory.deleteService(req.params['deploymentId'])
+ .catch(function(error) {
+ log.error(error, req);
+ });
+ }
+
+ next(error);
+ }
+ else {
+ /* Already sent the response, so just log error */
+ /* Don't remove from inventory, because there is a deployment on CM that might need to be removed */
+ error.message = "Error deploying deploymentId " + req.params['deploymentId'] + ": " + error.message
+ log.error(error, req);
+ log.audit(req, 500, error.message);
+ }
+
+ });
+});
+
+/* Delete a running service instance */
+app.delete('/:deploymentId', function(req, res, next) {
+
+ /* Launch the uninstall workflow */
+ deploy.launchUninstall(req.params['deploymentId'])
+
+ /* Delete the service from inventory */
+ .then(function(result) {
+ return inventory.deleteService(req.params['deploymentId'])
+ .then (function() {
+ return result;
+ });
+ })
+
+ /* Send the HTTP response indicating workflow has started */
+ .then(function(result) {
+ res.status(202).send(createResponse(req, result));
+ log.audit(req, 202, "ExecutionId: " + result.executionId);
+ return result;
+ })
+
+ /* Finish the delete processing--wait for the uninstall to complete, delete deployment, delete blueprint */
+ .then(function(result) {
+ return deploy.finishUninstall(result.deploymentId, result.executionId);
+ })
+
+ /* Log completion in audit log */
+ .then(function(result) {
+ log.audit(req, 200, "Undeployed id: " + req.params['deploymentId']);
+ })
+
+ /* All errors show up here */
+ .catch(function(error) {
+ /* If we haven't already sent a response, give it to the error handler to send response */
+ if (!res.headersSent) {
+ next(error);
+ }
+ else {
+ /* Error happened after we sent the response--log it */
+ error.message = "Error undeploying deploymentId " + req.params['deploymentId'] + ": " + error.message
+ log.error(error, req);
+ log.audit(req, 500, error.message);
+ }
+ });
+});
+
+/* Get the status of a workflow execution */
+app.get('/:deploymentId/operation/:executionId', function(req, res, next){
+ deploy.getExecutionStatus(req.params['executionId'])
+
+ /* Send success response */
+ .then(function(result) {
+ result.requestId = req.dcaeReqId;
+ result.links = createLinks(req, req.params['deploymentId'], req.params['executionId']);
+ res.status(200).json(result);
+ log.audit(req, 200, "Workflow type: " + result.operationType + " -- execution status: " + result.status);
+ })
+
+ .catch(next); /* Let the error handler send the response and log the error */
+
+});
+
+module.exports = app; \ No newline at end of file
diff --git a/lib/deploy.js b/lib/deploy.js
new file mode 100644
index 0000000..741affb
--- /dev/null
+++ b/lib/deploy.js
@@ -0,0 +1,306 @@
+/*
+Copyright(c) 2017 AT&T Intellectual Property. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and limitations under the License.
+*/
+
+"use strict";
+
+/* Deploy and undeploy using Cloudify blueprints */
+
+const config = process.mainModule.exports.config;
+
+/* Set delays between steps */
+const DELAY_INSTALL_WORKFLOW = 30000;
+const DELAY_RETRIEVE_OUTPUTS = 5000;
+const DELAY_DELETE_DEPLOYMENT = 30000;
+const DELAY_DELETE_BLUEPRINT = 10000;
+
+const createError = require('./dispatcher-error').createDispatcherError;
+
+/* Set up logging */
+var logger = require("./logging").getLogger();
+
+/* Set up the Cloudify low-level interface library */
+var cfy = require("./cloudify.js");
+/* Set config for interface library */
+cfy.setAPIAddress(config.cloudify.url);
+cfy.setCredentials(config.cloudify.user, config.cloudify.password);
+cfy.setLogger(logger);
+
+
+
+
+// Try to parse a string as JSON
+var parseContent = function(input) {
+ var res = {json: false, content: input};
+ try {
+ var parsed = JSON.parse(input);
+ res.json = true;
+ res.content = parsed;
+ }
+ catch (pe) {
+ // Do nothing, just indicate it's not JSON and return content as is
+ }
+ return res;
+};
+
+// create a normalized representation of errors, whether they're a node.js Error or a Cloudify API error
+var normalizeError = function (err) {
+ var e;
+
+ if (err instanceof Error) {
+ /* node.js system error */
+ e = createError("Error communicating with CM: " + err.message, 504, "system", 202, 'cloudify-manager');
+ }
+ else {
+ // Try to populate error with information from a Cloudify API error
+ // We expect to see err.body, which is a stringified JSON object
+ // We can parse it and extract message and error_code
+ var message = err.message || "unknown Cloudify Manager API error";
+ var status = err.status || 502;
+ var cfyCode = "UNKNOWN";
+ var cfyMessage;
+
+ if (err.body) {
+ var p = parseContent(err.body);
+ if (p.json) {
+ cfyMessage = p.content.message ? p.content.message : "unknown Cloudify API error";
+ cfyCode = p.content.error_code ? p.content.error_code : "UNKNOWN";
+ }
+ else {
+ // if there's a body and we can't parse it just attach it as the message
+ cfyMessage = err.body;
+ }
+ message = "Status " + status + " from CM API -- error code: " + cfyCode + " -- message: " + cfyMessage;
+ }
+
+ /* Pass through 400-level status, recast 500-level */
+ var returnStatus = (err.status > 499) ? 502 : err.status;
+ e = createError(message, returnStatus, "api", 502, 'cloudify-manager');
+ }
+
+ return e;
+};
+
+// Augment the raw outputs from a deployment with the descriptions from the blueprint
+var annotateOutputs = function (id, rawOutputs) {
+ return new Promise(function(resolve, reject) {
+
+ var outItems = Object.keys(rawOutputs);
+
+ if (outItems.length < 1) {
+ // No output items, so obviously no descriptions, just return empty object
+ resolve({});
+ }
+ else {
+ // Call Cloudify to get the descriptions
+ cfy.getOutputDescriptions(id)
+ .then(function(res) {
+ // Assemble an outputs object with values from raw output and descriptions just obtained
+ var p = parseContent(res.body);
+ if (p.json && p.content.outputs) {
+ var outs = {};
+ outItems.forEach(function(i) {
+ outs[i] = {value: rawOutputs[i]};
+ if (p.content.outputs[i] && p.content.outputs[i].description) {
+ outs[i].description = p.content.outputs[i].description;
+ }
+ });
+ resolve(outs);
+ }
+ else {
+ reject({code: "API_INVALID_RESPONSE", message: "Invalid response for output descriptions query"});
+ }
+ });
+ }
+
+ });
+};
+
+// Delay function--returns a promise that's resolved after 'dtime' milliseconds.
+var delay = function(dtime) {
+ return new Promise(function(resolve, reject){
+ setTimeout(resolve, dtime);
+ });
+};
+
+// Go through the Cloudify API call sequence to upload blueprint, create deployment, and launch install workflow
+// (but don't wait for the workflow to finish)
+const launchBlueprint = function(id, blueprint, inputs) {
+ logger.debug(null, "deploymentId: " + id + " starting blueprint upload");
+ // Upload blueprint
+ return cfy.uploadBlueprint(id, blueprint)
+
+ // Create deployment
+ .then (function(result) {
+ logger.debug(null, "deploymentId: " + id + " blueprint uploaded");
+ // Create deployment
+ return cfy.createDeployment(id, id, inputs);
+ })
+
+ // Launch the workflow, but don't wait for it to complete
+ .then(function(result){
+ logger.debug(null, "deploymentId: " + id + " deployment created");
+ return delay(DELAY_INSTALL_WORKFLOW)
+ .then(function(){
+ return cfy.initiateWorkflowExecution(id, 'install');
+ });
+ })
+ .catch(function(error) {
+ logger.debug(null, "Error: " + error + " for launch blueprint for deploymentId " + id);
+ throw normalizeError(error);
+ })
+};
+exports.launchBlueprint = launchBlueprint;
+
+// Finish installation launched with launchBlueprint
+const finishInstallation = function(deploymentId, executionId) {
+ logger.debug(null, "finishInstallation: " + deploymentId + " -- executionId: " + executionId);
+ return cfy.getWorkflowResult(executionId)
+ .then (function(result){
+ logger.debug(null, "deploymentId: " + deploymentId + " install workflow successfully executed");
+ // Retrieve the outputs from the deployment, as specified in the blueprint
+ return delay(DELAY_RETRIEVE_OUTPUTS).then(function() { return cfy.getOutputs(deploymentId); });
+ })
+ .then(function(result) {
+ // We have the raw outputs from the deployment but not annotated with the descriptions
+ var rawOutputs = {};
+ if (result.body) {
+ var p = parseContent(result.body);
+ if (p.json) {
+ if (p.content.outputs) {
+ rawOutputs = p.content.outputs;
+ }
+ }
+ }
+ logger.debug(null, "output retrieval result for " + deploymentId + ": " + JSON.stringify(result));
+ return annotateOutputs(deploymentId, rawOutputs);
+ })
+ .catch(function(err) {
+ logger.debug(null, "Error finishing install workflow: " + err + " -- " + JSON.stringify(err));
+ throw normalizeError(err);
+ });
+}
+exports.finishInstallation = finishInstallation;
+
+// Initiate uninstall workflow against a deployment, but don't wait for workflow to finish
+const launchUninstall = function(deploymentId) {
+ logger.debug(null, "deploymentId: " + deploymentId + " starting uninstall workflow");
+ // Run uninstall workflow
+ return cfy.initiateWorkflowExecution(deploymentId, 'uninstall')
+ .then(function(result) {
+ return result;
+ })
+ .catch(function(err) {
+ logger.debug(null, "Error initiating uninstall workflow: " + err + " -- " + JSON.stringify(err));
+ throw normalizeError(err);
+ });
+};
+exports.launchUninstall = launchUninstall;
+
+const finishUninstall = function(deploymentId, executionId) {
+ logger.debug(null, "finishUninstall: " + deploymentId + " -- executionId: " + executionId);
+ return cfy.getWorkflowResult(executionId)
+ .then (function(result){
+ logger.debug(null, "deploymentId: " + deploymentId + " uninstall workflow successfully executed");
+ // Delete the deployment
+ return delay(DELAY_DELETE_DEPLOYMENT).then(function() {return cfy.deleteDeployment(deploymentId);});
+ })
+ .then (function(result){
+ logger.debug(null, "deploymentId: " + deploymentId + " deployment deleted");
+ // Delete the blueprint
+ return delay(DELAY_DELETE_BLUEPRINT).then(function() {return cfy.deleteBlueprint(deploymentId);});
+ })
+ .then (function(result){
+ return result;
+ })
+ .catch (function(err){
+ throw normalizeError(err);
+ });
+
+};
+exports.finishUninstall = finishUninstall;
+
+// Get the status of a workflow execution
+exports.getExecutionStatus = function (exid) {
+ return cfy.getWorkflowExecutionStatus(exid)
+ .then(function(res){
+
+ var result = {
+ operationType: res.json.workflow_id
+ };
+
+ // Map execution status
+ if (res.json.status === "terminated") {
+ result.status = "succeeded";
+ }
+ else if (res.json.status === "failed") {
+ result.status = "failed";
+ }
+ else if (res.json.status === "cancelled" || res.json.status === "canceled") {
+ result.status = "canceled";
+ }
+ else {
+ result.status = "processing";
+ }
+
+ if (res.json.error) {
+ result.error = res.json.error;
+ }
+ logger.debug(null, "getExecutionStatus result: " + JSON.stringify(result));
+ return result;
+ })
+ .catch(function(error) {
+ throw normalizeError(error);
+ });
+};
+
+// Go through the Cloudify API call sequence to do a deployment
+exports.deployBlueprint = function(id, blueprint, inputs) {
+
+ // Upload blueprint, create deployment, and initiate install workflow
+ return launchBlueprint(id, blueprint, inputs)
+
+ // Wait for the workflow to complete
+ .then(
+
+ // launchBlueprint promise fulfilled -- finish installation
+ function(result){
+ return finishInstallation(result.deploymentId, result.executionId); // Will throw normalized error if it fails
+ },
+
+ // launchBlueprint promise rejected -- report error
+ function(err) {
+ throw normalizeError(err);
+ });
+};
+
+// Go through the Cloudify API call sequence to do an undeployment of a previously deployed blueprint
+exports.undeployDeployment = function(id) {
+ logger.debug(null, "deploymentId: " + id + " starting uninstall workflow");
+
+ // Run launch uninstall workflow
+ return launchUninstall(id)
+
+ // launchUninstall promise fulfilled -- finish uninstall
+ .then (function(result){
+ return finishUninstall(result.deploymentId, result.executionId); // Will throw normalized error if it fails
+ },
+
+ // launchUninstall promise rejected -- report error
+ function(err){
+ throw normalizeError(err);
+ });
+};
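+
+/*
+ * Illustrative usage sketch (not part of the module); the deployment id,
+ * blueprint text, and inputs below are hypothetical:
+ *
+ *   const deploy = require('./deploy');
+ *
+ *   deploy.deployBlueprint('sample-dep-id', blueprintYamlText, {input1: 'value1'})
+ *   .then(function(outputs) {
+ *       // install workflow completed; 'outputs' holds the annotated blueprint outputs
+ *       return deploy.undeployDeployment('sample-dep-id');
+ *   })
+ *   .catch(function(err) {
+ *       // 'err' has been passed through normalizeError
+ *   });
+ */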
+
diff --git a/lib/dispatcher-error.js b/lib/dispatcher-error.js
new file mode 100644
index 0000000..ae51fcc
--- /dev/null
+++ b/lib/dispatcher-error.js
@@ -0,0 +1,53 @@
+/*
+Copyright(c) 2017 AT&T Intellectual Property. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and limitations under the License.
+*/
+
+"use strict";
+
+/*
+ * Extend the standard Error type by appending fields to capture more information at the
+ * point of detection. The error information guides dispatcher's response to the incoming HTTP request
+ * that triggered the error and helps make the error log more specific and meaningful.
+ * This type of Error typically reports on problems encountered when attempting to use a downstream API.
+ *
+ * The standard Error has two fields:
+ * - name: the name of the Error, which is 'Error'
+ * - message: a text description of the error
+ *
+ * For dispatcher purposes, we add:
+ * - status: the HTTP status code that dispatcher should use in its response
+ * - type: "system" or "api" depending on whether the error was the result of a failed system call or
+ * an error reported by the downstream API.
+ * - logCode: the error code to use in the log entry.
+ * - target: the downstream system dispatcher was attempting to interact with
+ *
+ * Note that we're not defining a new class, just adding fields to the existing Error type. This pattern is
+ * used in Node for system errors.
+ */
+
+/* Create an error given the parameters */
+exports.createDispatcherError = function(message, status, type, logCode, target) {
+ var e = new Error();
+
+ e.message = message || 'no error information';
+ e.status = status || 500;
+ e.type = type;
+ e.logCode = logCode || 900;
+ e.target = target || '';
+
+ return e;
+};
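+
+/*
+ * Illustrative example (the values shown are just an example of the call shape):
+ *
+ *   const createError = require('./dispatcher-error').createDispatcherError;
+ *   const err = createError('Error response 500 from DCAE inventory', 502, 'api', 501, 'dcae-inventory');
+ *   // err.status === 502, err.type === 'api', err.logCode === 501, err.target === 'dcae-inventory'
+ */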
+
+
diff --git a/lib/info.js b/lib/info.js
new file mode 100644
index 0000000..94d6455
--- /dev/null
+++ b/lib/info.js
@@ -0,0 +1,38 @@
+/*
+Copyright(c) 2017 AT&T Intellectual Property. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and limitations under the License.
+*/
+
+/* Handle the / API that provides API information */
+
+"use strict";
+
+const router = require('express').Router();
+
+/* Pick up config exported by main */
+const config = process.mainModule.exports.config;
+
+/* Return API version information and links */
+router.get('/', function(req, res) {
+ res.json(
+ {
+ apiVersion: config.apiVersion,
+ serverVersion: config.version,
+ links: config.apiLinks
+ }
+ );
+ require('./logging').getLogger().audit(req, 200);
+});
+
+module.exports = router;
\ No newline at end of file
diff --git a/lib/inventory.js b/lib/inventory.js
new file mode 100644
index 0000000..75a0e47
--- /dev/null
+++ b/lib/inventory.js
@@ -0,0 +1,169 @@
+/*
+Copyright(c) 2017 AT&T Intellectual Property. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and limitations under the License.
+*/
+
+ /* Routines related to accessing DCAE inventory */
+
+ "use strict";
+
+ const req = require('./promise_request');
+ const createError = require('./dispatcher-error').createDispatcherError;
+
+ const INV_SERV_TYPES = '/dcae-service-types';
+ const INV_SERVICES = '/dcae-services';
+
+ /*
+ * Common error handling for inventory API calls
+ */
+ const invError = function(err) {
+ if (err.status && err.status === 404) {
+ /* Map 404 to an empty list */
+ return [];
+ }
+ else {
+ var newErr;
+ var message;
+ if (err.status) {
+ /* Got a response from inventory indicating an error */
+ message = "Error response " + err.status + " from DCAE inventory: " + err.body;
+ newErr = createError(message, 502, "api", 501, "dcae-inventory");
+ }
+ else {
+ /* Problem connecting to inventory */
+ message = "Error communicating with inventory: " + err.message;
+ newErr = createError(message, 504, "system", 201, "dcae-inventory");
+ }
+ throw newErr;
+ }
+ };
+
+ module.exports = function(options) {
+
+ const url = options.url;
+
+ return {
+
+ /* Add a DCAE service to the inventory. Done after a deployment.*/
+ addService: function(deploymentId, serviceType, vnfId, vnfType, vnfLocation, outputs) {
+
+ /* Create the service description */
+ var serviceDescription =
+ {
+ "vnfId" : vnfId,
+ "vnfType" : vnfType,
+ "vnfLocation" : vnfLocation,
+ "typeId" : serviceType,
+ "deploymentRef" : deploymentId
+ };
+
+ // TODO create 'components' array using 'outputs'--for now, a dummy
+ serviceDescription.components = [
+ {
+ componentType: "dummy_component",
+ componentId: "/components/dummy",
+ componentSource: "DCAEController",
+ shareable: 0
+ }
+ ];
+
+ const reqOptions = {
+ method : "PUT",
+ uri : url + INV_SERVICES + "/" + deploymentId,
+ json: serviceDescription
+ };
+
+ return req.doRequest(reqOptions);
+ },
+
+ /* Remove a DCAE service from the inventory. Done after an undeployment. */
+ deleteService: function(serviceId) {
+ return req.doRequest({method: "DELETE", uri: url + INV_SERVICES + "/" + serviceId});
+ },
+
+ /* Find running/deploying instances of services (with a given type name, if specified) */
+ getServicesByType: function(query) {
+ var options = {
+ method: 'GET',
+ uri: url + INV_SERVICES,
+ qs: query || {}
+ };
+
+ return req.doRequest(options)
+ .then (function (result) {
+ var services = [];
+ var content = JSON.parse(result.body);
+ if(content.items) {
+ /* Pick out the fields we want */
+ services = content.items.map(function(i) { return { deploymentId: i.deploymentRef, serviceTypeId: i.typeId};});
+ }
+ return services;
+ })
+ .catch(invError);
+ },
+
+ /* Find a blueprint given the service type ID -- return blueprint and type ID */
+ getBlueprintByType: function(serviceTypeId) {
+ return req.doRequest({
+ method: "GET",
+ uri: url + INV_SERV_TYPES + '/' + serviceTypeId
+ })
+ .then (function(result) {
+ var blueprintInfo = {};
+ var content = JSON.parse(result.body);
+ blueprintInfo.blueprint = content.blueprintTemplate;
+ blueprintInfo.typeId = content.typeId;
+
+ return blueprintInfo;
+ })
+ .catch(invError);
+ },
+
+ /*
+ * Verify that the specified deployment ID does not already have
+ * an entry in inventory. This is needed to enforce the rule that
+ * creating a second instance of a deployment under the
+ * same ID as an existing deployment is not permitted.
+ * The function checks for a service in inventory using the
+ * deployment ID as service name. If it doesn't exist, the function
+ * resolves its promise. If it *does* exist, then it throws an error.
+ */
+ verifyUniqueDeploymentId: function(deploymentId) {
+
+ return req.doRequest({
+ method: "GET",
+ uri: url + INV_SERVICES + "/" + deploymentId
+ })
+
+ /* Successful lookup -- the deployment exists, so throw an error */
+ .then(function(res) {
+ throw createError("Deployment " + deploymentId + " already exists", 409, "api", 501);
+ },
+
+ /* Error from the lookup -- either deployment ID doesn't exist or some other problem */
+ function (err) {
+
+ /* Inventory returns a 404 if it does not find the deployment ID */
+ if (err.status === 404) {
+ return true;
+ }
+
+ /* Some other error -- it really is an error and we can't continue */
+ else {
+ return invError(err);
+ }
+ });
+ }
+ };
+ };
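+
+ /*
+  * Illustrative usage sketch (the inventory URL and ids below are hypothetical):
+  *
+  *   const inventory = require('./inventory')({url: 'https://inventory.example.org:8080'});
+  *
+  *   inventory.verifyUniqueDeploymentId('sample-dep-id')
+  *   .then(function() { return inventory.getBlueprintByType('sample-type-id'); })
+  *   .then(function(blueprintInfo) {
+  *       // blueprintInfo.blueprint and blueprintInfo.typeId come from the service type record
+  *   });
+  */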
diff --git a/lib/logging.js b/lib/logging.js
new file mode 100644
index 0000000..5bfd48a
--- /dev/null
+++ b/lib/logging.js
@@ -0,0 +1,266 @@
+/*
+Copyright(c) 2017 AT&T Intellectual Property. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and limitations under the License.
+*/
+
+"use strict";
+
+const os = require('os');
+
+const log4js = require('log4js');
+log4js.configure('etc/log4js.json');
+
+const auditLogger = log4js.getLogger('audit');
+const errorLogger = log4js.getLogger('error');
+const metricsLogger = log4js.getLogger('metrics');
+const debugLogger = log4js.getLogger('debug');
+
+/* Audit log fields */
+const AUDIT_BEGIN = 0;
+const AUDIT_END = 1;
+const AUDIT_REQID = 2;
+const AUDIT_SVCINST = 3;
+const AUDIT_THREAD = 4;
+const AUDIT_SRVNAME = 5;
+const AUDIT_SVCNAME = 6;
+const AUDIT_PARTNER = 7;
+const AUDIT_STATUSCODE = 8;
+const AUDIT_RESPCODE = 9;
+const AUDIT_RESPDESC = 10;
+const AUDIT_INSTUUID = 11;
+const AUDIT_CATLOGLEVEL = 12;
+const AUDIT_SEVERITY = 13;
+const AUDIT_SRVIP = 14;
+const AUDIT_ELAPSED = 15;
+const AUDIT_SERVER = 16;
+const AUDIT_CLIENTIP = 17;
+const AUDIT_CLASSNAME = 18;
+const AUDIT_UNUSED = 19;
+const AUDIT_PROCESSKEY = 20;
+const AUDIT_CUSTOM1 = 21;
+const AUDIT_CUSTOM2 = 22;
+const AUDIT_CUSTOM3 = 23;
+const AUDIT_CUSTOM4 = 24;
+const AUDIT_DETAILMSG = 25;
+const AUDIT_NFIELDS = 26;
+
+/* Error log fields */
+const ERROR_TIMESTAMP = 0;
+const ERROR_REQID = 1;
+const ERROR_THREAD = 2;
+const ERROR_SVCNAME = 3;
+const ERROR_PARTNER = 4;
+const ERROR_TGTENTITY = 5;
+const ERROR_TGTSVC = 6;
+const ERROR_CATEGORY = 7;
+const ERROR_CODE = 8;
+const ERROR_DESCRIPTION = 9;
+const ERROR_MESSAGE = 10;
+const ERROR_NFIELDS = 11;
+
+/* Error code -> description mapping */
+const descriptions = {
+
+ 201: 'Inventory communication error',
+ 202: 'Cloudify Manager communication error',
+
+ 501: 'Inventory API error',
+ 502: 'Cloudify Manager API error',
+
+ 551: 'HTTP(S) Server initialization error',
+ 552: 'Dispatcher start-up error',
+
+ 999: 'Unknown error'
+};
+
+/* Metrics log fields */
+const METRICS_BEGIN = 0;
+const METRICS_END = 1;
+const METRICS_REQID = 2;
+const METRICS_SVCINST = 3;
+const METRICS_THREAD = 4;
+const METRICS_SRVNAME = 5;
+const METRICS_SVCNAME = 6;
+const METRICS_PARTNER = 7;
+const METRICS_TGTENTITY = 8;
+const METRICS_TGTSVC = 9;
+const METRICS_STATUSCODE = 10;
+const METRICS_RESPCODE = 11;
+const METRICS_RESPDESC = 12;
+const METRICS_INSTUUID = 13;
+const METRICS_CATLOGLEVEL = 14;
+const METRICS_SEVERITY = 15;
+const METRICS_SRVIP = 16;
+const METRICS_ELAPSED = 17;
+const METRICS_SERVER = 18;
+const METRICS_CLIENTIP = 19;
+const METRICS_CLASSNAME = 20;
+const METRICS_UNUSED = 21;
+const METRICS_PROCESSKEY = 22;
+const METRICS_TGTVIRTENTITY = 23;
+const METRICS_CUSTOM1 = 24;
+const METRICS_CUSTOM2 = 25;
+const METRICS_CUSTOM3 = 26;
+const METRICS_CUSTOM4 = 27;
+const METRICS_DETAILMSG = 28;
+const METRICS_NFIELDS = 29;
+
+/* Debug log fields */
+const DEBUG_TIMESTAMP = 0;
+const DEBUG_REQID = 1;
+const DEBUG_INFO = 2;
+const DEBUG_EOR = 3;
+const DEBUG_NFIELDS = 4;
+const DEBUG_MARKER = '^\n';
+
+
+/* Format audit record for an incoming API request */
+const formatAuditRecord = function(req, status, extra) {
+ var rec = new Array(AUDIT_NFIELDS);
+ const end = new Date();
+ rec[AUDIT_END] = end.toISOString();
+ rec[AUDIT_BEGIN] = req.startTime.toISOString();
+ rec[AUDIT_REQID] = req.dcaeReqId;
+ rec[AUDIT_SRVNAME] = req.hostname; // Use the value from the Host header
+ rec[AUDIT_SVCNAME] = req.method + ' ' + req.originalUrl; // Method and URL identify the operation being performed
+ rec[AUDIT_STATUSCODE] = (status < 300 ) ? "COMPLETE" : "ERROR";
+ rec[AUDIT_RESPCODE] = status; // Use the HTTP status code--does not match the table in the logging spec, but makes more sense
+ rec[AUDIT_CATLOGLEVEL] = "INFO"; // The audit records are informational, regardless of the outcome of the operation
+ rec[AUDIT_SRVIP] = req.socket.address().address;
+ rec[AUDIT_ELAPSED] = end - req.startTime;
+ rec[AUDIT_SERVER] = req.hostname; // From the Host header, again
+ rec[AUDIT_CLIENTIP] = req.connection.remoteAddress;
+
+ if (extra) {
+ rec[AUDIT_DETAILMSG]= extra.replace(/\n/g, " "); /* Collapse multi-line extra data to a single line */
+ }
+ return rec.join('|');
+};
+
+/* Format metrics record for internal processing */
+/* opInfo has:
+ * startTime -- operation start time as a Date object
+ * complete -- true if operation completed successfully, false if failed
+ * respCode -- response code received from downstream system, if any
+ * respDesc -- response description received from downstream system, if any
+ * targetEntity -- name or identifier of downstream system used for subrequest, if any
+ * targetService -- request made to downstream system, if any
+ */
+const formatMetricsRecord = function(req, opInfo, extra) {
+ var rec = new Array(METRICS_NFIELDS);
+ const end = new Date();
+ rec[METRICS_END] = end.toISOString();
+ rec[METRICS_BEGIN] = opInfo.startTime.toISOString();
+
+ /* If reporting on a suboperation invoked as a result of an incoming request, capture info about that request */
+ if (req) {
+ rec[METRICS_REQID] = req.dcaeReqId;
+ rec[METRICS_SRVNAME] = req.hostname; // Use name from the host header
+ rec[METRICS_SVCNAME] = req.method + ' ' + req.originalUrl; // Method and URL identify the operation being performed
+ rec[METRICS_CLIENTIP] = req.connection.remoteAddress;
+ rec[METRICS_SRVIP] = req.socket.address().address;
+ }
+ else {
+ /* No incoming request */
+ rec[METRICS_REQID] = 'no incoming request';
+ rec[METRICS_SRVNAME] = os.hostname();
+ rec[METRICS_SVCNAME] = 'no incoming request';
+ }
+
+ rec[METRICS_TGTENTITY] = opInfo.targetEntity;
+ rec[METRICS_TGTSVC] = opInfo.targetService;
+ rec[METRICS_STATUSCODE] = opInfo.complete ? "COMPLETE" : "ERROR";
+ rec[METRICS_RESPCODE] = opInfo.respCode;
+ rec[METRICS_CATLOGLEVEL] = "INFO"; // The metrics records are informational, regardless of the outcome of the operation
+
+ rec[METRICS_ELAPSED] = end - opInfo.startTime;
+ rec[METRICS_SERVER] = rec[METRICS_SRVNAME];
+
+ if (extra) {
+ rec[METRICS_DETAILMSG]= extra.replace(/\n/g, " "); /* Collapse multi-line extra data to a single line */
+ }
+ return rec.join('|');
+};
+
+/* Format error log record */
+const formatErrorRecord = function(category, code, detail, req, target) {
+ var rec = new Array(ERROR_NFIELDS);
+
+ /* Common fields */
+ rec[ERROR_TIMESTAMP] = (new Date()).toISOString();
+ rec[ERROR_CATEGORY] = category;
+ rec[ERROR_CODE] = code;
+ rec[ERROR_DESCRIPTION] = descriptions[code] || 'no description available';
+
+ /* Log error detail in a single line if provided */
+ if (detail) {
+ rec[ERROR_MESSAGE] = detail.replace(/\n/g, " ");
+ }
+
+ /* Fields available if the error happened during processing of an incoming API request */
+ if (req) {
+ rec[ERROR_REQID] = req.dcaeReqId;
+ rec[ERROR_SVCNAME] = req.method + ' ' + req.originalUrl; // Method and URL identify the operation being performed
+ rec[ERROR_PARTNER] = req.connection.remoteAddress; // We don't have the partner's name, but we know the remote IP address
+ }
+
+ /* Include information about the target entity/service if available */
+ if (target) {
+ rec[ERROR_TGTENTITY] = target.entity || '';
+ rec[ERROR_TGTSVC] = target.service || '';
+ }
+ return rec.join('|');
+};
+
+/* Format debug log record */
+const formatDebugRecord = function(reqId, msg) {
+ var rec = new Array(DEBUG_NFIELDS);
+
+ rec[DEBUG_TIMESTAMP] = new Date().toISOString();
+ rec[DEBUG_REQID] = reqId || '';
+ rec[DEBUG_INFO] = msg;
+ rec[DEBUG_EOR] = DEBUG_MARKER;
+
+ return rec.join('|');
+
+};
+
+exports.getLogger = function() {
+ return {
+
+ audit: function(req, status, extra) {
+ auditLogger.info(formatAuditRecord(req, status, extra));
+ },
+
+ error: function(error, req) {
+ errorLogger.error(formatErrorRecord("ERROR", error.logCode, error.message, req, {entity: error.target}));
+ },
+
+ warn: function(error, req) {
+ errorLogger.error(formatErrorRecord("WARN", error.logCode, error.message, req, {entity: error.target}));
+ },
+
+ metrics: function(req, opInfo, extra) {
+ metricsLogger.info(formatMetricsRecord(req, opInfo, extra));
+ },
+
+ debug: function(reqId, msg) {
+ debugLogger.debug(formatDebugRecord(reqId, msg));
+ }
+ }
+};
+
+exports.setLevel = function(level) {
+ log4js.setGlobalLogLevel(level);
+};
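+
+/*
+ * Illustrative usage sketch ('req' is an Express request already tagged by the
+ * middleware with startTime and dcaeReqId; the values shown are hypothetical):
+ *
+ *   const log = require('./logging').getLogger();
+ *
+ *   // audit record for an incoming request that completed with HTTP 200
+ *   log.audit(req, 200);
+ *
+ *   // metrics record for a call to a downstream system
+ *   log.metrics(req, {startTime: new Date(), complete: true, targetEntity: 'dcae-inventory'});
+ *
+ *   // free-form debug entry tied to a request id
+ *   log.debug(req.dcaeReqId, 'some detail worth tracing');
+ */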
diff --git a/lib/middleware.js b/lib/middleware.js
new file mode 100644
index 0000000..183cf77
--- /dev/null
+++ b/lib/middleware.js
@@ -0,0 +1,77 @@
+/*
+Copyright(c) 2017 AT&T Intellectual Property. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and limitations under the License.
+*/
+
+/* Middleware modules */
+
+"use strict";
+
+const utils = require('./utils');
+const log = require('./logging').getLogger();
+
+/* Assign a request ID and start time to each incoming request */
+exports.assignId = function(req, res, next) {
+ /* Use request ID from header if available, otherwise generate one */
+ req.startTime = new Date();
+ req.dcaeReqId = req.get('X-ECOMP-RequestID') || utils.generateId();
+ next();
+};
+
+
+/* Error handler -- send error with JSON body */
+exports.handleErrors = function(err, req, res, next) {
+ var status = err.status || 500;
+ var msg = err.message || err.body || 'unknown error';
+ res.status(status).type('application/json').send({status: status, message: msg });
+ log.audit(req, status, msg);
+
+ if (status >= 500) {
+ log.error(err, req);
+ }
+};
+
+/* Make sure Content-Type is correct for POST and PUT */
+exports.checkType = function(type){
+ return function(req, res, next) {
+ const ctype = req.header('content-type');
+ const method = req.method.toLowerCase();
+ /* Content-Type matters only for POST and PUT */
+ if (ctype === type || ['post','put'].indexOf(method) < 0) {
+ next();
+ }
+ else {
+ var err = new Error ('Content-Type must be \'' + type +'\'');
+ err.status = 415;
+ next (err);
+ }
+ };
+};
+
+/* Check that a JSON body has a set of properties */
+exports.checkProps = function(props) {
+ return function (req, res, next) {
+ const missing = props.filter(function(p){return !utils.hasProperty(req.body,p);});
+ if (missing.length > 0) {
+ var err = new Error ('Request missing required properties: ' + missing.join(','));
+ err.status = 400;
+ next(err);
+ }
+ else {
+ next();
+ }
+ };
+};
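+
+/*
+ * Illustrative wiring sketch (the Express 'app', route, and property list are hypothetical):
+ *
+ *   const middleware = require('./middleware');
+ *
+ *   app.use(middleware.assignId);
+ *   app.put('/dcae-deployments/:deploymentId',
+ *       middleware.checkType('application/json'),
+ *       middleware.checkProps(['serviceTypeId']),
+ *       deploymentHandler);
+ *   app.use(middleware.handleErrors);
+ */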
+
+
diff --git a/lib/promise_request.js b/lib/promise_request.js
new file mode 100644
index 0000000..ca09eee
--- /dev/null
+++ b/lib/promise_request.js
@@ -0,0 +1,114 @@
+/*
+Copyright(c) 2017 AT&T Intellectual Property. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and limitations under the License.
+*/
+
+ /* Promise-based HTTP request client */
+
+ "use strict";
+
+/*
+ * Make an HTTP request using a string for the body
+ * of the request.
+ * Return a promise for result in the form
+ * {status: <http status code>, body: <response body>}
+ */
+
+const http = require('http');
+const https = require('https');
+const url = require('url');
+const querystring = require('querystring');
+
+exports.doRequest = function(options, body) {
+
+ return new Promise(function(resolve, reject) {
+
+ var reqBody = null;
+ if (options.json) {
+ reqBody = JSON.stringify(options.json);
+ options.headers = options.headers || {};
+ options.headers['Content-Type'] = 'application/json';
+ }
+ else if (body) {
+ reqBody = body;
+ }
+
+ if (options.uri) {
+ var parsed = url.parse(options.uri);
+ options.protocol = parsed.protocol;
+ options.hostname = parsed.hostname;
+ options.port = parsed.port;
+ options.path = parsed.path;
+ if (options.qs) {
+ options.path += ('?' + querystring.stringify(options.qs));
+ }
+ }
+
+ try {
+ var req = (options.protocol === 'https:' ? https.request(options) : http.request(options));
+ }
+ catch (e) {
+ reject(e);
+ return;   // no request object was created, so stop here
+ }
+
+ // Reject promise if there's an error
+ req.on('error', function(error) {
+ reject(error);
+ });
+
+ // Capture the response
+ req.on('response', function(resp) {
+
+ // Collect the body of the response
+ var rbody = '';
+ resp.on('data', function(d) {
+ rbody += d;
+ });
+
+ // resolve or reject when finished
+ resp.on('end', function() {
+
+ var result = {
+ status : resp.statusCode,
+ body : rbody
+ };
+
+ // Add a JSON version of the body if appropriate
+ if (rbody.length > 0) {
+ try {
+ var jbody = JSON.parse(rbody);
+ result.json = jbody;
+ }
+ catch (pe) {
+ // Do nothing, no json property added to the result object
+ }
+ }
+
+ if (resp.statusCode > 199 && resp.statusCode < 300) {
+ // HTTP status code indicates success - resolve the promise
+ resolve(result);
+ }
+ else {
+ // Reject the promise
+ reject(result);
+ }
+ });
+ });
+
+ if (reqBody) {
+ req.write(reqBody, 'utf8');
+ }
+ req.end();
+ });
+};
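+
+/*
+ * Illustrative usage sketch (the URI and query below are hypothetical):
+ *
+ *   const doRequest = require('./promise_request').doRequest;
+ *
+ *   doRequest({method: 'GET', uri: 'https://inventory.example.org:8080/dcae-services', qs: {typeId: 'x'}})
+ *   .then(function(result) {
+ *       // result.status, result.body, and result.json (when the body parses as JSON) are available
+ *   })
+ *   .catch(function(err) {
+ *       // connection errors and non-2xx responses reject the promise
+ *   });
+ */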
diff --git a/lib/repeat.js b/lib/repeat.js
new file mode 100644
index 0000000..2ea0e14
--- /dev/null
+++ b/lib/repeat.js
@@ -0,0 +1,54 @@
+/*
+Copyright(c) 2017 AT&T Intellectual Property. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and limitations under the License.
+*/
+
+"use strict";
+
+/**
+ * Returns a promise for running and re-running the specified action until the result meets a specific condition
+ * - action is a function that returns a promise
+ * - predicate is a function that takes a success result from action and returns true if the action should be rerun
+ * - maxTries is the total number of times to try the action
+ * - interval is the interval, in milliseconds, between tries, as approximated by setTimeout()
+ */
+
+exports.repeatWhile = function(action, predicate, maxTries, interval) {
+ return new Promise(function(resolve, reject) {
+
+ var count = 0;
+
+ function makeAttempt() {
+ action()
+ .then (function(res) {
+ if (!predicate(res)) {
+ // We're done
+ resolve(res);
+ }
+ else {
+ if (++count < maxTries) {
+ // set up next attempt
+ setTimeout(makeAttempt, interval);
+ }
+ else {
+ // we've run out of retries, so reject the promise
+ reject({message: "maximum repetitions reached: " + count });
+ }
+ }
+ });
+ }
+
+ makeAttempt();
+ });
+};
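+
+/*
+ * Illustrative usage sketch ('checkStatus' is a hypothetical function returning a promise):
+ *
+ *   const repeat = require('./repeat');
+ *
+ *   // poll every 5 seconds, up to 10 tries, until the result is no longer 'pending'
+ *   repeat.repeatWhile(checkStatus,
+ *       function(res) { return res.status === 'pending'; },
+ *       10, 5000)
+ *   .then(function(res) { console.log('finished: ' + res.status); })
+ *   .catch(function(err) { console.log(err.message); });   // "maximum repetitions reached: 10"
+ */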
diff --git a/lib/utils.js b/lib/utils.js
new file mode 100644
index 0000000..70146e3
--- /dev/null
+++ b/lib/utils.js
@@ -0,0 +1,39 @@
+/*
+Copyright(c) 2017 AT&T Intellectual Property. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and limitations under the License.
+*/
+
+"use strict";
+
+const uuid = require('uuid/v4');
+
+// Utility functions
+
+/* Does object 'o' have property 'key' */
+exports.hasProperty = function(o, key) {
+ return key.split('.').every(function(e){
+ if (typeof(o) === 'object' && o !== null && (e in o) && (typeof o[e] !== 'undefined')) {
+ o = o[e];
+ return true;
+ }
+ else {
+ return false;
+ }
+ });
+};
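+
+/*
+ * Illustrative examples:
+ *
+ *   const utils = require('./utils');
+ *   utils.hasProperty({a: {b: 1}}, 'a.b');   // true
+ *   utils.hasProperty({a: {b: 1}}, 'a.c');   // false
+ */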
+
+/* Generate a random ID string */
+exports.generateId = function() {
+ return uuid();
+};
diff --git a/mvn-phase-script.sh b/mvn-phase-script.sh
new file mode 100755
index 0000000..d260909
--- /dev/null
+++ b/mvn-phase-script.sh
@@ -0,0 +1,155 @@
+#!/bin/bash
+
+echo "running script: [$0] for module [$1] at stage [$2]"
+
+echo "=> Prepare environment "
+#env
+if [ -z "$MVN_DOCKERREG_URL" ]; then
+ MVN_DOCKERREG_URL='nexus3.onap.org:10001'
+fi
+if [ -z "$SETTINGS_FILE" ]; then
+ SETTINGS_FILE='settings.xml'
+fi
+
+
+TIMESTAMP=$(date +%C%y%m%dT%H%M%S)
+export BUILD_NUMBER="${TIMESTAMP}"
+
+# expected environment variables
+if [ -z "${MVN_NEXUSPROXY}" ]; then
+ echo "MVN_NEXUSPROXY environment variable not set. Cannot proceed"
+ exit 1
+fi
+MVN_NEXUSPROXY_HOST=$(echo $MVN_NEXUSPROXY |cut -f3 -d'/' | cut -f1 -d':')
+
+
+if [ -z "${SETTINGS_FILE}" ]; then
+ echo "SETTINGS_FILE environment variable not set. Cannot proceed"
+ exit 1
+fi
+
+
+if false; then   # the block below (docker login to all registries) is disabled
+
+# login to all docker registries
+DOCKER_REPOSITORIES="nexus3.onap.org:10001 nexus3.onap.org:10002 nexus3.onap.org:10003 nexus3.onap.org:10004"
+for DOCKER_REPOSITORY in $DOCKER_REPOSITORIES;
+do
+ USER=$(xpath -e "//servers/server[id='$DOCKER_REPOSITORY']/username/text()" "$SETTINGS_FILE")
+ PASS=$(xpath -e "//servers/server[id='$DOCKER_REPOSITORY']/password/text()" "$SETTINGS_FILE")
+
+ if [ -z "$USER" ]; then
+ echo "Error: no user provided"
+ fi
+
+ if [ -z "$PASS" ]; then
+ echo "Error: no password provided"
+ fi
+
+ [ -z "$PASS" ] && PASS_PROVIDED="<empty>" || PASS_PROVIDED="<password>"
+ echo docker login $DOCKER_REPOSITORY -u "$USER" -p "$PASS_PROVIDED"
+ docker login $DOCKER_REPOSITORY -u "$USER" -p "$PASS"
+done
+fi
+
+# set up env variables, get ready for template resolution
+export ONAPTEMPLATE_RAWREPOURL_org_onap_dcae="${MVN_NEXUSPROXY}/content/sites/raw"
+export ONAPTEMPLATE_PYPIURL_org_onap_dcae="${MVN_NEXUSPROXY}/content/sites/pypi"
+export ONAPTEMPLATE_DOCKERREGURL_org_onap_dcae="${MVN_DOCKERREG_URL}"
+
+
+# use the version text to detect which phase of the LF CICD process we are in: verify, merge, or (daily) release
+LF_PHASE="verify"
+
+# mvn phase in life cycle
+MVN_PHASE="$2"
+
+case $MVN_PHASE in
+clean)
+ echo "==> clean phase script"
+ ;;
+generate-sources)
+ echo "==> generate-sources phase script"
+
+ TEMPLATES=$(env |grep ONAPTEMPLATE)
+ echo "====> Resolving the following templates from environment variables"
+ echo "[$TEMPLATES]"
+ set -x #DEBUG
+ SELFFILE=$(echo $0 | rev | cut -f1 -d '/' | rev)
+ for TEMPLATE in $TEMPLATES; do
+ KEY=$(echo $TEMPLATE | cut -f1 -d'=')
+ VALUE=$(echo $TEMPLATE | cut -f2 -d'=')
+ VALUE2=$(echo $TEMPLATE | cut -f2 -d'=' |sed 's/\//\\\//g')
+ FILES=$(grep -rl "$KEY" .)
+
+ # assuming FILES is not longer than 2M bytes, the limit for variable value max size on this VM
+ for F in $FILES; do
+ if [[ $F == *"$SELFFILE" ]]; then
+ continue
+ fi
+ echo "======> Resolving template $KEY to value $VALUE for file $F"
+ sed -i "s/{{[[:space:]]*$KEY[[:space:]]*}}/$VALUE2/g" $F
+ done
+
+ #if [ ! -z "$FILES" ]; then
+ # echo "====> Resolving template $VALUE to value $VALUE"
+ # #CMD="grep -rl \"$VALUE\" | tr '\n' '\0' | xargs -0 sed -i \"s/{{[[:space:]]*$VALUE[[:space:]]*}}/$VALUE/g\""
+ # grep -rl "$KEY" | tr '\n' '\0' | xargs -0 sed -i 's/$KEY/$VALUE2/g'
+ # #echo $CMD
+ # #eval $CMD
+ #fi
+ done
+ echo "====> Done template resolving"
+ echo "====> Generate version.js file with: $(git describe --long --always)"
+ echo "exports.version=\"$(git describe --long --always)\";" > version.js
+ ;;
+compile)
+ echo "==> compile phase script"
+ ;;
+test)
+ echo "==> test phase script"
+ ;;
+package)
+ echo "==> package phase script"
+
+ DOCKER_IMAGE=${MVN_DOCKERREG_URL}/dcaegen2_platform/${MVN_PROJECT_ARTIFACTID}:${MVN_PROJECT_VERSION}
+ echo "==> docker build: ${DOCKER_IMAGE}"
+ docker build -t ${DOCKER_IMAGE} .
+ ;;
+install)
+ echo "==> install phase script"
+ ;;
+deploy)
+ echo "==> deploy phase script"
+
+ # prepare credential for curl use (raw repo)
+ #PASS=$(xpath -q -e "//servers/server[id='ecomp-raw']/password/text()" "$SETTINGS_FILE")
+ #export NETRC=$(mktemp)
+ #echo "machine $MVN_NEXUSPROXY_HOST login ${USER} password ${PASS}" >> "${NETRC}"
+ #set -x; curl -k --netrc-file '${NETRC}' --upload-file '{0}' '${REPO}/{2}-{1}'
+
+
+
+ # login to all docker registries
+ USER=$(xpath -e "//servers/server[id='$MVN_DOCKERREG_URL']/username/text()" "$SETTINGS_FILE")
+ PASS=$(xpath -e "//servers/server[id='$MVN_DOCKERREG_URL']/password/text()" "$SETTINGS_FILE")
+ if [ -z "$USER" ]; then
+ echo "Error: no user provided"
+ fi
+ if [ -z "$PASS" ]; then
+ echo "Error: no password provided"
+ fi
+ [ -z "$PASS" ] && PASS_PROVIDED="<empty>" || PASS_PROVIDED="<password>"
+ echo docker login $MVN_DOCKERREG_URL -u "$USER" -p "$PASS_PROVIDED"
+ docker login $MVN_DOCKERREG_URL -u "$USER" -p "$PASS"
+
+ #docker push
+ DOCKER_IMAGE=${MVN_DOCKERREG_URL}/dcaegen2/deployment-handler:$(git describe --always)
+ echo "==> docker push: ${DOCKER_IMAGE}"
+ docker push ${DOCKER_IMAGE}
+ ;;
+*)
+ echo "==> unprocessed phase"
+ ;;
+esac
+
diff --git a/package.json b/package.json
new file mode 100644
index 0000000..40cf785
--- /dev/null
+++ b/package.json
@@ -0,0 +1,19 @@
+{
+ "name": "DCAE-Orch-Deployment-Handler",
+ "version": "4.1.4",
+ "description": "DCAE Orchestrator Deployment Handler",
+ "main": "deployment-handler.js",
+ "dependencies": {
+ "adm-zip": "^0.4.7",
+ "body-parser": "^1.15.0",
+ "express": "^4.13.4",
+ "log4js": "^0.6.33",
+ "uuid": "^3.0.1"
+ },
+ "devDependencies": {},
+ "scripts": {
+ "test": "echo \"Error: no test specified\" && exit 1"
+ },
+ "author": "author",
+ "license": "Apache-2.0"
+}
diff --git a/pom.xml b/pom.xml
new file mode 100644
index 0000000..3d38c1d
--- /dev/null
+++ b/pom.xml
@@ -0,0 +1,279 @@
+<?xml version="1.0"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.onap.oparent</groupId>
+ <artifactId>oparent</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ </parent>
+ <!-- parent>
+ <groupId>org.onap.dcae.platform</groupId>
+ <artifactId>plugins</artifactId>
+ <version>1.0.0</version>
+ </parent -->
+
+ <!-- CHANGE THE FOLLOWING 3 ELEMENTS for your own repo -->
+ <groupId>org.onap.dcaegen2.platform</groupId>
+ <artifactId>deployment-handler</artifactId>
+ <name>deployment-handler</name>
+
+ <version>1.0.0-SNAPSHOT</version>
+ <url>http://maven.apache.org</url>
+ <properties>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <sonar.sources>.</sonar.sources>
+ <!-- customize the SONARQUBE URL -->
+ <sonar.host.url>http://localhost:9000</sonar.host.url>
+ <!-- below are language dependent -->
+ <sonar.language>js</sonar.language>
+ <sonar.pluginName>JS</sonar.pluginName>
+ <sonar.inclusions>**/*.js</sonar.inclusions>
+ </properties>
+
+ <build>
+ <finalName>${project.artifactId}-${project.version}</finalName>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>sonar-maven-plugin</artifactId>
+ <version>2.7.1</version>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+
+ <plugins>
+ <!-- plugin>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <version>2.4.1</version>
+ <configuration>
+ <descriptors>
+ <descriptor>assembly/dep.xml</descriptor>
+ </descriptors>
+ </configuration>
+ <executions>
+ <execution>
+ <id>make-assembly</id>
+ <phase>package</phase>
+ <goals>
+ <goal>single</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin -->
+
+ <!-- first disable the default Java plugins at various stages -->
+ <!-- maven-resources-plugin is called during "*resource" phases by default behavior. it prepares the resources
+ dir. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-resources-plugin</artifactId>
+ <version>2.6</version>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+
+ <!-- maven-compiler-plugin is called during "compile" phases by default behavior. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>3.1</version>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+
+ <!-- maven-jar-plugin is called during "compile" phase by default behavior. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-jar-plugin</artifactId>
+ <version>2.4</version>
+ <executions>
+ <execution>
+ <id>default-jar</id>
+ <phase/>
+ </execution>
+ </executions>
+ </plugin>
+
+ <!-- maven-install-plugin is called during "install" phase by default behavior. it tries to copy stuff under
+ target dir to ~/.m2. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-install-plugin</artifactId>
+ <version>2.4</version>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+
+ <!-- maven-surefire-plugin is called during "test" phase by default behavior. it triggers junit test.
+ we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <version>2.12.4</version>
+ <configuration>
+ <skipTests>true</skipTests>
+ </configuration>
+ </plugin>
+
+
+ <!-- now we configure custom action (calling a script) at various lifecycle phases -->
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <version>1.2.1</version>
+ <executions>
+ <execution>
+ <id>clean phase script</id>
+ <phase>clean</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>${session.executionRootDirectory}/mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>clean</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <!--MVN_DOCKERREG_URL>${docker.push.registry}</MVN_DOCKERREG_URL -->
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>generate-sources script</id>
+ <phase>generate-sources</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>generate-sources</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <!-- MVN_DOCKERREG_URL>${docker.push.registry}</MVN_DOCKERREG_URL-->
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>compile script</id>
+ <phase>compile</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>compile</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <!--MVN_DOCKERREG_URL>${docker.push.registry}</MVN_DOCKERREG_URL -->
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>package script</id>
+ <phase>package</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>package</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <!--MVN_DOCKERREG_URL>${docker.push.registry}</MVN_DOCKERREG_URL -->
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>test script</id>
+ <phase>test</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>test</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <!--MVN_DOCKERREG_URL>${docker.push.registry}</MVN_DOCKERREG_URL-->
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>install script</id>
+ <phase>install</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>install</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <!--MVN_DOCKERREG_URL>${docker.push.registry}</MVN_DOCKERREG_URL-->
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>deploy script</id>
+ <phase>deploy</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>deploy</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <!--MVN_DOCKERREG_URL>${docker.push.registry}</MVN_DOCKERREG_URL-->
+ </environmentVariables>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/set_version.sh b/set_version.sh
new file mode 100755
index 0000000..dfb7e8e
--- /dev/null
+++ b/set_version.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+echo "exports.version=\"$(git describe --long --always)\";" > version.js
diff --git a/version.js b/version.js
new file mode 100644
index 0000000..1fa7c69
--- /dev/null
+++ b/version.js
@@ -0,0 +1 @@
+exports.version="unspecified";