diff options
148 files changed, 13437 insertions, 306 deletions
diff --git a/.gitreview b/.gitreview index 22476ab029..3a5f50e02f 100644 --- a/.gitreview +++ b/.gitreview @@ -1,4 +1,4 @@ [gerrit] host=gerrit.onap.org port=29418 -project=mso.git +project=so.git diff --git a/adapters/mso-adapter-utils/pom.xml b/adapters/mso-adapter-utils/pom.xml index f80bfeb2cb..62e7153473 100644 --- a/adapters/mso-adapter-utils/pom.xml +++ b/adapters/mso-adapter-utils/pom.xml @@ -2,11 +2,11 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>adapters</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-adapter-utils</artifactId> <name>mso-adapter-utils</name> <description>Common MSO utilities, including Openstack client wrappers.</description> @@ -44,32 +44,32 @@ <dependencies> <dependency> - <groupId>org.openecomp.mso.libs.openstack-java-sdk</groupId> + <groupId>org.openecomp.so.libs.openstack-java-sdk</groupId> <artifactId>keystone-client</artifactId> <version>${openstack.version}</version> </dependency> <dependency> - <groupId>org.openecomp.mso.libs.openstack-java-sdk</groupId> + <groupId>org.openecomp.so.libs.openstack-java-sdk</groupId> <artifactId>heat-client</artifactId> <version>${openstack.version}</version> </dependency> <dependency> - <groupId>org.openecomp.mso.libs.openstack-java-sdk</groupId> + <groupId>org.openecomp.so.libs.openstack-java-sdk</groupId> <artifactId>quantum-client</artifactId> <version>${openstack.version}</version> </dependency> <dependency> - <groupId>org.openecomp.mso.libs.openstack-java-sdk.client-connectors</groupId> + <groupId>org.openecomp.so.libs.openstack-java-sdk.client-connectors</groupId> <artifactId>http-connector</artifactId> <version>${openstack.version}</version> </dependency> <dependency> - 
<groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>common</artifactId> <version>${project.version}</version> </dependency> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>mso-catalog-db</artifactId> <version>${project.version}</version> </dependency> diff --git a/adapters/mso-adapters-rest-interface/pom.xml b/adapters/mso-adapters-rest-interface/pom.xml index 8619f4bfa6..b83d6f82f8 100644 --- a/adapters/mso-adapters-rest-interface/pom.xml +++ b/adapters/mso-adapters-rest-interface/pom.xml @@ -2,11 +2,11 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>adapters</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-adapters-rest-interface</artifactId> <packaging>jar</packaging> <name>mso-adapters-rest-interface</name> @@ -44,7 +44,7 @@ <version>3.0.19.Final</version> </dependency> <dependency> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-adapter-utils</artifactId> <version>${project.version}</version> </dependency> diff --git a/adapters/mso-catalog-db-adapter/pom.xml b/adapters/mso-catalog-db-adapter/pom.xml index e7c4b533c7..4459660240 100644 --- a/adapters/mso-catalog-db-adapter/pom.xml +++ b/adapters/mso-catalog-db-adapter/pom.xml @@ -2,12 +2,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>adapters</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> - <groupId>org.openecomp.mso.adapters</groupId> + 
<groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-catalog-db-adapter</artifactId> <packaging>war</packaging> @@ -65,7 +65,7 @@ <dependencies> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>mso-catalog-db</artifactId> <version>${project.version}</version> </dependency> @@ -95,7 +95,7 @@ <version>3.0.16.Final</version> </dependency> <!--<dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>status-control</artifactId> <version>${project.version}</version> </dependency>--> diff --git a/adapters/mso-network-adapter-async-client/pom.xml b/adapters/mso-network-adapter-async-client/pom.xml index 7fad404f9b..6e0c9bf702 100644 --- a/adapters/mso-network-adapter-async-client/pom.xml +++ b/adapters/mso-network-adapter-async-client/pom.xml @@ -2,11 +2,11 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>adapters</artifactId>
<version>1.1.0-SNAPSHOT</version> </parent>
- <groupId>org.openecomp.mso.adapters</groupId>
+ <groupId>org.openecomp.so.adapters</groupId>
<artifactId>mso-network-adapter-async-client</artifactId>
<packaging>jar</packaging>
<name>mso-network-adapter-async-client</name>
diff --git a/adapters/mso-network-adapter/pom.xml b/adapters/mso-network-adapter/pom.xml index 207d58abdf..94e5759ee3 100644 --- a/adapters/mso-network-adapter/pom.xml +++ b/adapters/mso-network-adapter/pom.xml @@ -2,11 +2,11 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>adapters</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-network-adapter</artifactId> <packaging>war</packaging> <name>mso-network-adapter</name> @@ -53,7 +53,7 @@ <version>2.2.7</version> </dependency> <dependency> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-adapter-utils</artifactId> <version>${project.version}</version> </dependency> @@ -106,17 +106,17 @@ <scope>provided</scope> </dependency> <dependency> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-adapter-utils</artifactId> <version>${project.version}</version> </dependency> <dependency> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-network-adapter-async-client</artifactId> <version>${project.version}</version> </dependency> <dependency> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-adapters-rest-interface</artifactId> <version>${project.version}</version> </dependency> @@ -145,7 +145,7 @@ <scope>provided</scope> </dependency> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>status-control</artifactId> <version>${project.version}</version> </dependency> diff --git a/adapters/mso-requests-db-adapter/pom.xml 
b/adapters/mso-requests-db-adapter/pom.xml index c1f78488ad..c3ed987cab 100644 --- a/adapters/mso-requests-db-adapter/pom.xml +++ b/adapters/mso-requests-db-adapter/pom.xml @@ -2,12 +2,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>adapters</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-requests-db-adapter</artifactId> <packaging>war</packaging> @@ -15,7 +15,7 @@ <dependencies> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>mso-requests-db</artifactId> <version>${project.version}</version> </dependency> @@ -40,7 +40,7 @@ </exclusions> </dependency> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>status-control</artifactId> <version>${project.version}</version> </dependency> @@ -85,7 +85,7 @@ <version>2.2.7</version> </dependency> <dependency> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-adapter-utils</artifactId> <version>${project.version}</version> </dependency> diff --git a/adapters/mso-sdnc-adapter/pom.xml b/adapters/mso-sdnc-adapter/pom.xml index f439459955..7c1ba1a47f 100644 --- a/adapters/mso-sdnc-adapter/pom.xml +++ b/adapters/mso-sdnc-adapter/pom.xml @@ -2,11 +2,11 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>adapters</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> 
<artifactId>mso-sdnc-adapter</artifactId> <packaging>war</packaging> <name>mso-sdnc-adapter</name> @@ -67,7 +67,7 @@ </build> <dependencies> <dependency> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-adapter-utils</artifactId> <version>${project.version}</version> @@ -91,12 +91,12 @@ <scope>provided</scope> </dependency> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>status-control</artifactId> <version>${project.version}</version> </dependency> <dependency> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-adapters-rest-interface</artifactId> <version>${project.version}</version> </dependency> diff --git a/adapters/mso-tenant-adapter/pom.xml b/adapters/mso-tenant-adapter/pom.xml index d57529cb43..3ceb853f27 100644 --- a/adapters/mso-tenant-adapter/pom.xml +++ b/adapters/mso-tenant-adapter/pom.xml @@ -2,11 +2,11 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>adapters</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-tenant-adapter</artifactId> <packaging>war</packaging> <name>mso-tenant-adapter</name> @@ -72,7 +72,7 @@ <version>2.2.7</version> </dependency> <dependency> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-adapter-utils</artifactId> <version>${project.version}</version> </dependency> @@ -119,12 +119,12 @@ </build> <dependencies> <dependency> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-adapter-utils</artifactId> <version>${project.version}</version> 
</dependency> <dependency> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-adapters-rest-interface</artifactId> <version>${project.version}</version> </dependency> @@ -147,7 +147,7 @@ <scope>provided</scope> </dependency> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>status-control</artifactId> <version>${project.version}</version> </dependency> diff --git a/adapters/mso-vnf-adapter-async-client/pom.xml b/adapters/mso-vnf-adapter-async-client/pom.xml index 27e555b815..c42c002dc1 100644 --- a/adapters/mso-vnf-adapter-async-client/pom.xml +++ b/adapters/mso-vnf-adapter-async-client/pom.xml @@ -2,11 +2,11 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>adapters</artifactId>
<version>1.1.0-SNAPSHOT</version> </parent>
- <groupId>org.openecomp.mso.adapters</groupId>
+ <groupId>org.openecomp.so.adapters</groupId>
<artifactId>mso-vnf-adapter-async-client</artifactId>
<packaging>jar</packaging>
<name>mso-vnf-adapter-async-client</name>
diff --git a/adapters/mso-vnf-adapter/pom.xml b/adapters/mso-vnf-adapter/pom.xml index ced5c3ae6e..ba0c87ab5e 100644 --- a/adapters/mso-vnf-adapter/pom.xml +++ b/adapters/mso-vnf-adapter/pom.xml @@ -2,11 +2,11 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>adapters</artifactId>
<version>1.1.0-SNAPSHOT</version> </parent>
- <groupId>org.openecomp.mso.adapters</groupId>
+ <groupId>org.openecomp.so.adapters</groupId>
<artifactId>mso-vnf-adapter</artifactId>
<packaging>war</packaging>
<name>mso-vnf-adapter</name>
@@ -56,7 +56,7 @@ </executions>
<dependencies>
<dependency>
- <groupId>org.openecomp.mso.adapters</groupId>
+ <groupId>org.openecomp.so.adapters</groupId>
<artifactId>mso-adapter-utils</artifactId>
<version>${project.version}</version>
</dependency>
@@ -107,17 +107,17 @@ </build>
<dependencies>
<dependency>
- <groupId>org.openecomp.mso.adapters</groupId>
+ <groupId>org.openecomp.so.adapters</groupId>
<artifactId>mso-adapter-utils</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
- <groupId>org.openecomp.mso.adapters</groupId>
+ <groupId>org.openecomp.so.adapters</groupId>
<artifactId>mso-adapters-rest-interface</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
- <groupId>org.openecomp.mso.adapters</groupId>
+ <groupId>org.openecomp.so.adapters</groupId>
<artifactId>mso-vnf-adapter-async-client</artifactId>
<version>${project.version}</version>
</dependency>
@@ -140,13 +140,13 @@ <scope>provided</scope>
</dependency>
<dependency>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>status-control</artifactId>
<version>${project.version}</version>
</dependency>
<!-- <dependency> -->
- <!-- <groupId>org.openecomp.mso</groupId> -->
+ <!-- <groupId>org.openecomp.so</groupId> -->
<!-- <artifactId>mso-catalog-db</artifactId> -->
<!-- <version>${project.version}</version> -->
<!-- </dependency> -->
diff --git a/adapters/mso-workflow-message-adapter/pom.xml b/adapters/mso-workflow-message-adapter/pom.xml index bde619f509..5ee3fbfa38 100644 --- a/adapters/mso-workflow-message-adapter/pom.xml +++ b/adapters/mso-workflow-message-adapter/pom.xml @@ -2,11 +2,11 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>adapters</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-workflow-message-adapter</artifactId> <packaging>war</packaging> <name>mso-workflow-message-adapter</name> @@ -29,12 +29,12 @@ <dependencies> <dependency> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-adapter-utils</artifactId> <version>${project.version}</version> </dependency> <dependency> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-adapters-rest-interface</artifactId> <version>${project.version}</version> </dependency> @@ -62,7 +62,7 @@ <version>4.5.2</version> </dependency> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>status-control</artifactId> <version>${project.version}</version> </dependency> diff --git a/adapters/pom.xml b/adapters/pom.xml index a96b04aed9..28fd4093ff 100644 --- a/adapters/pom.xml +++ b/adapters/pom.xml @@ -2,12 +2,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> - <groupId>org.openecomp</groupId> - <artifactId>mso</artifactId> + <groupId>org.openecomp.so</groupId> + <artifactId>so</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> - <groupId>org.openecomp.mso</groupId> + 
<groupId>org.openecomp.so</groupId> <artifactId>adapters</artifactId> <name>MSO Adapters</name> <description>Adapters for MSO</description> diff --git a/aria/aria-rest-java-client/pom.xml b/aria/aria-rest-java-client/pom.xml new file mode 100755 index 0000000000..12019f0228 --- /dev/null +++ b/aria/aria-rest-java-client/pom.xml @@ -0,0 +1,43 @@ +<!-- +/* + * ============LICENSE_START=================================================== + * Copyright (c) 2017 Cloudify.co. All rights reserved. + * =================================================================== + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ * ============LICENSE_END==================================================== +*/ +--> +<?xml version="1.0" encoding="UTF-8"?> +<project xmlns="http://maven.apache.org/POM/4.0.0" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + <modelVersion>4.0.0</modelVersion> + + <groupId>aria-client</groupId> + <artifactId>aria-client</artifactId> + <version>1.0-SNAPSHOT</version> + + <dependencies> + <dependency> + <groupId>org.glassfish.jersey.core</groupId> + <artifactId>jersey-client</artifactId> + <version>2.26-b03</version> + </dependency> + <dependency> + <groupId>org.glassfish.jersey.media</groupId> + <artifactId>jersey-media-json-jackson1</artifactId> + <version>2.26-b03</version> + </dependency> + </dependencies> + +</project> diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/AriaClient.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/AriaClient.java new file mode 100755 index 0000000000..d6e9f2434d --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/AriaClient.java @@ -0,0 +1,201 @@ +/*
+ * ============LICENSE_START===================================================
+ * Copyright (c) 2017 Cloudify.co. All rights reserved.
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ * ============LICENSE_END====================================================
+*/
+package com.gigaspaces.aria.rest.client;
+
+import com.gigaspaces.aria.rest.client.exceptions.StorageException;
+import com.gigaspaces.aria.rest.client.exceptions.ValidationException;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Created by DeWayne on 7/12/2017.
+ */
+public interface AriaClient {
+
+ /**
+ * Installs a service template
+ *
+ * @param template
+ * @throws ValidationException
+ * @throws StorageException
+ */
+ public void install_service_template(ServiceTemplate template)throws ValidationException, StorageException, Exception;
+
+ /**
+ * Validate a service template
+ * @param template
+ * @return
+ */
+ public ValidationResult validate_service_template(ServiceTemplate template)throws Exception;
+
+ /**
+ * Fetch a list of stored service templates
+ *
+ * @return
+ */
+ public List<? extends ServiceTemplate> list_service_templates();
+
+ /**
+ * Delete an existing template
+ *
+ * @param template_id
+ * @throws IllegalArgumentException
+ */
+ public void delete_service_template(int template_id) throws IllegalArgumentException, Exception;
+
+ /**
+ * Returns a list of node templates for a given service template
+ * @param template_id
+ * @return
+ */
+ List<? extends NodeTemplate> list_nodes(int template_id);
+
+ /**
+ * Fetch a given node template
+ *
+ * @param node_id
+ * @return
+ * @throws IllegalArgumentException
+ */
+ public NodeTemplate get_node( int node_id) throws IllegalArgumentException;
+
+ /**
+ * List all services
+ *
+ * @return
+ */
+ public List<? extends Service> list_services();
+
+ /**
+ * Fetch the specified service
+ *
+ * @param service_id
+ * @return
+ * @throws IllegalArgumentException
+ */
+ public Service get_service(int service_id) throws IllegalArgumentException;
+
+ /**
+ * Fetch the outputs of the specified service
+ *
+ * @param service_id
+ * @return
+ * @throws IllegalArgumentException
+ */
+ public List<? extends Output> list_service_outputs(int service_id) throws IllegalArgumentException;
+
+ /**
+ * Fetch the inputs of the specified service
+ *
+ * @param service_id
+ * @return
+ * @throws IllegalArgumentException
+ */
+ public List<? extends Input> list_service_inputs(int service_id) throws IllegalArgumentException;
+
+ /**
+ * Create a service
+ *
+ * @param template_id
+ * @param service_name
+ * @param inputs
+ * @throws Exception
+ */
+ public void create_service(int template_id, String service_name, List<Input> inputs)throws Exception;
+
+ /**
+ * Delete the specified service
+ *
+ * @param service_id
+ * @throws IllegalArgumentException
+ */
+ public void delete_service(int service_id)throws Exception;
+
+ /**
+ * List workflows for the provided service
+ *
+ * @param service_id
+ * @return
+ * @throws IllegalArgumentException
+ */
+ public List<? extends Workflow> list_workflows(int service_id)throws IllegalArgumentException;
+
+ /**
+ * Fetch the specified workflow
+ *
+ * @param workflow_id
+ * @return the requested Workflow
+ * @throws IllegalArgumentException when the workflow_id doesn't exist
+ */
+ public Workflow get_workflow(int workflow_id)throws IllegalArgumentException;
+
+ /**
+ * List all executions
+ *
+ * @return
+ * @throws Exception
+ */
+ public List<? extends Execution> list_executions()throws Exception;
+
+ /**
+ * List executions for provided service
+ *
+ * @param service_id
+ * @return
+ * @throws Exception
+ */
+ public List<? extends Execution> list_executions(int service_id)throws Exception;
+
+ /**
+ * Fetch the specified execution
+ *
+ * @param execution_id
+ * @return
+ * @throws IllegalArgumentException
+ */
+ public Execution get_execution(int execution_id)throws IllegalArgumentException;
+
+ /**
+ * Starts an execution
+ *
+ * @param service_id
+ * @param workflow_name
+ * @param details
+ * @return the execution id
+ * @throws Exception
+ */
+ public int start_execution(int service_id, String workflow_name, ExecutionDetails details)throws Exception;
+
+ /**
+ * Resumes an interrupted execution
+ *
+ * @param execution_id
+ * @param details
+ * @throws IllegalArgumentException
+ */
+ public void resume_execution(int execution_id, ExecutionDetails details)throws IllegalArgumentException;
+
+ /**
+ * Cancels the specified execution
+ *
+ * @param execution_id
+ * @throws IllegalArgumentException
+ */
+ public void cancel_execution(int execution_id)throws Exception;
+}
diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/AriaClientFactory.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/AriaClientFactory.java new file mode 100755 index 0000000000..a97384e084 --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/AriaClientFactory.java @@ -0,0 +1,31 @@ +/*
+ * ============LICENSE_START===================================================
+ * Copyright (c) 2017 Cloudify.co. All rights reserved.
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ * ============LICENSE_END====================================================
+*/
+
+package com.gigaspaces.aria.rest.client;
+
+import java.net.URL;
+
+/**
+ * Created by DeWayne on 7/12/2017.
+ */
+public class AriaClientFactory {
+
+ AriaClient createRestClient(String protocol, String address, int port, String version){
+ return new AriaRestClient(protocol, address, port, version);
+ }
+}
diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/AriaRestClient.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/AriaRestClient.java new file mode 100755 index 0000000000..a4e453395d --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/AriaRestClient.java @@ -0,0 +1,336 @@ +/*
+ * ============LICENSE_START===================================================
+ * Copyright (c) 2017 Cloudify.co. All rights reserved.
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ * ============LICENSE_END====================================================
+*/
+package com.gigaspaces.aria.rest.client;
+
+import com.gigaspaces.aria.rest.client.exceptions.StorageException;
+import com.gigaspaces.aria.rest.client.exceptions.ValidationException;
+import org.codehaus.jackson.JsonFactory;
+import org.codehaus.jackson.JsonNode;
+import org.codehaus.jackson.jaxrs.JacksonJsonProvider;
+import org.codehaus.jackson.map.ObjectMapper;
+import sun.reflect.generics.reflectiveObjects.NotImplementedException;
+
+import javax.ws.rs.client.Client;
+import javax.ws.rs.client.ClientBuilder;
+import javax.ws.rs.client.Entity;
+import javax.ws.rs.client.WebTarget;
+import javax.ws.rs.core.GenericType;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.List;
+
+import static javax.ws.rs.client.Entity.entity;
+
+/**
+ * Created by DeWayne on 7/12/2017.
+ */
+public class AriaRestClient implements AriaClient {
+ private Client client=null;
+ private WebTarget base_target=null;
+
+ /**
+ * Construct an Aria REST client
+ *
+ * @param protocol either http or https
+ * @param address the IP address or host name
+ * @param port the port of the service
+ * @param version the api version
+ */
+ public AriaRestClient(String protocol, String address, int port, String version){
+ this.client = ClientBuilder.newBuilder().register(JacksonJsonProvider.class).build();
+ base_target = client.target(protocol+"://"+address+":"+port+"/api/"+version);
+ }
+
+ /**
+ * Installs a service template
+ *
+ * @param template the template object
+ * @throws ValidationException
+ * @throws StorageException
+ */
+ public void install_service_template(ServiceTemplate template) throws ValidationException, StorageException, Exception {
+
+ Response response = base_target.path("templates/"+template.getName()).request(MediaType.APPLICATION_JSON).put(Entity.entity(
+ "{\"service-template-path\":\""+template.getURI().toString()+"\""+
+ ",\"service-template-filename\":\""+template.getFilename()+"\"", MediaType.APPLICATION_JSON));
+
+ if(response.getStatus() == 500){
+ throw new StorageException(response.readEntity(String.class));
+ }
+ else if(response.getStatus() == 400){
+ throw new ValidationException(response.readEntity(String.class));
+ }
+ else if(response.getStatus()>199 && response.getStatus() <300){
+ return;
+ }
+ else{
+ throw new Exception("Error installing template: "+response.getStatus()+" "+ response.readEntity(String.class));
+ }
+ }
+
+ public ValidationResult validate_service_template(ServiceTemplate template)throws Exception{
+ Response response = base_target.path("templates").request(MediaType.APPLICATION_JSON).post(Entity.entity(
+ "{\"service-template-path\":\""+template.getURI().toString()+"\""+
+ ",\"service-template-filename\":\""+template.getFilename()+"\"}", MediaType.APPLICATION_JSON));
+
+ ValidationResultImpl result = new ValidationResultImpl();
+ if(response.getStatus() >= 200 && response.getStatus() < 300){
+ result.setFailed(false);
+ }
+ else if(response.getStatus()==400){
+ result.setFailed(true);
+ }
+ else{
+ throw new Exception("received error response '"+ response.getStatus()+"':"+response.readEntity(String.class));
+ }
+ return result;
+
+ }
+
+ /**
+ *
+ * @return a list of service templates
+ */
+ public List<? extends ServiceTemplate> list_service_templates(){
+ List<? extends ServiceTemplate> templates = base_target.path("templates").request(MediaType.APPLICATION_JSON).get(new GenericType<List<ServiceTemplateImpl>>(){});
+
+ return templates;
+ }
+
+
+ /**
+ * Deletes the specified template.
+ *
+ * TODO: Error handling is a little blunt. Need to describe failures better
+ *
+ * @param template_id the template id to delete
+ * @throws IllegalArgumentException thrown when the template can't be deleted
+ * @throws Exception other server side errors
+ */
+ public void delete_service_template(int template_id) throws IllegalArgumentException, Exception{
+ Response response = base_target.path("templates/"+template_id).request(MediaType.APPLICATION_JSON).delete();
+
+ if(response.getStatus()>=200 && response.getStatus()<300){
+ return;
+ }
+ else if(response.getStatus()==400){
+ throw new IllegalArgumentException("Error deleting template '"+template_id+"'");
+ }
+ else{
+ throw new Exception("Error processing request. Return code = "+response.getStatus());
+ }
+ }
+
+ /**
+ * List the node templates for a given template id
+ *
+ * @param template_id
+ * @return
+ */
+ public List<? extends NodeTemplate> list_nodes(int template_id) {
+ List<? extends NodeTemplate> nodes = base_target.path("templates/"+template_id+"/nodes").request(MediaType.APPLICATION_JSON).get(new GenericType<List<NodeTemplateImpl>>(){});
+ return nodes;
+ }
+
+    /**
+     * Get a specific node by id
+     *
+     * @param node_id the node id
+     * @return the node template reported by the server
+     * @throws IllegalArgumentException
+     */
+    public NodeTemplate get_node(int node_id) throws IllegalArgumentException {
+        return base_target.path("nodes/"+node_id)
+                .request(MediaType.APPLICATION_JSON)
+                .get(NodeTemplateImpl.class);
+    }
+
+    /**
+     * List all services known to the server.
+     *
+     * @return the services reported by the "services" endpoint
+     */
+    public List<? extends Service> list_services() {
+        return base_target.path("services")
+                .request(MediaType.APPLICATION_JSON)
+                .get(new GenericType<List<ServiceImpl>>(){});
+    }
+
+    /**
+     * Not implemented yet.
+     *
+     * @param service_id ignored
+     * @throws NotImplementedException always
+     */
+    public Service get_service(int service_id) throws IllegalArgumentException {
+        throw new NotImplementedException();
+    }
+
+    /**
+     * List the outputs of the given service.
+     *
+     * Bug fix: the previous code ignored service_id entirely and queried the
+     * generic "services" collection, so it could never return the outputs of
+     * the requested service.
+     *
+     * @param service_id the service whose outputs are requested
+     * @return the outputs reported by the server
+     * @throws IllegalArgumentException
+     */
+    public List<? extends Output> list_service_outputs(int service_id) throws IllegalArgumentException {
+        // NOTE(review): endpoint assumed to be services/{id}/outputs, mirroring the
+        // services/{id}/workflows and services/{id}/executions paths used elsewhere
+        // in this class — confirm against the ARIA REST API.
+        List<? extends Output> outputs = base_target.path("services/"+service_id+"/outputs").request(MediaType.APPLICATION_JSON).get(new GenericType<List<OutputImpl>>(){});
+        return outputs;
+    }
+
+    /**
+     * List the inputs of the given service.
+     *
+     * Bug fix: the previous code ignored service_id entirely and queried the
+     * generic "services" collection, so it could never return the inputs of
+     * the requested service.
+     *
+     * @param service_id the service whose inputs are requested
+     * @return the inputs reported by the server
+     * @throws IllegalArgumentException
+     */
+    public List<? extends Input> list_service_inputs(int service_id) throws IllegalArgumentException {
+        // NOTE(review): endpoint assumed to be services/{id}/inputs, mirroring the
+        // services/{id}/workflows and services/{id}/executions paths used elsewhere
+        // in this class — confirm against the ARIA REST API.
+        List<? extends Input> inputs = base_target.path("services/"+service_id+"/inputs").request(MediaType.APPLICATION_JSON).get(new GenericType<List<InputImpl>>(){});
+        return inputs;
+    }
+
+    /**
+     * Create a service based on the supplied template
+     *
+     * @param template_id the template to create the service for
+     * @param service_name a name for the service
+     * @param inputs an optional list of inputs for the service (can be null)
+     * @throws Exception when the server rejects the request
+     */
+    public void create_service(int template_id, String service_name, List<Input> inputs) throws Exception {
+
+        // Bug fix: inputsToJson(null) could return null, which used to be
+        // concatenated into the invalid request body "{null}".  Send an empty
+        // JSON object when there are no inputs.
+        String inputsJson = inputsToJson(inputs);
+        String json = (inputsJson == null) ? "{}" : "{"+inputsJson+"}";
+
+        Response response = base_target.path("templates/"+template_id+"/services/"+service_name).
+                request(MediaType.APPLICATION_JSON).post(
+                        Entity.entity(json, MediaType.APPLICATION_JSON)
+                );
+
+        if(!responseOK(response)){
+            throw new Exception("create service failed:"+response.getStatus()+" "+ response.readEntity(String.class));
+        }
+    }
+
+    /**
+     * Delete the given service.
+     *
+     * @param service_id id of the service to remove
+     * @throws Exception when the server reports a non-2xx status
+     */
+    public void delete_service(int service_id) throws Exception {
+        Response response = base_target.path("services/"+service_id)
+                .request(MediaType.APPLICATION_JSON)
+                .delete();
+        if(!responseOK(response)){
+            throw new Exception("delete service failed: "+response.getStatus()+" "+ response.readEntity(String.class));
+        }
+    }
+
+    /**
+     * List user workflows for supplied service
+     *
+     * @param service_id id of the service whose workflows are listed
+     * @return the workflows reported by the server
+     * @throws IllegalArgumentException
+     */
+    public List<? extends Workflow> list_workflows(int service_id) throws IllegalArgumentException {
+        return base_target.path("services/"+service_id+"/workflows")
+                .request(MediaType.APPLICATION_JSON)
+                .get(new GenericType<List<WorkflowImpl>>(){});
+    }
+
+    /**
+     * Not implemented yet.
+     *
+     * @param workflow_id ignored
+     * @throws NotImplementedException always
+     */
+    public Workflow get_workflow(int workflow_id) throws IllegalArgumentException {
+        throw new NotImplementedException();
+    }
+
+    /**
+     * List all executions
+     *
+     * @return every execution known to the server
+     * @throws Exception
+     */
+    public List<? extends Execution> list_executions() throws Exception {
+        return base_target.path("executions")
+                .request(MediaType.APPLICATION_JSON)
+                .get(new GenericType<List<ExecutionImpl>>(){});
+    }
+
+    /**
+     * List executions for specified service
+     *
+     * @param service_id id of the service whose executions are listed
+     * @return the executions reported by the server
+     * @throws Exception
+     */
+    public List<? extends Execution> list_executions(int service_id) throws Exception {
+        return base_target.path("services/"+service_id+"/executions")
+                .request(MediaType.APPLICATION_JSON)
+                .get(new GenericType<List<ExecutionImpl>>(){});
+    }
+
+    /**
+     * Get details about a specified execution
+     *
+     * @param execution_id id of the execution to fetch
+     * @return the execution reported by the server
+     * @throws IllegalArgumentException
+     */
+    public Execution get_execution(int execution_id) throws IllegalArgumentException {
+        return base_target.path("executions/"+execution_id)
+                .request(MediaType.APPLICATION_JSON)
+                .get(ExecutionImpl.class);
+    }
+
+    /**
+     * Start an execution for the specified service
+     *
+     * @param service_id the service to run the execution for
+     * @param workflow_name the name of the workflow to execute
+     * @param details details controlling execution operation
+     * @return the execution id, or -1 when the server response carries no "id" field
+     * @throws Exception when the server reports a non-2xx status
+     */
+    public int start_execution(int service_id, String workflow_name, ExecutionDetails details) throws Exception {
+        StringBuilder json=new StringBuilder("{");
+        if(details.getExecutor().length()>0){
+            json.append("\"executor\":\"").append(details.getExecutor()).append("\",");
+        }
+        if(details.getInputs()!=null){
+            // Bug fix: the inputs fragment used to be appended with no trailing
+            // comma, producing invalid JSON such as
+            //   {"inputs":{...}"task_max_attempts":30,...}
+            json.append(inputsToJson(details.getInputs())).append(",");
+        }
+        json.append("\"task_max_attempts\":").append(details.getTaskMaxAttempts()).append(",");
+        json.append("\"task_retry_interval\":").append(details.getTaskRetryInterval()).append("}");
+
+        // Bug fix: removed a stray System.out.println debug trace of the payload.
+
+        Response response = base_target.path("services/"+service_id+"/executions/"+workflow_name).request(MediaType.APPLICATION_JSON).
+                post(Entity.entity(json.toString(), MediaType.APPLICATION_JSON));
+
+        if(!responseOK(response)){
+            throw new Exception("start execution failed: "+response.getStatus()+" "+response.readEntity(String.class));
+        }
+
+        ObjectMapper mapper = new ObjectMapper(new JsonFactory());
+        JsonNode rootNode = mapper.readTree(response.readEntity(String.class));
+        // Guard against a response without an "id" field (get() would return null).
+        JsonNode idNode = rootNode.get("id");
+        return (idNode == null) ? -1 : idNode.asInt(-1);
+    }
+
+    /**
+     * Resume the given execution.
+     *
+     * @param execution_id the execution to resume
+     * @param details controls the executor and whether failed tasks are retried
+     * @throws IllegalArgumentException when the server rejects the request
+     */
+    public void resume_execution(int execution_id, ExecutionDetails details) throws IllegalArgumentException {
+        StringBuilder json=new StringBuilder("{");
+        if(details.getExecutor().length()>0){
+            json.append("\"executor\":\"").append(details.getExecutor()).append("\",");
+        }
+        json.append("\"retry_failed_tasks\":").append(details.isRetry_failed_tasks()).append("}");
+        Response response = base_target.path("executions/"+execution_id).request(MediaType.APPLICATION_JSON).
+                post(Entity.entity(json.toString(), MediaType.APPLICATION_JSON));
+        // Bug fix: the response status used to be silently ignored, so a failed
+        // resume looked like success to the caller.
+        if(!responseOK(response)){
+            throw new IllegalArgumentException("resume execution failed: "+response.getStatus()+" "+response.readEntity(String.class));
+        }
+    }
+
+    /**
+     * Cancel the given execution.
+     *
+     * @param execution_id the execution to cancel
+     * @throws Exception when the server reports a non-2xx status
+     */
+    public void cancel_execution(int execution_id) throws Exception {
+        Response response = base_target.path("executions/"+execution_id).request(MediaType.APPLICATION_JSON).delete();
+        if(!responseOK(response)){
+            // Bug fix: the message previously said "delete service failed", a
+            // copy/paste left-over from delete_service().
+            throw new Exception("cancel execution failed: "+response.getStatus()+" "+ response.readEntity(String.class));
+        }
+    }
+
+    /**
+     * -----
+     * ----- PRIVATE METHODS
+     * -----
+     */
+
+    /** @return true when the response status is in the 2xx success range */
+    private boolean responseOK(Response response){
+        int status = response.getStatus();
+        return status >= 200 && status < 300;
+    }
+
+    /**
+     * Render the supplied inputs as the JSON object fragment
+     * "inputs":{"name":"value",...}.
+     *
+     * Bug fixes: the closing brace of the inputs object was missing, so every
+     * caller produced structurally invalid JSON; a null argument returned null,
+     * pushing NPE/"null"-concatenation hazards onto callers.  Null or empty
+     * input now yields the well-formed fragment "inputs":{}.
+     *
+     * @param inputs the inputs to render; may be null or empty
+     * @return the complete JSON fragment (never null)
+     */
+    private String inputsToJson(List<Input> inputs){
+        StringBuilder sb=new StringBuilder("\"inputs\":{");
+        if(inputs!=null){
+            for(Input input:inputs){
+                sb.append("\"").append(input.getName()).append("\":\"").append(input.getValue()).append("\",");
+            }
+            if(inputs.size()>0)sb.deleteCharAt(sb.length()-1); //trim comma
+        }
+        sb.append("}");
+        return sb.toString();
+    }
+}
diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/Execution.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/Execution.java new file mode 100755 index 0000000000..ab742833e6 --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/Execution.java @@ -0,0 +1,29 @@ +/*
+ * ============LICENSE_START===================================================
+ * Copyright (c) 2017 Cloudify.co. All rights reserved.
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ * ============LICENSE_END====================================================
+*/
+package com.gigaspaces.aria.rest.client;
+
+/**
+ * Created by DeWayne on 7/12/2017.
+ */
+public interface Execution {
+    /** @return the server-assigned id of this execution */
+    int getExecutionId();
+    /** @return the name of the workflow being executed */
+    String getWorkflowName();
+    /** @return the name of the service template the service was created from */
+    String getServiceTemplateName();
+    /** @return the name of the service this execution runs against */
+    String getServiceName();
+    /** @return the execution status string as reported by the server */
+    String getStatus();
+}
diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/ExecutionDetails.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/ExecutionDetails.java new file mode 100755 index 0000000000..e7685a259c --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/ExecutionDetails.java @@ -0,0 +1,73 @@ +/*
+ * ============LICENSE_START===================================================
+ * Copyright (c) 2017 Cloudify.co. All rights reserved.
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ * ============LICENSE_END====================================================
+*/
+package com.gigaspaces.aria.rest.client;
+
+import java.util.List;
+
+/**
+ * Created by DeWayne on 7/12/2017.
+ */
+/**
+ * Value object carrying the options sent when starting or resuming an
+ * execution: which executor to use, retry policy, and optional inputs.
+ */
+public class ExecutionDetails {
+    /** Executor to use; the empty string selects the default. */
+    private String executorName = "";
+    /** Maximum attempts per task; defaults to 30. */
+    private int maxAttempts = 30;
+    /** Interval between task retries; defaults to 30. */
+    private int retryInterval = 30;
+    /** Optional workflow inputs; null when none are supplied. */
+    private List<Input> workflowInputs = null;
+    /** Whether failed tasks are re-run when an execution is resumed. */
+    private boolean retryFailedTasks = false;
+
+    public ExecutionDetails(){}
+
+    public ExecutionDetails(String executor){
+        this.executorName = executor;
+    }
+
+    public ExecutionDetails(String executor, int task_max_attempts, int task_retry_interval, boolean retry_failed_tasks,
+                            List<Input> inputs){
+        this.executorName = executor;
+        this.maxAttempts = task_max_attempts;
+        this.retryInterval = task_retry_interval;
+        this.retryFailedTasks = retry_failed_tasks;
+        this.workflowInputs = inputs;
+    }
+
+    public String getExecutor(){
+        return executorName;
+    }
+
+    public void setExecutor(String executor){
+        this.executorName = executor;
+    }
+
+    public int getTaskMaxAttempts(){
+        return maxAttempts;
+    }
+
+    public void setTaskMaxAttempts(int max){
+        this.maxAttempts = max;
+    }
+
+    public int getTaskRetryInterval(){
+        return retryInterval;
+    }
+
+    public void setTaskRetryInterval(int interval){
+        this.retryInterval = interval;
+    }
+
+    public List<Input> getInputs(){
+        return workflowInputs;
+    }
+
+    public void setInputs(List<Input> inputs){
+        this.workflowInputs = inputs;
+    }
+
+    public boolean isRetry_failed_tasks() {
+        return retryFailedTasks;
+    }
+
+    public void setRetry_failed_tasks(boolean retry_failed_tasks) {
+        this.retryFailedTasks = retry_failed_tasks;
+    }
+
+}
diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/ExecutionImpl.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/ExecutionImpl.java new file mode 100755 index 0000000000..8e420cc16c --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/ExecutionImpl.java @@ -0,0 +1,54 @@ +/* + * ============LICENSE_START=================================================== + * Copyright (c) 2017 Cloudify.co. All rights reserved. + * =================================================================== + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + * ============LICENSE_END==================================================== +*/ +package com.gigaspaces.aria.rest.client; + +import org.codehaus.jackson.annotate.JsonProperty; + +/** + * Created by DeWayne on 7/17/2017. 
+ */ +public class ExecutionImpl implements Execution { + @JsonProperty("execution_id") + int execution_id; + @JsonProperty("workflow_name") + String workflow_name; + @JsonProperty("service_template_name") + String service_template_name; + @JsonProperty("service_name") + String service_name; + String status; + + public int getExecutionId() { + return execution_id; + } + public String getWorkflowName() { + return workflow_name; + } + + public String getServiceTemplateName() { + return service_template_name; + } + + public String getServiceName() { + return service_name; + } + + public String getStatus() { + return status; + } +} diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/Input.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/Input.java new file mode 100755 index 0000000000..595dfb1245 --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/Input.java @@ -0,0 +1,27 @@ +/*
+ * ============LICENSE_START===================================================
+ * Copyright (c) 2017 Cloudify.co. All rights reserved.
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ * ============LICENSE_END====================================================
+*/
+package com.gigaspaces.aria.rest.client;
+
+/**
+ * Created by DeWayne on 7/12/2017.
+ */
+public interface Input {
+    /** @return the input's name */
+    String getName();
+    /** @return the input's description */
+    String getDescription();
+    /** @return the input's value, rendered as a string */
+    String getValue();
+}
diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/InputImpl.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/InputImpl.java new file mode 100755 index 0000000000..3002b7b54e --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/InputImpl.java @@ -0,0 +1,49 @@ +/* + * ============LICENSE_START=================================================== + * Copyright (c) 2017 Cloudify.co. All rights reserved. + * =================================================================== + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + * ============LICENSE_END==================================================== +*/ +package com.gigaspaces.aria.rest.client; + +/** + * Created by DeWayne on 7/17/2017. 
+ */ +public class InputImpl implements Input { + private String name, description, value; + + public InputImpl(){} + + public InputImpl(String name,String value,String description){ + if(name==null || value==null){ + throw new IllegalArgumentException("null argument supplied"); + } + this.name=name; + this.value=value; + if(description!=null)this.description=description; + } + + public String getName() { + return name; + } + + public String getDescription() { + return description; + } + + public String getValue() { + return value; + } + +} diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/NodeTemplate.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/NodeTemplate.java new file mode 100755 index 0000000000..bc46d7f5b2 --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/NodeTemplate.java @@ -0,0 +1,29 @@ +/*
+ * ============LICENSE_START===================================================
+ * Copyright (c) 2017 Cloudify.co. All rights reserved.
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ * ============LICENSE_END====================================================
+*/
+package com.gigaspaces.aria.rest.client;
+
+/**
+ * Created by DeWayne on 7/12/2017.
+ */
+public interface NodeTemplate {
+    /** @return the server-assigned node template id */
+    int getId();
+    /** @return the node template name */
+    String getName();
+    /** @return the node template description */
+    String getDescription();
+    /** @return id of the service template this node belongs to */
+    int getServiceTemplateId();
+    /** @return the node's type name */
+    String getTypeName();
+}
diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/NodeTemplateImpl.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/NodeTemplateImpl.java new file mode 100755 index 0000000000..43338c952d --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/NodeTemplateImpl.java @@ -0,0 +1,68 @@ +/* + * ============LICENSE_START=================================================== + * Copyright (c) 2017 Cloudify.co. All rights reserved. + * =================================================================== + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + * ============LICENSE_END==================================================== +*/ +package com.gigaspaces.aria.rest.client; + +import org.codehaus.jackson.annotate.JsonProperty; + +/** + * Created by DeWayne on 7/18/2017. 
+ */ +public class NodeTemplateImpl implements NodeTemplate { + private int id; + private String name; + private String description=""; + @JsonProperty("service_template_id") + private int service_template_id; + @JsonProperty("type_name") + private String type_name=""; + + public NodeTemplateImpl(){} + + public NodeTemplateImpl(int id, String name, String description, int service_template_id, String type_name){ + this.id=id; + this.description=description; + this.service_template_id=service_template_id; + this.type_name=type_name; + } + + public int getId() { + return id; + } + + public String getDescription() { + return description; + } + + public int getServiceTemplateId() { + return service_template_id; + } + + public String getTypeName() { + return type_name; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + +} diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/Output.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/Output.java new file mode 100755 index 0000000000..83363bad6c --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/Output.java @@ -0,0 +1,27 @@ +/*
+ * ============LICENSE_START===================================================
+ * Copyright (c) 2017 Cloudify.co. All rights reserved.
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ * ============LICENSE_END====================================================
+*/
+package com.gigaspaces.aria.rest.client;
+
+/**
+ * Created by DeWayne on 7/12/2017.
+ */
+public interface Output {
+    /** @return the output's name */
+    String getName();
+    /** @return the output's description */
+    String getDescription();
+    /** @return the output's value, rendered as a string */
+    String getValue();
+}
diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/OutputImpl.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/OutputImpl.java new file mode 100755 index 0000000000..0a6cecc9c1 --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/OutputImpl.java @@ -0,0 +1,37 @@ +/* + * ============LICENSE_START=================================================== + * Copyright (c) 2017 Cloudify.co. All rights reserved. + * =================================================================== + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + * ============LICENSE_END==================================================== +*/ +package com.gigaspaces.aria.rest.client; + +/** + * Created by DeWayne on 7/17/2017. + */ +public class OutputImpl implements Output { + private String name, description, value; + + public String getName() { + return name; + } + + public String getDescription() { + return description; + } + + public String getValue() { + return value; + } +} diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/Service.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/Service.java new file mode 100755 index 0000000000..9cf86ec73f --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/Service.java @@ -0,0 +1,32 @@ +/*
+ * ============LICENSE_START===================================================
+ * Copyright (c) 2017 Cloudify.co. All rights reserved.
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ * ============LICENSE_END====================================================
+*/
+package com.gigaspaces.aria.rest.client;
+
+import java.util.Date;
+
+/**
+ * Created by DeWayne on 7/12/2017.
+ */
+public interface Service {
+    /** @return the server-assigned service id */
+    int getId();
+    /** @return the service description */
+    String getDescription();
+    /** @return the service name */
+    String getName();
+    /** @return the name of the template this service was created from */
+    String getServiceTemplate();
+    /** @return when the service was created */
+    Date getCreated();
+    /** @return when the service was last updated */
+    Date getUpdated();
+}
diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/ServiceTemplate.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/ServiceTemplate.java new file mode 100755 index 0000000000..0df6d60905 --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/ServiceTemplate.java @@ -0,0 +1,31 @@ +/*
+ * ============LICENSE_START===================================================
+ * Copyright (c) 2017 Cloudify.co. All rights reserved.
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ * ============LICENSE_END====================================================
+*/
+package com.gigaspaces.aria.rest.client;
+
+import java.net.URI;
+
+/**
+ * Created by DeWayne on 7/12/2017.
+ */
+public interface ServiceTemplate {
+    /** @return a human readable name for the template */
+    String getName();
+    /** @return URI locating the template source (e.g. a CSAR archive) */
+    URI getURI();
+    /** @return the server-assigned template id */
+    int getId();
+    /** @return the filename of the main yaml template inside the archive */
+    String getFilename();
+    /** @return the template description */
+    String getDescription();
+}
diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/ServiceTemplateImpl.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/ServiceTemplateImpl.java new file mode 100755 index 0000000000..9e158a27fd --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/ServiceTemplateImpl.java @@ -0,0 +1,81 @@ +/* + * ============LICENSE_START=================================================== + * Copyright (c) 2017 Cloudify.co. All rights reserved. + * =================================================================== + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + * ============LICENSE_END==================================================== +*/ +package com.gigaspaces.aria.rest.client; + +import java.net.URI; + +/** + * CSAR based implementation + * + * Created by DeWayne on 7/17/2017. 
+ */ +public class ServiceTemplateImpl implements ServiceTemplate { + public static final String DEFAULT_TEMPLATE_NAME = "service-template.yaml"; + private String name; + private int id; + private URI uri; + private String filename = DEFAULT_TEMPLATE_NAME; + private String description; + + public ServiceTemplateImpl(){} + + public ServiceTemplateImpl(String name, URI uri){ + this.name=name; + this.uri=uri; + } + + /** + * Construct an instance + * @param name a textual name for the template + * @param uri a URI to a CSAR + * @param filename the filename in the CSAR representing main yaml template + */ + public ServiceTemplateImpl(String name, URI uri, String filename, String description){ + this.name=name; + this.uri=uri; + this.filename=filename; + this.description=description; + } + + public int getId(){ + return id; + } + public void setId(int id){ + this.id=id; + } + public String getName() { + return name; + } + public void setName(String name){ + this.name=name; + } + public URI getURI() { + return uri; + } + public void setPath(String path){ + this.uri=uri; + } + public String getFilename() { + return filename; + } + public void setFilename(String filename){ + this.filename=filename; + } + + public String getDescription(){ return description;} +} diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/ValidationResult.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/ValidationResult.java new file mode 100755 index 0000000000..3d40dfa1ec --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/ValidationResult.java @@ -0,0 +1,26 @@ +/*
+ * ============LICENSE_START===================================================
+ * Copyright (c) 2017 Cloudify.co. All rights reserved.
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ * ============LICENSE_END====================================================
+*/
+package com.gigaspaces.aria.rest.client;
+
+/**
+ * Created by DeWayne on 7/12/2017.
+ */
+public interface ValidationResult {
+
+    /** @return true when template validation failed */
+    boolean getFailed();
+}
diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/ValidationResultImpl.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/ValidationResultImpl.java new file mode 100755 index 0000000000..22e34eb7b3 --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/ValidationResultImpl.java @@ -0,0 +1,32 @@ +/* + * ============LICENSE_START=================================================== + * Copyright (c) 2017 Cloudify.co. All rights reserved. + * =================================================================== + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + * ============LICENSE_END==================================================== +*/ +package com.gigaspaces.aria.rest.client; + +/** + * Created by DeWayne on 7/17/2017. + */ +public class ValidationResultImpl implements ValidationResult { + private boolean failed=false; + + public void setFailed(boolean failed){ + this.failed=failed; + } + public boolean getFailed() { + return failed; + } +} diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/Workflow.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/Workflow.java new file mode 100755 index 0000000000..7dbab18943 --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/Workflow.java @@ -0,0 +1,25 @@ +/*
+ * ============LICENSE_START===================================================
+ * Copyright (c) 2017 Cloudify.co. All rights reserved.
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ * ============LICENSE_END====================================================
+*/
+package com.gigaspaces.aria.rest.client;
+
+/**
+ * Created by DeWayne on 7/12/2017.
+ */
+public interface Workflow {
+ String getName();
+}
diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/WorkflowImpl.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/WorkflowImpl.java new file mode 100755 index 0000000000..41105df26a --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/WorkflowImpl.java @@ -0,0 +1,29 @@ +/* + * ============LICENSE_START=================================================== + * Copyright (c) 2017 Cloudify.co. All rights reserved. + * =================================================================== + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + * ============LICENSE_END==================================================== +*/ +package com.gigaspaces.aria.rest.client; + +/** + * Created by DeWayne on 7/17/2017. + */ +public class WorkflowImpl implements Workflow{ + String name; + + public String getName() { + return name; + } +} diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/exceptions/StorageException.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/exceptions/StorageException.java new file mode 100755 index 0000000000..50ff38da10 --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/exceptions/StorageException.java @@ -0,0 +1,27 @@ +/*
+ * ============LICENSE_START===================================================
+ * Copyright (c) 2017 Cloudify.co. All rights reserved.
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ * ============LICENSE_END====================================================
+*/
+package com.gigaspaces.aria.rest.client.exceptions;
+
+/**
+ * Created by DeWayne on 7/12/2017.
+ */
+public class StorageException extends Exception {
+ public StorageException(String message){
+ super(message);
+ }
+}
diff --git a/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/exceptions/ValidationException.java b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/exceptions/ValidationException.java new file mode 100755 index 0000000000..cbcee31cb4 --- /dev/null +++ b/aria/aria-rest-java-client/src/main/java/com/gigaspaces/aria/rest/client/exceptions/ValidationException.java @@ -0,0 +1,27 @@ +/*
+ * ============LICENSE_START===================================================
+ * Copyright (c) 2017 Cloudify.co. All rights reserved.
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ * ============LICENSE_END====================================================
+*/
+package com.gigaspaces.aria.rest.client.exceptions;
+
+/**
+ * Created by DeWayne on 7/12/2017.
+ */
+public class ValidationException extends Exception {
+ public ValidationException(String message){
+ super(message);
+ }
+}
diff --git a/aria/aria-rest-server/src/main/python/aria-rest/LICENSE b/aria/aria-rest-server/src/main/python/aria-rest/LICENSE new file mode 100644 index 0000000000..270877b831 --- /dev/null +++ b/aria/aria-rest-server/src/main/python/aria-rest/LICENSE @@ -0,0 +1,18 @@ +# +# ============LICENSE_START=================================================== +# Copyright (c) 2017 Cloudify.co. All rights reserved. +# =================================================================== +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy +# of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# ============LICENSE_END==================================================== +# + diff --git a/aria/aria-rest-server/src/main/python/aria-rest/__init__.py b/aria/aria-rest-server/src/main/python/aria-rest/__init__.py new file mode 100644 index 0000000000..5e93dc2ae5 --- /dev/null +++ b/aria/aria-rest-server/src/main/python/aria-rest/__init__.py @@ -0,0 +1,19 @@ + +# +# ============LICENSE_START=================================================== +# Copyright (c) 2017 Cloudify.co. All rights reserved. +# =================================================================== +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. 
You may obtain a copy +# of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# ============LICENSE_END==================================================== +# + diff --git a/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/__init__.py b/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/__init__.py new file mode 100644 index 0000000000..5e93dc2ae5 --- /dev/null +++ b/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/__init__.py @@ -0,0 +1,19 @@ + +# +# ============LICENSE_START=================================================== +# Copyright (c) 2017 Cloudify.co. All rights reserved. +# =================================================================== +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy +# of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. 
+# ============LICENSE_END==================================================== +# + diff --git a/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/rest.py b/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/rest.py new file mode 100644 index 0000000000..fae6afcfe6 --- /dev/null +++ b/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/rest.py @@ -0,0 +1,604 @@ +# +# ============LICENSE_START=================================================== +# Copyright (c) 2017 Cloudify.co. All rights reserved. +# =================================================================== +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy +# of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. 
+# ============LICENSE_END==================================================== +# + + +import os +from cStringIO import StringIO +from flask import Flask, render_template, request, jsonify +from flask_autodoc.autodoc import Autodoc +from aria import install_aria_extensions +from aria.parser import consumption +from aria.utils import formatting, collections +from aria.cli.core import aria +from aria.cli import utils +from aria.exceptions import ParsingError, DependentServicesError +from aria.core import Core +from aria.cli import service_template_utils +from aria.storage import exceptions as storage_exceptions +from aria.utils import threading +from aria.orchestrator.workflow_runner import WorkflowRunner +from aria.orchestrator.workflows.executor.dry import DryExecutor +import util + +version_id = "0.1" +route_base = "/api/" + version_id + "/" +app = Flask("onap-aria-rest") +auto = Autodoc(app) + +# TODO Garbage collect this dict somehow +execution_state = util.SafeDict() + + +def main(): + install_aria_extensions() + app.run(host='0.0.0.0', port=5000, threaded=True) + + +@app.route("/") +@app.route("/api") +@app.route("/docs") +def index(): + return auto.html() + + +### +# TEMPLATES +### + +# add template +@app.route(route_base + "templates/<template_name>", methods=['PUT']) +@auto.doc() +@aria.pass_model_storage +@aria.pass_resource_storage +@aria.pass_plugin_manager +@aria.pass_logger +def install_template(template_name, model_storage, resource_storage, + plugin_manager, logger): + """ + installs a template in Aria storage + """ + body = request.json + + # Check body + if "service_template_path" in body: + service_template_path = body["service_template_path"] + else: + return "request body missing service_template_path", 501 + + if "service_template_filename" in body: + service_template_filename = body["service_template_filename"] + else: + service_template_filename = "service-template.yaml" + + service_template_path = service_template_utils.get( + 
service_template_path, service_template_filename) + + core = Core(model_storage, resource_storage, plugin_manager) + + try: + core.create_service_template(service_template_path, + os.path.dirname(service_template_path), + template_name) + except storage_exceptions.StorageError as e: + logger.error("storage exception") + utils.check_overriding_storage_exceptions( + e, 'service template', template_name) + return e.message, 500 + except Exception as e: + logger.error("catchall exception") + return e.message, 500 + + return "service template installed", 200 + +# validate template +@app.route(route_base + "templates", methods=['POST']) +@auto.doc() +@aria.pass_model_storage +@aria.pass_resource_storage +@aria.pass_plugin_manager +@aria.pass_logger +def validate_template(model_storage, resource_storage, plugin_manager, logger): + """ + Validates a TOSCA template + """ + body = request.json + + # Check body + if "service_template_path" in body: + service_template_path = body["service_template_path"] + else: + return "request body missing service_template_path", 501 + if "service_template_filename" in body: + service_template_filename = body["service_template_filename"] + else: + service_template_filename = "service-template.yaml" + + service_template_path = service_template_utils.get( + service_template_path, service_template_filename) + + core = Core(model_storage, resource_storage, plugin_manager) + try: + context = core.validate_service_template(service_template_path) + except ParsingError as e: + return e.message, 400 + + logger.info('Service template {} validated'.format(service_template_path)) + return "", 200 + + +# delete template +@app.route(route_base + "templates/<template_id>", methods=['DELETE']) +@auto.doc() +@aria.pass_model_storage +@aria.pass_resource_storage +@aria.pass_plugin_manager +@aria.pass_logger +def delete_template( + template_id, + model_storage, + resource_storage, + plugin_manager, + logger): + """ + Deletes a template from Aria storage + """ 
+ + logger.info('Deleting service template {}'.format(template_id)) + core = Core(model_storage, resource_storage, plugin_manager) + try: + core.delete_service_template(template_id) + except DependentServicesError as e: + logger.error("dependent services error") + return e.message, 400 + except Exception as e: + logger.error("failed") + return "Failed to delete template", 500 + + logger.info('Service template {} deleted'.format(template_id)) + return "", 200 + + +# get template json +@app.route(route_base + "templates/<template_id>/json", methods=['GET']) +@auto.doc() +@aria.pass_model_storage +@aria.pass_logger +def get_template_json(template_id, model_storage, logger): + """ get JSON representation of template """ + template = model_storage.service_template.get(template_id) + consumption.ConsumptionContext() + body = formatting.json_dumps(collections.prune(template.as_raw)) + return body + + +# list templates +@app.route(route_base + "templates", methods=['GET']) +@auto.doc() +@aria.pass_model_storage +@aria.pass_logger +def list_templates(model_storage, logger): + """ + Lists templates installed in Aria storage + """ + list = model_storage.service_template.list() + templates = [] + for item in list: + templates.append({"name": item.name, + "id": item.id, + "description": item.description + }) + return jsonify(templates) + + +# list nodes +@app.route(route_base + "templates/<template_id>/nodes", methods=['GET']) +@auto.doc() +@aria.pass_model_storage +@aria.pass_logger +def list_nodes_by_template(template_id, model_storage, logger): + """ + Lists node templates in specified Aria template + """ + service_template = model_storage.service_template.get(template_id) + filters = dict(service_template=service_template) + nodes = model_storage.node_template.list(filters=filters) + nodelist = [] + + for node in nodes: + nodelist.append({ + "id": node.id, + "name": node.name, + "description": node.description, + "service_template_id": service_template.id, + "type_name": 
node.type_name + }) + return jsonify(nodelist), 200 + + +# show node details +@app.route(route_base + "nodes/<node_id>", methods=['GET']) +@auto.doc() +@aria.pass_model_storage +@aria.pass_logger +def get_node(node_id, model_storage, logger): + """ + Get node details + """ + node_template = model_storage.node_template.get(node_id) + service_template = model_storage.service_template.get_by_name( + node_template.service_template_name) + retmap = {} + retmap['id'] = node_id + retmap['name'] = node_template.name + retmap['description'] = node_template.description + retmap['service_template_id'] = service_template.id + retmap['type_name'] = node_template.type_name + return jsonify(retmap), 200 + +### +# SERVICES +### + + +# list services +@app.route(route_base + "services", methods=['GET']) +@auto.doc() +@aria.pass_model_storage +@aria.pass_logger +def list_services(model_storage, logger): + """ + Lists all services + """ + services_list = model_storage.service.list() + outlist = [] + for service in services_list: + outlist.append({"id": service.id, + "description": service.description, + "name": service.name, + "service_template": service.service_template.name, + "created": service.created_at, + "updated": service.updated_at}) + return jsonify(outlist), 200 + + +# show service +@app.route(route_base + "services/<service_id>", methods=['GET']) +def show_service(service_id): + """ + Returns details for specified servie + """ + return "not implemented", 501 + + +# get service outputs +@app.route(route_base + "services/<service_id>/outputs", methods=['GET']) +@auto.doc() +@aria.pass_model_storage +@aria.pass_logger +def get_service_outputs(service_id, model_storage, logger): + """ + Gets outputs for specified service + """ + service = model_storage.service.get(service_id) + outlist = [] + for output_name, output in service.outputs.iteritems(): + outlist.append({"name": output_name, "description": output.description, + "value": output.value}) + return jsonify(outlist) + + 
+# get service inputs +@app.route(route_base + "services/<service_id>/inputs", methods=['GET']) +@auto.doc() +@aria.pass_model_storage +@aria.pass_logger +def get_service_inputs(service_id, model_storage, logger): + """ + Gets inputs for specified service + """ + service = model_storage.service.get(service_id) + outlist = [] + for input_name, input in service.inputs.iteritems(): + outlist.append({"name": input_name, "description": input.description, + "value": input.value}) + return jsonify(outlist) + + +# create service +@app.route(route_base + "templates/<template_id>/services/<service_name>", + methods=['POST']) +@auto.doc() +@aria.pass_model_storage +@aria.pass_resource_storage +@aria.pass_plugin_manager +@aria.pass_logger +def create_service(template_id, service_name, model_storage, resource_storage, + plugin_manager, logger): + """ + Creates a service from the specified service template + """ + body = request.json + inputs = {} + if 'inputs' in body: + inputs = body['inputs'] + core = Core(model_storage, resource_storage, plugin_manager) + service = core.create_service(template_id, inputs, service_name) + + logger.info("service {} created".format(service.name)) + return "service {} created".format(service.name), 200 + + +# delete service +@app.route(route_base + "services/<service_id>", methods=['DELETE']) +@auto.doc() +@aria.pass_model_storage +@aria.pass_resource_storage +@aria.pass_plugin_manager +@aria.pass_logger +def delete_service( + service_id, + model_storage, + resource_storage, + plugin_manager, + logger): + """ + Deletes the specified servi e + """ + service = model_storage.service.get(service_id) + core = Core(model_storage, resource_storage, plugin_manager) + core.delete_service(service_id, force=True) + return "service {} deleted".format(service.id), 200 + + +### +# WORKFLOWS +### + + +# list workflows +@app.route(route_base + "services/<service_id>/workflows", methods=['GET']) +@auto.doc() +@aria.pass_model_storage +@aria.pass_logger +def 
list_workflows(service_id, model_storage, logger): + """ + Lists all defined user workflows for the specified service + """ + service = model_storage.service.get(service_id) + workflows = service.workflows.itervalues() + outlist = [] + for workflow in workflows: + outlist.append(workflow.name) + return jsonify(outlist), 200 + + +# show workflow +@app.route( + route_base + + "services/<service_id>/workflow/<workflow_name>", + methods=['GET']) +def show_workflow(service_name, workflow_name): + """ + Returns details of specified workflow + """ + return "not implemented", 501 + +### +# EXECUTIONS +### + + +# list all executions +@app.route(route_base + "executions", methods=['GET']) +@auto.doc() +@aria.pass_model_storage +@aria.pass_logger +def list_executions(model_storage, logger): + """ + Return all executions + """ + elist = model_storage.execution.list() + outlist = [] + for execution in elist: + outlist.append( + {"execution_id": execution.id, + "workflow_name": execution.workflow_name, + "service_template_name": execution.service_template_name, + "service_name": execution.service_name, + "status": execution.status}) + return jsonify(outlist), 200 + + +# list executions for service +@app.route(route_base + "services/<service_id>/executions", methods=['GET']) +@auto.doc() +@aria.pass_model_storage +@aria.pass_logger +def list_service_executions(service_id, model_storage, logger): + """ + Return all executions for specified service + """ + service = model_storage.service.get(service_id) + elist = model_storage.execution.list(filters=dict(service=service)) + outlist = [] + for execution in elist: + outlist.append( + {"execution_id": execution.id, + "workflow_name": execution.workflow_name, + "service_template_name": execution.service_template_name, + "service_name": execution.service_name, + "status": execution.status}) + return jsonify(outlist), 200 + + +# show execution +@app.route(route_base + "executions/<execution_id>", methods=['GET']) +@auto.doc() 
+@aria.pass_model_storage +@aria.pass_logger +def show_execution(execution_id, model_storage, logger): + """ + Return details of specified execution + """ + try: + execution = model_storage.execution.get(execution_id) + except BaseException: + return "Execution {} not found".format(execution_id), 404 + + return jsonify({"execution_id": execution_id, + "service_name": execution.service_name, + "service_template_name": execution.service_template_name, + "workflow_name": execution.workflow_name, + "status": execution.status}), 200 + +# start execution + + +# TODO allow executors other than default and dry to be used +@app.route( + route_base + + "services/<service_id>/executions/<workflow_name>", + methods=['POST']) +@auto.doc() +@aria.pass_model_storage +@aria.pass_resource_storage +@aria.pass_plugin_manager +@aria.pass_logger +def start_execution( + service_id, + workflow_name, + model_storage, + resource_storage, + plugin_manager, + logger): + """ + Start an execution for the specified service + """ + body = request.json + executor = DryExecutor( + ) if 'executor' in body and body['executor'] == 'dry' else None + + inputs = body['inputs'] if 'inputs' in body else None + task_max_attempts = (body['task_max_attempts'] + if 'task_max_attempts' in body else 30) + task_retry_interval = (body['task_retry_interval'] + if 'task_retry_interval' in body else 30) + + runner = WorkflowRunner(model_storage, resource_storage, plugin_manager, + service_id=service_id, + workflow_name=workflow_name, + inputs=inputs, + executor=executor, + task_max_attempts=task_max_attempts, + task_retry_interval=task_retry_interval) + + service = model_storage.service.get(service_id) + tname = '{}_{}_{}'.format(service.name, workflow_name, runner.execution_id) + thread = threading.ExceptionThread(target=runner.execute, + name=tname) + thread.start() + execution_state[str(runner.execution_id)] = [runner, thread] + return jsonify({"id": runner.execution_id}), 202 + + +# resume execution 
+@app.route(route_base + "executions/<execution_id>", methods=['POST']) +@auto.doc() +@aria.pass_model_storage +@aria.pass_resource_storage +@aria.pass_plugin_manager +@aria.pass_logger +def resume_execution( + execution_id, + model_storage, + resource_storage, + plugin_manager, + logger): + """ + Resume the specified execution + """ + body = request.json + execution = model_storage.execution.get(execution_id) + if execution.status != execution.status.CANCELLED: + return "cancelled execution cannot be resumed", 400 + executor = DryExecutor( + ) if 'executor' in body and body['executor'] == 'dry' else None + retry_failed_tasks = body['retry_failed_tasks'] \ + if 'retry_failed_tasks' in body else False + + runner = WorkflowRunner(model_storage, resource_storage, plugin_manager, + execution_id=execution_id, + executor=executor, + retry_failed_tasks=retry_failed_tasks) + + tname = '{}_{}_{}'.format(execution.service.name, execution.workflow_name, + runner.execution_id) + thread = threading.ExceptionThread(target=runner.execute, + name=tname, + daemon=True) + thread.start() + execution_state[str(runner.execution_id)] = [runner, thread] + return jsonify({"id": runner.execution_id}), 202 + + +# cancel execution +@app.route(route_base + "executions/<execution_id>", methods=['DELETE']) +@auto.doc() +@aria.pass_model_storage +@aria.pass_logger +def cancel_execution(execution_id, model_storage, logger): + """ + Cancel the specified execution + """ + logger.info("cancelling execution {}".format(execution_id)) + body = request.json + + try: + execution = model_storage.execution.get(execution_id) + except BaseException: + return "Execution {} not found".format(execution_id), 404 + + if (not execution.status == execution.PENDING and + not execution.status == execution.STARTED): + return "Cancel ignored. 
Execution state = {}".format( + execution.status), 200 + + if execution_id not in execution_state: + logger.error("id {} not found".format(execution_id)) + return "execution id {} not found".format(execution_id), 400 + + einfo = execution_state[execution_id] + runner = einfo[0] + thread = einfo[1] + timeout = 30 # seconds to wait for thread death + if 'timeout' in body: + timeout = body['timeout'] + + runner.cancel() + while thread.is_alive() and timeout > 0: + thread.join(1) + if not thread.is_alive(): + return "execution {} cancelled".format(execution_id), 200 + timeout = timeout - 1 + if timeout == 0: + return "execution cancel timed out", 500 + return "execution {} cancelled".format(execution_id), 200 + + +if __name__ == "__main__": + app.run(host='0.0.0.0', port=5000, threaded=True) diff --git a/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/templates/index.html b/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/templates/index.html new file mode 100644 index 0000000000..e9a5e2ea00 --- /dev/null +++ b/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/templates/index.html @@ -0,0 +1,23 @@ +<!-- +# +# ============LICENSE_START=================================================== +# Copyright (c) 2017 Cloudify.co. All rights reserved. +# =================================================================== +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy +# of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. 
+# ============LICENSE_END==================================================== +# +--> + +<body> +<h1>Not Implemented</h1> +</body> diff --git a/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/util.py b/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/util.py new file mode 100644 index 0000000000..2310d7eddf --- /dev/null +++ b/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/util.py @@ -0,0 +1,48 @@ +# +# ============LICENSE_START=================================================== +# Copyright (c) 2017 Cloudify.co. All rights reserved. +# =================================================================== +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy +# of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. 
+# ============LICENSE_END==================================================== +# + + +import threading + +def make_template_name( user, template_name ): + return "{}.{}".format(user,template_name) + + +class SafeDict(dict): + def __init__(self, *args): + self._lockobj = threading.Lock() + dict.__init__(self, args) + + def __getitem__(self, key): + try: + self._lockobj.acquire() + val = dict.__getitem__(self, key) + except: + raise + finally: + self._lockobj.release() + + def __setitem__(self, key, value): + try: + self._lockobj.acquire() + dict.__setitem__(self, key, value) + except: + raise + finally: + self._lockobj.release() + diff --git a/aria/aria-rest-server/src/main/python/aria-rest/rest.py b/aria/aria-rest-server/src/main/python/aria-rest/rest.py new file mode 100644 index 0000000000..6669ac39ee --- /dev/null +++ b/aria/aria-rest-server/src/main/python/aria-rest/rest.py @@ -0,0 +1,57 @@ +# +# ============LICENSE_START=================================================== +# Copyright (c) 2017 Cloudify.co. All rights reserved. +# =================================================================== +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy +# of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. 
+# ============LICENSE_END==================================================== +# + +from flask import Flask, render_template +from aria.exceptions import AriaException + +version_id = "0.1" +route_base = "/api/" + version_id + "/" +app = Flask("onap-aria-rest") + +@app.route("/") +def index(): + return render_template('index.html') + + +@app.route(route_base + "templates/", methods = ['GET']) +def list_templates(): + +@app.route(route_base + "templates/<template_id>", methods = ['POST']) +def install_template( template_id ): + + # GET CSAR FROM SDC + + # DEPLOY CSAR + + # UPDATE A&AI? + + return "template {} instantiated" + +@app.route(route_base + "templates/<template_id>", methods = ['DELETE']) +def delete_template( template_id ): + + # RUN UNINSTALL + + # DELETE TEMPLATE + + # UPDATE A&AI? + + return "template {} deleted" + +if __name__ == "__main__": + app.run() diff --git a/aria/aria-rest-server/src/main/python/aria-rest/setup.py b/aria/aria-rest-server/src/main/python/aria-rest/setup.py new file mode 100644 index 0000000000..8c80a9004d --- /dev/null +++ b/aria/aria-rest-server/src/main/python/aria-rest/setup.py @@ -0,0 +1,41 @@ +# +# ============LICENSE_START=================================================== +# Copyright (c) 2017 Cloudify.co. All rights reserved. +# =================================================================== +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy +# of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. 
+# ============LICENSE_END==================================================== +# + + +from setuptools import setup + +setup( + zip_safe=True, + name='aria-rest', + version='0.1', + author='dewayne', + author_email='dewayne@gigaspaces.com', + packages=[ + 'aria_rest' + ], + entry_points = { + 'console_scripts' : ['aria-rest=aria_rest.rest:main'] + }, + license='LICENSE', + description='Aria REST API for ONAP', + install_requires=[ + 'Flask==0.12.2', + 'flask-autodoc==0.1.2', + 'apache-ariatosca==0.1.0' + ] +) diff --git a/aria/aria-rest-server/src/main/python/aria-rest/templates/index.html b/aria/aria-rest-server/src/main/python/aria-rest/templates/index.html new file mode 100644 index 0000000000..6d74cfc0f8 --- /dev/null +++ b/aria/aria-rest-server/src/main/python/aria-rest/templates/index.html @@ -0,0 +1,3 @@ +<body> +<h1>Not Implemented</h1> +</body> diff --git a/aria/multivim-plugin/.gitignore b/aria/multivim-plugin/.gitignore new file mode 100644 index 0000000000..ce50313b79 --- /dev/null +++ b/aria/multivim-plugin/.gitignore @@ -0,0 +1,63 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +bin/ +build/ +develop-eggs/ +dist/ +eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.cache +nosetests.xml +coverage.xml + +# Translations +*.mo + +# Mr Developer +.mr.developer.cfg +.project +.pydevproject + +# Rope +.ropeproject + +# Django stuff: +*.log +*.pot + +# Sphinx documentation +docs/_build/ + +*.iml + +*COMMIT_MSG + +# QuickBuild +.qbcache/ + +.idea/ + diff --git a/aria/multivim-plugin/.travis.yml b/aria/multivim-plugin/.travis.yml new file mode 100644 index 0000000000..8653f2f76a --- /dev/null +++ b/aria/multivim-plugin/.travis.yml @@ -0,0 +1,18 @@ +language: python +sudo: false +python: + - "2.7" +env: + # - TOX_ENV=docs 
+ - TOX_ENV=flake8 + - TOX_ENV=py27 +# TODO: add coveralls support +install: + - pip install tox + # - pip install coveralls +script: + - tox -e $TOX_ENV +# after_success: +# coveralls +notifications: + flowdock: 1f4ec6febcf1ac9b35ae6c1f0049471f diff --git a/aria/multivim-plugin/CHANGELOG.txt b/aria/multivim-plugin/CHANGELOG.txt new file mode 100644 index 0000000000..da9875a5bc --- /dev/null +++ b/aria/multivim-plugin/CHANGELOG.txt @@ -0,0 +1,30 @@ +2.2.0
: + - Fix duplicated mapping key in plugin.yaml. + - Create Server with security groups from instance relationships. This prevents a window of time when a server can + be unsecured. (OPENSTACK-38) + - Fix floating IP detach issue. (OPENSTACK-12) + - Allow openstack_config as runtime property. (OPENSTACK-112) + - Fix key creation when folders don't exist. (OPENSTACK-7) +2.0.1: + - Don't overwrite server['image'] when server is booted from volume + - Fix loading auth_url from environment (OPENSTACK-101) + - Raise an error if server is not attached to a network. Previously an IndexError would be raised. + - Make sure security_group is removed if a later step (rule creation) fails (OPENSTACK-106) + - Fix attempt to access `volume.display_name` (is now .name) (OPENSTACK-108) + - Correctly handle nova_url and neutron_url in openstack_configuration (these are deprecated) (OPENSTACK-109) +2.0: + - Don't require a Server image to be specified if a boot_volume is attached + - Add support for keystone auth v3. auth_url setting must now include version + - Upgraded openstack library dependencies + - Use availability_zone from connected boot_volume if Server doesn't specify + - Embed full docs in plugin repo. Now using sphinxify sphinx extension +1.5: + - Create project, assign existing users with roles and customize quotas. + - Create image from file (local workflow only) or url. + - Add conditional creation to all resources. Create a resource only if it doesn't already exist. Previously, could + either use an existing resource, or create it. + - Boot server from volume. Support boot from block storage and not only from image like in previous versions. + - Fix connect port to security group race-condition. + - Get mac address from port after creation. + - Raise error also when external network is missing in floating ip creation. Previously, an error was raised only + when floating network id or name was missing. 
diff --git a/aria/multivim-plugin/LICENSE b/aria/multivim-plugin/LICENSE new file mode 100644 index 0000000000..e06d208186 --- /dev/null +++ b/aria/multivim-plugin/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/aria/multivim-plugin/Makefile b/aria/multivim-plugin/Makefile new file mode 100644 index 0000000000..cfb7416fa7 --- /dev/null +++ b/aria/multivim-plugin/Makefile @@ -0,0 +1,39 @@ +.PHONY: release install files test docs prepare publish + +all: + @echo "make release - prepares a release and publishes it" + @echo "make dev - prepares a development environment" + @echo "make install - install on local system" + @echo "make files - update changelog and todo files" + @echo "make test - run tox" + @echo "make docs - build docs" + @echo "prepare - prepare module for release (CURRENTLY IRRELEVANT)" + @echo "make publish - upload to pypi" + +release: test docs publish + +dev: + pip install -rdev-requirements.txt + python setup.py develop + +install: + python setup.py install + +files: + grep '# TODO' -rn * --exclude-dir=docs --exclude-dir=build --exclude=TODO.md | sed 's/: \+#/: # /g;s/:#/: # /g' | sed -e 's/^/- /' | grep -v Makefile > TODO.md + git log --oneline --decorate --color > CHANGELOG + +test: + pip install tox + tox + +docs: + pip install sphinx sphinx-rtd-theme + cd docs && make html + pandoc README.md -f markdown -t rst -s -o README.rst + +prepare: + python scripts/make-release.py + +publish: + python setup.py sdist upload
\ No newline at end of file diff --git a/aria/multivim-plugin/README.md b/aria/multivim-plugin/README.md new file mode 100644 index 0000000000..3b5b8df721 --- /dev/null +++ b/aria/multivim-plugin/README.md @@ -0,0 +1,11 @@ +cloudify-openstack-plugin +========================= + +[![Circle CI](https://circleci.com/gh/cloudify-cosmo/cloudify-openstack-plugin/tree/master.svg?style=shield)](https://circleci.com/gh/cloudify-cosmo/cloudify-openstack-plugin/tree/master) +[![Build Status](https://travis-ci.org/cloudify-cosmo/cloudify-openstack-plugin.svg?branch=master)](https://travis-ci.org/cloudify-cosmo/cloudify-openstack-plugin) + +Cloudify OpenStack Plugin + +## Usage + +See [Openstack Plugin](http://docs.getcloudify.org/latest/plugins/openstack/) diff --git a/aria/multivim-plugin/README.rst b/aria/multivim-plugin/README.rst new file mode 100644 index 0000000000..eaa0de6eaf --- /dev/null +++ b/aria/multivim-plugin/README.rst @@ -0,0 +1,4 @@ +cloudify-openstack-plugin +========================= + +Cloudify OpenStack Plugin diff --git a/aria/multivim-plugin/cinder_plugin/__init__.py b/aria/multivim-plugin/cinder_plugin/__init__.py new file mode 100644 index 0000000000..a9dfcc4473 --- /dev/null +++ b/aria/multivim-plugin/cinder_plugin/__init__.py @@ -0,0 +1,14 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
diff --git a/aria/multivim-plugin/cinder_plugin/tests/__init__.py b/aria/multivim-plugin/cinder_plugin/tests/__init__.py new file mode 100644 index 0000000000..a9dfcc4473 --- /dev/null +++ b/aria/multivim-plugin/cinder_plugin/tests/__init__.py @@ -0,0 +1,14 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. diff --git a/aria/multivim-plugin/cinder_plugin/tests/test_volume.py b/aria/multivim-plugin/cinder_plugin/tests/test_volume.py new file mode 100644 index 0000000000..0ee85bc334 --- /dev/null +++ b/aria/multivim-plugin/cinder_plugin/tests/test_volume.py @@ -0,0 +1,342 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+ +import mock +import unittest + +from cloudify import mocks as cfy_mocks +from cloudify import exceptions as cfy_exc +from cloudify.state import current_ctx +from cinder_plugin import volume +from nova_plugin import server +from openstack_plugin_common import (OPENSTACK_ID_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + OPENSTACK_NAME_PROPERTY) + + +class TestCinderVolume(unittest.TestCase): + + def _mock(self, **kwargs): + ctx = cfy_mocks.MockCloudifyContext(**kwargs) + current_ctx.set(ctx) + return ctx + + def tearDown(self): + current_ctx.clear() + + def test_create_new(self): + volume_name = 'fake volume name' + volume_description = 'fake volume' + volume_id = '00000000-0000-0000-0000-000000000000' + volume_size = 10 + + volume_properties = { + 'volume': { + 'size': volume_size, + 'description': volume_description + }, + 'use_external_resource': False, + 'device_name': '/dev/fake', + 'resource_id': volume_name, + } + + creating_volume_m = mock.Mock() + creating_volume_m.id = volume_id + creating_volume_m.status = volume.VOLUME_STATUS_CREATING + available_volume_m = mock.Mock() + available_volume_m.id = volume_id + available_volume_m.status = volume.VOLUME_STATUS_AVAILABLE + cinder_client_m = mock.Mock() + cinder_client_m.volumes = mock.Mock() + cinder_client_m.volumes.create = mock.Mock( + return_value=creating_volume_m) + cinder_client_m.volumes.get = mock.Mock( + return_value=available_volume_m) + ctx_m = self._mock(node_id='a', properties=volume_properties) + + volume.create(cinder_client=cinder_client_m, args={}, ctx=ctx_m, + status_attempts=10, status_timeout=2) + + cinder_client_m.volumes.create.assert_called_once_with( + size=volume_size, + name=volume_name, + description=volume_description) + cinder_client_m.volumes.get.assert_called_once_with(volume_id) + self.assertEqual( + volume_id, + ctx_m.instance.runtime_properties[OPENSTACK_ID_PROPERTY]) + self.assertEqual( + volume.VOLUME_OPENSTACK_TYPE, + ctx_m.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY]) + 
+ def test_create_use_existing(self): + volume_id = '00000000-0000-0000-0000-000000000000' + + volume_properties = { + 'use_external_resource': True, + 'device_name': '/dev/fake', + 'resource_id': volume_id, + } + existing_volume_m = mock.Mock() + existing_volume_m.id = volume_id + existing_volume_m.status = volume.VOLUME_STATUS_AVAILABLE + cinder_client_m = mock.Mock() + cinder_client_m.volumes = mock.Mock() + cinder_client_m.volumes.create = mock.Mock() + cinder_client_m.cosmo_get_if_exists = mock.Mock( + return_value=existing_volume_m) + cinder_client_m.get_id_from_resource = mock.Mock( + return_value=volume_id) + ctx_m = self._mock(node_id='a', properties=volume_properties) + + volume.create(cinder_client=cinder_client_m, args={}, ctx=ctx_m, + status_attempts=10, status_timeout=2) + + self.assertFalse(cinder_client_m.volumes.create.called) + self.assertEqual( + volume_id, + ctx_m.instance.runtime_properties[OPENSTACK_ID_PROPERTY]) + self.assertEqual( + volume.VOLUME_OPENSTACK_TYPE, + ctx_m.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY]) + + def test_delete(self): + volume_id = '00000000-0000-0000-0000-000000000000' + volume_name = 'test-volume' + + volume_properties = { + 'use_external_resource': False, + } + + cinder_client_m = mock.Mock() + cinder_client_m.cosmo_delete_resource = mock.Mock() + + ctx_m = self._mock(node_id='a', properties=volume_properties) + ctx_m.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = volume_id + ctx_m.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \ + volume.VOLUME_OPENSTACK_TYPE + ctx_m.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = \ + volume_name + + volume.delete(cinder_client=cinder_client_m, ctx=ctx_m) + + cinder_client_m.cosmo_delete_resource.assert_called_once_with( + volume.VOLUME_OPENSTACK_TYPE, volume_id) + self.assertTrue( + OPENSTACK_ID_PROPERTY not in ctx_m.instance.runtime_properties) + self.assertTrue(OPENSTACK_TYPE_PROPERTY + not in ctx_m.instance.runtime_properties) + 
self.assertTrue(OPENSTACK_NAME_PROPERTY + not in ctx_m.instance.runtime_properties) + + @mock.patch('openstack_plugin_common.NovaClientWithSugar') + @mock.patch('openstack_plugin_common.CinderClientWithSugar') + @mock.patch.object(volume, 'wait_until_status', return_value=(None, True)) + def test_attach(self, wait_until_status_m, cinder_m, nova_m): + volume_id = '00000000-0000-0000-0000-000000000000' + server_id = '11111111-1111-1111-1111-111111111111' + device_name = '/dev/fake' + + volume_ctx = cfy_mocks.MockContext({ + 'node': cfy_mocks.MockContext({ + 'properties': {volume.DEVICE_NAME_PROPERTY: device_name} + }), + 'instance': cfy_mocks.MockContext({ + 'runtime_properties': { + OPENSTACK_ID_PROPERTY: volume_id, + } + }) + }) + server_ctx = cfy_mocks.MockContext({ + 'node': cfy_mocks.MockContext({ + 'properties': {} + }), + 'instance': cfy_mocks.MockContext({ + 'runtime_properties': { + server.OPENSTACK_ID_PROPERTY: server_id + } + }) + }) + + ctx_m = self._mock(node_id='a', + target=server_ctx, + source=volume_ctx) + + nova_instance = nova_m.return_value + cinder_instance = cinder_m.return_value + + server.attach_volume(ctx=ctx_m, status_attempts=10, + status_timeout=2) + + nova_instance.volumes.create_server_volume.assert_called_once_with( + server_id, volume_id, device_name) + wait_until_status_m.assert_called_once_with( + cinder_client=cinder_instance, + volume_id=volume_id, + status=volume.VOLUME_STATUS_IN_USE, + num_tries=10, + timeout=2, + ) + + @mock.patch('openstack_plugin_common.NovaClientWithSugar') + @mock.patch('openstack_plugin_common.CinderClientWithSugar') + def _test_cleanup__after_attach_fails( + self, expected_err_cls, expect_cleanup, + wait_until_status_m, cinder_m, nova_m): + volume_id = '00000000-0000-0000-0000-000000000000' + server_id = '11111111-1111-1111-1111-111111111111' + attachment_id = '22222222-2222-2222-2222-222222222222' + device_name = '/dev/fake' + + attachment = {'id': attachment_id, + 'server_id': server_id, + 'volume_id': 
volume_id} + + volume_ctx = cfy_mocks.MockContext({ + 'node': cfy_mocks.MockContext({ + 'properties': {volume.DEVICE_NAME_PROPERTY: device_name} + }), + 'instance': cfy_mocks.MockContext({ + 'runtime_properties': { + OPENSTACK_ID_PROPERTY: volume_id, + } + }) + }) + server_ctx = cfy_mocks.MockContext({ + 'node': cfy_mocks.MockContext({ + 'properties': {} + }), + 'instance': cfy_mocks.MockContext({ + 'runtime_properties': { + server.OPENSTACK_ID_PROPERTY: server_id + } + }) + }) + + ctx_m = self._mock(node_id='a', + target=server_ctx, + source=volume_ctx) + + attached_volume = mock.Mock(id=volume_id, + status=volume.VOLUME_STATUS_IN_USE, + attachments=[attachment]) + nova_instance = nova_m.return_value + cinder_instance = cinder_m.return_value + cinder_instance.volumes.get.return_value = attached_volume + + with self.assertRaises(expected_err_cls): + server.attach_volume(ctx=ctx_m, status_attempts=10, + status_timeout=2) + + nova_instance.volumes.create_server_volume.assert_called_once_with( + server_id, volume_id, device_name) + volume.wait_until_status.assert_any_call( + cinder_client=cinder_instance, + volume_id=volume_id, + status=volume.VOLUME_STATUS_IN_USE, + num_tries=10, + timeout=2, + ) + if expect_cleanup: + nova_instance.volumes.delete_server_volume.assert_called_once_with( + server_id, attachment_id) + self.assertEqual(2, volume.wait_until_status.call_count) + volume.wait_until_status.assert_called_with( + cinder_client=cinder_instance, + volume_id=volume_id, + status=volume.VOLUME_STATUS_AVAILABLE, + num_tries=10, + timeout=2) + + def test_cleanup_after_waituntilstatus_throws_recoverable_error(self): + err = cfy_exc.RecoverableError('Some recoverable error') + with mock.patch.object(volume, 'wait_until_status', + side_effect=[err, (None, True)]) as wait_mock: + self._test_cleanup__after_attach_fails(type(err), True, wait_mock) + + def test_cleanup_after_waituntilstatus_throws_any_not_nonrecov_error(self): + class 
ArbitraryNonRecoverableException(Exception): + pass + err = ArbitraryNonRecoverableException('An exception') + with mock.patch.object(volume, 'wait_until_status', + side_effect=[err, (None, True)]) as wait_mock: + self._test_cleanup__after_attach_fails(type(err), True, wait_mock) + + def test_cleanup_after_waituntilstatus_lets_nonrecov_errors_pass(self): + err = cfy_exc.NonRecoverableError('Some non recoverable error') + with mock.patch.object(volume, 'wait_until_status', + side_effect=[err, (None, True)]) as wait_mock: + self._test_cleanup__after_attach_fails(type(err), False, wait_mock) + + @mock.patch.object(volume, 'wait_until_status', return_value=(None, False)) + def test_cleanup_after_waituntilstatus_times_out(self, wait_mock): + self._test_cleanup__after_attach_fails(cfy_exc.RecoverableError, True, + wait_mock) + + @mock.patch('openstack_plugin_common.NovaClientWithSugar') + @mock.patch('openstack_plugin_common.CinderClientWithSugar') + @mock.patch.object(volume, 'wait_until_status', return_value=(None, True)) + def test_detach(self, wait_until_status_m, cinder_m, nova_m): + volume_id = '00000000-0000-0000-0000-000000000000' + server_id = '11111111-1111-1111-1111-111111111111' + attachment_id = '22222222-2222-2222-2222-222222222222' + + attachment = {'id': attachment_id, + 'server_id': server_id, + 'volume_id': volume_id} + + volume_ctx = cfy_mocks.MockContext({ + 'node': cfy_mocks.MockContext({ + 'properties': {} + }), + 'instance': cfy_mocks.MockContext({ + 'runtime_properties': { + OPENSTACK_ID_PROPERTY: volume_id, + } + }) + }) + server_ctx = cfy_mocks.MockContext({ + 'node': cfy_mocks.MockContext({ + 'properties': {} + }), + 'instance': cfy_mocks.MockContext({ + 'runtime_properties': { + server.OPENSTACK_ID_PROPERTY: server_id + } + }) + }) + + ctx_m = self._mock(node_id='a', + target=server_ctx, + source=volume_ctx) + + attached_volume = mock.Mock(id=volume_id, + status=volume.VOLUME_STATUS_IN_USE, + attachments=[attachment]) + nova_instance = 
nova_m.return_value + cinder_instance = cinder_m.return_value + cinder_instance.volumes.get.return_value = attached_volume + + server.detach_volume(ctx=ctx_m, status_attempts=10, status_timeout=2) + + nova_instance.volumes.delete_server_volume.assert_called_once_with( + server_id, attachment_id) + volume.wait_until_status.assert_called_once_with( + cinder_client=cinder_instance, + volume_id=volume_id, + status=volume.VOLUME_STATUS_AVAILABLE, + num_tries=10, + timeout=2, + ) diff --git a/aria/multivim-plugin/cinder_plugin/volume.py b/aria/multivim-plugin/cinder_plugin/volume.py new file mode 100644 index 0000000000..168681b943 --- /dev/null +++ b/aria/multivim-plugin/cinder_plugin/volume.py @@ -0,0 +1,125 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+ +import time + +from cloudify import ctx +from cloudify.decorators import operation +from cloudify import exceptions as cfy_exc + +from openstack_plugin_common import (delete_resource_and_runtime_properties, + with_cinder_client, + get_resource_id, + transform_resource_name, + use_external_resource, + validate_resource, + COMMON_RUNTIME_PROPERTIES_KEYS, + OPENSTACK_AZ_PROPERTY, + OPENSTACK_ID_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + OPENSTACK_NAME_PROPERTY) +from glance_plugin.image import handle_image_from_relationship + +VOLUME_STATUS_CREATING = 'creating' +VOLUME_STATUS_DELETING = 'deleting' +VOLUME_STATUS_AVAILABLE = 'available' +VOLUME_STATUS_IN_USE = 'in-use' +VOLUME_STATUS_ERROR = 'error' +VOLUME_STATUS_ERROR_DELETING = 'error_deleting' +VOLUME_ERROR_STATUSES = (VOLUME_STATUS_ERROR, VOLUME_STATUS_ERROR_DELETING) + +# Note: The 'device_name' property should actually be a property of the +# relationship between a server and a volume; It'll move to that +# relationship type once relationship properties are better supported. 
+DEVICE_NAME_PROPERTY = 'device_name' + +VOLUME_OPENSTACK_TYPE = 'volume' + +RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS + + +@operation +@with_cinder_client +def create(cinder_client, status_attempts, status_timeout, args, **kwargs): + + if use_external_resource(ctx, cinder_client, VOLUME_OPENSTACK_TYPE, + 'name'): + return + + name = get_resource_id(ctx, VOLUME_OPENSTACK_TYPE) + volume_dict = {'name': name} + volume_dict.update(ctx.node.properties['volume'], **args) + handle_image_from_relationship(volume_dict, 'imageRef', ctx) + volume_dict['name'] = transform_resource_name( + ctx, volume_dict['name']) + + v = cinder_client.volumes.create(**volume_dict) + + ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = v.id + ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \ + VOLUME_OPENSTACK_TYPE + ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = \ + volume_dict['name'] + wait_until_status(cinder_client=cinder_client, + volume_id=v.id, + status=VOLUME_STATUS_AVAILABLE, + num_tries=status_attempts, + timeout=status_timeout, + ) + ctx.instance.runtime_properties[OPENSTACK_AZ_PROPERTY] = \ + v.availability_zone + + +@operation +@with_cinder_client +def delete(cinder_client, **kwargs): + delete_resource_and_runtime_properties(ctx, cinder_client, + RUNTIME_PROPERTIES_KEYS) + + +@with_cinder_client +def wait_until_status(cinder_client, volume_id, status, num_tries, + timeout): + for _ in range(num_tries): + volume = cinder_client.volumes.get(volume_id) + + if volume.status in VOLUME_ERROR_STATUSES: + raise cfy_exc.NonRecoverableError( + "Volume {0} is in error state".format(volume_id)) + + if volume.status == status: + return volume, True + time.sleep(timeout) + + ctx.logger.warning("Volume {0} current state: '{1}', " + "expected state: '{2}'".format(volume_id, + volume.status, + status)) + return volume, False + + +@with_cinder_client +def get_attachment(cinder_client, volume_id, server_id): + volume = 
cinder_client.volumes.get(volume_id) + for attachment in volume.attachments: + if attachment['server_id'] == server_id: + return attachment + + +@operation +@with_cinder_client +def creation_validation(cinder_client, **kwargs): + validate_resource(ctx, cinder_client, VOLUME_OPENSTACK_TYPE, + 'name') diff --git a/aria/multivim-plugin/circle.yml b/aria/multivim-plugin/circle.yml new file mode 100644 index 0000000000..2a2c66e88c --- /dev/null +++ b/aria/multivim-plugin/circle.yml @@ -0,0 +1,27 @@ +machine: + python: + version: 2.7.9 + +checkout: + post: + - > + if [ -n "$CI_PULL_REQUEST" ]; then + PR_ID=${CI_PULL_REQUEST##*/} + git fetch origin +refs/pull/$PR_ID/merge: + git checkout -qf FETCH_HEAD + fi + +dependencies: + override: + - pip install --upgrade tox virtualenv + +test: + override: + # - tox -e docs + - tox -e flake8 + - tox -e py27 + +# Docs artifacts +general: + artifacts: + - .tox/docs/tmp/html diff --git a/aria/multivim-plugin/dev-requirements.txt b/aria/multivim-plugin/dev-requirements.txt new file mode 100644 index 0000000000..fcb6a806cd --- /dev/null +++ b/aria/multivim-plugin/dev-requirements.txt @@ -0,0 +1,3 @@ +https://github.com/cloudify-cosmo/cloudify-dsl-parser/archive/3.4.1.zip +https://github.com/cloudify-cosmo/cloudify-rest-client/archive/3.4.1.zip +https://github.com/cloudify-cosmo/cloudify-plugins-common/archive/3.4.1.zip diff --git a/aria/multivim-plugin/docs/Makefile b/aria/multivim-plugin/docs/Makefile new file mode 100644 index 0000000000..1bff5a1115 --- /dev/null +++ b/aria/multivim-plugin/docs/Makefile @@ -0,0 +1,177 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# User-friendly check for sphinx-build +ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) +$(error The '$(SPHINXBUILD)' command was not found. 
Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) +endif + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext + +help: + @echo "Please use \`make <target>' where <target> is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make pseudoxml-XML files for display purposes" + @echo " linkcheck to check all external links for 
integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/cloudify-openstack-plugin.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/cloudify-openstack-plugin.qhc" + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/cloudify-openstack-plugin" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/cloudify-cli" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 
+ +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." 
+ +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." + +xml: + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +pseudoxml: + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." diff --git a/aria/multivim-plugin/docs/_static/.gitkeep b/aria/multivim-plugin/docs/_static/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/aria/multivim-plugin/docs/_static/.gitkeep diff --git a/aria/multivim-plugin/docs/changelog.rst b/aria/multivim-plugin/docs/changelog.rst new file mode 100644 index 0000000000..a5192b492c --- /dev/null +++ b/aria/multivim-plugin/docs/changelog.rst @@ -0,0 +1,7 @@ + + +Changelog +========= + +.. include:: ../CHANGELOG.txt + diff --git a/aria/multivim-plugin/docs/conf.py b/aria/multivim-plugin/docs/conf.py new file mode 100644 index 0000000000..3a829451d4 --- /dev/null +++ b/aria/multivim-plugin/docs/conf.py @@ -0,0 +1,301 @@ +# -*- coding: utf-8 -*- +# +# cloudify-openstack-plugin documentation build configuration file, created by +# sphinx-quickstart on Tue Nov 8 14:02:23 2016. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. 
+ +import sys +import os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.intersphinx', + 'sphinx.ext.todo', + 'sphinx.ext.viewcode', + 'sphinxify', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'cloudify-openstack-plugin' +copyright = u'2016-17 GigaSpaces Technologies Ltd.' +author = u'GigaSpaces Technologies Ltd.' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = u'2.0' +# The full version, including alpha/beta/rc tags. +release = u'2.0' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. 
+language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +#keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# html_theme = 'sphinx-rtd-theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. 
+# "<project> v<release> documentation" by default. +#html_title = u'cloudify-openstack-plugin v1.0a1' + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (relative to this directory) to use as a favicon of +# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +#html_extra_path = [] + +# If not None, a 'Last updated on:' timestamp is inserted at every page +# bottom, using the given strftime format. +# The empty string is equivalent to '%b %d, %Y'. +#html_last_updated_fmt = None + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 
+#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a <link> tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' +#html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# 'ja' uses this config value. +# 'zh' user can custom change `jieba` dictionary path. +#html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +#html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'cloudify-openstack-plugindoc' + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). +#'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). +#'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. +#'preamble': '', + +# Latex figure (float) alignment +#'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). 
+latex_documents = [ + (master_doc, 'cloudify-openstack-plugin.tex', u'cloudify-openstack-plugin Documentation', + u'GigaSpaces Technologies Ltd.', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'cloudify-openstack-plugin', u'cloudify-openstack-plugin Documentation', + [author], 1) +] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'cloudify-openstack-plugin', u'cloudify-openstack-plugin Documentation', + author, 'cloudify-openstack-plugin', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. 
+#texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = {'https://docs.python.org/2/': None} + + +# SCVersioning +scv_show_banner = True +scv_banner_greatest_tag = True diff --git a/aria/multivim-plugin/docs/configuration.rst b/aria/multivim-plugin/docs/configuration.rst new file mode 100644 index 0000000000..2dfb803885 --- /dev/null +++ b/aria/multivim-plugin/docs/configuration.rst @@ -0,0 +1,82 @@ +.. _config: + +Openstack Configuration +======================= + +The Openstack plugin requires credentials and endpoint setup information in order to authenticate and interact with Openstack. + +This information will be gathered by the plugin from the following sources, +each source possibly partially or completely overriding values gathered from previous ones: + +1. environment variables for each of the configuration parameters. +2. JSON file at ``~/openstack_config.json`` or at a path specified by the value of an environment variable named ``OPENSTACK_CONFIG_PATH`` +3. values specified in the ``openstack_config`` property for the node whose operation is currently getting executed (in the case of relationship operations, the ``openstack_config`` property of either the **source** or **target** nodes will be used if available, with the **source**'s one taking precedence). + +The structure of the JSON file in section (2), as well as of the ``openstack_config`` property in section (3), is as follows: + +.. highlight:: json + +:: + + { + "username": "", + "password": "", + "tenant_name": "", + "auth_url": "", + "region": "", + "nova_url": "", + "neutron_url": "", + "custom_configuration": "" + } + +* ``username`` username for authentication with Openstack Keystone service. +* ``password`` password for authentication with Openstack Keystone service. +* ``tenant_name`` name of the tenant to be used. +* ``auth_url`` URL of the Openstack Keystone service. + + .. 
attention:: New in 2.0 + + ``auth_url`` must include the full keystone auth URL, including the version number. + +* ``region`` Openstack region to be used. This may be optional when there's but a single region. +* ``nova_url`` (**DEPRECATED** - instead, use ``custom_configuration`` to pass ``endpoint_override`` directly to the Nova client) explicit URL for the Openstack Nova service. This may be used to override the URL for the Nova service that is listed in the Keystone service. +* ``neutron_url`` (**DEPRECATED** - instead, use ``custom_configuration`` to pass ``endpoint_url`` directly to the Neutron client) explicit URL for the Openstack Neutron service. This may be used to override the URL for the Neutron service that is listed in the Keystone service. +* ``custom_configuration`` a dictionary which allows overriding or directly passing custom configuration parameter to each of the Openstack clients, by using any of the relevant keys: ``keystone_client``, ``nova_client``, ``neutron_client`` or ``cinder_client``. + * Parameters passed directly to Openstack clients using the ``custom_configuration`` mechanism will override other definitions (e.g. any of the common Openstack configuration parameters listed above, such as ``username`` and ``tenant_name``) + * The following is an example for the usage of the ``custom_configuration`` section in a blueprint: + +.. highlight:: yaml + +:: + + custom_configuration: + nova_client: + endpoint_override: nova-endpoint-url + nova_specific_key_1: value_1 + nova_specific_key_2: value_2 + neutron_client: + endpoint_url: neutron-endpoint-url + keystone_client: + .. + cinder_client: + .. + + +The environment variables mentioned in (1) are the standard Openstack environment variables equivalent to the ones in the JSON file or ``openstack_config`` property. 
In their respective order, they are: + +* ``OS_USERNAME`` +* ``OS_PASSWORD`` +* ``OS_TENANT_NAME`` +* ``OS_AUTH_URL`` +* ``OS_REGION_NAME`` +* ``NOVACLIENT_BYPASS_URL`` +* ``OS_URL`` + +**Note**: ``custom_configuration`` doesn't have an equivalent standard Openstack environment variable. + + + The Openstack manager blueprint stores the Openstack configuration used for the bootstrap process in a JSON file as described in (2) at + ``~/openstack-config.json``. + Therefore, if they've been used for bootstrap, + the Openstack configuration for applications isn't required as the plugin will default to these same settings. + diff --git a/aria/multivim-plugin/docs/examples.rst b/aria/multivim-plugin/docs/examples.rst new file mode 100644 index 0000000000..4f36743494 --- /dev/null +++ b/aria/multivim-plugin/docs/examples.rst @@ -0,0 +1,338 @@ + +.. highlight:: yaml + +Examples +======== + +Example I +--------- + +This example will show how to use most of the types in this plugin, +as well as how to make the relationships between them. + +We'll see how to create a server with a security group set on it and a floating_ip associated to it, +on a subnet in a network. 
+ + +The following is an excerpt from the blueprint's `blueprint`.`nodes` section:: + + my_floating_ip: + type: cloudify.openstack.nodes.FloatingIP + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + args: + floating_network_name: Ext-Net + + + my_network: + type: cloudify.openstack.nodes.Network + properties: + resource_id: my_network_openstack_name + + + my_subnet: + type: cloudify.openstack.nodes.Subnet + properties: + resource_id: my_subnet_openstack_name + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + args: + cidr: 1.2.3.0/24 + ip_version: 4 + cloudify.interfaces.validation: + creation: + inputs: + args: + cidr: 1.2.3.0/24 + ip_version: 4 + relationships: + - target: my_network + type: cloudify.relationships.contained_in + + + my_security_group: + type: cloudify.openstack.nodes.SecurityGroup + properties: + resource_id: my_security_group_openstack_name + rules: + - remote_ip_prefix: 0.0.0.0/0 + port: 8080 + + + my_server: + type: cloudify.openstack.nodes.Server + properties: + resource_id: my_server_openstack_name + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + args: + image: 8672f4c6-e33d-46f5-b6d8-ebbeba12fa02 + flavor: 101 + cloudify.interfaces.validation: + creation: + inputs: + args: + image: 8672f4c6-e33d-46f5-b6d8-ebbeba12fa02 + flavor: 101 + relationships: + - target: my_network + type: cloudify.relationships.connected_to + - target: my_subnet + type: cloudify.relationships.depends_on + - target: my_floating_ip + type: cloudify.openstack.server_connected_to_floating_ip + - target: my_security_group + type: cloudify.openstack.server_connected_to_security_group + + +1. Creates a floating IP, whose node name is ``my_floating_ip``, and whose floating_network_name is ``Ext-Net`` (This value represents the name of the external network). +2. Creates a network, whose node name is ``my_network``, and whose name on Openstack is ``my_network_openstack_name``. +3. 
Creates a subnet, whose node name is ``my_subnet``, and whose name on Openstack is ``my_subnet_openstack_name``. The subnet's address range is defined to be 1.2.3.0 - 1.2.3.255 using the ``cidr`` parameter, and the subnet's IP version is set to version 4. The subnet will be set on the ``my_network_openstack_name`` network because of the relationship to the ``my_network`` node. +4. Creates a security_group, whose node name is ``my_security_group``, and whose name on Openstack is ``my_security_group_openstack_Name``. The security group is set with a single rule, which allows all traffic (since we use the address range ``0.0.0.0/0``) to port ``8080`` (default direction is *ingress*). +5. Creates a server, whose node name is ``my_server``, and whose name on openstack is ``my_server_openstack_name``. The server is set with an image and flavor IDs. The server is set with multiple relationships: + + - A relationship to the ``my_network`` node: Through this relationship, + the server will be automatically placed on the ``my_network_openstack_name`` network. + - A relationship to the ``my_subnet`` node: + This relationship is strictly for ensuring the order of creation is correct, + as the server requires the ``my_subnet_openstack_name`` subnet to exist before it can be created on it. + - A relationship to the ``my_floating_ip`` node: + This designated relationship type will take care of associating the server with the floating IP represented by the ``my_floating_ip`` node. + - A relationship with the ``my_security_group`` node: + This relationship will take care of setting the server up with the security group represented by the ``my_security_group`` node. + + +Example II +---------- + +This example will show how to use the ``router`` and ``port`` types, as well as some of the relationships that were missing from Example I. + +We'll see how to create a server connected to a port, where the port is set on a subnet in a network, and has a security group set on it. 
Finally, we'll see how this subnet connects to a router and from there to the external network. + + +The following is an excerpt from the blueprint's ``blueprint``.``node_templates`` section:: + + my_network: + type: cloudify.openstack.nodes.Network + properties: + resource_id: my_network_openstack_name + + + my_security_group: + type: cloudify.openstack.nodes.SecurityGroup + properties: + resource_id: my_security_group_openstack_name + rules: + - remote_ip_prefix: 0.0.0.0/0 + port: 8080 + + + my_subnet: + type: cloudify.openstack.nodes.Subnet + properties: + resource_id: my_subnet_openstack_name + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + args: + cidr: 1.2.3.0/24 + ip_version: 4 + cloudify.interfaces.validation: + creation: + inputs: + args: + cidr: 1.2.3.0/24 + ip_version: 4 + relationships: + - target: my_network + type: cloudify.relationships.contained_in + - target: my_router + type: cloudify.openstack.subnet_connected_to_router + + + my_port: + type: cloudify.openstack.nodes.Port + properties: + resource_id: my_port_openstack_name + relationships: + - target: my_network + type: cloudify.relationships.contained_in + - target: my_subnet + type: cloudify.relationships.depends_on + - target: my_security_group + type: cloudify.openstack.port_connected_to_security_group + + + my_router: + type: cloudify.openstack.nodes.Router + properties: + resource_id: my_router_openstack_Name + + + my_server: + type: cloudify.openstack.nodes.Server + properties: + cloudify_agent: + user: ubuntu + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + args: + image: 8672f4c6-e33d-46f5-b6d8-ebbeba12fa02 + flavor: 101 + cloudify.interfaces.validation: + creation: + inputs: + args: + image: 8672f4c6-e33d-46f5-b6d8-ebbeba12fa02 + flavor: 101 + relationships: + - target: my_port + type: cloudify.openstack.server_connected_to_port + + +1. Creates a network. See Example I for more information. + +2. Creates a security group. 
See Example I for more information. + +3. Creates a subnet. This is again similar to what we've done in Example I. The difference here is that the subnet has an extra relationship set towards a router. + +4. Creates a port, whose node name is ``my_port``, and whose name on Openstack is ``my_port_openstack_name``. The port is set with multiple relationships: + + - A relationship to the ``my_network`` node: Through this relationship, the port will be automatically placed on the ``my_network_openstack_name`` network. + - A relationship to the ``my_subnet`` node: This relationship is strictly for ensuring the order of creation is correct, as the port requires the ``my_subnet_openstack_name`` subnet to exist before it can be created on it. + - A relationship to the ``my_security_group`` node: This designated relationship type will take care of setting the ``my_security_group_openstack_name`` security group on the port. + +5. Creates a router, whose node name is ``my_router``, and whose name on Openstack is ``my_router_openstack_name``. The router will automatically have an interface in the external network. + +6. Creates a server, whose node name is ``my_server``, and whose name on Openstack is **the node's ID** (since no ``name`` parameter was supplied under the ``server`` property). The server is set with an image and flavor IDs. It also overrides the ``cloudify_agent`` property of its parent type to set the username that will be used to connect to the server for installing the Cloudify agent on it. Finally, it is set with a relationship to the ``my_port`` node: This designated relationship type will take care of connecting the server to ``my_port_openstack_name``. + + +Example III +----------- + +This example will show how to use the ``volume`` type, as well as ``volume_attached_to_server`` relationship. 
+ +The following is an excerpt from the blueprint's ``blueprint``.``node_templates`` section:: + + my_server: + type: cloudify.openstack.nodes.Server + properties: + cloudify_agent: + user: ubuntu + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + args: + image: 8672f4c6-e33d-46f5-b6d8-ebbeba12fa02 + flavor: 101 + cloudify.interfaces.validation: + creation: + inputs: + args: + image: 8672f4c6-e33d-46f5-b6d8-ebbeba12fa02 + flavor: 101 + + my_volume: + type: cloudify.openstack.nodes.Volume + properties: + resource_id: my_openstack_volume_name + device_name: /dev/vdb + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + args: + size: 1 + relationships: + - target: my_server + type: cloudify.openstack.volume_attached_to_server + + +1. Creates a server, with name ``my_server``, and with name on Openstack **the node's ID** (since no ``name`` parameter was supplied under the ``server`` property). The server is set with an image and flavor IDs. +2. Creates a volume. It is set with a relationship to the ``my_server`` node: This designated relationship type will take care of attaching the volume to Openstack server node. + + + +Example IV +---------- + +This example will show how to use a Windows server with a Cloudify agent on it. 
+ + +The following is an excerpt from the blueprint's ``blueprint``.``node_templates`` section:: + + my_keypair: + type: cloudify.openstack.nodes.KeyPair + properties: + private_key_path: /tmp/windows-test.pem + + my_server: + type: cloudify.openstack.nodes.WindowsServer + relationships: + - type: cloudify.openstack.server_connected_to_keypair + target: keypair + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + args: + server: + image: 8672f4c6-e33d-46f5-b6d8-ebbeba12fa02 + flavor: 101 + name: my-server + userdata: | + #ps1_sysnative + winrm quickconfig -q + winrm set winrm/config/winrs '@{MaxMemoryPerShellMB="300"}' + winrm set winrm/config '@{MaxTimeoutms="1800000"}' + winrm set winrm/config/service '@{AllowUnencrypted="true"}' + winrm set winrm/config/service/auth '@{Basic="true"}' + &netsh advfirewall firewall add rule name="WinRM 5985" protocol=TCP dir=in localport=5985 action=allow + &netsh advfirewall firewall add rule name="WinRM 5986" protocol=TCP dir=in localport=5986 action=allow + + msiexec /i https://www.python.org/ftp/python/2.7.6/python-2.7.6.msi TARGETDIR=C:\Python27 ALLUSERS=1 /qn + cloudify.interfaces.validation: + creation: + inputs: + args: + server: + image: 8672f4c6-e33d-46f5-b6d8-ebbeba12fa02 + flavor: 101 + name: my-server + userdata: | + #ps1_sysnative + winrm quickconfig -q + winrm set winrm/config/winrs '@{MaxMemoryPerShellMB="300"}' + winrm set winrm/config '@{MaxTimeoutms="1800000"}' + winrm set winrm/config/service '@{AllowUnencrypted="true"}' + winrm set winrm/config/service/auth '@{Basic="true"}' + &netsh advfirewall firewall add rule name="WinRM 5985" protocol=TCP dir=in localport=5985 action=allow + &netsh advfirewall firewall add rule name="WinRM 5986" protocol=TCP dir=in localport=5986 action=allow + + msiexec /i https://www.python.org/ftp/python/2.7.6/python-2.7.6.msi TARGETDIR=C:\Python27 ALLUSERS=1 /qn + cloudify.interfaces.worker_installer: + install: + inputs: + cloudify_agent: + user: Admin + password: { 
get_attribute: [SELF, password] } + + +1. Creates a keypair. the private key will be saved under ``/tmp/windows-test.pem``. +2. Creates a Windows server: + + * It is set with a relationship to the ``my_keypair`` node, which will make the server use the it as a public key for authentication, and also use this public key to encrypt its password before posting it to the Openstack metadata service. + * The worker-installer interface operations are given values for the user and password for the ``cloudify_agent`` input - the password uses the [get_attribute]({{< relref "blueprints/spec-intrinsic-functions.md#get-attribute" >}}) feature to retrieve the decrypted password from the Server's runtime properties (Note that in this example, only the ``install`` operation was given with this input, but all of the worker installer operations as well as the plugin installer operations should be given with it). + * We define custom userdata which configures WinRM and installs Python on the machine (Windows Server 2012 in this example) once it's up. This is required for the Cloudify agent to be installed on the machine. + + diff --git a/aria/multivim-plugin/docs/index.rst b/aria/multivim-plugin/docs/index.rst new file mode 100644 index 0000000000..dc229f790b --- /dev/null +++ b/aria/multivim-plugin/docs/index.rst @@ -0,0 +1,68 @@ +.. cloudify-cli documentation master file, created by + sphinx-quickstart on Thu Jun 12 15:30:03 2014. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Cloudify Openstack Plugin +========================= + +The OpenStack plugin allows users to use an OpenStack based cloud infrastructure for deploying services and applications. +For more information about OpenStack, please refer to: https://www.openstack.org/. + + +Contents: + +.. 
toctree:: + :maxdepth: 2 + + configuration + types + nova-net + examples + misc + changelog + + +Plugin Requirements +------------------- + +* Python versions: + + * 2.7.x +* If the plugin is installed from source, + then the following system dependencies are required: + + * ``gcc`` + * ``gcc-c++`` + * ``python-devel`` + + +Compatibility +------------- + +* *Mitaka* official support +* *Liberty* official support +* *Kilo* official support +* *Juno*, *Icehouse* previously supported, not currently tested. + +.. attention:: New in 2.0 + + The full Keystone URL in :ref:`config` is now required in the ``openstack_config`` ``auth_url`` property: eg ``http://192.0.2.200:5000/v2.0`` or ``http://192.0.2.200:5000/v3``. + +The Openstack plugin uses various Openstack clients packages. The versions used in Openstack Plugin are as follows: + +* `keystoneauth1 <https://github.com/openstack/keystoneauth>`_ - 2.12.1 +* `Keystone client <https://github.com/openstack/python-keystoneclient>`_ - 3.5.0 +* `Nova client <https://github.com/openstack/python-novaclient>`_ - 7.0.0 +* `Neutron client <https://github.com/openstack/python-neutronclient>`_ - 6.0.0 +* `Cinder client <https://github.com/openstack/python-cinderclient>`_ - 1.9.0 +* `Glance client <https://github.com/openstack/python-glanceclient>`_ - 2.5.0 + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + diff --git a/aria/multivim-plugin/docs/misc.rst b/aria/multivim-plugin/docs/misc.rst new file mode 100644 index 0000000000..7ba5c84907 --- /dev/null +++ b/aria/multivim-plugin/docs/misc.rst @@ -0,0 +1,121 @@ + +.. highlight:: yaml + +Tips +==== + +* It is highly recommended to **ensure that Openstack names are unique** (for a given type): While Openstack allows for same name objects, having identical names for objects of the same type might lead to ambiguities and errors. 
+ +* To set up DNS servers for Openstack servers (whether it's the Cloudify Manager or application VMs), one may use the Openstack ``dns_nameservers`` parameter for the [Subnet type](#cloudifyopenstacknodessubnet) - that is, pass the parameter directly to Neutron by using the ``args`` input of the operations in Subnet node, e.g.:: + + my_subnet_node: + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + args: + dns_nameservers: [1.2.3.4] + cloudify.interfaces.validation: + creation: + inputs: + args: + dns_nameservers: [1.2.3.4] + + This will set up ``1.2.3.4`` as the DNS server for all servers on this subnet. + +* Public keys, unlike the rest of the Openstack resources, are user-based rather than tenant-based. When errors indicate a missing keypair, make sure you're using the correct user rather than tenant. + +* ICMP rules show up on Horizon (Openstack GUI) as ones defined using ``type`` and ``code`` fields, rather than a port range. However, in the actual Neutron (and Nova, in case of Nova-net security groups) service, these fields are represented using the standard port range fields (i.e., ``type`` and ``code`` correspond to ``port_range_min`` and ``port_range_max`` (respectively) on Neutron security groups, and to ``from_port`` and ``to_port`` (respectively) on Nova-net security groups). + + ** For example, to set a security group rule which allows **ping** from anywhere, the following setting may be declared in the blueprint: + * ``protocol``: ``icmp`` + * ``port_range_min``: ``0`` (type) + * ``port_range_max``: ``0`` (code) + * ``remote_ip_prefix``: ``0.0.0.0/0`` + +* To use Openstack Neutron's ML2 extensions, use the ``args`` input for the Network's ``create`` operation. For example, the `provider network <http://developer.openstack.org/api-ref-networking-v2-ext.html#createProviderNetwork>`_ may be set in the following way:: + + my_network: + type: cloudify.openstack.nodes.Network + ... 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + args: + # Note that for this parameter to work, OpenStack must be configured to use Neutron's ML2 extensions + provider:network_type: vxlan + +* Ordering NICs in the Openstack plugin can be done in the 1.4 version of the Openstack plugin by simply stating the relationships to the various networks (or ports) in the desired order, e.g.:: + + node_templates: + server: + type: cloudify.openstack.nodes.Server + relationships: + - target: network1 + type: cloudify.relationships.connected_to + - target: network2 + type: cloudify.relationships.connected_to + + network1: + type: cloudify.openstack.nodes.Network + properties: + resource_id: network1 + + network2: + type: cloudify.openstack.nodes.Network + properties: + resource_id: network2 + + In the example above, network1 will be connected to a NIC preceding the one network2 will - however these wont be eth0/eth1, but rather eth1/eth2 - because by default, the management network will be prepended to the networks list (i.e. it'll be assigned to eth0). + To avoid this prepending, one should explicitly declare a relationship to the management network, where the network's represented in the blueprint by an existing resource (using the "use_external_resource" property). + This will cause the management network adhere the NICs ordering as the rest of them. 
+ Example:: + + node_templates: + server: + type: cloudify.openstack.nodes.Server + properties: + management_network_name: network2 + relationships: + - target: network1 + type: cloudify.relationships.connected_to + - target: network2 + type: cloudify.relationships.connected_to + - target: network3 + type: cloudify.relationships.connected_to + + network1: + type: cloudify.openstack.nodes.Network + properties: + resource_id: network1 + + network2: + type: cloudify.openstack.nodes.Network + properties: + use_external_resource: true + resource_id: network2 + + network3: + type: cloudify.openstack.nodes.Network + properties: + use_external_resource: true + resource_id: network3 + + In this example, "network2" represents the management network, yet it'll be connected to eth1, while "network1" will take eth0, and "network3" (which also happened to already exist) will get connected to eth2. + + The server's property "management_network_name: network2" is not mandatory for this to work - this was just to make the example clear - yet the management network can also be inferred from the provider context (which is what happens when this property isn't explicitly set). Were the provider context to have "network2" set as the management network, this example would've worked just the same with this property omitted. + +Misc +==== + +* The plugin's operations are each **transactional** + (and therefore also retryable on failures), + yet not **idempotent**. + Attempting to execute the same operation twice is likely to fail. + +* Over this documentation, it's been mentioned multiple times that some configuration-saving information may be available in the Provider Context. + The Openstack manager blueprint and Openstack provider both create this relevant information, + and therefore if either was used for bootstrapping, the Provider Context will be available for the Openstack plugin to use. 
+ +The exact details of the structure of the Openstack Provider Context are not documented since this feature is going through deprecation and will be replaced with a more advanced one. diff --git a/aria/multivim-plugin/docs/nova-net.rst b/aria/multivim-plugin/docs/nova-net.rst new file mode 100644 index 0000000000..dccf360c73 --- /dev/null +++ b/aria/multivim-plugin/docs/nova-net.rst @@ -0,0 +1,48 @@ + +Nova-net Support +================ + +The Openstack plugin includes support for Nova-net mode - +i.e. an Openstack installation which does not have the Networking API +(Neutron service). + +In such an environment, there is but a single preconfigured private network, +which all servers make use of automatically. +There are no subnets, networks, routers or ports. +Since these resource types don't exist, +the plugin's equivalent types aren't valid to use in such an environment. + +There are, however, some resource types whose API is available via both the Nova and Neutron services - These had originally been on the Nova service, +and later were moved and got extended implementation in the Neutron one, +but were also kept in the Nova service for backward compatibility. + +For these resource types, the Openstack plugin defines two separate types - one in the plugin's standard types namespace (``cloudify.openstack.nodes.XXX``), +which uses the newer and extended API via the Neutron service; +and Another in a special namespace (``cloudify.openstack.nova_net.nodes.XXX``), +which uses the older API via the Nova service. +This is why you may notice two separate types defined for [Floating](#cloudifyopenstacknodesfloatingip) [IP](#cloudifyopenstacknovanetnodesfloatingip), +as well as for [Security](#cloudifyopenstacknodessecuritygroup) [Group](#cloudifyopenstacknovanetnodessecuritygroup). 
+ + +To summarize, ensure that when working in a Nova-net Openstack environment, +Neutron types aren't used - these include all types whose resources' APIs are natively available only via the Network API, +as well as the types which are in the ``cloudify.openstack.nova_net.Nodes`` namespace. + +On the opposite side, when using an Openstack environment which supports Neutron, +it's recommended to use the Neutron-versions of the relevant types +(i.e. avoid any types defined under the +``cloudify.openstack.nova_net.Nodes`` namespace), +as they offer more advanced capabilities. +However, it's important to mention that this is not required, +and using the Nova-versions of some types in a Neutron-enabled environment is possible and will work as well. + + +Nova-net Node Types +------------------- + + +.. cfy:node:: cloudify.openstack.nova_net.nodes.FloatingIP + + +.. cfy:node:: cloudify.openstack.nova_net.nodes.SecurityGroup + diff --git a/aria/multivim-plugin/docs/requirements.txt b/aria/multivim-plugin/docs/requirements.txt new file mode 100644 index 0000000000..07de519be6 --- /dev/null +++ b/aria/multivim-plugin/docs/requirements.txt @@ -0,0 +1 @@ +git+https://github.com/cloudify-cosmo/sphinxify.git diff --git a/aria/multivim-plugin/docs/types.rst b/aria/multivim-plugin/docs/types.rst new file mode 100644 index 0000000000..1b02757696 --- /dev/null +++ b/aria/multivim-plugin/docs/types.rst @@ -0,0 +1,188 @@ + +.. highlight:: yaml + +Types +^^^^^ + +Node Types +========== + +.. cfy:node:: cloudify.openstack.nodes.Server + + An OpenStack server. + + +.. cfy:node:: cloudify.openstack.nodes.WindowsServer + + This type has the same properties and operations-mapping as the type above (as it derives from it), yet it overrides some of the agent and plugin installations operations-mapping derived from the built-in cloudify.nodes.Compute type. Use this type when working with a Windows server. 
+ + Additionally, the default value for the use_password property is overridden for this type, and is set to true. When using an image with a preset password, it should be modified to false. + + +.. cfy:node:: cloudify.openstack.nodes.KeyPair + + +.. cfy:node:: cloudify.openstack.nodes.Image + + +.. cfy:node:: cloudify.openstack.nodes.SecurityGroup + + +.. cfy:node:: cloudify.openstack.nodes.Router + + +.. cfy:node:: cloudify.openstack.nodes.Port + + +.. cfy:node:: cloudify.openstack.nodes.Network + + +.. cfy:node:: cloudify.openstack.nodes.Subnet + + +.. cfy:node:: cloudify.openstack.nodes.FloatingIP + + +.. cfy:node:: cloudify.openstack.nodes.Volume + + +.. cfy:node:: cloudify.openstack.nodes.Project + + +Types' Common Behaviors +======================= + +Validations +----------- + +All types offer the same base functionality for the ``cloudify.interfaces.validation.creation`` interface operation: + + * If it's a new resource (``use_external_resource`` is set to ``false``), the basic validation is to verify there's enough quota to allocate a new resource of the given type. + + * When [using an existing resource](#using-existing-resources), the validation ensures the resource indeed exists. + + +Runtime Properties +------------------ + +Node instances of any of the types defined in this plugin get set with the following runtime properties during the ``cloudify.interfaces.lifecycle.create`` operation: + + * ``external_id`` the Openstack ID of the resource + * ``external_type`` the Openstack type of the resource + * ``external_name`` the Openstack name of the resource + +The only exceptions are the two *floating-ip* types - Since floating-ip objects on Openstack don't have a name, the ``external_name`` runtime property is replaced with the ``floating_ip_address`` one, which holds the object's actual IP address. + + +Default Resource Naming Convention +---------------------------------- + +When creating a new resource (i.e. 
``use_external_resource`` is set to ``false``), its name on Openstack will be the value of its ``resource_id`` property. However, if this value is not provided, the name will default to the following schema: + +``<openstack-resource-type>_<deployment-id>_<node-instance-id>`` + +For example, if a server node is defined as so:: + + node_templates: + myserver: + type: cloudify.openstack.nodes.Server + ... + +Yet without setting the ``resource_id`` property, then the server's name on Openstack will be ``server_my-deployment_myserver_XXXXX`` (where the XXXXX is the autogenerated part of the node instance's ID). + + + +Using Existing Resources +------------------------ + +It is possible to use existing resources on Openstack - whether these have been created by a different Cloudify deployment or not via Cloudify at all. + +All Cloudify Openstack types have a property named ``use_external_resource``, whose default value is ``false``. When set to ``true``, the plugin will apply different semantics for each of the operations executed on the relevant node's instances. Specifically, in the case of the ``cloudify.interfaces.lifecycle.create`` operation, rather than creating a new resource on Openstack of the given type, the plugin will behave as follows: + +1. Try to find an existing resource on Openstack whose name (or IP, in the case of one of the **floating-ip** types) is the value specified for the ``resource_id`` property. If more than one is found, an error is raised. + +2. If no resource was found, the plugin will use the value of the ``resource_id`` property to look for the resource by ID instead. If a resource still isn't found, an error is raised. + +3. If a single resource was found, the plugin will use that resource, and set the node instance with the appropriate runtime properties according to the resource's data. 
+ + +The semantics of other operations are affected as well: + +* The ``cloudify.interfaces.lifecycle.start`` operation, where applicable, will only validate that the resource is indeed started, raising an error if it isn't. + +* The ``cloudify.interfaces.lifecycle.stop`` operation, where applicable, won't have any effect. + +* The ``cloudify.interfaces.lifecycle.delete`` operation will not actually delete the resource from Openstack (but will clear the runtime properties from the node instance). + +* The ``cloudify.interfaces.validation.creation`` operation will verify that a resource with the given name or ID indeed exists, or otherwise print a list of all available resources of the given type. + +* The ``cloudify.interfaces.relationship_lifecycle.establish`` operation will behave as normal if the related node is not set with ``use_external_resource`` as ``true``; However if both nodes have this property set to ``true``, the operation will only attempt to verify that they're indeed "connected" on Openstack as well ("connected" in this case also refers to a security-group imposed on a server, floating-ip associated with a server, etc.). + + +Notes +----- + +* As mentioned in the [Relationships section](#relationships), some relationships take effect in non-relationship operations. When ``use_external_resource`` is set to ``true``, the existence of such connections is validated as well. + +* Using an existing resource only makes sense for single-instance nodes. + + + + +Relationships +============= + + Not all relationships have built-in types + (i.e., some types may simply get connected using standard Cloudify relationships such as ``cloudify.relationships.connected_to``). + + Some relationships take effect in non-relationship operations, + e.g. 
a subnet which is connected to a network actually gets connected on subnet's creation + (in the ``cloudify.interfaces.lifecycle.create`` operation) + and not in a ``cloudify.interfaces.relationship_lifecycle.establish`` operation - this occurs whenever the connection information is required on resource creation. + + +.. cfy:rel:: cloudify.openstack.server_connected_to_port + + A relationship for connecting a server to a port. The server will use this relationship to automatically connect to the port upon server creation. + + +.. cfy:rel:: cloudify.openstack.port_connected_to_security_group + + A relationship for a port to a security group. + + +.. cfy:rel:: cloudify.openstack.server_connected_to_keypair + + +.. cfy:rel:: cloudify.openstack.port_connected_to_subnet + + A relationship for connecting a port to a subnet. This is useful when a network has multiple subnets, and a port should belong to a specific subnet on that network. The port will then receive some IP from that given subnet. + + Note that when using this relationship in combination with the port type's property `fixed_ip`, the IP given should be on the CIDR of the subnet connected to the port. + + *Note*: This relationship has no operations associated with it; The port will use this relationship to automatically connect to the subnet upon port creation. + + +.. cfy:rel:: cloudify.openstack.server_connected_to_security_group + + A relationship for setting a security group on a server. + + +.. cfy:rel:: cloudify.openstack.subnet_connected_to_router + + A relationship for connecting a subnet to a router. + + +.. cfy:rel:: cloudify.openstack.port_connected_to_floating_ip + + A relationship for associating a floating ip with a port. If that port is later connected to a server, the server will be accessible via the floating IP. + + +.. cfy:rel:: cloudify.openstack.server_connected_to_floating_ip + + A relationship for associating a floating ip with a server. + + +.. 
cfy:rel:: cloudify.openstack.volume_attached_to_server + + A relationship for attaching a volume to a server. diff --git a/aria/multivim-plugin/glance_plugin/__init__.py b/aria/multivim-plugin/glance_plugin/__init__.py new file mode 100644 index 0000000000..809f033a55 --- /dev/null +++ b/aria/multivim-plugin/glance_plugin/__init__.py @@ -0,0 +1,14 @@ +######### +# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. diff --git a/aria/multivim-plugin/glance_plugin/image.py b/aria/multivim-plugin/glance_plugin/image.py new file mode 100644 index 0000000000..a8d5b203f4 --- /dev/null +++ b/aria/multivim-plugin/glance_plugin/image.py @@ -0,0 +1,177 @@ +######### +# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+import httplib +from urlparse import urlparse + +from cloudify import ctx +from cloudify.decorators import operation +from cloudify.exceptions import NonRecoverableError + +from openstack_plugin_common import ( + with_glance_client, + get_resource_id, + use_external_resource, + get_openstack_ids_of_connected_nodes_by_openstack_type, + delete_resource_and_runtime_properties, + validate_resource, + COMMON_RUNTIME_PROPERTIES_KEYS, + OPENSTACK_ID_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + OPENSTACK_NAME_PROPERTY) + + +IMAGE_OPENSTACK_TYPE = 'image' +IMAGE_STATUS_ACTIVE = 'active' + +RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS +REQUIRED_PROPERTIES = ['container_format', 'disk_format'] + + +@operation +@with_glance_client +def create(glance_client, **kwargs): + if use_external_resource(ctx, glance_client, IMAGE_OPENSTACK_TYPE): + return + + img_dict = { + 'name': get_resource_id(ctx, IMAGE_OPENSTACK_TYPE) + } + _validate_image_dictionary() + img_properties = ctx.node.properties['image'] + img_dict.update({key: value for key, value in img_properties.iteritems() + if key != 'data'}) + img = glance_client.images.create(**img_dict) + img_path = img_properties.get('data', '') + img_url = ctx.node.properties.get('image_url') + try: + _validate_image() + if img_path: + with open(img_path, 'rb') as image_file: + glance_client.images.upload( + image_id=img.id, + image_data=image_file) + elif img_url: + img = glance_client.images.add_location(img.id, img_url, {}) + + except: + _remove_protected(glance_client) + glance_client.images.delete(image_id=img.id) + raise + + ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = img.id + ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \ + IMAGE_OPENSTACK_TYPE + ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = img.name + + +def _get_image_by_ctx(glance_client, ctx): + return glance_client.images.get( + image_id=ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY]) + + +@operation +@with_glance_client +def 
start(glance_client, start_retry_interval, **kwargs): + img = _get_image_by_ctx(glance_client, ctx) + if img.status != IMAGE_STATUS_ACTIVE: + return ctx.operation.retry( + message='Waiting for image to get uploaded', + retry_after=start_retry_interval) + + +@operation +@with_glance_client +def delete(glance_client, **kwargs): + _remove_protected(glance_client) + delete_resource_and_runtime_properties(ctx, glance_client, + RUNTIME_PROPERTIES_KEYS) + + +@operation +@with_glance_client +def creation_validation(glance_client, **kwargs): + validate_resource(ctx, glance_client, IMAGE_OPENSTACK_TYPE) + _validate_image_dictionary() + _validate_image() + + +def _validate_image_dictionary(): + img = ctx.node.properties['image'] + missing = '' + try: + for prop in REQUIRED_PROPERTIES: + if prop not in img: + missing += '{0} '.format(prop) + except TypeError: + missing = ' '.join(REQUIRED_PROPERTIES) + if missing: + raise NonRecoverableError('Required properties are missing: {' + '0}. Please update your image ' + 'dictionary.'.format(missing)) + + +def _validate_image(): + img = ctx.node.properties['image'] + img_path = img.get('data') + img_url = ctx.node.properties.get('image_url') + if not img_url and not img_path: + raise NonRecoverableError('Neither image url nor image path was ' + 'provided') + if img_url and img_path: + raise NonRecoverableError('Multiple image sources provided') + if img_url: + _check_url(img_url) + if img_path: + _check_path() + + +def _check_url(url): + p = urlparse(url) + conn = httplib.HTTPConnection(p.netloc) + conn.request('HEAD', p.path) + resp = conn.getresponse() + if resp.status >= 400: + raise NonRecoverableError('Invalid image URL') + + +def _check_path(): + img = ctx.node.properties['image'] + img_path = img.get('data') + try: + with open(img_path, 'rb'): + pass + except TypeError: + if not img.get('url'): + raise NonRecoverableError('No path or url provided') + except IOError: + raise NonRecoverableError( + 'Unable to open image file with 
path: "{}"'.format(img_path)) + + +def _remove_protected(glance_client): + if use_external_resource(ctx, glance_client, IMAGE_OPENSTACK_TYPE): + return + + is_protected = ctx.node.properties['image'].get('protected', False) + if is_protected: + img_id = ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + glance_client.images.update(img_id, protected=False) + + +def handle_image_from_relationship(obj_dict, property_name_to_put, ctx): + images = get_openstack_ids_of_connected_nodes_by_openstack_type( + ctx, IMAGE_OPENSTACK_TYPE) + if images: + obj_dict.update({property_name_to_put: images[0]}) diff --git a/aria/multivim-plugin/glance_plugin/tests/resources/test-image-start.yaml b/aria/multivim-plugin/glance_plugin/tests/resources/test-image-start.yaml new file mode 100644 index 0000000000..12c9aa79b7 --- /dev/null +++ b/aria/multivim-plugin/glance_plugin/tests/resources/test-image-start.yaml @@ -0,0 +1,30 @@ + +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml + - plugin.yaml + +inputs: + use_password: + type: boolean + default: false + +node_templates: + image: + type: cloudify.openstack.nodes.Image + properties: + image: + disk_format: test_format + container_format: test_format + data: test_path + openstack_config: + username: aaa + password: aaa + tenant_name: aaa + auth_url: aaa + interfaces: + cloudify.interfaces.lifecycle: + start: + inputs: + start_retry_interval: 1 diff --git a/aria/multivim-plugin/glance_plugin/tests/test.py b/aria/multivim-plugin/glance_plugin/tests/test.py new file mode 100644 index 0000000000..4a88cba4e7 --- /dev/null +++ b/aria/multivim-plugin/glance_plugin/tests/test.py @@ -0,0 +1,148 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. + +import mock +import os +import tempfile +import unittest + +import glance_plugin +from glance_plugin import image + +from cloudify.mocks import MockCloudifyContext +from cloudify.test_utils import workflow_test +from cloudify.exceptions import NonRecoverableError + + +def ctx_mock(image_dict): + return MockCloudifyContext( + node_id='d', + properties=image_dict) + + +class TestCheckImage(unittest.TestCase): + + @mock.patch('glance_plugin.image.ctx', + ctx_mock({'image': {}})) + def test_check_image_no_file_no_url(self): + # Test if it throws exception no file & no url + self.assertRaises(NonRecoverableError, + image._validate_image) + + @mock.patch('glance_plugin.image.ctx', + ctx_mock({'image_url': 'test-url', 'image': {'data': '.'}})) + def test_check_image_and_url(self): + # Test if it throws exception file & url + self.assertRaises(NonRecoverableError, + image._validate_image) + + @mock.patch('glance_plugin.image.ctx', + ctx_mock({'image_url': 'test-url', 'image': {}})) + def test_check_image_url(self): + # test if it passes no file & url + http_connection_mock = mock.MagicMock() + http_connection_mock.return_value.getresponse.return_value.status = 200 + with mock.patch('httplib.HTTPConnection', http_connection_mock): + glance_plugin.image._validate_image() + + def test_check_image_file(self): + # test if it passes file & no url + image_file_path = tempfile.mkstemp()[1] + with mock.patch('glance_plugin.image.ctx', + ctx_mock({'image': {'data': image_file_path}})): + glance_plugin.image._validate_image() + + 
@mock.patch('glance_plugin.image.ctx', + ctx_mock({'image': {'data': '/test/path'}})) + # test when open file throws IO error + def test_check_image_bad_file(self): + open_name = '%s.open' % __name__ + with mock.patch(open_name, create=True) as mock_open: + mock_open.side_effect = [mock_open(read_data='Data').return_value] + self.assertRaises(NonRecoverableError, + glance_plugin.image._validate_image) + + @mock.patch('glance_plugin.image.ctx', + ctx_mock({'image_url': '?', 'image': {}})) + # test when bad url + def test_check_image_bad_url(self): + http_connection_mock = mock.MagicMock() + http_connection_mock.return_value.getresponse.return_value.status = 400 + with mock.patch('httplib.HTTPConnection', http_connection_mock): + self.assertRaises(NonRecoverableError, + glance_plugin.image._validate_image) + + +class TestValidateProperties(unittest.TestCase): + + @mock.patch('glance_plugin.image.ctx', + ctx_mock({'image': {'container_format': 'bare'}})) + def test_check_image_container_format_no_disk_format(self): + # Test if it throws exception no file & no url + self.assertRaises(NonRecoverableError, + image._validate_image_dictionary) + + @mock.patch('glance_plugin.image.ctx', + ctx_mock({'image': {'disk_format': 'qcow2'}})) + def test_check_image_no_container_format_disk_format(self): + # Test if it throws exception no container_format & disk_format + self.assertRaises(NonRecoverableError, + image._validate_image_dictionary) + + @mock.patch('glance_plugin.image.ctx', + ctx_mock({'image': {}})) + def test_check_image_no_container_format_no_disk_format(self): + # Test if it throws exception no container_format & no disk_format + self.assertRaises(NonRecoverableError, + image._validate_image_dictionary) + + @mock.patch('glance_plugin.image.ctx', + ctx_mock( + {'image': + {'container_format': 'bare', + 'disk_format': 'qcow2'}})) + def test_check_image_container_format_disk_format(self): + # Test if it do not throw exception container_format & disk_format + 
image._validate_image_dictionary() + + +class TestStartImage(unittest.TestCase): + blueprint_path = os.path.join('resources', + 'test-image-start.yaml') + + @mock.patch('glance_plugin.image.create') + @workflow_test(blueprint_path, copy_plugin_yaml=True) + def test_image_lifecycle_start(self, cfy_local, *_): + test_vars = { + 'counter': 0, + 'image': mock.MagicMock() + } + + def _mock_get_image_by_ctx(*_): + i = test_vars['image'] + if test_vars['counter'] == 0: + i.status = 'different image status' + else: + i.status = glance_plugin.image.IMAGE_STATUS_ACTIVE + test_vars['counter'] += 1 + return i + + with mock.patch('openstack_plugin_common.GlanceClient'): + with mock.patch('glance_plugin.image._get_image_by_ctx', + side_effect=_mock_get_image_by_ctx): + cfy_local.execute('install', task_retries=3) + + self.assertEqual(2, test_vars['counter']) + self.assertEqual(0, test_vars['image'].start.call_count) diff --git a/aria/multivim-plugin/keystone_plugin/__init__.py b/aria/multivim-plugin/keystone_plugin/__init__.py new file mode 100644 index 0000000000..809f033a55 --- /dev/null +++ b/aria/multivim-plugin/keystone_plugin/__init__.py @@ -0,0 +1,14 @@ +######### +# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
diff --git a/aria/multivim-plugin/keystone_plugin/project.py b/aria/multivim-plugin/keystone_plugin/project.py new file mode 100644 index 0000000000..223ffbbb5c --- /dev/null +++ b/aria/multivim-plugin/keystone_plugin/project.py @@ -0,0 +1,150 @@ +######### +# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. + +from cloudify import ctx +from cloudify.decorators import operation +from cloudify.exceptions import NonRecoverableError + +from openstack_plugin_common import (with_keystone_client, + with_nova_client, + with_cinder_client, + with_neutron_client, + get_resource_id, + use_external_resource, + delete_resource_and_runtime_properties, + validate_resource, + COMMON_RUNTIME_PROPERTIES_KEYS, + OPENSTACK_ID_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + OPENSTACK_NAME_PROPERTY) + + +PROJECT_OPENSTACK_TYPE = 'project' + +TENANT_QUOTA_TYPE = 'quota' + +RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS + + +@operation +@with_keystone_client +def create(keystone_client, **kwargs): + if use_external_resource(ctx, keystone_client, PROJECT_OPENSTACK_TYPE): + return + + project_dict = { + 'name': get_resource_id(ctx, PROJECT_OPENSTACK_TYPE), + 'domain': 'default' + } + + project_dict.update(ctx.node.properties['project']) + project = keystone_client.projects.create(**project_dict) + + ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = project.id + 
ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \ + PROJECT_OPENSTACK_TYPE + ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = project.name + + +@operation +@with_keystone_client +@with_nova_client +@with_cinder_client +@with_neutron_client +def start(keystone_client, nova_client, cinder_client, neutron_client, + **kwargs): + project_id = ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + users = ctx.node.properties['users'] + validate_users(users, keystone_client) + + assign_users(project_id, users, keystone_client) + + quota = ctx.node.properties[TENANT_QUOTA_TYPE] + update_quota(project_id, quota, nova_client, 'nova') + update_quota(project_id, quota, neutron_client, 'neutron') + update_quota(project_id, quota, cinder_client, 'cinder') + + +@operation +@with_keystone_client +@with_nova_client +@with_cinder_client +@with_neutron_client +def delete(keystone_client, nova_client, cinder_client, + neutron_client, **kwargs): + tenant_id = ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + quota = ctx.node.properties[TENANT_QUOTA_TYPE] + delete_quota(tenant_id, quota, nova_client, 'nova') + delete_quota(tenant_id, quota, neutron_client, 'neutron') + delete_quota(tenant_id, quota, cinder_client, 'cinder') + delete_resource_and_runtime_properties(ctx, keystone_client, + RUNTIME_PROPERTIES_KEYS) + + +@operation +@with_keystone_client +def creation_validation(keystone_client, **kwargs): + validate_resource(ctx, keystone_client, PROJECT_OPENSTACK_TYPE) + + +def assign_users(project_id, users, keystone_client): + for user in users: + roles = user['roles'] + u = keystone_client.users.find(name=user['name']) + for role in roles: + r = keystone_client.roles.find(name=role) + keystone_client.roles.grant(user=u.id, + project=project_id, + role=r.id) + + +def validate_users(users, keystone_client): + user_names = [user['name'] for user in users] + if len(user_names) > len(set(user_names)): + raise NonRecoverableError('Users are not unique') + + for 
user_name in user_names: + keystone_client.users.find(name=user_name) + + for user in users: + if len(user['roles']) > len(set(user['roles'])): + msg = 'Roles for user {} are not unique' + raise NonRecoverableError(msg.format(user['name'])) + + role_names = {role for user in users for role in user['roles']} + for role_name in role_names: + keystone_client.roles.find(name=role_name) + + +def update_quota(tenant_id, quota, client, what_quota): + updated_quota = quota.get(what_quota) + if updated_quota: + if what_quota == 'neutron': + new_quota = client.update_quota(tenant_id=tenant_id, + body={'quota': updated_quota}) + else: + new_quota = client.quotas.update(tenant_id=tenant_id, + **updated_quota) + ctx.logger.info( + 'Updated {0} quota: {1}'.format(what_quota, str(new_quota))) + + +def delete_quota(project_id, quota, client, what_quota): + deleting_quota = quota.get(what_quota) + if deleting_quota: + if what_quota == 'neutron': + client.delete_quota(tenant_id=project_id) + else: + client.quotas.delete(tenant_id=project_id) diff --git a/aria/multivim-plugin/keystone_plugin/tests/__init__.py b/aria/multivim-plugin/keystone_plugin/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/aria/multivim-plugin/keystone_plugin/tests/__init__.py diff --git a/aria/multivim-plugin/keystone_plugin/tests/test.py b/aria/multivim-plugin/keystone_plugin/tests/test.py new file mode 100644 index 0000000000..de6567ba3a --- /dev/null +++ b/aria/multivim-plugin/keystone_plugin/tests/test.py @@ -0,0 +1,115 @@ +import mock +import unittest + +from cloudify.context import NODE_INSTANCE + +from cloudify.mocks import ( + MockContext, + MockNodeInstanceContext, + MockNodeContext +) +from openstack_plugin_common import ( + OPENSTACK_ID_PROPERTY, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY + ) +from keystone_plugin.project import PROJECT_OPENSTACK_TYPE +import keystone_plugin + + +class TestProject(unittest.TestCase): + + test_id = 'test-id' + 
test_name = 'test-name' + test_deployment_id = 'test-deployment-id' + test_user = 'test-user' + test_role = 'test-role' + + class MockProjectOS: + def __init__(self, id, name): + self._id = id + self._name = name + self._users = {} + + @property + def id(self): + return self._id + + @property + def name(self): + return self._name + + def find(self, *_, **__): + return mock.MagicMock(id='test-role') + + def grant(self, role, user, *_, **__): + self._users[user] = role + + def mock_keystone_client(self, mock_project): + keystone_client = mock.MagicMock() + keystone_client.projects.create.return_value = mock_project + keystone_client.users.find.return_value = mock.MagicMock( + id=self.test_user) + keystone_client.roles = mock_project + return keystone_client + + def mock_ctx(self, test_vars, test_id, + test_deployment_id, runtime_properties=None): + ctx = MockContext() + ctx.node = MockNodeContext(properties=test_vars) + ctx.instance = MockNodeInstanceContext( + id=test_id, runtime_properties=runtime_properties or {}) + ctx.deployment = mock.Mock() + ctx.deployment.id = test_deployment_id + ctx.type = NODE_INSTANCE + return ctx + + @mock.patch('openstack_plugin_common._put_client_in_kw', + autospec=True, return_value=None) + def test_keystone_project_create(self, *_): + test_vars = { + 'project': {}, + 'resource_id': '', + 'quota': {}, + 'users': {} + } + + ctx = self.mock_ctx(test_vars, self.test_id, self.test_deployment_id) + keystone_plugin.project.ctx = ctx + keystone_plugin.project.create( + self.mock_keystone_client(self.MockProjectOS(self.test_id, + self.test_name))) + self.assertEqual(self.test_name, + ctx.instance.runtime_properties[ + OPENSTACK_NAME_PROPERTY]) + self.assertEqual(self.test_id, + ctx.instance.runtime_properties[ + OPENSTACK_ID_PROPERTY]) + self.assertEqual(PROJECT_OPENSTACK_TYPE, + ctx.instance.runtime_properties[ + OPENSTACK_TYPE_PROPERTY]) + + @mock.patch('openstack_plugin_common._put_client_in_kw', + autospec=True, return_value=None) + def 
test_assign_user(self, *_): + test_vars = { + 'project': {}, + 'resource_id': '', + 'quota': {}, + 'users': [{'name': self.test_user, + 'roles': [self.test_role]}] + } + ctx = self.mock_ctx(test_vars, + self.test_id, + self.test_deployment_id, + {OPENSTACK_ID_PROPERTY: self.test_id}) + mock_project = self.MockProjectOS(self.test_id, self.test_name) + keystone_client = self.mock_keystone_client(mock_project) + keystone_plugin.project.ctx = ctx + keystone_plugin.project.start( + keystone_client, + mock.MagicMock(), # nova_client + mock.MagicMock(), # cinder_client + mock.MagicMock()) # neutron_client + self.assertEqual({self.test_user: self.test_role}, + mock_project._users) diff --git a/aria/multivim-plugin/neutron_plugin/__init__.py b/aria/multivim-plugin/neutron_plugin/__init__.py new file mode 100644 index 0000000000..04cb21f745 --- /dev/null +++ b/aria/multivim-plugin/neutron_plugin/__init__.py @@ -0,0 +1 @@ +__author__ = 'idanmo' diff --git a/aria/multivim-plugin/neutron_plugin/floatingip.py b/aria/multivim-plugin/neutron_plugin/floatingip.py new file mode 100644 index 0000000000..1a9d0449ca --- /dev/null +++ b/aria/multivim-plugin/neutron_plugin/floatingip.py @@ -0,0 +1,104 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+ +from cloudify import ctx +from cloudify.decorators import operation +from cloudify.exceptions import NonRecoverableError +from openstack_plugin_common import ( + with_neutron_client, + provider, + is_external_relationship, + is_external_relationship_not_conditionally_created, + OPENSTACK_ID_PROPERTY +) +from openstack_plugin_common.floatingip import ( + use_external_floatingip, + set_floatingip_runtime_properties, + delete_floatingip, + floatingip_creation_validation +) + + +@operation +@with_neutron_client +def create(neutron_client, args, **kwargs): + + if use_external_floatingip(neutron_client, 'floating_ip_address', + lambda ext_fip: ext_fip['floating_ip_address']): + return + + floatingip = { + # No defaults + } + floatingip.update(ctx.node.properties['floatingip'], **args) + + # Sugar: floating_network_name -> (resolve) -> floating_network_id + if 'floating_network_name' in floatingip: + floatingip['floating_network_id'] = neutron_client.cosmo_get_named( + 'network', floatingip['floating_network_name'])['id'] + del floatingip['floating_network_name'] + elif 'floating_network_id' not in floatingip: + provider_context = provider(ctx) + ext_network = provider_context.ext_network + if ext_network: + floatingip['floating_network_id'] = ext_network['id'] + else: + raise NonRecoverableError( + 'Missing floating network id, name or external network') + + fip = neutron_client.create_floatingip( + {'floatingip': floatingip})['floatingip'] + set_floatingip_runtime_properties(fip['id'], fip['floating_ip_address']) + + ctx.logger.info('Floating IP creation response: {0}'.format(fip)) + + +@operation +@with_neutron_client +def delete(neutron_client, **kwargs): + delete_floatingip(neutron_client) + + +@operation +@with_neutron_client +def creation_validation(neutron_client, **kwargs): + floatingip_creation_validation(neutron_client, 'floating_ip_address') + + +@operation +@with_neutron_client +def connect_port(neutron_client, **kwargs): + if 
is_external_relationship_not_conditionally_created(ctx): + return + + port_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + floating_ip_id = ctx.target.instance.runtime_properties[ + OPENSTACK_ID_PROPERTY] + fip = {'port_id': port_id} + neutron_client.update_floatingip(floating_ip_id, {'floatingip': fip}) + + +@operation +@with_neutron_client +def disconnect_port(neutron_client, **kwargs): + if is_external_relationship(ctx): + ctx.logger.info('Not disassociating floatingip and port since ' + 'external floatingip and port are being used') + return + + floating_ip_id = ctx.target.instance.runtime_properties[ + OPENSTACK_ID_PROPERTY] + fip = {'port_id': None} + neutron_client.update_floatingip(floating_ip_id, {'floatingip': fip}) diff --git a/aria/multivim-plugin/neutron_plugin/network.py b/aria/multivim-plugin/neutron_plugin/network.py new file mode 100644 index 0000000000..eadcc3b4e8 --- /dev/null +++ b/aria/multivim-plugin/neutron_plugin/network.py @@ -0,0 +1,109 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+ +from cloudify import ctx +from cloudify.decorators import operation +from cloudify.exceptions import NonRecoverableError +from openstack_plugin_common import ( + transform_resource_name, + with_neutron_client, + get_resource_id, + is_external_resource, + is_external_resource_not_conditionally_created, + delete_resource_and_runtime_properties, + use_external_resource, + validate_resource, + OPENSTACK_ID_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + OPENSTACK_NAME_PROPERTY, + COMMON_RUNTIME_PROPERTIES_KEYS +) + +NETWORK_OPENSTACK_TYPE = 'network' + +# Runtime properties +RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS + + +@operation +@with_neutron_client +def create(neutron_client, args, **kwargs): + + if use_external_resource(ctx, neutron_client, NETWORK_OPENSTACK_TYPE): + return + + network = { + 'admin_state_up': True, + 'name': get_resource_id(ctx, NETWORK_OPENSTACK_TYPE), + } + network.update(ctx.node.properties['network'], **args) + transform_resource_name(ctx, network) + + net = neutron_client.create_network({'network': network})['network'] + ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = net['id'] + ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] =\ + NETWORK_OPENSTACK_TYPE + ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = net['name'] + + +@operation +@with_neutron_client +def start(neutron_client, **kwargs): + network_id = ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + + if is_external_resource_not_conditionally_created(ctx): + ctx.logger.info('Validating external network is started') + if not neutron_client.show_network( + network_id)['network']['admin_state_up']: + raise NonRecoverableError( + 'Expected external resource network {0} to be in ' + '"admin_state_up"=True'.format(network_id)) + return + + neutron_client.update_network( + network_id, { + 'network': { + 'admin_state_up': True + } + }) + + +@operation +@with_neutron_client +def stop(neutron_client, **kwargs): + if is_external_resource(ctx): + 
ctx.logger.info('Not stopping network since an external network is ' + 'being used') + return + + neutron_client.update_network( + ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY], { + 'network': { + 'admin_state_up': False + } + }) + + +@operation +@with_neutron_client +def delete(neutron_client, **kwargs): + delete_resource_and_runtime_properties(ctx, neutron_client, + RUNTIME_PROPERTIES_KEYS) + + +@operation +@with_neutron_client +def creation_validation(neutron_client, **kwargs): + validate_resource(ctx, neutron_client, NETWORK_OPENSTACK_TYPE) diff --git a/aria/multivim-plugin/neutron_plugin/port.py b/aria/multivim-plugin/neutron_plugin/port.py new file mode 100644 index 0000000000..4db4c442c5 --- /dev/null +++ b/aria/multivim-plugin/neutron_plugin/port.py @@ -0,0 +1,222 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+ +from cloudify import ctx +from cloudify.decorators import operation +from cloudify.exceptions import NonRecoverableError + +import neutronclient.common.exceptions as neutron_exceptions + +from openstack_plugin_common import ( + transform_resource_name, + with_neutron_client, + with_nova_client, + get_resource_id, + get_openstack_id_of_single_connected_node_by_openstack_type, + delete_resource_and_runtime_properties, + delete_runtime_properties, + use_external_resource, + is_external_relationship, + validate_resource, + OPENSTACK_ID_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + OPENSTACK_NAME_PROPERTY, + COMMON_RUNTIME_PROPERTIES_KEYS, + is_external_relationship_not_conditionally_created) + +from neutron_plugin.network import NETWORK_OPENSTACK_TYPE +from neutron_plugin.subnet import SUBNET_OPENSTACK_TYPE +from openstack_plugin_common.floatingip import get_server_floating_ip + +PORT_OPENSTACK_TYPE = 'port' + +# Runtime properties +FIXED_IP_ADDRESS_PROPERTY = 'fixed_ip_address' # the fixed ip address +MAC_ADDRESS_PROPERTY = 'mac_address' # the mac address +RUNTIME_PROPERTIES_KEYS = \ + COMMON_RUNTIME_PROPERTIES_KEYS + [FIXED_IP_ADDRESS_PROPERTY, + MAC_ADDRESS_PROPERTY] + +NO_SG_PORT_CONNECTION_RETRY_INTERVAL = 3 + + +@operation +@with_neutron_client +def create(neutron_client, args, **kwargs): + + ext_port = use_external_resource(ctx, neutron_client, PORT_OPENSTACK_TYPE) + if ext_port: + try: + net_id = \ + get_openstack_id_of_single_connected_node_by_openstack_type( + ctx, NETWORK_OPENSTACK_TYPE, True) + + if net_id: + port_id = ctx.instance.runtime_properties[ + OPENSTACK_ID_PROPERTY] + + if neutron_client.show_port( + port_id)['port']['network_id'] != net_id: + raise NonRecoverableError( + 'Expected external resources port {0} and network {1} ' + 'to be connected'.format(port_id, net_id)) + + ctx.instance.runtime_properties[FIXED_IP_ADDRESS_PROPERTY] = \ + _get_fixed_ip(ext_port) + ctx.instance.runtime_properties[MAC_ADDRESS_PROPERTY] = \ + ext_port['mac_address'] + 
return + except Exception: + delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS) + raise + + net_id = get_openstack_id_of_single_connected_node_by_openstack_type( + ctx, NETWORK_OPENSTACK_TYPE) + + port = { + 'name': get_resource_id(ctx, PORT_OPENSTACK_TYPE), + 'network_id': net_id, + 'security_groups': [], + } + + _handle_fixed_ips(port) + port.update(ctx.node.properties['port'], **args) + transform_resource_name(ctx, port) + + p = neutron_client.create_port({'port': port})['port'] + ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = p['id'] + ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] =\ + PORT_OPENSTACK_TYPE + ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = p['name'] + ctx.instance.runtime_properties[FIXED_IP_ADDRESS_PROPERTY] = \ + _get_fixed_ip(p) + ctx.instance.runtime_properties[MAC_ADDRESS_PROPERTY] = p['mac_address'] + + +@operation +@with_neutron_client +def delete(neutron_client, **kwargs): + try: + delete_resource_and_runtime_properties(ctx, neutron_client, + RUNTIME_PROPERTIES_KEYS) + except neutron_exceptions.NeutronClientException, e: + if e.status_code == 404: + # port was probably deleted when an attached device was deleted + delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS) + else: + raise + + +@operation +@with_nova_client +@with_neutron_client +def detach(nova_client, neutron_client, **kwargs): + + if is_external_relationship(ctx): + ctx.logger.info('Not detaching port from server since ' + 'external port and server are being used') + return + + port_id = ctx.target.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + server_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + + server_floating_ip = get_server_floating_ip(neutron_client, server_id) + if server_floating_ip: + ctx.logger.info('We have floating ip {0} attached to server' + .format(server_floating_ip['floating_ip_address'])) + server = nova_client.servers.get(server_id) + 
server.remove_floating_ip(server_floating_ip['floating_ip_address']) + return ctx.operation.retry( + message='Waiting for the floating ip {0} to ' + 'detach from server {1}..' + .format(server_floating_ip['floating_ip_address'], + server_id), + retry_after=10) + change = { + 'port': { + 'device_id': '', + 'device_owner': '' + } + } + ctx.logger.info('Detaching port {0}...'.format(port_id)) + neutron_client.update_port(port_id, change) + ctx.logger.info('Successfully detached port {0}'.format(port_id)) + + +@operation +@with_neutron_client +def connect_security_group(neutron_client, **kwargs): + port_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + security_group_id = ctx.target.instance.runtime_properties[ + OPENSTACK_ID_PROPERTY] + + if is_external_relationship_not_conditionally_created(ctx): + ctx.logger.info('Validating external port and security-group are ' + 'connected') + if any(sg for sg in neutron_client.show_port(port_id)['port'].get( + 'security_groups', []) if sg == security_group_id): + return + raise NonRecoverableError( + 'Expected external resources port {0} and security-group {1} to ' + 'be connected'.format(port_id, security_group_id)) + + # WARNING: non-atomic operation + port = neutron_client.cosmo_get('port', id=port_id) + ctx.logger.info( + "connect_security_group(): source_id={0} target={1}".format( + port_id, ctx.target.instance.runtime_properties)) + sgs = port['security_groups'] + [security_group_id] + neutron_client.update_port(port_id, {'port': {'security_groups': sgs}}) + + # Double check if SG has been actually updated (a race-condition + # in OpenStack): + port_info = neutron_client.show_port(port_id)['port'] + port_security_groups = port_info.get('security_groups', []) + if security_group_id not in port_security_groups: + return ctx.operation.retry( + message='Security group connection (`{0}\' -> `{1}\')' + ' has not been established!'.format(port_id, + security_group_id), + 
retry_after=NO_SG_PORT_CONNECTION_RETRY_INTERVAL + ) + + +@operation +@with_neutron_client +def creation_validation(neutron_client, **kwargs): + validate_resource(ctx, neutron_client, PORT_OPENSTACK_TYPE) + + +def _get_fixed_ip(port): + # a port may have no fixed IP if it's set on a network without subnets + return port['fixed_ips'][0]['ip_address'] if port['fixed_ips'] else None + + +def _handle_fixed_ips(port): + fixed_ips_element = {} + + # checking for fixed ip property + if ctx.node.properties['fixed_ip']: + fixed_ips_element['ip_address'] = ctx.node.properties['fixed_ip'] + + # checking for a connected subnet + subnet_id = get_openstack_id_of_single_connected_node_by_openstack_type( + ctx, SUBNET_OPENSTACK_TYPE, if_exists=True) + if subnet_id: + fixed_ips_element['subnet_id'] = subnet_id + + # applying fixed ip parameter, if available + if fixed_ips_element: + port['fixed_ips'] = [fixed_ips_element] diff --git a/aria/multivim-plugin/neutron_plugin/router.py b/aria/multivim-plugin/neutron_plugin/router.py new file mode 100644 index 0000000000..1a2851e4bc --- /dev/null +++ b/aria/multivim-plugin/neutron_plugin/router.py @@ -0,0 +1,215 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+ +import warnings + +from cloudify import ctx +from cloudify.decorators import operation +from cloudify.exceptions import NonRecoverableError + +from openstack_plugin_common import ( + provider, + transform_resource_name, + get_resource_id, + with_neutron_client, + use_external_resource, + is_external_relationship, + is_external_relationship_not_conditionally_created, + delete_runtime_properties, + get_openstack_ids_of_connected_nodes_by_openstack_type, + delete_resource_and_runtime_properties, + get_resource_by_name_or_id, + validate_resource, + COMMON_RUNTIME_PROPERTIES_KEYS, + OPENSTACK_ID_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + OPENSTACK_NAME_PROPERTY +) + +from neutron_plugin.network import NETWORK_OPENSTACK_TYPE + + +ROUTER_OPENSTACK_TYPE = 'router' + +# Runtime properties +RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS + + +@operation +@with_neutron_client +def create(neutron_client, args, **kwargs): + + if use_external_resource(ctx, neutron_client, ROUTER_OPENSTACK_TYPE): + try: + ext_net_id_by_rel = _get_connected_ext_net_id(neutron_client) + + if ext_net_id_by_rel: + router_id = \ + ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + + router = neutron_client.show_router(router_id)['router'] + if not (router['external_gateway_info'] and 'network_id' in + router['external_gateway_info'] and + router['external_gateway_info']['network_id'] == + ext_net_id_by_rel): + raise NonRecoverableError( + 'Expected external resources router {0} and ' + 'external network {1} to be connected'.format( + router_id, ext_net_id_by_rel)) + return + except Exception: + delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS) + raise + + router = { + 'name': get_resource_id(ctx, ROUTER_OPENSTACK_TYPE), + } + router.update(ctx.node.properties['router'], **args) + transform_resource_name(ctx, router) + + _handle_external_network_config(router, neutron_client) + + r = neutron_client.create_router({'router': router})['router'] + + 
ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = r['id'] + ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] =\ + ROUTER_OPENSTACK_TYPE + ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = r['name'] + + +@operation +@with_neutron_client +def connect_subnet(neutron_client, **kwargs): + router_id = ctx.target.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + subnet_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + + if is_external_relationship_not_conditionally_created(ctx): + ctx.logger.info('Validating external subnet and router ' + 'are associated') + for port in neutron_client.list_ports(device_id=router_id)['ports']: + for fixed_ip in port.get('fixed_ips', []): + if fixed_ip.get('subnet_id') == subnet_id: + return + raise NonRecoverableError( + 'Expected external resources router {0} and subnet {1} to be ' + 'connected'.format(router_id, subnet_id)) + + neutron_client.add_interface_router(router_id, {'subnet_id': subnet_id}) + + +@operation +@with_neutron_client +def disconnect_subnet(neutron_client, **kwargs): + if is_external_relationship(ctx): + ctx.logger.info('Not connecting subnet and router since external ' + 'subnet and router are being used') + return + + neutron_client.remove_interface_router( + ctx.target.instance.runtime_properties[OPENSTACK_ID_PROPERTY], { + 'subnet_id': ctx.source.instance.runtime_properties[ + OPENSTACK_ID_PROPERTY] + } + ) + + +@operation +@with_neutron_client +def delete(neutron_client, **kwargs): + delete_resource_and_runtime_properties(ctx, neutron_client, + RUNTIME_PROPERTIES_KEYS) + + +@operation +@with_neutron_client +def creation_validation(neutron_client, **kwargs): + validate_resource(ctx, neutron_client, ROUTER_OPENSTACK_TYPE) + + +def _insert_ext_net_id_to_router_config(ext_net_id, router): + router['external_gateway_info'] = router.get( + 'external_gateway_info', {}) + router['external_gateway_info']['network_id'] = ext_net_id + + +def 
_handle_external_network_config(router, neutron_client):
+    # attempting to find an external network for the router to connect to -
+    # first by either a network name or id passed in explicitly; then by a
+    # network connected by a relationship; with a final optional fallback to an
+    # external network set in the Provider-context. Otherwise the router will
+    # simply not get connected to an external network
+
+    provider_context = provider(ctx)
+
+    ext_net_id_by_rel = _get_connected_ext_net_id(neutron_client)
+    ext_net_by_property = ctx.node.properties['external_network']
+
+    # the following is meant for backwards compatibility with the
+    # 'network_name' sugaring
+    if 'external_gateway_info' in router and 'network_name' in \
+            router['external_gateway_info']:
+        warnings.warn(
+            'Passing external "network_name" inside the '
+            'external_gateway_info key of the "router" property is now '
+            'deprecated; Use the "external_network" property instead',
+            DeprecationWarning)
+
+        ext_net_by_property = router['external_gateway_info']['network_name']
+        del (router['external_gateway_info']['network_name'])
+
+    # need to check if the user explicitly passed network_id in the external
+    # gateway configuration as it affects external network behavior by
+    # relationship and/or provider context
+    if 'external_gateway_info' in router and 'network_id' in \
+            router['external_gateway_info']:
+        ext_net_by_property = router['external_gateway_info']['network_id']
+
+    if ext_net_by_property and ext_net_id_by_rel:
+        raise RuntimeError(
+            "Router can't have an external network connected by both a "
+            'relationship and by a network name/id')
+
+    if ext_net_by_property:
+        ext_net_id = get_resource_by_name_or_id(
+            ext_net_by_property, NETWORK_OPENSTACK_TYPE, neutron_client)['id']
+        _insert_ext_net_id_to_router_config(ext_net_id, router)
+    elif ext_net_id_by_rel:
+        _insert_ext_net_id_to_router_config(ext_net_id_by_rel, router)
+    elif ctx.node.properties['default_to_managers_external_network']
and \ + provider_context.ext_network: + _insert_ext_net_id_to_router_config(provider_context.ext_network['id'], + router) + + +def _check_if_network_is_external(neutron_client, network_id): + return neutron_client.show_network( + network_id)['network']['router:external'] + + +def _get_connected_ext_net_id(neutron_client): + ext_net_ids = \ + [net_id + for net_id in + get_openstack_ids_of_connected_nodes_by_openstack_type( + ctx, NETWORK_OPENSTACK_TYPE) if + _check_if_network_is_external(neutron_client, net_id)] + + if len(ext_net_ids) > 1: + raise NonRecoverableError( + 'More than one external network is connected to router {0}' + ' by a relationship; External network IDs: {0}'.format( + ext_net_ids)) + + return ext_net_ids[0] if ext_net_ids else None diff --git a/aria/multivim-plugin/neutron_plugin/security_group.py b/aria/multivim-plugin/neutron_plugin/security_group.py new file mode 100644 index 0000000000..5f335f482b --- /dev/null +++ b/aria/multivim-plugin/neutron_plugin/security_group.py @@ -0,0 +1,130 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+ +from time import sleep + +from requests.exceptions import RequestException + +from cloudify import ctx +from cloudify.decorators import operation +from cloudify.exceptions import NonRecoverableError +from openstack_plugin_common import ( + transform_resource_name, + with_neutron_client, + delete_resource_and_runtime_properties, +) +from openstack_plugin_common.security_group import ( + build_sg_data, + process_rules, + use_external_sg, + set_sg_runtime_properties, + delete_sg, + sg_creation_validation, + RUNTIME_PROPERTIES_KEYS +) + +DEFAULT_RULE_VALUES = { + 'direction': 'ingress', + 'ethertype': 'IPv4', + 'port_range_min': 1, + 'port_range_max': 65535, + 'protocol': 'tcp', + 'remote_group_id': None, + 'remote_ip_prefix': '0.0.0.0/0', +} + + +@operation +@with_neutron_client +def create( + neutron_client, args, + status_attempts=10, status_timeout=2, **kwargs +): + + security_group = build_sg_data(args) + if not security_group['description']: + security_group['description'] = ctx.node.properties['description'] + + sg_rules = process_rules(neutron_client, DEFAULT_RULE_VALUES, + 'remote_ip_prefix', 'remote_group_id', + 'port_range_min', 'port_range_max') + + disable_default_egress_rules = ctx.node.properties.get( + 'disable_default_egress_rules') + + if use_external_sg(neutron_client): + return + + transform_resource_name(ctx, security_group) + + sg = neutron_client.create_security_group( + {'security_group': security_group})['security_group'] + + for attempt in range(max(status_attempts, 1)): + sleep(status_timeout) + try: + neutron_client.show_security_group(sg['id']) + except RequestException as e: + ctx.logger.debug("Waiting for SG to be visible. 
Attempt {}".format( + attempt)) + else: + break + else: + raise NonRecoverableError( + "Timed out waiting for security_group to exist", e) + + set_sg_runtime_properties(sg, neutron_client) + + try: + if disable_default_egress_rules: + for er in _egress_rules(_rules_for_sg_id(neutron_client, + sg['id'])): + neutron_client.delete_security_group_rule(er['id']) + + for sgr in sg_rules: + sgr['security_group_id'] = sg['id'] + neutron_client.create_security_group_rule( + {'security_group_rule': sgr}) + except Exception: + try: + delete_resource_and_runtime_properties( + ctx, neutron_client, + RUNTIME_PROPERTIES_KEYS) + except Exception as e: + raise NonRecoverableError( + 'Exception while tearing down for retry', e) + raise + + +@operation +@with_neutron_client +def delete(neutron_client, **kwargs): + delete_sg(neutron_client) + + +@operation +@with_neutron_client +def creation_validation(neutron_client, **kwargs): + sg_creation_validation(neutron_client, 'remote_ip_prefix') + + +def _egress_rules(rules): + return [rule for rule in rules if rule.get('direction') == 'egress'] + + +def _rules_for_sg_id(neutron_client, id): + rules = neutron_client.list_security_group_rules()['security_group_rules'] + rules = [rule for rule in rules if rule['security_group_id'] == id] + return rules diff --git a/aria/multivim-plugin/neutron_plugin/subnet.py b/aria/multivim-plugin/neutron_plugin/subnet.py new file mode 100644 index 0000000000..6e97c96755 --- /dev/null +++ b/aria/multivim-plugin/neutron_plugin/subnet.py @@ -0,0 +1,101 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. + +from cloudify import ctx +from cloudify.decorators import operation +from cloudify.exceptions import NonRecoverableError +from openstack_plugin_common import ( + with_neutron_client, + transform_resource_name, + get_resource_id, + get_openstack_id_of_single_connected_node_by_openstack_type, + delete_resource_and_runtime_properties, + delete_runtime_properties, + use_external_resource, + validate_resource, + validate_ip_or_range_syntax, + OPENSTACK_ID_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + OPENSTACK_NAME_PROPERTY, + COMMON_RUNTIME_PROPERTIES_KEYS +) + +from neutron_plugin.network import NETWORK_OPENSTACK_TYPE + +SUBNET_OPENSTACK_TYPE = 'subnet' + +# Runtime properties +RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS + + +@operation +@with_neutron_client +def create(neutron_client, args, **kwargs): + + if use_external_resource(ctx, neutron_client, SUBNET_OPENSTACK_TYPE): + try: + net_id = \ + get_openstack_id_of_single_connected_node_by_openstack_type( + ctx, NETWORK_OPENSTACK_TYPE, True) + + if net_id: + subnet_id = \ + ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + + if neutron_client.show_subnet( + subnet_id)['subnet']['network_id'] != net_id: + raise NonRecoverableError( + 'Expected external resources subnet {0} and network' + ' {1} to be connected'.format(subnet_id, net_id)) + return + except Exception: + delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS) + raise + + net_id = get_openstack_id_of_single_connected_node_by_openstack_type( + ctx, NETWORK_OPENSTACK_TYPE) + subnet = { + 'name': get_resource_id(ctx, 
SUBNET_OPENSTACK_TYPE), + 'network_id': net_id, + } + subnet.update(ctx.node.properties['subnet'], **args) + transform_resource_name(ctx, subnet) + + s = neutron_client.create_subnet({'subnet': subnet})['subnet'] + ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = s['id'] + ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \ + SUBNET_OPENSTACK_TYPE + ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = subnet['name'] + + +@operation +@with_neutron_client +def delete(neutron_client, **kwargs): + delete_resource_and_runtime_properties(ctx, neutron_client, + RUNTIME_PROPERTIES_KEYS) + + +@operation +@with_neutron_client +def creation_validation(neutron_client, args, **kwargs): + validate_resource(ctx, neutron_client, SUBNET_OPENSTACK_TYPE) + subnet = dict(ctx.node.properties['subnet'], **args) + + if 'cidr' not in subnet: + err = '"cidr" property must appear under the "subnet" property of a ' \ + 'subnet node' + ctx.logger.error('VALIDATION ERROR: ' + err) + raise NonRecoverableError(err) + validate_ip_or_range_syntax(ctx, subnet['cidr']) diff --git a/aria/multivim-plugin/neutron_plugin/tests/__init__.py b/aria/multivim-plugin/neutron_plugin/tests/__init__.py new file mode 100644 index 0000000000..04cb21f745 --- /dev/null +++ b/aria/multivim-plugin/neutron_plugin/tests/__init__.py @@ -0,0 +1 @@ +__author__ = 'idanmo' diff --git a/aria/multivim-plugin/neutron_plugin/tests/test.py b/aria/multivim-plugin/neutron_plugin/tests/test.py new file mode 100644 index 0000000000..459c23a6cd --- /dev/null +++ b/aria/multivim-plugin/neutron_plugin/tests/test.py @@ -0,0 +1,220 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. + +import mock +import random +import string +import unittest + +from cloudify.exceptions import NonRecoverableError +from cloudify.context import BootstrapContext + +from cloudify.mocks import MockCloudifyContext + +import openstack_plugin_common as common +import openstack_plugin_common.tests.test as common_test + +import neutron_plugin +import neutron_plugin.network +import neutron_plugin.port +import neutron_plugin.router +import neutron_plugin.security_group + + +class ResourcesRenamingTest(unittest.TestCase): + def setUp(self): + neutron_plugin.port._find_network_in_related_nodes = mock.Mock() + # *** Configs from files ******************** + common.Config.get = mock.Mock() + common.Config.get.return_value = {} + # *** Neutron ******************** + self.neutron_mock = mock.Mock() + + def neutron_mock_connect(unused_self, unused_cfg): + return self.neutron_mock + common.NeutronClient.connect = neutron_mock_connect + + self.neutron_mock.cosmo_list = mock.Mock() + self.neutron_mock.cosmo_list.return_value = [] + + def _setup_ctx(self, obj_type): + ctx = common_test.create_mock_ctx_with_provider_info( + node_id='__cloudify_id_something_001', + properties={ + obj_type: { + 'name': obj_type + '_name', + }, + 'rules': [] # For security_group + } + ) + return ctx + + def _test(self, obj_type): + ctx = self._setup_ctx(obj_type) + attr = getattr(self.neutron_mock, 'create_' + obj_type) + attr.return_value = { + obj_type: { + 'id': obj_type + '_id', + } + } + getattr(neutron_plugin, obj_type).create(ctx) + calls = attr.mock_calls + 
self.assertEquals(len(calls), 1) # Exactly one object created + # Indexes into call[]: + # 0 - the only call + # 1 - regular arguments + # 0 - first argument + arg = calls[0][1][0] + self.assertEquals(arg[obj_type]['name'], 'p2_' + obj_type + '_name') + + def test_network(self): + self._test('network') + + def test_port(self): + self._test('port') + + def test_router(self): + self._test('router') + + def test_security_group(self): + self._test('security_group') + + # Network chosen arbitrary for this test. + # Just testing something without prefix. + def test_network_no_prefix(self): + ctx = self._setup_ctx('network') + for pctx in common_test.BOOTSTRAP_CONTEXTS_WITHOUT_PREFIX: + ctx._bootstrap_context = BootstrapContext(pctx) + self.neutron_mock.create_network.reset_mock() + self.neutron_mock.create_network.return_value = { + 'network': { + 'id': 'network_id', + } + } + neutron_plugin.network.create(ctx) + calls = self.neutron_mock.create_network.mock_calls + self.assertEquals(len(calls), 1) # Exactly one network created + # Indexes into call[]: + # 0 - the only call + # 1 - regular arguments + # 0 - first argument + arg = calls[0][1][0] + self.assertEquals(arg['network']['name'], 'network_name', + "Failed with context: " + str(pctx)) + + +def _rand_str(n): + chars = string.ascii_uppercase + string.digits + return ''.join(random.choice(chars) for _ in range(n)) + + +class SecurityGroupTest(unittest.TestCase): + def setUp(self): + # *** Configs from files ******************** + common.Config.get = mock.Mock() + common.Config.get.return_value = {} + # *** Neutron ******************** + self.neutron_mock = mock.Mock() + + def neutron_mock_connect(unused_self, unused_cfg): + return self.neutron_mock + common.NeutronClient.connect = neutron_mock_connect + neutron_plugin.security_group._rules_for_sg_id = mock.Mock() + neutron_plugin.security_group._rules_for_sg_id.return_value = [] + + def _setup_ctx(self): + sg_name = _rand_str(6) + '_new' + ctx = 
MockCloudifyContext(properties={ + 'security_group': { + 'name': sg_name, + 'description': 'blah' + }, + 'rules': [{'port': 80}], + 'disable_default_egress_rules': True, + }) + return ctx + + def test_sg_new(self): + ctx = self._setup_ctx() + self.neutron_mock.cosmo_list = mock.Mock() + self.neutron_mock.cosmo_list.return_value = [] + self.neutron_mock.create_security_group = mock.Mock() + self.neutron_mock.create_security_group.return_value = { + 'security_group': { + 'description': 'blah', + 'id': ctx['security_group']['name'] + '_id', + } + } + neutron_plugin.security_group.create(ctx) + self.assertTrue(self.neutron_mock.create_security_group.mock_calls) + + def test_sg_use_existing(self): + ctx = self._setup_ctx() + self.neutron_mock.cosmo_list = mock.Mock() + self.neutron_mock.cosmo_list.return_value = [{ + 'id': ctx['security_group']['name'] + '_existing_id', + 'description': 'blah', + 'security_group_rules': [{ + 'remote_group_id': None, + 'direction': 'ingress', + 'protocol': 'tcp', + 'ethertype': 'IPv4', + 'port_range_max': 80, + 'port_range_min': 80, + 'remote_ip_prefix': '0.0.0.0/0', + }] + }] + self.neutron_mock.create_security_group = mock.Mock() + self.neutron_mock.create_security_group.return_value = { + 'security_group': { + 'description': 'blah', + 'id': ctx['security_group']['name'] + '_id', + } + } + neutron_plugin.security_group.create(ctx) + self.assertFalse(self.neutron_mock.create_security_group.mock_calls) + + def test_sg_use_existing_with_other_rules(self): + ctx = self._setup_ctx() + self.neutron_mock.cosmo_list = mock.Mock() + self.neutron_mock.cosmo_list.return_value = [{ + 'id': ctx['security_group']['name'] + '_existing_id', + 'description': 'blah', + 'security_group_rules': [{ + 'remote_group_id': None, + 'direction': 'ingress', + 'protocol': 'tcp', + 'ethertype': 'IPv4', + 'port_range_max': 81, # Note the different port! + 'port_range_min': 81, # Note the different port! 
+ 'remote_ip_prefix': '0.0.0.0/0', + }] + }] + self.neutron_mock.create_security_group = mock.Mock() + self.neutron_mock.create_security_group.return_value = { + 'security_group': { + 'description': 'blah', + 'id': ctx['security_group']['name'] + '_id', + } + } + self.assertRaises( + NonRecoverableError, + neutron_plugin.security_group.create, + ctx + ) + + +if __name__ == '__main__': + unittest.main() diff --git a/aria/multivim-plugin/neutron_plugin/tests/test_port.py b/aria/multivim-plugin/neutron_plugin/tests/test_port.py new file mode 100644 index 0000000000..1acee3d05d --- /dev/null +++ b/aria/multivim-plugin/neutron_plugin/tests/test_port.py @@ -0,0 +1,156 @@ +######## +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. + +import unittest + +import mock + +import neutron_plugin.port +from cloudify.mocks import (MockCloudifyContext, + MockNodeInstanceContext, + MockRelationshipSubjectContext) +from openstack_plugin_common import (NeutronClientWithSugar, + OPENSTACK_ID_PROPERTY) +from cloudify.exceptions import OperationRetry + + +class TestPort(unittest.TestCase): + + def test_fixed_ips_no_fixed_ips(self): + node_props = {'fixed_ip': ''} + + with mock.patch( + 'neutron_plugin.port.' 
+ 'get_openstack_id_of_single_connected_node_by_openstack_type', + self._get_connected_subnet_mock(return_empty=True)): + with mock.patch( + 'neutron_plugin.port.ctx', + self._get_mock_ctx_with_node_properties(node_props)): + + port = {} + neutron_plugin.port._handle_fixed_ips(port) + + self.assertNotIn('fixed_ips', port) + + def test_fixed_ips_subnet_only(self): + node_props = {'fixed_ip': ''} + + with mock.patch( + 'neutron_plugin.port.' + 'get_openstack_id_of_single_connected_node_by_openstack_type', + self._get_connected_subnet_mock(return_empty=False)): + with mock.patch( + 'neutron_plugin.port.ctx', + self._get_mock_ctx_with_node_properties(node_props)): + + port = {} + neutron_plugin.port._handle_fixed_ips(port) + + self.assertEquals([{'subnet_id': 'some-subnet-id'}], + port.get('fixed_ips')) + + def test_fixed_ips_ip_address_only(self): + node_props = {'fixed_ip': '1.2.3.4'} + + with mock.patch( + 'neutron_plugin.port.' + 'get_openstack_id_of_single_connected_node_by_openstack_type', + self._get_connected_subnet_mock(return_empty=True)): + with mock.patch( + 'neutron_plugin.port.ctx', + self._get_mock_ctx_with_node_properties(node_props)): + + port = {} + neutron_plugin.port._handle_fixed_ips(port) + + self.assertEquals([{'ip_address': '1.2.3.4'}], + port.get('fixed_ips')) + + def test_fixed_ips_subnet_and_ip_address(self): + node_props = {'fixed_ip': '1.2.3.4'} + + with mock.patch( + 'neutron_plugin.port.' 
+ 'get_openstack_id_of_single_connected_node_by_openstack_type', + self._get_connected_subnet_mock(return_empty=False)): + with mock.patch( + 'neutron_plugin.port.ctx', + self._get_mock_ctx_with_node_properties(node_props)): + + port = {} + neutron_plugin.port._handle_fixed_ips(port) + + self.assertEquals([{'ip_address': '1.2.3.4', + 'subnet_id': 'some-subnet-id'}], + port.get('fixed_ips')) + + @staticmethod + def _get_connected_subnet_mock(return_empty=True): + return lambda *args, **kw: None if return_empty else 'some-subnet-id' + + @staticmethod + def _get_mock_ctx_with_node_properties(properties): + return MockCloudifyContext(node_id='test_node_id', + properties=properties) + + +class MockNeutronClient(NeutronClientWithSugar): + """A fake neutron client with hard-coded test data.""" + def __init__(self, update): + self.update = update + self.body = {'port': {'id': 'test-id', 'security_groups': []}} + + def show_port(self, *_): + return self.body + + def update_port(self, _, b, **__): + if self.update: + self.body.update(b) + return + + def cosmo_get(self, *_, **__): + return self.body['port'] + + +class TestPortSG(unittest.TestCase): + @mock.patch('openstack_plugin_common._put_client_in_kw') + def test_connect_sg_to_port(self, *_): + mock_neutron = MockNeutronClient(update=True) + ctx = MockCloudifyContext( + source=MockRelationshipSubjectContext(node=mock.MagicMock(), + instance=mock.MagicMock()), + target=MockRelationshipSubjectContext(node=mock.MagicMock(), + instance=mock.MagicMock())) + + with mock.patch('neutron_plugin.port.ctx', ctx): + neutron_plugin.port.connect_security_group(mock_neutron) + self.assertIsNone(ctx.operation._operation_retry) + + @mock.patch('openstack_plugin_common._put_client_in_kw') + def test_connect_sg_to_port_race_condition(self, *_): + mock_neutron = MockNeutronClient(update=False) + + ctx = MockCloudifyContext( + source=MockRelationshipSubjectContext(node=mock.MagicMock(), + instance=mock.MagicMock()), + 
target=MockRelationshipSubjectContext( + node=mock.MagicMock(), + instance=MockNodeInstanceContext( + runtime_properties={ + OPENSTACK_ID_PROPERTY: 'test-sg-id'}))) + with mock.patch('neutron_plugin.port.ctx', ctx): + neutron_plugin.port.connect_security_group(mock_neutron, ctx=ctx) + self.assertIsInstance(ctx.operation._operation_retry, + OperationRetry) diff --git a/aria/multivim-plugin/neutron_plugin/tests/test_security_group.py b/aria/multivim-plugin/neutron_plugin/tests/test_security_group.py new file mode 100644 index 0000000000..e958cddb33 --- /dev/null +++ b/aria/multivim-plugin/neutron_plugin/tests/test_security_group.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- +######### +# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+ +import unittest + +from mock import Mock, patch +from requests.exceptions import RequestException + +from neutron_plugin import security_group + +from cloudify.exceptions import NonRecoverableError +from cloudify.state import current_ctx + +from cloudify.mocks import MockCloudifyContext + + +class FakeException(Exception): + pass + + +@patch('openstack_plugin_common.OpenStackClient._validate_auth_params') +@patch('openstack_plugin_common.NeutronClientWithSugar') +class TestSecurityGroup(unittest.TestCase): + + def setUp(self): + super(TestSecurityGroup, self).setUp() + self.nova_client = Mock() + + self.ctx = MockCloudifyContext( + node_id='test', + deployment_id='test', + properties={ + 'description': 'The best Security Group. Great', + 'rules': [], + 'resource_id': 'mock_sg', + 'security_group': { + }, + 'server': {}, + 'openstack_config': { + 'auth_url': 'things/v3', + }, + }, + operation={'retry_number': 0}, + provider_context={'resources': {}} + ) + current_ctx.set(self.ctx) + self.addCleanup(current_ctx.clear) + + findctx = patch( + 'openstack_plugin_common._find_context_in_kw', + return_value=self.ctx, + ) + findctx.start() + self.addCleanup(findctx.stop) + + def test_set_sg_runtime_properties(self, mock_nc, *_): + security_group.create( + nova_client=self.nova_client, + ctx=self.ctx, + args={}, + ) + + self.assertEqual( + { + 'external_type': 'security_group', + 'external_id': mock_nc().get_id_from_resource(), + 'external_name': mock_nc().get_name_from_resource(), + }, + self.ctx.instance.runtime_properties + ) + + def test_create_sg_wait_timeout(self, mock_nc, *_): + mock_nc().show_security_group.side_effect = RequestException + + with self.assertRaises(NonRecoverableError): + security_group.create( + nova_client=self.nova_client, + ctx=self.ctx, + args={}, + status_attempts=3, + status_timeout=0.001, + ) + + @patch( + 'neutron_plugin.security_group.delete_resource_and_runtime_properties') + def test_dont_duplicate_if_failed_rule(self, mock_del_res, 
mock_nc, *_): + self.ctx.node.properties['rules'] = [ + { + 'port': '🍷', + }, + ] + mock_nc().create_security_group_rule.side_effect = FakeException + mock_del_res.side_effect = FakeException('the 2nd') + + with self.assertRaises(NonRecoverableError) as e: + security_group.create( + nova_client=self.nova_client, + ctx=self.ctx, + args={}, + ) + + self.assertIn('the 2nd', str(e.exception)) diff --git a/aria/multivim-plugin/nova_plugin/__init__.py b/aria/multivim-plugin/nova_plugin/__init__.py new file mode 100644 index 0000000000..bb533273be --- /dev/null +++ b/aria/multivim-plugin/nova_plugin/__init__.py @@ -0,0 +1,16 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. + +__author__ = 'idanmo' diff --git a/aria/multivim-plugin/nova_plugin/floatingip.py b/aria/multivim-plugin/nova_plugin/floatingip.py new file mode 100644 index 0000000000..e770c540a8 --- /dev/null +++ b/aria/multivim-plugin/nova_plugin/floatingip.py @@ -0,0 +1,60 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. + +from cloudify import ctx +from cloudify.decorators import operation +from openstack_plugin_common import with_nova_client +from openstack_plugin_common.floatingip import ( + use_external_floatingip, + set_floatingip_runtime_properties, + delete_floatingip, + floatingip_creation_validation +) + + +# random note regarding nova floating-ips: floating ips on nova-net have +# pre-assigned ids, and thus a call "nova.floating_ips.get(<fip_id>)" will +# return a value even if the floating-ip isn't even allocated. +# currently all lookups in the code, including by id, use search (i.e. +# nova.<type>.findall) and lists, which won't return such unallocated +# resources. 
+ +@operation +@with_nova_client +def create(nova_client, args, **kwargs): + + if use_external_floatingip(nova_client, 'ip', + lambda ext_fip: ext_fip.ip): + return + + floatingip = { + 'pool': None + } + floatingip.update(ctx.node.properties['floatingip'], **args) + + fip = nova_client.floating_ips.create(floatingip['pool']) + set_floatingip_runtime_properties(fip.id, fip.ip) + + +@operation +@with_nova_client +def delete(nova_client, **kwargs): + delete_floatingip(nova_client) + + +@operation +@with_nova_client +def creation_validation(nova_client, **kwargs): + floatingip_creation_validation(nova_client, 'ip') diff --git a/aria/multivim-plugin/nova_plugin/keypair.py b/aria/multivim-plugin/nova_plugin/keypair.py new file mode 100644 index 0000000000..92281ab9e5 --- /dev/null +++ b/aria/multivim-plugin/nova_plugin/keypair.py @@ -0,0 +1,202 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+ +import os +import errno +from getpass import getuser + +from cloudify import ctx +from cloudify.decorators import operation +from cloudify.exceptions import NonRecoverableError +from openstack_plugin_common import ( + with_nova_client, + validate_resource, + use_external_resource, + transform_resource_name, + is_external_resource, + is_external_resource_not_conditionally_created, + delete_runtime_properties, + get_resource_id, + delete_resource_and_runtime_properties, + OPENSTACK_ID_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + OPENSTACK_NAME_PROPERTY, + COMMON_RUNTIME_PROPERTIES_KEYS +) + +RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS +KEYPAIR_OPENSTACK_TYPE = 'keypair' + +PRIVATE_KEY_PATH_PROP = 'private_key_path' + + +@operation +@with_nova_client +def create(nova_client, args, **kwargs): + + private_key_path = _get_private_key_path() + pk_exists = _check_private_key_exists(private_key_path) + + if use_external_resource(ctx, nova_client, KEYPAIR_OPENSTACK_TYPE): + if not pk_exists: + delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS) + raise NonRecoverableError( + 'Failed to use external keypair (node {0}): the public key {1}' + ' is available on Openstack, but the private key could not be ' + 'found at {2}'.format(ctx.node.id, + ctx.node.properties['resource_id'], + private_key_path)) + return + + if pk_exists: + raise NonRecoverableError( + "Can't create keypair - private key path already exists: {0}" + .format(private_key_path)) + + keypair = { + 'name': get_resource_id(ctx, KEYPAIR_OPENSTACK_TYPE), + } + keypair.update(ctx.node.properties['keypair'], **args) + transform_resource_name(ctx, keypair) + + keypair = nova_client.keypairs.create(keypair['name'], + keypair.get('public_key')) + ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = keypair.id + ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \ + KEYPAIR_OPENSTACK_TYPE + ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = keypair.name + + try: + # write private key file 
+ _mkdir_p(os.path.dirname(private_key_path)) + with open(private_key_path, 'w') as f: + f.write(keypair.private_key) + os.chmod(private_key_path, 0600) + except Exception: + _delete_private_key_file() + delete_resource_and_runtime_properties(ctx, nova_client, + RUNTIME_PROPERTIES_KEYS) + raise + + +@operation +@with_nova_client +def delete(nova_client, **kwargs): + if not is_external_resource(ctx): + ctx.logger.info('deleting keypair') + + _delete_private_key_file() + + nova_client.keypairs.delete( + ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY]) + else: + ctx.logger.info('not deleting keypair since an external keypair is ' + 'being used') + + delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS) + + +@operation +@with_nova_client +def creation_validation(nova_client, **kwargs): + + def validate_private_key_permissions(private_key_path): + ctx.logger.debug('checking whether private key file {0} has the ' + 'correct permissions'.format(private_key_path)) + if not os.access(private_key_path, os.R_OK): + err = 'private key file {0} is not readable'\ + .format(private_key_path) + ctx.logger.error('VALIDATION ERROR: ' + err) + raise NonRecoverableError(err) + ctx.logger.debug('OK: private key file {0} has the correct ' + 'permissions'.format(private_key_path)) + + def validate_path_owner(path): + ctx.logger.debug('checking whether directory {0} is owned by the ' + 'current user'.format(path)) + from pwd import getpwnam, getpwuid + + user = getuser() + owner = getpwuid(os.stat(path).st_uid).pw_name + current_user_id = str(getpwnam(user).pw_uid) + owner_id = str(os.stat(path).st_uid) + + if not current_user_id == owner_id: + err = '{0} is not owned by the current user (it is owned by {1})'\ + .format(path, owner) + ctx.logger.warning('VALIDATION WARNING: {0}'.format(err)) + return + ctx.logger.debug('OK: {0} is owned by the current user'.format(path)) + + validate_resource(ctx, nova_client, KEYPAIR_OPENSTACK_TYPE) + + private_key_path = _get_private_key_path() 
+ pk_exists = _check_private_key_exists(private_key_path) + + if is_external_resource_not_conditionally_created(ctx): + if pk_exists: + if os.name == 'posix': + validate_private_key_permissions(private_key_path) + validate_path_owner(private_key_path) + else: + err = "can't use external keypair: the public key {0} is " \ + "available on Openstack, but the private key could not be " \ + "found at {1}".format(ctx.node.properties['resource_id'], + private_key_path) + ctx.logger.error('VALIDATION ERROR: {0}'.format(err)) + raise NonRecoverableError(err) + else: + if pk_exists: + err = 'private key path already exists: {0}'.format( + private_key_path) + ctx.logger.error('VALIDATION ERROR: {0}'.format(err)) + raise NonRecoverableError(err) + else: + err = 'private key directory {0} is not writable' + while private_key_path: + if os.path.isdir(private_key_path): + if not os.access(private_key_path, os.W_OK | os.X_OK): + raise NonRecoverableError(err.format(private_key_path)) + else: + break + private_key_path, _ = os.path.split(private_key_path) + + ctx.logger.debug('OK: keypair configuration is valid') + + +def _get_private_key_path(): + return os.path.expanduser(ctx.node.properties[PRIVATE_KEY_PATH_PROP]) + + +def _delete_private_key_file(): + private_key_path = _get_private_key_path() + ctx.logger.debug('deleting private key file at {0}'.format( + private_key_path)) + try: + os.remove(private_key_path) + except OSError as e: + if e.errno == errno.ENOENT: + # file was already deleted somehow + pass + raise + + +def _check_private_key_exists(private_key_path): + return os.path.isfile(private_key_path) + + +def _mkdir_p(path): + if path and not os.path.isdir(path): + os.makedirs(path) diff --git a/aria/multivim-plugin/nova_plugin/security_group.py b/aria/multivim-plugin/nova_plugin/security_group.py new file mode 100644 index 0000000000..283eae85cf --- /dev/null +++ b/aria/multivim-plugin/nova_plugin/security_group.py @@ -0,0 +1,81 @@ +######### +# Copyright (c) 2014 
GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. + +from cloudify import ctx +from cloudify.decorators import operation +from openstack_plugin_common import ( + transform_resource_name, + with_nova_client, + delete_resource_and_runtime_properties +) +from openstack_plugin_common.security_group import ( + build_sg_data, + process_rules, + use_external_sg, + set_sg_runtime_properties, + delete_sg, + sg_creation_validation, + RUNTIME_PROPERTIES_KEYS +) + + +@operation +@with_nova_client +def create(nova_client, args, **kwargs): + + security_group = build_sg_data(args) + security_group['description'] = ctx.node.properties['description'] + + sgr_default_values = { + 'ip_protocol': 'tcp', + 'from_port': 1, + 'to_port': 65535, + 'cidr': '0.0.0.0/0', + # 'group_id': None, + # 'parent_group_id': None, + } + sg_rules = process_rules(nova_client, sgr_default_values, + 'cidr', 'group_id', 'from_port', 'to_port') + + if use_external_sg(nova_client): + return + + transform_resource_name(ctx, security_group) + + sg = nova_client.security_groups.create( + security_group['name'], security_group['description']) + + set_sg_runtime_properties(sg, nova_client) + + try: + for sgr in sg_rules: + sgr['parent_group_id'] = sg.id + nova_client.security_group_rules.create(**sgr) + except Exception: + delete_resource_and_runtime_properties(ctx, nova_client, + RUNTIME_PROPERTIES_KEYS) + raise + + +@operation +@with_nova_client +def 
delete(nova_client, **kwargs): + delete_sg(nova_client) + + +@operation +@with_nova_client +def creation_validation(nova_client, **kwargs): + sg_creation_validation(nova_client, 'cidr') diff --git a/aria/multivim-plugin/nova_plugin/server.py b/aria/multivim-plugin/nova_plugin/server.py new file mode 100644 index 0000000000..6726f24804 --- /dev/null +++ b/aria/multivim-plugin/nova_plugin/server.py @@ -0,0 +1,944 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+ + +import os +import time +import copy +import operator + +from novaclient import exceptions as nova_exceptions + +from cloudify import ctx +from cloudify.manager import get_rest_client +from cloudify.decorators import operation +from cloudify.exceptions import NonRecoverableError, RecoverableError +from cinder_plugin import volume +from openstack_plugin_common import ( + provider, + transform_resource_name, + get_resource_id, + get_openstack_ids_of_connected_nodes_by_openstack_type, + with_nova_client, + with_cinder_client, + assign_payload_as_runtime_properties, + get_openstack_id_of_single_connected_node_by_openstack_type, + get_openstack_names_of_connected_nodes_by_openstack_type, + get_single_connected_node_by_openstack_type, + is_external_resource, + is_external_resource_by_properties, + is_external_resource_not_conditionally_created, + is_external_relationship_not_conditionally_created, + use_external_resource, + delete_runtime_properties, + is_external_relationship, + validate_resource, + USE_EXTERNAL_RESOURCE_PROPERTY, + OPENSTACK_AZ_PROPERTY, + OPENSTACK_ID_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + OPENSTACK_NAME_PROPERTY, + COMMON_RUNTIME_PROPERTIES_KEYS, + with_neutron_client) +from nova_plugin.keypair import KEYPAIR_OPENSTACK_TYPE +from nova_plugin import userdata +from openstack_plugin_common.floatingip import (IP_ADDRESS_PROPERTY, + get_server_floating_ip) +from neutron_plugin.network import NETWORK_OPENSTACK_TYPE +from neutron_plugin.port import PORT_OPENSTACK_TYPE +from cinder_plugin.volume import VOLUME_OPENSTACK_TYPE +from openstack_plugin_common.security_group import \ + SECURITY_GROUP_OPENSTACK_TYPE +from glance_plugin.image import handle_image_from_relationship + +SERVER_OPENSTACK_TYPE = 'server' + +# server status constants. 
Full lists here: http://docs.openstack.org/api/openstack-compute/2/content/List_Servers-d1e2078.html # NOQA +SERVER_STATUS_ACTIVE = 'ACTIVE' +SERVER_STATUS_BUILD = 'BUILD' +SERVER_STATUS_SHUTOFF = 'SHUTOFF' + +OS_EXT_STS_TASK_STATE = 'OS-EXT-STS:task_state' +SERVER_TASK_STATE_POWERING_ON = 'powering-on' + +MUST_SPECIFY_NETWORK_EXCEPTION_TEXT = 'More than one possible network found.' +SERVER_DELETE_CHECK_SLEEP = 2 + +# Runtime properties +NETWORKS_PROPERTY = 'networks' # all of the server's ips +IP_PROPERTY = 'ip' # the server's private ip +ADMIN_PASSWORD_PROPERTY = 'password' # the server's password +RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS + \ + [NETWORKS_PROPERTY, IP_PROPERTY, ADMIN_PASSWORD_PROPERTY] + + +def _get_management_network_id_and_name(neutron_client, ctx): + """Examine the context to find the management network id and name.""" + management_network_id = None + management_network_name = None + provider_context = provider(ctx) + + if ('management_network_name' in ctx.node.properties) and \ + ctx.node.properties['management_network_name']: + management_network_name = \ + ctx.node.properties['management_network_name'] + management_network_name = transform_resource_name( + ctx, management_network_name) + management_network_id = neutron_client.cosmo_get_named( + 'network', management_network_name) + management_network_id = management_network_id['id'] + else: + int_network = provider_context.int_network + if int_network: + management_network_id = int_network['id'] + management_network_name = int_network['name'] # Already transform. + + return management_network_id, management_network_name + + +def _merge_nics(management_network_id, *nics_sources): + """Merge nics_sources into a single nics list, insert mgmt network if + needed. + nics_sources are lists of networks received from several sources + (server properties, relationships to networks, relationships to ports). 
+ Merge them into a single list, and if the management network isn't present + there, prepend it as the first network. + """ + merged = [] + for nics in nics_sources: + merged.extend(nics) + if management_network_id is not None and \ + not any(nic['net-id'] == management_network_id for nic in merged): + merged.insert(0, {'net-id': management_network_id}) + return merged + + +def _normalize_nics(nics): + """Transform the NICs passed to the form expected by openstack. + + If both net-id and port-id are provided, remove net-id: it is ignored + by openstack anyway. + """ + def _normalize(nic): + if 'port-id' in nic and 'net-id' in nic: + nic = nic.copy() + del nic['net-id'] + return nic + return [_normalize(nic) for nic in nics] + + +def _prepare_server_nics(neutron_client, ctx, server): + """Update server['nics'] based on declared relationships. + + server['nics'] should contain the pre-declared nics, then the networks + that the server has a declared relationship to, then the networks + of the ports the server has a relationship to. + + If that doesn't include the management network, it should be prepended + as the first network. 
+ + The management network id and name are stored in the server meta properties + """ + network_ids = get_openstack_ids_of_connected_nodes_by_openstack_type( + ctx, NETWORK_OPENSTACK_TYPE) + port_ids = get_openstack_ids_of_connected_nodes_by_openstack_type( + ctx, PORT_OPENSTACK_TYPE) + management_network_id, management_network_name = \ + _get_management_network_id_and_name(neutron_client, ctx) + if management_network_id is None and (network_ids or port_ids): + # Known limitation + raise NonRecoverableError( + "Nova server with NICs requires " + "'management_network_name' in properties or id " + "from provider context, which was not supplied") + + nics = _merge_nics( + management_network_id, + server.get('nics', []), + [{'net-id': net_id} for net_id in network_ids], + get_port_networks(neutron_client, port_ids)) + + nics = _normalize_nics(nics) + + server['nics'] = nics + if management_network_id is not None: + server['meta']['cloudify_management_network_id'] = \ + management_network_id + if management_network_name is not None: + server['meta']['cloudify_management_network_name'] = \ + management_network_name + + +def _get_boot_volume_relationships(type_name, ctx): + ctx.logger.debug('Instance relationship target instances: {0}'.format(str([ + rel.target.instance.runtime_properties + for rel in ctx.instance.relationships]))) + targets = [ + rel.target.instance + for rel in ctx.instance.relationships + if rel.target.instance.runtime_properties.get( + OPENSTACK_TYPE_PROPERTY) == type_name and + rel.target.node.properties.get('boot', False)] + + if not targets: + return None + elif len(targets) > 1: + raise NonRecoverableError("2 boot volumes not supported") + return targets[0] + + +def _handle_boot_volume(server, ctx): + boot_volume = _get_boot_volume_relationships(VOLUME_OPENSTACK_TYPE, ctx) + if boot_volume: + boot_volume_id = boot_volume.runtime_properties[OPENSTACK_ID_PROPERTY] + ctx.logger.info('boot_volume_id: {0}'.format(boot_volume_id)) + az = 
boot_volume.runtime_properties[OPENSTACK_AZ_PROPERTY] + # If a block device mapping already exists we shouldn't overwrite it + # completely + bdm = server.setdefault('block_device_mapping', {}) + bdm['vda'] = '{0}:::0'.format(boot_volume_id) + # Some nova configurations allow cross-az server-volume connections, so + # we can't treat that as an error. + if not server.get('availability_zone'): + server['availability_zone'] = az + + +@operation +@with_nova_client +@with_neutron_client +def create(nova_client, neutron_client, args, **kwargs): + """ + Creates a server. Exposes the parameters mentioned in + http://docs.openstack.org/developer/python-novaclient/api/novaclient.v1_1 + .servers.html#novaclient.v1_1.servers.ServerManager.create + """ + + external_server = use_external_resource(ctx, nova_client, + SERVER_OPENSTACK_TYPE) + + if external_server: + _set_network_and_ip_runtime_properties(external_server) + if ctx._local: + return + else: + network_ids = \ + get_openstack_ids_of_connected_nodes_by_openstack_type( + ctx, NETWORK_OPENSTACK_TYPE) + port_ids = get_openstack_ids_of_connected_nodes_by_openstack_type( + ctx, PORT_OPENSTACK_TYPE) + try: + _validate_external_server_nics( + neutron_client, + network_ids, + port_ids + ) + _validate_external_server_keypair(nova_client) + return + except Exception: + delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS) + raise + + provider_context = provider(ctx) + + def rename(name): + return transform_resource_name(ctx, name) + + server = { + 'name': get_resource_id(ctx, SERVER_OPENSTACK_TYPE), + } + server.update(copy.deepcopy(ctx.node.properties['server'])) + server.update(copy.deepcopy(args)) + + _handle_boot_volume(server, ctx) + handle_image_from_relationship(server, 'image', ctx) + + if 'meta' not in server: + server['meta'] = dict() + + transform_resource_name(ctx, server) + + ctx.logger.debug( + "server.create() server before transformations: {0}".format(server)) + + for key in 'block_device_mapping', 
'block_device_mapping_v2': + if key in server: + # if there is a connected boot volume, don't require the `image` + # property. + # However, python-novaclient requires an `image` input anyway, and + # checks it for truthiness when deciding whether to pass it along + # to the API + if 'image' not in server: + server['image'] = ctx.node.properties.get('image') + break + else: + _handle_image_or_flavor(server, nova_client, 'image') + _handle_image_or_flavor(server, nova_client, 'flavor') + + if provider_context.agents_security_group: + security_groups = server.get('security_groups', []) + asg = provider_context.agents_security_group['name'] + if asg not in security_groups: + security_groups.append(asg) + server['security_groups'] = security_groups + elif not server.get('security_groups', []): + # Make sure that if the server is connected to a security group + # from CREATE time so that there the user can control + # that there is never a time that a running server is not protected. + security_group_names = \ + get_openstack_names_of_connected_nodes_by_openstack_type( + ctx, + SECURITY_GROUP_OPENSTACK_TYPE) + server['security_groups'] = security_group_names + + # server keypair handling + keypair_id = get_openstack_id_of_single_connected_node_by_openstack_type( + ctx, KEYPAIR_OPENSTACK_TYPE, True) + + if 'key_name' in server: + if keypair_id: + raise NonRecoverableError("server can't both have the " + '"key_name" nested property and be ' + 'connected to a keypair via a ' + 'relationship at the same time') + server['key_name'] = rename(server['key_name']) + elif keypair_id: + server['key_name'] = _get_keypair_name_by_id(nova_client, keypair_id) + elif provider_context.agents_keypair: + server['key_name'] = provider_context.agents_keypair['name'] + else: + server['key_name'] = None + ctx.logger.info( + 'server must have a keypair, yet no keypair was connected to the ' + 'server node, the "key_name" nested property ' + "wasn't used, and there is no agent keypair in the 
provider " + "context. Agent installation can have issues.") + + _fail_on_missing_required_parameters( + server, + ('name', 'flavor'), + 'server') + + _prepare_server_nics(neutron_client, ctx, server) + + ctx.logger.debug( + "server.create() server after transformations: {0}".format(server)) + + userdata.handle_userdata(server) + + ctx.logger.info("Creating VM with parameters: {0}".format(str(server))) + # Store the server dictionary contents in runtime properties + assign_payload_as_runtime_properties(ctx, SERVER_OPENSTACK_TYPE, server) + ctx.logger.debug( + "Asking Nova to create server. All possible parameters are: {0})" + .format(','.join(server.keys()))) + + try: + s = nova_client.servers.create(**server) + except nova_exceptions.BadRequest as e: + if 'Block Device Mapping is Invalid' in str(e): + return ctx.operation.retry( + message='Block Device Mapping is not created yet', + retry_after=30) + if str(e).startswith(MUST_SPECIFY_NETWORK_EXCEPTION_TEXT): + raise NonRecoverableError( + "Can not provision server: management_network_name or id" + " is not specified but there are several networks that the " + "server can be connected to.") + raise + ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = s.id + ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \ + SERVER_OPENSTACK_TYPE + ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = server['name'] + + +def get_port_networks(neutron_client, port_ids): + + def get_network(port_id): + port = neutron_client.show_port(port_id) + return { + 'net-id': port['port']['network_id'], + 'port-id': port['port']['id'] + } + + return map(get_network, port_ids) + + +@operation +@with_nova_client +def start(nova_client, start_retry_interval, private_key_path, **kwargs): + server = get_server_by_context(nova_client) + + if is_external_resource_not_conditionally_created(ctx): + ctx.logger.info('Validating external server is started') + if server.status != SERVER_STATUS_ACTIVE: + raise NonRecoverableError( + 
'Expected external resource server {0} to be in ' + '"{1}" status'.format(server.id, SERVER_STATUS_ACTIVE)) + return + + if server.status == SERVER_STATUS_ACTIVE: + ctx.logger.info('Server is {0}'.format(server.status)) + + if ctx.node.properties['use_password']: + private_key = _get_private_key(private_key_path) + ctx.logger.debug('retrieving password for server') + password = server.get_password(private_key) + + if not password: + return ctx.operation.retry( + message='Waiting for server to post generated password', + retry_after=start_retry_interval) + + ctx.instance.runtime_properties[ADMIN_PASSWORD_PROPERTY] = password + ctx.logger.info('Server has been set with a password') + + _set_network_and_ip_runtime_properties(server) + return + + server_task_state = getattr(server, OS_EXT_STS_TASK_STATE) + + if server.status == SERVER_STATUS_SHUTOFF and \ + server_task_state != SERVER_TASK_STATE_POWERING_ON: + ctx.logger.info('Server is in {0} status - starting server...'.format( + SERVER_STATUS_SHUTOFF)) + server.start() + server_task_state = SERVER_TASK_STATE_POWERING_ON + + if server.status == SERVER_STATUS_BUILD or \ + server_task_state == SERVER_TASK_STATE_POWERING_ON: + return ctx.operation.retry( + message='Waiting for server to be in {0} state but is in {1}:{2} ' + 'state. Retrying...'.format(SERVER_STATUS_ACTIVE, + server.status, + server_task_state), + retry_after=start_retry_interval) + + raise NonRecoverableError( + 'Unexpected server state {0}:{1}'.format(server.status, + server_task_state)) + + +@operation +@with_nova_client +def stop(nova_client, **kwargs): + """ + Stop server. + + Depends on OpenStack implementation, server.stop() might not be supported. 
+ """ + if is_external_resource(ctx): + ctx.logger.info('Not stopping server since an external server is ' + 'being used') + return + + server = get_server_by_context(nova_client) + + if server.status != SERVER_STATUS_SHUTOFF: + nova_client.servers.stop(server) + else: + ctx.logger.info('Server is already stopped') + + +@operation +@with_nova_client +def delete(nova_client, **kwargs): + if not is_external_resource(ctx): + ctx.logger.info('deleting server') + server = get_server_by_context(nova_client) + nova_client.servers.delete(server) + _wait_for_server_to_be_deleted(nova_client, server) + else: + ctx.logger.info('not deleting server since an external server is ' + 'being used') + + delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS) + + +def _wait_for_server_to_be_deleted(nova_client, + server, + timeout=120, + sleep_interval=5): + timeout = time.time() + timeout + while time.time() < timeout: + try: + server = nova_client.servers.get(server) + ctx.logger.debug('Waiting for server "{}" to be deleted. current' + ' status: {}'.format(server.id, server.status)) + time.sleep(sleep_interval) + except nova_exceptions.NotFound: + return + # recoverable error + raise RuntimeError('Server {} has not been deleted. waited for {} seconds' + .format(server.id, timeout)) + + +def get_server_by_context(nova_client): + return nova_client.servers.get( + ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY]) + + +def _set_network_and_ip_runtime_properties(server): + + ips = {} + + if not server.networks: + raise NonRecoverableError( + 'The server was created but not attached to a network. ' + 'Cloudify requires that a server is connected to ' + 'at least one port.' 
+ ) + + manager_network_ip = None + management_network_name = server.metadata.get( + 'cloudify_management_network_name') + + for network, network_ips in server.networks.items(): + if (management_network_name and + network == management_network_name) or not \ + manager_network_ip: + manager_network_ip = next(iter(network_ips or []), None) + ips[network] = network_ips + ctx.instance.runtime_properties[NETWORKS_PROPERTY] = ips + # The ip of this instance in the management network + ctx.instance.runtime_properties[IP_PROPERTY] = manager_network_ip + + +@operation +@with_nova_client +def connect_floatingip(nova_client, fixed_ip, **kwargs): + server_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + floating_ip_id = ctx.target.instance.runtime_properties[ + OPENSTACK_ID_PROPERTY] + + if is_external_relationship_not_conditionally_created(ctx): + ctx.logger.info('Validating external floatingip and server ' + 'are associated') + if nova_client.floating_ips.get(floating_ip_id).instance_id ==\ + server_id: + return + raise NonRecoverableError( + 'Expected external resources server {0} and floating-ip {1} to be ' + 'connected'.format(server_id, floating_ip_id)) + + floating_ip_address = ctx.target.instance.runtime_properties[ + IP_ADDRESS_PROPERTY] + server = nova_client.servers.get(server_id) + server.add_floating_ip(floating_ip_address, fixed_ip or None) + + server = nova_client.servers.get(server_id) + all_server_ips = reduce(operator.add, server.networks.values()) + if floating_ip_address not in all_server_ips: + return ctx.operation.retry(message='Failed to assign floating ip {0}' + ' to machine {1}.' 
+ .format(floating_ip_address, server_id)) + + +@operation +@with_nova_client +@with_neutron_client +def disconnect_floatingip(nova_client, neutron_client, **kwargs): + if is_external_relationship(ctx): + ctx.logger.info('Not disassociating floatingip and server since ' + 'external floatingip and server are being used') + return + + server_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + ctx.logger.info("Remove floating ip {0}".format( + ctx.target.instance.runtime_properties[IP_ADDRESS_PROPERTY])) + server_floating_ip = get_server_floating_ip(neutron_client, server_id) + if server_floating_ip: + server = nova_client.servers.get(server_id) + server.remove_floating_ip(server_floating_ip['floating_ip_address']) + ctx.logger.info("Floating ip {0} detached from server" + .format(server_floating_ip['floating_ip_address'])) + + +@operation +@with_nova_client +def connect_security_group(nova_client, **kwargs): + server_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + security_group_id = ctx.target.instance.runtime_properties[ + OPENSTACK_ID_PROPERTY] + security_group_name = ctx.target.instance.runtime_properties[ + OPENSTACK_NAME_PROPERTY] + + if is_external_relationship_not_conditionally_created(ctx): + ctx.logger.info('Validating external security group and server ' + 'are associated') + server = nova_client.servers.get(server_id) + if [sg for sg in server.list_security_group() if sg.id == + security_group_id]: + return + raise NonRecoverableError( + 'Expected external resources server {0} and security-group {1} to ' + 'be connected'.format(server_id, security_group_id)) + + server = nova_client.servers.get(server_id) + for security_group in server.list_security_group(): + # Since some security groups are already attached in + # create this will ensure that they are not attached twice. 
+ if security_group_id != security_group.id and \ + security_group_name != security_group.name: + # to support nova security groups as well, + # we connect the security group by name + # (as connecting by id + # doesn't seem to work well for nova SGs) + server.add_security_group(security_group_name) + + _validate_security_group_and_server_connection_status(nova_client, + server_id, + security_group_id, + security_group_name, + is_connected=True) + + +@operation +@with_nova_client +def disconnect_security_group(nova_client, **kwargs): + if is_external_relationship(ctx): + ctx.logger.info('Not disconnecting security group and server since ' + 'external security group and server are being used') + return + + server_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + security_group_id = ctx.target.instance.runtime_properties[ + OPENSTACK_ID_PROPERTY] + security_group_name = ctx.target.instance.runtime_properties[ + OPENSTACK_NAME_PROPERTY] + server = nova_client.servers.get(server_id) + # to support nova security groups as well, we disconnect the security group + # by name (as disconnecting by id doesn't seem to work well for nova SGs) + server.remove_security_group(security_group_name) + + _validate_security_group_and_server_connection_status(nova_client, + server_id, + security_group_id, + security_group_name, + is_connected=False) + + +@operation +@with_nova_client +@with_cinder_client +def attach_volume(nova_client, cinder_client, status_attempts, + status_timeout, **kwargs): + server_id = ctx.target.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + volume_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + + if is_external_relationship_not_conditionally_created(ctx): + ctx.logger.info('Validating external volume and server ' + 'are connected') + attachment = volume.get_attachment(cinder_client=cinder_client, + volume_id=volume_id, + server_id=server_id) + if attachment: + return + else: + raise NonRecoverableError( + 'Expected 
external resources server {0} and volume {1} to be ' + 'connected'.format(server_id, volume_id)) + + # Note: The 'device_name' property should actually be a property of the + # relationship between a server and a volume; It'll move to that + # relationship type once relationship properties are better supported. + device = ctx.source.node.properties[volume.DEVICE_NAME_PROPERTY] + nova_client.volumes.create_server_volume( + server_id, + volume_id, + device if device != 'auto' else None) + try: + vol, wait_succeeded = volume.wait_until_status( + cinder_client=cinder_client, + volume_id=volume_id, + status=volume.VOLUME_STATUS_IN_USE, + num_tries=status_attempts, + timeout=status_timeout + ) + if not wait_succeeded: + raise RecoverableError( + 'Waiting for volume status {0} failed - detaching volume and ' + 'retrying..'.format(volume.VOLUME_STATUS_IN_USE)) + if device == 'auto': + # The device name was assigned automatically so we + # query the actual device name + attachment = volume.get_attachment( + cinder_client=cinder_client, + volume_id=volume_id, + server_id=server_id + ) + device_name = attachment['device'] + ctx.logger.info('Detected device name for attachment of volume ' + '{0} to server {1}: {2}' + .format(volume_id, server_id, device_name)) + ctx.source.instance.runtime_properties[ + volume.DEVICE_NAME_PROPERTY] = device_name + except Exception, e: + if not isinstance(e, NonRecoverableError): + _prepare_attach_volume_to_be_repeated( + nova_client, cinder_client, server_id, volume_id, + status_attempts, status_timeout) + raise + + +def _prepare_attach_volume_to_be_repeated( + nova_client, cinder_client, server_id, volume_id, + status_attempts, status_timeout): + + ctx.logger.info('Cleaning after a failed attach_volume() call') + try: + _detach_volume(nova_client, cinder_client, server_id, volume_id, + status_attempts, status_timeout) + except Exception, e: + ctx.logger.error('Cleaning after a failed attach_volume() call failed ' + 'raising a \'{0}\' 
exception.'.format(e)) + raise NonRecoverableError(e) + + +def _detach_volume(nova_client, cinder_client, server_id, volume_id, + status_attempts, status_timeout): + attachment = volume.get_attachment(cinder_client=cinder_client, + volume_id=volume_id, + server_id=server_id) + if attachment: + nova_client.volumes.delete_server_volume(server_id, attachment['id']) + volume.wait_until_status(cinder_client=cinder_client, + volume_id=volume_id, + status=volume.VOLUME_STATUS_AVAILABLE, + num_tries=status_attempts, + timeout=status_timeout) + + +@operation +@with_nova_client +@with_cinder_client +def detach_volume(nova_client, cinder_client, status_attempts, + status_timeout, **kwargs): + if is_external_relationship(ctx): + ctx.logger.info('Not detaching volume from server since ' + 'external volume and server are being used') + return + + server_id = ctx.target.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + volume_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + + _detach_volume(nova_client, cinder_client, server_id, volume_id, + status_attempts, status_timeout) + + +def _fail_on_missing_required_parameters(obj, required_parameters, hint_where): + for k in required_parameters: + if k not in obj: + raise NonRecoverableError( + "Required parameter '{0}' is missing (under host's " + "properties.{1}). 
Required parameters are: {2}" + .format(k, hint_where, required_parameters)) + + +def _validate_external_server_keypair(nova_client): + keypair_id = get_openstack_id_of_single_connected_node_by_openstack_type( + ctx, KEYPAIR_OPENSTACK_TYPE, True) + if not keypair_id: + return + + keypair_instance_id = \ + [node_instance_id for node_instance_id, runtime_props in + ctx.capabilities.get_all().iteritems() if + runtime_props.get(OPENSTACK_ID_PROPERTY) == keypair_id][0] + keypair_node_properties = _get_properties_by_node_instance_id( + keypair_instance_id) + if not is_external_resource_by_properties(keypair_node_properties): + raise NonRecoverableError( + "Can't connect a new keypair node to a server node " + "with '{0}'=True".format(USE_EXTERNAL_RESOURCE_PROPERTY)) + + server = get_server_by_context(nova_client) + if keypair_id == _get_keypair_name_by_id(nova_client, server.key_name): + return + raise NonRecoverableError( + "Expected external resources server {0} and keypair {1} to be " + "connected".format(server.id, keypair_id)) + + +def _get_keypair_name_by_id(nova_client, key_name): + keypair = nova_client.cosmo_get_named(KEYPAIR_OPENSTACK_TYPE, key_name) + return keypair.id + + +def _validate_external_server_nics(neutron_client, network_ids, port_ids): + # validate no new nics are being assigned to an existing server (which + # isn't possible on Openstack) + new_nic_nodes = \ + [node_instance_id for node_instance_id, runtime_props in + ctx.capabilities.get_all().iteritems() if runtime_props.get( + OPENSTACK_TYPE_PROPERTY) in (PORT_OPENSTACK_TYPE, + NETWORK_OPENSTACK_TYPE) and + not is_external_resource_by_properties( + _get_properties_by_node_instance_id(node_instance_id))] + if new_nic_nodes: + raise NonRecoverableError( + "Can't connect new port and/or network nodes to a server node " + "with '{0}'=True".format(USE_EXTERNAL_RESOURCE_PROPERTY)) + + # validate all expected connected networks and ports are indeed already + # connected to the server. 
note that additional networks (e.g. the + # management network) may be connected as well with no error raised + if not network_ids and not port_ids: + return + + server_id = ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + connected_ports = neutron_client.list_ports(device_id=server_id)['ports'] + + # not counting networks connected by a connected port since allegedly + # the connection should be on a separate port + connected_ports_networks = {port['network_id'] for port in + connected_ports if port['id'] not in port_ids} + connected_ports_ids = {port['id'] for port in + connected_ports} + disconnected_networks = [network_id for network_id in network_ids if + network_id not in connected_ports_networks] + disconnected_ports = [port_id for port_id in port_ids if port_id not + in connected_ports_ids] + if disconnected_networks or disconnected_ports: + raise NonRecoverableError( + 'Expected external resources to be connected to external server {' + '0}: Networks - {1}; Ports - {2}'.format(server_id, + disconnected_networks, + disconnected_ports)) + + +def _get_properties_by_node_instance_id(node_instance_id): + client = get_rest_client() + node_instance = client.node_instances.get(node_instance_id) + node = client.nodes.get(ctx.deployment.id, node_instance.node_id) + return node.properties + + +@operation +@with_nova_client +def creation_validation(nova_client, args, **kwargs): + + def validate_server_property_value_exists(server_props, property_name): + ctx.logger.debug( + 'checking whether {0} exists...'.format(property_name)) + + serv_props_copy = server_props.copy() + try: + handle_image_from_relationship(serv_props_copy, 'image', ctx) + _handle_image_or_flavor(serv_props_copy, nova_client, + property_name) + except (NonRecoverableError, nova_exceptions.NotFound) as e: + # temporary error - once image/flavor_name get removed, these + # errors won't be relevant anymore + err = str(e) + ctx.logger.error('VALIDATION ERROR: ' + err) + raise 
NonRecoverableError(err) + + prop_value_id = str(serv_props_copy[property_name]) + prop_values = list(nova_client.cosmo_list(property_name)) + for f in prop_values: + if prop_value_id == f.id: + ctx.logger.debug('OK: {0} exists'.format(property_name)) + return + err = '{0} {1} does not exist'.format(property_name, prop_value_id) + ctx.logger.error('VALIDATION ERROR: ' + err) + if prop_values: + ctx.logger.info('list of available {0}s:'.format(property_name)) + for f in prop_values: + ctx.logger.info(' {0:>10} - {1}'.format(f.id, f.name)) + else: + ctx.logger.info('there are no available {0}s'.format( + property_name)) + raise NonRecoverableError(err) + + validate_resource(ctx, nova_client, SERVER_OPENSTACK_TYPE) + + server_props = dict(ctx.node.properties['server'], **args) + validate_server_property_value_exists(server_props, 'flavor') + + +def _get_private_key(private_key_path): + pk_node_by_rel = \ + get_single_connected_node_by_openstack_type( + ctx, KEYPAIR_OPENSTACK_TYPE, True) + + if private_key_path: + if pk_node_by_rel: + raise NonRecoverableError("server can't both have a " + '"private_key_path" input and be ' + 'connected to a keypair via a ' + 'relationship at the same time') + key_path = private_key_path + else: + if pk_node_by_rel and pk_node_by_rel.properties['private_key_path']: + key_path = pk_node_by_rel.properties['private_key_path'] + else: + key_path = ctx.bootstrap_context.cloudify_agent.agent_key_path + + if key_path: + key_path = os.path.expanduser(key_path) + if os.path.isfile(key_path): + return key_path + + err_message = 'Cannot find private key file' + if key_path: + err_message += '; expected file path was {0}'.format(key_path) + raise NonRecoverableError(err_message) + + +def _validate_security_group_and_server_connection_status( + nova_client, server_id, sg_id, sg_name, is_connected): + + # verifying the security group got connected or disconnected + # successfully - this is due to Openstack concurrency issues that may + # take place 
when attempting to connect/disconnect multiple SGs to the + # same server at the same time + server = nova_client.servers.get(server_id) + + if is_connected ^ any(sg for sg in server.list_security_group() if + sg.id == sg_id): + raise RecoverableError( + message='Security group {0} did not get {2} server {1} ' + 'properly' + .format( + sg_name, + server.name, + 'connected to' if is_connected else 'disconnected from')) + + +def _handle_image_or_flavor(server, nova_client, prop_name): + if prop_name not in server and '{0}_name'.format(prop_name) not in server: + # setting image or flavor - looking it up by name; if not found, then + # the value is assumed to be the id + server[prop_name] = ctx.node.properties[prop_name] + + # temporary error message: once the 'image' and 'flavor' properties + # become mandatory, this will become less relevant + if not server[prop_name]: + raise NonRecoverableError( + 'must set {0} by either setting a "{0}" property or by setting' + ' a "{0}" or "{0}_name" (deprecated) field under the "server" ' + 'property'.format(prop_name)) + + image_or_flavor = \ + nova_client.cosmo_get_if_exists(prop_name, name=server[prop_name]) + if image_or_flavor: + server[prop_name] = image_or_flavor.id + else: # Deprecated sugar + if '{0}_name'.format(prop_name) in server: + prop_name_plural = nova_client.cosmo_plural(prop_name) + server[prop_name] = \ + getattr(nova_client, prop_name_plural).find( + name=server['{0}_name'.format(prop_name)]).id + del server['{0}_name'.format(prop_name)] diff --git a/aria/multivim-plugin/nova_plugin/tests/__init__.py b/aria/multivim-plugin/nova_plugin/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/aria/multivim-plugin/nova_plugin/tests/__init__.py diff --git a/aria/multivim-plugin/nova_plugin/tests/resources/test-keypair-validation-blueprint.yaml b/aria/multivim-plugin/nova_plugin/tests/resources/test-keypair-validation-blueprint.yaml new file mode 100644 index 0000000000..22b7fb5362 
--- /dev/null +++ b/aria/multivim-plugin/nova_plugin/tests/resources/test-keypair-validation-blueprint.yaml @@ -0,0 +1,23 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml + - plugin.yaml + +inputs: + private_key: {} + is_keypair_external: {} + + +node_templates: + + keypair: + type: cloudify.openstack.nodes.KeyPair + properties: + private_key_path: { get_input: private_key } + use_external_resource: { get_input: is_keypair_external } + openstack_config: + username: aaa + password: aaa + tenant_name: aaa + auth_url: aaa diff --git a/aria/multivim-plugin/nova_plugin/tests/resources/test-server-create-secgroup.yaml b/aria/multivim-plugin/nova_plugin/tests/resources/test-server-create-secgroup.yaml new file mode 100644 index 0000000000..70b75f6bf5 --- /dev/null +++ b/aria/multivim-plugin/nova_plugin/tests/resources/test-server-create-secgroup.yaml @@ -0,0 +1,31 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml + - plugin.yaml + +inputs: + use_password: + type: boolean + default: false + +node_templates: + + security_group: + type: cloudify.openstack.nodes.SecurityGroup + + server: + type: cloudify.openstack.nodes.Server + properties: + install_agent: false + use_password: { get_input: use_password } + openstack_config: + username: aaa + password: aaa + tenant_name: aaa + auth_url: aaa + server: + key_name: 'aa' + relationships: + - type: cloudify.openstack.server_connected_to_security_group + target: security_group diff --git a/aria/multivim-plugin/nova_plugin/tests/resources/test-start-operation-retry-blueprint.yaml b/aria/multivim-plugin/nova_plugin/tests/resources/test-start-operation-retry-blueprint.yaml new file mode 100644 index 0000000000..275806cf5a --- /dev/null +++ 
b/aria/multivim-plugin/nova_plugin/tests/resources/test-start-operation-retry-blueprint.yaml @@ -0,0 +1,31 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml + - plugin.yaml + +inputs: + use_password: + type: boolean + default: false + +node_templates: + server: + type: cloudify.openstack.nodes.Server + properties: + install_agent: false + use_password: { get_input: use_password } + server: + key_name: key + scheduler_hints: + group: affinity-group-id + openstack_config: + username: aaa + password: aaa + tenant_name: aaa + auth_url: aaa + interfaces: + cloudify.interfaces.lifecycle: + start: + inputs: + start_retry_interval: 1 diff --git a/aria/multivim-plugin/nova_plugin/tests/test_relationships.py b/aria/multivim-plugin/nova_plugin/tests/test_relationships.py new file mode 100644 index 0000000000..2814057fb7 --- /dev/null +++ b/aria/multivim-plugin/nova_plugin/tests/test_relationships.py @@ -0,0 +1,228 @@ +######### +# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. + +"""Test the functions related to retrieving relationship information. 
+ +Functions under test are mostly inside openstack_plugin_common: +get_relationships_by_openstack_type +get_connected_nodes_by_openstack_type +get_openstack_ids_of_connected_nodes_by_openstack_type +get_single_connected_node_by_openstack_type +""" + +import uuid +from unittest import TestCase + +from neutron_plugin.network import NETWORK_OPENSTACK_TYPE + +from cloudify.exceptions import NonRecoverableError + +from cloudify.mocks import ( + MockCloudifyContext, + MockNodeContext, + MockNodeInstanceContext, + MockRelationshipContext, + MockRelationshipSubjectContext, +) +from openstack_plugin_common import ( + OPENSTACK_ID_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + get_openstack_id_of_single_connected_node_by_openstack_type, + get_openstack_ids_of_connected_nodes_by_openstack_type, + get_relationships_by_openstack_type, + get_single_connected_node_by_openstack_type, +) + + +class RelationshipsTestBase(TestCase): + def _make_vm_ctx_with_relationships(self, rel_specs, properties=None): + """Prepare a mock CloudifyContext from the given relationship spec. + + rel_specs is an ordered collection of relationship specs - dicts + with the keys "node" and "instance" used to construct the + MockNodeContext and the MockNodeInstanceContext, and optionally a + "type" key. 
+ Examples: [ + {}, + {"node": {"id": 5}}, + { + "type": "some_type", + "instance": { + "id": 3, + "runtime_properties":{} + } + } + ] + """ + if properties is None: + properties = {} + relationships = [] + for rel_spec in rel_specs: + node = rel_spec.get('node', {}) + node_id = node.pop('id', uuid.uuid4().hex) + + instance = rel_spec.get('instance', {}) + instance_id = instance.pop('id', '{0}_{1}'.format( + node_id, uuid.uuid4().hex)) + if 'properties' not in node: + node['properties'] = {} + node_ctx = MockNodeContext(id=node_id, **node) + instance_ctx = MockNodeInstanceContext(id=instance_id, **instance) + + rel_subject_ctx = MockRelationshipSubjectContext( + node=node_ctx, instance=instance_ctx) + rel_type = rel_spec.get('type') + rel_ctx = MockRelationshipContext(target=rel_subject_ctx, + type=rel_type) + relationships.append(rel_ctx) + return MockCloudifyContext(node_id='vm', properties=properties, + relationships=relationships) + + +class TestGettingRelatedResources(RelationshipsTestBase): + + def test_get_relationships_finds_all_by_type(self): + """get_relationships_by_openstack_type returns all rels that match.""" + rel_specs = [{ + 'instance': { + 'id': instance_id, + 'runtime_properties': { + OPENSTACK_TYPE_PROPERTY: NETWORK_OPENSTACK_TYPE + } + } + } for instance_id in range(3)] + + rel_specs.append({ + 'instance': { + 'runtime_properties': { + OPENSTACK_TYPE_PROPERTY: 'something else' + } + } + }) + + ctx = self._make_vm_ctx_with_relationships(rel_specs) + filtered = get_relationships_by_openstack_type(ctx, + NETWORK_OPENSTACK_TYPE) + self.assertEqual(3, len(filtered)) + + def test_get_ids_of_nodes_by_type(self): + + rel_spec = { + 'instance': { + 'runtime_properties': { + OPENSTACK_TYPE_PROPERTY: NETWORK_OPENSTACK_TYPE, + OPENSTACK_ID_PROPERTY: 'the node id' + } + } + } + ctx = self._make_vm_ctx_with_relationships([rel_spec]) + ids = get_openstack_ids_of_connected_nodes_by_openstack_type( + ctx, NETWORK_OPENSTACK_TYPE) + self.assertEqual(['the node 
id'], ids) + + +class TestGetSingleByID(RelationshipsTestBase): + def _make_instances(self, ids): + """Mock a context with relationships to instances with given ids.""" + rel_specs = [{ + 'node': { + 'id': node_id + }, + 'instance': { + 'runtime_properties': { + OPENSTACK_TYPE_PROPERTY: NETWORK_OPENSTACK_TYPE, + OPENSTACK_ID_PROPERTY: node_id + } + } + } for node_id in ids] + return self._make_vm_ctx_with_relationships(rel_specs) + + def test_get_single_id(self): + ctx = self._make_instances(['the node id']) + found_id = get_openstack_id_of_single_connected_node_by_openstack_type( + ctx, NETWORK_OPENSTACK_TYPE) + self.assertEqual('the node id', found_id) + + def test_get_single_id_two_found(self): + ctx = self._make_instances([0, 1]) + self.assertRaises( + NonRecoverableError, + get_openstack_id_of_single_connected_node_by_openstack_type, ctx, + NETWORK_OPENSTACK_TYPE) + + def test_get_single_id_two_found_if_exists_true(self): + ctx = self._make_instances([0, 1]) + + try: + get_openstack_id_of_single_connected_node_by_openstack_type( + ctx, NETWORK_OPENSTACK_TYPE, if_exists=True) + except NonRecoverableError as e: + self.assertIn(NETWORK_OPENSTACK_TYPE, e.message) + else: + self.fail() + + def test_get_single_id_if_exists_none_found(self): + ctx = self._make_instances([]) + found = get_openstack_id_of_single_connected_node_by_openstack_type( + ctx, NETWORK_OPENSTACK_TYPE, if_exists=True) + self.assertIsNone(found) + + def test_get_single_id_none_found(self): + rel_spec = [] + ctx = self._make_vm_ctx_with_relationships(rel_spec) + self.assertRaises( + NonRecoverableError, + get_openstack_id_of_single_connected_node_by_openstack_type, + ctx, + NETWORK_OPENSTACK_TYPE) + + def test_get_single_node(self): + ctx = self._make_instances(['the node id']) + found_node = get_single_connected_node_by_openstack_type( + ctx, NETWORK_OPENSTACK_TYPE) + self.assertEqual('the node id', found_node.id) + + def test_get_single_node_two_found(self): + ctx = self._make_instances([0, 1]) 
+ self.assertRaises( + NonRecoverableError, + get_single_connected_node_by_openstack_type, + ctx, NETWORK_OPENSTACK_TYPE) + + def test_get_single_node_two_found_if_exists(self): + ctx = self._make_instances([0, 1]) + + self.assertRaises( + NonRecoverableError, + get_single_connected_node_by_openstack_type, + ctx, + NETWORK_OPENSTACK_TYPE, + if_exists=True) + + def test_get_single_node_if_exists_none_found(self): + ctx = self._make_instances([]) + + found = get_single_connected_node_by_openstack_type( + ctx, NETWORK_OPENSTACK_TYPE, if_exists=True) + self.assertIsNone(found) + + def test_get_single_node_none_found(self): + ctx = self._make_instances([]) + + self.assertRaises( + NonRecoverableError, + get_single_connected_node_by_openstack_type, + ctx, + NETWORK_OPENSTACK_TYPE) diff --git a/aria/multivim-plugin/nova_plugin/tests/test_server.py b/aria/multivim-plugin/nova_plugin/tests/test_server.py new file mode 100644 index 0000000000..a50930555c --- /dev/null +++ b/aria/multivim-plugin/nova_plugin/tests/test_server.py @@ -0,0 +1,551 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+ +from os import path +import tempfile + +import unittest +import mock + +import nova_plugin +from cloudify.test_utils import workflow_test + +from openstack_plugin_common import NeutronClientWithSugar, \ + OPENSTACK_TYPE_PROPERTY, OPENSTACK_ID_PROPERTY +from neutron_plugin.network import NETWORK_OPENSTACK_TYPE +from neutron_plugin.port import PORT_OPENSTACK_TYPE +from nova_plugin.tests.test_relationships import RelationshipsTestBase +from nova_plugin.server import _prepare_server_nics +from cinder_plugin.volume import VOLUME_OPENSTACK_TYPE +from cloudify.exceptions import NonRecoverableError +from cloudify.state import current_ctx + +from cloudify.utils import setup_logger + +from cloudify.mocks import ( + MockNodeContext, + MockCloudifyContext, + MockNodeInstanceContext, + MockRelationshipContext, + MockRelationshipSubjectContext +) + + +class TestServer(unittest.TestCase): + + blueprint_path = path.join('resources', + 'test-start-operation-retry-blueprint.yaml') + + @mock.patch('nova_plugin.server.create') + @mock.patch('nova_plugin.server._set_network_and_ip_runtime_properties') + @workflow_test(blueprint_path, copy_plugin_yaml=True) + def test_nova_server_lifecycle_start(self, cfy_local, *_): + + test_vars = { + 'counter': 0, + 'server': mock.MagicMock() + } + + def mock_get_server_by_context(*_): + s = test_vars['server'] + if test_vars['counter'] == 0: + s.status = nova_plugin.server.SERVER_STATUS_BUILD + else: + s.status = nova_plugin.server.SERVER_STATUS_ACTIVE + test_vars['counter'] += 1 + return s + + with mock.patch('nova_plugin.server.get_server_by_context', + new=mock_get_server_by_context): + cfy_local.execute('install', task_retries=3) + + self.assertEqual(2, test_vars['counter']) + self.assertEqual(0, test_vars['server'].start.call_count) + + @workflow_test(blueprint_path, copy_plugin_yaml=True) + @mock.patch('nova_plugin.server.create') + @mock.patch('nova_plugin.server._set_network_and_ip_runtime_properties') + def 
test_nova_server_lifecycle_start_after_stop(self, cfy_local, *_): + + test_vars = { + 'counter': 0, + 'server': mock.MagicMock() + } + + def mock_get_server_by_context(_): + s = test_vars['server'] + if test_vars['counter'] == 0: + s.status = nova_plugin.server.SERVER_STATUS_SHUTOFF + elif test_vars['counter'] == 1: + setattr(s, + nova_plugin.server.OS_EXT_STS_TASK_STATE, + nova_plugin.server.SERVER_TASK_STATE_POWERING_ON) + else: + s.status = nova_plugin.server.SERVER_STATUS_ACTIVE + test_vars['counter'] += 1 + test_vars['server'] = s + return s + + with mock.patch('nova_plugin.server.get_server_by_context', + new=mock_get_server_by_context): + cfy_local.execute('install', task_retries=3) + + self.assertEqual(1, test_vars['server'].start.call_count) + self.assertEqual(3, test_vars['counter']) + + @workflow_test(blueprint_path, copy_plugin_yaml=True) + @mock.patch('nova_plugin.server.create') + @mock.patch('nova_plugin.server._set_network_and_ip_runtime_properties') + def test_nova_server_lifecycle_start_unknown_status(self, cfy_local, *_): + test_vars = { + 'counter': 0, + 'server': mock.MagicMock() + } + + def mock_get_server_by_context(_): + s = test_vars['server'] + if test_vars['counter'] == 0: + s.status = '### unknown-status ###' + test_vars['counter'] += 1 + test_vars['server'] = s + return s + + with mock.patch('nova_plugin.server.get_server_by_context', + new=mock_get_server_by_context): + self.assertRaisesRegexp(RuntimeError, + 'Unexpected server state', + cfy_local.execute, + 'install') + + self.assertEqual(0, test_vars['server'].start.call_count) + self.assertEqual(1, test_vars['counter']) + + @workflow_test(blueprint_path, copy_plugin_yaml=True) + @mock.patch('nova_plugin.server.start') + @mock.patch('nova_plugin.server._handle_image_or_flavor') + @mock.patch('nova_plugin.server._fail_on_missing_required_parameters') + @mock.patch('openstack_plugin_common.nova_client') + def test_nova_server_creation_param_integrity( + self, cfy_local, mock_nova, 
*args): + cfy_local.execute('install', task_retries=0) + calls = mock_nova.Client.return_value.servers.method_calls + self.assertEqual(1, len(calls)) + kws = calls[0][2] + self.assertIn('scheduler_hints', kws) + self.assertEqual(kws['scheduler_hints'], + {'group': 'affinity-group-id'}, + 'expecting \'scheduler_hints\' value to exist') + + @workflow_test(blueprint_path, copy_plugin_yaml=True, + inputs={'use_password': True}) + @mock.patch('nova_plugin.server.create') + @mock.patch('nova_plugin.server._set_network_and_ip_runtime_properties') + @mock.patch( + 'nova_plugin.server.get_single_connected_node_by_openstack_type', + autospec=True, return_value=None) + def test_nova_server_with_use_password(self, cfy_local, *_): + + test_vars = { + 'counter': 0, + 'server': mock.MagicMock() + } + + tmp_path = tempfile.NamedTemporaryFile(prefix='key_name') + key_path = tmp_path.name + + def mock_get_server_by_context(_): + s = test_vars['server'] + if test_vars['counter'] == 0: + s.status = nova_plugin.server.SERVER_STATUS_BUILD + else: + s.status = nova_plugin.server.SERVER_STATUS_ACTIVE + test_vars['counter'] += 1 + + def check_agent_key_path(private_key): + self.assertEqual(private_key, key_path) + return private_key + + s.get_password = check_agent_key_path + return s + + with mock.patch('nova_plugin.server.get_server_by_context', + mock_get_server_by_context): + with mock.patch( + 'cloudify.context.BootstrapContext.' 
+ 'CloudifyAgent.agent_key_path', + new_callable=mock.PropertyMock, return_value=key_path): + cfy_local.execute('install', task_retries=5) + + +class TestMergeNICs(unittest.TestCase): + def test_merge_prepends_management_network(self): + """When the mgmt network isnt in a relationship, its the 1st nic.""" + mgmt_network_id = 'management network' + nics = [{'net-id': 'other network'}] + + merged = nova_plugin.server._merge_nics(mgmt_network_id, nics) + + self.assertEqual(len(merged), 2) + self.assertEqual(merged[0]['net-id'], 'management network') + + def test_management_network_in_relationships(self): + """When the mgmt network was in a relationship, it's not prepended.""" + mgmt_network_id = 'management network' + nics = [{'net-id': 'other network'}, {'net-id': 'management network'}] + + merged = nova_plugin.server._merge_nics(mgmt_network_id, nics) + + self.assertEqual(nics, merged) + + +class TestNormalizeNICs(unittest.TestCase): + def test_normalize_port_priority(self): + """Whe there's both net-id and port-id, port-id is used.""" + nics = [{'net-id': '1'}, {'port-id': '2'}, {'net-id': 3, 'port-id': 4}] + normalized = nova_plugin.server._normalize_nics(nics) + expected = [{'net-id': '1'}, {'port-id': '2'}, {'port-id': 4}] + self.assertEqual(expected, normalized) + + +class MockNeutronClient(NeutronClientWithSugar): + """A fake neutron client with hard-coded test data.""" + + @mock.patch('openstack_plugin_common.OpenStackClient.__init__', + new=mock.Mock()) + def __init__(self): + super(MockNeutronClient, self).__init__() + + @staticmethod + def _search_filter(objs, search_params): + """Mock neutron's filtering by attributes in list_* methods. 
+ + list_* methods (list_networks, list_ports) + """ + def _matches(obj, search_params): + return all(obj[k] == v for k, v in search_params.items()) + return [obj for obj in objs if _matches(obj, search_params)] + + def list_networks(self, **search_params): + networks = [ + {'name': 'network1', 'id': '1'}, + {'name': 'network2', 'id': '2'}, + {'name': 'network3', 'id': '3'}, + {'name': 'network4', 'id': '4'}, + {'name': 'network5', 'id': '5'}, + {'name': 'network6', 'id': '6'}, + {'name': 'other', 'id': 'other'} + ] + return {'networks': self._search_filter(networks, search_params)} + + def list_ports(self, **search_params): + ports = [ + {'name': 'port1', 'id': '1', 'network_id': '1'}, + {'name': 'port2', 'id': '2', 'network_id': '1'}, + {'name': 'port3', 'id': '3', 'network_id': '2'}, + {'name': 'port4', 'id': '4', 'network_id': '2'}, + ] + return {'ports': self._search_filter(ports, search_params)} + + def show_port(self, port_id): + ports = self.list_ports(id=port_id) + return {'port': ports['ports'][0]} + + +class NICTestBase(RelationshipsTestBase): + """Base test class for the NICs tests. + + It comes with helper methods to create a mock cloudify context, with + the specified relationships. 
+ """ + mock_neutron = MockNeutronClient() + + def _relationship_spec(self, obj, objtype): + return {'node': {'properties': obj}, + 'instance': { + 'runtime_properties': {OPENSTACK_TYPE_PROPERTY: objtype, + OPENSTACK_ID_PROPERTY: obj['id']}}} + + def _make_vm_ctx_with_ports(self, management_network_name, ports): + port_specs = [self._relationship_spec(obj, PORT_OPENSTACK_TYPE) + for obj in ports] + vm_properties = {'management_network_name': management_network_name} + return self._make_vm_ctx_with_relationships(port_specs, + vm_properties) + + def _make_vm_ctx_with_networks(self, management_network_name, networks): + network_specs = [self._relationship_spec(obj, NETWORK_OPENSTACK_TYPE) + for obj in networks] + vm_properties = {'management_network_name': management_network_name} + return self._make_vm_ctx_with_relationships(network_specs, + vm_properties) + + +class TestServerNICs(NICTestBase): + """Test preparing the NICs list from server<->network relationships. + + Each test creates a cloudify context that represents a openstack VM + with relationships to networks. Then, examine the NICs list produced from + the relationships. + """ + def test_nova_server_creation_nics_ordering(self): + """NIC list keeps the order of the relationships. + + The nics= list passed to nova.server.create should be ordered + depending on the relationships to the networks (as defined in the + blueprint). + """ + ctx = self._make_vm_ctx_with_networks( + management_network_name='network1', + networks=[ + {'id': '1'}, + {'id': '2'}, + {'id': '3'}, + {'id': '4'}, + {'id': '5'}, + {'id': '6'}, + ]) + server = {'meta': {}} + + _prepare_server_nics( + self.mock_neutron, ctx, server) + + self.assertEqual( + ['1', '2', '3', '4', '5', '6'], + [n['net-id'] for n in server['nics']]) + + def test_server_creation_prepends_mgmt_network(self): + """If the management network isn't in a relation, it's the first NIC. 
+ + Creating the server examines the relationships, and if it doesn't find + a relationship to the management network, it adds the management + network to the NICs list, as the first element. + """ + ctx = self._make_vm_ctx_with_networks( + management_network_name='other', + networks=[ + {'id': '1'}, + {'id': '2'}, + {'id': '3'}, + {'id': '4'}, + {'id': '5'}, + {'id': '6'}, + ]) + server = {'meta': {}} + + _prepare_server_nics( + self.mock_neutron, ctx, server) + + first_nic = server['nics'][0] + self.assertEqual('other', first_nic['net-id']) + self.assertEqual(7, len(server['nics'])) + + def test_server_creation_uses_relation_mgmt_nic(self): + """If the management network is in a relation, it isn't prepended. + + If the server has a relationship to the management network, + a new NIC isn't prepended to the list. + """ + ctx = self._make_vm_ctx_with_networks( + management_network_name='network1', + networks=[ + {'id': '1'}, + {'id': '2'}, + {'id': '3'}, + {'id': '4'}, + {'id': '5'}, + {'id': '6'}, + ]) + server = {'meta': {}} + + _prepare_server_nics( + self.mock_neutron, ctx, server) + self.assertEqual(6, len(server['nics'])) + + +class TestServerPortNICs(NICTestBase): + """Test preparing the NICs list from server<->port relationships. + + Create a cloudify ctx representing a vm with relationships to + openstack ports. Then examine the resulting NICs list: check that it + contains the networks that the ports were connected to, and that each + connection uses the port that was provided. + """ + + def test_network_with_port(self): + """Port on the management network is used to connect to it. + + The NICs list entry for the management network contains the + port-id of the port from the relationship, but doesn't contain net-id. 
+ """ + ports = [{'id': '1'}] + ctx = self._make_vm_ctx_with_ports('network1', ports) + server = {'meta': {}} + + _prepare_server_nics( + self.mock_neutron, ctx, server) + + self.assertEqual([{'port-id': '1'}], server['nics']) + + def test_port_not_to_mgmt_network(self): + """A NICs list entry is added with the network and the port. + + A relationship to a port must not only add a NIC, but the NIC must + also make sure to use that port. + """ + ports = [{'id': '1'}] + ctx = self._make_vm_ctx_with_ports('other', ports) + server = {'meta': {}} + + _prepare_server_nics( + self.mock_neutron, ctx, server) + expected = [ + {'net-id': 'other'}, + {'port-id': '1'} + ] + self.assertEqual(expected, server['nics']) + + +class TestBootFromVolume(unittest.TestCase): + + @mock.patch('nova_plugin.server._get_boot_volume_relationships', + autospec=True) + def test_handle_boot_volume(self, mock_get_rels): + mock_get_rels.return_value.runtime_properties = { + 'external_id': 'test-id', + 'availability_zone': 'test-az', + } + server = {} + ctx = mock.MagicMock() + nova_plugin.server._handle_boot_volume(server, ctx) + self.assertEqual({'vda': 'test-id:::0'}, + server['block_device_mapping']) + self.assertEqual('test-az', + server['availability_zone']) + + @mock.patch('nova_plugin.server._get_boot_volume_relationships', + autospec=True, return_value=[]) + def test_handle_boot_volume_no_boot_volume(self, *_): + server = {} + ctx = mock.MagicMock() + nova_plugin.server._handle_boot_volume(server, ctx) + self.assertNotIn('block_device_mapping', server) + + +class TestImageFromRelationships(unittest.TestCase): + + @mock.patch('glance_plugin.image.' + 'get_openstack_ids_of_connected_nodes_by_openstack_type', + autospec=True, return_value=['test-id']) + def test_handle_boot_image(self, *_): + server = {} + ctx = mock.MagicMock() + nova_plugin.server.handle_image_from_relationship(server, 'image', ctx) + self.assertEqual({'image': 'test-id'}, server) + + @mock.patch('glance_plugin.image.' 
+ 'get_openstack_ids_of_connected_nodes_by_openstack_type', + autospec=True, return_value=[]) + def test_handle_boot_image_no_image(self, *_): + server = {} + ctx = mock.MagicMock() + nova_plugin.server.handle_image_from_relationship(server, 'image', ctx) + self.assertNotIn('image', server) + + +class TestServerRelationships(unittest.TestCase): + + def _get_ctx_mock(self, instance_id, boot): + rel_specs = [MockRelationshipContext( + target=MockRelationshipSubjectContext(node=MockNodeContext( + properties={'boot': boot}), instance=MockNodeInstanceContext( + runtime_properties={ + OPENSTACK_TYPE_PROPERTY: VOLUME_OPENSTACK_TYPE, + OPENSTACK_ID_PROPERTY: instance_id + })))] + ctx = mock.MagicMock() + ctx.instance = MockNodeInstanceContext(relationships=rel_specs) + ctx.logger = setup_logger('mock-logger') + return ctx + + def test_boot_volume_relationship(self): + instance_id = 'test-id' + ctx = self._get_ctx_mock(instance_id, True) + result = nova_plugin.server._get_boot_volume_relationships( + VOLUME_OPENSTACK_TYPE, ctx) + self.assertEqual( + instance_id, + result.runtime_properties['external_id']) + + def test_no_boot_volume_relationship(self): + instance_id = 'test-id' + ctx = self._get_ctx_mock(instance_id, False) + result = nova_plugin.server._get_boot_volume_relationships( + VOLUME_OPENSTACK_TYPE, ctx) + self.assertFalse(result) + + +class TestServerNetworkRuntimeProperties(unittest.TestCase): + + @property + def mock_ctx(self): + return MockCloudifyContext( + node_id='test', + deployment_id='test', + properties={}, + operation={'retry_number': 0}, + provider_context={'resources': {}} + ) + + def test_server_networks_runtime_properties_empty_server(self): + ctx = self.mock_ctx + current_ctx.set(ctx=ctx) + server = mock.MagicMock() + setattr(server, 'networks', {}) + with self.assertRaisesRegexp( + NonRecoverableError, + 'The server was created but not attached to a network.'): + nova_plugin.server._set_network_and_ip_runtime_properties(server) + + def 
test_server_networks_runtime_properties_valid_networks(self): + ctx = self.mock_ctx + current_ctx.set(ctx=ctx) + server = mock.MagicMock() + network_id = 'management_network' + network_ips = ['good', 'bad1', 'bad2'] + setattr(server, + 'networks', + {network_id: network_ips}) + nova_plugin.server._set_network_and_ip_runtime_properties(server) + self.assertIn('networks', ctx.instance.runtime_properties.keys()) + self.assertIn('ip', ctx.instance.runtime_properties.keys()) + self.assertEquals(ctx.instance.runtime_properties['ip'], 'good') + self.assertEquals(ctx.instance.runtime_properties['networks'], + {network_id: network_ips}) + + def test_server_networks_runtime_properties_empty_networks(self): + ctx = self.mock_ctx + current_ctx.set(ctx=ctx) + server = mock.MagicMock() + network_id = 'management_network' + network_ips = [] + setattr(server, + 'networks', + {network_id: network_ips}) + nova_plugin.server._set_network_and_ip_runtime_properties(server) + self.assertIn('networks', ctx.instance.runtime_properties.keys()) + self.assertIn('ip', ctx.instance.runtime_properties.keys()) + self.assertEquals(ctx.instance.runtime_properties['ip'], None) + self.assertEquals(ctx.instance.runtime_properties['networks'], + {network_id: network_ips}) diff --git a/aria/multivim-plugin/nova_plugin/tests/test_server_image_and_flavor.py b/aria/multivim-plugin/nova_plugin/tests/test_server_image_and_flavor.py new file mode 100644 index 0000000000..2ae475843c --- /dev/null +++ b/aria/multivim-plugin/nova_plugin/tests/test_server_image_and_flavor.py @@ -0,0 +1,228 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. + + +import unittest + +import mock +from novaclient import exceptions as nova_exceptions + +import nova_plugin.server as server +from cloudify.exceptions import NonRecoverableError +from cloudify.mocks import MockCloudifyContext + + +class TestServerImageAndFlavor(unittest.TestCase): + + def test_no_image_and_no_flavor(self): + node_props = { + 'image': '', + 'flavor': '' + } + with mock.patch('nova_plugin.server.ctx', + self._get_mock_ctx_with_node_properties(node_props)): + nova_client = self._get_mocked_nova_client() + + serv = {} + self.assertRaises(NonRecoverableError, + server._handle_image_or_flavor, + serv, nova_client, 'image') + self.assertRaises(NonRecoverableError, + server._handle_image_or_flavor, + serv, nova_client, 'flavor') + + def test_image_and_flavor_properties_as_names(self): + node_props = { + 'image': 'some-image-name', + 'flavor': 'some-flavor-name' + } + with mock.patch('nova_plugin.server.ctx', + self._get_mock_ctx_with_node_properties(node_props)): + nova_client = self._get_mocked_nova_client() + + serv = {} + server._handle_image_or_flavor(serv, nova_client, 'image') + server._handle_image_or_flavor(serv, nova_client, 'flavor') + + self.assertEquals('some-image-id', serv.get('image')) + self.assertEquals('some-flavor-id', serv.get('flavor')) + + def test_image_and_flavor_properties_as_ids(self): + node_props = { + 'image': 'some-image-id', + 'flavor': 'some-flavor-id' + } + with mock.patch('nova_plugin.server.ctx', + self._get_mock_ctx_with_node_properties(node_props)): + nova_client = self._get_mocked_nova_client() + + 
serv = {} + server._handle_image_or_flavor(serv, nova_client, 'image') + server._handle_image_or_flavor(serv, nova_client, 'flavor') + + self.assertEquals('some-image-id', serv.get('image')) + self.assertEquals('some-flavor-id', serv.get('flavor')) + + def test_image_id_and_flavor_id(self): + node_props = { + 'image': '', + 'flavor': '' + } + with mock.patch('nova_plugin.server.ctx', + self._get_mock_ctx_with_node_properties(node_props)): + nova_client = self._get_mocked_nova_client() + + serv = {} + serv['image'] = 'some-image-id' + serv['flavor'] = 'some-flavor-id' + server._handle_image_or_flavor(serv, nova_client, 'image') + server._handle_image_or_flavor(serv, nova_client, 'flavor') + + self.assertEquals('some-image-id', serv.get('image')) + self.assertEquals('some-flavor-id', serv.get('flavor')) + + def test_image_name_and_flavor_name(self): + node_props = { + 'image': '', + 'flavor': '' + } + with mock.patch('nova_plugin.server.ctx', + self._get_mock_ctx_with_node_properties(node_props)): + nova_client = self._get_mocked_nova_client() + + serv = {} + serv['image_name'] = 'some-image-name' + serv['flavor_name'] = 'some-flavor-name' + server._handle_image_or_flavor(serv, nova_client, 'image') + server._handle_image_or_flavor(serv, nova_client, 'flavor') + + self.assertEquals('some-image-id', serv.get('image')) + self.assertNotIn('image_name', serv) + self.assertEquals('some-flavor-id', serv.get('flavor')) + self.assertNotIn('flavor_name', serv) + + def test_unknown_image_name_and_flavor_name(self): + node_props = { + 'image': '', + 'flavor': '' + } + with mock.patch('nova_plugin.server.ctx', + self._get_mock_ctx_with_node_properties(node_props)): + nova_client = self._get_mocked_nova_client() + + serv = {} + serv['image_name'] = 'some-unknown-image-name' + serv['flavor_name'] = 'some-unknown-flavor-name' + + self.assertRaises(nova_exceptions.NotFound, + server._handle_image_or_flavor, + serv, nova_client, 'image') + self.assertRaises(nova_exceptions.NotFound, 
+ server._handle_image_or_flavor, + serv, nova_client, 'flavor') + + def test_image_id_and_flavor_id_override_on_properties(self): + node_props = { + 'image': 'properties-image-id', + 'flavor': 'properties-flavor-id' + } + with mock.patch('nova_plugin.server.ctx', + self._get_mock_ctx_with_node_properties(node_props)): + nova_client = self._get_mocked_nova_client() + + serv = {} + serv['image'] = 'some-image-id' + serv['flavor'] = 'some-flavor-id' + server._handle_image_or_flavor(serv, nova_client, 'image') + server._handle_image_or_flavor(serv, nova_client, 'flavor') + + self.assertEquals('some-image-id', serv.get('image')) + self.assertEquals('some-flavor-id', serv.get('flavor')) + + def test_image_name_and_flavor_name_override_on_properties(self): + node_props = { + 'image': 'properties-image-id', + 'flavor': 'properties-flavor-id' + } + with mock.patch('nova_plugin.server.ctx', + self._get_mock_ctx_with_node_properties(node_props)): + nova_client = self._get_mocked_nova_client() + + serv = {} + serv['image_name'] = 'some-image-name' + serv['flavor_name'] = 'some-flavor-name' + server._handle_image_or_flavor(serv, nova_client, 'image') + server._handle_image_or_flavor(serv, nova_client, 'flavor') + + self.assertEquals('some-image-id', serv.get('image')) + self.assertNotIn('image_name', serv) + self.assertEquals('some-flavor-id', serv.get('flavor')) + self.assertNotIn('flavor_name', serv) + + def test_image_name_and_flavor_name_override_on_image_and_flavor_ids(self): + node_props = { + 'image': '', + 'flavor': '' + } + with mock.patch('nova_plugin.server.ctx', + self._get_mock_ctx_with_node_properties(node_props)): + nova_client = self._get_mocked_nova_client() + + serv = {} + serv['image'] = 'some-bad-image-id' + serv['image_name'] = 'some-image-name' + serv['flavor'] = 'some-bad-flavor-id' + serv['flavor_name'] = 'some-flavor-name' + server._handle_image_or_flavor(serv, nova_client, 'image') + server._handle_image_or_flavor(serv, nova_client, 'flavor') + + 
self.assertEquals('some-image-id', serv.get('image')) + self.assertNotIn('image_name', serv) + self.assertEquals('some-flavor-id', serv.get('flavor')) + self.assertNotIn('flavor_name', serv) + + @staticmethod + def _get_mocked_nova_client(): + nova_client = mock.MagicMock() + + def mock_get_if_exists(prop_name, **kwargs): + is_image = prop_name == 'image' + searched_name = kwargs.get('name') + if (is_image and searched_name == 'some-image-name') or \ + (not is_image and searched_name == 'some-flavor-name'): + result = mock.MagicMock() + result.id = 'some-image-id' if \ + is_image else 'some-flavor-id' + return result + return [] + + def mock_find_generator(prop_name): + def mock_find(**kwargs): + result = mock_get_if_exists(prop_name, **kwargs) + if not result: + raise nova_exceptions.NotFound(404) + return result + return mock_find + + nova_client.cosmo_plural = lambda x: '{0}s'.format(x) + nova_client.cosmo_get_if_exists = mock_get_if_exists + nova_client.images.find = mock_find_generator('image') + nova_client.flavors.find = mock_find_generator('flavor') + return nova_client + + @staticmethod + def _get_mock_ctx_with_node_properties(properties): + return MockCloudifyContext(node_id='test_node_id', + properties=properties) diff --git a/aria/multivim-plugin/nova_plugin/tests/test_userdata.py b/aria/multivim-plugin/nova_plugin/tests/test_userdata.py new file mode 100644 index 0000000000..d7f056d72c --- /dev/null +++ b/aria/multivim-plugin/nova_plugin/tests/test_userdata.py @@ -0,0 +1,63 @@ +######### +# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. + +import unittest + +import mock + +from cloudify.mocks import MockCloudifyContext + +from nova_plugin import userdata + + +def ctx_mock(): + result = MockCloudifyContext( + node_id='d', + properties={}) + result.node.type_hierarchy = ['cloudify.nodes.Compute'] + return result + + +class TestServerUserdataHandling(unittest.TestCase): + + @mock.patch('nova_plugin.userdata.ctx', ctx_mock()) + def test_no_userdata(self): + server_conf = {} + userdata.handle_userdata(server_conf) + self.assertEqual(server_conf, {}) + + def test_agent_installation_userdata(self): + ctx = ctx_mock() + ctx.agent.init_script = lambda: 'SCRIPT' + with mock.patch('nova_plugin.userdata.ctx', ctx): + server_conf = {} + userdata.handle_userdata(server_conf) + self.assertEqual(server_conf, {'userdata': 'SCRIPT'}) + + @mock.patch('nova_plugin.userdata.ctx', ctx_mock()) + def test_existing_userdata(self): + server_conf = {'userdata': 'EXISTING'} + server_conf_copy = server_conf.copy() + userdata.handle_userdata(server_conf) + self.assertEqual(server_conf, server_conf_copy) + + def test_existing_and_agent_installation_userdata(self): + ctx = ctx_mock() + ctx.agent.init_script = lambda: '#! SCRIPT' + with mock.patch('nova_plugin.userdata.ctx', ctx): + server_conf = {'userdata': '#! 
EXISTING'} + userdata.handle_userdata(server_conf) + self.assertTrue(server_conf['userdata'].startswith( + 'Content-Type: multi')) diff --git a/aria/multivim-plugin/nova_plugin/tests/test_validation.py b/aria/multivim-plugin/nova_plugin/tests/test_validation.py new file mode 100644 index 0000000000..aa1dfdd814 --- /dev/null +++ b/aria/multivim-plugin/nova_plugin/tests/test_validation.py @@ -0,0 +1,194 @@ +######### +# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+ +import os +from os import path +import tempfile +import shutil + +import unittest +import mock + +from cloudify.test_utils import workflow_test +from nova_plugin.keypair import creation_validation +from cloudify.exceptions import NonRecoverableError + +PRIVATE_KEY_NAME = 'private_key' + + +class TestValidation(unittest.TestCase): + + blueprint_path = path.join('resources', + 'test-keypair-validation-blueprint.yaml') + + def setUp(self): + _, fp = tempfile.mkstemp() + self.private_key = fp + _, fp = tempfile.mkstemp() + self.not_readable_private_key = fp + os.chmod(self.not_readable_private_key, 0o200) + self.temp_dir = tempfile.mkdtemp() + self.not_writable_temp_dir_r = tempfile.mkdtemp() + os.chmod(self.not_writable_temp_dir_r, 0o400) + self.not_writable_temp_dir_rx = tempfile.mkdtemp() + os.chmod(self.not_writable_temp_dir_rx, 0o500) + self.not_writable_temp_dir_rw = tempfile.mkdtemp() + os.chmod(self.not_writable_temp_dir_rw, 0o600) + + def tearDown(self): + if self.private_key: + os.remove(self.private_key) + + if self.not_readable_private_key: + os.remove(self.not_readable_private_key) + + shutil.rmtree(self.not_writable_temp_dir_r, ignore_errors=True) + shutil.rmtree(self.not_writable_temp_dir_rx, ignore_errors=True) + shutil.rmtree(self.not_writable_temp_dir_rw, ignore_errors=True) + shutil.rmtree(self.temp_dir, ignore_errors=True) + + def new_keypair_create(self, *args, **kwargs): + creation_validation(*args, **kwargs) + + def new_keypair_create_with_exception(self, *args, **kwargs): + self.assertRaises(NonRecoverableError, creation_validation, + *args, **kwargs) + + def get_keypair_inputs_private_key(self, is_external, **kwargs): + return { + 'private_key': self.private_key, + 'is_keypair_external': is_external + } + + def get_keypair_inputs_not_readable_private_key(self, + is_external, **kwargs): + return { + 'private_key': self.not_readable_private_key, + 'is_keypair_external': is_external + } + + def get_keypair_inputs_not_writable_dir_r(self, 
is_external, **kwargs): + return { + 'private_key': path.join(self.not_writable_temp_dir_r, + PRIVATE_KEY_NAME), + 'is_keypair_external': is_external + } + + def get_keypair_inputs_not_writable_dir_rx(self, is_external, **kwargs): + return { + 'private_key': path.join(self.not_writable_temp_dir_rx, + PRIVATE_KEY_NAME), + 'is_keypair_external': is_external + } + + def get_keypair_inputs_not_writable_dir_rw(self, is_external, **kwargs): + return { + 'private_key': path.join(self.not_writable_temp_dir_rw, + PRIVATE_KEY_NAME), + 'is_keypair_external': is_external + } + + def get_keypair_inputs_temp_dir(self, is_external, **kwargs): + return { + 'private_key': path.join(self.temp_dir, PRIVATE_KEY_NAME), + 'is_keypair_external': is_external + } + + @workflow_test(blueprint_path, inputs={ + 'private_key': '', + 'is_keypair_external': False + }) + @mock.patch('nova_plugin.keypair.validate_resource') + def test_keypair_valid_config(self, cfy_local, *args): + + with mock.patch('nova_plugin.keypair.create', + new=self.new_keypair_create): + cfy_local.execute('install', task_retries=0) + + @workflow_test(blueprint_path, inputs='get_keypair_inputs_private_key', + input_func_kwargs={'is_external': True}) + @mock.patch('nova_plugin.keypair.validate_resource') + def test_keypair_valid_config_external(self, cfy_local, *args): + + with mock.patch('nova_plugin.keypair.create', + new=self.new_keypair_create): + cfy_local.execute('install', task_retries=0) + + @workflow_test(blueprint_path, inputs='get_keypair_inputs_temp_dir', + input_func_kwargs={'is_external': True}) + @mock.patch('nova_plugin.keypair.validate_resource') + def test_keypair_no_private_key(self, cfy_local, *args): + + with mock.patch('nova_plugin.keypair.create', + new=self.new_keypair_create_with_exception): + cfy_local.execute('install', task_retries=0) + + @workflow_test(blueprint_path, inputs='get_keypair_inputs_private_key', + input_func_kwargs={'is_external': False}) + 
@mock.patch('nova_plugin.keypair.validate_resource') + def test_keypair_local_and_exists(self, cfy_local, *args): + + with mock.patch('nova_plugin.keypair.create', + new=self.new_keypair_create_with_exception): + cfy_local.execute('install', task_retries=0) + + @workflow_test(blueprint_path, inputs='get_keypair_inputs_temp_dir', + input_func_kwargs={'is_external': False}) + @mock.patch('nova_plugin.keypair.validate_resource') + def test_keypair_local_temp_dir(self, cfy_local, *args): + + with mock.patch('nova_plugin.keypair.create', + new=self.new_keypair_create): + cfy_local.execute('install', task_retries=0) + + @workflow_test(blueprint_path, + inputs='get_keypair_inputs_not_writable_dir_r', + input_func_kwargs={'is_external': False}) + @mock.patch('nova_plugin.keypair.validate_resource') + def test_keypair_local_non_writable_dir_r(self, cfy_local, *args): + + with mock.patch('nova_plugin.keypair.create', + new=self.new_keypair_create_with_exception): + cfy_local.execute('install', task_retries=0) + + @workflow_test(blueprint_path, + inputs='get_keypair_inputs_not_writable_dir_rx', + input_func_kwargs={'is_external': False}) + @mock.patch('nova_plugin.keypair.validate_resource') + def test_keypair_local_non_writable_dir_rx(self, cfy_local, *args): + + with mock.patch('nova_plugin.keypair.create', + new=self.new_keypair_create_with_exception): + cfy_local.execute('install', task_retries=0) + + @workflow_test(blueprint_path, + inputs='get_keypair_inputs_not_writable_dir_rw', + input_func_kwargs={'is_external': False}) + @mock.patch('nova_plugin.keypair.validate_resource') + def test_keypair_local_non_writable_dir_rw(self, cfy_local, *args): + + with mock.patch('nova_plugin.keypair.create', + new=self.new_keypair_create_with_exception): + cfy_local.execute('install', task_retries=0) + + @workflow_test(blueprint_path, + inputs='get_keypair_inputs_not_readable_private_key', + input_func_kwargs={'is_external': True}) + 
@mock.patch('nova_plugin.keypair.validate_resource') + def test_keypair_not_readable_private_key(self, cfy_local, *args): + + with mock.patch('nova_plugin.keypair.create', + new=self.new_keypair_create_with_exception): + cfy_local.execute('install', task_retries=0) diff --git a/aria/multivim-plugin/nova_plugin/userdata.py b/aria/multivim-plugin/nova_plugin/userdata.py new file mode 100644 index 0000000000..ba63bb5328 --- /dev/null +++ b/aria/multivim-plugin/nova_plugin/userdata.py @@ -0,0 +1,50 @@ +######### +# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+ +import requests + +from cloudify import compute +from cloudify import exceptions +from cloudify import ctx + + +def handle_userdata(server): + + existing_userdata = server.get('userdata') + install_agent_userdata = ctx.agent.init_script() + + if not (existing_userdata or install_agent_userdata): + return + + if isinstance(existing_userdata, dict): + ud_type = existing_userdata['type'] + if ud_type not in userdata_handlers: + raise exceptions.NonRecoverableError( + "Invalid type '{0}' for server userdata)".format(ud_type)) + existing_userdata = userdata_handlers[ud_type](existing_userdata) + + if not existing_userdata: + final_userdata = install_agent_userdata + elif not install_agent_userdata: + final_userdata = existing_userdata + else: + final_userdata = compute.create_multi_mimetype_userdata( + [existing_userdata, install_agent_userdata]) + server['userdata'] = final_userdata + + +userdata_handlers = { + 'http': lambda params: requests.get(params['url']).text +} diff --git a/aria/multivim-plugin/openstack_plugin_common/__init__.py b/aria/multivim-plugin/openstack_plugin_common/__init__.py new file mode 100644 index 0000000000..353b2be03f --- /dev/null +++ b/aria/multivim-plugin/openstack_plugin_common/__init__.py @@ -0,0 +1,1005 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+ +from functools import wraps, partial +import json +import os +import sys + +from IPy import IP +from keystoneauth1 import loading, session +import cinderclient.client as cinder_client +import cinderclient.exceptions as cinder_exceptions +import keystoneclient.v3.client as keystone_client +import keystoneclient.exceptions as keystone_exceptions +import neutronclient.v2_0.client as neutron_client +import neutronclient.common.exceptions as neutron_exceptions +import novaclient.client as nova_client +import novaclient.exceptions as nova_exceptions +import glanceclient.client as glance_client +import glanceclient.exc as glance_exceptions + +import cloudify +from cloudify import context, ctx +from cloudify.exceptions import NonRecoverableError, RecoverableError + +INFINITE_RESOURCE_QUOTA = -1 + +# properties +USE_EXTERNAL_RESOURCE_PROPERTY = 'use_external_resource' +CREATE_IF_MISSING_PROPERTY = 'create_if_missing' +CONFIG_PROPERTY = 'openstack_config' + +# runtime properties +OPENSTACK_AZ_PROPERTY = 'availability_zone' +OPENSTACK_ID_PROPERTY = 'external_id' # resource's openstack id +OPENSTACK_TYPE_PROPERTY = 'external_type' # resource's openstack type +OPENSTACK_NAME_PROPERTY = 'external_name' # resource's openstack name +CONDITIONALLY_CREATED = 'conditionally_created' # resource was +# conditionally created +CONFIG_RUNTIME_PROPERTY = CONFIG_PROPERTY # openstack configuration + +# operation inputs +CONFIG_INPUT = CONFIG_PROPERTY + +# runtime properties which all types use +COMMON_RUNTIME_PROPERTIES_KEYS = [OPENSTACK_ID_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + OPENSTACK_NAME_PROPERTY, + CONDITIONALLY_CREATED] + +MISSING_RESOURCE_MESSAGE = "Couldn't find a resource of " \ + "type {0} with the name or id {1}" + + +class ProviderContext(object): + + def __init__(self, provider_context): + self._provider_context = provider_context or {} + self._resources = self._provider_context.get('resources', {}) + + @property + def agents_keypair(self): + return 
self._resources.get('agents_keypair') + + @property + def agents_security_group(self): + return self._resources.get('agents_security_group') + + @property + def ext_network(self): + return self._resources.get('ext_network') + + @property + def floating_ip(self): + return self._resources.get('floating_ip') + + @property + def int_network(self): + return self._resources.get('int_network') + + @property + def management_keypair(self): + return self._resources.get('management_keypair') + + @property + def management_security_group(self): + return self._resources.get('management_security_group') + + @property + def management_server(self): + return self._resources.get('management_server') + + @property + def router(self): + return self._resources.get('router') + + @property + def subnet(self): + return self._resources.get('subnet') + + def __repr__(self): + info = json.dumps(self._provider_context) + return '<' + self.__class__.__name__ + ' ' + info + '>' + + +def provider(ctx): + return ProviderContext(ctx.provider_context) + + +def assign_payload_as_runtime_properties(ctx, resource_name, payload={}): + """ + In general Openstack API objects have create, update, and delete + functions. Each function normally receives a payload that describes + the desired configuration of the object. + This makes sure to store that configuration in the runtime + properties and cleans any potentially sensitive data. + + :param ctx: The Cloudify NodeInstanceContext + :param resource_name: A string describing the resource. + :param payload: The payload. 
+ :return: + """ + + # Avoid failing if a developer inadvertently passes a + # non-NodeInstanceContext + if getattr(ctx, 'instance'): + if resource_name not in ctx.instance.runtime_properties.keys(): + ctx.instance.runtime_properties[resource_name] = {} + for key, value in payload.items(): + if key != 'user_data' and key != 'adminPass': + ctx.instance.runtime_properties[resource_name][key] = value + + +def get_relationships_by_relationship_type(ctx, type_name): + """ + Get cloudify relationships by relationship type. + Follows the inheritance tree. + + :param ctx: Cloudify NodeInstanceContext + :param type_name: desired relationship type derived + from cloudify.relationships.depends_on. + :return: list of RelationshipSubjectContext + """ + + return [rel for rel in ctx.instance.relationships if + type_name in rel.type_hierarchy] + + +def get_attribute_of_connected_nodes_by_relationship_type(ctx, + type_name, + attribute_name): + """ + Returns a list of OPENSTACK_ID_PROPERTY from a list of + Cloudify RelationshipSubjectContext. + + :param ctx: Cloudify NodeInstanceContext + :param type_name: desired relationship type derived + from cloudify.relationships.depends_on. 
+ :param attribute_name: usually either + OPENSTACK_NAME_PROPERTY or OPENSTACK_ID_PROPERTY + :return: + """ + + return [rel.target.instance.runtime_properties[attribute_name] + for rel in get_relationships_by_relationship_type(ctx, type_name)] + + +def get_relationships_by_openstack_type(ctx, type_name): + return [rel for rel in ctx.instance.relationships + if rel.target.instance.runtime_properties.get( + OPENSTACK_TYPE_PROPERTY) == type_name] + + +def get_connected_nodes_by_openstack_type(ctx, type_name): + return [rel.target.node + for rel in get_relationships_by_openstack_type(ctx, type_name)] + + +def get_openstack_ids_of_connected_nodes_by_openstack_type(ctx, type_name): + return [rel.target.instance.runtime_properties[OPENSTACK_ID_PROPERTY] + for rel in get_relationships_by_openstack_type(ctx, type_name) + ] + + +def get_openstack_names_of_connected_nodes_by_openstack_type(ctx, type_name): + return [rel.target.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] + for rel in get_relationships_by_openstack_type(ctx, type_name) + ] + + +def get_single_connected_node_by_openstack_type( + ctx, type_name, if_exists=False): + nodes = get_connected_nodes_by_openstack_type(ctx, type_name) + check = len(nodes) > 1 if if_exists else len(nodes) != 1 + if check: + raise NonRecoverableError( + 'Expected {0} one {1} node. got {2}'.format( + 'at most' if if_exists else 'exactly', type_name, len(nodes))) + return nodes[0] if nodes else None + + +def get_openstack_id_of_single_connected_node_by_openstack_type( + ctx, type_name, if_exists=False): + ids = get_openstack_ids_of_connected_nodes_by_openstack_type(ctx, + type_name) + check = len(ids) > 1 if if_exists else len(ids) != 1 + if check: + raise NonRecoverableError( + 'Expected {0} one {1} capability. 
got {2}'.format( + 'at most' if if_exists else 'exactly', type_name, len(ids))) + return ids[0] if ids else None + + +def get_resource_id(ctx, type_name): + if ctx.node.properties['resource_id']: + return ctx.node.properties['resource_id'] + return "{0}_{1}_{2}".format(type_name, ctx.deployment.id, ctx.instance.id) + + +def transform_resource_name(ctx, res): + + if isinstance(res, basestring): + res = {'name': res} + + if not isinstance(res, dict): + raise ValueError("transform_resource_name() expects either string or " + "dict as the first parameter") + + pfx = ctx.bootstrap_context.resources_prefix + + if not pfx: + return res['name'] + + name = res['name'] + res['name'] = pfx + name + + if name.startswith(pfx): + ctx.logger.warn("Prefixing resource '{0}' with '{1}' but it " + "already has this prefix".format(name, pfx)) + else: + ctx.logger.info("Transformed resource name '{0}' to '{1}'".format( + name, res['name'])) + + return res['name'] + + +def _get_resource_by_name_or_id_from_ctx(ctx, name_field_name, openstack_type, + sugared_client): + resource_id = ctx.node.properties['resource_id'] + if not resource_id: + raise NonRecoverableError( + "Can't set '{0}' to True without supplying a value for " + "'resource_id'".format(USE_EXTERNAL_RESOURCE_PROPERTY)) + + return get_resource_by_name_or_id(resource_id, openstack_type, + sugared_client, True, name_field_name) + + +def get_resource_by_name_or_id( + resource_id, openstack_type, sugared_client, + raise_if_not_found=True, name_field_name='name'): + + # search for resource by name (or name-equivalent field) + search_param = {name_field_name: resource_id} + resource = sugared_client.cosmo_get_if_exists(openstack_type, + **search_param) + if not resource: + # fallback - search for resource by id + resource = sugared_client.cosmo_get_if_exists( + openstack_type, id=resource_id) + + if not resource and raise_if_not_found: + raise NonRecoverableError( + MISSING_RESOURCE_MESSAGE.format(openstack_type, resource_id)) + + 
return resource + + +def use_external_resource(ctx, sugared_client, openstack_type, + name_field_name='name'): + if not is_external_resource(ctx): + return None + try: + resource = _get_resource_by_name_or_id_from_ctx( + ctx, name_field_name, openstack_type, sugared_client) + except NonRecoverableError: + if is_create_if_missing(ctx): + ctx.instance.runtime_properties[CONDITIONALLY_CREATED] = True + return None + else: + raise + + ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = \ + sugared_client.get_id_from_resource(resource) + ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = openstack_type + + from openstack_plugin_common.floatingip import FLOATINGIP_OPENSTACK_TYPE + # store openstack name runtime property, unless it's a floating IP type, + # in which case the ip will be stored in the runtime properties instead. + if openstack_type != FLOATINGIP_OPENSTACK_TYPE: + ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = \ + sugared_client.get_name_from_resource(resource) + + ctx.logger.info('Using external resource {0}: {1}'.format( + openstack_type, ctx.node.properties['resource_id'])) + return resource + + +def validate_resource(ctx, sugared_client, openstack_type, + name_field_name='name'): + ctx.logger.debug('validating resource {0} (node {1})'.format( + openstack_type, ctx.node.id)) + + openstack_type_plural = sugared_client.cosmo_plural(openstack_type) + resource = None + + if is_external_resource(ctx): + + try: + # validate the resource truly exists + resource = _get_resource_by_name_or_id_from_ctx( + ctx, name_field_name, openstack_type, sugared_client) + ctx.logger.debug('OK: {0} {1} found in pool'.format( + openstack_type, ctx.node.properties['resource_id'])) + except NonRecoverableError as e: + if not is_create_if_missing(ctx): + ctx.logger.error('VALIDATION ERROR: ' + str(e)) + resource_list = list(sugared_client.cosmo_list(openstack_type)) + if resource_list: + ctx.logger.info('list of existing {0}: '.format( + 
openstack_type_plural)) + for resource in resource_list: + ctx.logger.info(' {0:>10} - {1}'.format( + sugared_client.get_id_from_resource(resource), + sugared_client.get_name_from_resource(resource))) + else: + ctx.logger.info('there are no existing {0}'.format( + openstack_type_plural)) + raise + if not resource: + if isinstance(sugared_client, NovaClientWithSugar): + # not checking quota for Nova resources due to a bug in Nova client + return + + # validate available quota for provisioning the resource + resource_list = list(sugared_client.cosmo_list(openstack_type)) + resource_amount = len(resource_list) + + resource_quota = sugared_client.get_quota(openstack_type) + + if resource_amount < resource_quota \ + or resource_quota == INFINITE_RESOURCE_QUOTA: + ctx.logger.debug( + 'OK: {0} (node {1}) can be created. provisioned {2}: {3}, ' + 'quota: {4}' + .format(openstack_type, ctx.node.id, openstack_type_plural, + resource_amount, resource_quota)) + else: + err = ('{0} (node {1}) cannot be created due to quota limitations.' 
+ ' provisioned {2}: {3}, quota: {4}' + .format(openstack_type, ctx.node.id, openstack_type_plural, + resource_amount, resource_quota)) + ctx.logger.error('VALIDATION ERROR:' + err) + raise NonRecoverableError(err) + + +def delete_resource_and_runtime_properties(ctx, sugared_client, + runtime_properties_keys): + node_openstack_type = ctx.instance.runtime_properties[ + OPENSTACK_TYPE_PROPERTY] + if not is_external_resource(ctx): + ctx.logger.info('deleting {0}'.format(node_openstack_type)) + sugared_client.cosmo_delete_resource( + node_openstack_type, + ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY]) + else: + ctx.logger.info('not deleting {0} since an external {0} is ' + 'being used'.format(node_openstack_type)) + + delete_runtime_properties(ctx, runtime_properties_keys) + + +def is_external_resource(ctx): + return is_external_resource_by_properties(ctx.node.properties) + + +def is_external_resource_not_conditionally_created(ctx): + return is_external_resource_by_properties(ctx.node.properties) and \ + not ctx.instance.runtime_properties.get(CONDITIONALLY_CREATED) + + +def is_external_relationship_not_conditionally_created(ctx): + return is_external_resource_by_properties(ctx.source.node.properties) and \ + is_external_resource_by_properties(ctx.target.node.properties) and \ + not ctx.source.instance.runtime_properties.get( + CONDITIONALLY_CREATED) and not \ + ctx.target.instance.runtime_properties.get(CONDITIONALLY_CREATED) + + +def is_create_if_missing(ctx): + return is_create_if_missing_by_properties(ctx.node.properties) + + +def is_external_relationship(ctx): + return is_external_resource_by_properties(ctx.source.node.properties) and \ + is_external_resource_by_properties(ctx.target.node.properties) + + +def is_external_resource_by_properties(properties): + return USE_EXTERNAL_RESOURCE_PROPERTY in properties and \ + properties[USE_EXTERNAL_RESOURCE_PROPERTY] + + +def is_create_if_missing_by_properties(properties): + return CREATE_IF_MISSING_PROPERTY in 
properties and \ + properties[CREATE_IF_MISSING_PROPERTY] + + +def delete_runtime_properties(ctx, runtime_properties_keys): + for runtime_prop_key in runtime_properties_keys: + if runtime_prop_key in ctx.instance.runtime_properties: + del ctx.instance.runtime_properties[runtime_prop_key] + + +def validate_ip_or_range_syntax(ctx, address, is_range=True): + range_suffix = ' range' if is_range else '' + ctx.logger.debug('checking whether {0} is a valid address{1}...' + .format(address, range_suffix)) + try: + IP(address) + ctx.logger.debug('OK:' + '{0} is a valid address{1}.'.format(address, + range_suffix)) + except ValueError as e: + err = ('{0} is not a valid address{1}; {2}'.format( + address, range_suffix, e.message)) + ctx.logger.error('VALIDATION ERROR:' + err) + raise NonRecoverableError(err) + + +class Config(object): + + OPENSTACK_CONFIG_PATH_ENV_VAR = 'OPENSTACK_CONFIG_PATH' + OPENSTACK_CONFIG_PATH_DEFAULT_PATH = '~/openstack_config.json' + OPENSTACK_ENV_VAR_PREFIX = 'OS_' + OPENSTACK_SUPPORTED_ENV_VARS = { + 'OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD', 'OS_TENANT_NAME', + 'OS_REGION_NAME', 'OS_PROJECT_ID', 'OS_PROJECT_NAME', + 'OS_USER_DOMAIN_NAME', 'OS_PROJECT_DOMAIN_NAME' + } + + @classmethod + def get(cls): + static_config = cls._build_config_from_env_variables() + env_name = cls.OPENSTACK_CONFIG_PATH_ENV_VAR + default_location_tpl = cls.OPENSTACK_CONFIG_PATH_DEFAULT_PATH + default_location = os.path.expanduser(default_location_tpl) + config_path = os.getenv(env_name, default_location) + try: + with open(config_path) as f: + cls.update_config(static_config, json.loads(f.read())) + except IOError: + pass + return static_config + + @classmethod + def _build_config_from_env_variables(cls): + return {v.lstrip(cls.OPENSTACK_ENV_VAR_PREFIX).lower(): os.environ[v] + for v in cls.OPENSTACK_SUPPORTED_ENV_VARS if v in os.environ} + + @staticmethod + def update_config(overridden_cfg, overriding_cfg): + """ this method is like dict.update() only that it doesn't 
override + with (or set new) empty values (e.g. empty string) """ + for k, v in overriding_cfg.iteritems(): + if v: + overridden_cfg[k] = v + + +class OpenStackClient(object): + + COMMON = {'username', 'password', 'auth_url'} + AUTH_SETS = [ + COMMON | {'tenant_name'}, + COMMON | {'project_id', 'user_domain_name'}, + COMMON | {'project_id', 'project_name', 'user_domain_name'}, + COMMON | {'project_name', 'user_domain_name', 'project_domain_name'}, + ] + OPTIONAL_AUTH_PARAMS = {'insecure'} + + def __init__(self, client_name, client_class, config=None, *args, **kw): + cfg = Config.get() + + if config: + Config.update_config(cfg, config) + + v3 = '/v3' in cfg['auth_url'] + # Newer libraries expect the region key to be `region_name`, not + # `region`. + region = cfg.pop('region', None) + if v3 and region: + cfg['region_name'] = region + + cfg = self._merge_custom_configuration(cfg, client_name) + + auth_params, client_params = OpenStackClient._split_config(cfg) + OpenStackClient._validate_auth_params(auth_params) + + if v3: + # keystone v3 complains if these aren't set. + for key in 'user_domain_name', 'project_domain_name': + auth_params.setdefault(key, 'default') + + client_params['session'] = self._authenticate(auth_params) + self._client = client_class(**client_params) + + @classmethod + def _validate_auth_params(cls, params): + if set(params.keys()) - cls.OPTIONAL_AUTH_PARAMS in cls.AUTH_SETS: + return + + def set2str(s): + return '({})'.format(', '.join(sorted(s))) + + received_params = set2str(params) + valid_auth_sets = map(set2str, cls.AUTH_SETS) + raise NonRecoverableError( + "{} is not valid set of auth params. Expected to find parameters " + "either as environment variables, in a JSON file (at either a " + "path which is set under the environment variable {} or at the " + "default location {}), or as nested properties under an " + "'{}' property. Valid auth param sets are: {}." 
+ .format(received_params, + Config.OPENSTACK_CONFIG_PATH_ENV_VAR, + Config.OPENSTACK_CONFIG_PATH_DEFAULT_PATH, + CONFIG_PROPERTY, + ', '.join(valid_auth_sets))) + + @staticmethod + def _merge_custom_configuration(cfg, client_name): + config = cfg.copy() + + mapping = { + 'nova_url': 'nova_client', + 'neutron_url': 'neutron_client' + } + for key in 'nova_url', 'neutron_url': + val = config.pop(key, None) + if val is not None: + ctx.logger.warn( + "'{}' property is deprecated. Use `custom_configuration" + ".{}.endpoint_override` instead.".format( + key, mapping[key])) + if mapping.get(key, None) == client_name: + config['endpoint_override'] = val + + if 'custom_configuration' in cfg: + del config['custom_configuration'] + config.update(cfg['custom_configuration'].get(client_name, {})) + return config + + @classmethod + def _split_config(cls, cfg): + all = reduce(lambda x, y: x | y, cls.AUTH_SETS) + all |= cls.OPTIONAL_AUTH_PARAMS + + auth, misc = {}, {} + for param, value in cfg.items(): + if param in all: + auth[param] = value + else: + misc[param] = value + return auth, misc + + @staticmethod + def _authenticate(cfg): + verify = True + if 'insecure' in cfg: + cfg = cfg.copy() + # NOTE: Next line will evaluate to False only when insecure is set + # to True. Any other value (string etc.) will force verify to True. + # This is done on purpose, since we do not wish to use insecure + # connection by mistake. 
+ verify = not (cfg['insecure'] is True) + del cfg['insecure'] + loader = loading.get_plugin_loader("password") + auth = loader.load_from_options(**cfg) + sess = session.Session(auth=auth, verify=verify) + return sess + + # Proxy any unknown call to base client + def __getattr__(self, attr): + return getattr(self._client, attr) + + # Sugar, common to all clients + def cosmo_plural(self, obj_type_single): + return obj_type_single + 's' + + def cosmo_get_named(self, obj_type_single, name, **kw): + return self.cosmo_get(obj_type_single, name=name, **kw) + + def cosmo_get(self, obj_type_single, **kw): + return self._cosmo_get(obj_type_single, False, **kw) + + def cosmo_get_if_exists(self, obj_type_single, **kw): + return self._cosmo_get(obj_type_single, True, **kw) + + def _cosmo_get(self, obj_type_single, if_exists, **kw): + ls = list(self.cosmo_list(obj_type_single, **kw)) + check = len(ls) > 1 if if_exists else len(ls) != 1 + if check: + raise NonRecoverableError( + "Expected {0} one object of type {1} " + "with match {2} but there are {3}".format( + 'at most' if if_exists else 'exactly', + obj_type_single, kw, len(ls))) + return ls[0] if ls else None + + +class GlanceClient(OpenStackClient): + + # Can't glance_url be figured out from keystone + REQUIRED_CONFIG_PARAMS = \ + ['username', 'password', 'tenant_name', 'auth_url'] + + def connect(self, cfg): + loader = loading.get_plugin_loader('password') + auth = loader.load_from_options( + auth_url=cfg['auth_url'], + username=cfg['username'], + password=cfg['password'], + tenant_name=cfg['tenant_name']) + sess = session.Session(auth=auth) + + client_kwargs = dict( + session=sess, + ) + if cfg.get('glance_url'): + client_kwargs['endpoint'] = cfg['glance_url'] + + return GlanceClientWithSugar(**client_kwargs) + + +# Decorators +def _find_instanceof_in_kw(cls, kw): + ret = [v for v in kw.values() if isinstance(v, cls)] + if not ret: + return None + if len(ret) > 1: + raise NonRecoverableError( + "Expected to find exactly 
one instance of {0} in " + "kwargs but found {1}".format(cls, len(ret))) + return ret[0] + + +def _find_context_in_kw(kw): + return _find_instanceof_in_kw(cloudify.context.CloudifyContext, kw) + + +def with_neutron_client(f): + @wraps(f) + def wrapper(*args, **kw): + _put_client_in_kw('neutron_client', NeutronClientWithSugar, kw) + + try: + return f(*args, **kw) + except neutron_exceptions.NeutronClientException, e: + if e.status_code in _non_recoverable_error_codes: + _re_raise(e, recoverable=False, status_code=e.status_code) + else: + raise + return wrapper + + +def with_nova_client(f): + @wraps(f) + def wrapper(*args, **kw): + _put_client_in_kw('nova_client', NovaClientWithSugar, kw) + + try: + return f(*args, **kw) + except nova_exceptions.OverLimit, e: + _re_raise(e, recoverable=True, retry_after=e.retry_after) + except nova_exceptions.ClientException, e: + if e.code in _non_recoverable_error_codes: + _re_raise(e, recoverable=False, status_code=e.code) + else: + raise + return wrapper + + +def with_cinder_client(f): + @wraps(f) + def wrapper(*args, **kw): + _put_client_in_kw('cinder_client', CinderClientWithSugar, kw) + + try: + return f(*args, **kw) + except cinder_exceptions.ClientException, e: + if e.code in _non_recoverable_error_codes: + _re_raise(e, recoverable=False, status_code=e.code) + else: + raise + return wrapper + + +def with_glance_client(f): + @wraps(f) + def wrapper(*args, **kw): + _put_client_in_kw('glance_client', GlanceClientWithSugar, kw) + + try: + return f(*args, **kw) + except glance_exceptions.ClientException, e: + if e.code in _non_recoverable_error_codes: + _re_raise(e, recoverable=False, status_code=e.code) + else: + raise + return wrapper + + +def with_keystone_client(f): + @wraps(f) + def wrapper(*args, **kw): + _put_client_in_kw('keystone_client', KeystoneClientWithSugar, kw) + + try: + return f(*args, **kw) + except keystone_exceptions.HTTPError, e: + if e.http_status in _non_recoverable_error_codes: + _re_raise(e, 
recoverable=False, status_code=e.http_status) + else: + raise + except keystone_exceptions.ClientException, e: + _re_raise(e, recoverable=False) + return wrapper + + +def _put_client_in_kw(client_name, client_class, kw): + if client_name in kw: + return + + ctx = _find_context_in_kw(kw) + if ctx.type == context.NODE_INSTANCE: + config = ctx.node.properties.get(CONFIG_PROPERTY) + rt_config = ctx.instance.runtime_properties.get( + CONFIG_RUNTIME_PROPERTY) + elif ctx.type == context.RELATIONSHIP_INSTANCE: + config = ctx.source.node.properties.get(CONFIG_PROPERTY) + rt_config = ctx.source.instance.runtime_properties.get( + CONFIG_RUNTIME_PROPERTY) + if not config: + config = ctx.target.node.properties.get(CONFIG_PROPERTY) + rt_config = ctx.target.instance.runtime_properties.get( + CONFIG_RUNTIME_PROPERTY) + + else: + config = None + rt_config = None + + # Overlay with configuration from runtime property, if any. + if rt_config: + if config: + config = config.copy() + config.update(rt_config) + else: + config = rt_config + + if CONFIG_INPUT in kw: + if config: + config = config.copy() + config.update(kw[CONFIG_INPUT]) + else: + config = kw[CONFIG_INPUT] + kw[client_name] = client_class(config=config) + + +_non_recoverable_error_codes = [400, 401, 403, 404, 409] + + +def _re_raise(e, recoverable, retry_after=None, status_code=None): + exc_type, exc, traceback = sys.exc_info() + message = e.message + if status_code is not None: + message = '{0} [status_code={1}]'.format(message, status_code) + if recoverable: + if retry_after == 0: + retry_after = None + raise RecoverableError( + message=message, + retry_after=retry_after), None, traceback + else: + raise NonRecoverableError(message), None, traceback + + +# Sugar for clients + +class NovaClientWithSugar(OpenStackClient): + + def __init__(self, *args, **kw): + config = kw['config'] + if config.get('nova_url'): + config['endpoint_override'] = config.pop('nova_url') + + super(NovaClientWithSugar, self).__init__( + 
'nova_client', partial(nova_client.Client, '2'), *args, **kw) + + def cosmo_list(self, obj_type_single, **kw): + """ Sugar for xxx.findall() - not using xxx.list() because findall + can receive filtering parameters, and it's common for all types""" + obj_type_plural = self._get_nova_field_name_for_type(obj_type_single) + for obj in getattr(self, obj_type_plural).findall(**kw): + yield obj + + def cosmo_delete_resource(self, obj_type_single, obj_id): + obj_type_plural = self._get_nova_field_name_for_type(obj_type_single) + getattr(self, obj_type_plural).delete(obj_id) + + def get_id_from_resource(self, resource): + return resource.id + + def get_name_from_resource(self, resource): + return resource.name + + def get_quota(self, obj_type_single): + raise RuntimeError( + 'Retrieving quotas from Nova service is currently unsupported ' + 'due to a bug in Nova python client') + + # we're already authenticated, but the following call will make + # 'service_catalog' available under 'client', through which we can + # extract the tenant_id (Note that self.client.tenant_id might be + # None if project_id (AKA tenant_name) was used instead; However the + # actual tenant_id must be used to retrieve the quotas) + self.client.authenticate() + tenant_id = self.client.service_catalog.get_tenant_id() + quotas = self.quotas.get(tenant_id) + return getattr(quotas, self.cosmo_plural(obj_type_single)) + + def _get_nova_field_name_for_type(self, obj_type_single): + from openstack_plugin_common.floatingip import \ + FLOATINGIP_OPENSTACK_TYPE + if obj_type_single == FLOATINGIP_OPENSTACK_TYPE: + # since we use the same 'openstack type' property value for both + # neutron and nova floating-ips, this adjustment must be made + # for nova client, as fields names differ between the two clients + obj_type_single = 'floating_ip' + return self.cosmo_plural(obj_type_single) + + +class NeutronClientWithSugar(OpenStackClient): + + def __init__(self, *args, **kw): + super(NeutronClientWithSugar, 
self).__init__( + 'neutron_client', neutron_client.Client, *args, **kw) + + def cosmo_list(self, obj_type_single, **kw): + """ Sugar for list_XXXs()['XXXs'] """ + obj_type_plural = self.cosmo_plural(obj_type_single) + for obj in getattr(self, 'list_' + obj_type_plural)(**kw)[ + obj_type_plural]: + yield obj + + def cosmo_delete_resource(self, obj_type_single, obj_id): + getattr(self, 'delete_' + obj_type_single)(obj_id) + + def get_id_from_resource(self, resource): + return resource['id'] + + def get_name_from_resource(self, resource): + return resource['name'] + + def get_quota(self, obj_type_single): + tenant_id = self.get_quotas_tenant()['tenant']['tenant_id'] + quotas = self.show_quota(tenant_id)['quota'] + return quotas[obj_type_single] + + def cosmo_list_prefixed(self, obj_type_single, name_prefix): + for obj in self.cosmo_list(obj_type_single): + if obj['name'].startswith(name_prefix): + yield obj + + def cosmo_delete_prefixed(self, name_prefix): + # Cleanup all neutron.list_XXX() objects with names starting + # with self.name_prefix + for obj_type_single in 'port', 'router', 'network', 'subnet',\ + 'security_group': + for obj in self.cosmo_list_prefixed(obj_type_single, name_prefix): + if obj_type_single == 'router': + ports = self.cosmo_list('port', device_id=obj['id']) + for port in ports: + try: + self.remove_interface_router( + port['device_id'], + {'port_id': port['id']}) + except neutron_exceptions.NeutronClientException: + pass + getattr(self, 'delete_' + obj_type_single)(obj['id']) + + def cosmo_find_external_net(self): + """ For tests of floating IP """ + nets = self.list_networks()['networks'] + ls = [net for net in nets if net.get('router:external')] + if len(ls) != 1: + raise NonRecoverableError( + "Expected exactly one external network but found {0}".format( + len(ls))) + return ls[0] + + +class CinderClientWithSugar(OpenStackClient): + + def __init__(self, *args, **kw): + super(CinderClientWithSugar, self).__init__( + 'cinder_client', 
partial(cinder_client.Client, '2'), *args, **kw) + + def cosmo_list(self, obj_type_single, **kw): + obj_type_plural = self.cosmo_plural(obj_type_single) + for obj in getattr(self, obj_type_plural).findall(**kw): + yield obj + + def cosmo_delete_resource(self, obj_type_single, obj_id): + obj_type_plural = self.cosmo_plural(obj_type_single) + getattr(self, obj_type_plural).delete(obj_id) + + def get_id_from_resource(self, resource): + return resource.id + + def get_name_from_resource(self, resource): + return resource.name + + def get_quota(self, obj_type_single): + # we're already authenticated, but the following call will make + # 'service_catalog' available under 'client', through which we can + # extract the tenant_id (Note that self.client.tenant_id might be + # None if project_id (AKA tenant_name) was used instead; However the + # actual tenant_id must be used to retrieve the quotas) + self.client.authenticate() + project_id = self.client.session.get_project_id() + quotas = self.quotas.get(project_id) + return getattr(quotas, self.cosmo_plural(obj_type_single)) + + +class KeystoneClientWithSugar(OpenStackClient): + # keystone does not have resource quota + KEYSTONE_INFINITE_RESOURCE_QUOTA = 10**9 + + def __init__(self, *args, **kw): + super(KeystoneClientWithSugar, self).__init__( + 'keystone_client', keystone_client.Client, *args, **kw) + + def cosmo_list(self, obj_type_single, **kw): + obj_type_plural = self.cosmo_plural(obj_type_single) + for obj in getattr(self, obj_type_plural).list(**kw): + yield obj + + def cosmo_delete_resource(self, obj_type_single, obj_id): + obj_type_plural = self.cosmo_plural(obj_type_single) + getattr(self, obj_type_plural).delete(obj_id) + + def get_id_from_resource(self, resource): + return resource.id + + def get_name_from_resource(self, resource): + return resource.name + + def get_quota(self, obj_type_single): + return self.KEYSTONE_INFINITE_RESOURCE_QUOTA + + +class GlanceClientWithSugar(OpenStackClient): + 
GLANCE_INFINITE_RESOURCE_QUOTA = 10**9 + + def __init__(self, *args, **kw): + super(GlanceClientWithSugar, self).__init__( + 'glance_client', partial(glance_client.Client, '2'), *args, **kw) + + def cosmo_list(self, obj_type_single, **kw): + obj_type_plural = self.cosmo_plural(obj_type_single) + return getattr(self, obj_type_plural).list(filters=kw) + + def cosmo_delete_resource(self, obj_type_single, obj_id): + obj_type_plural = self.cosmo_plural(obj_type_single) + getattr(self, obj_type_plural).delete(obj_id) + + def get_id_from_resource(self, resource): + return resource.id + + def get_name_from_resource(self, resource): + return resource.name + + def get_quota(self, obj_type_single): + return self.GLANCE_INFINITE_RESOURCE_QUOTA diff --git a/aria/multivim-plugin/openstack_plugin_common/floatingip.py b/aria/multivim-plugin/openstack_plugin_common/floatingip.py new file mode 100644 index 0000000000..fe5896520b --- /dev/null +++ b/aria/multivim-plugin/openstack_plugin_common/floatingip.py @@ -0,0 +1,84 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+ +from cloudify import ctx +from openstack_plugin_common import ( + delete_resource_and_runtime_properties, + use_external_resource, + validate_resource, + COMMON_RUNTIME_PROPERTIES_KEYS, + OPENSTACK_ID_PROPERTY, + OPENSTACK_TYPE_PROPERTY) + + +FLOATINGIP_OPENSTACK_TYPE = 'floatingip' + +# Runtime properties +IP_ADDRESS_PROPERTY = 'floating_ip_address' # the actual ip address +RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS + \ + [IP_ADDRESS_PROPERTY] + + +def use_external_floatingip(client, ip_field_name, ext_fip_ip_extractor): + external_fip = use_external_resource( + ctx, client, FLOATINGIP_OPENSTACK_TYPE, ip_field_name) + if external_fip: + ctx.instance.runtime_properties[IP_ADDRESS_PROPERTY] = \ + ext_fip_ip_extractor(external_fip) + return True + + return False + + +def set_floatingip_runtime_properties(fip_id, ip_address): + ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = fip_id + ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \ + FLOATINGIP_OPENSTACK_TYPE + ctx.instance.runtime_properties[IP_ADDRESS_PROPERTY] = ip_address + + +def delete_floatingip(client, **kwargs): + delete_resource_and_runtime_properties(ctx, client, + RUNTIME_PROPERTIES_KEYS) + + +def floatingip_creation_validation(client, ip_field_name, **kwargs): + validate_resource(ctx, client, FLOATINGIP_OPENSTACK_TYPE, + ip_field_name) + + +def get_server_floating_ip(neutron_client, server_id): + + floating_ips = neutron_client.list_floatingips() + + floating_ips = floating_ips.get('floatingips') + if not floating_ips: + return None + + for floating_ip in floating_ips: + port_id = floating_ip.get('port_id') + if not port_id: + # this floating ip is not attached to any port + continue + + port = neutron_client.show_port(port_id)['port'] + device_id = port.get('device_id') + if not device_id: + # this port is not attached to any server + continue + + if server_id == device_id: + return floating_ip + return None diff --git 
a/aria/multivim-plugin/openstack_plugin_common/security_group.py b/aria/multivim-plugin/openstack_plugin_common/security_group.py new file mode 100644 index 0000000000..0fa21aa149 --- /dev/null +++ b/aria/multivim-plugin/openstack_plugin_common/security_group.py @@ -0,0 +1,148 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. + +import copy +import re + +from cloudify import ctx +from cloudify.exceptions import NonRecoverableError + +from openstack_plugin_common import ( + get_resource_id, + use_external_resource, + delete_resource_and_runtime_properties, + validate_resource, + validate_ip_or_range_syntax, + OPENSTACK_ID_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + OPENSTACK_NAME_PROPERTY, + COMMON_RUNTIME_PROPERTIES_KEYS +) + +SECURITY_GROUP_OPENSTACK_TYPE = 'security_group' + +# Runtime properties +RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS + +NODE_NAME_RE = re.compile('^(.*)_.*$') # Anything before last underscore + + +def build_sg_data(args=None): + security_group = { + 'description': None, + 'name': get_resource_id(ctx, SECURITY_GROUP_OPENSTACK_TYPE), + } + + args = args or {} + security_group.update(ctx.node.properties['security_group'], **args) + + return security_group + + +def process_rules(client, sgr_default_values, cidr_field_name, + remote_group_field_name, min_port_field_name, + max_port_field_name): + rules_to_apply = ctx.node.properties['rules'] + 
security_group_rules = [] + for rule in rules_to_apply: + security_group_rules.append( + _process_rule(rule, client, sgr_default_values, cidr_field_name, + remote_group_field_name, min_port_field_name, + max_port_field_name)) + + return security_group_rules + + +def use_external_sg(client): + return use_external_resource(ctx, client, + SECURITY_GROUP_OPENSTACK_TYPE) + + +def set_sg_runtime_properties(sg, client): + ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] =\ + client.get_id_from_resource(sg) + ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] =\ + SECURITY_GROUP_OPENSTACK_TYPE + ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = \ + client.get_name_from_resource(sg) + + +def delete_sg(client, **kwargs): + delete_resource_and_runtime_properties(ctx, client, + RUNTIME_PROPERTIES_KEYS) + + +def sg_creation_validation(client, cidr_field_name, **kwargs): + validate_resource(ctx, client, SECURITY_GROUP_OPENSTACK_TYPE) + + ctx.logger.debug('validating CIDR for rules with a {0} field'.format( + cidr_field_name)) + for rule in ctx.node.properties['rules']: + if cidr_field_name in rule: + validate_ip_or_range_syntax(ctx, rule[cidr_field_name]) + + +def _process_rule(rule, client, sgr_default_values, cidr_field_name, + remote_group_field_name, min_port_field_name, + max_port_field_name): + ctx.logger.debug( + "Security group rule before transformations: {0}".format(rule)) + + sgr = copy.deepcopy(sgr_default_values) + if 'port' in rule: + rule[min_port_field_name] = rule['port'] + rule[max_port_field_name] = rule['port'] + del rule['port'] + sgr.update(rule) + + if (remote_group_field_name in sgr) and sgr[remote_group_field_name]: + sgr[cidr_field_name] = None + elif ('remote_group_node' in sgr) and sgr['remote_group_node']: + _, remote_group_node = _capabilities_of_node_named( + sgr['remote_group_node']) + sgr[remote_group_field_name] = remote_group_node[OPENSTACK_ID_PROPERTY] + del sgr['remote_group_node'] + sgr[cidr_field_name] = None + elif 
('remote_group_name' in sgr) and sgr['remote_group_name']: + sgr[remote_group_field_name] = \ + client.get_id_from_resource( + client.cosmo_get_named( + SECURITY_GROUP_OPENSTACK_TYPE, sgr['remote_group_name'])) + del sgr['remote_group_name'] + sgr[cidr_field_name] = None + + ctx.logger.debug( + "Security group rule after transformations: {0}".format(sgr)) + return sgr + + +def _capabilities_of_node_named(node_name): + result = None + caps = ctx.capabilities.get_all() + for node_id in caps: + match = NODE_NAME_RE.match(node_id) + if match: + candidate_node_name = match.group(1) + if candidate_node_name == node_name: + if result: + raise NonRecoverableError( + "More than one node named '{0}' " + "in capabilities".format(node_name)) + result = (node_id, caps[node_id]) + if not result: + raise NonRecoverableError( + "Could not find node named '{0}' " + "in capabilities".format(node_name)) + return result diff --git a/aria/multivim-plugin/openstack_plugin_common/tests/__init__.py b/aria/multivim-plugin/openstack_plugin_common/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/aria/multivim-plugin/openstack_plugin_common/tests/__init__.py diff --git a/aria/multivim-plugin/openstack_plugin_common/tests/openstack_client_tests.py b/aria/multivim-plugin/openstack_plugin_common/tests/openstack_client_tests.py new file mode 100644 index 0000000000..27d443c2e4 --- /dev/null +++ b/aria/multivim-plugin/openstack_plugin_common/tests/openstack_client_tests.py @@ -0,0 +1,849 @@ +######## +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. + +import os +import unittest +import tempfile +import json +import __builtin__ as builtins + +import mock +from cloudify.exceptions import NonRecoverableError + +from cloudify.mocks import MockCloudifyContext +import openstack_plugin_common as common + + +class ConfigTests(unittest.TestCase): + + @mock.patch.dict('os.environ', clear=True) + def test__build_config_from_env_variables_empty(self): + cfg = common.Config._build_config_from_env_variables() + self.assertEqual({}, cfg) + + @mock.patch.dict('os.environ', clear=True, + OS_AUTH_URL='test_url') + def test__build_config_from_env_variables_single(self): + cfg = common.Config._build_config_from_env_variables() + self.assertEqual({'auth_url': 'test_url'}, cfg) + + @mock.patch.dict('os.environ', clear=True, + OS_AUTH_URL='test_url', + OS_PASSWORD='pass', + OS_REGION_NAME='region') + def test__build_config_from_env_variables_multiple(self): + cfg = common.Config._build_config_from_env_variables() + self.assertEqual({ + 'auth_url': 'test_url', + 'password': 'pass', + 'region_name': 'region', + }, cfg) + + @mock.patch.dict('os.environ', clear=True, + OS_INVALID='invalid', + PASSWORD='pass', + os_region_name='region') + def test__build_config_from_env_variables_all_ignored(self): + cfg = common.Config._build_config_from_env_variables() + self.assertEqual({}, cfg) + + @mock.patch.dict('os.environ', clear=True, + OS_AUTH_URL='test_url', + OS_PASSWORD='pass', + OS_REGION_NAME='region', + OS_INVALID='invalid', + PASSWORD='pass', + os_region_name='region') + def 
test__build_config_from_env_variables_extract_valid(self): + cfg = common.Config._build_config_from_env_variables() + self.assertEqual({ + 'auth_url': 'test_url', + 'password': 'pass', + 'region_name': 'region', + }, cfg) + + def test_update_config_empty_target(self): + target = {} + override = {'k1': 'u1'} + result = override.copy() + + common.Config.update_config(target, override) + self.assertEqual(result, target) + + def test_update_config_empty_override(self): + target = {'k1': 'v1'} + override = {} + result = target.copy() + + common.Config.update_config(target, override) + self.assertEqual(result, target) + + def test_update_config_disjoint_configs(self): + target = {'k1': 'v1'} + override = {'k2': 'u2'} + result = target.copy() + result.update(override) + + common.Config.update_config(target, override) + self.assertEqual(result, target) + + def test_update_config_do_not_remove_empty_from_target(self): + target = {'k1': ''} + override = {} + result = target.copy() + + common.Config.update_config(target, override) + self.assertEqual(result, target) + + def test_update_config_no_empty_in_override(self): + target = {'k1': 'v1', 'k2': 'v2'} + override = {'k1': 'u2'} + result = target.copy() + result.update(override) + + common.Config.update_config(target, override) + self.assertEqual(result, target) + + def test_update_config_all_empty_in_override(self): + target = {'k1': '', 'k2': 'v2'} + override = {'k1': '', 'k3': ''} + result = target.copy() + + common.Config.update_config(target, override) + self.assertEqual(result, target) + + def test_update_config_misc(self): + target = {'k1': 'v1', 'k2': 'v2'} + override = {'k1': '', 'k2': 'u2', 'k3': '', 'k4': 'u4'} + result = {'k1': 'v1', 'k2': 'u2', 'k4': 'u4'} + + common.Config.update_config(target, override) + self.assertEqual(result, target) + + @mock.patch.object(common.Config, 'update_config') + @mock.patch.object(common.Config, '_build_config_from_env_variables', + return_value={}) + 
@mock.patch.dict('os.environ', clear=True, + values={common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR: + '/this/should/not/exist.json'}) + def test_get_missing_static_config_missing_file(self, from_env, update): + cfg = common.Config.get() + self.assertEqual({}, cfg) + from_env.assert_called_once_with() + update.assert_not_called() + + @mock.patch.object(common.Config, 'update_config') + @mock.patch.object(common.Config, '_build_config_from_env_variables', + return_value={}) + def test_get_empty_static_config_present_file(self, from_env, update): + file_cfg = {'k1': 'v1', 'k2': 'v2'} + env_var = common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR + file = tempfile.NamedTemporaryFile(delete=False) + json.dump(file_cfg, file) + file.close() + + with mock.patch.dict('os.environ', {env_var: file.name}, clear=True): + common.Config.get() + + os.unlink(file.name) + from_env.assert_called_once_with() + update.assert_called_once_with({}, file_cfg) + + @mock.patch.object(common.Config, 'update_config') + @mock.patch.object(common.Config, '_build_config_from_env_variables', + return_value={'k1': 'v1'}) + def test_get_present_static_config_empty_file(self, from_env, update): + file_cfg = {} + env_var = common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR + file = tempfile.NamedTemporaryFile(delete=False) + json.dump(file_cfg, file) + file.close() + + with mock.patch.dict('os.environ', {env_var: file.name}, clear=True): + common.Config.get() + + os.unlink(file.name) + from_env.assert_called_once_with() + update.assert_called_once_with({'k1': 'v1'}, file_cfg) + + @mock.patch.object(common.Config, 'update_config') + @mock.patch.object(common.Config, '_build_config_from_env_variables', + return_value={'k1': 'v1'}) + @mock.patch.dict('os.environ', clear=True, + values={common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR: + '/this/should/not/exist.json'}) + def test_get_present_static_config_missing_file(self, from_env, update): + cfg = common.Config.get() + self.assertEqual({'k1': 'v1'}, cfg) + 
from_env.assert_called_once_with() + update.assert_not_called() + + @mock.patch.object(common.Config, 'update_config') + @mock.patch.object(common.Config, '_build_config_from_env_variables', + return_value={'k1': 'v1'}) + def test_get_all_present(self, from_env, update): + file_cfg = {'k2': 'u2'} + env_var = common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR + file = tempfile.NamedTemporaryFile(delete=False) + json.dump(file_cfg, file) + file.close() + + with mock.patch.dict('os.environ', {env_var: file.name}, clear=True): + common.Config.get() + + os.unlink(file.name) + from_env.assert_called_once_with() + update.assert_called_once_with({'k1': 'v1'}, file_cfg) + + +class OpenstackClientTests(unittest.TestCase): + + def test__merge_custom_configuration_no_custom_cfg(self): + cfg = {'k1': 'v1'} + new = common.OpenStackClient._merge_custom_configuration(cfg, "dummy") + self.assertEqual(cfg, new) + + def test__merge_custom_configuration_client_present(self): + cfg = { + 'k1': 'v1', + 'k2': 'v2', + 'custom_configuration': { + 'dummy': { + 'k2': 'u2', + 'k3': 'u3' + } + } + } + result = { + 'k1': 'v1', + 'k2': 'u2', + 'k3': 'u3' + } + bak = cfg.copy() + new = common.OpenStackClient._merge_custom_configuration(cfg, "dummy") + self.assertEqual(result, new) + self.assertEqual(cfg, bak) + + def test__merge_custom_configuration_client_missing(self): + cfg = { + 'k1': 'v1', + 'k2': 'v2', + 'custom_configuration': { + 'dummy': { + 'k2': 'u2', + 'k3': 'u3' + } + } + } + result = { + 'k1': 'v1', + 'k2': 'v2' + } + bak = cfg.copy() + new = common.OpenStackClient._merge_custom_configuration(cfg, "baddy") + self.assertEqual(result, new) + self.assertEqual(cfg, bak) + + def test__merge_custom_configuration_multi_client(self): + cfg = { + 'k1': 'v1', + 'k2': 'v2', + 'custom_configuration': { + 'dummy': { + 'k2': 'u2', + 'k3': 'u3' + }, + 'bummy': { + 'k1': 'z1' + } + } + } + result = { + 'k1': 'z1', + 'k2': 'v2', + } + bak = cfg.copy() + new = 
common.OpenStackClient._merge_custom_configuration(cfg, "bummy") + self.assertEqual(result, new) + self.assertEqual(cfg, bak) + + @mock.patch.object(common, 'ctx') + def test__merge_custom_configuration_nova_url(self, mock_ctx): + cfg = { + 'nova_url': 'gopher://nova', + } + bak = cfg.copy() + + self.assertEqual( + common.OpenStackClient._merge_custom_configuration( + cfg, 'nova_client'), + {'endpoint_override': 'gopher://nova'}, + ) + self.assertEqual( + common.OpenStackClient._merge_custom_configuration( + cfg, 'dummy'), + {}, + ) + self.assertEqual(cfg, bak) + mock_ctx.logger.warn.assert_has_calls([ + mock.call( + "'nova_url' property is deprecated. Use `custom_configuration." + "nova_client.endpoint_override` instead."), + mock.call( + "'nova_url' property is deprecated. Use `custom_configuration." + "nova_client.endpoint_override` instead."), + ]) + + @mock.patch('keystoneauth1.session.Session') + def test___init___multi_region(self, m_session): + mock_client_class = mock.MagicMock() + + cfg = { + 'auth_url': 'test-auth_url/v3', + 'region': 'test-region', + } + + with mock.patch.object( + builtins, 'open', + mock.mock_open( + read_data=""" + { + "region": "region from file", + "other": "this one should get through" + } + """ + ), + create=True, + ): + common.OpenStackClient('fred', mock_client_class, cfg) + + mock_client_class.assert_called_once_with( + region_name='test-region', + other='this one should get through', + session=m_session.return_value, + ) + + def test__validate_auth_params_missing(self): + with self.assertRaises(NonRecoverableError): + common.OpenStackClient._validate_auth_params({}) + + def test__validate_auth_params_too_much(self): + with self.assertRaises(NonRecoverableError): + common.OpenStackClient._validate_auth_params({ + 'auth_url': 'url', + 'password': 'pass', + 'username': 'user', + 'tenant_name': 'tenant', + 'project_id': 'project_test', + }) + + def test__validate_auth_params_v2(self): + 
common.OpenStackClient._validate_auth_params({ + 'auth_url': 'url', + 'password': 'pass', + 'username': 'user', + 'tenant_name': 'tenant', + }) + + def test__validate_auth_params_v3(self): + common.OpenStackClient._validate_auth_params({ + 'auth_url': 'url', + 'password': 'pass', + 'username': 'user', + 'project_id': 'project_test', + 'user_domain_name': 'user_domain', + }) + + def test__validate_auth_params_v3_mod(self): + common.OpenStackClient._validate_auth_params({ + 'auth_url': 'url', + 'password': 'pass', + 'username': 'user', + 'user_domain_name': 'user_domain', + 'project_name': 'project_test_name', + 'project_domain_name': 'project_domain', + }) + + def test__validate_auth_params_skip_insecure(self): + common.OpenStackClient._validate_auth_params({ + 'auth_url': 'url', + 'password': 'pass', + 'username': 'user', + 'user_domain_name': 'user_domain', + 'project_name': 'project_test_name', + 'project_domain_name': 'project_domain', + 'insecure': True + }) + + def test__split_config(self): + auth = {'auth_url': 'url', 'password': 'pass'} + misc = {'misc1': 'val1', 'misc2': 'val2'} + all = dict(auth) + all.update(misc) + + a, m = common.OpenStackClient._split_config(all) + + self.assertEqual(auth, a) + self.assertEqual(misc, m) + + @mock.patch.object(common, 'loading') + @mock.patch.object(common, 'session') + def test__authenticate_secure(self, mock_session, mock_loading): + auth_params = {'k1': 'v1'} + common.OpenStackClient._authenticate(auth_params) + loader = mock_loading.get_plugin_loader.return_value + loader.load_from_options.assert_called_once_with(k1='v1') + auth = loader.load_from_options.return_value + mock_session.Session.assert_called_once_with(auth=auth, verify=True) + + @mock.patch.object(common, 'loading') + @mock.patch.object(common, 'session') + def test__authenticate_secure_explicit(self, mock_session, mock_loading): + auth_params = {'k1': 'v1', 'insecure': False} + common.OpenStackClient._authenticate(auth_params) + loader = 
mock_loading.get_plugin_loader.return_value + loader.load_from_options.assert_called_once_with(k1='v1') + auth = loader.load_from_options.return_value + mock_session.Session.assert_called_once_with(auth=auth, verify=True) + + @mock.patch.object(common, 'loading') + @mock.patch.object(common, 'session') + def test__authenticate_insecure(self, mock_session, mock_loading): + auth_params = {'k1': 'v1', 'insecure': True} + common.OpenStackClient._authenticate(auth_params) + loader = mock_loading.get_plugin_loader.return_value + loader.load_from_options.assert_called_once_with(k1='v1') + auth = loader.load_from_options.return_value + mock_session.Session.assert_called_once_with(auth=auth, verify=False) + + @mock.patch.object(common, 'loading') + @mock.patch.object(common, 'session') + def test__authenticate_secure_misc(self, mock_session, mock_loading): + params = {'k1': 'v1'} + tests = ('', 'a', [], {}, set(), 4, 0, -1, 3.14, 0.0, None) + for test in tests: + auth_params = params.copy() + auth_params['insecure'] = test + + common.OpenStackClient._authenticate(auth_params) + loader = mock_loading.get_plugin_loader.return_value + loader.load_from_options.assert_called_with(**params) + auth = loader.load_from_options.return_value + mock_session.Session.assert_called_with(auth=auth, verify=True) + + @mock.patch.object(common, 'cinder_client') + def test_cinder_client_get_name_from_resource(self, cc_mock): + ccws = common.CinderClientWithSugar() + mock_volume = mock.Mock() + + self.assertIs( + mock_volume.name, + ccws.get_name_from_resource(mock_volume)) + + +class ClientsConfigTest(unittest.TestCase): + + def setUp(self): + file = tempfile.NamedTemporaryFile(delete=False) + json.dump(self.get_file_cfg(), file) + file.close() + self.addCleanup(os.unlink, file.name) + + env_cfg = self.get_env_cfg() + env_cfg[common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR] = file.name + mock.patch.dict('os.environ', env_cfg, clear=True).start() + + self.loading = mock.patch.object(common, 
'loading').start() + self.session = mock.patch.object(common, 'session').start() + self.nova = mock.patch.object(common, 'nova_client').start() + self.neutron = mock.patch.object(common, 'neutron_client').start() + self.cinder = mock.patch.object(common, 'cinder_client').start() + self.addCleanup(mock.patch.stopall) + + self.loader = self.loading.get_plugin_loader.return_value + self.auth = self.loader.load_from_options.return_value + + +class CustomConfigFromInputs(ClientsConfigTest): + + def get_file_cfg(self): + return { + 'username': 'file-username', + 'password': 'file-password', + 'tenant_name': 'file-tenant-name', + 'custom_configuration': { + 'nova_client': { + 'username': 'custom-username', + 'password': 'custom-password', + 'tenant_name': 'custom-tenant-name' + }, + } + } + + def get_inputs_cfg(self): + return { + 'auth_url': 'envar-auth-url', + 'username': 'inputs-username', + 'custom_configuration': { + 'neutron_client': { + 'password': 'inputs-custom-password' + }, + 'cinder_client': { + 'password': 'inputs-custom-password', + 'auth_url': 'inputs-custom-auth-url', + 'extra_key': 'extra-value' + }, + } + } + + def get_env_cfg(self): + return { + 'OS_USERNAME': 'envar-username', + 'OS_PASSWORD': 'envar-password', + 'OS_TENANT_NAME': 'envar-tenant-name', + 'OS_AUTH_URL': 'envar-auth-url', + common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR: file.name + } + + def test_nova(self): + common.NovaClientWithSugar(config=self.get_inputs_cfg()) + self.loader.load_from_options.assert_called_once_with( + username='inputs-username', + password='file-password', + tenant_name='file-tenant-name', + auth_url='envar-auth-url' + ) + self.session.Session.assert_called_with(auth=self.auth, verify=True) + self.nova.Client.assert_called_once_with( + '2', session=self.session.Session.return_value) + + def test_neutron(self): + common.NeutronClientWithSugar(config=self.get_inputs_cfg()) + self.loader.load_from_options.assert_called_once_with( + username='inputs-username', + 
password='inputs-custom-password', + tenant_name='file-tenant-name', + auth_url='envar-auth-url' + ) + self.session.Session.assert_called_with(auth=self.auth, verify=True) + self.neutron.Client.assert_called_once_with( + session=self.session.Session.return_value) + + def test_cinder(self): + common.CinderClientWithSugar(config=self.get_inputs_cfg()) + self.loader.load_from_options.assert_called_once_with( + username='inputs-username', + password='inputs-custom-password', + tenant_name='file-tenant-name', + auth_url='inputs-custom-auth-url' + ) + self.session.Session.assert_called_with(auth=self.auth, verify=True) + self.cinder.Client.assert_called_once_with( + '2', session=self.session.Session.return_value, + extra_key='extra-value') + + +class CustomConfigFromFile(ClientsConfigTest): + + def get_file_cfg(self): + return { + 'username': 'file-username', + 'password': 'file-password', + 'tenant_name': 'file-tenant-name', + 'custom_configuration': { + 'nova_client': { + 'username': 'custom-username', + 'password': 'custom-password', + 'tenant_name': 'custom-tenant-name' + }, + } + } + + def get_inputs_cfg(self): + return { + 'auth_url': 'envar-auth-url', + 'username': 'inputs-username', + } + + def get_env_cfg(self): + return { + 'OS_USERNAME': 'envar-username', + 'OS_PASSWORD': 'envar-password', + 'OS_TENANT_NAME': 'envar-tenant-name', + 'OS_AUTH_URL': 'envar-auth-url', + common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR: file.name + } + + def test_nova(self): + common.NovaClientWithSugar(config=self.get_inputs_cfg()) + self.loader.load_from_options.assert_called_once_with( + username='custom-username', + password='custom-password', + tenant_name='custom-tenant-name', + auth_url='envar-auth-url' + ) + self.session.Session.assert_called_with(auth=self.auth, verify=True) + self.nova.Client.assert_called_once_with( + '2', session=self.session.Session.return_value) + + def test_neutron(self): + common.NeutronClientWithSugar(config=self.get_inputs_cfg()) + 
self.loader.load_from_options.assert_called_once_with( + username='inputs-username', + password='file-password', + tenant_name='file-tenant-name', + auth_url='envar-auth-url' + ) + self.session.Session.assert_called_with(auth=self.auth, verify=True) + self.neutron.Client.assert_called_once_with( + session=self.session.Session.return_value) + + def test_cinder(self): + common.CinderClientWithSugar(config=self.get_inputs_cfg()) + self.loader.load_from_options.assert_called_once_with( + username='inputs-username', + password='file-password', + tenant_name='file-tenant-name', + auth_url='envar-auth-url' + ) + self.session.Session.assert_called_with(auth=self.auth, verify=True) + self.cinder.Client.assert_called_once_with( + '2', session=self.session.Session.return_value) + + +class PutClientInKwTests(unittest.TestCase): + + def test_override_prop_empty_ctx(self): + props = {} + ctx = MockCloudifyContext(node_id='a20846', properties=props) + kwargs = { + 'ctx': ctx, + 'openstack_config': { + 'p1': 'v1' + } + } + expected_cfg = kwargs['openstack_config'] + + client_class = mock.MagicMock() + common._put_client_in_kw('mock_client', client_class, kwargs) + client_class.assert_called_once_with(config=expected_cfg) + + def test_override_prop_nonempty_ctx(self): + props = { + 'openstack_config': { + 'p1': 'u1', + 'p2': 'u2' + } + } + props_copy = props.copy() + ctx = MockCloudifyContext(node_id='a20846', properties=props) + kwargs = { + 'ctx': ctx, + 'openstack_config': { + 'p1': 'v1', + 'p3': 'v3' + } + } + expected_cfg = { + 'p1': 'v1', + 'p2': 'u2', + 'p3': 'v3' + } + + client_class = mock.MagicMock() + common._put_client_in_kw('mock_client', client_class, kwargs) + client_class.assert_called_once_with(config=expected_cfg) + # Making sure that _put_client_in_kw will not modify + # 'openstack_config' property of a node. 
+ self.assertEqual(props_copy, ctx.node.properties) + + def test_override_runtime_prop(self): + props = { + 'openstack_config': { + 'p1': 'u1', + 'p2': 'u2' + } + } + runtime_props = { + 'openstack_config': { + 'p1': 'u3' + } + } + props_copy = props.copy() + runtime_props_copy = runtime_props.copy() + ctx = MockCloudifyContext(node_id='a20847', properties=props, + runtime_properties=runtime_props) + kwargs = { + 'ctx': ctx + } + expected_cfg = { + 'p1': 'u3', + 'p2': 'u2' + } + client_class = mock.MagicMock() + common._put_client_in_kw('mock_client', client_class, kwargs) + client_class.assert_called_once_with(config=expected_cfg) + self.assertEqual(props_copy, ctx.node.properties) + self.assertEqual(runtime_props_copy, ctx.instance.runtime_properties) + + +class ResourceQuotaTests(unittest.TestCase): + + def _test_quota_validation(self, amount, quota, failure_expected): + ctx = MockCloudifyContext(node_id='node_id', properties={}) + client = mock.MagicMock() + + def mock_cosmo_list(_): + return [x for x in range(0, amount)] + client.cosmo_list = mock_cosmo_list + + def mock_get_quota(_): + return quota + client.get_quota = mock_get_quota + + if failure_expected: + self.assertRaisesRegexp( + NonRecoverableError, + 'cannot be created due to quota limitations', + common.validate_resource, + ctx=ctx, sugared_client=client, + openstack_type='openstack_type') + else: + common.validate_resource( + ctx=ctx, sugared_client=client, + openstack_type='openstack_type') + + def test_equals_quotas(self): + self._test_quota_validation(3, 3, True) + + def test_exceeded_quota(self): + self._test_quota_validation(5, 3, True) + + def test_infinite_quota(self): + self._test_quota_validation(5, -1, False) + + +class UseExternalResourceTests(unittest.TestCase): + + def _test_use_external_resource(self, + is_external, + create_if_missing, + exists): + properties = {'create_if_missing': create_if_missing, + 'use_external_resource': is_external, + 'resource_id': 'resource_id'} + 
client_mock = mock.MagicMock() + os_type = 'test' + + def _raise_error(*_): + raise NonRecoverableError('Error') + + def _return_something(*_): + return mock.MagicMock() + + return_value = _return_something if exists else _raise_error + if exists: + properties.update({'resource_id': 'rid'}) + + node_context = MockCloudifyContext(node_id='a20847', + properties=properties) + with mock.patch( + 'openstack_plugin_common._get_resource_by_name_or_id_from_ctx', + new=return_value): + return common.use_external_resource(node_context, + client_mock, os_type) + + def test_use_existing_resource(self): + self.assertIsNotNone(self._test_use_external_resource(True, True, + True)) + self.assertIsNotNone(self._test_use_external_resource(True, False, + True)) + + def test_create_resource(self): + self.assertIsNone(self._test_use_external_resource(False, True, False)) + self.assertIsNone(self._test_use_external_resource(False, False, + False)) + self.assertIsNone(self._test_use_external_resource(True, True, False)) + + def test_raise_error(self): + # If exists and shouldn't it is checked in resource + # validation so below scenario is not tested here + self.assertRaises(NonRecoverableError, + self._test_use_external_resource, + is_external=True, + create_if_missing=False, + exists=False) + + +class ValidateResourceTests(unittest.TestCase): + + def _test_validate_resource(self, + is_external, + create_if_missing, + exists, + client_mock_provided=None): + properties = {'create_if_missing': create_if_missing, + 'use_external_resource': is_external, + 'resource_id': 'resource_id'} + client_mock = client_mock_provided or mock.MagicMock() + os_type = 'test' + + def _raise_error(*_): + raise NonRecoverableError('Error') + + def _return_something(*_): + return mock.MagicMock() + return_value = _return_something if exists else _raise_error + if exists: + properties.update({'resource_id': 'rid'}) + + node_context = MockCloudifyContext(node_id='a20847', + properties=properties) + with 
mock.patch( + 'openstack_plugin_common._get_resource_by_name_or_id_from_ctx', + new=return_value): + return common.validate_resource(node_context, client_mock, os_type) + + def test_use_existing_resource(self): + self._test_validate_resource(True, True, True) + self._test_validate_resource(True, False, True) + + def test_create_resource(self): + client_mock = mock.MagicMock() + client_mock.cosmo_list.return_value = ['a', 'b', 'c'] + client_mock.get_quota.return_value = 5 + self._test_validate_resource(False, True, False, client_mock) + self._test_validate_resource(False, False, False, client_mock) + self._test_validate_resource(True, True, False, client_mock) + + def test_raise_error(self): + # If exists and shouldn't it is checked in resource + # validation so below scenario is not tested here + self.assertRaises(NonRecoverableError, + self._test_validate_resource, + is_external=True, + create_if_missing=False, + exists=False) + + def test_raise_quota_error(self): + client_mock = mock.MagicMock() + client_mock.cosmo_list.return_value = ['a', 'b', 'c'] + client_mock.get_quota.return_value = 3 + self.assertRaises(NonRecoverableError, + self._test_validate_resource, + is_external=True, + create_if_missing=True, + exists=False, + client_mock_provided=client_mock) diff --git a/aria/multivim-plugin/openstack_plugin_common/tests/provider-context.json b/aria/multivim-plugin/openstack_plugin_common/tests/provider-context.json new file mode 100644 index 0000000000..f7e20e4ef5 --- /dev/null +++ b/aria/multivim-plugin/openstack_plugin_common/tests/provider-context.json @@ -0,0 +1,78 @@ +{ + "context": { + "resources": { + "management_keypair": { + "name": "p2_cloudify-manager-kp-ilya", + "id": "p2_cloudify-manager-kp-ilya", + "type": "keypair", + "external_resource": true + }, + "router": { + "name": "p2_cloudify-router", + "id": "856f9fb8-6676-4b99-b64d-b76874b30abf", + "type": "router", + "external_resource": true + }, + "subnet": { + "name": 
"p2_cloudify-admin-network-subnet", + "id": "dd193491-d728-4e3e-8199-27eec0ba18e4", + "type": "subnet", + "external_resource": true + }, + "int_network": { + "name": "p2_cloudify-admin-network", + "id": "27ef2770-5219-4bb1-81d4-14ed450c5181", + "type": "network", + "external_resource": true + }, + "management_server": { + "name": "p2_cfy-mgr-ilya-2014-06-01-11:59", + "id": "be9991da-9c34-4f7c-9c33-5e04ad2d5b3e", + "type": "server", + "external_resource": false + }, + "agents_security_group": { + "name": "p2_cloudify-sg-agents", + "id": "d52280aa-0e79-4697-bd08-baf3f84e2a10", + "type": "neutron security group", + "external_resource": true + }, + "agents_keypair": { + "name": "p2_cloudify-agents-kp-ilya", + "id": "p2_cloudify-agents-kp-ilya", + "type": "keypair", + "external_resource": true + }, + "management_security_group": { + "name": "p2_cloudify-sg-management", + "id": "5862e0d2-8f28-472e-936b-d2da9cb935b3", + "type": "neutron security group", + "external_resource": true + }, + "floating_ip": { + "external_resource": true, + "id": "None", + "type": "floating ip", + "ip": "CENSORED" + }, + "ext_network": { + "name": "Ext-Net", + "id": "7da74520-9d5e-427b-a508-213c84e69616", + "type": "network", + "external_resource": true + } + }, + "cloudify": { + "resources_prefix": "p2_", + "cloudify_agent": { + "user": "ubuntu", + "agent_key_path": "/PATH/CENSORED/p2_cloudify-agents-kp-ilya.pem", + "min_workers": 2, + "max_workers": 5, + "remote_execution_port": 22 + } + } + }, + "name": "cloudify_openstack" +} + diff --git a/aria/multivim-plugin/openstack_plugin_common/tests/test.py b/aria/multivim-plugin/openstack_plugin_common/tests/test.py new file mode 100644 index 0000000000..13099292ca --- /dev/null +++ b/aria/multivim-plugin/openstack_plugin_common/tests/test.py @@ -0,0 +1,40 @@ +import json +import os + +from cloudify.context import BootstrapContext + +from cloudify.mocks import MockCloudifyContext + + +RETRY_AFTER = 1 +# Time during which no retry could possibly 
happen. +NO_POSSIBLE_RETRY_TIME = RETRY_AFTER / 2.0 + +BOOTSTRAP_CONTEXTS_WITHOUT_PREFIX = ( + { + }, + { + 'resources_prefix': '' + }, + { + 'resources_prefix': None + }, +) + + +def set_mock_provider_context(ctx, provider_context): + + def mock_provider_context(provider_name_unused): + return provider_context + + ctx.get_provider_context = mock_provider_context + + +def create_mock_ctx_with_provider_info(*args, **kw): + cur_dir = os.path.dirname(os.path.realpath(__file__)) + full_file_name = os.path.join(cur_dir, 'provider-context.json') + with open(full_file_name) as f: + provider_context = json.loads(f.read())['context'] + kw['provider_context'] = provider_context + kw['bootstrap_context'] = BootstrapContext(provider_context['cloudify']) + return MockCloudifyContext(*args, **kw) diff --git a/aria/multivim-plugin/plugin.yaml b/aria/multivim-plugin/plugin.yaml new file mode 100644 index 0000000000..9c1c2ca2ba --- /dev/null +++ b/aria/multivim-plugin/plugin.yaml @@ -0,0 +1,954 @@ +################################################################################## +# Multi-vim built in types and plugins definitions. +################################################################################## + +plugins: + multivim: + executor: central_deployment_agent + #source: https://github.com/cloudify-cosmo/onap-multivim-plugin/archive/2.2.0.zip + source: + package_name: onap-multivim-plugin + package_version: '2.2.0' + +node_types: + onap.multivim.nodes.Server: + derived_from: cloudify.nodes.Compute + properties: + server: + default: {} + description: > + key-value server configuration as described in OpenStack compute create server API. (DEPRECATED - Use the args input in create operation instead) + use_external_resource: + type: boolean + default: false + description: > + a boolean for setting whether to create the resource or use an existing one. + See the using existing resources section. 
+ create_if_missing: + default: false + description: > + If use_external_resource is ``true`` and the resource is missing, + create it instead of failing. + resource_id: + default: '' + description: > + name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). + image: + default: '' + description: > + The image for the server. + May receive either the ID or the name of the image. + note: This property is currently optional for backwards compatibility, + but will be modified to become a required property in future versions + (Default: ''). + flavor: + default: '' + description: > + The flavor for the server. + May receive either the ID or the name of the flavor. + note: This property is currently optional for backwards compatibility, + but will be modified to become a required property in future versions + (Default: ''). + management_network_name: + default: '' + description: > + Cloudify's management network name. + Every server should be connected to the management network. + If the management network's name information is available in the Provider Context, + this connection is made automatically and there's no need to override this property + (See the Misc section for more information on the Openstack Provider Context). + Otherwise, it is required to set the value of this property to the management network name as it was set in the bootstrap process. + Note: When using Nova-net Openstack (see the Nova-net Support section), + don't set this property. Defaults to '' (empty string). + use_password: + default: false + description: > + A boolean describing whether this server image supports user-password authentication. + Images that do should post the administrator user's password to the Openstack metadata service (e.g. 
via cloudbase); + The password would then be retrieved by the plugin, + decrypted using the server's keypair and then saved in the server's runtime properties. + multivim_config: + default: {} + description: > + see Openstack Configuraion + interfaces: + cloudify.interfaces.lifecycle: + create: + implementation: openstack.nova_plugin.server.create + inputs: + args: + default: {} + multivim_config: + default: {} + start: + implementation: openstack.nova_plugin.server.start + inputs: + start_retry_interval: + description: Polling interval until the server is active in seconds + type: integer + default: 30 + private_key_path: + description: > + Path to private key which matches the server's + public key. Will be used to decrypt password in case + the "use_password" property is set to "true" + type: string + default: '' + multivim_config: + default: {} + stop: + implementation: openstack.nova_plugin.server.stop + inputs: + multivim_config: + default: {} + delete: + implementation: openstack.nova_plugin.server.delete + inputs: + multivim_config: + default: {} + cloudify.interfaces.validation: + creation: + implementation: openstack.nova_plugin.server.creation_validation + inputs: + args: + default: {} + multivim_config: + default: {} + + onap.multivim.nodes.WindowsServer: + derived_from: onap.multivim.nodes.Server + properties: + use_password: + default: true + description: > + Default changed for derived type + because Windows instances need a password for agent installation + os_family: + default: windows + description: > + (updates the os_family default as a convenience) + agent_config: + type: cloudify.datatypes.AgentConfig + default: + port: 5985 + description: > + (updates the defaults for the agent_config for Windows) + + onap.multivim.nodes.KeyPair: + derived_from: cloudify.nodes.Root + properties: + keypair: + default: {} + description: > + the keypair object as described by Openstack. 
This + parameter can be used to override and pass parameters + directly to Nova client. + Note that in the case of keypair, the only nested parameter + that can be used is "name". + private_key_path: + description: > + the path (on the machine the plugin is running on) to + where the private key should be stored. If + use_external_resource is set to "true", the existing + private key is expected to be at this path. + use_external_resource: + type: boolean + default: false + description: > + a boolean describing whether this resource should be + created or rather that it already exists on Openstack + and should be used as-is. + create_if_missing: + default: false + description: > + If use_external_resource is ``true`` and the resource is missing, + create it instead of failing. + resource_id: + default: '' + description: > + the name that will be given to the resource on Openstack (excluding optional prefix). + If not provided, a default name will be given instead. + If use_external_resource is set to "true", this exact + value (without any prefixes applied) will be looked for + as either the name or id of an existing keypair to be used. + multivim_config: + default: {} + description: > + endpoints and authentication configuration for Openstack. + Expected to contain the following nested fields: + username, password, tenant_name, auth_url, region. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + implementation: openstack.nova_plugin.keypair.create + inputs: + args: + default: {} + multivim_config: + default: {} + delete: + implementation: openstack.nova_plugin.keypair.delete + inputs: + multivim_config: + default: {} + cloudify.interfaces.validation: + creation: + implementation: openstack.nova_plugin.keypair.creation_validation + inputs: + multivim_config: + default: {} + + onap.multivim.nodes.Subnet: + derived_from: cloudify.nodes.Subnet + properties: + subnet: + default: {} + description: > + key-value subnet configuration as described at http://developer.openstack.org/api-ref-networking-v2.html#subnets. (**DEPRECATED - Use the `args` input in create operation instead**) + use_external_resource: + type: boolean + default: false + description: > + a boolean for setting whether to create the resource or use an existing one. + See the using existing resources section. + create_if_missing: + default: false + description: > + If use_external_resource is ``true`` and the resource is missing, + create it instead of failing. + resource_id: + default: '' + description: > + name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). + multivim_config: + default: {} + description: > + endpoints and authentication configuration for Openstack. + Expected to contain the following nested fields: + username, password, tenant_name, auth_url, region. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + implementation: openstack.neutron_plugin.subnet.create + inputs: + args: + default: {} + multivim_config: + default: {} + delete: + implementation: openstack.neutron_plugin.subnet.delete + inputs: + multivim_config: + default: {} + cloudify.interfaces.validation: + creation: + implementation: openstack.neutron_plugin.subnet.creation_validation + inputs: + args: + default: {} + multivim_config: + default: {} + + onap.multivim.nodes.SecurityGroup: + derived_from: cloudify.nodes.SecurityGroup + properties: + security_group: + default: {} + description: > + key-value security_group configuration as described in http://developer.openstack.org/api-ref-networking-v2-ext.html#createSecGroup. (**DEPRECATED - Use the `args` input in create operation instead**) + description: + type: string + default: '' + description: > + SecurityGroup description. + create_if_missing: + default: false + description: > + If use_external_resource is ``true`` and the resource is missing, + create it instead of failing. + use_external_resource: + type: boolean + default: false + description: > + a boolean for setting whether to create the resource or use an existing one. + See the using existing resources section. + resource_id: + default: '' + description: > + name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). + multivim_config: + default: {} + description: > + endpoints and authentication configuration for Openstack. + Expected to contain the following nested fields: + username, password, tenant_name, auth_url, region. 
+ rules: + default: [] + description: > + key-value security_group_rule configuration as described in http://developer.openstack.org/api-ref-networking-v2.html#security_groups + disable_default_egress_rules: + default: false + description: > + a flag for removing the default rules which https://wiki.openstack.org/wiki/Neutron/SecurityGroups#Behavior. If not set to `true`, these rules will remain, and exist alongside any additional rules passed using the `rules` property. + interfaces: + cloudify.interfaces.lifecycle: + create: + implementation: openstack.neutron_plugin.security_group.create + inputs: + args: + default: {} + multivim_config: + default: {} + delete: + implementation: openstack.neutron_plugin.security_group.delete + inputs: + multivim_config: + default: {} + cloudify.interfaces.validation: + creation: + implementation: openstack.neutron_plugin.security_group.creation_validation + inputs: + multivim_config: + default: {} + + onap.multivim.nodes.Router: + derived_from: cloudify.nodes.Router + properties: + router: + default: {} + description: > + key-value router configuration as described in http://developer.openstack.org/api-ref-networking-v2.html#layer3. (**DEPRECATED - Use the `args` input in create operation instead**) + external_network: + default: '' + description: > + An external network name or ID. + If given, the router will use this external network as a gateway. + default_to_managers_external_network: + default: true + description: > + A boolean which determines whether to use the Cloudify Manager's external network if no other external network was given (whether by a relationship, by the `external_network` property or by the nested `external_gateway_info` key in the `router` property). This is only relevant if the manager's external network appears in the Provider-context. Defaults to `true`. 
+ use_external_resource: + type: boolean + default: false + description: > + a boolean for setting whether to create the resource or use an existing one. + See the using existing resources section. + create_if_missing: + default: false + description: > + If use_external_resource is ``true`` and the resource is missing, + create it instead of failing. + resource_id: + default: '' + description: > + name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). + multivim_config: + default: {} + description: > + endpoints and authentication configuration for Openstack. + Expected to contain the following nested fields: + username, password, tenant_name, auth_url, region. + interfaces: + cloudify.interfaces.lifecycle: + create: + implementation: openstack.neutron_plugin.router.create + inputs: + args: + default: {} + multivim_config: + default: {} + delete: + implementation: openstack.neutron_plugin.router.delete + inputs: + multivim_config: + default: {} + cloudify.interfaces.validation: + creation: + implementation: openstack.neutron_plugin.router.creation_validation + inputs: + multivim_config: + default: {} + + onap.multivim.nodes.Port: + derived_from: cloudify.nodes.Port + properties: + port: + default: {} + description: > + key-value port configuration as described in http://developer.openstack.org/api-ref-networking-v2.html#ports. (**DEPRECATED - Use the `args` input in create operation instead**) + fixed_ip: + default: '' + description: > + may be used to request a specific fixed IP for the port. + If the IP is unavailable + (either already taken or does not belong to a subnet the port is on) + an error will be raised. + use_external_resource: + type: boolean + default: false + description: > + a boolean for setting whether to create the resource or use an existing one. + See the using existing resources section. 
+ create_if_missing: + default: false + description: > + If use_external_resource is ``true`` and the resource is missing, + create it instead of failing. + resource_id: + default: '' + description: > + name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). + multivim_config: + default: {} + description: > + endpoints and authentication configuration for Openstack. + Expected to contain the following nested fields: + username, password, tenant_name, auth_url, region. + interfaces: + cloudify.interfaces.lifecycle: + create: + implementation: openstack.neutron_plugin.port.create + inputs: + args: + default: {} + multivim_config: + default: {} + delete: + implementation: openstack.neutron_plugin.port.delete + inputs: + multivim_config: + default: {} + cloudify.interfaces.validation: + creation: + implementation: openstack.neutron_plugin.port.creation_validation + inputs: + multivim_config: + default: {} + + onap.multivim.nodes.Network: + derived_from: cloudify.nodes.Network + properties: + network: + default: {} + description: > + key-value network configuration as described in http://developer.openstack.org/api-ref-networking-v2.html#networks. (**DEPRECATED - Use the `args` input in create operation instead**) + use_external_resource: + type: boolean + default: false + description: > + a boolean for setting whether to create the resource or use an existing one. + See the using existing resources section. + create_if_missing: + default: false + description: > + If use_external_resource is ``true`` and the resource is missing, + create it instead of failing. + resource_id: + default: '' + description: > + name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). 
+ multivim_config: + default: {} + description: > + endpoints and authentication configuration for Openstack. + Expected to contain the following nested fields: + username, password, tenant_name, auth_url, region. + interfaces: + cloudify.interfaces.lifecycle: + create: + implementation: openstack.neutron_plugin.network.create + inputs: + args: + default: {} + multivim_config: + default: {} + delete: + implementation: openstack.neutron_plugin.network.delete + inputs: + multivim_config: + default: {} + cloudify.interfaces.validation: + creation: + implementation: openstack.neutron_plugin.network.creation_validation + inputs: + multivim_config: + default: {} + + onap.multivim.nodes.FloatingIP: + derived_from: cloudify.nodes.VirtualIP + properties: + floatingip: + default: {} + description: > + key-value floatingip configuration as described in http://developer.openstack.org/api-ref-networking-v2.html#layer3. (**DEPRECATED - Use the `args` input in create operation instead**) + use_external_resource: + type: boolean + default: false + description: > + a boolean for setting whether to create the resource or use an existing one. + See the using existing resources section. + create_if_missing: + default: false + description: > + If use_external_resource is ``true`` and the resource is missing, + create it instead of failing. + resource_id: + description: IP address of the floating IP + default: '' + multivim_config: + default: {} + description: > + endpoints and authentication configuration for Openstack. + Expected to contain the following nested fields: + username, password, tenant_name, auth_url, region. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + implementation: openstack.neutron_plugin.floatingip.create + inputs: + args: + default: {} + multivim_config: + default: {} + delete: + implementation: openstack.neutron_plugin.floatingip.delete + inputs: + multivim_config: + default: {} + cloudify.interfaces.validation: + creation: + implementation: openstack.neutron_plugin.floatingip.creation_validation + inputs: + multivim_config: + default: {} + + onap.multivim.nodes.Volume: + derived_from: cloudify.nodes.Volume + properties: + volume: + default: {} + description: > + key-value volume configuration as described in http://developer.openstack.org/api-ref-blockstorage-v1.html#volumes-v1. (**DEPRECATED - Use the `args` input in create operation instead**) + use_external_resource: + type: boolean + default: false + description: > + a boolean for setting whether to create the resource or use an existing one. + See the using existing resources section. + create_if_missing: + default: false + description: > + If use_external_resource is ``true`` and the resource is missing, + create it instead of failing. + resource_id: + default: '' + description: > + name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). + device_name: + default: auto + description: > + The device name this volume will be attached to. + Default value is *auto*, + which means openstack will auto-assign a device. + Note that if you do explicitly set a value, + this value may not be the actual device name assigned. + Sometimes the device requested will not be available and openstack will assign it to a different device, + this is why we recommend using *auto*. + multivim_config: + default: {} + description: > + endpoints and authentication configuration for Openstack. 
+                Expected to contain the following nested fields: +                username, password, tenant_name, auth_url, region. +      boot: +        type: boolean +        default: false +        description: > +          If a Server instance is connected to this Volume by a relationship, +          this volume will be used as the boot volume for that Server. +    interfaces: +      cloudify.interfaces.lifecycle: +        create: +          implementation: openstack.cinder_plugin.volume.create +          inputs: +            args: +              default: {} +            multivim_config: +              default: {} +            status_attempts: +              description: > +                Number of times to check for the creation's status before failing +              type: integer +              default: 20 +            status_timeout: +              description: > +                Interval (in seconds) between subsequent inquiries of the creation's +                status +              type: integer +              default: 15 +        delete: +          implementation: openstack.cinder_plugin.volume.delete +          inputs: +            multivim_config: +              default: {} +      cloudify.interfaces.validation: +        creation: +          implementation: openstack.cinder_plugin.volume.creation_validation +          inputs: +            multivim_config: +              default: {} + +  onap.multivim.nova_net.nodes.FloatingIP: +    derived_from: cloudify.nodes.VirtualIP +    properties: +      floatingip: +        default: {} +        description: > +          key-value floatingip configuration as described in http://developer.openstack.org/api-ref-compute-v2-ext.html#ext-os-floating-ips. (**DEPRECATED - Use the `args` input in create operation instead**) +      use_external_resource: +        type: boolean +        default: false +        description: > +          a boolean for setting whether to create the resource or use an existing one. +          See the using existing resources section. +      create_if_missing: +        default: false +        description: > +          If use_external_resource is ``true`` and the resource is missing, +          create it instead of failing. +      resource_id: +        default: '' +        description: > +          name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). 
+      multivim_config: +        default: {} +        description: > +          endpoints and authentication configuration for Openstack. +          Expected to contain the following nested fields: +          username, password, tenant_name, auth_url, region. +    interfaces: +      cloudify.interfaces.lifecycle: +        create: +          implementation: openstack.nova_plugin.floatingip.create +          inputs: +            args: +              default: {} +            multivim_config: +              default: {} +        delete: +          implementation: openstack.nova_plugin.floatingip.delete +          inputs: +            multivim_config: +              default: {} +      cloudify.interfaces.validation: +        creation: +          implementation: openstack.nova_plugin.floatingip.creation_validation +          inputs: +            multivim_config: +              default: {} + +  onap.multivim.nova_net.nodes.SecurityGroup: +    derived_from: cloudify.nodes.SecurityGroup +    properties: +      security_group: +        default: {} +        description: > +          key-value security_group configuration as described in http://developer.openstack.org/api-ref-compute-v2-ext.html#ext-os-security-groups. (**DEPRECATED - Use the `args` input in create operation instead**) +      use_external_resource: +        type: boolean +        default: false +        description: > +          a boolean for setting whether to create the resource or use an existing one. +          See the using existing resources section. +      create_if_missing: +        default: false +        description: > +          If use_external_resource is ``true`` and the resource is missing, +          create it instead of failing. +      resource_id: +        default: '' +        description: > +          name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). +      multivim_config: +        default: {} +        description: > +          endpoints and authentication configuration for Openstack. +          Expected to contain the following nested fields: +          username, password, tenant_name, auth_url, region. +      rules: +        default: [] +        description: > +          key-value security group rule as described in http://developer.openstack.org/api-ref-compute-v2-ext.html#ext-os-security-group-default-rules. 
+ description: + description: security group description + interfaces: + cloudify.interfaces.lifecycle: + create: + implementation: openstack.nova_plugin.security_group.create + inputs: + args: + default: {} + multivim_config: + default: {} + status_attempts: + description: > + Number of times to check for the attachment's status before failing + type: integer + default: 10 + status_timeout: + description: > + Interval (in seconds) between subsequent inquiries of the attachment's + status + type: integer + default: 2 + delete: + implementation: openstack.nova_plugin.security_group.delete + inputs: + multivim_config: + default: {} + cloudify.interfaces.validation: + creation: + implementation: openstack.nova_plugin.security_group.creation_validation + inputs: + multivim_config: + default: {} + + onap.multivim.nodes.Image: + derived_from: cloudify.nodes.Root + properties: + image: + description: > + Required parameters are (container_format, disk_format). Accepted + types are available on + http://docs.openstack.org/developer/glance/formats.html + To create an image from the local file its path should be added + in data parameter. + default: {} + image_url: + default: '' + description: > + The openstack resource URL for the image. + use_external_resource: + default: false + description: > + a boolean for setting whether to create the resource or use an existing one. + See the using existing resources section. + create_if_missing: + default: false + description: > + If use_external_resource is ``true`` and the resource is missing, + create it instead of failing. + resource_id: + default: '' + description: > + name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). + multivim_config: + default: {} + description: > + endpoints and authentication configuration for Openstack. 
+          Expected to contain the following nested fields: +          username, password, tenant_name, auth_url, region. +    interfaces: +      cloudify.interfaces.lifecycle: +        create: +          implementation: openstack.glance_plugin.image.create +          inputs: +            multivim_config: +              default: {} +        start: +          implementation: openstack.glance_plugin.image.start +          inputs: +            start_retry_interval: +              default: 30 +            multivim_config: +              default: {} +        delete: +          implementation: openstack.glance_plugin.image.delete +          inputs: +            multivim_config: +              default: {} +      cloudify.interfaces.validation: +        creation: +          implementation: openstack.glance_plugin.image.creation_validation +          inputs: +            multivim_config: +              default: {} + + +  onap.multivim.nodes.Project: +    derived_from: cloudify.nodes.Root +    properties: +      project: +        default: {} +        description: > +          key-value project configuration. +      users: +        default: [] +        description: > +          List of users assigned to this project in the following format: +            { name: string, roles: [string] } +      quota: +        default: {} +        description: | +          A dictionary mapping service names to quota definitions for a project + +          e.g:: + +            quota: +              neutron: <quota> +              nova: <quota> +      use_external_resource: +        default: false +        description: > +          a boolean for setting whether to create the resource or use an existing one. +          See the using existing resources section. +      create_if_missing: +        default: false +        description: > +          If use_external_resource is ``true`` and the resource is missing, +          create it instead of failing. +      resource_id: +        default: '' +        description: > +          name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). +      multivim_config: +        default: {} +        description: > +          endpoints and authentication configuration for Openstack. +          Expected to contain the following nested fields: +          username, password, tenant_name, auth_url, region. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: openstack.keystone_plugin.project.create + start: openstack.keystone_plugin.project.start + delete: openstack.keystone_plugin.project.delete + cloudify.interfaces.validation: + creation: openstack.keystone_plugin.project.creation_validation + +relationships: + onap.multivim.port_connected_to_security_group: + derived_from: cloudify.relationships.connected_to + source_interfaces: + cloudify.interfaces.relationship_lifecycle: + establish: + implementation: openstack.neutron_plugin.port.connect_security_group + inputs: + multivim_config: + default: {} + + onap.multivim.subnet_connected_to_router: + derived_from: cloudify.relationships.connected_to + target_interfaces: + cloudify.interfaces.relationship_lifecycle: + establish: + implementation: openstack.neutron_plugin.router.connect_subnet + inputs: + multivim_config: + default: {} + unlink: + implementation: openstack.neutron_plugin.router.disconnect_subnet + inputs: + multivim_config: + default: {} + + onap.multivim.server_connected_to_floating_ip: + derived_from: cloudify.relationships.connected_to + source_interfaces: + cloudify.interfaces.relationship_lifecycle: + establish: + implementation: openstack.nova_plugin.server.connect_floatingip + inputs: + fixed_ip: + description: > + The fixed IP to be associated with the floating IP. + If omitted, Openstack will choose which port to associate. 
+ type: string + default: '' + multivim_config: + default: {} + unlink: + implementation: openstack.nova_plugin.server.disconnect_floatingip + inputs: + multivim_config: + default: {} + + onap.multivim.port_connected_to_floating_ip: + derived_from: cloudify.relationships.connected_to + source_interfaces: + cloudify.interfaces.relationship_lifecycle: + establish: + implementation: openstack.neutron_plugin.floatingip.connect_port + inputs: + multivim_config: + default: {} + unlink: + implementation: openstack.neutron_plugin.floatingip.disconnect_port + inputs: + multivim_config: + default: {} + + onap.multivim.server_connected_to_security_group: + derived_from: cloudify.relationships.connected_to + source_interfaces: + cloudify.interfaces.relationship_lifecycle: + establish: + implementation: openstack.nova_plugin.server.connect_security_group + inputs: + multivim_config: + default: {} + unlink: + implementation: openstack.nova_plugin.server.disconnect_security_group + inputs: + multivim_config: + default: {} + + onap.multivim.server_connected_to_port: + derived_from: cloudify.relationships.connected_to + source_interfaces: + cloudify.interfaces.relationship_lifecycle: + unlink: + implementation: openstack.neutron_plugin.port.detach + inputs: + multivim_config: + default: {} + + onap.multivim.server_connected_to_keypair: + derived_from: cloudify.relationships.connected_to + + onap.multivim.port_connected_to_subnet: + derived_from: cloudify.relationships.connected_to + + onap.multivim.volume_attached_to_server: + derived_from: cloudify.relationships.connected_to + target_interfaces: + cloudify.interfaces.relationship_lifecycle: + establish: + implementation: openstack.nova_plugin.server.attach_volume + inputs: + multivim_config: + default: {} + status_attempts: + description: > + Number of times to check for the attachment's status before failing + type: integer + default: 10 + status_timeout: + description: > + Interval (in seconds) between subsequent inquiries of 
the attachment's + status + type: integer + default: 2 + unlink: + implementation: openstack.nova_plugin.server.detach_volume + inputs: + multivim_config: + default: {} + status_attempts: + description: > + Number of times to check for the detachment's status before failing + type: integer + default: 10 + status_timeout: + description: > + Interval (in seconds) between subsequent inquiries of the detachment's + status + type: integer + default: 2 diff --git a/aria/multivim-plugin/readthedocs.yml b/aria/multivim-plugin/readthedocs.yml new file mode 100644 index 0000000000..af59f269aa --- /dev/null +++ b/aria/multivim-plugin/readthedocs.yml @@ -0,0 +1 @@ +requirements_file: docs/requirements.txt diff --git a/aria/multivim-plugin/setup.py b/aria/multivim-plugin/setup.py new file mode 100644 index 0000000000..116aba507c --- /dev/null +++ b/aria/multivim-plugin/setup.py @@ -0,0 +1,45 @@ +######### +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+ +from setuptools import setup + + +setup( + zip_safe=True, + name='cloudify-openstack-plugin', + version='2.2.0', + author='idanmo', + author_email='idan@gigaspaces.com', + packages=[ + 'openstack_plugin_common', + 'nova_plugin', + 'neutron_plugin', + 'cinder_plugin', + 'glance_plugin', + 'keystone_plugin' + ], + license='LICENSE', + description='Cloudify plugin for OpenStack infrastructure.', + install_requires=[ + 'cloudify-plugins-common>=3.3.1', + 'keystoneauth1>=2.16.0,<3', + 'python-novaclient==7.0.0', + 'python-keystoneclient==3.5.0', + 'python-neutronclient==6.0.0', + 'python-cinderclient==1.9.0', + 'python-glanceclient==2.5.0', + 'IPy==0.81' + ] +) diff --git a/aria/multivim-plugin/system_tests/__init__.py b/aria/multivim-plugin/system_tests/__init__.py new file mode 100644 index 0000000000..3ad9513f40 --- /dev/null +++ b/aria/multivim-plugin/system_tests/__init__.py @@ -0,0 +1,2 @@ +from pkgutil import extend_path +__path__ = extend_path(__path__, __name__) diff --git a/aria/multivim-plugin/system_tests/openstack_handler.py b/aria/multivim-plugin/system_tests/openstack_handler.py new file mode 100644 index 0000000000..76368fa10a --- /dev/null +++ b/aria/multivim-plugin/system_tests/openstack_handler.py @@ -0,0 +1,657 @@ +######## +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+ +import random +import logging +import os +import time +import copy +from contextlib import contextmanager + +from cinderclient import client as cinderclient +from keystoneauth1 import loading, session +import novaclient.client as nvclient +import neutronclient.v2_0.client as neclient +from retrying import retry + +from cosmo_tester.framework.handlers import ( + BaseHandler, + BaseCloudifyInputsConfigReader) +from cosmo_tester.framework.util import get_actual_keypath + +logging.getLogger('neutronclient.client').setLevel(logging.INFO) +logging.getLogger('novaclient.client').setLevel(logging.INFO) + + +VOLUME_TERMINATION_TIMEOUT_SECS = 300 + + +class OpenstackCleanupContext(BaseHandler.CleanupContext): + + def __init__(self, context_name, env): + super(OpenstackCleanupContext, self).__init__(context_name, env) + self.before_run = self.env.handler.openstack_infra_state() + + def cleanup(self): + """ + Cleans resources created by the test. + Resource that existed before the test will not be removed + """ + super(OpenstackCleanupContext, self).cleanup() + resources_to_teardown = self.get_resources_to_teardown( + self.env, resources_to_keep=self.before_run) + if self.skip_cleanup: + self.logger.warn('[{0}] SKIPPING cleanup of resources: {1}' + .format(self.context_name, resources_to_teardown)) + else: + self._clean(self.env, resources_to_teardown) + + @classmethod + def clean_all(cls, env): + """ + Cleans *all* resources, including resources that were not + created by the test + """ + super(OpenstackCleanupContext, cls).clean_all(env) + resources_to_teardown = cls.get_resources_to_teardown(env) + cls._clean(env, resources_to_teardown) + + @classmethod + def _clean(cls, env, resources_to_teardown): + cls.logger.info('Openstack handler will try to remove these resources:' + ' {0}'.format(resources_to_teardown)) + failed_to_remove = env.handler.remove_openstack_resources( + resources_to_teardown) + if failed_to_remove: + trimmed_failed_to_remove = {key: value for key, 
value in + failed_to_remove.iteritems() + if value} + if len(trimmed_failed_to_remove) > 0: + msg = 'Openstack handler failed to remove some resources:' \ + ' {0}'.format(trimmed_failed_to_remove) + cls.logger.error(msg) + raise RuntimeError(msg) + + @classmethod + def get_resources_to_teardown(cls, env, resources_to_keep=None): + all_existing_resources = env.handler.openstack_infra_state() + if resources_to_keep: + return env.handler.openstack_infra_state_delta( + before=resources_to_keep, after=all_existing_resources) + else: + return all_existing_resources + + def update_server_id(self, server_name): + + # retrieve the id of the new server + nova, _, _ = self.env.handler.openstack_clients() + servers = nova.servers.list( + search_opts={'name': server_name}) + if len(servers) > 1: + raise RuntimeError( + 'Expected 1 server with name {0}, but found {1}' + .format(server_name, len(servers))) + + new_server_id = servers[0].id + + # retrieve the id of the old server + old_server_id = None + servers = self.before_run['servers'] + for server_id, name in servers.iteritems(): + if server_name == name: + old_server_id = server_id + break + if old_server_id is None: + raise RuntimeError( + 'Could not find a server with name {0} ' + 'in the internal cleanup context state' + .format(server_name)) + + # replace the id in the internal state + servers[new_server_id] = servers.pop(old_server_id) + + +class CloudifyOpenstackInputsConfigReader(BaseCloudifyInputsConfigReader): + + def __init__(self, cloudify_config, manager_blueprint_path, **kwargs): + super(CloudifyOpenstackInputsConfigReader, self).__init__( + cloudify_config, manager_blueprint_path=manager_blueprint_path, + **kwargs) + + @property + def region(self): + return self.config['region'] + + @property + def management_server_name(self): + return self.config['manager_server_name'] + + @property + def agent_key_path(self): + return self.config['agent_private_key_path'] + + @property + def management_user_name(self): + 
return self.config['ssh_user'] + + @property + def management_key_path(self): + return self.config['ssh_key_filename'] + + @property + def agent_keypair_name(self): + return self.config['agent_public_key_name'] + + @property + def management_keypair_name(self): + return self.config['manager_public_key_name'] + + @property + def use_existing_agent_keypair(self): + return self.config['use_existing_agent_keypair'] + + @property + def use_existing_manager_keypair(self): + return self.config['use_existing_manager_keypair'] + + @property + def external_network_name(self): + return self.config['external_network_name'] + + @property + def keystone_username(self): + return self.config['keystone_username'] + + @property + def keystone_password(self): + return self.config['keystone_password'] + + @property + def keystone_tenant_name(self): + return self.config['keystone_tenant_name'] + + @property + def keystone_url(self): + return self.config['keystone_url'] + + @property + def neutron_url(self): + return self.config.get('neutron_url', None) + + @property + def management_network_name(self): + return self.config['management_network_name'] + + @property + def management_subnet_name(self): + return self.config['management_subnet_name'] + + @property + def management_router_name(self): + return self.config['management_router'] + + @property + def agents_security_group(self): + return self.config['agents_security_group_name'] + + @property + def management_security_group(self): + return self.config['manager_security_group_name'] + + +class OpenstackHandler(BaseHandler): + + CleanupContext = OpenstackCleanupContext + CloudifyConfigReader = CloudifyOpenstackInputsConfigReader + + def before_bootstrap(self): + super(OpenstackHandler, self).before_bootstrap() + with self.update_cloudify_config() as patch: + suffix = '-%06x' % random.randrange(16 ** 6) + server_name_prop_path = 'manager_server_name' + patch.append_value(server_name_prop_path, suffix) + + def after_bootstrap(self, 
provider_context): + super(OpenstackHandler, self).after_bootstrap(provider_context) + resources = provider_context['resources'] + agent_keypair = resources['agents_keypair'] + management_keypair = resources['management_keypair'] + self.remove_agent_keypair = agent_keypair['external_resource'] is False + self.remove_management_keypair = \ + management_keypair['external_resource'] is False + + def after_teardown(self): + super(OpenstackHandler, self).after_teardown() + if self.remove_agent_keypair: + agent_key_path = get_actual_keypath(self.env, + self.env.agent_key_path, + raise_on_missing=False) + if agent_key_path: + os.remove(agent_key_path) + if self.remove_management_keypair: + management_key_path = get_actual_keypath( + self.env, + self.env.management_key_path, + raise_on_missing=False) + if management_key_path: + os.remove(management_key_path) + + def openstack_clients(self): + creds = self._client_creds() + params = { + 'region_name': creds.pop('region_name'), + } + + loader = loading.get_plugin_loader("password") + auth = loader.load_from_options(**creds) + sess = session.Session(auth=auth, verify=True) + + params['session'] = sess + + nova = nvclient.Client('2', **params) + neutron = neclient.Client(**params) + cinder = cinderclient.Client('2', **params) + + return (nova, neutron, cinder) + + @retry(stop_max_attempt_number=5, wait_fixed=20000) + def openstack_infra_state(self): + """ + @retry decorator is used because this error sometimes occur: + ConnectionFailed: Connection to neutron failed: Maximum + attempts reached + """ + nova, neutron, cinder = self.openstack_clients() + try: + prefix = self.env.resources_prefix + except (AttributeError, KeyError): + prefix = '' + return { + 'networks': dict(self._networks(neutron, prefix)), + 'subnets': dict(self._subnets(neutron, prefix)), + 'routers': dict(self._routers(neutron, prefix)), + 'security_groups': dict(self._security_groups(neutron, prefix)), + 'servers': dict(self._servers(nova, prefix)), + 
'key_pairs': dict(self._key_pairs(nova, prefix)), + 'floatingips': dict(self._floatingips(neutron, prefix)), + 'ports': dict(self._ports(neutron, prefix)), + 'volumes': dict(self._volumes(cinder, prefix)) + } + + def openstack_infra_state_delta(self, before, after): + after = copy.deepcopy(after) + return { + prop: self._remove_keys(after[prop], before[prop].keys()) + for prop in before + } + + def _find_keypairs_to_delete(self, nodes, node_instances): + """Filter the nodes only returning the names of keypair nodes + + Examine node_instances and nodes, return the external_name of + those node_instances, which correspond to a node that has a + type == KeyPair + + To filter by deployment_id, simply make sure that the nodes and + node_instances this method receives, are pre-filtered + (ie. filter the nodes while fetching them from the manager) + """ + keypairs = set() # a set of (deployment_id, node_id) tuples + + for node in nodes: + if node.get('type') != 'cloudify.openstack.nodes.KeyPair': + continue + # deployment_id isnt always present in local_env runs + key = (node.get('deployment_id'), node['id']) + keypairs.add(key) + + for node_instance in node_instances: + key = (node_instance.get('deployment_id'), + node_instance['node_id']) + if key not in keypairs: + continue + + runtime_properties = node_instance['runtime_properties'] + if not runtime_properties: + continue + name = runtime_properties.get('external_name') + if name: + yield name + + def _delete_keypairs_by_name(self, keypair_names): + nova, neutron, cinder = self.openstack_clients() + existing_keypairs = nova.keypairs.list() + + for name in keypair_names: + for keypair in existing_keypairs: + if keypair.name == name: + nova.keypairs.delete(keypair) + + def remove_keypairs_from_local_env(self, local_env): + """Query the local_env for nodes which are keypairs, remove them + + Similar to querying the manager, we can look up nodes in the local_env + which is used for tests. 
+ """ + nodes = local_env.storage.get_nodes() + node_instances = local_env.storage.get_node_instances() + names = self._find_keypairs_to_delete(nodes, node_instances) + self._delete_keypairs_by_name(names) + + def remove_keypairs_from_manager(self, deployment_id=None, + rest_client=None): + """Query the manager for nodes by deployment_id, delete keypairs + + Fetch nodes and node_instances from the manager by deployment_id + (or all if not given), find which ones represent openstack keypairs, + remove them. + """ + if rest_client is None: + rest_client = self.env.rest_client + + nodes = rest_client.nodes.list(deployment_id=deployment_id) + node_instances = rest_client.node_instances.list( + deployment_id=deployment_id) + keypairs = self._find_keypairs_to_delete(nodes, node_instances) + self._delete_keypairs_by_name(keypairs) + + def remove_keypair(self, name): + """Delete an openstack keypair by name. If it doesnt exist, do nothing. + """ + self._delete_keypairs_by_name([name]) + + def remove_openstack_resources(self, resources_to_remove): + # basically sort of a workaround, but if we get the order wrong + # the first time, there is a chance things would better next time + # 3'rd time can't really hurt, can it? 
+ # 3 is a charm + for _ in range(3): + resources_to_remove = self._remove_openstack_resources_impl( + resources_to_remove) + if all([len(g) == 0 for g in resources_to_remove.values()]): + break + # give openstack some time to update its data structures + time.sleep(3) + return resources_to_remove + + def _remove_openstack_resources_impl(self, resources_to_remove): + nova, neutron, cinder = self.openstack_clients() + + servers = nova.servers.list() + ports = neutron.list_ports()['ports'] + routers = neutron.list_routers()['routers'] + subnets = neutron.list_subnets()['subnets'] + networks = neutron.list_networks()['networks'] + # keypairs = nova.keypairs.list() + floatingips = neutron.list_floatingips()['floatingips'] + security_groups = neutron.list_security_groups()['security_groups'] + volumes = cinder.volumes.list() + + failed = { + 'servers': {}, + 'routers': {}, + 'ports': {}, + 'subnets': {}, + 'networks': {}, + 'key_pairs': {}, + 'floatingips': {}, + 'security_groups': {}, + 'volumes': {} + } + + volumes_to_remove = [] + for volume in volumes: + if volume.id in resources_to_remove['volumes']: + volumes_to_remove.append(volume) + + left_volumes = self._delete_volumes(nova, cinder, volumes_to_remove) + for volume_id, ex in left_volumes.iteritems(): + failed['volumes'][volume_id] = ex + + for server in servers: + if server.id in resources_to_remove['servers']: + with self._handled_exception(server.id, failed, 'servers'): + nova.servers.delete(server) + + for router in routers: + if router['id'] in resources_to_remove['routers']: + with self._handled_exception(router['id'], failed, 'routers'): + for p in neutron.list_ports( + device_id=router['id'])['ports']: + neutron.remove_interface_router(router['id'], { + 'port_id': p['id'] + }) + neutron.delete_router(router['id']) + + for port in ports: + if port['id'] in resources_to_remove['ports']: + with self._handled_exception(port['id'], failed, 'ports'): + neutron.delete_port(port['id']) + + for subnet in subnets: 
+ if subnet['id'] in resources_to_remove['subnets']: + with self._handled_exception(subnet['id'], failed, 'subnets'): + neutron.delete_subnet(subnet['id']) + + for network in networks: + if network['name'] == self.env.external_network_name: + continue + if network['id'] in resources_to_remove['networks']: + with self._handled_exception(network['id'], failed, + 'networks'): + neutron.delete_network(network['id']) + + # TODO: implement key-pair creation and cleanup per tenant + # + # IMPORTANT: Do not remove key-pairs, they might be used + # by another tenant (of the same user) + # + # for key_pair in keypairs: + # if key_pair.name == self.env.agent_keypair_name and \ + # self.env.use_existing_agent_keypair: + # # this is a pre-existing agent key-pair, do not remove + # continue + # elif key_pair.name == self.env.management_keypair_name and \ + # self.env.use_existing_manager_keypair: + # # this is a pre-existing manager key-pair, do not remove + # continue + # elif key_pair.id in resources_to_remove['key_pairs']: + # with self._handled_exception(key_pair.id, failed, + # 'key_pairs'): + # nova.keypairs.delete(key_pair) + + for floatingip in floatingips: + if floatingip['id'] in resources_to_remove['floatingips']: + with self._handled_exception(floatingip['id'], failed, + 'floatingips'): + neutron.delete_floatingip(floatingip['id']) + + for security_group in security_groups: + if security_group['name'] == 'default': + continue + if security_group['id'] in resources_to_remove['security_groups']: + with self._handled_exception(security_group['id'], + failed, 'security_groups'): + neutron.delete_security_group(security_group['id']) + + return failed + + def _delete_volumes(self, nova, cinder, existing_volumes): + unremovables = {} + end_time = time.time() + VOLUME_TERMINATION_TIMEOUT_SECS + + for volume in existing_volumes: + # detach the volume + if volume.status in ['available', 'error', 'in-use']: + try: + self.logger.info('Detaching volume {0} ({1}), currently in' + 
' status {2} ...'. + format(volume.name, volume.id, + volume.status)) + for attachment in volume.attachments: + nova.volumes.delete_server_volume( + server_id=attachment['server_id'], + attachment_id=attachment['id']) + except Exception as e: + self.logger.warning('Attempt to detach volume {0} ({1})' + ' yielded exception: "{2}"'. + format(volume.name, volume.id, + e)) + unremovables[volume.id] = e + existing_volumes.remove(volume) + + time.sleep(3) + for volume in existing_volumes: + # delete the volume + if volume.status in ['available', 'error', 'in-use']: + try: + self.logger.info('Deleting volume {0} ({1}), currently in' + ' status {2} ...'. + format(volume.name, volume.id, + volume.status)) + cinder.volumes.delete(volume) + except Exception as e: + self.logger.warning('Attempt to delete volume {0} ({1})' + ' yielded exception: "{2}"'. + format(volume.name, volume.id, + e)) + unremovables[volume.id] = e + existing_volumes.remove(volume) + + # wait for all volumes deletion until completed or timeout is reached + while existing_volumes and time.time() < end_time: + time.sleep(3) + for volume in existing_volumes: + volume_id = volume.id + volume_name = volume.name + try: + vol = cinder.volumes.get(volume_id) + if vol.status == 'deleting': + self.logger.debug('volume {0} ({1}) is being ' + 'deleted...'.format(volume_name, + volume_id)) + else: + self.logger.warning('volume {0} ({1}) is in ' + 'unexpected status: {2}'. + format(volume_name, volume_id, + vol.status)) + except Exception as e: + # the volume wasn't found, it was deleted + if hasattr(e, 'code') and e.code == 404: + self.logger.info('deleted volume {0} ({1})'. + format(volume_name, volume_id)) + existing_volumes.remove(volume) + else: + self.logger.warning('failed to remove volume {0} ' + '({1}), exception: {2}'. 
+ format(volume_name, + volume_id, e)) + unremovables[volume_id] = e + existing_volumes.remove(volume) + + if existing_volumes: + for volume in existing_volumes: + # try to get the volume's status + try: + vol = cinder.volumes.get(volume.id) + vol_status = vol.status + except: + # failed to get volume... status is unknown + vol_status = 'unknown' + + unremovables[volume.id] = 'timed out while removing volume '\ + '{0} ({1}), current volume status '\ + 'is {2}'.format(volume.name, + volume.id, + vol_status) + + if unremovables: + self.logger.warning('failed to remove volumes: {0}'.format( + unremovables)) + + return unremovables + + def _client_creds(self): + return { + 'username': self.env.keystone_username, + 'password': self.env.keystone_password, + 'auth_url': self.env.keystone_url, + 'project_name': self.env.keystone_tenant_name, + 'region_name': self.env.region + } + + def _networks(self, neutron, prefix): + return [(n['id'], n['name']) + for n in neutron.list_networks()['networks'] + if self._check_prefix(n['name'], prefix)] + + def _subnets(self, neutron, prefix): + return [(n['id'], n['name']) + for n in neutron.list_subnets()['subnets'] + if self._check_prefix(n['name'], prefix)] + + def _routers(self, neutron, prefix): + return [(n['id'], n['name']) + for n in neutron.list_routers()['routers'] + if self._check_prefix(n['name'], prefix)] + + def _security_groups(self, neutron, prefix): + return [(n['id'], n['name']) + for n in neutron.list_security_groups()['security_groups'] + if self._check_prefix(n['name'], prefix)] + + def _servers(self, nova, prefix): + return [(s.id, s.human_id) + for s in nova.servers.list() + if self._check_prefix(s.human_id, prefix)] + + def _key_pairs(self, nova, prefix): + return [(kp.id, kp.name) + for kp in nova.keypairs.list() + if self._check_prefix(kp.name, prefix)] + + def _floatingips(self, neutron, prefix): + return [(ip['id'], ip['floating_ip_address']) + for ip in neutron.list_floatingips()['floatingips']] + + def 
_ports(self, neutron, prefix): + return [(p['id'], p['name']) + for p in neutron.list_ports()['ports'] + if self._check_prefix(p['name'], prefix)] + + def _volumes(self, cinder, prefix): + return [(v.id, v.name) for v in cinder.volumes.list() + if self._check_prefix(v.name, prefix)] + + def _check_prefix(self, name, prefix): + # some openstack resources (eg. volumes) can have no display_name, + # in which case it's None + return name is None or name.startswith(prefix) + + def _remove_keys(self, dct, keys): + for key in keys: + if key in dct: + del dct[key] + return dct + + @contextmanager + def _handled_exception(self, resource_id, failed, resource_group): + try: + yield + except BaseException, ex: + failed[resource_group][resource_id] = ex + + +handler = OpenstackHandler diff --git a/aria/multivim-plugin/system_tests/openstack_nova_net_handler.py b/aria/multivim-plugin/system_tests/openstack_nova_net_handler.py new file mode 100644 index 0000000000..06fa0ab4d0 --- /dev/null +++ b/aria/multivim-plugin/system_tests/openstack_nova_net_handler.py @@ -0,0 +1,98 @@ +######## +# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+ +import novaclient.v2.client as nvclient + +from system_tests.openstack_handler import OpenstackHandler + + +class OpenstackNovaNetHandler(OpenstackHandler): + + # using the Config Readers of the regular OpenstackHandler - attempts + # of reading neutron-related data may fail but shouldn't happen from + # nova-net tests in the first place + # CloudifyConfigReader = None + + def openstack_clients(self): + creds = self._client_creds() + return nvclient.Client(**creds) + + def openstack_infra_state(self): + nova = self.openstack_clients() + prefix = self.env.resources_prefix + return { + 'security_groups': dict(self._security_groups(nova, prefix)), + 'servers': dict(self._servers(nova, prefix)), + 'key_pairs': dict(self._key_pairs(nova, prefix)), + 'floatingips': dict(self._floatingips(nova, prefix)), + } + + def _floatingips(self, nova, prefix): + return [(ip.id, ip.ip) + for ip in nova.floating_ips.list()] + + def _security_groups(self, nova, prefix): + return [(n.id, n.name) + for n in nova.security_groups.list() + if self._check_prefix(n.name, prefix)] + + def _remove_openstack_resources_impl(self, resources_to_remove): + nova = self.openstack_clients() + + servers = nova.servers.list() + keypairs = nova.keypairs.list() + floatingips = nova.floating_ips.list() + security_groups = nova.security_groups.list() + + failed = { + 'servers': {}, + 'key_pairs': {}, + 'floatingips': {}, + 'security_groups': {} + } + + for server in servers: + if server.id in resources_to_remove['servers']: + with self._handled_exception(server.id, failed, 'servers'): + nova.servers.delete(server) + for key_pair in keypairs: + if key_pair.name == self.env.agent_keypair_name and \ + self.env.use_existing_agent_keypair: + # this is a pre-existing agent key-pair, do not remove + continue + elif key_pair.name == self.env.management_keypair_name and \ + self.env.use_existing_manager_keypair: + # this is a pre-existing manager key-pair, do not remove + continue + elif key_pair.id in 
resources_to_remove['key_pairs']: + with self._handled_exception(key_pair.id, failed, 'key_pairs'): + nova.keypairs.delete(key_pair) + for floatingip in floatingips: + if floatingip.id in resources_to_remove['floatingips']: + with self._handled_exception(floatingip.id, failed, + 'floatingips'): + nova.floating_ips.delete(floatingip) + for security_group in security_groups: + if security_group.name == 'default': + continue + if security_group.id in resources_to_remove['security_groups']: + with self._handled_exception(security_group.id, failed, + 'security_groups'): + nova.security_groups.delete(security_group) + + return failed + + +handler = OpenstackNovaNetHandler diff --git a/aria/multivim-plugin/tox.ini b/aria/multivim-plugin/tox.ini new file mode 100644 index 0000000000..b3572d70d2 --- /dev/null +++ b/aria/multivim-plugin/tox.ini @@ -0,0 +1,44 @@ +# content of: tox.ini , put in same dir as setup.py +[tox] +envlist=flake8,docs,py27 + +[testenv] +deps = + -rdev-requirements.txt + +[testenv:py27] +deps = + coverage==3.7.1 + nose + nose-cov + mock + testfixtures + {[testenv]deps} +commands = + nosetests --with-cov --cov-report term-missing \ + --cov cinder_plugin cinder_plugin/tests \ + --cov glance_plugin glance_plugin/tests \ + --cov keystone_plugin keystone_plugin/tests \ + --cov neutron_plugin \ + neutron_plugin/tests/test_port.py neutron_plugin/tests/test_security_group.py \ + --cov nova_plugin nova_plugin/tests \ + --cov openstack_plugin_common openstack_plugin_common/tests + +[testenv:docs] +changedir=docs +deps = + git+https://github.com/cloudify-cosmo/sphinxify.git@initial-work +commands = + sphinx-build -W -b html -d {envtmpdir}/doctrees . 
{envtmpdir}/html + +[testenv:flake8] +deps = + flake8 + {[testenv]deps} +commands = + flake8 cinder_plugin + flake8 neutron_plugin + flake8 nova_plugin + flake8 openstack_plugin_common + flake8 glance_plugin + flake8 keystone_plugin diff --git a/asdc-controller/pom.xml b/asdc-controller/pom.xml index e0a620f8a9..9560092fd2 100644 --- a/asdc-controller/pom.xml +++ b/asdc-controller/pom.xml @@ -2,12 +2,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
- <groupId>org.openecomp</groupId>
- <artifactId>mso</artifactId>
+ <groupId>org.openecomp.so</groupId>
+ <artifactId>so</artifactId>
<version>1.1.0-SNAPSHOT</version>
</parent>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>asdc-controller</artifactId>
<name>asdc-controller</name>
<description>ASDC CLient and Controller</description>
@@ -47,7 +47,7 @@ <scope>test</scope>
</dependency>
<dependency>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>mso-catalog-db</artifactId>
<version>${project.version}</version>
</dependency>
@@ -91,7 +91,7 @@ </dependency>
<dependency>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>common</artifactId>
<version>${project.version}</version>
</dependency>
@@ -109,7 +109,7 @@ <scope>provided</scope>
</dependency>
<dependency>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>status-control</artifactId>
<version>${project.version}</version>
</dependency>
diff --git a/bpmn/MSOCockpit/pom.xml b/bpmn/MSOCockpit/pom.xml index 165c528812..5fa9e7e2f1 100644 --- a/bpmn/MSOCockpit/pom.xml +++ b/bpmn/MSOCockpit/pom.xml @@ -1,13 +1,13 @@ <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>bpmn</artifactId>
<version>1.1.0-SNAPSHOT</version> </parent>
<modelVersion>4.0.0</modelVersion>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>MSOCockpit</artifactId>
<packaging>pom</packaging>
<name>MSOCockpit</name>
@@ -47,17 +47,17 @@ </dependency>
<dependency>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>cockpit-urnmap-plugin</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>MSOCoreBPMN</artifactId>
<version>${project.version}</version>
</dependency>
diff --git a/bpmn/MSOCommonBPMN/pom.xml b/bpmn/MSOCommonBPMN/pom.xml index e35c874a4e..3452fe3fdd 100644 --- a/bpmn/MSOCommonBPMN/pom.xml +++ b/bpmn/MSOCommonBPMN/pom.xml @@ -3,7 +3,7 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<modelVersion>4.0.0</modelVersion>
<parent>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>bpmn</artifactId>
<version>1.1.0-SNAPSHOT</version>
</parent>
@@ -296,13 +296,13 @@ <version>3.4</version>
</dependency>
<dependency>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>MSOCoreBPMN</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<!-- unit test utilities -->
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>MSOCoreBPMN</artifactId>
<version>${project.version}</version>
<classifier>tests</classifier>
@@ -320,14 +320,14 @@ <version>4.3.2.RELEASE</version>
</dependency>
<dependency>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>MSOMockServer</artifactId>
<version>${project.version}</version>
<classifier>classes</classifier>
<scope>test</scope>
</dependency>
<dependency>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>MSORESTClient</artifactId>
<version>${project.version}</version>
</dependency>
diff --git a/bpmn/MSOCoreBPMN/pom.xml b/bpmn/MSOCoreBPMN/pom.xml index bdc26713e9..29ea1b6e01 100644 --- a/bpmn/MSOCoreBPMN/pom.xml +++ b/bpmn/MSOCoreBPMN/pom.xml @@ -2,12 +2,12 @@ <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>bpmn</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> <modelVersion>4.0.0</modelVersion> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>MSOCoreBPMN</artifactId> <packaging>jar</packaging> @@ -134,7 +134,7 @@ <artifactId>Saxon-HE</artifactId> </dependency> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>common</artifactId> <version>${project.version}</version> </dependency> @@ -169,7 +169,7 @@ <version>1.6</version> </dependency> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>status-control</artifactId> <version>${project.version}</version> </dependency> diff --git a/bpmn/MSOCoreBPMN/src/test/java/org/openecomp/mso/bpmn/core/PropertyConfigurationTest.java b/bpmn/MSOCoreBPMN/src/test/java/org/openecomp/mso/bpmn/core/PropertyConfigurationTest.java index fa0f8d8dd8..50b9a53f04 100644 --- a/bpmn/MSOCoreBPMN/src/test/java/org/openecomp/mso/bpmn/core/PropertyConfigurationTest.java +++ b/bpmn/MSOCoreBPMN/src/test/java/org/openecomp/mso/bpmn/core/PropertyConfigurationTest.java @@ -105,30 +105,4 @@ public class PropertyConfigurationTest { Assert.assertEquals("testValue", props.get("testKey")); props.put("newKey", "newvalue"); } - - @Test - public void testNotAllowedPropertyReloading() throws IOException { - Path msoConfigPath = Paths.get(System.getProperty("mso.config.path")); - Path backupPropFilePath = msoConfigPath.resolve("backup-" + 
PropertyConfiguration.MSO_BPMN_PROPERTIES); - - try { - // Create a new file... a backup file - Files.createFile(backupPropFilePath); - - // Load properties - PropertyConfiguration propertyConfiguration = PropertyConfiguration.getInstance(); - Map<String,String> props = propertyConfiguration.getProperties(PropertyConfiguration.MSO_BPMN_PROPERTIES); - Assert.assertNotNull(props); - Assert.assertEquals("testValue", props.get("testKey")); - - // Update the backup file - Path bpmnPropertiesSourcePath = Paths.get("src", "test", "resources", "mso.bpmn.properties"); - Files.copy(bpmnPropertiesSourcePath, backupPropFilePath, StandardCopyOption.REPLACE_EXISTING); - - // Cache size should remain the same - Assert.assertEquals(1, PropertyConfiguration.getInstance().cacheSize()); - } finally { - backupPropFilePath.toFile().delete(); - } - } } diff --git a/bpmn/MSOInfrastructureBPMN/pom.xml b/bpmn/MSOInfrastructureBPMN/pom.xml index ca9ce57912..5c0eff96bb 100644 --- a/bpmn/MSOInfrastructureBPMN/pom.xml +++ b/bpmn/MSOInfrastructureBPMN/pom.xml @@ -1,7 +1,7 @@ <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>bpmn</artifactId>
<version>1.1.0-SNAPSHOT</version>
</parent>
@@ -282,17 +282,17 @@ <version>3.4</version>
</dependency>
<dependency>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>MSOCoreBPMN</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>MSOCommonBPMN</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>MSOCommonBPMN</artifactId>
<version>${project.version}</version>
<classifier>tests</classifier>
@@ -300,7 +300,7 @@ </dependency>
<dependency>
<!-- unit test utilities -->
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>MSOCoreBPMN</artifactId>
<version>${project.version}</version>
<classifier>tests</classifier>
@@ -318,14 +318,14 @@ <version>4.3.2.RELEASE</version>
</dependency>
<dependency>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>MSOMockServer</artifactId>
<version>${project.version}</version>
<scope>test</scope>
<classifier>classes</classifier>
</dependency>
<dependency>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>MSORESTClient</artifactId>
<version>${project.version}</version>
</dependency>
diff --git a/bpmn/MSOMockServer/pom.xml b/bpmn/MSOMockServer/pom.xml index 268f392977..34f7a2331d 100644 --- a/bpmn/MSOMockServer/pom.xml +++ b/bpmn/MSOMockServer/pom.xml @@ -1,12 +1,12 @@ <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>bpmn</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> <modelVersion>4.0.0</modelVersion> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>MSOMockServer</artifactId> <packaging>war</packaging> diff --git a/bpmn/MSORESTClient/pom.xml b/bpmn/MSORESTClient/pom.xml index 2c899084f8..e17a58f488 100644 --- a/bpmn/MSORESTClient/pom.xml +++ b/bpmn/MSORESTClient/pom.xml @@ -4,12 +4,12 @@ <modelVersion>4.0.0</modelVersion>
<parent>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>bpmn</artifactId>
<version>1.1.0-SNAPSHOT</version> </parent>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>MSORESTClient</artifactId>
<packaging>jar</packaging>
diff --git a/bpmn/MSOURN-plugin/pom.xml b/bpmn/MSOURN-plugin/pom.xml index ac75b53c08..a8b198de58 100644 --- a/bpmn/MSOURN-plugin/pom.xml +++ b/bpmn/MSOURN-plugin/pom.xml @@ -3,12 +3,12 @@ <modelVersion>4.0.0</modelVersion>
<parent>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>bpmn</artifactId>
<version>1.1.0-SNAPSHOT</version>
</parent>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>cockpit-urnmap-plugin</artifactId>
<packaging>jar</packaging>
@@ -56,7 +56,7 @@ <scope>provided</scope>
</dependency>
<dependency>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>common</artifactId>
<version>${project.version}</version>
<scope>compile</scope>
diff --git a/bpmn/pom.xml b/bpmn/pom.xml index ed062ba1c3..073db573fb 100644 --- a/bpmn/pom.xml +++ b/bpmn/pom.xml @@ -3,12 +3,12 @@ <modelVersion>4.0.0</modelVersion> <parent> - <groupId>org.openecomp</groupId> - <artifactId>mso</artifactId> + <groupId>org.openecomp.so</groupId> + <artifactId>so</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>bpmn</artifactId> <name>BPMN Subsystem</name> <description>BPMN Subsystem for MSO</description> diff --git a/common/pom.xml b/common/pom.xml index 3bacaf03dd..59a5e1054a 100644 --- a/common/pom.xml +++ b/common/pom.xml @@ -2,12 +2,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> - <groupId>org.openecomp</groupId> - <artifactId>mso</artifactId> + <groupId>org.openecomp.so</groupId> + <artifactId>so</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>common</artifactId> <name>MSO Common classes</name> <description>MSO Common classes:- Logger</description> @@ -205,7 +205,7 @@ </executions> <dependencies> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>common</artifactId> <version>${project.version}</version> </dependency> diff --git a/common/src/main/java/org/openecomp/mso/logger/MsoLogger.java b/common/src/main/java/org/openecomp/mso/logger/MsoLogger.java index 110cb40914..4c39815bd0 100644 --- a/common/src/main/java/org/openecomp/mso/logger/MsoLogger.java +++ b/common/src/main/java/org/openecomp/mso/logger/MsoLogger.java @@ -103,7 +103,7 @@ public class MsoLogger { public enum ResponseCode { Suc(0), PermissionError(100), DataError(300), DataNotFound(301), BadRequest(302), SchemaError( 400), BusinessProcesssError(500), ServiceNotAvailable(501), InternalError( - 502), Conflict(503), 
DBAccessError(504), CommunicationError(505), UnknownError(900); + 502), Conflict(503), DBAccessError(504), CommunicationError(505), UnknownError(900); private int value; @@ -131,7 +131,7 @@ public class MsoLogger { } }; - private EELFLogger logger, auditLogger, metricsLogger; + private EELFLogger debugLogger, errorLogger, auditLogger, metricsLogger; private static final String CONFIG_FILE = System.getProperty("jboss.home.dir") + "/mso-config/uuid/uuid_" + System.getProperty("jboss.server.name"); private static String instanceUUID, serverIP, serverName; @@ -141,7 +141,8 @@ public class MsoLogger { private static final Logger LOGGER = Logger.getLogger(MsoLogger.class.getName()); private MsoLogger(MsoLogger.Catalog cat) { - this.logger = EELFManager.getInstance().getErrorLogger(); + this.debugLogger = EELFManager.getInstance().getDebugLogger(); + this.errorLogger = EELFManager.getInstance().getErrorLogger(); this.auditLogger = EELFManager.getInstance().getAuditLogger(); this.metricsLogger = EELFManager.getInstance().getMetricsLogger(); MsoLogger.initialization(); @@ -168,7 +169,7 @@ public class MsoLogger { /** * Get the MsoLogger based on the catalog - * + * * @param cat * Catalog of the logger * @return the MsoLogger @@ -179,7 +180,7 @@ public class MsoLogger { /** * Record the Metrics event with no argument - * + * * @param startTime * Transaction starting time in millieseconds * @param statusCode @@ -196,7 +197,7 @@ public class MsoLogger { * Target VNF or VM acted opon by the component, if available */ public void recordMetricEvent(Long startTime, StatusCode statusCode, ResponseCode responseCode, String responseDesc, - String targetEntity, String targetServiceName, String targetVEntity) { + String targetEntity, String targetServiceName, String targetVEntity) { prepareMetricMsg(startTime, statusCode, responseCode.getValue(), responseDesc, targetEntity, targetServiceName, targetVEntity); metricsLogger.info(""); @@ -218,7 +219,7 @@ public class MsoLogger { * Human 
redable description of the application response code */ public void recordAuditEvent(Long startTime, StatusCode statusCode, ResponseCode responseCode, - String responseDesc) { + String responseDesc) { prepareAuditMsg(startTime, statusCode, responseCode.getValue(), responseDesc); auditLogger.info(""); MDC.remove(TIMER); @@ -233,7 +234,7 @@ public class MsoLogger { */ public void debug(String msg) { prepareMsg(DEBUG_LEVEL); - logger.debug(msg); + debugLogger.debug(msg); } /** @@ -246,7 +247,7 @@ public class MsoLogger { */ public void debug(String msg, Throwable t) { prepareMsg(DEBUG_LEVEL); - logger.debug(msg, t); + debugLogger.debug(msg, t); } // Info methods @@ -259,7 +260,7 @@ public class MsoLogger { public void info(EELFResolvableErrorEnum msg, String targetEntity, String targetServiceName) { prepareErrorMsg(INFO_LEVEL, targetEntity, targetServiceName, null, ""); - logger.info(msg); + debugLogger.info(msg); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -275,7 +276,7 @@ public class MsoLogger { public void info(EELFResolvableErrorEnum msg, String arg0, String targetEntity, String targetServiceName) { prepareErrorMsg(INFO_LEVEL, targetEntity, targetServiceName, null, ""); - logger.info(msg, normalize(arg0)); + debugLogger.info(msg, normalize(arg0)); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -289,10 +290,10 @@ public class MsoLogger { * The arguments used in the log message */ public void info(EELFResolvableErrorEnum msg, String arg0, String arg1, String targetEntity, - String targetServiceName) { + String targetServiceName) { prepareErrorMsg(INFO_LEVEL, targetEntity, targetServiceName, null, ""); - logger.info(msg, normalize(arg0), normalize(arg1)); + debugLogger.info(msg, normalize(arg0), normalize(arg1)); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -306,10 +307,10 @@ public class MsoLogger { * The arguments used in the log message */ public void info(EELFResolvableErrorEnum msg, String arg0, String arg1, 
String arg2, String targetEntity, - String targetServiceName) { + String targetServiceName) { prepareErrorMsg(INFO_LEVEL, targetEntity, targetServiceName, null, ""); - logger.info(msg, normalize(arg0), normalize(arg1), normalize(arg2)); + debugLogger.info(msg, normalize(arg0), normalize(arg1), normalize(arg2)); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -323,10 +324,10 @@ public class MsoLogger { * The arguments used in the log message */ public void info(EELFResolvableErrorEnum msg, String arg0, String arg1, String arg2, String arg3, - String targetEntity, String targetServiceName) { + String targetEntity, String targetServiceName) { prepareErrorMsg(INFO_LEVEL, targetEntity, targetServiceName, null, ""); - logger.info(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3)); + debugLogger.info(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3)); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -340,10 +341,10 @@ public class MsoLogger { * The arguments used in the log message */ public void info(EELFResolvableErrorEnum msg, String arg0, String arg1, String arg2, String arg3, String arg4, - String targetEntity, String targetServiceName) { + String targetEntity, String targetServiceName) { prepareErrorMsg(INFO_LEVEL, targetEntity, targetServiceName, null, ""); - logger.info(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3), normalize(arg4)); + debugLogger.info(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3), normalize(arg4)); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -357,10 +358,10 @@ public class MsoLogger { * The arguments used in the log message */ public void info(EELFResolvableErrorEnum msg, String arg0, String arg1, String arg2, String arg3, String arg4, - String arg5, String targetEntity, String targetServiceName) { + String arg5, String targetEntity, String targetServiceName) { prepareErrorMsg(INFO_LEVEL, targetEntity, 
targetServiceName, null, ""); - logger.info(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3), normalize(arg4), + debugLogger.info(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3), normalize(arg4), normalize(arg5)); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); @@ -374,10 +375,10 @@ public class MsoLogger { * The log message to put */ public void warn(EELFResolvableErrorEnum msg, String targetEntity, String targetServiceName, ErrorCode errorCode, - String errorDesc) { + String errorDesc) { prepareErrorMsg(WARN_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.warn(msg); + errorLogger.warn(msg); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -391,11 +392,11 @@ public class MsoLogger { * The exception info */ public void warn(EELFResolvableErrorEnum msg, String targetEntity, String targetServiceName, ErrorCode errorCode, - String errorDesc, Throwable t) { + String errorDesc, Throwable t) { prepareErrorMsg(WARN_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.warn(msg); - logger.warn("Exception raised: " + getNormalizedStackTrace(t)); - logger.debug("Exception raised", t); + errorLogger.warn(msg); + errorLogger.warn("Exception raised: " + getNormalizedStackTrace(t)); + debugLogger.debug("Exception raised", t); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -409,9 +410,9 @@ public class MsoLogger { * The argument used in the log message */ public void warn(EELFResolvableErrorEnum msg, String arg, String targetEntity, String targetServiceName, - ErrorCode errorCode, String errorDesc) { + ErrorCode errorCode, String errorDesc) { prepareErrorMsg(WARN_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.warn(msg, arg); + errorLogger.warn(msg, arg); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -427,11 +428,11 @@ public class MsoLogger { * The exception info */ public void warn(EELFResolvableErrorEnum msg, 
String arg, String targetEntity, String targetServiceName, - ErrorCode errorCode, String errorDesc, Throwable t) { + ErrorCode errorCode, String errorDesc, Throwable t) { prepareErrorMsg(WARN_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.warn(msg, arg); - logger.warn("Exception raised: " + getNormalizedStackTrace(t)); - logger.debug("Exception raised", t); + errorLogger.warn(msg, arg); + errorLogger.warn("Exception raised: " + getNormalizedStackTrace(t)); + debugLogger.debug("Exception raised", t); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -445,9 +446,9 @@ public class MsoLogger { * The arguments used in the log message */ public void warn(EELFResolvableErrorEnum msg, String arg0, String arg1, String targetEntity, - String targetServiceName, ErrorCode errorCode, String errorDesc) { + String targetServiceName, ErrorCode errorCode, String errorDesc) { prepareErrorMsg(WARN_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.warn(msg, normalize(arg0), normalize(arg1)); + errorLogger.warn(msg, normalize(arg0), normalize(arg1)); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -463,11 +464,11 @@ public class MsoLogger { * The exception info */ public void warn(EELFResolvableErrorEnum msg, String arg0, String arg1, String targetEntity, - String targetServiceName, ErrorCode errorCode, String errorDesc, Throwable t) { + String targetServiceName, ErrorCode errorCode, String errorDesc, Throwable t) { prepareErrorMsg(WARN_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.warn(msg, normalize(arg0), normalize(arg1)); - logger.warn("Exception raised: " + getNormalizedStackTrace(t)); - logger.debug("Exception raised", t); + errorLogger.warn(msg, normalize(arg0), normalize(arg1)); + errorLogger.warn("Exception raised: " + getNormalizedStackTrace(t)); + debugLogger.debug("Exception raised", t); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -481,9 +482,9 @@ public 
class MsoLogger { * The arguments used in the log message */ public void warn(EELFResolvableErrorEnum msg, String arg0, String arg1, String arg2, String targetEntity, - String targetServiceName, ErrorCode errorCode, String errorDesc) { + String targetServiceName, ErrorCode errorCode, String errorDesc) { prepareErrorMsg(WARN_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.warn(msg, normalize(arg0), normalize(arg1), normalize(arg2)); + errorLogger.warn(msg, normalize(arg0), normalize(arg1), normalize(arg2)); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -499,11 +500,11 @@ public class MsoLogger { * The exception info */ public void warn(EELFResolvableErrorEnum msg, String arg0, String arg1, String arg2, String targetEntity, - String targetServiceName, ErrorCode errorCode, String errorDesc, Throwable t) { + String targetServiceName, ErrorCode errorCode, String errorDesc, Throwable t) { prepareErrorMsg(WARN_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.warn(msg, normalize(arg0), normalize(arg1), normalize(arg2)); - logger.warn("Exception raised: " + getNormalizedStackTrace(t)); - logger.debug("Exception raised", t); + errorLogger.warn(msg, normalize(arg0), normalize(arg1), normalize(arg2)); + errorLogger.warn("Exception raised: " + getNormalizedStackTrace(t)); + debugLogger.debug("Exception raised", t); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -517,9 +518,9 @@ public class MsoLogger { * The arguments used in the log message */ public void warn(EELFResolvableErrorEnum msg, String arg0, String arg1, String arg2, String arg3, - String targetEntity, String targetServiceName, ErrorCode errorCode, String errorDesc) { + String targetEntity, String targetServiceName, ErrorCode errorCode, String errorDesc) { prepareErrorMsg(WARN_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.warn(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3)); + 
errorLogger.warn(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3)); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -535,11 +536,11 @@ public class MsoLogger { * The exception info */ public void warn(EELFResolvableErrorEnum msg, String arg0, String arg1, String arg2, String arg3, - String targetEntity, String targetServiceName, ErrorCode errorCode, String errorDesc, Throwable t) { + String targetEntity, String targetServiceName, ErrorCode errorCode, String errorDesc, Throwable t) { prepareErrorMsg(WARN_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.warn(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3)); - logger.warn("Exception raised: " + getNormalizedStackTrace(t)); - logger.debug("Exception raised", t); + errorLogger.warn(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3)); + errorLogger.warn("Exception raised: " + getNormalizedStackTrace(t)); + debugLogger.debug("Exception raised", t); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -553,9 +554,9 @@ public class MsoLogger { * The arguments used in the log message */ public void warn(EELFResolvableErrorEnum msg, String arg0, String arg1, String arg2, String arg3, String arg4, - String targetEntity, String targetServiceName, ErrorCode errorCode, String errorDesc) { + String targetEntity, String targetServiceName, ErrorCode errorCode, String errorDesc) { prepareErrorMsg(WARN_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.warn(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3), normalize(arg4)); + errorLogger.warn(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3), normalize(arg4)); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -571,11 +572,11 @@ public class MsoLogger { * The exception info */ public void warn(EELFResolvableErrorEnum msg, String arg0, String arg1, String arg2, String arg3, String arg4, - String 
targetEntity, String targetServiceName, ErrorCode errorCode, String errorDesc, Throwable t) { + String targetEntity, String targetServiceName, ErrorCode errorCode, String errorDesc, Throwable t) { prepareErrorMsg(WARN_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.warn(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3), normalize(arg4)); - logger.warn("Exception raised: " + getNormalizedStackTrace(t)); - logger.debug("Exception raised", t); + errorLogger.warn(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3), normalize(arg4)); + errorLogger.warn("Exception raised: " + getNormalizedStackTrace(t)); + debugLogger.debug("Exception raised", t); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -588,9 +589,9 @@ public class MsoLogger { * The log message to put */ public void error(EELFResolvableErrorEnum msg, String targetEntity, String targetServiceName, ErrorCode errorCode, - String errorDesc) { + String errorDesc) { prepareErrorMsg(ERROR_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.error(msg); + errorLogger.error(msg); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -604,11 +605,11 @@ public class MsoLogger { * The exception info */ public void error(EELFResolvableErrorEnum msg, String targetEntity, String targetServiceName, ErrorCode errorCode, - String errorDesc, Throwable t) { + String errorDesc, Throwable t) { prepareErrorMsg(ERROR_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.error(msg); - logger.error(exceptionArg, getNormalizedStackTrace(t)); - logger.debug("Exception raised", t); + errorLogger.error(msg); + errorLogger.error(exceptionArg, getNormalizedStackTrace(t)); + debugLogger.debug("Exception raised", t); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -622,9 +623,9 @@ public class MsoLogger { * The arguments used in the log message */ public void error(EELFResolvableErrorEnum msg, String arg0, 
String targetEntity, String targetServiceName, - ErrorCode errorCode, String errorDesc) { + ErrorCode errorCode, String errorDesc) { prepareErrorMsg(ERROR_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.error(msg, normalize(arg0)); + errorLogger.error(msg, normalize(arg0)); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -640,11 +641,11 @@ public class MsoLogger { * The exception info */ public void error(EELFResolvableErrorEnum msg, String arg0, String targetEntity, String targetServiceName, - ErrorCode errorCode, String errorDesc, Throwable t) { + ErrorCode errorCode, String errorDesc, Throwable t) { prepareErrorMsg(ERROR_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.error(msg, normalize(arg0)); - logger.error(exceptionArg, getNormalizedStackTrace(t)); - logger.debug("Exception raised", t); + errorLogger.error(msg, normalize(arg0)); + errorLogger.error(exceptionArg, getNormalizedStackTrace(t)); + debugLogger.debug("Exception raised", t); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -658,9 +659,9 @@ public class MsoLogger { * The arguments used in the log message */ public void error(EELFResolvableErrorEnum msg, String arg0, String arg1, String targetEntity, - String targetServiceName, ErrorCode errorCode, String errorDesc) { + String targetServiceName, ErrorCode errorCode, String errorDesc) { prepareErrorMsg(ERROR_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.error(msg, normalize(arg0), normalize(arg1)); + errorLogger.error(msg, normalize(arg0), normalize(arg1)); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -676,11 +677,11 @@ public class MsoLogger { * The exception info */ public void error(EELFResolvableErrorEnum msg, String arg0, String arg1, String targetEntity, - String targetServiceName, ErrorCode errorCode, String errorDesc, Throwable t) { + String targetServiceName, ErrorCode errorCode, String errorDesc, Throwable t) { 
prepareErrorMsg(ERROR_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.error(msg, normalize(arg0), normalize(arg1)); - logger.error(exceptionArg, getNormalizedStackTrace(t)); - logger.debug("Exception raised", t); + errorLogger.error(msg, normalize(arg0), normalize(arg1)); + errorLogger.error(exceptionArg, getNormalizedStackTrace(t)); + debugLogger.debug("Exception raised", t); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -694,9 +695,9 @@ public class MsoLogger { * The arguments used in the log message */ public void error(EELFResolvableErrorEnum msg, String arg0, String arg1, String arg2, String targetEntity, - String targetServiceName, ErrorCode errorCode, String errorDesc) { + String targetServiceName, ErrorCode errorCode, String errorDesc) { prepareErrorMsg(ERROR_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.error(msg, normalize(arg0), normalize(arg1), normalize(arg2)); + errorLogger.error(msg, normalize(arg0), normalize(arg1), normalize(arg2)); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -712,11 +713,11 @@ public class MsoLogger { * The exception info */ public void error(EELFResolvableErrorEnum msg, String arg0, String arg1, String arg2, String targetEntity, - String targetServiceName, ErrorCode errorCode, String errorDesc, Throwable t) { + String targetServiceName, ErrorCode errorCode, String errorDesc, Throwable t) { prepareErrorMsg(ERROR_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.error(msg, normalize(arg0), normalize(arg1), normalize(arg2)); - logger.error(exceptionArg, getNormalizedStackTrace(t)); - logger.debug("Exception raised", t); + errorLogger.error(msg, normalize(arg0), normalize(arg1), normalize(arg2)); + errorLogger.error(exceptionArg, getNormalizedStackTrace(t)); + debugLogger.debug("Exception raised", t); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -730,9 +731,9 @@ public class MsoLogger { * The arguments used in 
the log message */ public void error(EELFResolvableErrorEnum msg, String arg0, String arg1, String arg2, String arg3, - String targetEntity, String targetServiceName, ErrorCode errorCode, String errorDesc) { + String targetEntity, String targetServiceName, ErrorCode errorCode, String errorDesc) { prepareErrorMsg(ERROR_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.error(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3)); + errorLogger.error(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3)); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -748,11 +749,11 @@ public class MsoLogger { * The exception info */ public void error(EELFResolvableErrorEnum msg, String arg0, String arg1, String arg2, String arg3, - String targetEntity, String targetServiceName, ErrorCode errorCode, String errorDesc, Throwable t) { + String targetEntity, String targetServiceName, ErrorCode errorCode, String errorDesc, Throwable t) { prepareErrorMsg(ERROR_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.error(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3)); - logger.error(exceptionArg, getNormalizedStackTrace(t)); - logger.debug("Exception raised", t); + errorLogger.error(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3)); + errorLogger.error(exceptionArg, getNormalizedStackTrace(t)); + debugLogger.debug("Exception raised", t); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -766,9 +767,9 @@ public class MsoLogger { * The arguments used in the log message */ public void error(EELFResolvableErrorEnum msg, String arg0, String arg1, String arg2, String arg3, String arg4, - String targetEntity, String targetServiceName, ErrorCode errorCode, String errorDesc) { + String targetEntity, String targetServiceName, ErrorCode errorCode, String errorDesc) { prepareErrorMsg(ERROR_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - 
logger.error(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3), normalize(arg4)); + errorLogger.error(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3), normalize(arg4)); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } @@ -784,17 +785,17 @@ public class MsoLogger { * The exception info */ public void error(EELFResolvableErrorEnum msg, String arg0, String arg1, String arg2, String arg3, String arg4, - String targetEntity, String targetServiceName, ErrorCode errorCode, String errorDesc, Throwable t) { + String targetEntity, String targetServiceName, ErrorCode errorCode, String errorDesc, Throwable t) { prepareErrorMsg(ERROR_LEVEL, targetEntity, targetServiceName, errorCode, errorDesc); - logger.error(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3), normalize(arg4)); - logger.error(exceptionArg, getNormalizedStackTrace(t)); - logger.debug("Exception raised", t); + errorLogger.error(msg, normalize(arg0), normalize(arg1), normalize(arg2), normalize(arg3), normalize(arg4)); + errorLogger.error(exceptionArg, getNormalizedStackTrace(t)); + debugLogger.debug("Exception raised", t); MDC.remove(TARGETENTITY); MDC.remove(TARGETSERVICENAME); } public boolean isDebugEnabled() { - return logger.isDebugEnabled(); + return debugLogger.isDebugEnabled(); } private void prepareMsg(String loggingLevel) { @@ -835,7 +836,7 @@ public class MsoLogger { } private void prepareAuditMetricMsg(long startTime, long endTime, StatusCode statusCode, int responseCode, - String responseDesc) { + String responseDesc) { Date startDate = new Date(startTime); Date endDate = new Date(endTime); DateFormat formatter = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSXXX"); @@ -848,7 +849,7 @@ public class MsoLogger { } private void prepareErrorMsg(String loggingLevel, String targetEntity, String targetServiceName, - ErrorCode errorCode, String errorDesc) { + ErrorCode errorCode, String errorDesc) { MDC.put(ALERT_SEVERITY, 
getSeverityLevel(loggingLevel)); MDC.put(ERRORCODE, String.valueOf(errorCode)); MDC.put(ERRORDESC, errorDesc); @@ -858,7 +859,7 @@ public class MsoLogger { } private void prepareMetricMsg(long startTime, StatusCode statusCode, int responseCode, String responseDesc, - String targetEntity, String targetServiceName, String targetVEntity) { + String targetEntity, String targetServiceName, String targetVEntity) { long endTime = System.currentTimeMillis(); prepareMsg(INFO_LEVEL, null, String.valueOf(endTime - startTime)); prepareAuditMetricMsg(startTime, endTime, statusCode, responseCode, responseDesc); @@ -996,7 +997,7 @@ public class MsoLogger { /** * Set the requestId and serviceInstanceId - * + * * @param reqId * The requestId * @param svcId @@ -1014,7 +1015,7 @@ public class MsoLogger { /** * Set the remoteIp and the basic HTTP Authentication user - * + * * @param remoteIpp * The remote ip address * @param userp @@ -1031,7 +1032,7 @@ public class MsoLogger { /** * Set the serviceName - * + * * @param serviceNamep * The service name */ @@ -1044,7 +1045,7 @@ public class MsoLogger { /** * Get the serviceName - * + * * @return The service name */ public static String getServiceName() { @@ -1060,7 +1061,7 @@ public class MsoLogger { /** * Set the requestId and serviceInstanceId based on the mso request - * + * * @param msoRequest * The mso request */ diff --git a/mso-api-handlers/mso-api-handler-common/pom.xml b/mso-api-handlers/mso-api-handler-common/pom.xml index e11fbb8c25..9159bb2a52 100644 --- a/mso-api-handlers/mso-api-handler-common/pom.xml +++ b/mso-api-handlers/mso-api-handler-common/pom.xml @@ -3,13 +3,13 @@ <modelVersion>4.0.0</modelVersion> <parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>mso-api-handlers</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>mso-api-handler-common</artifactId> <name>ECOMP MSO API 
Handler common</name> @@ -73,13 +73,13 @@ </dependency> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>mso-catalog-db</artifactId> <version>${project.version}</version> </dependency> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>mso-requests-db</artifactId> <version>${project.version}</version> </dependency> diff --git a/mso-api-handlers/mso-api-handler-infra/pom.xml b/mso-api-handlers/mso-api-handler-infra/pom.xml index 71f8249546..2eb5f1e185 100644 --- a/mso-api-handlers/mso-api-handler-infra/pom.xml +++ b/mso-api-handlers/mso-api-handler-infra/pom.xml @@ -3,7 +3,7 @@ <modelVersion>4.0.0</modelVersion> <parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>mso-api-handlers</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> @@ -68,22 +68,22 @@ </dependency> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>mso-catalog-db</artifactId> <version>${project.version}</version> </dependency> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>mso-requests-db</artifactId> <version>${project.version}</version> </dependency> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>mso-api-handler-common</artifactId> <version>${project.version}</version> </dependency> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>status-control</artifactId> <version>${project.version}</version> </dependency> diff --git a/mso-api-handlers/mso-api-handler-infra/src/main/java/org/openecomp/mso/apihandlerinfra/ServiceInstances.java b/mso-api-handlers/mso-api-handler-infra/src/main/java/org/openecomp/mso/apihandlerinfra/ServiceInstances.java index 317859da0e..988d72cc6f 100644 --- 
a/mso-api-handlers/mso-api-handler-infra/src/main/java/org/openecomp/mso/apihandlerinfra/ServiceInstances.java +++ b/mso-api-handlers/mso-api-handler-infra/src/main/java/org/openecomp/mso/apihandlerinfra/ServiceInstances.java @@ -654,33 +654,35 @@ public class ServiceInstances { // SERVICE REQUEST // Construct the default service name // TODO need to make this a configurable property - String defaultServiceName = msoRequest.getRequestInfo().getSource() + "_DEFAULT"; + String sourceDefaultServiceName = msoRequest.getRequestInfo().getSource() + "_DEFAULT"; + String defaultService = "*"; Service serviceRecord = null; - if(msoRequest.getALaCarteFlag()){ - serviceRecord = db.getServiceByName(defaultServiceName); - }else{ - serviceRecord = db.getServiceByVersionAndInvariantId(msoRequest.getModelInfo().getModelInvariantId(), msoRequest.getModelInfo().getModelVersion()); - } int serviceId; ServiceRecipe recipe = null; - if(serviceRecord !=null){ - serviceId = serviceRecord.getId(); - recipe = db.getServiceRecipe(serviceId, action.name()); + + //if an aLaCarte flag was Not sent in the request, look first if there is a custom recipe for the specific model version + if(!msoRequest.getALaCarteFlag()){ + serviceRecord = db.getServiceByVersionAndInvariantId(msoRequest.getModelInfo().getModelInvariantId(), msoRequest.getModelInfo().getModelVersion()); + if(serviceRecord !=null){ + serviceId = serviceRecord.getId(); + recipe = db.getServiceRecipe(serviceId, action.name()); + } } - //if an aLaCarte flag was sent in the request, throw an error if the recipe was not found - RequestParameters reqParam = msoRequest.getServiceInstancesRequest().getRequestDetails().getRequestParameters(); - if(reqParam!=null && reqParam.isALaCarteSet() && recipe==null){ - return null; - }else if (recipe == null) { //aLaCarte wasn't sent, so we'll try the default - serviceRecord = db.getServiceByName(defaultServiceName); - serviceId = serviceRecord.getId(); - recipe = db.getServiceRecipe(serviceId, 
action.name()); + + if (recipe == null) { + //find source(initiator) default recipe + recipe = db.getServiceRecipeByServiceNameAndAction(sourceDefaultServiceName, action.name()); + } + if (recipe == null) { + //find default recipe + recipe = db.getServiceRecipeByServiceNameAndAction(defaultService, action.name()); } if(recipe==null){ return null; } return new RecipeLookupResult (recipe.getOrchestrationUri (), recipe.getRecipeTimeout ()); + } @@ -787,27 +789,38 @@ public class ServiceInstances { private RecipeLookupResult getNetworkUri (CatalogDatabase db, MsoRequest msoRequest, Action action) throws Exception { - String defaultNetworkType = msoRequest.getRequestInfo().getSource() + "_DEFAULT"; + String sourceDefaultNetworkType = msoRequest.getRequestInfo().getSource() + "_DEFAULT"; + String defaultNetworkType = "*"; String modelName = msoRequest.getModelInfo().getModelName(); Recipe recipe = null; - if(msoRequest.getALaCarteFlag()){ - recipe = db.getNetworkRecipe(defaultNetworkType, action.name()); - }else{ + //if an aLaCarte flag was Not sent in the request, look first if there is a custom recipe for the specific ModelCustomizationId + if(!msoRequest.getALaCarteFlag()){ + String networkType = null; + if(msoRequest.getModelInfo().getModelCustomizationId()!=null){ NetworkResource networkResource = db.getNetworkResourceByModelCustUuid(msoRequest.getModelInfo().getModelCustomizationId()); if(networkResource!=null){ - recipe = db.getNetworkRecipe(networkResource.getNetworkType(), action.name()); + networkType = networkResource.getNetworkType(); }else{ throw new ValidationException("no catalog entry found"); } }else{ //ok for version < 3 - recipe = db.getNetworkRecipe(modelName, action.name()); - } - if(recipe == null){ - recipe = db.getNetworkRecipe(defaultNetworkType, action.name()); + networkType = modelName; } + + //find latest version Recipe for the given networkType and action + recipe = db.getNetworkRecipe(networkType, action.name()); + } + + if(recipe == null){ 
+ //find source(initiator) default recipe + recipe = db.getNetworkRecipe(sourceDefaultNetworkType, action.name()); + } + if(recipe == null){ + //find default recipe + recipe = db.getNetworkRecipe(defaultNetworkType, action.name()); } if (recipe == null) { return null; diff --git a/mso-api-handlers/mso-requests-db/pom.xml b/mso-api-handlers/mso-requests-db/pom.xml index 63520f065b..ef69e8acb1 100644 --- a/mso-api-handlers/mso-requests-db/pom.xml +++ b/mso-api-handlers/mso-requests-db/pom.xml @@ -3,7 +3,7 @@ <modelVersion>4.0.0</modelVersion> <parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>mso-api-handlers</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> @@ -97,7 +97,7 @@ <version>1.0.0.Final</version> </dependency> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>common</artifactId> <version>${project.version}</version> </dependency> diff --git a/mso-api-handlers/pom.xml b/mso-api-handlers/pom.xml index 23f7be3f04..c4e1bbb50d 100644 --- a/mso-api-handlers/pom.xml +++ b/mso-api-handlers/pom.xml @@ -2,12 +2,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> - <groupId>org.openecomp</groupId> - <artifactId>mso</artifactId> + <groupId>org.openecomp.so</groupId> + <artifactId>so</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>mso-api-handlers</artifactId> <name>API Handler</name> <description>API Handler for MSO</description> @@ -35,4 +35,4 @@ <artifactId>httpclient</artifactId> </dependency> </dependencies> -</project>
\ No newline at end of file +</project> diff --git a/mso-catalog-db/pom.xml b/mso-catalog-db/pom.xml index 38af3ecf7e..7051ab7071 100644 --- a/mso-catalog-db/pom.xml +++ b/mso-catalog-db/pom.xml @@ -2,12 +2,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> - <groupId>org.openecomp</groupId> - <artifactId>mso</artifactId> + <groupId>org.openecomp.so</groupId> + <artifactId>so</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>mso-catalog-db</artifactId> <name>mso-catalog-db</name> <description>MSO Catalog Database definition and Hibernate objects</description> @@ -164,7 +164,7 @@ <scope>test</scope> </dependency> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>common</artifactId> <version>${project.version}</version> </dependency> diff --git a/mso-catalog-db/src/main/java/org/openecomp/mso/db/catalog/CatalogDatabase.java b/mso-catalog-db/src/main/java/org/openecomp/mso/db/catalog/CatalogDatabase.java index fcdaff7395..7ddaedcd30 100644 --- a/mso-catalog-db/src/main/java/org/openecomp/mso/db/catalog/CatalogDatabase.java +++ b/mso-catalog-db/src/main/java/org/openecomp/mso/db/catalog/CatalogDatabase.java @@ -514,6 +514,19 @@ public class CatalogDatabase implements Closeable { return resultList.get (0); } + /** + * @param serviceName + * @param action + * @return ServiceRecipe object or null if none found. 
returns a newest version of Service recipe that matches a given serviceName, action and for the newest service version + */ + public ServiceRecipe getServiceRecipeByServiceNameAndAction(String serviceName, String action) { + Service service = getServiceByName(serviceName); + if (service != null ){ + return getServiceRecipe(service.getId(),action); + } + return null; + } + public List<ServiceRecipe> getServiceRecipes (int serviceId) { StringBuilder hql = null; diff --git a/packages/arquillian-unit-tests/pom.xml b/packages/arquillian-unit-tests/pom.xml index 9ec7a0aacf..a557f56f46 100644 --- a/packages/arquillian-unit-tests/pom.xml +++ b/packages/arquillian-unit-tests/pom.xml @@ -2,12 +2,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>packages</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> - <groupId>org.openecomp.mso.packages</groupId> + <groupId>org.openecomp.so.packages</groupId> <name>Arquillian Unit Testing on MSO</name> <artifactId>arquillian-unit-tests</artifactId> @@ -225,7 +225,7 @@ </dependency> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>asdc-controller</artifactId> <version>${project.version}</version> <scope>test</scope> @@ -233,7 +233,7 @@ </dependency> <!-- <dependency> - <groupId>org.openecomp.mso.adapters</groupId> + <groupId>org.openecomp.so.adapters</groupId> <artifactId>mso-appc-adapter</artifactId> <version>${project.version}</version> <scope>test</scope> @@ -255,7 +255,7 @@ </dependency> <dependency> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>mso-catalog-db</artifactId> <version>${project.version}</version> <scope>test</scope> diff --git a/packages/deliveries/pom.xml b/packages/deliveries/pom.xml index a67e0a36c0..e083a28656 100644 --- 
a/packages/deliveries/pom.xml +++ b/packages/deliveries/pom.xml @@ -2,12 +2,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>packages</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> - <groupId>org.openecomp.mso.packages</groupId> + <groupId>org.openecomp.so.packages</groupId> <artifactId>mso-deliveries</artifactId> <packaging>pom</packaging> diff --git a/packages/docker/pom.xml b/packages/docker/pom.xml index 5f271a9fac..c37d68a5ed 100644 --- a/packages/docker/pom.xml +++ b/packages/docker/pom.xml @@ -3,13 +3,13 @@ <modelVersion>4.0.0</modelVersion> <parent> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>packages</artifactId> <version>1.1.0-SNAPSHOT</version> </parent> <packaging>pom</packaging> - <groupId>org.openecomp.mso</groupId> + <groupId>org.openecomp.so</groupId> <artifactId>docker</artifactId> <name>MSO Docker Deliveries</name> @@ -22,8 +22,8 @@ i.e: mvn clean install -P docker -Dmso.git.url=https://gerrit.openecomp.org/r--> <mso.chef.git.url.prefix>${env.GIT_NO_PROJECT}</mso.chef.git.url.prefix> <mso.chef.git.branchname>master</mso.chef.git.branchname> - <mso.chef.git.url.suffix.chef.repo>mso/chef-repo</mso.chef.git.url.suffix.chef.repo> - <mso.chef.git.url.suffix.chef.config>mso/mso-config</mso.chef.git.url.suffix.chef.config> + <mso.chef.git.url.suffix.chef.repo>so/chef-repo</mso.chef.git.url.suffix.chef.repo> + <mso.chef.git.url.suffix.chef.config>so/so-config</mso.chef.git.url.suffix.chef.config> <mso.project.version>${project.version}</mso.project.version> </properties> diff --git a/packages/docker/src/main/docker/docker-files/Dockerfile.aria b/packages/docker/src/main/docker/docker-files/Dockerfile.aria new file mode 100644 index 0000000000..43371b5fa1 --- /dev/null +++ 
b/packages/docker/src/main/docker/docker-files/Dockerfile.aria @@ -0,0 +1,29 @@ +# +# ============LICENSE_START=================================================== +# Copyright (c) 2017 Cloudify.co. All rights reserved. +# =================================================================== +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy +# of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# ============LICENSE_END==================================================== +# + +FROM httpd:alpine + +RUN apk update +RUN apk add python2 py2-pip gcc python2-dev linux-headers musl-dev git curl + +RUN pip install apache-ariatosca==0.1.0 jinja2==2.8 +RUN curl -sL http://github.com/dfilppi/aria-rest/archive/master.tar.gz|tar xzf - +WORKDIR aria-rest-master +RUN pip install . 
+ +CMD aria-rest diff --git a/packages/docker/src/main/docker/docker-files/Dockerfile.jacoco b/packages/docker/src/main/docker/docker-files/Dockerfile.jacoco index 744916258d..cd70b373f7 100644 --- a/packages/docker/src/main/docker/docker-files/Dockerfile.jacoco +++ b/packages/docker/src/main/docker/docker-files/Dockerfile.jacoco @@ -2,7 +2,7 @@ FROM ubuntu:14.04 ### File Author / Maintainer -MAINTAINER "The OpenECOMP Team" +MAINTAINER "The ONAP Team" LABEL Description="This image is used to get jacoco result from a jboss image" Version="1.0" ARG http_proxy diff --git a/packages/docker/src/main/docker/docker-files/Dockerfile.mso-arquillian b/packages/docker/src/main/docker/docker-files/Dockerfile.mso-arquillian index b07c8c9472..c279597d34 100644 --- a/packages/docker/src/main/docker/docker-files/Dockerfile.mso-arquillian +++ b/packages/docker/src/main/docker/docker-files/Dockerfile.mso-arquillian @@ -2,8 +2,8 @@ FROM openecomp/wildfly:1.0 ### File Author / Maintainer -MAINTAINER "The OpenECOMP Team" -LABEL Description="This image contains the OpenECOMP MSO base for arquillian" Version="1.0" +MAINTAINER "The ONAP Team" +LABEL Description="This image contains the ONAP SO base for arquillian" Version="1.0" ENV CHEF_REPO_NAME="chef-repo" ENV CHEF_CONFIG_NAME="mso-config" diff --git a/packages/docker/src/main/docker/docker-files/Dockerfile.mso-chef-final b/packages/docker/src/main/docker/docker-files/Dockerfile.mso-chef-final index ff7f44bcfd..16d062ec82 100644 --- a/packages/docker/src/main/docker/docker-files/Dockerfile.mso-chef-final +++ b/packages/docker/src/main/docker/docker-files/Dockerfile.mso-chef-final @@ -2,8 +2,8 @@ FROM openecomp/wildfly:1.0 ### File Author / Maintainer -MAINTAINER "The OpenECOMP Team" -LABEL Description="This image contains the OpenECOMP MSO" Version="1.0" +MAINTAINER "The ONAP Team" +LABEL Description="This image contains the ONAP SO" Version="1.0" ARG http_proxy ARG https_proxy diff --git 
a/packages/docker/src/main/docker/docker-files/Dockerfile.ubuntu-16.04-update b/packages/docker/src/main/docker/docker-files/Dockerfile.ubuntu-16.04-update index 2e8f268f19..8ed710745a 100644 --- a/packages/docker/src/main/docker/docker-files/Dockerfile.ubuntu-16.04-update +++ b/packages/docker/src/main/docker/docker-files/Dockerfile.ubuntu-16.04-update @@ -1,7 +1,7 @@ FROM ubuntu:16.04 ### File Author / Maintainer -MAINTAINER "The OpenECOMP Team" +MAINTAINER "The ONAP Team" LABEL Description="This image contains an updated version of ubuntu 16.04" Version="1.0" ARG http_proxy diff --git a/packages/docker/src/main/docker/docker-files/Dockerfile.wildfly-10 b/packages/docker/src/main/docker/docker-files/Dockerfile.wildfly-10 index fff753217d..0e766e2658 100644 --- a/packages/docker/src/main/docker/docker-files/Dockerfile.wildfly-10 +++ b/packages/docker/src/main/docker/docker-files/Dockerfile.wildfly-10 @@ -1,8 +1,8 @@ FROM openecomp/ubuntu-update:1.0 ### File Author / Maintainer -MAINTAINER "The OpenECOMP Team" -LABEL Description="This image contains OpenECOMP MSO ubuntu base" Version="1.0" +MAINTAINER "The ONAP Team" +LABEL Description="This image contains ONAP SO ubuntu base" Version="1.0" ARG http_proxy ARG https_proxy diff --git a/packages/pom.xml b/packages/pom.xml index 92d8c3e6c2..e632f82d62 100644 --- a/packages/pom.xml +++ b/packages/pom.xml @@ -2,12 +2,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
- <artifactId>mso</artifactId>
- <groupId>org.openecomp</groupId>
+ <groupId>org.openecomp.so</groupId>
+ <artifactId>so</artifactId>
<version>1.1.0-SNAPSHOT</version>
</parent>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>packages</artifactId>
<packaging>pom</packaging>
<name>MSO Packages</name>
diff --git a/packages/root-pack-extras/config-resources/mariadb/db-sql-scripts/bulkload-files/default/create_mso_db-default.sql b/packages/root-pack-extras/config-resources/mariadb/db-sql-scripts/bulkload-files/default/create_mso_db-default.sql index 2165041f5c..34deb08782 100644 --- a/packages/root-pack-extras/config-resources/mariadb/db-sql-scripts/bulkload-files/default/create_mso_db-default.sql +++ b/packages/root-pack-extras/config-resources/mariadb/db-sql-scripts/bulkload-files/default/create_mso_db-default.sql @@ -84,8 +84,11 @@ INSERT INTO `VNF_COMPONENTS_RECIPE` UNLOCK TABLES; INSERT INTO service (id, SERVICE_NAME, VERSION_STR, DESCRIPTION, SERVICE_NAME_VERSION_ID) VALUES ('4', 'VID_DEFAULT', '1.0', 'Default service for VID to use for infra APIH orchestration', 'MANUAL_RECORD'); +INSERT INTO service (id, SERVICE_NAME, VERSION_STR, DESCRIPTION, SERVICE_NAME_VERSION_ID) VALUES ('5', '*', '1.0', 'Default service to use for infra APIH orchestration', 'MANUAL_RECORD'); INSERT INTO service_recipe (SERVICE_ID, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('4', 'createInstance', '1', 'VID_DEFAULT recipe to create service-instance if no custom BPMN flow is found', '/mso/async/services/CreateGenericALaCarteServiceInstance', '180'); INSERT INTO service_recipe (SERVICE_ID, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('4', 'deleteInstance', '1', 'VID_DEFAULT recipe to delete service-instance if no custom BPMN flow is found', '/mso/async/services/DeleteGenericALaCarteServiceInstance', '180'); +INSERT INTO service_recipe (SERVICE_ID, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('5', 'createInstance', '1', 'DEFAULT recipe to create service-instance if no custom BPMN flow is found', '/mso/async/services/CreateGenericALaCarteServiceInstance', '180'); +INSERT INTO service_recipe (SERVICE_ID, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('5', 
'deleteInstance', '1', 'DEFAULT recipe to delete service-instance if no custom BPMN flow is found', '/mso/async/services/DeleteGenericALaCarteServiceInstance', '180'); INSERT INTO vnf_recipe (VNF_TYPE, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('VID_DEFAULT', 'createInstance', '1', 'VID_DEFAULT recipe to create VNF if no custom BPMN flow is found', '/mso/async/services/CreateVnfInfra', '180'); INSERT INTO vnf_recipe (VNF_TYPE, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('VID_DEFAULT', 'deleteInstance', '1', 'VID_DEFAULT recipe to delete VNF if no custom BPMN flow is found', '/mso/async/services/DeleteVnfInfra', '180'); INSERT INTO vnf_components_recipe (VNF_TYPE, VNF_COMPONENT_TYPE, ACTION, VERSION, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT, VF_MODULE_ID) VALUES (NULL, 'volumeGroup', 'createInstance', '1', 'VID_DEFAULT recipe to create volume-group if no custom BPMN flow is found', '/mso/async/services/CreateVfModuleVolumeInfraV1', '180', 'VID_DEFAULT'); @@ -1,8 +1,8 @@ <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> - <groupId>org.openecomp</groupId> - <artifactId>mso</artifactId> + <groupId>org.openecomp.so</groupId> + <artifactId>so</artifactId> <packaging>pom</packaging> <version>1.1.0-SNAPSHOT</version> <name>MSO main project</name> @@ -38,13 +38,13 @@ <org.apache.maven.user-settings></org.apache.maven.user-settings> <!-- this is used for Chef mso-code cookbook --> <swm.version>2.19.3-1</swm.version> - <openstack.version>1.1.0</openstack.version> + <openstack.version>1.1.0-SNAPSHOT</openstack.version> <evosuiteVersion>1.0.4-alpha2</evosuiteVersion> <nexusproxy>https://nexus.onap.org</nexusproxy> <snapshotNexusPath>/content/repositories/snapshots/</snapshotNexusPath> 
<releaseNexusPath>/content/repositories/releases/</releaseNexusPath> <stagingNexusPath>/content/repositories/staging/</stagingNexusPath> - <sitePath>/content/sites/site/org/onap/mso/${project.version}</sitePath> + <sitePath>/content/sites/site/org/onap/so/${project.version}</sitePath> <maven.build.timestamp.format>yyyyMMdd'T'HHmm</maven.build.timestamp.format> </properties> <distributionManagement> diff --git a/status-control/pom.xml b/status-control/pom.xml index 212d5ee2f6..5a507929e7 100644 --- a/status-control/pom.xml +++ b/status-control/pom.xml @@ -4,12 +4,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
- <groupId>org.openecomp</groupId>
- <artifactId>mso</artifactId>
+ <groupId>org.openecomp.so</groupId>
+ <artifactId>so</artifactId>
<version>1.1.0-SNAPSHOT</version> </parent>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>status-control</artifactId>
<name>MSO Status Control module</name>
<description>Contains classes to update and query the MSO status per site</description>
@@ -22,17 +22,17 @@ <scope>test</scope>
</dependency>
<dependency>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>mso-catalog-db</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
- <groupId>org.openecomp.mso</groupId>
+ <groupId>org.openecomp.so</groupId>
<artifactId>mso-requests-db</artifactId>
<version>${project.version}</version>
</dependency>
@@ -70,4 +70,4 @@ </dependency>
</dependencies>
-</project>
\ No newline at end of file +</project> |