Diffstat (limited to 'src/main')
158 files changed, 22735 insertions, 0 deletions
diff --git a/src/main/assembly/descriptor.xml b/src/main/assembly/descriptor.xml
new file mode 100644
index 0000000..91e8e18
--- /dev/null
+++ b/src/main/assembly/descriptor.xml
@@ -0,0 +1,32 @@
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2 http://maven.apache.org/xsd/assembly-1.1.2.xsd">
+ <id>build</id>
+ <includeBaseDirectory>false</includeBaseDirectory>
+ <formats>
+ <format>dir</format>
+ </formats>
+ <fileSets>
+ <fileSet>
+ <directory>${project.basedir}/src/main/resources</directory>
+ <outputDirectory>/resources</outputDirectory>
+ <includes>
+ <include>**/*</include>
+ </includes>
+ </fileSet>
+ <fileSet>
+ <directory>${project.basedir}/src/main/scripts</directory>
+ <outputDirectory>/bin</outputDirectory>
+ <includes>
+ <include>**/*</include>
+ </includes>
+ </fileSet>
+ <fileSet>
+ <directory>${project.build.directory}</directory>
+ <outputDirectory>/lib</outputDirectory>
+ <includes>
+ <include>${project.artifactId}-${project.version}.jar</include>
+ </includes>
+ </fileSet>
+ </fileSets>
+</assembly>
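For orientation, an assembly descriptor like this is normally bound to the maven-assembly-plugin in the project's pom.xml. A minimal sketch of that wiring (the plugin version, execution id, and phase here are illustrative assumptions, not taken from this commit):

    <plugin>
      <groupId>org.apache.maven.plugins</groupId>
      <artifactId>maven-assembly-plugin</artifactId>
      <executions>
        <execution>
          <id>make-dir-assembly</id>
          <phase>package</phase>
          <goals>
            <goal>single</goal>
          </goals>
          <configuration>
            <descriptors>
              <descriptor>src/main/assembly/descriptor.xml</descriptor>
            </descriptors>
          </configuration>
        </execution>
      </executions>
    </plugin>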
diff --git a/src/main/docker/Dockerfile b/src/main/docker/Dockerfile
new file mode 100755
index 0000000..f1454e3
--- /dev/null
+++ b/src/main/docker/Dockerfile
@@ -0,0 +1,22 @@
+FROM aaionap/aai-common:1.3.0
+
+# Add the proper files into the docker image from your build
+WORKDIR /opt/app/aai-graphadmin
+
+# Expose the ports for outside linux to use
+# 8449 is the important one to be used
+EXPOSE 8449
+
+HEALTHCHECK --interval=40s --timeout=10s --retries=3 CMD nc -z -v localhost 8449 || exit 1
+
+ENTRYPOINT ["/bin/bash", "/opt/app/aai-graphadmin/docker-entrypoint.sh"]
+
+RUN mkdir -p /opt/aaihome/aaiadmin /opt/aai/logroot/AAI-GA
+
+VOLUME /opt/aai/logroot/AAI-GA
+VOLUME /opt/data
+VOLUME /opt/tools
+
+COPY /maven/aai-graphadmin/ .
+
+ENV AAI_BUILD_VERSION @aai.docker.version@
diff --git a/src/main/docker/aai.sh b/src/main/docker/aai.sh
new file mode 100644
index 0000000..f68dc21
--- /dev/null
+++ b/src/main/docker/aai.sh
@@ -0,0 +1,44 @@
+#
+# ============LICENSE_START=======================================================
+# org.onap.aai
+# ================================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+
+PROJECT_HOME=/opt/app/aai-graphadmin
+export PROJECT_HOME
+
+JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
+export JAVA_HOME
+
+AAIENV=dev
+export AAIENV
+
+PATH=/usr/lib/jvm/java-8-openjdk-amd64:$PATH
+
+PROJECT_OWNER=aaiadmin
+PROJECT_GROUP=aaiadmin
+PROJECT_UNIXHOMEROOT=/opt/aaihome
+export PROJECT_OWNER PROJECT_GROUP PROJECT_UNIXHOMEROOT
+umask 0022
+
+export idns_api_url=
+export idnscred=
+export idnstenant=
+
+
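As a usage sketch (the image tag and user/group ids are illustrative assumptions), the image built from this Dockerfile would be run roughly as follows; with no arguments the entrypoint below starts the Spring Boot service on SERVER_PORT:

    # hypothetical local tag; the health check probes port 8449 inside the container
    docker build -t aai-graphadmin:local .
    docker run -d -p 8449:8449 \
        -e LOCAL_USER_ID=$(id -u) -e LOCAL_GROUP_ID=$(id -g) \
        aai-graphadmin:local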
diff --git a/src/main/docker/docker-entrypoint.sh b/src/main/docker/docker-entrypoint.sh
new file mode 100644
index 0000000..2f90ce5
--- /dev/null
+++ b/src/main/docker/docker-entrypoint.sh
@@ -0,0 +1,134 @@
+###
+# ============LICENSE_START=======================================================
+# org.onap.aai
+# ================================================================================
+# Copyright (C) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+###
+
+APP_HOME=$(pwd);
+RESOURCES_HOME=${APP_HOME}/resources/;
+
+export SERVER_PORT=${SERVER_PORT:-8449};
+
+USER_ID=${LOCAL_USER_ID:-9001}
+GROUP_ID=${LOCAL_GROUP_ID:-9001}
+
+echo "Project Build Version: ${AAI_BUILD_VERSION}";
+
+if [ $(cat /etc/passwd | grep aaiadmin | wc -l) -eq 0 ]; then
+
+    groupadd aaiadmin -g ${GROUP_ID} || {
+        echo "Unable to create the group id for ${GROUP_ID}";
+        exit 1;
+    }
+    useradd --shell=/bin/bash -u ${USER_ID} -g ${GROUP_ID} -o -c "" -m aaiadmin || {
+        echo "Unable to create the user id for ${USER_ID}";
+        exit 1;
+    }
+fi;
+
+chown -R aaiadmin:aaiadmin /opt/app /opt/aai/logroot /var/chef
+find /opt/app/ -name "*.sh" -exec chmod +x {} +
+
+if [ -f ${APP_HOME}/aai.sh ]; then
+    gosu aaiadmin ln -s bin scripts
+    gosu aaiadmin ln -s /opt/aai/logroot/AAI-GA logs
+
+    mv ${APP_HOME}/aai.sh /etc/profile.d/aai.sh
+    chmod 755 /etc/profile.d/aai.sh
+
+    scriptName=$1;
+
+    if [ ! -z $scriptName ]; then
+
+        if [ -f ${APP_HOME}/bin/${scriptName} ]; then
+            shift 1;
+            gosu aaiadmin ${APP_HOME}/bin/${scriptName} "$@" || {
+                echo "Failed to run the ${scriptName}";
+                exit 1;
+            }
+        else
+            echo "Unable to find the script ${scriptName} in ${APP_HOME}/bin";
+            exit 1;
+        fi;
+
+        exit 0;
+    fi;
+
+fi;
+
+mkdir -p /opt/app/aai-graphadmin/logs/gc
+chown -R aaiadmin:aaiadmin /opt/app/aai-graphadmin/logs/gc
+
+if [ -f ${APP_HOME}/resources/aai-graphadmin-swm-vars.sh ]; then
+    source ${APP_HOME}/resources/aai-graphadmin-swm-vars.sh;
+fi;
+
+MIN_HEAP_SIZE=${MIN_HEAP_SIZE:-512m};
+MAX_HEAP_SIZE=${MAX_HEAP_SIZE:-1024m};
+MAX_PERM_SIZE=${MAX_PERM_SIZE:-512m};
+PERM_SIZE=${PERM_SIZE:-512m};
+
+JAVA_CMD="exec gosu aaiadmin java";
+
+JVM_OPTS="${PRE_JVM_ARGS} -Xloggc:/opt/app/aai-graphadmin/logs/gc/aai_gc.log";
+JVM_OPTS="${JVM_OPTS} -XX:HeapDumpPath=/opt/app/aai-graphadmin/logs/ajsc-jetty/heap-dump";
+JVM_OPTS="${JVM_OPTS} -Xms${MIN_HEAP_SIZE}";
+JVM_OPTS="${JVM_OPTS} -Xmx${MAX_HEAP_SIZE}";
+
+JVM_OPTS="${JVM_OPTS} -XX:+PrintGCDetails";
+JVM_OPTS="${JVM_OPTS} -XX:+PrintGCTimeStamps";
+JVM_OPTS="${JVM_OPTS} -XX:MaxPermSize=${MAX_PERM_SIZE}";
+JVM_OPTS="${JVM_OPTS} -XX:PermSize=${PERM_SIZE}";
+
+JVM_OPTS="${JVM_OPTS} -server";
+JVM_OPTS="${JVM_OPTS} -XX:NewSize=512m";
+JVM_OPTS="${JVM_OPTS} -XX:MaxNewSize=512m";
+JVM_OPTS="${JVM_OPTS} -XX:SurvivorRatio=8";
+JVM_OPTS="${JVM_OPTS} -XX:+DisableExplicitGC";
+JVM_OPTS="${JVM_OPTS} -verbose:gc";
+JVM_OPTS="${JVM_OPTS} -XX:+UseParNewGC";
+JVM_OPTS="${JVM_OPTS} -XX:+CMSParallelRemarkEnabled";
+JVM_OPTS="${JVM_OPTS} -XX:+CMSClassUnloadingEnabled";
+JVM_OPTS="${JVM_OPTS} -XX:+UseConcMarkSweepGC";
+JVM_OPTS="${JVM_OPTS} -XX:-UseBiasedLocking";
+JVM_OPTS="${JVM_OPTS} -XX:ParallelGCThreads=4";
+JVM_OPTS="${JVM_OPTS} -XX:LargePageSizeInBytes=128m";
+JVM_OPTS="${JVM_OPTS} -XX:+PrintGCDetails";
+JVM_OPTS="${JVM_OPTS} -XX:+PrintGCTimeStamps";
+JVM_OPTS="${JVM_OPTS} -Dsun.net.inetaddr.ttl=180";
+JVM_OPTS="${JVM_OPTS} -XX:+HeapDumpOnOutOfMemoryError";
+JVM_OPTS="${JVM_OPTS} ${POST_JVM_ARGS}";
+JAVA_OPTS="${PRE_JAVA_OPTS} -DAJSC_HOME=$APP_HOME";
+if [ -f ${INTROSCOPE_LIB}/Agent.jar ] && [ -f ${INTROSCOPE_AGENTPROFILE} ]; then
+    JAVA_OPTS="${JAVA_OPTS} -javaagent:${INTROSCOPE_LIB}/Agent.jar -noverify -Dcom.wily.introscope.agentProfile=${INTROSCOPE_AGENTPROFILE} -Dintroscope.agent.agentName=graphadmin"
+fi
+JAVA_OPTS="${JAVA_OPTS} -Dserver.port=${SERVER_PORT}";
+JAVA_OPTS="${JAVA_OPTS} -DBUNDLECONFIG_DIR=./resources";
+JAVA_OPTS="${JAVA_OPTS} -Dserver.local.startpath=${RESOURCES_HOME}";
+JAVA_OPTS="${JAVA_OPTS} -DAAI_CHEF_ENV=${AAI_CHEF_ENV}";
+JAVA_OPTS="${JAVA_OPTS} -DSCLD_ENV=${SCLD_ENV}";
+JAVA_OPTS="${JAVA_OPTS} -DAFT_ENVIRONMENT=${AFT_ENVIRONMENT}";
+JAVA_OPTS="${JAVA_OPTS} -DlrmName=com.att.ajsc.aai-graphadmin";
+JAVA_OPTS="${JAVA_OPTS} -DAAI_BUILD_VERSION=${AAI_BUILD_VERSION}";
+JAVA_OPTS="${JAVA_OPTS} -Djava.security.egd=file:/dev/./urandom";
+JAVA_OPTS="${JAVA_OPTS} -Dlogback.configurationFile=./resources/logback.xml";
+JAVA_OPTS="${JAVA_OPTS} -Dloader.path=$APP_HOME/resources";
+JAVA_OPTS="${JAVA_OPTS} ${POST_JAVA_OPTS}";
+
+JAVA_MAIN_JAR=$(ls lib/aai-graphadmin*.jar);
+
+${JAVA_CMD} ${JVM_OPTS} ${JAVA_OPTS} -jar ${JAVA_MAIN_JAR};
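The entrypoint doubles as a tool dispatcher: when a first argument is given and it matches a script under ${APP_HOME}/bin, that script is run as aaiadmin instead of starting the server. A hypothetical invocation (the script name is an assumption for illustration; any flags are passed through to the script):

    docker run --rm aai-graphadmin:local dataGrooming.sh -autoFix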
diff --git a/src/main/java/org/onap/aai/GraphAdminApp.java b/src/main/java/org/onap/aai/GraphAdminApp.java
new file mode 100644
index 0000000..aa9c457
--- /dev/null
+++ b/src/main/java/org/onap/aai/GraphAdminApp.java
@@ -0,0 +1,129 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai;
+
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+import org.onap.aai.config.PropertyPasswordConfiguration;
+import org.onap.aai.dbmap.AAIGraph;
+import org.onap.aai.logging.LoggingContext;
+import org.onap.aai.nodes.NodeIngestor;
+import org.onap.aai.util.AAIConfig;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.SpringApplication;
+import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.boot.autoconfigure.SpringBootApplication;
+import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
+import org.springframework.boot.autoconfigure.orm.jpa.HibernateJpaAutoConfiguration;
+import org.springframework.context.annotation.ComponentScan;
+import org.springframework.core.env.Environment;
+import org.springframework.scheduling.annotation.EnableAsync;
+import org.springframework.scheduling.annotation.EnableScheduling;
+
+import javax.annotation.PostConstruct;
+import javax.annotation.PreDestroy;
+import java.util.UUID;
+
+@SpringBootApplication
+// Scan the specific packages that have the beans/components
+// This will add the ScheduledTask that was created in aai-common
+// Add more packages where you would need to scan for files
+@ComponentScan(basePackages = {
+    "org.onap.aai.tasks",
+    "org.onap.aai.config",
+    "org.onap.aai.service",
+    "org.onap.aai.setup",
+    "org.onap.aai.rest",
+    "org.onap.aai.web",
+    "org.onap.aai.interceptors",
+    "org.onap.aai.datasnapshot",
+    "org.onap.aai.datagrooming",
+    "org.onap.aai.datacleanup"
+})
+@EnableAsync
+@EnableScheduling
+@EnableAutoConfiguration(exclude = {DataSourceAutoConfiguration.class, HibernateJpaAutoConfiguration.class})
+public class GraphAdminApp {
+
+    public static final String APP_NAME = "GraphAdmin";
+    private static final EELFLogger LOGGER = EELFManager.getInstance().getLogger(GraphAdminApp.class);
+
+    @Autowired
+    private Environment env;
+
+    @Autowired
+    private NodeIngestor nodeIngestor;
+
+    @PostConstruct
+    private void initialize(){
+        loadDefaultProps();
+
+        LoggingContext.save();
+        LoggingContext.component("init");
+        LoggingContext.partnerName("NA");
+        LoggingContext.targetEntity(APP_NAME);
+        LoggingContext.requestId(UUID.randomUUID().toString());
+        LoggingContext.serviceName(APP_NAME);
+        LoggingContext.targetServiceName("contextInitialized");
+    }
+
+    @PreDestroy
+    public void cleanup(){
+        AAIGraph.getInstance().graphShutdown();
+    }
+
+    public static void main(String[] args) throws Exception {
+
+        loadDefaultProps();
+        SpringApplication app = new SpringApplication(GraphAdminApp.class);
+        app.setRegisterShutdownHook(true);
+        app.addInitializers(new PropertyPasswordConfiguration());
+        Environment env = app.run(args).getEnvironment();
+
+        LOGGER.info(
+            "Application '{}' is running on {}!",
+            env.getProperty("spring.application.name"),
+            env.getProperty("server.port")
+        );
+        // The main reason this was moved out of the constructor is that the
+        // SchemaGenerator needs the bean, and during the constructor the
+        // Spring context is not yet initialized
+
+        AAIConfig.init();
+        AAIGraph.getInstance();
+
+        System.setProperty("org.onap.aai.graphadmin.started", "true");
+        LOGGER.info("GraphAdmin MicroService Started");
+        LOGGER.error("GraphAdmin MicroService Started");
+        LOGGER.debug("GraphAdmin MicroService Started");
+        System.out.println("GraphAdmin Microservice Started");
+    }
+
+    public static void loadDefaultProps(){
+
+        if(System.getProperty("AJSC_HOME") == null){
+            System.setProperty("AJSC_HOME", ".");
+        }
+
+        if(System.getProperty("BUNDLECONFIG_DIR") == null){
+            System.setProperty("BUNDLECONFIG_DIR", "src/main/resources");
+        }
+    }
+}
diff --git a/src/main/java/org/onap/aai/Profiles.java b/src/main/java/org/onap/aai/Profiles.java
new file mode 100644
index 0000000..f0419d8
--- /dev/null
+++ b/src/main/java/org/onap/aai/Profiles.java
@@ -0,0 +1,31 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai;
+
+public final class Profiles {
+
+    public static final String DMAAP = "dmaap";
+    public static final String DME2 = "dme2";
+
+    public static final String ONE_WAY_SSL = "one-way-ssl";
+    public static final String TWO_WAY_SSL = "two-way-ssl";
+
+    private Profiles(){}
+}
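These constants are typically consumed by Spring's @Profile annotation when wiring environment-specific beans. A minimal sketch of such a consumer (the class and bean below are hypothetical, not part of this commit):

    import org.onap.aai.Profiles;
    import org.springframework.context.annotation.Bean;
    import org.springframework.context.annotation.Configuration;
    import org.springframework.context.annotation.Profile;

    @Configuration
    public class ExampleSslConfig {

        // Only instantiated when the app runs with --spring.profiles.active=two-way-ssl
        @Bean
        @Profile(Profiles.TWO_WAY_SSL)
        public String clientAuthMarker() {
            return "two-way-ssl enabled";
        }
    }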
diff --git a/src/main/java/org/onap/aai/config/AuditorConfiguration.java b/src/main/java/org/onap/aai/config/AuditorConfiguration.java
new file mode 100644
index 0000000..9377393
--- /dev/null
+++ b/src/main/java/org/onap/aai/config/AuditorConfiguration.java
@@ -0,0 +1,34 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.config;
+
+import org.onap.aai.db.schema.AuditorFactory;
+import org.onap.aai.introspection.LoaderFactory;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+@Configuration
+public class AuditorConfiguration {
+
+    @Bean
+    public AuditorFactory auditorFactory(LoaderFactory loaderFactory){
+        return new AuditorFactory(loaderFactory);
+    }
+}
diff --git a/src/main/java/org/onap/aai/config/DslConfiguration.java b/src/main/java/org/onap/aai/config/DslConfiguration.java
new file mode 100644
index 0000000..74bc046
--- /dev/null
+++ b/src/main/java/org/onap/aai/config/DslConfiguration.java
@@ -0,0 +1,44 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.config;
+
+import org.onap.aai.edges.EdgeIngestor;
+import org.onap.aai.rest.dsl.DslListener;
+import org.onap.aai.rest.dsl.DslQueryProcessor;
+import org.springframework.beans.factory.config.ConfigurableBeanFactory;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.Scope;
+
+@Configuration
+public class DslConfiguration {
+
+    @Bean
+    @Scope(scopeName = ConfigurableBeanFactory.SCOPE_PROTOTYPE)
+    public DslListener dslListener(EdgeIngestor edgeIngestor){
+        return new DslListener(edgeIngestor);
+    }
+
+    @Bean
+    @Scope(scopeName = ConfigurableBeanFactory.SCOPE_PROTOTYPE)
+    public DslQueryProcessor dslQueryProcessor(DslListener dslListener){
+        return new DslQueryProcessor(dslListener);
+    }
+}
diff --git a/src/main/java/org/onap/aai/config/JettyPasswordDecoder.java b/src/main/java/org/onap/aai/config/JettyPasswordDecoder.java
new file mode 100644
index 0000000..944f951
--- /dev/null
+++ b/src/main/java/org/onap/aai/config/JettyPasswordDecoder.java
@@ -0,0 +1,33 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.config;
+
+import org.eclipse.jetty.util.security.Password;
+
+public class JettyPasswordDecoder implements PasswordDecoder {
+
+    @Override
+    public String decode(String input) {
+        if (input.startsWith("OBF:")) {
+            return Password.deobfuscate(input);
+        }
+        return Password.deobfuscate("OBF:" + input);
+    }
+}
diff --git a/src/main/java/org/onap/aai/config/PasswordDecoder.java b/src/main/java/org/onap/aai/config/PasswordDecoder.java
new file mode 100644
index 0000000..0dcb845
--- /dev/null
+++ b/src/main/java/org/onap/aai/config/PasswordDecoder.java
@@ -0,0 +1,25 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.config;
+
+public interface PasswordDecoder {
+
+    String decode(String input);
+}
diff --git a/src/main/java/org/onap/aai/config/PropertyPasswordConfiguration.java b/src/main/java/org/onap/aai/config/PropertyPasswordConfiguration.java
new file mode 100644
index 0000000..9befb13
--- /dev/null
+++ b/src/main/java/org/onap/aai/config/PropertyPasswordConfiguration.java
@@ -0,0 +1,78 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.config;
+
+import org.springframework.context.ApplicationContextInitializer;
+import org.springframework.context.ConfigurableApplicationContext;
+import org.springframework.core.env.ConfigurableEnvironment;
+import org.springframework.core.env.EnumerablePropertySource;
+import org.springframework.core.env.MapPropertySource;
+import org.springframework.core.env.PropertySource;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class PropertyPasswordConfiguration implements ApplicationContextInitializer<ConfigurableApplicationContext> {
+
+    private static final Pattern decodePasswordPattern = Pattern.compile("password\\((.*?)\\)");
+
+    private PasswordDecoder passwordDecoder = new JettyPasswordDecoder();
+
+    @Override
+    public void initialize(ConfigurableApplicationContext applicationContext) {
+        ConfigurableEnvironment environment = applicationContext.getEnvironment();
+        for (PropertySource<?> propertySource : environment.getPropertySources()) {
+            Map<String, Object> propertyOverrides = new LinkedHashMap<>();
+            decodePasswords(propertySource, propertyOverrides);
+            if (!propertyOverrides.isEmpty()) {
+                PropertySource<?> decodedProperties = new MapPropertySource("decoded "+ propertySource.getName(), propertyOverrides);
+                environment.getPropertySources().addBefore(propertySource.getName(), decodedProperties);
+            }
+        }
+    }
+
+    private void decodePasswords(PropertySource<?> source, Map<String, Object> propertyOverrides) {
+        if (source instanceof EnumerablePropertySource) {
+            EnumerablePropertySource<?> enumerablePropertySource = (EnumerablePropertySource<?>) source;
+            for (String key : enumerablePropertySource.getPropertyNames()) {
+                Object rawValue = source.getProperty(key);
+                if (rawValue instanceof String) {
+                    String decodedValue = decodePasswordsInString((String) rawValue);
+                    propertyOverrides.put(key, decodedValue);
+                }
+            }
+        }
+    }
+
+    private String decodePasswordsInString(String input) {
+        if (input == null) return null;
+        StringBuffer output = new StringBuffer();
+        Matcher matcher = decodePasswordPattern.matcher(input);
+        while (matcher.find()) {
+            String replacement = passwordDecoder.decode(matcher.group(1));
+            matcher.appendReplacement(output, replacement);
+        }
+        matcher.appendTail(output);
+        return output.toString();
+    }
+
+}
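Taken together, these classes let reversible, Jetty-obfuscated secrets appear in property files wrapped as password(...). A minimal round-trip sketch (a hypothetical standalone class, not part of this commit):

    import org.eclipse.jetty.util.security.Password;
    import org.onap.aai.config.JettyPasswordDecoder;
    import org.onap.aai.config.PasswordDecoder;

    public class ObfDemo {
        public static void main(String[] args) {
            // Password.obfuscate produces a reversible "OBF:..." form (obfuscation, not encryption)
            String obf = Password.obfuscate("secret");
            PasswordDecoder decoder = new JettyPasswordDecoder();
            System.out.println(decoder.decode(obf)); // prints "secret"
            // In a property file the same value would be wrapped so that
            // PropertyPasswordConfiguration can find and decode it, e.g.:
            //   keystore.password=password(OBF:...)
        }
    }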
diff --git a/src/main/java/org/onap/aai/datacleanup/DataCleanupTasks.java b/src/main/java/org/onap/aai/datacleanup/DataCleanupTasks.java
new file mode 100644
index 0000000..a3dc708
--- /dev/null
+++ b/src/main/java/org/onap/aai/datacleanup/DataCleanupTasks.java
@@ -0,0 +1,317 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.datacleanup;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.nio.file.attribute.FileTime;
+import java.text.DateFormat;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.time.ZoneId;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipOutputStream;
+
+import org.onap.aai.exceptions.AAIException;
+import org.springframework.context.annotation.PropertySource;
+import org.springframework.scheduling.annotation.Scheduled;
+import org.springframework.stereotype.Component;
+
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+
+import org.onap.aai.logging.ErrorLogHelper;
+import org.onap.aai.logging.LoggingContext;
+import org.onap.aai.logging.LoggingContext.StatusCode;
+import org.onap.aai.util.AAIConfig;
+import org.onap.aai.util.AAIConstants;
+
+@Component
+@PropertySource("file:${server.local.startpath}/etc/appprops/datatoolscrons.properties")
+public class DataCleanupTasks {
+
+ private static final EELFLogger logger = EELFManager.getInstance().getLogger(DataCleanupTasks.class);
+ private static final SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyyMMdd");
+
+ /**The function archives/deletes files that end in .out (e.g. dataGrooming.201511111305.out) that sit in our log/data directory structure.
+ logDir is the {project_home}/logs
+ archiveDir is the ARCHIVE directory where the files will be stored after ageZip days.
+ ageZip is the number of days (e.g. 5) after which a file will be moved to the ARCHIVE folder.
+ ageDelete is the number of days (e.g. 30) after which the archived data files will be deleted.
+ */
+ @Scheduled(cron = "${datagroomingcleanup.cron}" )
+ public void dataGroomingCleanup() throws AAIException, Exception {
+
+ logger.info("Started cron job dataGroomingCleanup @ " + simpleDateFormat.format(new Date()));
+
+ try {
+ String logDir = AAIConstants.AAI_HOME + AAIConstants.AAI_FILESEP + "logs";
+ String dataGroomingDir = logDir + AAIConstants.AAI_FILESEP + "data" + AAIConstants.AAI_FILESEP + "dataGrooming";
+ String archiveDir = dataGroomingDir + AAIConstants.AAI_FILESEP + "ARCHIVE";
+ String dataGroomingArcDir = archiveDir + AAIConstants.AAI_FILESEP + "dataGrooming";
+ File path = new File(dataGroomingDir);
+ File dataGroomingPath = new File(dataGroomingArcDir);
+
+ logger.info("The logDir is " + logDir);
+ logger.info("The dataGroomingDir is " + dataGroomingDir);
+ logger.info("The archiveDir is " + archiveDir );
+ logger.info("The dataGroomingArcDir is " + dataGroomingArcDir );
+
+ boolean exists = directoryExists(logDir);
+ logger.info("Directory" + logDir + "exists: " + exists);
+ if(exists == false)
+ logger.error("The directory" + logDir +"does not exists");
+
+ Integer ageZip = AAIConfig.getInt("aai.datagrooming.agezip");
+ Integer ageDelete = AAIConfig.getInt("aai.datagrooming.agedelete");
+
+ Date newAgeZip = getZipDate(ageZip);
+
+ //Iterate through the dataGroomingDir
+ File[] listFiles = path.listFiles();
+ if(listFiles != null) {
+ for(File listFile : listFiles) {
+ if (listFile.toString().contains("ARCHIVE")){
+ continue;
+ }
+ if(listFile.isFile()){
+ logger.info("The file name in dataGrooming: " +listFile.getName());
+ Date fileCreateDate = fileCreationMonthDate(listFile);
+ logger.info("The fileCreateDate in dataGrooming is " + fileCreateDate);
+ if( fileCreateDate.compareTo(newAgeZip) < 0) {
+ archive(listFile,archiveDir,dataGroomingArcDir);
+ }
+ }
+ }
+ }
+
+ Date newAgeDelete = getZipDate(ageDelete);
+ //Iterate through the archive/dataGrooming dir
+ File[] listFilesArchive = dataGroomingPath.listFiles();
+ if(listFilesArchive != null) {
+ for(File listFileArchive : listFilesArchive) {
+ if(listFileArchive.isFile()) {
+ logger.info("The file name in ARCHIVE/dataGrooming: " +listFileArchive.getName());
+ Date fileCreateDate = fileCreationMonthDate(listFileArchive);
+ logger.info("The fileCreateDate in ARCHIVE/dataGrooming is " + fileCreateDate);
+ if(fileCreateDate.compareTo(newAgeDelete) < 0) {
+ delete(listFileArchive);
+ }
+ }
+ }
+ }
+ }
+ catch (Exception e) {
+ ErrorLogHelper.logError("AAI_4000", "Exception running cron job for DataCleanup"+e.toString());
+ logger.info("AAI_4000", "Exception running cron job for DataCleanup"+e.toString());
+ throw e;
+ }
+ }
+
+ /**
+ * This method checks if the directory exists
+ * @param dir the directory path to check
+ *
+ */
+ public boolean directoryExists(String dir) {
+ File path = new File(dir);
+ return path.exists();
+ }
+
+ public Date getZipDate(Integer days) throws Exception {
+ return getZipDate(days, new Date());
+ }
+
+ public Date getZipDate(Integer days, Date date) throws Exception{
+
+ Calendar cal = Calendar.getInstance();
+ logger.info("The current date is " + date );
+ cal.setTime(date);
+ cal.add(Calendar.DATE, -days);
+ Date newAgeZip = cal.getTime();
+ logger.info("The newAgeDate is " +newAgeZip);
+ return newAgeZip;
+ }
+
+
+ public Date fileCreationMonthDate (File file) throws Exception {
+
+ BasicFileAttributes attr = Files.readAttributes(file.toPath(),
+ BasicFileAttributes.class);
+ FileTime time = attr.creationTime();
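+ // Note: formatting with "yyyyMMdd" and re-parsing truncates the creation time to day granularity for the age comparison.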
+ String formatted = simpleDateFormat.format( new Date( time.toMillis() ) );
+ Date d = simpleDateFormat.parse(formatted);
+ return d;
+ }
+
+ /**
+ * This method zips the given file and adds it to the archive folder.
+ * It checks whether the archive folder exists and creates it if it does not.
+ * After the file has been archived, the original file is deleted.
+ * @throws AAIException
+ * @throws Exception
+ */
+ public void archive(File file, String archiveDir, String afterArchiveDir) throws AAIException, Exception {
+
+ logger.info("Inside the archive folder");
+ String filename = file.getName();
+ logger.info("file name is " +filename);
+ File archivepath = new File(archiveDir);
+
+ String zipFile = afterArchiveDir + AAIConstants.AAI_FILESEP + filename;
+
+ File dataGroomingPath = new File(afterArchiveDir);
+
+ boolean exists = directoryExists(archiveDir);
+ logger.info("Directory " + archiveDir + " exists: " + exists);
+ if(!exists) {
+ logger.info("The directory " + archiveDir + " does not exist, so a new archive folder will be created");
+ //Create the archive folder if it does not exist
+ boolean flag = dataGroomingPath.mkdirs();
+ if(!flag)
+ logger.error("Failed to create ARCHIVE folder");
+ }
+ try (FileOutputStream outputstream = new FileOutputStream(zipFile + ".gz");
+ ZipOutputStream zoutputstream = new ZipOutputStream(outputstream);
+ FileInputStream inputstream = new FileInputStream(file)) {
+ ZipEntry ze = new ZipEntry(file.getName());
+ zoutputstream.putNextEntry(ze);
+ //read the file and write to the zipOutputStream
+ byte[] buffer = new byte[1024];
+ int len;
+ while ((len = inputstream.read(buffer)) > 0) {
+ zoutputstream.write(buffer,0,len);
+ }
+ //the try-with-resources closes all the streams, even on failure
+ zoutputstream.closeEntry();
+ }
+ catch (IOException e) {
+ ErrorLogHelper.logError("AAI_4000", "Exception running cron job for DataCleanup: " + e.toString());
+ logger.info("AAI_4000", "Exception running cron job for DataCleanup", e);
+ throw e;
+ }
+ //Delete the original file once it has been added to the archive folder
+ delete(file);
+ logger.info("The file archived is " + file + " at " + afterArchiveDir );
+ }
+
+ /**
+ * This method deletes the given file.
+ * @param file the file to delete
+ */
+ public static void delete(File file) {
+
+ logger.info("Deleting the file " + file);
+ boolean deleteStatus = file.delete();
+ if(!deleteStatus){
+ logger.error("Failed to delete the file " + file);
+ }
+ }
+
+ /**The function archives/deletes dataSnapshot files that sit in our log/data directory structure.
+ logDir is the {project_home}/logs
+ archiveDir is the ARCHIVE directory where the files will be stored after ageZip days.
+ ageZip is the number of days (e.g. 5) after which a file will be moved to the ARCHIVE folder.
+ ageDelete is the number of days (e.g. 30) after which the archived data files will be deleted.
+*/
+ @Scheduled(cron = "${datasnapshotcleanup.cron}" )
+ public void dataSnapshotCleanup() throws AAIException, Exception {
+
+ logger.info("Started cron job dataSnapshotCleanup @ " + simpleDateFormat.format(new Date()));
+
+ try {
+ String logDir = AAIConstants.AAI_HOME + AAIConstants.AAI_FILESEP + "logs";
+ String dataSnapshotDir = logDir + AAIConstants.AAI_FILESEP + "data" + AAIConstants.AAI_FILESEP + "dataSnapshots";
+ String archiveDir = dataSnapshotDir + AAIConstants.AAI_FILESEP + "ARCHIVE";
+ String dataSnapshotArcDir = archiveDir + AAIConstants.AAI_FILESEP + "dataSnapshots";
+ File path = new File(dataSnapshotDir);
+ File dataSnapshotPath = new File(dataSnapshotArcDir);
+
+ logger.info("The logDir is " + logDir);
+ logger.info("The dataSnapshotDir is " + dataSnapshotDir);
+ logger.info("The archiveDir is " + archiveDir );
+ logger.info("The dataSnapshotArcDir is " + dataSnapshotArcDir );
+
+ boolean exists = directoryExists(logDir);
+ logger.info("Directory " + logDir + " exists: " + exists);
+ if(!exists)
+ logger.error("The directory " + logDir + " does not exist");
+
+ Integer ageZipSnapshot = AAIConfig.getInt("aai.datasnapshot.agezip");
+ Integer ageDeleteSnapshot = AAIConfig.getInt("aai.datasnapshot.agedelete");
+
+ Date newAgeZip = getZipDate(ageZipSnapshot);
+
+ //Iterate through the dataSnapshotDir
+ File[] listFiles = path.listFiles();
+ if(listFiles != null) {
+ for(File listFile : listFiles) {
+ if (listFile.toString().contains("ARCHIVE")){
+ continue;
+ }
+ if(listFile.isFile()){
+ logger.info("The file name in dataSnapshot: " +listFile.getName());
+ Date fileCreateDate = fileCreationMonthDate(listFile);
+ logger.info("The fileCreateDate in dataSnapshot is " + fileCreateDate);
+ if( fileCreateDate.compareTo(newAgeZip) < 0) {
+ archive(listFile,archiveDir,dataSnapshotArcDir);
+ }
+ }
+ }
+ }
+
+ Date newAgeDelete = getZipDate(ageDeleteSnapshot);
+ //Iterate through the archive/dataSnapshots dir
+ File[] listFilesArchive = dataSnapshotPath.listFiles();
+ if(listFilesArchive != null) {
+ for(File listFileArchive : listFilesArchive) {
+ if(listFileArchive.isFile()) {
+ logger.info("The file name in ARCHIVE/dataSnapshot: " +listFileArchive.getName());
+ Date fileCreateDate = fileCreationMonthDate(listFileArchive);
+ logger.info("The fileCreateDate in ARCHIVE/dataSnapshot is " + fileCreateDate);
+ if(fileCreateDate.compareTo(newAgeDelete) < 0) {
+ delete(listFileArchive);
+ }
+ }
+ }
+ }
+ }
+ catch (Exception e) {
+ ErrorLogHelper.logError("AAI_4000", "Exception running cron job for DataCleanup: " + e.toString());
+ logger.info("AAI_4000 Exception running cron job for DataCleanup: " + e.toString());
+ throw e;
+ }
+ }
+}
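These scheduled tasks read their cron expressions from etc/appprops/datatoolscrons.properties and their age thresholds through AAIConfig. A hypothetical configuration sketch (the property names come from the code above; every value is illustrative only):

    # datatoolscrons.properties -- Spring 6-field cron expressions
    datagroomingcleanup.cron=0 6 0 * * *
    datasnapshotcleanup.cron=0 17 0 * * *

    # aaiconfig.properties entries read via AAIConfig.getInt()
    aai.datagrooming.agezip=5
    aai.datagrooming.agedelete=30
    aai.datasnapshot.agezip=5
    aai.datasnapshot.agedelete=30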
diff --git a/src/main/java/org/onap/aai/datagrooming/DataGrooming.java b/src/main/java/org/onap/aai/datagrooming/DataGrooming.java new file mode 100644 index 0000000..6149dd9 --- /dev/null +++ b/src/main/java/org/onap/aai/datagrooming/DataGrooming.java @@ -0,0 +1,2853 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.datagrooming; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Properties; +import java.util.Set; +import java.util.UUID; + +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__; +import org.apache.tinkerpop.gremlin.structure.Direction; +import org.apache.tinkerpop.gremlin.structure.Edge; +import org.apache.tinkerpop.gremlin.structure.Graph; +import org.apache.tinkerpop.gremlin.structure.Property; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.apache.tinkerpop.gremlin.structure.VertexProperty; +import org.onap.aai.GraphAdminApp; +import org.onap.aai.dbmap.AAIGraph; +import org.onap.aai.dbmap.AAIGraphConfig; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.introspection.Introspector; +import org.onap.aai.introspection.Loader; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.introspection.ModelType; +import org.onap.aai.introspection.exceptions.AAIUnknownObjectException; +import org.onap.aai.logging.ErrorLogHelper; +import org.onap.aai.logging.LogFormatTools; +import org.onap.aai.logging.LoggingContext; +import org.onap.aai.edges.enums.AAIDirection; +import org.onap.aai.edges.enums.EdgeProperty; +import org.onap.aai.setup.SchemaVersions; +import org.onap.aai.util.*; +import org.onap.aai.logging.LoggingContext.StatusCode; + +import com.att.eelf.configuration.Configuration; +import com.att.eelf.configuration.EELFLogger; +import com.att.eelf.configuration.EELFManager; +import org.janusgraph.core.JanusGraphFactory; +import org.janusgraph.core.JanusGraph; +import org.springframework.context.annotation.AnnotationConfigApplicationContext; + + +public class DataGrooming { + + private static EELFLogger LOGGER = 
EELFManager.getInstance().getLogger(DataGrooming.class); + + private static final String FROMAPPID = "AAI-DB"; + private static final String TRANSID = UUID.randomUUID().toString(); + private int dupeGrpsDeleted = 0; + + private LoaderFactory loaderFactory; + private SchemaVersions schemaVersions; + + public DataGrooming(LoaderFactory loaderFactory, SchemaVersions schemaVersions){ + this.loaderFactory = loaderFactory; + this.schemaVersions = schemaVersions; + } + + public void execute(String[] args){ + + String ver = "version"; // Placeholder + Boolean doAutoFix = false; + Boolean edgesOnlyFlag = false; + Boolean dontFixOrphansFlag = false; + Boolean skipHostCheck = false; + Boolean singleCommits = false; + Boolean dupeCheckOff = false; + Boolean dupeFixOn = false; + Boolean ghost2CheckOff = false; + Boolean ghost2FixOn = false; + Boolean neverUseCache = false; + Boolean skipEdgeCheckFlag = false; + Boolean skipIndexUpdateFix = false; + + // A value of 0 means that we will not have a time-window -- we will look + // at all nodes of the passed-in nodeType. + int timeWindowMinutes = 0; + + int maxRecordsToFix = AAIConstants.AAI_GROOMING_DEFAULT_MAX_FIX; + int sleepMinutes = AAIConstants.AAI_GROOMING_DEFAULT_SLEEP_MINUTES; + try { + String maxFixStr = AAIConfig.get("aai.grooming.default.max.fix"); + if( maxFixStr != null && !maxFixStr.equals("") ){ + maxRecordsToFix = Integer.parseInt(maxFixStr); + } + String sleepStr = AAIConfig.get("aai.grooming.default.sleep.minutes"); + if( sleepStr != null && !sleepStr.equals("") ){ + sleepMinutes = Integer.parseInt(sleepStr); + } + } + catch ( Exception e ){ + // Don't worry, we'll just use the defaults that we got from AAIConstants + LOGGER.warn("WARNING - could not pick up aai.grooming values from aaiconfig.properties file. "); + } + + String prevFileName = ""; + String singleNodeType = ""; + dupeGrpsDeleted = 0; + FormatDate fd = new FormatDate("yyyyMMddHHmm", "GMT"); + String dteStr = fd.getDateTime(); + + if (args.length > 0) { + // They passed some arguments in that will affect processing + for (int i = 0; i < args.length; i++) { + String thisArg = args[i]; + if (thisArg.equals("-edgesOnly")) { + edgesOnlyFlag = true; + } else if (thisArg.equals("-autoFix")) { + doAutoFix = true; + } else if (thisArg.equals("-skipHostCheck")) { + skipHostCheck = true; + } else if (thisArg.equals("-dontFixOrphans")) { + dontFixOrphansFlag = true; + } else if (thisArg.equals("-singleCommits")) { + singleCommits = true; + } else if (thisArg.equals("-dupeCheckOff")) { + dupeCheckOff = true; + } else if (thisArg.equals("-dupeFixOn")) { + dupeFixOn = true; + } else if (thisArg.equals("-ghost2CheckOff")) { + ghost2CheckOff = true; + } else if (thisArg.equals("-neverUseCache")) { + neverUseCache = true; + } else if (thisArg.equals("-ghost2FixOn")) { + ghost2FixOn = true; + } else if (thisArg.equals("-skipEdgeChecks")) { + skipEdgeCheckFlag = true; + } else if (thisArg.equals("-skipIndexUpdateFix")) { + skipIndexUpdateFix = true; + } else if (thisArg.equals("-maxFix")) { + i++; + if (i >= args.length) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + LOGGER.error(" No value passed with -maxFix option. 
"); + AAISystemExitUtil.systemExitCloseAAIGraph(0); + } + String nextArg = args[i]; + try { + maxRecordsToFix = Integer.parseInt(nextArg); + } catch (Exception e) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + LOGGER.error("Bad value passed with -maxFix option: [" + + nextArg + "]"); + AAISystemExitUtil.systemExitCloseAAIGraph(0); + } + } else if (thisArg.equals("-sleepMinutes")) { + i++; + if (i >= args.length) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + LOGGER.error("No value passed with -sleepMinutes option."); + AAISystemExitUtil.systemExitCloseAAIGraph(0); + } + String nextArg = args[i]; + try { + sleepMinutes = Integer.parseInt(nextArg); + } catch (Exception e) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + LOGGER.error("Bad value passed with -sleepMinutes option: [" + + nextArg + "]"); + AAISystemExitUtil.systemExitCloseAAIGraph(0); + } + } else if (thisArg.equals("-timeWindowMinutes")) { + i++; + if (i >= args.length) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + LOGGER.error("No value passed with -timeWindowMinutes option."); + AAISystemExitUtil.systemExitCloseAAIGraph(0); + } + String nextArg = args[i]; + try { + timeWindowMinutes = Integer.parseInt(nextArg); + } catch (Exception e) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + LOGGER.error("Bad value passed with -timeWindowMinutes option: [" + + nextArg + "]"); + AAISystemExitUtil.systemExitCloseAAIGraph(0); + } + + } else if (thisArg.equals("-f")) { + i++; + if (i >= args.length) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + LOGGER.error(" No value passed with -f option. "); + AAISystemExitUtil.systemExitCloseAAIGraph(0); + } + prevFileName = args[i]; + } else if (thisArg.equals("-singleNodeType")) { + i++; + if (i >= args.length) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + LOGGER.error(" No value passed with -onlyThisNodeType option. "); + AAISystemExitUtil.systemExitCloseAAIGraph(0); + } + singleNodeType = args[i]; + } else { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + LOGGER.error(" Unrecognized argument passed to DataGrooming: [" + + thisArg + "]. "); + LOGGER.error(" Valid values are: -f -autoFix -maxFix -edgesOnly -skipEdgeChecks -dupeFixOn -donFixOrphans -timeWindowMinutes -sleepMinutes -neverUseCache"); + AAISystemExitUtil.systemExitCloseAAIGraph(0); + } + } + } + + String windowTag = "FULL"; + if( timeWindowMinutes > 0 ){ + windowTag = "PARTIAL"; + } + String groomOutFileName = "dataGrooming." + windowTag + "." + dteStr + ".out"; + + try { + loaderFactory.createLoaderForVersion(ModelType.MOXY, schemaVersions.getDefaultVersion()); + } + catch (Exception ex){ + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + LOGGER.error("ERROR - Could not create loader " + LogFormatTools.getStackTop(ex)); + AAISystemExitUtil.systemExitCloseAAIGraph(1); + } + + try { + if (!prevFileName.equals("")) { + // They are trying to fix some data based on a data in a + // previous file. 
+ LOGGER.info(" Call doTheGrooming() with a previous fileName [" + + prevFileName + "] for cleanup. "); + Boolean finalShutdownFlag = true; + Boolean cacheDbOkFlag = false; + doTheGrooming(prevFileName, edgesOnlyFlag, dontFixOrphansFlag, + maxRecordsToFix, groomOutFileName, ver, singleCommits, + dupeCheckOff, dupeFixOn, ghost2CheckOff, ghost2FixOn, + finalShutdownFlag, cacheDbOkFlag, + skipEdgeCheckFlag, timeWindowMinutes, + singleNodeType, skipIndexUpdateFix ); + } else if (doAutoFix) { + // They want us to run the processing twice -- first to look for + // delete candidates, then after + // napping for a while, run it again and delete any candidates + // that were found by the first run. + // Note: we will produce a separate output file for each of the + // two runs. + LOGGER.info(" Doing an auto-fix call to Grooming. "); + LOGGER.info(" First, Call doTheGrooming() to look at what's out there. "); + Boolean finalShutdownFlag = false; + Boolean cacheDbOkFlag = true; + int fixCandCount = doTheGrooming("", edgesOnlyFlag, + dontFixOrphansFlag, maxRecordsToFix, groomOutFileName, + ver, singleCommits, dupeCheckOff, dupeFixOn, ghost2CheckOff, ghost2FixOn, + finalShutdownFlag, cacheDbOkFlag, + skipEdgeCheckFlag, timeWindowMinutes, + singleNodeType, skipIndexUpdateFix ); + if (fixCandCount == 0) { + LOGGER.info(" No fix-Candidates were found by the first pass, so no second/fix-pass is needed. "); + } else { + // We'll sleep a little and then run a fix-pass based on the + // first-run's output file. + try { + LOGGER.info("About to sleep for " + sleepMinutes + + " minutes."); + int sleepMsec = sleepMinutes * 60 * 1000; + Thread.sleep(sleepMsec); + } catch (InterruptedException ie) { + LOGGER.info("\n >>> Sleep Thread has been Interrupted <<< "); + AAISystemExitUtil.systemExitCloseAAIGraph(0); + } + + dteStr = fd.getDateTime(); + String secondGroomOutFileName = "dataGrooming." + windowTag + "." + dteStr + ".out"; + LOGGER.info(" Now, call doTheGrooming() a second time and pass in the name of the file " + + "generated by the first pass for fixing: [" + + groomOutFileName + "]"); + finalShutdownFlag = true; + cacheDbOkFlag = false; + doTheGrooming(groomOutFileName, edgesOnlyFlag, + dontFixOrphansFlag, maxRecordsToFix, + secondGroomOutFileName, ver, singleCommits, + dupeCheckOff, dupeFixOn, ghost2CheckOff, ghost2FixOn, + finalShutdownFlag, cacheDbOkFlag, + skipEdgeCheckFlag, timeWindowMinutes, + singleNodeType, skipIndexUpdateFix ); + } + } else { + // Do the grooming - plain vanilla (no fix-it-file, no + // auto-fixing) + Boolean finalShutdownFlag = true; + LOGGER.info(" Call doTheGrooming() "); + Boolean cacheDbOkFlag = true; + if( neverUseCache ){ + // They have forbidden us from using a cached db connection. + cacheDbOkFlag = false; + } + doTheGrooming("", edgesOnlyFlag, dontFixOrphansFlag, + maxRecordsToFix, groomOutFileName, ver, singleCommits, + dupeCheckOff, dupeFixOn, ghost2CheckOff, ghost2FixOn, + finalShutdownFlag, cacheDbOkFlag, + skipEdgeCheckFlag, timeWindowMinutes, + singleNodeType, skipIndexUpdateFix ); + } + } catch (Exception ex) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.error("Exception while grooming data " + LogFormatTools.getStackTop(ex)); + } + LOGGER.info(" Done! "); + AAISystemExitUtil.systemExitCloseAAIGraph(0); + } + + /** + * The main method. 
+ * + * @param args the arguments + */ + public static void main(String[] args) { + + // Set the logging file properties to be used by EELFManager + System.setProperty("aai.service.name", DataGrooming.class.getSimpleName()); + + LoggingContext.init(); + LoggingContext.partnerName(FROMAPPID); + LoggingContext.serviceName(GraphAdminApp.APP_NAME); + LoggingContext.component("dataGrooming"); + LoggingContext.targetEntity(GraphAdminApp.APP_NAME); + LoggingContext.targetServiceName("main"); + LoggingContext.requestId(TRANSID); + LoggingContext.statusCode(StatusCode.COMPLETE); + LoggingContext.responseCode(LoggingContext.SUCCESS); + + Properties props = System.getProperties(); + props.setProperty(Configuration.PROPERTY_LOGGING_FILE_NAME, AAIConstants.AAI_LOGBACK_PROPS); + props.setProperty(Configuration.PROPERTY_LOGGING_FILE_PATH, AAIConstants.AAI_HOME_BUNDLECONFIG); + + AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext( + "org.onap.aai.config", + "org.onap.aai.setup" + ); + + LoaderFactory loaderFactory = ctx.getBean(LoaderFactory.class); + SchemaVersions schemaVersions = ctx.getBean(SchemaVersions.class); + DataGrooming dataGrooming = new DataGrooming(loaderFactory, schemaVersions); + dataGrooming.execute(args); + } + + /** + * Do the grooming. + * + * @param fileNameForFixing the file name for fixing + * @param edgesOnlyFlag the edges only flag + * @param dontFixOrphansFlag the dont fix orphans flag + * @param maxRecordsToFix the max records to fix + * @param groomOutFileName the groom out file name + * @param version the version + * @param singleCommits the single commits + * @param dupeCheckOff the dupe check off + * @param dupeFixOn the dupe fix on + * @param ghost2CheckOff the ghost 2 check off + * @param ghost2FixOn the ghost 2 fix on + * @param finalShutdownFlag the final shutdown flag + * @param cacheDbOkFlag the cacheDbOk flag + * @return the int + */ + private int doTheGrooming( String fileNameForFixing, + Boolean edgesOnlyFlag, Boolean dontFixOrphansFlag, + int maxRecordsToFix, String groomOutFileName, String version, + Boolean singleCommits, + Boolean dupeCheckOff, Boolean dupeFixOn, + Boolean ghost2CheckOff, Boolean ghost2FixOn, + Boolean finalShutdownFlag, Boolean cacheDbOkFlag, + Boolean skipEdgeCheckFlag, int timeWindowMinutes, + String singleNodeType, Boolean skipIndexUpdateFix ) { + + LOGGER.debug(" Entering doTheGrooming \n"); + + int cleanupCandidateCount = 0; + long windowStartTime = 0; // Translation of the window into a starting timestamp + BufferedWriter bw = null; + JanusGraph graph = null; + JanusGraph graph2 = null; + int deleteCount = 0; + int dummyUpdCount = 0; + boolean executeFinalCommit = false; + Set<String> deleteCandidateList = new LinkedHashSet<>(); + Set<String> processedVertices = new LinkedHashSet<>(); + Set<String> postCommitRemoveList = new LinkedHashSet<>(); + + Graph g = null; + Graph g2 = null; + try { + if( timeWindowMinutes > 0 ){ + // Translate the window value (ie. 30 minutes) into a unix timestamp like + // we use in the db - so we can select data created after that time. 
+ windowStartTime = figureWindowStartTime( timeWindowMinutes ); + } + + AAIConfig.init(); + String targetDir = AAIConstants.AAI_HOME + AAIConstants.AAI_FILESEP + + "logs" + AAIConstants.AAI_FILESEP + "data" + + AAIConstants.AAI_FILESEP + "dataGrooming"; + + // Make sure the target directory exists + new File(targetDir).mkdirs(); + + if (!fileNameForFixing.equals("")) { + deleteCandidateList = getDeleteList(targetDir, + fileNameForFixing, edgesOnlyFlag, dontFixOrphansFlag, + dupeFixOn); + } + + if (deleteCandidateList.size() > maxRecordsToFix) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.warn(" >> WARNING >> Delete candidate list size (" + + deleteCandidateList.size() + + ") is too big. The maxFix we are using is: " + + maxRecordsToFix + + ". No candidates will be deleted. "); + // Clear out the list so it won't be processed below. + deleteCandidateList = new LinkedHashSet<>(); + } + + String fullOutputFileName = targetDir + AAIConstants.AAI_FILESEP + + groomOutFileName; + File groomOutFile = new File(fullOutputFileName); + try { + groomOutFile.createNewFile(); + } catch (IOException e) { + String emsg = " Problem creating output file [" + + fullOutputFileName + "], exception=" + e.getMessage(); + throw new AAIException("AAI_6124", emsg); + } + + LOGGER.info(" Will write to " + fullOutputFileName ); + bw = new BufferedWriter(new FileWriter(groomOutFile.getAbsoluteFile())); + ErrorLogHelper.loadProperties(); + + LOGGER.info(" ---- NOTE --- about to open graph (takes a little while)--------\n"); + + if( cacheDbOkFlag ){ + // Since we're just reading (not deleting/fixing anything), we can use + // a cached connection to the DB + + // -- note JanusGraphFactory has been leaving db connections open + //graph = JanusGraphFactory.open(new AAIGraphConfig.Builder(AAIConstants.CACHED_DB_CONFIG).forService(DataGrooming.class.getSimpleName()).withGraphType("cached").buildConfiguration()); + graph = AAIGraph.getInstance().getGraph(); + } + else { + // -- note JanusGraphFactory has been leaving db connections open + //graph = JanusGraphFactory.open(new AAIGraphConfig.Builder(AAIConstants.REALTIME_DB_CONFIG).forService(DataGrooming.class.getSimpleName()).withGraphType("realtime1").buildConfiguration()); + graph = AAIGraph.getInstance().getGraph(); + } + if (graph == null) { + String emsg = "null graph object in DataGrooming\n"; + throw new AAIException("AAI_6101", emsg); + } + + LOGGER.debug(" Got the graph object. 
"); + + g = graph.newTransaction(); + if (g == null) { + String emsg = "null graphTransaction object in DataGrooming\n"; + throw new AAIException("AAI_6101", emsg); + } + GraphTraversalSource source1 = g.traversal(); + + ArrayList<String> errArr = new ArrayList<>(); + int totalNodeCount = 0; + HashMap<String, String> misMatchedHash = new HashMap<String, String>(); + HashMap<String, Vertex> orphanNodeHash = new HashMap<String, Vertex>(); + HashMap<String, Vertex> missingAaiNtNodeHash = new HashMap<String, Vertex>(); + HashMap<String, Edge> oneArmedEdgeHash = new HashMap<String, Edge>(); + HashMap<String, String> emptyVertexHash = new HashMap<String, String>(); + HashMap<String, Vertex> ghostNodeHash = new HashMap<String, Vertex>(); + ArrayList<String> dupeGroups = new ArrayList<>(); + + Loader loader = loaderFactory.createLoaderForVersion(ModelType.MOXY, schemaVersions.getDefaultVersion()); + + + // NOTE --- At one point, we tried explicitly searching for + // nodes that were missing their aai-node-type (which does + // happen sometimes), but the search takes too long and cannot + // be restricted to a date-range since these nodes usually do + // not have timestamps either. Instead, when we run across them + // as orphans, we will not treat them as orphans, but catagorize + // them as "missingAaiNodeType" - which we will treat more like + // ghost nodes - that is, delete them without asking permission. + // + // Note Also - It's a little surprising that we can run + // across these when looking for orphans since that search at + // least begins based on a given aai-node-type. But watching + // where they come up, they are getting discovered when a node + // is looking for its parent node. So, say, a “tenant” node + // follows a “contains” edge and finds the bad node. + + + + Set<Entry<String, Introspector>> entrySet = loader.getAllObjects().entrySet(); + String ntList = ""; + LOGGER.info(" Starting DataGrooming Processing "); + + if (edgesOnlyFlag) { + LOGGER.info(" NOTE >> Skipping Node processing as requested. Will only process Edges. << "); + } + else { + for (Entry<String, Introspector> entry : entrySet) { + String nType = entry.getKey(); + int thisNtCount = 0; + int thisNtDeleteCount = 0; + + if( !singleNodeType.equals("") && !singleNodeType.equals(nType) ){ + // We are only going to process this one node type + continue; + } + + LOGGER.debug(" > Look at : [" + nType + "] ..."); + ntList = ntList + "," + nType; + + // Get a collection of the names of the key properties for this nodeType to use later + // Determine what the key fields are for this nodeType - use an arrayList so they + // can be gotten out in a consistent order. 
+ Set <String> keyPropsSet = entry.getValue().getKeys(); + ArrayList <String> keyProps = new ArrayList <String> (); + keyProps.addAll(keyPropsSet); + + Set <String> indexedPropsSet = entry.getValue().getIndexedProperties(); + ArrayList <String> indexedProps = new ArrayList <String> (); + indexedProps.addAll(indexedPropsSet); + + Iterator<String> indPropItr = indexedProps.iterator(); + HashMap <String,String> propTypeHash = new HashMap <String, String> (); + while( indPropItr.hasNext() ){ + String propName = indPropItr.next(); + String propType = entry.getValue().getType(propName); + propTypeHash.put(propName, propType); + } + + // Get the types of nodes that this nodetype depends on for uniqueness (if any) + Collection <String> depNodeTypes = loader.introspectorFromName(nType).getDependentOn(); + + // Loop through all the nodes of this Node type + int lastShownForNt = 0; + ArrayList <Vertex> tmpList = new ArrayList <> (); + Iterator <Vertex> iterv = source1.V().has("aai-node-type",nType); + while (iterv.hasNext()) { + // We put the nodes into an ArrayList because the graph.query iterator can time out + tmpList.add(iterv.next()); + } + + Iterator <Vertex> iter = tmpList.iterator(); + while (iter.hasNext()) { + try { + thisNtCount++; + if( thisNtCount == lastShownForNt + 1000 ){ + lastShownForNt = thisNtCount; + LOGGER.debug("count for " + nType + " so far = " + thisNtCount ); + } + Vertex thisVtx = iter.next(); + if( windowStartTime > 0 ){ + // They are using the time-window, so we only want nodes that are updated after a + // passed-in timestamp OR that have no last-modified-timestamp which means they are suspicious. + Object objModTimeStamp = thisVtx.property("aai-last-mod-ts").orElse(null); + if( objModTimeStamp != null ){ + long thisNodeModTime = (long)objModTimeStamp; + if( thisNodeModTime < windowStartTime ){ + // It has a last modified ts and is NOT in our window, so we can pass over it + continue; + } + } + } + + String thisVid = thisVtx.id().toString(); + if (processedVertices.contains(thisVid)) { + LOGGER.debug("skipping already processed vertex: " + thisVid); + continue; + } + totalNodeCount++; + List <Vertex> secondGetList = new ArrayList <> (); + // ----------------------------------------------------------------------- + // For each vertex of this nodeType, we want to: + // a) make sure that it can be retrieved using it's AAI defined key + // b) make sure that it is not a duplicate + // ----------------------------------------------------------------------- + + // For this instance of this nodeType, get the key properties + HashMap<String, Object> propHashWithKeys = new HashMap<>(); + Iterator<String> keyPropI = keyProps.iterator(); + while (keyPropI.hasNext()) { + String propName = keyPropI.next(); + String propVal = ""; + //delete an already deleted vertex + Object obj = thisVtx.<Object>property(propName).orElse(null); + if (obj != null) { + propVal = obj.toString(); + } + propHashWithKeys.put(propName, propVal); + } + try { + // If this node is dependent on another for uniqueness, then do the query from that parent node + // Note - all of our nodes that are dependent on others for uniqueness are + // "children" of that node. + boolean depNodeOk = true; + if( depNodeTypes.isEmpty() ){ + // This kind of node is not dependent on any other. + // Make sure we can get it back using it's key properties (that is the + // phantom checking) and that we only get one. Note - we also need + // to collect data for a second type of dupe-checking which is done later. 
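In Gremlin terms, the key-based re-fetch just described boils down to the fragment below, using the loop variables already in scope (a sketch only; the `getNodeJustUsingKeyParams()` call that follows is the real implementation, which also handles the schema version and error cases):

```java
// Sketch of the phantom check: re-find the vertex through its own
// aai-node-type plus every one of its key properties.
GraphTraversal<Vertex, Vertex> lookup = source1.V().has("aai-node-type", nType);
for (Map.Entry<String, Object> keyEntry : propHashWithKeys.entrySet()) {
    lookup = lookup.has(keyEntry.getKey(), keyEntry.getValue());
}
List<Vertex> hits = lookup.toList();
// hits.isEmpty()  -> the vertex cannot be reached by its own key: a phantom
// hits.size() > 1 -> candidates for the duplicate processing done later
```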
+ secondGetList = getNodeJustUsingKeyParams( TRANSID, FROMAPPID, source1, nType,
+ propHashWithKeys, version );
+ }
+ else {
+ // This kind of node is dependent on another for uniqueness.
+ // Start at its parent (the parent/containing vertex) and make sure we can get it
+ // back using its key properties and that we only get one.
+ // The union() below finds the container along either edge direction: an IN edge
+ // whose CONTAINS property says OUT means the edge's out-vertex contains us; an
+ // OUT edge whose CONTAINS property says IN means the edge's in-vertex contains us.
+ Iterator <Vertex> vertI2 = source1.V(thisVtx).union(__.inE().has(EdgeProperty.CONTAINS.toString(), AAIDirection.OUT.toString()).outV(), __.outE().has(EdgeProperty.CONTAINS.toString(), AAIDirection.IN.toString()).inV());
+ Vertex parentVtx = null;
+ // First we need to try to find the parent/containing vertex.
+ int pCount = 0;
+ while( vertI2 != null && vertI2.hasNext() ){
+ parentVtx = vertI2.next();
+ pCount++;
+ }
+ if( pCount <= 0 ){
+ // It's missing its dependent/parent/containing node - it's an orphan
+ depNodeOk = false;
+ if (deleteCandidateList.contains(thisVid)) {
+ boolean okFlag = true;
+ boolean updateOnlyFlag = false;
+ try {
+ processedVertices.add(thisVtx.id().toString());
+ Object ob = thisVtx.<Object>property("aai-node-type").orElse(null);
+ if( ob == null && !skipIndexUpdateFix ){
+ updateIndexedProps(thisVtx, thisVid, nType, propTypeHash, indexedProps);
+ updateOnlyFlag = true;
+ dummyUpdCount++;
+ // Since we are updating this delete candidate, not deleting it, we
+ // want it to show up as a delete candidate for this run also.
+ missingAaiNtNodeHash.put(thisVid, thisVtx);
+ }
+ else {
+ // There was an aai-node-type property, so we'll do the remove
+ thisVtx.remove();
+ deleteCount++;
+ thisNtDeleteCount++;
+ }
+ } catch (Exception e) {
+ okFlag = false;
+ LoggingContext.statusCode(StatusCode.ERROR);
+ LoggingContext.responseCode(LoggingContext.DATA_ERROR);
+ LOGGER.error("ERROR trying to delete Delete Candidate VID = " + thisVid + " " + LogFormatTools.getStackTop(e));
+ }
+ if (okFlag){
+ if( updateOnlyFlag ) {
+ LOGGER.info(" Updated Indexes for Delete Candidate VID = " + thisVid);
+ }
+ else {
+ LOGGER.info(" DELETED Delete Candidate VID = " + thisVid);
+ }
+ }
+ } else {
+ // NOTE - Only nodes that are missing their parent/containing node are ever considered "orphaned".
+ // That is, you could have a node with no edges... which sounds like an orphan, but not all
+ // nodes require edges. For example, you could have a newly created "image" node which does not have
+ // any edges connected to it (using it) yet.
+ Object ob = thisVtx.<Object>property("aai-node-type").orElse(null);
+ if( ob == null ){
+ // Group this with missing-node-type guys - which
+ // we will delete more readily than orphans.
+ LOGGER.info(" >> Encountered a missingAaiNodeType while looking for the parent of a [" + nType + "] node.");
+ missingAaiNtNodeHash.put(thisVid, thisVtx);
+ }
+ else {
+ Object ob2 = thisVtx.<Object>property("aai-uuid").orElse(null);
+ String auid = "";
+ if( ob2 != null ){
+ auid = ob2.toString();
+ }
+ String checkDummyUid = thisVid + "dummy";
+ if( auid.equals(checkDummyUid) ){
+ // Group this with missing-node-type guys.
+ LOGGER.info(" >> Encountered a missingAaiNodeType mid-fix-node while looking for the parent of a [" + nType + "] node.");
+ missingAaiNtNodeHash.put(thisVid, thisVtx);
+ }
+ else {
+ // It's a regular old orphan
+ orphanNodeHash.put(thisVid, thisVtx);
+ }
+ }
+ }
+ }
+ else if ( pCount > 1 ){
+ // Not sure how this could happen - we found more than one
+ // containing vertex, so leave this node alone.
+ depNodeOk = false;
+ }
+ else {
+ // We found the parent - so use it to do the second-look. 
+ // NOTE --- We're just going to do the same check from the other direction - because + // there could be duplicates or the pointer going the other way could be broken + ArrayList <Vertex> tmpListSec = new ArrayList <> (); + + tmpListSec = getConnectedChildrenOfOneType( source1, parentVtx, nType ) ; + Iterator<Vertex> vIter = tmpListSec.iterator(); + while (vIter.hasNext()) { + Vertex tmpV = vIter.next(); + if( vertexHasTheseKeys(tmpV, propHashWithKeys) ){ + secondGetList.add(tmpV); + } + } + } + }// end of -- else this is a dependent node -- piece + + if( depNodeOk && (secondGetList == null || secondGetList.size() == 0) ){ + // We could not get the node back using it's own key info. + // So, it's a PHANTOM + if (deleteCandidateList.contains(thisVid)) { + boolean okFlag = true; + boolean updateOnlyFlag = false; + try { + Object ob = thisVtx.<Object>property("aai-node-type").orElse(null); + if( ob == null && !skipIndexUpdateFix ){ + updateIndexedProps(thisVtx, thisVid, nType, propTypeHash, indexedProps); + dummyUpdCount++; + updateOnlyFlag = true; + // Since we are updating this delete candidate, not deleting it, we + // want it to show up as a delete candidate for this run also. + missingAaiNtNodeHash.put(thisVid, thisVtx); + } + else { + // There was an aai-node-type parameter, so we'll do the remove + thisVtx.remove(); + deleteCount++; + thisNtDeleteCount++; + } + } catch (Exception e) { + okFlag = false; + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.error("ERROR trying to delete phantom VID = " + thisVid + " " + LogFormatTools.getStackTop(e)); + } + if (okFlag){ + if( updateOnlyFlag ) { + LOGGER.info(" Updated Indexes for Delete Candidate VID = " + thisVid); + } + else { + LOGGER.info(" DELETED VID = " + thisVid); + } + } + } else { + ghostNodeHash.put(thisVid, thisVtx); + } + } + else if( (secondGetList.size() > 1) && depNodeOk && !dupeCheckOff ){ + // Found some DUPLICATES - need to process them + LOGGER.info(" - now check Dupes for this guy - "); + List<String> tmpDupeGroups = checkAndProcessDupes( + TRANSID, FROMAPPID, g, source1, version, + nType, secondGetList, dupeFixOn, + deleteCandidateList, singleCommits, dupeGroups, loader); + Iterator<String> dIter = tmpDupeGroups.iterator(); + while (dIter.hasNext()) { + // Add in any newly found dupes to our running list + String tmpGrp = dIter.next(); + LOGGER.info("Found set of dupes: [" + tmpGrp + "]"); + dupeGroups.add(tmpGrp); + } + } + } + catch (AAIException e1) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.warn(" For nodeType = " + nType + " Caught exception", e1); + errArr.add(e1.getErrorObject().toString()); + } + catch (Exception e2) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.warn(" For nodeType = " + nType + + " Caught exception", e2); + errArr.add(e2.getMessage()); + } + }// try block to enclose looping over each single vertex + catch (Exception exx) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.warn("WARNING from inside the while-verts-loop ", exx); + } + + } // while loop for each record of a nodeType + + if( depNodeTypes.isEmpty() && !dupeCheckOff ){ + // For this nodeType, we haven't looked at the possibility of a + // non-dependent node where two verts have same key info + ArrayList<ArrayList<Vertex>> nonDependentDupeSets = new 
ArrayList<ArrayList<Vertex>>(); + nonDependentDupeSets = getDupeSets4NonDepNodes( + TRANSID, FROMAPPID, g, + version, nType, tmpList, + keyProps, loader ); + // For each set found (each set is for a unique instance of key-values), + // process the dupes found + Iterator<ArrayList<Vertex>> dsItr = nonDependentDupeSets.iterator(); + while( dsItr.hasNext() ){ + ArrayList<Vertex> dupeList = dsItr.next(); + LOGGER.info(" - now check Dupes for some non-dependent guys - "); + List<String> tmpDupeGroups = checkAndProcessDupes( + TRANSID, FROMAPPID, g, source1, version, + nType, dupeList, dupeFixOn, + deleteCandidateList, singleCommits, dupeGroups, loader); + Iterator<String> dIter = tmpDupeGroups.iterator(); + while (dIter.hasNext()) { + // Add in any newly found dupes to our running list + String tmpGrp = dIter.next(); + LOGGER.info("Found set of dupes: [" + tmpGrp + "]"); + dupeGroups.add(tmpGrp); + } + } + + }// end of extra dupe check for non-dependent nodes + + if ( (thisNtDeleteCount > 0) && singleCommits ) { + // NOTE - the singleCommits option is not used in normal processing + g.tx().commit(); + g = AAIGraph.getInstance().getGraph().newTransaction(); + + } + thisNtDeleteCount = 0; + LOGGER.info( " Processed " + thisNtCount + " records for [" + nType + "], " + totalNodeCount + " total (in window) overall. " ); + + }// While-loop for each node type + + }// end of check to make sure we weren't only supposed to do edges + + + if( !skipEdgeCheckFlag ){ + // -------------------------------------------------------------------------------------- + // Now, we're going to look for one-armed-edges. Ie. an edge that + // should have + // been deleted (because a vertex on one side was deleted) but + // somehow was not deleted. + // So the one end of it points to a vertexId -- but that vertex is + // empty. + // -------------------------------------------------------------------------------------- + + // To do some strange checking - we need a second graph object + LOGGER.debug(" ---- DEBUG --- about to open a SECOND graph (takes a little while)--------\n"); + // Note - graph2 just reads - but we want it to use a fresh connection to + // the database, so we are NOT using the CACHED DB CONFIG here. + + // -- note JanusGraphFactory has been leaving db connections open + //graph2 = JanusGraphFactory.open(new AAIGraphConfig.Builder(AAIConstants.REALTIME_DB_CONFIG).forService(DataGrooming.class.getSimpleName()).withGraphType("realtime2").buildConfiguration()); + graph2 = AAIGraph.getInstance().getGraph(); + if (graph2 == null) { + String emsg = "null graph2 object in DataGrooming\n"; + throw new AAIException("AAI_6101", emsg); + } else { + LOGGER.debug("Got the graph2 object... 
\n"); + } + g2 = graph2.newTransaction(); + if (g2 == null) { + String emsg = "null graphTransaction2 object in DataGrooming\n"; + throw new AAIException("AAI_6101", emsg); + } + + ArrayList<Vertex> vertList = new ArrayList<>(); + Iterator<Vertex> vItor3 = g.traversal().V(); + // Gotta hold these in a List - or else HBase times out as you cycle + // through these + while (vItor3.hasNext()) { + Vertex v = vItor3.next(); + vertList.add(v); + } + int counter = 0; + int lastShown = 0; + Iterator<Vertex> vItor2 = vertList.iterator(); + LOGGER.info(" Checking for bad edges --- "); + + while (vItor2.hasNext()) { + Vertex v = null; + try { + try { + v = vItor2.next(); + } catch (Exception vex) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.warn(">>> WARNING trying to get next vertex on the vItor2 "); + continue; + } + + counter++; + String thisVertId = ""; + try { + thisVertId = v.id().toString(); + } catch (Exception ev) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.warn("WARNING when doing getId() on a vertex from our vertex list. "); + continue; + } + if (ghostNodeHash.containsKey(thisVertId)) { + // This is a phantom node, so don't try to use it + LOGGER.info(" >> Skipping edge check for edges from vertexId = " + + thisVertId + + ", since that guy is a Phantom Node"); + continue; + } + + if( windowStartTime > 0 ){ + // They are using the time-window, so we only want nodes that are updated after a + // passed-in timestamp OR that have no last-modified-timestamp which means they are suspicious. + Object objModTimeStamp = v.property("aai-last-mod-ts").orElse(null); + if( objModTimeStamp != null ){ + long thisNodeModTime = (long)objModTimeStamp; + if( thisNodeModTime < windowStartTime ){ + // It has a last modified ts and is NOT in our window, so we can pass over it + continue; + } + } + } + + if (counter == lastShown + 250) { + lastShown = counter; + LOGGER.info("... Checking edges for vertex # " + + counter); + } + Iterator<Edge> eItor = v.edges(Direction.BOTH); + while (eItor.hasNext()) { + Edge e = null; + Vertex vIn = null; + Vertex vOut = null; + try { + e = eItor.next(); + } catch (Exception iex) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.warn(">>> WARNING trying to get next edge on the eItor ", iex); + continue; + } + + try { + vIn = e.inVertex(); + } catch (Exception err) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.warn(">>> WARNING trying to get edge's In-vertex ", err); + } + String vNtI = ""; + String vIdI = ""; + Vertex ghost2 = null; + + Boolean keysMissing = true; + Boolean cantGetUsingVid = false; + if (vIn != null) { + try { + Object ob = vIn.<Object>property("aai-node-type").orElse(null); + if (ob != null) { + vNtI = ob.toString(); + keysMissing = anyKeyFieldsMissing(vNtI, vIn, loader); + } + ob = vIn.id(); + long vIdLong = 0L; + if (ob != null) { + vIdI = ob.toString(); + vIdLong = Long.parseLong(vIdI); + } + + if( ! 
ghost2CheckOff ){ + Vertex connectedVert = g2.traversal().V(vIdLong).next(); + if( connectedVert == null ) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.warn( "GHOST2 -- got NULL when doing getVertex for vid = " + vIdLong); + cantGetUsingVid = true; + + // If we can NOT get this ghost with the SECOND graph-object, + // it is still a ghost since even though we can get data about it using the FIRST graph + // object. + try { + ghost2 = g.traversal().V(vIdLong).next(); + } + catch( Exception ex){ + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.warn( "GHOST2 -- Could not get the ghost info for a bad edge for vtxId = " + vIdLong, ex); + } + if( ghost2 != null ){ + ghostNodeHash.put(vIdI, ghost2); + } + } + }// end of the ghost2 checking + } + catch (Exception err) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.warn(">>> WARNING trying to get edge's In-vertex props ", err); + } + } + if (keysMissing || vIn == null || vNtI.equals("") + || cantGetUsingVid) { + // this is a bad edge because it points to a vertex + // that isn't there anymore or is corrupted + String thisEid = e.id().toString(); + if (deleteCandidateList.contains(thisEid) || deleteCandidateList.contains(vIdI)) { + boolean okFlag = true; + if (!vIdI.equals("")) { + // try to get rid of the corrupted vertex + try { + if( (ghost2 != null) && ghost2FixOn ){ + ghost2.remove(); + } + else { + vIn.remove(); + } + if (singleCommits) { + // NOTE - the singleCommits option is not used in normal processing + g.tx().commit(); + g = AAIGraph.getInstance().getGraph().newTransaction(); + } + else { + executeFinalCommit = true; + } + deleteCount++; + } catch (Exception e1) { + okFlag = false; + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.warn("WARNING when trying to delete bad-edge-connected VERTEX VID = " + + vIdI, e1); + } + if (okFlag) { + LOGGER.info(" DELETED vertex from bad edge = " + + vIdI); + } + } else { + // remove the edge if we couldn't get the + // vertex + try { + e.remove(); + if (singleCommits) { + // NOTE - the singleCommits option is not used in normal processing + g.tx().commit(); + g = AAIGraph.getInstance().getGraph().newTransaction(); + } + else { + executeFinalCommit = true; + } + deleteCount++; + } catch (Exception ex) { + // NOTE - often, the exception is just + // that this edge has already been + // removed + okFlag = false; + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.warn("WARNING when trying to delete edge = " + + thisEid); + } + if (okFlag) { + LOGGER.info(" DELETED edge = " + thisEid); + } + } + } else { + oneArmedEdgeHash.put(thisEid, e); + if ((vIn != null) && (vIn.id() != null)) { + emptyVertexHash.put(thisEid, vIn.id() + .toString()); + } + } + } + + try { + vOut = e.outVertex(); + } catch (Exception err) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.warn(">>> WARNING trying to get edge's Out-vertex "); + } + String vNtO = ""; + String vIdO = ""; + ghost2 = null; + keysMissing = true; + cantGetUsingVid = false; + if (vOut != null) { + try { + Object ob = vOut.<Object>property("aai-node-type").orElse(null); + if (ob != null) { + vNtO = ob.toString(); + keysMissing = anyKeyFieldsMissing(vNtO, + vOut, loader); 
+ } + ob = vOut.id(); + long vIdLong = 0L; + if (ob != null) { + vIdO = ob.toString(); + vIdLong = Long.parseLong(vIdO); + } + + if( ! ghost2CheckOff ){ + Vertex connectedVert = g2.traversal().V(vIdLong).next(); + if( connectedVert == null ) { + cantGetUsingVid = true; + LOGGER.info( "GHOST2 -- got NULL when doing getVertex for vid = " + vIdLong); + // If we can get this ghost with the other graph-object, then get it -- it's still a ghost + try { + ghost2 = g.traversal().V(vIdLong).next(); + } + catch( Exception ex){ + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.warn( "GHOST2 -- Could not get the ghost info for a bad edge for vtxId = " + vIdLong, ex); + } + if( ghost2 != null ){ + ghostNodeHash.put(vIdO, ghost2); + } + } + } + } catch (Exception err) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.warn(">>> WARNING trying to get edge's Out-vertex props ", err); + } + } + if (keysMissing || vOut == null || vNtO.equals("") + || cantGetUsingVid) { + // this is a bad edge because it points to a vertex + // that isn't there anymore + String thisEid = e.id().toString(); + if (deleteCandidateList.contains(thisEid) || deleteCandidateList.contains(vIdO)) { + boolean okFlag = true; + if (!vIdO.equals("")) { + // try to get rid of the corrupted vertex + try { + if( (ghost2 != null) && ghost2FixOn ){ + ghost2.remove(); + } + else if (vOut != null) { + vOut.remove(); + } + if (singleCommits) { + // NOTE - the singleCommits option is not used in normal processing + g.tx().commit(); + g = AAIGraph.getInstance().getGraph().newTransaction(); + } + else { + executeFinalCommit = true; + } + deleteCount++; + } catch (Exception e1) { + okFlag = false; + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.warn("WARNING when trying to delete bad-edge-connected VID = " + + vIdO, e1); + } + if (okFlag) { + LOGGER.info(" DELETED vertex from bad edge = " + + vIdO); + } + } else { + // remove the edge if we couldn't get the + // vertex + try { + e.remove(); + if (singleCommits) { + // NOTE - the singleCommits option is not used in normal processing + g.tx().commit(); + g = AAIGraph.getInstance().getGraph().newTransaction(); + } + else { + executeFinalCommit = true; + } + deleteCount++; + } catch (Exception ex) { + // NOTE - often, the exception is just + // that this edge has already been + // removed + okFlag = false; + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.warn("WARNING when trying to delete edge = " + + thisEid, ex); + } + if (okFlag) { + LOGGER.info(" DELETED edge = " + thisEid); + } + } + } else { + oneArmedEdgeHash.put(thisEid, e); + if ((vOut != null) && (vOut.id() != null)) { + emptyVertexHash.put(thisEid, vOut.id() + .toString()); + } + } + } + }// End of while-edges-loop + } catch (Exception exx) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.warn("WARNING from in the while-verts-loop ", exx); + } + }// End of while-vertices-loop (the edge-checking) + LOGGER.info(" Done checking for bad edges --- "); + } // end of -- if we're not skipping the edge-checking + + + deleteCount = deleteCount + dupeGrpsDeleted; + if (!singleCommits && (deleteCount > 0 || dummyUpdCount > 0) ){ + executeFinalCommit = true; + } + + int ghostNodeCount = ghostNodeHash.size(); + int orphanNodeCount = 
orphanNodeHash.size();
+ int oneArmedEdgeCount = oneArmedEdgeHash.size();
+ int missingAaiNtNodeCount = missingAaiNtNodeHash.size();
+ int dupeCount = dupeGroups.size();
+
+ bw.write("\n\n ============ Summary ==============\n");
+ if( timeWindowMinutes == 0 ){
+ bw.write("Ran FULL data grooming (no time-window). \n");
+ }
+ else {
+ bw.write("Ran PARTIAL data grooming just looking at data added/updated in the last " + timeWindowMinutes + " minutes. \n");
+ }
+
+ bw.write("\nRan these nodeTypes: " + ntList + "\n\n");
+ bw.write("There were this many delete candidates from previous run = "
+ + deleteCandidateList.size() + "\n");
+ if (dontFixOrphansFlag) {
+ bw.write(" Note - we are not counting orphan nodes since the -dontFixOrphans parameter was used. \n");
+ }
+ bw.write("Deleted this many delete candidates = " + deleteCount
+ + "\n");
+ bw.write("Dummy-index-update to delete candidates = " + dummyUpdCount
+ + "\n");
+ bw.write("Total number of nodes looked at = " + totalNodeCount
+ + "\n");
+ bw.write("Ghost Nodes identified = " + ghostNodeCount + "\n");
+ bw.write("Orphan Nodes identified = " + orphanNodeCount + "\n");
+ bw.write("Missing aai-node-type Nodes identified = " + missingAaiNtNodeCount + "\n");
+ bw.write("Bad Edges identified = " + oneArmedEdgeCount + "\n");
+ bw.write("Duplicate Groups count = " + dupeCount + "\n");
+ bw.write("MisMatching Label/aai-node-type count = "
+ + misMatchedHash.size() + "\n");
+
+ bw.write("\n ------------- Delete Candidates ---------\n");
+ for (Map.Entry<String, Vertex> entry : ghostNodeHash
+ .entrySet()) {
+ String vid = entry.getKey();
+ bw.write("DeleteCandidate: Phantom Vid = [" + vid + "]\n");
+ cleanupCandidateCount++;
+ }
+ for (Map.Entry<String, Vertex> entry : missingAaiNtNodeHash
+ .entrySet()) {
+ String vid = entry.getKey();
+ bw.write("DeleteCandidate: Missing aai-node-type Vid = [" + vid + "]\n");
+ cleanupCandidateCount++;
+ }
+ for (Map.Entry<String, Vertex> entry : orphanNodeHash
+ .entrySet()) {
+ String vid = entry.getKey();
+ bw.write("DeleteCandidate: OrphanDepNode Vid = [" + vid + "]\n");
+ if (!dontFixOrphansFlag) {
+ cleanupCandidateCount++;
+ }
+ }
+ for (Map.Entry<String, Edge> entry : oneArmedEdgeHash.entrySet()) {
+ String eid = entry.getKey();
+ bw.write("DeleteCandidate: Bad EDGE Edge-id = [" + eid + "]\n");
+ cleanupCandidateCount++;
+ }
+
+ bw.write("\n-- NOTE - To see DeleteCandidates for Duplicates, you need to look in the Duplicates Detail section below.\n");
+
+ bw.write("\n ------------- GHOST NODES - detail ");
+ for (Map.Entry<String, Vertex> entry : ghostNodeHash
+ .entrySet()) {
+ try {
+ String vid = entry.getKey();
+ bw.write("\n ==> Phantom Vid = " + vid + "\n");
+ ArrayList<String> retArr = showPropertiesForNode(
+ TRANSID, FROMAPPID, entry.getValue());
+ for (String info : retArr) {
+ bw.write(info + "\n");
+ }
+ retArr = showAllEdgesForNode(TRANSID, FROMAPPID,
+ entry.getValue());
+ for (String info : retArr) {
+ bw.write(info + "\n");
+ }
+ } catch (Exception dex) {
+ LoggingContext.statusCode(StatusCode.ERROR);
+ LoggingContext.responseCode(LoggingContext.DATA_ERROR);
+ LOGGER.error("error trying to print detail info for a ghost-node: " + LogFormatTools.getStackTop(dex));
+ }
+ }
+
+ bw.write("\n ------------- Missing aai-node-type NODES - detail: ");
+ for (Map.Entry<String, Vertex> entry : missingAaiNtNodeHash
+ .entrySet()) {
+ try {
+ String vid = entry.getKey();
+ bw.write("\n> Missing aai-node-type Node Vid = " + vid + "\n");
+ 
ArrayList<String> retArr = showPropertiesForNode( + TRANSID, FROMAPPID, entry.getValue()); + for (String info : retArr) { + bw.write(info + "\n"); + } + + retArr = showAllEdgesForNode(TRANSID, FROMAPPID, + entry.getValue()); + for (String info : retArr) { + bw.write(info + "\n"); + } + } catch (Exception dex) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.error("error trying to print detail info for a node missing its aai-node-type " + LogFormatTools.getStackTop(dex)); + } + } + + bw.write("\n ------------- Missing Dependent Edge ORPHAN NODES - detail: "); + for (Map.Entry<String, Vertex> entry : orphanNodeHash + .entrySet()) { + try { + String vid = entry.getKey(); + bw.write("\n> Orphan Node Vid = " + vid + "\n"); + ArrayList<String> retArr = showPropertiesForNode( + TRANSID, FROMAPPID, entry.getValue()); + for (String info : retArr) { + bw.write(info + "\n"); + } + + retArr = showAllEdgesForNode(TRANSID, FROMAPPID, + entry.getValue()); + for (String info : retArr) { + bw.write(info + "\n"); + } + } catch (Exception dex) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.error("error trying to print detail info for a Orphan Node /missing dependent edge " + LogFormatTools.getStackTop(dex)); + } + } + + bw.write("\n ------------- EDGES pointing to empty/bad vertices: "); + for (Map.Entry<String, Edge> entry : oneArmedEdgeHash.entrySet()) { + try { + String eid = entry.getKey(); + Edge thisE = entry.getValue(); + String badVid = emptyVertexHash.get(eid); + bw.write("\n> Edge pointing to bad vertex (Vid = " + + badVid + ") EdgeId = " + eid + "\n"); + bw.write("Label: [" + thisE.label() + "]\n"); + Iterator<Property<Object>> pI = thisE.properties(); + while (pI.hasNext()) { + Property<Object> propKey = pI.next(); + bw.write("Prop: [" + propKey + "], val = [" + + propKey.value() + "]\n"); + } + } catch (Exception pex) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.error("error trying to print empty/bad vertex data: " + LogFormatTools.getStackTop(pex)); + } + } + + bw.write("\n ------------- Duplicates: "); + Iterator<String> dupeIter = dupeGroups.iterator(); + int dupeSetCounter = 0; + while (dupeIter.hasNext()) { + dupeSetCounter++; + String dset = (String) dupeIter.next(); + + bw.write("\n --- Duplicate Group # " + dupeSetCounter + + " Detail -----------\n"); + try { + // We expect each line to have at least two vid's, followed + // by the preferred one to KEEP + String[] dupeArr = dset.split("\\|"); + ArrayList<String> idArr = new ArrayList<>(); + int lastIndex = dupeArr.length - 1; + for (int i = 0; i <= lastIndex; i++) { + if (i < lastIndex) { + // This is not the last entry, it is one of the + // dupes, so we want to show all its info + bw.write(" >> Duplicate Group # " + + dupeSetCounter + " Node # " + i + + " ----\n"); + String vidString = dupeArr[i]; + idArr.add(vidString); + long longVertId = Long.parseLong(vidString); + Iterator<Vertex> vtxIterator = g.vertices(longVertId); + Vertex vtx = null; + if (vtxIterator.hasNext()) { + vtx = vtxIterator.next(); + } + ArrayList<String> retArr = showPropertiesForNode(TRANSID, FROMAPPID, vtx); + for (String info : retArr) { + bw.write(info + "\n"); + } + + retArr = showAllEdgesForNode(TRANSID, + FROMAPPID, vtx); + for (String info : retArr) { + bw.write(info + "\n"); + } + } else { + // This is the last entry which should tell us if we + // have 
a preferred keeper + String prefString = dupeArr[i]; + if (prefString.equals("KeepVid=UNDETERMINED")) { + bw.write("\n For this group of duplicates, could not tell which one to keep.\n"); + bw.write(" >>> This group needs to be taken care of with a manual/forced-delete.\n"); + } else { + // If we know which to keep, then the prefString + // should look like, "KeepVid=12345" + String[] prefArr = prefString.split("="); + if (prefArr.length != 2 + || (!prefArr[0].equals("KeepVid"))) { + throw new Exception("Bad format. Expecting KeepVid=999999"); + } else { + String keepVidStr = prefArr[1]; + if (idArr.contains(keepVidStr)) { + bw.write("\n The vertex we want to KEEP has vertexId = " + + keepVidStr); + bw.write("\n The others become delete candidates: \n"); + idArr.remove(keepVidStr); + for (int x = 0; x < idArr.size(); x++) { + cleanupCandidateCount++; + bw.write("DeleteCandidate: Duplicate Vid = [" + + idArr.get(x) + "]\n"); + } + } else { + throw new Exception("ERROR - Vertex Id to keep not found in list of dupes. dset = [" + + dset + "]"); + } + } + }// else we know which one to keep + }// else last entry + }// for each vertex in a group + } catch (Exception dex) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.error("error trying to print duplicate vertex data " + LogFormatTools.getStackTop(dex)); + } + + }// while - work on each group of dupes + + bw.write("\n ------------- Mis-matched Label/aai-node-type Nodes: \n "); + for (Map.Entry<String, String> entry : misMatchedHash.entrySet()) { + String msg = entry.getValue(); + bw.write("MixedMsg = " + msg + "\n"); + } + + bw.write("\n ------------- Got these errors while processing: \n"); + Iterator<String> errIter = errArr.iterator(); + while (errIter.hasNext()) { + String line = (String) errIter.next(); + bw.write(line + "\n"); + } + + bw.close(); + + LOGGER.info("\n ------------- Done doing all the checks ------------ "); + LOGGER.info("Output will be written to " + fullOutputFileName); + + if (cleanupCandidateCount > 0) { + // Technically, this is not an error -- but we're throwing this + // error so that hopefully a + // monitoring system will pick it up and do something with it. + throw new AAIException("AAI_6123", "See file: [" + fullOutputFileName + + "] and investigate delete candidates. "); + } + } catch (AAIException e) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.error("Caught AAIException while grooming data"); + ErrorLogHelper.logException(e); + } catch (Exception ex) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.error("Caught exception while grooming data"); + ErrorLogHelper.logError("AAI_6128", ex.getMessage() + ", resolve and rerun dataGrooming"); + } finally { + + if (bw != null) { + try { + bw.close(); + } catch (IOException iox) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.AVAILABILITY_TIMEOUT_ERROR); + LOGGER.warn("Got an IOException trying to close bufferedWriter() \n", iox); + } + } + + if (executeFinalCommit) { + // If we were holding off on commits till the end - then now is the time. + if( g == null ){ + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.error(" >>>> ERROR <<<< Could not commit changes. 
graph was null when we wanted to commit."); + } + else if( !g.tx().isOpen() ){ + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.error(" >>>> ERROR <<<< Could not commit changes. Transaction was not open when we wanted to commit."); + } + else { + try { + LOGGER.info("About to do the commit for " + + deleteCount + " removes. "); + g.tx().commit(); + LOGGER.info("Commit was successful "); + } catch (Exception excom) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.error(" >>>> ERROR <<<< Could not commit changes. " + LogFormatTools.getStackTop(excom)); + deleteCount = 0; + } + } + } + else if (g != null && g.tx().isOpen()) { + try { + // We did not do any deletes that need to be committed. + // The rollback is to clear out the transaction used while doing those reads + g.tx().rollback(); + } catch (Exception ex) { + // Don't throw anything because JanusGraph sometimes is just saying that the graph is already closed + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.AVAILABILITY_TIMEOUT_ERROR); + LOGGER.warn("WARNING from final graphTransaction.rollback()", ex); + } + } + + if (g2 != null && g2.tx().isOpen()) { + try { + // We only read on g2. The rollback is to clear out the transaction used while doing those reads + g2.tx().rollback(); + } catch (Exception ex) { + // Don't throw anything because JanusGraph sometimes is just saying that the graph is already closed + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.AVAILABILITY_TIMEOUT_ERROR); + LOGGER.warn("WARNING from final graphTransaction2.rollback()", ex); + } + } + + if( finalShutdownFlag ){ + try { + if( graph != null && graph.isOpen() ){ + graph.tx().close(); + if( "true".equals(System.getProperty("org.onap.aai.graphadmin.started"))) { + // Since dataGrooming was called from a scheduled task - do not call graph.close() + } + else { + // DataGrooming must have been called manually - so we need to call close(). + graph.close(); + } + } + } catch (Exception ex) { + // Don't throw anything because JanusGraph sometimes is just saying that the graph is already closed{ + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.AVAILABILITY_TIMEOUT_ERROR); + LOGGER.warn("WARNING from final graph.shutdown()", ex); + } + + try { + if( graph2 != null && graph2.isOpen() ){ + graph2.tx().close(); + if( "true".equals(System.getProperty("org.onap.aai.graphadmin.started"))) { + // Since dataGrooming was called from a scheduled task - do not call graph2.close() + } + else { + // DataGrooming must have been called manually - so we need to call close(). + graph2.close(); + } + } + } catch (Exception ex) { + // Don't throw anything because JanusGraph sometimes is just saying that the graph is already closed{ + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.AVAILABILITY_TIMEOUT_ERROR); + LOGGER.warn("WARNING from final graph2.shutdown()", ex); + } + } + + } + + return cleanupCandidateCount; + + }// end of doTheGrooming() + + + private void updateIndexedProps(Vertex thisVtx, String thisVidStr, String nType, + HashMap <String,String>propTypeHash, ArrayList <String> indexedProps) { + // This is a "missing-aai-node-type" scenario. + // Other indexes may also be messed up, so we will update all of them on + // this pass. 
A future pass will just treat this node like a regular orphan + // and delete it (if appropriate). + LOGGER.info(" We will be updating the indexed properties for this node to dummy values. VID = " + thisVidStr ); + String dummyPropValStr = thisVidStr + "dummy"; + // These reserved-prop-names are all indexed for all nodes + thisVtx.property("aai-node-type",nType); + thisVtx.property("aai-uri", dummyPropValStr); + thisVtx.property("aai-unique-key", dummyPropValStr); + thisVtx.property("aai-uuid", dummyPropValStr); + thisVtx.property("source-of-truth", dummyPropValStr); + Iterator<String> indexedPropI = indexedProps.iterator(); + while (indexedPropI.hasNext()) { + String propName = indexedPropI.next(); + // Using the VID in case this property is unique in the db and + // we're doing this kind of thing on more than one of these nodes.. + String dataType = propTypeHash.get(propName); + if( dataType == null || dataType.toLowerCase().endsWith(".string") ){ + thisVtx.property(propName, dummyPropValStr); + } + else if( dataType.toLowerCase().endsWith(".long") ){ + Long thisVidLong = (Long) thisVtx.id(); + thisVtx.property(propName, thisVidLong); + } + else if( dataType.toLowerCase().endsWith(".boolean") ){ + thisVtx.property(propName, false); + } + else if( dataType.toLowerCase().endsWith(".integer") ){ + thisVtx.property(propName, 9999); + } + else { + // Not sure what it is - try a string + thisVtx.property(propName, dummyPropValStr); + } + } + } + + /** + * Vertex has these keys. + * + * @param tmpV the tmp V + * @param propHashWithKeys the prop hash with keys + * @return the boolean + */ + private Boolean vertexHasTheseKeys( Vertex tmpV, HashMap <String, Object> propHashWithKeys) { + Iterator <?> it = propHashWithKeys.entrySet().iterator(); + while( it.hasNext() ){ + String propName = ""; + String propVal = ""; + Map.Entry <?,?>propEntry = (Map.Entry<?,?>)it.next(); + Object propNameObj = propEntry.getKey(); + if( propNameObj != null ){ + propName = propNameObj.toString(); + } + Object propValObj = propEntry.getValue(); + if( propValObj != null ){ + propVal = propValObj.toString(); + } + Object checkValObj = tmpV.<Object>property(propName).orElse(null); + if( checkValObj == null ) { + return false; + } + else if( !propVal.equals(checkValObj.toString()) ){ + return false; + } + } + return true; + } + + + /** + * Any key fields missing. + * + * @param nType the n type + * @param v the v + * @return the boolean + */ + private Boolean anyKeyFieldsMissing(String nType, Vertex v, Loader loader) { + + try { + Introspector obj = null; + try { + obj = loader.introspectorFromName(nType); + } catch (AAIUnknownObjectException e) { + // They gave us a non-empty nodeType but our NodeKeyProps does + // not have data for it. Since we do not know what the + // key params are for this type of node, we will just + // return "false". + String emsg = " -- WARNING -- Unrecognized nodeType: [" + nType + + "]. We cannot determine required keys for this nType. 
"; + // NOTE - this will be caught below and a "false" returned + throw new AAIException("AAI_6121", emsg); + } + + // Determine what the key fields are for this nodeType + Collection <String> keyPropNamesColl = obj.getKeys(); + Iterator<String> keyPropI = keyPropNamesColl.iterator(); + while (keyPropI.hasNext()) { + String propName = keyPropI.next(); + Object ob = v.<Object>property(propName).orElse(null); + if (ob == null || ob.toString().equals("")) { + // It is missing a key property + return true; + } + } + } catch (AAIException e) { + // Something was wrong -- but since we weren't able to check + // the keys, we will not declare that it is missing keys. + return false; + } + return false; + } + + + /** + * Gets the delete list. + * + * @param targetDir the target dir + * @param fileName the file name + * @param edgesOnlyFlag the edges only flag + * @param dontFixOrphans the dont fix orphans + * @param dupeFixOn the dupe fix on + * @return the delete list + * @throws AAIException the AAI exception + */ + private Set<String> getDeleteList(String targetDir, + String fileName, Boolean edgesOnlyFlag, Boolean dontFixOrphans, + Boolean dupeFixOn) throws AAIException { + + // Look in the file for lines formated like we expect - pull out any + // Vertex Id's to delete on this run + Set<String> delList = new LinkedHashSet<>(); + String fullFileName = targetDir + AAIConstants.AAI_FILESEP + fileName; + + try(BufferedReader br = new BufferedReader(new FileReader(fullFileName))) { + String line = br.readLine(); + while (line != null) { + if (!"".equals(line) && line.startsWith("DeleteCandidate")) { + if (edgesOnlyFlag && (!line.contains("Bad Edge"))) { + // We're only processing edges and this line is not for an edge + } else if (dontFixOrphans && line.contains("Orphan")) { + // We're not going to process orphans + } else if (!dupeFixOn && line.contains("Duplicate")) { + // We're not going to process Duplicates + } else { + int begIndex = line.indexOf("id = "); + int endIndex = line.indexOf("]"); + String vidVal = line.substring(begIndex + 6, endIndex); + delList.add(vidVal); + } + } + line = br.readLine(); + } + br.close(); + } catch (IOException e) { + throw new AAIException("AAI_6124", e, "Could not open input-file [" + fullFileName + + "], exception= " + e.getMessage()); + } + + return delList; + + }// end of getDeleteList + + /** + * Gets the preferred dupe. + * + * @param transId the trans id + * @param fromAppId the from app id + * @param g the g + * @param dupeVertexList the dupe vertex list + * @param ver the ver + * @return Vertex + * @throws AAIException the AAI exception + */ + public Vertex getPreferredDupe(String transId, + String fromAppId, GraphTraversalSource g, + ArrayList<Vertex> dupeVertexList, String ver, Loader loader) + throws AAIException { + + // This method assumes that it is being passed a List of vertex objects + // which + // violate our uniqueness constraints. 
+ + Vertex nullVtx = null; + + if (dupeVertexList == null) { + return nullVtx; + } + int listSize = dupeVertexList.size(); + if (listSize == 0) { + return nullVtx; + } + if (listSize == 1) { + return (dupeVertexList.get(0)); + } + + Vertex vtxPreferred = null; + Vertex currentFaveVtx = dupeVertexList.get(0); + for (int i = 1; i < listSize; i++) { + Vertex vtxB = dupeVertexList.get(i); + vtxPreferred = pickOneOfTwoDupes(transId, fromAppId, g, + currentFaveVtx, vtxB, ver, loader); + if (vtxPreferred == null) { + // We couldn't choose one + return nullVtx; + } else { + currentFaveVtx = vtxPreferred; + } + } + + return (currentFaveVtx); + + } // end of getPreferredDupe() + + /** + * Pick one of two dupes. + * + * @param transId the trans id + * @param fromAppId the from app id + * @param g the g + * @param vtxA the vtx A + * @param vtxB the vtx B + * @param ver the ver + * @return Vertex + * @throws AAIException the AAI exception + */ + public Vertex pickOneOfTwoDupes(String transId, + String fromAppId, GraphTraversalSource g, Vertex vtxA, + Vertex vtxB, String ver, Loader loader) throws AAIException { + + Vertex nullVtx = null; + Vertex preferredVtx = null; + + Long vidA = new Long(vtxA.id().toString()); + Long vidB = new Long(vtxB.id().toString()); + + String vtxANodeType = ""; + String vtxBNodeType = ""; + Object objType = vtxA.<Object>property("aai-node-type").orElse(null); + if (objType != null) { + vtxANodeType = objType.toString(); + } + objType = vtxB.<Object>property("aai-node-type").orElse(null); + if (objType != null) { + vtxBNodeType = objType.toString(); + } + + if (vtxANodeType.equals("") || (!vtxANodeType.equals(vtxBNodeType))) { + // Either they're not really dupes or there's some bad data - so + // don't pick one + return nullVtx; + } + + // Check that node A and B both have the same key values (or else they + // are not dupes) + // (We'll check dep-node later) + // Determine what the key fields are for this nodeType + Collection <String> keyProps = new ArrayList <>(); + HashMap <String,Object> keyPropValsHash = new HashMap <String,Object>(); + try { + keyProps = loader.introspectorFromName(vtxANodeType).getKeys(); + } catch (AAIUnknownObjectException e) { + LOGGER.warn("Required property not found", e); + throw new AAIException("AAI_6105", "Required Property name(s) not found for nodeType = " + vtxANodeType + ")"); + } + + Iterator<String> keyPropI = keyProps.iterator(); + while (keyPropI.hasNext()) { + String propName = keyPropI.next(); + String vtxAKeyPropVal = ""; + objType = vtxA.<Object>property(propName).orElse(null); + if (objType != null) { + vtxAKeyPropVal = objType.toString(); + } + String vtxBKeyPropVal = ""; + objType = vtxB.<Object>property(propName).orElse(null); + if (objType != null) { + vtxBKeyPropVal = objType.toString(); + } + + if (vtxAKeyPropVal.equals("") + || (!vtxAKeyPropVal.equals(vtxBKeyPropVal))) { + // Either they're not really dupes or they are missing some key + // data - so don't pick one + return nullVtx; + } + else { + // Keep these around for (potential) use later + keyPropValsHash.put(propName, vtxAKeyPropVal); + } + + } + + // Collect the vid's and aai-node-types of the vertices that each vertex + // (A and B) is connected to. 
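That collection step uses `getConnectedNodes()`, defined elsewhere in this class; a plausible equivalent, assuming the helper simply walks one hop in both edge directions (the real implementation may filter differently):

```java
// Assumed sketch of getConnectedNodes(): every vertex one hop away from vtx,
// regardless of edge direction.
private ArrayList<Vertex> getConnectedNodes(GraphTraversalSource g, Vertex vtx) {
    ArrayList<Vertex> neighbors = new ArrayList<>();
    g.V(vtx).both().forEachRemaining(neighbors::add);
    return neighbors;
}
```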
+ ArrayList<String> vtxIdsConn2A = new ArrayList<>(); + ArrayList<String> vtxIdsConn2B = new ArrayList<>(); + HashMap<String, String> nodeTypesConn2A = new HashMap<>(); + HashMap<String, String> nodeTypesConn2B = new HashMap<>(); + + ArrayList<Vertex> vertListA = getConnectedNodes( g, vtxA ); + if (vertListA != null) { + Iterator<Vertex> iter = vertListA.iterator(); + while (iter.hasNext()) { + Vertex tvCon = iter.next(); + String conVid = tvCon.id().toString(); + String nt = ""; + objType = tvCon.<Object>property("aai-node-type").orElse(null); + if (objType != null) { + nt = objType.toString(); + } + nodeTypesConn2A.put(nt, conVid); + vtxIdsConn2A.add(conVid); + } + } + + ArrayList<Vertex> vertListB = getConnectedNodes( g, vtxB ); + if (vertListB != null) { + Iterator<Vertex> iter = vertListB.iterator(); + while (iter.hasNext()) { + Vertex tvCon = iter.next(); + String conVid = tvCon.id().toString(); + String nt = ""; + objType = tvCon.<Object>property("aai-node-type").orElse(null); + if (objType != null) { + nt = objType.toString(); + } + nodeTypesConn2B.put(nt, conVid); + vtxIdsConn2B.add(conVid); + } + } + + // 1 - If this kind of node needs a dependent node for uniqueness, then + // verify that they both nodes point to the same dependent + // node (otherwise they're not really duplicates) + // Note - there are sometimes more than one dependent node type since + // one nodeType can be used in different ways. But for a + // particular node, it will only have one dependent node that + // it's connected to. + String onlyNodeThatIndexPointsToVidStr = ""; + Collection<String> depNodeTypes = loader.introspectorFromName(vtxANodeType).getDependentOn(); + if (depNodeTypes.isEmpty()) { + // This kind of node is not dependent on any other. That is ok. + // We need to find out if the unique index info is good or not and + // use that later when deciding if we can delete one. + onlyNodeThatIndexPointsToVidStr = findJustOneUsingIndex( transId, + fromAppId, g, keyPropValsHash, vtxANodeType, vidA, vidB, ver ); + } else { + String depNodeVtxId4A = ""; + String depNodeVtxId4B = ""; + Iterator<String> iter = depNodeTypes.iterator(); + while (iter.hasNext()) { + String depNodeType = iter.next(); + if (nodeTypesConn2A.containsKey(depNodeType)) { + // This is the dependent node type that vertex A is using + depNodeVtxId4A = nodeTypesConn2A.get(depNodeType); + } + if (nodeTypesConn2B.containsKey(depNodeType)) { + // This is the dependent node type that vertex B is using + depNodeVtxId4B = nodeTypesConn2B.get(depNodeType); + } + } + if (depNodeVtxId4A.equals("") + || (!depNodeVtxId4A.equals(depNodeVtxId4B))) { + // Either they're not really dupes or there's some bad data - so + // don't pick either one + return nullVtx; + } + } + + if (vtxIdsConn2A.size() == vtxIdsConn2B.size()) { + // 2 - If they both have edges to all the same vertices, + // then return the one that can be reached uniquely via the + // key if that is the case or + // else the one with the lower vertexId + + boolean allTheSame = true; + Iterator<String> iter = vtxIdsConn2A.iterator(); + while (iter.hasNext()) { + String vtxIdConn2A = iter.next(); + if (!vtxIdsConn2B.contains(vtxIdConn2A)) { + allTheSame = false; + break; + } + } + + if (allTheSame) { + // If everything is the same, but one of the two has a good + // pointer to it, then save that one. Otherwise, take the + // older one. + if( !onlyNodeThatIndexPointsToVidStr.equals("") ){ + // only one is reachable via the index - choose that one. 
+ if( onlyNodeThatIndexPointsToVidStr.equals(vidA.toString()) ){ + preferredVtx = vtxA; + } + else if( onlyNodeThatIndexPointsToVidStr.equals(vidB.toString()) ){ + preferredVtx = vtxB; + } + } + else if (vidA < vidB) { + preferredVtx = vtxA; + } else { + preferredVtx = vtxB; + } + } + } else if (vtxIdsConn2A.size() > vtxIdsConn2B.size()) { + // 3 - VertexA is connected to more things than vtxB. + // We'll pick VtxA if its edges are a superset of vtxB's edges + // and it doesn't contradict the check for the index/key pointer. + boolean missingOne = false; + Iterator<String> iter = vtxIdsConn2B.iterator(); + while (iter.hasNext()) { + String vtxIdConn2B = iter.next(); + if (!vtxIdsConn2A.contains(vtxIdConn2B)) { + missingOne = true; + break; + } + } + if (!missingOne) { + if( onlyNodeThatIndexPointsToVidStr.equals("") + || onlyNodeThatIndexPointsToVidStr.equals(vidA.toString()) ){ + preferredVtx = vtxA; + } + } + } else if (vtxIdsConn2B.size() > vtxIdsConn2A.size()) { + // 4 - VertexB is connected to more things than vtxA. + // We'll pick VtxB if its edges are a superset of vtxA's edges + // and it doesn't contradict the check for the index/key pointer. + boolean missingOne = false; + Iterator<String> iter = vtxIdsConn2A.iterator(); + while (iter.hasNext()) { + String vtxIdConn2A = iter.next(); + if (!vtxIdsConn2B.contains(vtxIdConn2A)) { + missingOne = true; + break; + } + } + if (!missingOne) { + if( onlyNodeThatIndexPointsToVidStr.equals("") + || onlyNodeThatIndexPointsToVidStr.equals(vidB.toString()) ){ + preferredVtx = vtxB; + } + } + } else { + preferredVtx = nullVtx; + } + + return (preferredVtx); + + } // end of pickOneOfTwoDupes() + + /** + * Check and process dupes. + * + * @param transId the trans id + * @param fromAppId the from app id + * @param g the g + * @param version the version + * @param nType the n type + * @param passedVertList the passed vert list + * @param dupeFixOn the dupe fix on + * @param deleteCandidateList the delete candidate list + * @param singleCommits the single commits + * @param alreadyFoundDupeGroups the already found dupe groups + * @return the array list + */ + private List<String> checkAndProcessDupes(String transId, + String fromAppId, Graph g, GraphTraversalSource source, String version, String nType, + List<Vertex> passedVertList, Boolean dupeFixOn, + Set<String> deleteCandidateList, Boolean singleCommits, + ArrayList<String> alreadyFoundDupeGroups, Loader loader ) { + + ArrayList<String> returnList = new ArrayList<>(); + ArrayList<Vertex> checkVertList = new ArrayList<>(); + ArrayList<String> alreadyFoundDupeVidArr = new ArrayList<>(); + Boolean noFilterList = true; + Iterator<String> afItr = alreadyFoundDupeGroups.iterator(); + while (afItr.hasNext()) { + String dupeGrpStr = afItr.next(); + String[] dupeArr = dupeGrpStr.split("\\|"); + int lastIndex = dupeArr.length - 1; + for (int i = 0; i < lastIndex; i++) { + // Note: we don't want the last one... + String vidString = dupeArr[i]; + alreadyFoundDupeVidArr.add(vidString); + noFilterList = false; + } + } + + // For a given set of Nodes that were found with a set of KEY + // Parameters, (nodeType + key data) we will + // see if we find any duplicate nodes that need to be cleaned up. Note - + // it's legit to have more than one + // node with the same key data if the nodes depend on a parent for + // uniqueness -- as long as the two nodes + // don't hang off the same Parent. 
+ // If we find duplicates, and we can figure out which of each set of + // duplicates is the one that we + // think should be preserved, we will record that. Whether we can tell + // which one should be + // preserved or not, we will return info about any sets of duplicates + // found. + // + // Each element in the returned arrayList might look like this: + // "1234|5678|keepVid=UNDETERMINED" (if there were 2 dupes, and we + // couldn't figure out which one to keep) + // or, "100017|200027|30037|keepVid=30037" (if there were 3 dupes and we + // thought the third one was the one that should survive) + + // Because of the way the calling code loops over stuff, we can get the + // same data multiple times - so we should + // not process any vertices that we've already seen. + + try { + Iterator<Vertex> pItr = passedVertList.iterator(); + while (pItr.hasNext()) { + Vertex tvx = pItr.next(); + String passedId = tvx.id().toString(); + if (noFilterList || !alreadyFoundDupeVidArr.contains(passedId)) { + // We haven't seen this one before - so we should check it. + checkVertList.add(tvx); + } + } + + if (checkVertList.size() < 2) { + // Nothing new to check. + return returnList; + } + + if (loader.introspectorFromName(nType).isTopLevel()) { + // If this was a node that does NOT depend on other nodes for + // uniqueness, and we + // found more than one node using its key -- record the found + // vertices as duplicates. + String dupesStr = ""; + for (int i = 0; i < checkVertList.size(); i++) { + dupesStr = dupesStr + + ((checkVertList.get(i))).id() + .toString() + "|"; + } + if (dupesStr != "") { + Vertex prefV = getPreferredDupe(transId, fromAppId, + source, checkVertList, version, loader); + if (prefV == null) { + // We could not determine which duplicate to keep + dupesStr = dupesStr + "KeepVid=UNDETERMINED"; + returnList.add(dupesStr); + } else { + dupesStr = dupesStr + "KeepVid=" + prefV.id(); + Boolean didRemove = false; + if (dupeFixOn) { + didRemove = deleteNonKeepersIfAppropriate(g, + dupesStr, prefV.id().toString(), + deleteCandidateList, singleCommits); + } + if (didRemove) { + dupeGrpsDeleted++; + } else { + // keep them on our list + returnList.add(dupesStr); + } + } + } + } else { + // More than one node have the same key fields since they may + // depend on a parent node for uniqueness. Since we're finding + // more than one, we want to check to see if any of the + // vertices that have this set of keys (and are the same nodeType) + // are also pointing at the same 'parent' node. + // Note: for a given set of key data, it is possible that there + // could be more than one set of duplicates. 
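To pin down the return format described at the top of this method, here are concrete values and the parse they imply (note the keeper token is written with a capital K, "KeepVid=", which is what deleteNonKeepersIfAppropriate() checks for):

```java
// "1234|5678|KeepVid=UNDETERMINED"    -> two dupes, no safe keeper
// "100017|200027|30037|KeepVid=30037" -> three dupes, vertex 30037 survives
String[] parts = "100017|200027|30037|KeepVid=30037".split("\\|");
// parts[0..length-2] are the duplicate vids; the last token names the keeper.
String keeperToken = parts[parts.length - 1];   // "KeepVid=30037"
String keepVid = keeperToken.split("=")[1];     // "30037"
```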
+			HashMap<String, ArrayList<Vertex>> vertsGroupedByParentHash = groupVertsByDepNodes(
+					transId, fromAppId, source, version, nType,
+					checkVertList, loader);
+			for (Map.Entry<String, ArrayList<Vertex>> entry : vertsGroupedByParentHash
+					.entrySet()) {
+				ArrayList<Vertex> thisParentsVertList = entry.getValue();
+				if (thisParentsVertList.size() > 1) {
+					// More than one vertex found with the same key info
+					// hanging off the same parent/dependent node
+					String dupesStr = "";
+					for (int i = 0; i < thisParentsVertList.size(); i++) {
+						dupesStr = dupesStr
+								+ thisParentsVertList.get(i).id() + "|";
+					}
+					if (!dupesStr.equals("")) {
+						Vertex prefV = getPreferredDupe(transId,
+								fromAppId, source, thisParentsVertList,
+								version, loader);
+
+						if (prefV == null) {
+							// We could not determine which duplicate to keep
+							dupesStr = dupesStr + "KeepVid=UNDETERMINED";
+							returnList.add(dupesStr);
+						} else {
+							Boolean didRemove = false;
+							dupesStr = dupesStr + "KeepVid="
+									+ prefV.id().toString();
+							if (dupeFixOn) {
+								didRemove = deleteNonKeepersIfAppropriate(
+										g, dupesStr, prefV.id().toString(),
+										deleteCandidateList, singleCommits);
+							}
+							if (didRemove) {
+								dupeGrpsDeleted++;
+							} else {
+								// keep them on our list
+								returnList.add(dupesStr);
+							}
+						}
+					}
+				}
+			}
+		}
+	} catch (Exception e) {
+		LoggingContext.statusCode(StatusCode.ERROR);
+		LoggingContext.responseCode(LoggingContext.DATA_ERROR);
+		LOGGER.warn(" >>> Threw an error in checkAndProcessDupes - just absorb this error and move on. ", e);
+	}
+
+	return returnList;
+
+	}// End of checkAndProcessDupes()
+
+	/**
+	 * Group verts by dep nodes.
+	 *
+	 * @param transId the trans id
+	 * @param fromAppId the from app id
+	 * @param g the g
+	 * @param version the version
+	 * @param nType the n type
+	 * @param passedVertList the passed vert list
+	 * @param loader the loader
+	 * @return the hash map
+	 * @throws AAIException the AAI exception
+	 */
+	private HashMap<String, ArrayList<Vertex>> groupVertsByDepNodes(
+			String transId, String fromAppId, GraphTraversalSource g, String version,
+			String nType, ArrayList<Vertex> passedVertList, Loader loader)
+			throws AAIException {
+		// Given a list of JanusGraph Vertices of one nodeType (see AAI-8956), group
+		// them together by the parent node they depend on.
+		// Ie. if given a list of ip address nodes (assumed to all have the
+		// same key info) they might sit under several different parent vertices.
+		// Under normal conditions, there would only be one per parent -- but
+		// we're trying to find duplicates, so we allow for the case where more
+		// than one is under the same parent node.
+
+		HashMap<String, ArrayList<Vertex>> retHash = new HashMap<String, ArrayList<Vertex>>();
+		if (loader.introspectorFromName(nType).isTopLevel()) {
+			// This method really should not have been called if this is not
+			// the kind of node that depends on a parent for uniqueness, so
+			// just return the empty hash.
+			return retHash;
+		}
+
+		// Find out what types of nodes the passed in nodes can depend on
+		ArrayList<String> depNodeTypeL = new ArrayList<>();
+		Collection<String> depNTColl = loader.introspectorFromName(nType).getDependentOn();
+		Iterator<String> ntItr = depNTColl.iterator();
+		while (ntItr.hasNext()) {
+			depNodeTypeL.add(ntItr.next());
+		}
+		// For each vertex, we want to find its depended-on/parent vertex so we
+		// can track which other vertices depend on that same parent.
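+		// The parent is found by walking the edge whose CONTAINS property marks
+		// containment (see getConnectedParent() below); a neighbor only counts
+		// as the parent when its aai-node-type is one of the dependent-on types.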
+ if (passedVertList != null) { + Iterator<Vertex> iter = passedVertList.iterator(); + while (iter.hasNext()) { + Vertex thisVert = iter.next(); + Vertex tmpParentVtx = getConnectedParent( g, thisVert ); + if( tmpParentVtx != null ) { + String parentNt = null; + Object obj = tmpParentVtx.<Object>property("aai-node-type").orElse(null); + if (obj != null) { + parentNt = obj.toString(); + } + if (depNTColl.contains(parentNt)) { + // This must be the parent/dependent node + String parentVid = tmpParentVtx.id().toString(); + if (retHash.containsKey(parentVid)) { + // add this vert to the list for this parent key + retHash.get(parentVid).add(thisVert); + } else { + // This is the first one we found on this parent + ArrayList<Vertex> vList = new ArrayList<>(); + vList.add(thisVert); + retHash.put(parentVid, vList); + } + } + } + } + } + + return retHash; + + }// end of groupVertsByDepNodes() + + /** + * Delete non keepers if appropriate. + * + * @param g the g + * @param dupeInfoString the dupe info string + * @param vidToKeep the vid to keep + * @param deleteCandidateList the delete candidate list + * @param singleCommits the single commits + * @return the boolean + */ + private Boolean deleteNonKeepersIfAppropriate(Graph g, + String dupeInfoString, String vidToKeep, + Set<String> deleteCandidateList, Boolean singleCommits) { + + Boolean deletedSomething = false; + // This assumes that the dupeInfoString is in the format of + // pipe-delimited vid's followed by + // ie. "3456|9880|keepVid=3456" + if (deleteCandidateList == null || deleteCandidateList.size() == 0) { + // No vid's on the candidate list -- so no deleting will happen on + // this run + return false; + } + + String[] dupeArr = dupeInfoString.split("\\|"); + ArrayList<String> idArr = new ArrayList<>(); + int lastIndex = dupeArr.length - 1; + for (int i = 0; i <= lastIndex; i++) { + if (i < lastIndex) { + // This is not the last entry, it is one of the dupes, + String vidString = dupeArr[i]; + idArr.add(vidString); + } else { + // This is the last entry which should tell us if we have a + // preferred keeper + String prefString = dupeArr[i]; + if (prefString.equals("KeepVid=UNDETERMINED")) { + // They sent us a bad string -- nothing should be deleted if + // no dupe could be tagged as preferred + return false; + } else { + // If we know which to keep, then the prefString should look + // like, "KeepVid=12345" + String[] prefArr = prefString.split("="); + if (prefArr.length != 2 || (!prefArr[0].equals("KeepVid"))) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.error("Bad format. Expecting KeepVid=999999"); + return false; + } else { + String keepVidStr = prefArr[1]; + if (idArr.contains(keepVidStr)) { + idArr.remove(keepVidStr); + + // So now, the idArr should just contain the vid's + // that we want to remove. + for (int x = 0; x < idArr.size(); x++) { + boolean okFlag = true; + String thisVid = idArr.get(x); + if (deleteCandidateList.contains(thisVid)) { + // This vid is a valid delete candidate from + // a prev. run, so we can remove it. 
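+						// Illustrative two-run flow (hypothetical vids): a prior run
+						// reported "3456|9880|KeepVid=3456" and put 9880 on the
+						// delete-candidate list; this run finds 9880 on that list
+						// and is therefore allowed to delete it here.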
+ try { + long longVertId = Long + .parseLong(thisVid); + Vertex vtx = g + .traversal().V(longVertId).next(); + vtx.remove(); + + if (singleCommits) { + // NOTE - the singleCommits option is not used in normal processing + g.tx().commit(); + g = AAIGraph.getInstance().getGraph().newTransaction(); + } + } catch (Exception e) { + okFlag = false; + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.error("ERROR trying to delete VID = " + thisVid + " " + LogFormatTools.getStackTop(e)); + } + if (okFlag) { + LOGGER.info(" DELETED VID = " + thisVid); + deletedSomething = true; + } + } + } + } else { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.error("ERROR - Vertex Id to keep not found in list of dupes. dupeInfoString = [" + + dupeInfoString + "]"); + return false; + } + } + }// else we know which one to keep + }// else last entry + }// for each vertex in a group + + return deletedSomething; + + }// end of deleteNonKeepersIfAppropriate() + + + /** + * Gets the node just using key params. + * + * @param transId the trans id + * @param fromAppId the from app id + * @param graph the graph + * @param nodeType the node type + * @param keyPropsHash the key props hash + * @param apiVersion the api version + * @return the node just using key params + * @throws AAIException the AAI exception + */ + public List <Vertex> getNodeJustUsingKeyParams( String transId, String fromAppId, GraphTraversalSource graph, String nodeType, + HashMap<String,Object> keyPropsHash, String apiVersion ) throws AAIException{ + + List <Vertex> retVertList = new ArrayList <> (); + + // We assume that all NodeTypes have at least one key-property defined. + // Note - instead of key-properties (the primary key properties), a user could pass + // alternate-key values if they are defined for the nodeType. + List<String> kName = new ArrayList<>(); + List<Object> kVal = new ArrayList<>(); + if( keyPropsHash == null || keyPropsHash.isEmpty() ) { + throw new AAIException("AAI_6120", " NO key properties passed for this getNodeJustUsingKeyParams() request. NodeType = [" + nodeType + "]. 
"); + } + + int i = -1; + for( Map.Entry<String, Object> entry : keyPropsHash.entrySet() ){ + i++; + kName.add(i, entry.getKey()); + kVal.add(i, entry.getValue()); + } + int topPropIndex = i; + Vertex tiV = null; + String propsAndValuesForMsg = ""; + Iterator <Vertex> verts = null; + + try { + if( topPropIndex == 0 ){ + propsAndValuesForMsg = " (" + kName.get(0) + " = " + kVal.get(0) + ") "; + verts= graph.V().has(kName.get(0),kVal.get(0)).has("aai-node-type",nodeType); + } + else if( topPropIndex == 1 ){ + propsAndValuesForMsg = " (" + kName.get(0) + " = " + kVal.get(0) + ", " + + kName.get(1) + " = " + kVal.get(1) + ") "; + verts = graph.V().has(kName.get(0),kVal.get(0)).has(kName.get(1),kVal.get(1)).has("aai-node-type",nodeType); + } + else if( topPropIndex == 2 ){ + propsAndValuesForMsg = " (" + kName.get(0) + " = " + kVal.get(0) + ", " + + kName.get(1) + " = " + kVal.get(1) + ", " + + kName.get(2) + " = " + kVal.get(2) + ") "; + verts= graph.V().has(kName.get(0),kVal.get(0)).has(kName.get(1),kVal.get(1)).has(kName.get(2),kVal.get(2)).has("aai-node-type",nodeType); + } + else if( topPropIndex == 3 ){ + propsAndValuesForMsg = " (" + kName.get(0) + " = " + kVal.get(0) + ", " + + kName.get(1) + " = " + kVal.get(1) + ", " + + kName.get(2) + " = " + kVal.get(2) + ", " + + kName.get(3) + " = " + kVal.get(3) + ") "; + verts= graph.V().has(kName.get(0),kVal.get(0)).has(kName.get(1),kVal.get(1)).has(kName.get(2),kVal.get(2)).has(kName.get(3),kVal.get(3)).has("aai-node-type",nodeType); + } + else { + throw new AAIException("AAI_6114", " We only support 4 keys per nodeType for now \n"); + } + } + catch( Exception ex ){ + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + LOGGER.error( " ERROR trying to get node for: [" + propsAndValuesForMsg + "]" + LogFormatTools.getStackTop(ex)); + } + + if( verts != null ){ + while( verts.hasNext() ){ + tiV = verts.next(); + retVertList.add(tiV); + } + } + + if( retVertList.size() == 0 ){ + LOGGER.debug("DEBUG No node found for nodeType = [" + nodeType + + "], propsAndVal = " + propsAndValuesForMsg ); + } + + return retVertList; + + }// End of getNodeJustUsingKeyParams() + + /** + * Show all edges for node. + * + * @param transId the trans id + * @param fromAppId the from app id + * @param tVert the t vert + * @return the array list + */ + private ArrayList <String> showAllEdgesForNode( String transId, String fromAppId, Vertex tVert ){ + + ArrayList <String> retArr = new ArrayList <> (); + Iterator <Edge> eI = tVert.edges(Direction.IN); + if( ! eI.hasNext() ){ + retArr.add("No IN edges were found for this vertex. "); + } + while( eI.hasNext() ){ + Edge ed = eI.next(); + String lab = ed.label(); + Vertex vtx; + if (tVert.equals(ed.inVertex())) { + vtx = ed.outVertex(); + } else { + vtx = ed.inVertex(); + } + if( vtx == null ){ + retArr.add(" >>> COULD NOT FIND VERTEX on the other side of this edge edgeId = " + ed.id() + " <<< "); + } + else { + String nType = vtx.<String>property("aai-node-type").orElse(null); + String vid = vtx.id().toString(); + retArr.add("Found an IN edge (" + lab + ") to this vertex from a [" + nType + "] node with VtxId = " + vid ); + + } + } + + eI = tVert.edges(Direction.OUT); + if( ! eI.hasNext() ){ + retArr.add("No OUT edges were found for this vertex. 
"); + } + while( eI.hasNext() ){ + Edge ed = eI.next(); + String lab = ed.label(); + Vertex vtx; + if (tVert.equals(ed.inVertex())) { + vtx = ed.outVertex(); + } else { + vtx = ed.inVertex(); + } + if( vtx == null ){ + retArr.add(" >>> COULD NOT FIND VERTEX on the other side of this edge edgeId = " + ed.id() + " <<< "); + } + else { + String nType = vtx.<String>property("aai-node-type").orElse(null); + String vid = vtx.id().toString(); + retArr.add("Found an OUT edge (" + lab + ") from this vertex to a [" + nType + "] node with VtxId = " + vid ); + } + } + return retArr; + } + + + /** + * Show properties for node. + * + * @param transId the trans id + * @param fromAppId the from app id + * @param tVert the t vert + * @return the array list + */ + private ArrayList <String> showPropertiesForNode( String transId, String fromAppId, Vertex tVert ){ + + ArrayList <String> retArr = new ArrayList <> (); + if( tVert == null ){ + retArr.add("null Node object passed to showPropertiesForNode()\n"); + } + else { + String nodeType = ""; + Object ob = tVert.<Object>property("aai-node-type").orElse(null); + if( ob == null ){ + nodeType = "null"; + } + else{ + nodeType = ob.toString(); + } + + retArr.add(" AAINodeType/VtxID for this Node = [" + nodeType + "/" + tVert.id() + "]"); + retArr.add(" Property Detail: "); + Iterator<VertexProperty<Object>> pI = tVert.properties(); + while( pI.hasNext() ){ + VertexProperty<Object> tp = pI.next(); + Object val = tp.value(); + retArr.add("Prop: [" + tp.key() + "], val = [" + val + "] "); + } + } + return retArr; + } + + + private ArrayList <Vertex> getConnectedNodes(GraphTraversalSource g, Vertex startVtx ) + throws AAIException { + + ArrayList <Vertex> retArr = new ArrayList <> (); + if( startVtx == null ){ + return retArr; + } + else { + GraphTraversal<Vertex, Vertex> modPipe = null; + modPipe = g.V(startVtx).both(); + if( modPipe != null && modPipe.hasNext() ){ + while( modPipe.hasNext() ){ + Vertex conVert = modPipe.next(); + retArr.add(conVert); + } + } + } + return retArr; + + }// End of getConnectedNodes() + + + private ArrayList <Vertex> getConnectedChildrenOfOneType( GraphTraversalSource g, + Vertex startVtx, String childNType ) throws AAIException{ + + ArrayList <Vertex> childList = new ArrayList <> (); + Iterator <Vertex> vertI = g.V(startVtx).union(__.outE().has(EdgeProperty.CONTAINS.toString(), AAIDirection.OUT.toString()).inV(), __.inE().has(EdgeProperty.CONTAINS.toString(), AAIDirection.IN.toString()).outV()); + + Vertex tmpVtx = null; + while( vertI != null && vertI.hasNext() ){ + tmpVtx = vertI.next(); + Object ob = tmpVtx.<Object>property("aai-node-type").orElse(null); + if (ob != null) { + String tmpNt = ob.toString(); + if( tmpNt.equals(childNType)){ + childList.add(tmpVtx); + } + } + } + + return childList; + + }// End of getConnectedChildrenOfOneType() + + + private Vertex getConnectedParent( GraphTraversalSource g, + Vertex startVtx ) throws AAIException{ + + Vertex parentVtx = null; + Iterator <Vertex> vertI = g.V(startVtx).union(__.inE().has(EdgeProperty.CONTAINS.toString(), AAIDirection.OUT.toString()).outV(), __.outE().has(EdgeProperty.CONTAINS.toString(), AAIDirection.IN.toString()).inV()); + + while( vertI != null && vertI.hasNext() ){ + // Note - there better only be one! + parentVtx = vertI.next(); + } + + return parentVtx; + + }// End of getConnectedParent() + + + private long figureWindowStartTime( int timeWindowMinutes ){ + // Given a window size, calculate what the start-timestamp would be. 
+ + if( timeWindowMinutes <= 0 ){ + // This just means that there is no window... + return 0; + } + long unixTimeNow = System.currentTimeMillis(); + long windowInMillis = timeWindowMinutes * 60L * 1000; + + long startTimeStamp = unixTimeNow - windowInMillis; + + return startTimeStamp; + } // End of figureWindowStartTime() + + + /** + * Collect Duplicate Sets for nodes that are NOT dependent on parent nodes. + * + * @param transId the trans id + * @param fromAppId the from app id + * @param g the g + * @param version the version + * @param nType the n type + * @param passedVertList the passed vert list + * @return the array list + */ + private ArrayList<ArrayList<Vertex>> getDupeSets4NonDepNodes( String transId, + String fromAppId, Graph g, String version, String nType, + ArrayList<Vertex> passedVertList, + ArrayList <String> keyPropNamesArr, + Loader loader ) { + + ArrayList<ArrayList<Vertex>> returnList = new ArrayList<ArrayList<Vertex>>(); + + // We've been passed a set of nodes that we want to check. + // They are all NON-DEPENDENT nodes of the same nodeType meaning that they should be + // unique in the DB based on their KEY DATA alone. So, if + // we group them by their key data - if any key has more than one + // vertex mapped to it, those vertices are dupes. + // + // When we find duplicates, we group them in an ArrayList (there can be + // more than one duplicate for one set of key data) + // Then these dupeSets are grouped up and returned. + // + + HashMap <String, ArrayList<String>> keyVals2VidHash = new HashMap <String, ArrayList<String>>(); + HashMap <String,Vertex> vtxHash = new HashMap <String,Vertex>(); + Iterator<Vertex> pItr = passedVertList.iterator(); + while (pItr.hasNext()) { + try { + Vertex tvx = pItr.next(); + String thisVid = tvx.id().toString(); + vtxHash.put(thisVid, tvx); + + // if there are more than one vertexId mapping to the same keyProps -- they are dupes + // we dont check till later since a set can contain more than 2. + String hKey = getNodeKeyValString( tvx, keyPropNamesArr ); + if( hKey.equals("") ){ + // When we have corrupted data, hKey comes back as an empty string + // We will just skip this entry since it is not a Dupe - it is + // corrupted data which should be picked up in other checks. + continue; + } + if( keyVals2VidHash.containsKey(hKey) ){ + // We've already seen this key + ArrayList <String> tmpVL = (ArrayList <String>)keyVals2VidHash.get(hKey); + tmpVL.add(thisVid); + keyVals2VidHash.put(hKey, tmpVL); + } + else { + // First time for this key + ArrayList <String> tmpVL = new ArrayList <String>(); + tmpVL.add(thisVid); + keyVals2VidHash.put(hKey, tmpVL); + } + } + catch (Exception e) { + LOGGER.warn(" >>> Threw an error in getDupeSets4NonDepNodes - just absorb this error and move on. ", e); + } + } + + for( Map.Entry<String, ArrayList<String>> entry : keyVals2VidHash.entrySet() ){ + ArrayList <String> vidList = entry.getValue(); + try { + if( !vidList.isEmpty() && vidList.size() > 1 ){ + // There are more than one vertex id's using the same key info + ArrayList <Vertex> vertList = new ArrayList <Vertex> (); + for (int i = 0; i < vidList.size(); i++) { + String tmpVid = vidList.get(i); + vertList.add(vtxHash.get(tmpVid)); + } + returnList.add(vertList); + } + } + catch (Exception e) { + LOGGER.warn(" >>> Threw an error in getDupeSets4NonDepNodes - just absorb this error and move on. 
", e); + } + + } + return returnList; + + }// End of getDupeSets4NonDepNodes() + + + /** + * Get values of the key properties for a node as a single string + * + * @param tvx the vertex to pull the properties from + * @param keyPropNamesArr collection of key prop names + * @return a String of concatenated values + */ + private String getNodeKeyValString( Vertex tvx, + ArrayList <String> keyPropNamesArr ) { + + String retString = ""; + Iterator <String> propItr = keyPropNamesArr.iterator(); + while( propItr.hasNext() ){ + String propName = propItr.next(); + if( tvx != null ){ + Object propValObj = tvx.property(propName).orElse(null); + if( propValObj == null ){ + LOGGER.warn(" >>> WARNING >>> could not find this key-property for this vertex. propName = [" + + propName + "], VID = " + tvx.id().toString() ); + } + else { + retString = " " + retString + propValObj.toString(); + } + } + } + return retString; + + }// End of getNodeKeyValString() + + + private String findJustOneUsingIndex( String transId, String fromAppId, + GraphTraversalSource gts, HashMap <String,Object> keyPropValsHash, + String nType, Long vidAL, Long vidBL, String apiVer){ + + // See if querying by JUST the key params (which should be indexed) brings back + // ONLY one of the two vertices. Ie. the db still has a pointer to one of them + // and the other one is sort of stranded. + String returnVid = ""; + + try { + List <Vertex> tmpVertList = getNodeJustUsingKeyParams( transId, fromAppId, gts, + nType, keyPropValsHash, apiVer ); + if( tmpVertList != null && tmpVertList.size() == 1 ){ + // We got just one - if it matches one of the ones we're looking + // for, then return that VID + Vertex tmpV = tmpVertList.get(0); + String thisVid = tmpV.id().toString(); + if( thisVid.equals(vidAL.toString()) || thisVid.equals(vidBL.toString()) ){ + String msg = " vid = " + thisVid + " is one of two that the DB can retrieve directly ------"; + //System.out.println(msg); + LOGGER.info(msg); + returnVid = thisVid; + } + } + } + catch ( AAIException ae ){ + String emsg = "Error trying to get node just by key " + ae.getMessage(); + //System.out.println(emsg); + LOGGER.error(emsg); + } + + return returnVid; + + }// End of findJustOneUsingIndex() + +}
\ No newline at end of file diff --git a/src/main/java/org/onap/aai/datagrooming/DataGroomingTasks.java b/src/main/java/org/onap/aai/datagrooming/DataGroomingTasks.java new file mode 100644 index 0000000..85a127f --- /dev/null +++ b/src/main/java/org/onap/aai/datagrooming/DataGroomingTasks.java @@ -0,0 +1,204 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.datagrooming; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.text.SimpleDateFormat; +import java.util.*; + +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.logging.ErrorLogHelper; +import org.onap.aai.logging.LoggingContext; +import org.onap.aai.setup.SchemaVersions; +import org.onap.aai.util.AAIConfig; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.annotation.PropertySource; +import org.springframework.scheduling.annotation.Scheduled; +import org.springframework.stereotype.Component; + +import com.att.eelf.configuration.EELFLogger; +import com.att.eelf.configuration.EELFManager; + +@Component +@PropertySource("file:${server.local.startpath}/etc/appprops/datatoolscrons.properties") +public class DataGroomingTasks { + + private static final EELFLogger LOGGER = EELFManager.getInstance().getLogger(DataGroomingTasks.class); + private static final SimpleDateFormat dateFormat = new SimpleDateFormat("HH:mm:ss"); + + @Autowired + private LoaderFactory loaderFactory; + + @Autowired + private SchemaVersions schemaVersions; + + @Scheduled(cron = "${datagroomingtasks.cron}" ) + public void groomingScheduleTask() throws AAIException, Exception { + + LoggingContext.init(); + LoggingContext.requestId(UUID.randomUUID().toString()); + LoggingContext.partnerName("AAI"); + LoggingContext.targetEntity("CronApp"); + LoggingContext.component("dataGrooming"); + LoggingContext.serviceName("groomingScheduleTask"); + LoggingContext.targetServiceName("groomingScheduleTask"); + LoggingContext.statusCode(LoggingContext.StatusCode.COMPLETE); + + + if(!"true".equals(AAIConfig.get("aai.disable.check.grooming.running", "false"))){ + if(checkIfDataGroomingIsRunning()){ + LOGGER.info("Data Grooming is already running on the system"); + return; + } + } + + LOGGER.info("Started cron job dataGrooming @ " + dateFormat.format(new Date())); + + Map<String, String> dataGroomingFlagMap = new HashMap<>(); + append("enableautofix" , AAIConfig.get("aai.datagrooming.enableautofix"), dataGroomingFlagMap); + 
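// Illustrative property entries backing the flags below (values hypothetical):
+		//     aai.datagrooming.enableautofix=false
+		//     aai.datagrooming.maxfixvalue=150
+		// Each append() copies one AAIConfig property into dataGroomingFlagMap.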
append("enabledupefixon" , AAIConfig.get("aai.datagrooming.enabledupefixon"), dataGroomingFlagMap); + append("enabledontfixorphans" , AAIConfig.get("aai.datagrooming.enabledontfixorphans"), dataGroomingFlagMap); + append("enabletimewindowminutes" , AAIConfig.get("aai.datagrooming.enabletimewindowminutes"), dataGroomingFlagMap); + append("enableskiphostcheck" , AAIConfig.get("aai.datagrooming.enableskiphostcheck"), dataGroomingFlagMap); + append("enablesleepminutes" , AAIConfig.get("aai.datagrooming.enablesleepminutes"), dataGroomingFlagMap); + append("enableedgesonly" , AAIConfig.get("aai.datagrooming.enableedgesonly"), dataGroomingFlagMap); + append("enableskipedgechecks" , AAIConfig.get("aai.datagrooming.enableskipedgechecks"), dataGroomingFlagMap); + append("enablemaxfix" , AAIConfig.get("aai.datagrooming.enablemaxfix"), dataGroomingFlagMap); + append("enablesinglecommits" , AAIConfig.get("aai.datagrooming.enablesinglecommits"), dataGroomingFlagMap); + append("enabledupecheckoff" , AAIConfig.get("aai.datagrooming.enabledupecheckoff"), dataGroomingFlagMap); + append("enableghost2checkoff" , AAIConfig.get("aai.datagrooming.enableghost2checkoff"), dataGroomingFlagMap); + append("enableghost2fixon" , AAIConfig.get("aai.datagrooming.enableghost2fixon"), dataGroomingFlagMap); + append("enablef" , AAIConfig.get("aai.datagrooming.enablef"), dataGroomingFlagMap); + append("fvalue" , AAIConfig.get("aai.datagrooming.fvalue"), dataGroomingFlagMap); + append("timewindowminutesvalue" , AAIConfig.get("aai.datagrooming.timewindowminutesvalue"), dataGroomingFlagMap); + append("sleepminutesvalue" , AAIConfig.get("aai.datagrooming.sleepminutesvalue"), dataGroomingFlagMap); + append("maxfixvalue" , AAIConfig.get("aai.datagrooming.maxfixvalue"), dataGroomingFlagMap); + + if(LOGGER.isDebugEnabled()){ + LOGGER.debug("DataGrooming Flag Values : "); + dataGroomingFlagMap.forEach((key, val) -> LOGGER.debug("Key: {} Value: {}", key, val)); + } + + List<String> paramsArray = new ArrayList(); + try { + if("true".equals(dataGroomingFlagMap.get("enableautofix"))){ + paramsArray.add("-autoFix"); + } + if("true".equals(dataGroomingFlagMap.get("enabledupefixon"))){ + paramsArray.add("-dupeFixOn"); + } + if("true".equals(dataGroomingFlagMap.get("enabledontfixorphans"))){ + paramsArray.add("-dontFixOrphans"); + } + if("true".equals(dataGroomingFlagMap.get("enabletimewindowminutes"))){ + paramsArray.add("-timeWindowMinutes"); + paramsArray.add(dataGroomingFlagMap.get("enabletimewindowminutesvalue")); + } + if("true".equals(dataGroomingFlagMap.get("enableskiphostcheck"))){ + paramsArray.add("-skipHostCheck"); + } + + if("true".equals(dataGroomingFlagMap.get("enablesleepminutes"))) { + paramsArray.add("-sleepMinutes"); + paramsArray.add(dataGroomingFlagMap.get("sleepminutesvalue")); + } + + if("true".equals(dataGroomingFlagMap.get("enableedgesonly"))){ + paramsArray.add("-edgesOnly"); + } + if("true".equals(dataGroomingFlagMap.get("enableskipedgechecks"))) { + paramsArray.add("-skipEdgeChecks"); + } + + if("true".equals(dataGroomingFlagMap.get("enablemaxfix"))) { + paramsArray.add("-maxFix"); + paramsArray.add(dataGroomingFlagMap.get("maxfixvalue")); + } + if("true".equals(dataGroomingFlagMap.get("enablesinglecommits"))){ + paramsArray.add("-singleCommits"); + } + if("true".equals(dataGroomingFlagMap.get("enabledupecheckoff"))){ + paramsArray.add("-dupeCheckOff"); + } + if("true".equals(dataGroomingFlagMap.get("enableghost2checkoff"))){ + paramsArray.add("-ghost2CheckOff"); + } + 
if("true".equals(dataGroomingFlagMap.get("enableghost2fixon"))){ + paramsArray.add("-ghost2FixOn"); + } + + if("true".equals(dataGroomingFlagMap.get("enablef"))) { + paramsArray.add("-f"); + paramsArray.add(dataGroomingFlagMap.get("fvalue")); + } + + DataGrooming dataGrooming = new DataGrooming(loaderFactory, schemaVersions); + String[] paramsList = paramsArray.toArray(new String[0]); + if (AAIConfig.get("aai.cron.enable.dataGrooming").equals("true")) { + dataGrooming.execute(paramsList); + System.out.println("returned from main method "); + } + } + catch (Exception e) { + ErrorLogHelper.logError("AAI_4000", "Exception running cron job for dataGrooming"+e.toString()); + LOGGER.info("AAI_4000", "Exception running cron job for dataGrooming"+e.toString()); + throw e; + } finally { + LOGGER.info("Ended cron job dataGrooming @ " + dateFormat.format(new Date())); + LoggingContext.clear(); + } + } + + private boolean checkIfDataGroomingIsRunning(){ + + Process process = null; + + int count = 0; + try { + process = new ProcessBuilder().command("bash", "-c", "ps -ef | grep '[D]ataGrooming'").start(); + InputStream is = process.getInputStream(); + InputStreamReader isr = new InputStreamReader(is); + BufferedReader br = new BufferedReader(isr); + + while (br.readLine() != null){ + count++; + } + + int exitVal = process.waitFor(); + LOGGER.info("Exit value of the dataGrooming check process: " + exitVal); + } catch (Exception e) { + e.printStackTrace(); + } + + if(count > 0){ + return true; + } else { + return false; + } + } + + private void append(String key, String value, Map<String, String> hashMap){ + hashMap.put(key, value); + } +} diff --git a/src/main/java/org/onap/aai/datasnapshot/DataSnapshot.java b/src/main/java/org/onap/aai/datasnapshot/DataSnapshot.java new file mode 100644 index 0000000..12815ee --- /dev/null +++ b/src/main/java/org/onap/aai/datasnapshot/DataSnapshot.java @@ -0,0 +1,835 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ + +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + * + * ECOMP is a trademark and service mark of AT&T Intellectual Property. + */ +package org.onap.aai.datasnapshot; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.SequenceInputStream; +import java.util.*; + +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +import java.util.concurrent.TimeUnit; +import org.apache.tinkerpop.gremlin.structure.Vertex; + +import org.apache.commons.configuration.PropertiesConfiguration; + +import org.apache.tinkerpop.gremlin.structure.io.IoCore; +import org.apache.tinkerpop.gremlin.structure.io.graphson.LegacyGraphSONReader; +import org.onap.aai.dbmap.AAIGraph; +import org.onap.aai.dbmap.AAIGraphConfig; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.logging.ErrorLogHelper; +import org.onap.aai.util.AAIConfig; +import org.onap.aai.util.AAIConstants; +import org.onap.aai.util.AAISystemExitUtil; +import org.onap.aai.util.FormatDate; + +import com.att.eelf.configuration.Configuration; +import com.att.eelf.configuration.EELFLogger; +import com.att.eelf.configuration.EELFManager; +import org.janusgraph.core.JanusGraph; +import org.janusgraph.core.JanusGraphFactory; +import org.janusgraph.core.util.JanusGraphCleanup; + +public class DataSnapshot { + + private static EELFLogger LOGGER; + + /* Using realtime d */ + private static final String REALTIME_DB = "realtime"; + + private static final Set<String> SNAPSHOT_RELOAD_COMMANDS = new HashSet<>(); + + static { + SNAPSHOT_RELOAD_COMMANDS.add("RELOAD_LEGACY_DATA"); + SNAPSHOT_RELOAD_COMMANDS.add("RELOAD_DATA"); + SNAPSHOT_RELOAD_COMMANDS.add("RELOAD_DATA_MULTI"); + } + + + /** + * The main method. 
+ * + * @param args + * the arguments + */ + public static void main(String[] args) { + + boolean success = true; + + // Set the logging file properties to be used by EELFManager + System.setProperty("aai.service.name", DataSnapshot.class.getSimpleName()); + Properties props = System.getProperties(); + props.setProperty(Configuration.PROPERTY_LOGGING_FILE_NAME, AAIConstants.AAI_LOGBACK_PROPS); + props.setProperty(Configuration.PROPERTY_LOGGING_FILE_PATH, AAIConstants.AAI_HOME_BUNDLECONFIG); + LOGGER = EELFManager.getInstance().getLogger(DataSnapshot.class); + Boolean dbClearFlag = false; + JanusGraph graph = null; + String command = "JUST_TAKE_SNAPSHOT"; // This is the default + String oldSnapshotFileName = ""; + + Long vertAddDelayMs = 1L; // Default value + Long edgeAddDelayMs = 1L; // Default value + + Long failureDelayMs = 50L; // Default value + Long retryDelayMs = 1500L; // Default value + int maxErrorsPerThread = 25; // Default value + Long vertToEdgeProcDelay = 9000L; // Default value + Long staggerThreadDelay = 5000L; // Default value + + int threadCount = 0; + Boolean debugFlag = false; + int debugAddDelayTime = 1; // Default to 1 millisecond + + boolean isExistingTitan = false; + + if (args.length >= 1) { + command = args[0]; + } + + if( SNAPSHOT_RELOAD_COMMANDS.contains(command)){ + if (args.length == 2) { + // If re-loading, they need to also pass the snapshot file name to use. + // We expected the file to be found in our snapshot directory. + oldSnapshotFileName = args[1]; + } + } + else if( command.equals("THREADED_SNAPSHOT") ){ + if (args.length == 2) { + // If doing a "threaded" snapshot, they need to specify how many threads to use + try { + threadCount = Integer.parseInt(args[1]); + } + catch ( NumberFormatException nfe ){ + ErrorLogHelper.logError("AAI_6128", "Bad (non-integer) threadCount passed to DataSnapshot [" + args[1] + "]"); + LOGGER.debug("Bad (non-integer) threadCount passed to DataSnapshot [" + args[1] + "]"); + AAISystemExitUtil.systemExitCloseAAIGraph(1); + } + if( threadCount < 1 || threadCount > 100 ){ + ErrorLogHelper.logError("AAI_6128", "Out of range (1-100) threadCount passed to DataSnapshot [" + args[1] + "]"); + LOGGER.debug("Out of range (1-100) threadCount passed to DataSnapshot [" + args[1] + "]"); + AAISystemExitUtil.systemExitCloseAAIGraph(1); + } + LOGGER.debug(" Will do Threaded Snapshot with threadCount = " + threadCount ); + } + else if (args.length == 3) { + // If doing a "threaded" snapshot, they need to specify how many threads to use + // They can also use debug mode if they pass the word "DEBUG" to do the nodes one at a time to see where it breaks. + try { + threadCount = Integer.parseInt(args[1]); + } + catch ( NumberFormatException nfe ){ + ErrorLogHelper.logError("AAI_6128", "Bad (non-integer) threadCount passed to DataSnapshot [" + args[1] + "]"); + LOGGER.debug("Bad (non-integer) threadCount passed to DataSnapshot [" + args[1] + "]"); + AAISystemExitUtil.systemExitCloseAAIGraph(1); + } + if( threadCount < 1 || threadCount > 100 ){ + ErrorLogHelper.logError("AAI_6128", "Out of range (1-100) threadCount passed to DataSnapshot [" + args[1] + "]"); + LOGGER.debug("Out of range (1-100) threadCount passed to DataSnapshot [" + args[1] + "]"); + AAISystemExitUtil.systemExitCloseAAIGraph(1); + } + if( args[2].equals("DEBUG") ){ + debugFlag = true; + } + LOGGER.debug(" Will do Threaded Snapshot with threadCount = " + threadCount + + ", and DEBUG mode set ON. 
"); + } + else if (args.length == 4) { + // If doing a "threaded" snapshot, they need to specify how many threads to use (param 1) + // They can also use debug mode if they pass the word "DEBUG" to do the nodes one (param 2) + // They can also pass a delayTimer - how many milliseconds to put between each node's ADD (param 3) + try { + threadCount = Integer.parseInt(args[1]); + } + catch ( NumberFormatException nfe ){ + ErrorLogHelper.logError("AAI_6128", "Bad (non-integer) threadCount passed to DataSnapshot [" + args[1] + "]"); + LOGGER.debug("Bad (non-integer) threadCount passed to DataSnapshot [" + args[1] + "]"); + AAISystemExitUtil.systemExitCloseAAIGraph(1); + } + if( threadCount < 1 || threadCount > 100 ){ + ErrorLogHelper.logError("AAI_6128", "Out of range (1-100) threadCount passed to DataSnapshot [" + args[1] + "]"); + LOGGER.debug("Out of range (1-100) threadCount passed to DataSnapshot [" + args[1] + "]"); + AAISystemExitUtil.systemExitCloseAAIGraph(1); + } + if( args[2].equals("DEBUG") ){ + debugFlag = true; + } + try { + debugAddDelayTime = Integer.parseInt(args[3]); + } + catch ( NumberFormatException nfe ){ + ErrorLogHelper.logError("AAI_6128", "Bad (non-integer) debugAddDelayTime passed to DataSnapshot [" + args[3] + "]"); + LOGGER.debug("Bad (non-integer) debugAddDelayTime passed to DataSnapshot [" + args[3] + "]"); + AAISystemExitUtil.systemExitCloseAAIGraph(1); + } + LOGGER.debug(" Will do Threaded Snapshot with threadCount = " + threadCount + + ", DEBUG mode ON and addDelayTimer = " + debugAddDelayTime + " mSec. "); + } + else { + ErrorLogHelper.logError("AAI_6128", "Wrong param count (should be 2,3 or 4) when using THREADED_SNAPSHOT."); + LOGGER.debug("Wrong param count (should be 2,3 or 4) when using THREADED_SNAPSHOT."); + AAISystemExitUtil.systemExitCloseAAIGraph(1); + } + } + else if( command.equals("MULTITHREAD_RELOAD") ){ + // Note - this will use as many threads as the snapshot file is + // broken up into. (up to a limit) + if (args.length == 2) { + // Since they are re-loading, they need to pass the snapshot file name to use. + // We expected the file to be found in our snapshot directory. Note - if + // it is a multi-part snapshot, then this should be the root of the name. + // We will be using the default delay timers. + oldSnapshotFileName = args[1]; + } + else if (args.length == 7) { + // Since they are re-loading, they need to pass the snapshot file name to use. + // We expected the file to be found in our snapshot directory. Note - if + // it is a multi-part snapshot, then this should be the root of the name. 
+				oldSnapshotFileName = args[1];
+				// They should be passing the timers in this order:
+				//    vertDelay, edgeDelay, failureDelay, retryDelay
+				vertAddDelayMs = Long.parseLong(args[2]);
+				edgeAddDelayMs = Long.parseLong(args[3]);
+				failureDelayMs = Long.parseLong(args[4]);
+				retryDelayMs = Long.parseLong(args[5]);
+				try {
+					maxErrorsPerThread = Integer.parseInt(args[6]);
+				}
+				catch ( NumberFormatException nfe ){
+					ErrorLogHelper.logError("AAI_6128", "Bad (non-integer) maxErrorsPerThread passed to DataSnapshot [" + args[6] + "]");
+					LOGGER.debug("Bad (non-integer) maxErrorsPerThread passed to DataSnapshot [" + args[6] + "]");
+					AAISystemExitUtil.systemExitCloseAAIGraph(1);
+				}
+				if( maxErrorsPerThread < 1 ){
+					ErrorLogHelper.logError("AAI_6128", "Out of range (>0) maxErrorsPerThread passed to DataSnapshot [" + args[6] + "]");
+					LOGGER.debug("Out of range (>0) maxErrorsPerThread passed to DataSnapshot [" + args[6] + "]");
+					AAISystemExitUtil.systemExitCloseAAIGraph(1);
+				}
+			}
+			else {
+				ErrorLogHelper.logError("AAI_6128", "Wrong param count (should be either 2 or 7) when using MULTITHREAD_RELOAD.");
+				LOGGER.debug("Wrong param count (should be 2 or 7) when using MULTITHREAD_RELOAD.");
+				AAISystemExitUtil.systemExitCloseAAIGraph(1);
+			}
+		}
+		else if (command.equals("CLEAR_ENTIRE_DATABASE")) {
+			if (args.length >= 2) {
+				oldSnapshotFileName = args[1];
+			}
+			if (args.length == 3) {
+				String titanFlag = args[2];
+				if ("titan".equalsIgnoreCase(titanFlag)) {
+					isExistingTitan = true;
+				}
+			}
+		}
+
+		ByteArrayOutputStream baos = new ByteArrayOutputStream();
+		try {
+
+			AAIConfig.init();
+			ErrorLogHelper.loadProperties();
+			LOGGER.debug("Command = " + command + ", oldSnapshotFileName = [" + oldSnapshotFileName + "].");
+			String targetDir = AAIConstants.AAI_HOME + AAIConstants.AAI_FILESEP + "logs" + AAIConstants.AAI_FILESEP + "data" + AAIConstants.AAI_FILESEP + "dataSnapshots";
+
+			// Make sure the dataSnapshots directory is there
+			new File(targetDir).mkdirs();
+
+			LOGGER.debug(" ---- NOTE --- about to open graph (takes a little while) ");
+
+			if (command.equals("JUST_TAKE_SNAPSHOT")) {
+				// ------------------------------------------
+				// They just want to take a snapshot.
+				// ------------------------------------------
+				verifyGraph(AAIGraph.getInstance().getGraph());
+				FormatDate fd = new FormatDate("yyyyMMddHHmm", "GMT");
+				String dteStr = fd.getDateTime();
+				String newSnapshotOutFname = targetDir + AAIConstants.AAI_FILESEP + "dataSnapshot.graphSON." + dteStr;
+				graph = AAIGraph.getInstance().getGraph();
+
+				graph.io(IoCore.graphson()).writeGraph(newSnapshotOutFname);
+
+				LOGGER.debug("Snapshot written to " + newSnapshotOutFname);
+
+			}
+			else if (command.equals("THREADED_SNAPSHOT")) {
+				// ---------------------------------------------------------------------
+				// They want the creation of the snapshot to be spread out via threads
+				// ---------------------------------------------------------------------
+
+				FormatDate fd = new FormatDate("yyyyMMddHHmm", "GMT");
+				String dteStr = fd.getDateTime();
+				String newSnapshotOutFname = targetDir + AAIConstants.AAI_FILESEP + "dataSnapshot.graphSON." + dteStr;
+				verifyGraph(AAIGraph.getInstance().getGraph());
+				graph = AAIGraph.getInstance().getGraph();
+				LOGGER.debug(" Successfully got the Graph instance. 
"); + long timeA = System.nanoTime(); + + LOGGER.debug(" Need to divide vertexIds across this many threads: " + threadCount ); + HashMap <String,ArrayList> vertListHash = new HashMap <String,ArrayList> (); + for( int t = 0; t < threadCount; t++ ){ + ArrayList <Vertex> vList = new ArrayList <Vertex> (); + String tk = "" + t; + vertListHash.put( tk, vList); + } + LOGGER.debug("Count how many nodes are in the db. "); + long totalVertCount = graph.traversal().V().count().next(); + LOGGER.debug(" Total Count of Nodes in DB = " + totalVertCount + "."); + long nodesPerFile = totalVertCount / threadCount; + LOGGER.debug(" Thread count = " + threadCount + ", each file will get (roughly): " + nodesPerFile + " nodes."); + long timeA2 = System.nanoTime(); + long diffTime = timeA2 - timeA; + long minCount = TimeUnit.NANOSECONDS.toMinutes(diffTime); + long secCount = TimeUnit.NANOSECONDS.toSeconds(diffTime) - (60 * minCount); + LOGGER.debug(" -- To count all vertices in DB it took: " + + minCount + " minutes, " + secCount + " seconds " ); + + long vtxIndex = 0; + int currentTNum = 0; + String currentTKey = "0"; + long thisThrIndex = 0; + Iterator <Vertex> vtxItr = graph.vertices(); + while( vtxItr.hasNext() ){ + // Divide up all the vertices so we can process them on different threads + vtxIndex++; + thisThrIndex++; + if( (thisThrIndex > nodesPerFile) && (currentTNum < threadCount -1) ){ + // We will need to start adding to the Hash for the next thread + currentTNum++; + currentTKey = "" + currentTNum; + thisThrIndex = 0; + } + (vertListHash.get(currentTKey)).add(vtxItr.next()); + } + + long timeB = System.nanoTime(); + diffTime = timeB - timeA2; + minCount = TimeUnit.NANOSECONDS.toMinutes(diffTime); + secCount = TimeUnit.NANOSECONDS.toSeconds(diffTime) - (60 * minCount); + LOGGER.debug(" -- To Loop over all vertices, and put them into sub-Arrays it took: " + + minCount + " minutes, " + secCount + " seconds " ); + + // Need to print out each set of vertices using it's own thread + ArrayList <Thread> threadArr = new ArrayList <Thread> (); + for( int thNum = 0; thNum < threadCount; thNum++ ){ + String thNumStr = "" + thNum; + String subFName = newSnapshotOutFname + ".P" + thNumStr; + Thread thr = new Thread(new PrintVertexDetails(graph, subFName, vertListHash.get(thNumStr), + debugFlag, debugAddDelayTime) ); + thr.start(); + threadArr.add(thr); + } + + // Make sure all the threads finish before moving on. + for( int thNum = 0; thNum < threadCount; thNum++ ){ + if( null != threadArr.get(thNum) ){ + (threadArr.get(thNum)).join(); + } + } + + long timeC = System.nanoTime(); + diffTime = timeC - timeB; + minCount = TimeUnit.NANOSECONDS.toMinutes(diffTime); + secCount = TimeUnit.NANOSECONDS.toSeconds(diffTime) - (60 * minCount); + LOGGER.debug(" -- To write all the data out to snapshot files, it took: " + + minCount + " minutes, " + secCount + " seconds " ); + + + } else if( command.equals("MULTITHREAD_RELOAD") ){ + // --------------------------------------------------------------------- + // They want the RELOAD of the snapshot to be spread out via threads + // NOTE - it will only use as many threads as the number of files the + // snapshot is written to. Ie. if you have a single-file snapshot, + // then this will be single-threaded. 
+ // + ArrayList <File> snapFilesArr = getFilesToProcess(targetDir, oldSnapshotFileName, false); + int fCount = snapFilesArr.size(); + Iterator <File> fItr = snapFilesArr.iterator(); + + JanusGraph graph1 = AAIGraph.getInstance().getGraph(); + long timeStart = System.nanoTime(); + + HashMap <String,String> old2NewVertIdMap = new <String,String> HashMap (); + + // We're going to try loading in the vertices - without edges or properties + // using Separate threads + + ExecutorService executor = Executors.newFixedThreadPool(fCount); + List<Future<HashMap<String,String>>> list = new ArrayList<Future<HashMap<String,String>>>(); + + for( int i=0; i < fCount; i++ ){ + File f = snapFilesArr.get(i); + String fname = f.getName(); + String fullSnapName = targetDir + AAIConstants.AAI_FILESEP + fname; + Thread.sleep(staggerThreadDelay); // Stagger the threads a bit + LOGGER.debug(" -- Read file: [" + fullSnapName + "]"); + LOGGER.debug(" -- Call the PartialVertexLoader to just load vertices ----"); + LOGGER.debug(" -- vertAddDelayMs = " + vertAddDelayMs + + ", failureDelayMs = " + failureDelayMs + ", retryDelayMs = " + retryDelayMs + + ", maxErrorsPerThread = " + maxErrorsPerThread ); + Callable <HashMap<String,String>> vLoader = new PartialVertexLoader(graph1, fullSnapName, + vertAddDelayMs, failureDelayMs, retryDelayMs, maxErrorsPerThread, LOGGER); + Future <HashMap<String,String>> future = (Future<HashMap<String, String>>) executor.submit(vLoader); + + // add Future to the list, we can get return value using Future + list.add(future); + LOGGER.debug(" -- Starting PartialDbLoad VERT_ONLY thread # "+ i ); + } + + threadCount = 0; + int threadFailCount = 0; + for(Future<HashMap<String,String>> fut : list){ + threadCount++; + try { + old2NewVertIdMap.putAll(fut.get()); + LOGGER.debug(" -- back from PartialVertexLoader. returned thread # " + threadCount + + ", current size of old2NewVertMap is: " + old2NewVertIdMap.size() ); + } + catch (InterruptedException e) { + threadFailCount++; + e.printStackTrace(); + } + catch (ExecutionException e) { + threadFailCount++; + e.printStackTrace(); + } + } + + executor.shutdown(); + + if( threadFailCount > 0 ) { + String emsg = " FAILURE >> " + threadFailCount + " Vertex-loader thread(s) failed to complete successfully. "; + LOGGER.debug(emsg); + throw new Exception( emsg ); + } + + long timeX = System.nanoTime(); + long diffTime = timeX - timeStart; + long minCount = TimeUnit.NANOSECONDS.toMinutes(diffTime); + long secCount = TimeUnit.NANOSECONDS.toSeconds(diffTime) - (60 * minCount); + LOGGER.debug(" -- To reload just the vertex ids from the snapshot files, it took: " + + minCount + " minutes, " + secCount + " seconds " ); + + // Give the DB a little time to chew on all those vertices + Thread.sleep(vertToEdgeProcDelay); + + // ---------------------------------------------------------------------------------------- + LOGGER.debug("\n\n\n -- Now do the edges/props ----------------------"); + // ---------------------------------------------------------------------------------------- + + + // We're going to try loading in the edges and missing properties + // Note - we're passing the whole oldVid2newVid mapping to the PartialPropAndEdgeLoader + // so that the String-updates to the GraphSON will happen in the threads instead of + // here in the un-threaded calling method. 
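+				// e.g. old2NewVertIdMap maps the vertex id recorded in the snapshot
+				// file to the id JanusGraph assigns on reload (ids hypothetical):
+				//     "40964152" -> "24576100"
+				// Edges in the GraphSON still name the old ids, so the edge-loader
+				// threads translate them through this map before adding each edge.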
+ executor = Executors.newFixedThreadPool(fCount); + ArrayList<Future<ArrayList<String>>> listEdg = new ArrayList<Future<ArrayList<String>>>(); + for( int i=0; i < fCount; i++ ){ + File f = snapFilesArr.get(i); + String fname = f.getName(); + String fullSnapName = targetDir + AAIConstants.AAI_FILESEP + fname; + Thread.sleep(staggerThreadDelay); // Stagger the threads a bit + LOGGER.debug(" -- Read file: [" + fullSnapName + "]"); + LOGGER.debug(" -- Call the PartialPropAndEdgeLoader for Properties and EDGEs ----"); + LOGGER.debug(" -- edgeAddDelayMs = " + vertAddDelayMs + + ", failureDelayMs = " + failureDelayMs + ", retryDelayMs = " + retryDelayMs + + ", maxErrorsPerThread = " + maxErrorsPerThread ); + + Callable eLoader = new PartialPropAndEdgeLoader(graph1, fullSnapName, + edgeAddDelayMs, failureDelayMs, retryDelayMs, + old2NewVertIdMap, maxErrorsPerThread, LOGGER); + Future <ArrayList<String>> future = (Future<ArrayList<String>>) executor.submit(eLoader); + + //add Future to the list, we can get return value using Future + listEdg.add(future); + LOGGER.debug(" -- Starting PartialPropAndEdge thread # "+ i ); + } + + threadCount = 0; + for(Future<ArrayList<String>> fut : listEdg){ + threadCount++; + try{ + fut.get(); // DEBUG -- should be doing something with the return value if it's not empty - ie. errors + LOGGER.debug(" -- back from PartialPropAndEdgeLoader. thread # " + threadCount ); + } + catch (InterruptedException e) { + threadFailCount++; + e.printStackTrace(); + } + catch (ExecutionException e) { + threadFailCount++; + e.printStackTrace(); + } + } + + executor.shutdown(); + + if( threadFailCount > 0 ) { + String emsg = " FAILURE >> " + threadFailCount + " Property/Edge-loader thread(s) failed to complete successfully. "; + LOGGER.debug(emsg); + throw new Exception( emsg ); + } + + // This is needed so we can see the data committed by the called threads + graph1.tx().commit(); + + long timeEnd = System.nanoTime(); + diffTime = timeEnd - timeX; + minCount = TimeUnit.NANOSECONDS.toMinutes(diffTime); + secCount = TimeUnit.NANOSECONDS.toSeconds(diffTime) - (60 * minCount); + LOGGER.debug(" -- To reload the edges and properties from snapshot files, it took: " + + minCount + " minutes, " + secCount + " seconds " ); + + long totalDiffTime = timeEnd - timeStart; + long totalMinCount = TimeUnit.NANOSECONDS.toMinutes(totalDiffTime); + long totalSecCount = TimeUnit.NANOSECONDS.toSeconds(totalDiffTime) - (60 * totalMinCount); + LOGGER.debug(" -- TOTAL multi-threaded reload time: " + + totalMinCount + " minutes, " + totalSecCount + " seconds " ); + + } else if (command.equals("CLEAR_ENTIRE_DATABASE")) { + // ------------------------------------------------------------------ + // They are calling this to clear the db before re-loading it + // later + // ------------------------------------------------------------------ + + // First - make sure the backup file(s) they will be using can be + // found and has(have) data. + // getFilesToProcess makes sure the file(s) exist and have some data. + getFilesToProcess(targetDir, oldSnapshotFileName, true); + + LOGGER.debug("\n>>> WARNING <<<< "); + LOGGER.debug(">>> All data and schema in this database will be removed at this point. <<<"); + LOGGER.debug(">>> Processing will begin in 5 seconds. <<<"); + LOGGER.debug(">>> WARNING <<<< "); + + try { + // Give them a chance to back out of this + Thread.sleep(5000); + } catch (java.lang.InterruptedException ie) { + LOGGER.debug(" DB Clearing has been aborted. 
"); + AAISystemExitUtil.systemExitCloseAAIGraph(1); + } + + LOGGER.debug(" Begin clearing out old data. "); + String rtConfig = AAIConstants.REALTIME_DB_CONFIG; + String serviceName = System.getProperty("aai.service.name", "NA"); + LOGGER.debug("Getting new configs for clearig"); + PropertiesConfiguration propertiesConfiguration = new AAIGraphConfig.Builder(rtConfig).forService(serviceName).withGraphType(REALTIME_DB).buildConfiguration(); + if(isExistingTitan){ + LOGGER.debug("Existing DB is Titan"); + propertiesConfiguration.setProperty("graph.titan-version","1.0.0"); + } + LOGGER.debug("Open New Janus Graph"); + JanusGraph janusGraph = JanusGraphFactory.open(propertiesConfiguration); + verifyGraph(janusGraph); + + if(isExistingTitan){ + JanusGraphFactory.drop(janusGraph); + } else { + janusGraph.close(); + JanusGraphCleanup.clear(janusGraph); + } + LOGGER.debug(" Done clearing data. "); + LOGGER.debug(">>> IMPORTANT - NOTE >>> you need to run the SchemaGenerator (use GenTester) before "); + LOGGER.debug(" reloading data or the data will be put in without indexes. "); + dbClearFlag = true; + LOGGER.debug("All done clearing DB"); + + } else if (command.equals("RELOAD_LEGACY_DATA")) { + // ------------------------------------------------------------------- + // They want to restore the database from an old snapshot file + // ------------------------------------------------------------------- + verifyGraph(AAIGraph.getInstance().getGraph()); + graph = AAIGraph.getInstance().getGraph(); + if (oldSnapshotFileName.equals("")) { + String emsg = "No oldSnapshotFileName passed to DataSnapshot when RELOAD_LEGACY_DATA used."; + LOGGER.debug(emsg); + AAISystemExitUtil.systemExitCloseAAIGraph(1); + } + String oldSnapshotFullFname = targetDir + AAIConstants.AAI_FILESEP + oldSnapshotFileName; + File f = new File(oldSnapshotFullFname); + if (!f.exists()) { + String emsg = "oldSnapshotFile " + oldSnapshotFullFname + " could not be found."; + LOGGER.debug(emsg); + AAISystemExitUtil.systemExitCloseAAIGraph(1); + } else if (!f.canRead()) { + String emsg = "oldSnapshotFile " + oldSnapshotFullFname + " could not be read."; + LOGGER.debug(emsg); + AAISystemExitUtil.systemExitCloseAAIGraph(1); + } else if (f.length() == 0) { + String emsg = "oldSnapshotFile " + oldSnapshotFullFname + " had no data."; + LOGGER.debug(emsg); + AAISystemExitUtil.systemExitCloseAAIGraph(1); + } + + LOGGER.debug("We will load data IN from the file = " + oldSnapshotFullFname); + LOGGER.debug(" Begin reloading JanusGraph 0.5 data. "); + + LegacyGraphSONReader lgr = LegacyGraphSONReader.build().create(); + InputStream is = new FileInputStream(oldSnapshotFullFname); + lgr.readGraph(is, graph); + + LOGGER.debug("Completed the inputGraph command, now try to commit()... 
"); + graph.tx().commit(); + LOGGER.debug("Completed reloading JanusGraph 0.5 data."); + + long vCount = graph.traversal().V().count().next(); + LOGGER.debug("A little after repopulating from an old snapshot, we see: " + vCount + " vertices in the db."); + } else if (command.equals("RELOAD_DATA")) { + // ------------------------------------------------------------------- + // They want to restore the database from an old snapshot file + // ------------------------------------------------------------------- + verifyGraph(AAIGraph.getInstance().getGraph()); + graph = AAIGraph.getInstance().getGraph(); + if (oldSnapshotFileName.equals("")) { + String emsg = "No oldSnapshotFileName passed to DataSnapshot when RELOAD_DATA used."; + LOGGER.debug(emsg); + AAISystemExitUtil.systemExitCloseAAIGraph(1); + } + String oldSnapshotFullFname = targetDir + AAIConstants.AAI_FILESEP + oldSnapshotFileName; + File f = new File(oldSnapshotFullFname); + if (!f.exists()) { + String emsg = "oldSnapshotFile " + oldSnapshotFullFname + " could not be found."; + LOGGER.debug(emsg); + AAISystemExitUtil.systemExitCloseAAIGraph(1); + } else if (!f.canRead()) { + String emsg = "oldSnapshotFile " + oldSnapshotFullFname + " could not be read."; + LOGGER.debug(emsg); + AAISystemExitUtil.systemExitCloseAAIGraph(1); + } else if (f.length() == 0) { + String emsg = "oldSnapshotFile " + oldSnapshotFullFname + " had no data."; + LOGGER.debug(emsg); + AAISystemExitUtil.systemExitCloseAAIGraph(1); + } + + LOGGER.debug("We will load data IN from the file = " + oldSnapshotFullFname); + LOGGER.debug(" Begin reloading data. "); + graph.io(IoCore.graphson()).readGraph(oldSnapshotFullFname); + LOGGER.debug("Completed the inputGraph command, now try to commit()... "); + graph.tx().commit(); + LOGGER.debug("Completed reloading data."); + + long vCount = graph.traversal().V().count().next(); + + LOGGER.debug("A little after repopulating from an old snapshot, we see: " + vCount + " vertices in the db."); + + } else if (command.equals("RELOAD_DATA_MULTI")) { + // ------------------------------------------------------------------- + // They want to restore the database from a group of snapshot files + // Note - this uses multiple snapshot files, but runs single-threaded. 
+			// -------------------------------------------------------------------
+			verifyGraph(AAIGraph.getInstance().getGraph());
+			graph = AAIGraph.getInstance().getGraph();
+
+			ArrayList<File> snapFilesArr = getFilesToProcess(targetDir, oldSnapshotFileName, false);
+
+			long timeA = System.nanoTime();
+
+			int fCount = snapFilesArr.size();
+			Vector<InputStream> inputStreamsV = new Vector<>();
+			for( int i = 0; i < fCount; i++ ){
+				File f = snapFilesArr.get(i);
+				String fname = f.getName();
+				if (!f.canRead()) {
+					String emsg = "oldSnapshotFile " + fname + " could not be read.";
+					LOGGER.debug(emsg);
+					AAISystemExitUtil.systemExitCloseAAIGraph(1);
+				} else if (f.length() == 0) {
+					String emsg = "oldSnapshotFile " + fname + " had no data.";
+					LOGGER.debug(emsg);
+					AAISystemExitUtil.systemExitCloseAAIGraph(1);
+				}
+				String fullFName = targetDir + AAIConstants.AAI_FILESEP + fname;
+				InputStream fis = new FileInputStream(fullFName);
+				inputStreamsV.add(fis);
+			}
+			// SequenceInputStream reads each snapshot part in turn, consuming the
+			// Enumeration returned by inputStreamsV.elements(); closing it also
+			// closes the underlying file streams.
+			LOGGER.debug("Begin loading data from " + fCount + " files -----");
+			try (InputStream sis = new SequenceInputStream(inputStreamsV.elements())) {
+				graph.io(IoCore.graphson()).reader().create().readGraph(sis, graph);
+			}
+			LOGGER.debug("Completed the inputGraph command, now try to commit()... ");
+			graph.tx().commit();
+			LOGGER.debug(" >> Completed reloading data.");
+
+			long vCount = graph.traversal().V().count().next();
+			LOGGER.debug("A little after repopulating from an old snapshot, we see: " + vCount + " vertices in the db.");
+
+			long timeB = System.nanoTime();
+			long diffTime = timeB - timeA;
+			long minCount = TimeUnit.NANOSECONDS.toMinutes(diffTime);
+			long secCount = TimeUnit.NANOSECONDS.toSeconds(diffTime) - (60 * minCount);
+			LOGGER.debug(" -- To Reload this snapshot, it took: " +
+					minCount + " minutes, " + secCount + " seconds " );
+
+
+		} else {
+			String emsg = "Bad command passed to DataSnapshot: [" + command + "]";
+			LOGGER.debug(emsg);
+			AAISystemExitUtil.systemExitCloseAAIGraph(1);
+		}
+
+	} catch (AAIException e) {
+		ErrorLogHelper.logError("AAI_6128", e.getMessage());
+		LOGGER.error("Encountered an exception during the datasnapshot: ", e);
+		e.printStackTrace();
+		success = false;
+	} catch (Exception ex) {
+		ErrorLogHelper.logError("AAI_6128", ex.getMessage());
+		LOGGER.error("Encountered an exception during the datasnapshot: ", ex);
+		ex.printStackTrace();
+		success = false;
+	} finally {
+		if (!dbClearFlag && graph != null) {
+			// Any changes that worked correctly should have already done
+			// their commits.
+			if(!"true".equals(System.getProperty("org.onap.aai.graphadmin.started"))) {
+				if (graph.isOpen()) {
+					graph.tx().rollback();
+					graph.close();
+				}
+			}
+		}
+		try {
+			baos.close();
+		} catch (IOException iox) {
+			// ignore close failure
+		}
+	}
+
+	if(success){
+		AAISystemExitUtil.systemExitCloseAAIGraph(0);
+	} else {
+		AAISystemExitUtil.systemExitCloseAAIGraph(1);
+	}
+
+	}// End of main()
+
+
+	private static ArrayList<File> getFilesToProcess(String targetDir, String oldSnapshotFileName, boolean doingClearDb)
+		throws Exception {
+
+		if( oldSnapshotFileName == null || oldSnapshotFileName.equals("") ){
+			String emsg = "No oldSnapshotFileName passed to DataSnapshot for Reload. ";
+			if( doingClearDb ) {
+				emsg = "No oldSnapshotFileName passed to DataSnapshot. Needed when Clearing the db in case we need a backup. 
"; + } + LOGGER.debug(emsg); + throw new Exception( emsg ); + } + + ArrayList <File> snapFilesArrList = new ArrayList <File> (); + + // First, we'll assume that this is a multi-file snapshot and + // look for names based on that. + String thisSnapPrefix = oldSnapshotFileName + ".P"; + File fDir = new File(targetDir); // Snapshot directory + File[] allFilesArr = fDir.listFiles(); + for (File snapFile : allFilesArr) { + String snapFName = snapFile.getName(); + if( snapFName.startsWith(thisSnapPrefix)){ + if (!snapFile.canRead()) { + String emsg = "oldSnapshotFile " + snapFName + " could not be read."; + LOGGER.debug(emsg); + throw new Exception (emsg); + } else if (snapFile.length() == 0) { + String emsg = "oldSnapshotFile " + snapFName + " had no data."; + LOGGER.debug(emsg); + throw new Exception (emsg); + } + snapFilesArrList.add(snapFile); + } + } + + if( snapFilesArrList.isEmpty() ){ + // Multi-file snapshot check did not find files, so this may + // be a single-file snapshot. + String oldSnapshotFullFname = targetDir + AAIConstants.AAI_FILESEP + oldSnapshotFileName; + File f = new File(oldSnapshotFullFname); + if (!f.exists()) { + String emsg = "oldSnapshotFile " + oldSnapshotFullFname + " could not be found."; + LOGGER.debug(emsg); + throw new Exception (emsg); + } else if (!f.canRead()) { + String emsg = "oldSnapshotFile " + oldSnapshotFullFname + " could not be read."; + LOGGER.debug(emsg); + throw new Exception (emsg); + } else if (f.length() == 0) { + String emsg = "oldSnapshotFile " + oldSnapshotFullFname + " had no data."; + LOGGER.debug(emsg); + throw new Exception (emsg); + } + snapFilesArrList.add(f); + } + + if( snapFilesArrList.isEmpty() ){ + // Still haven't found anything.. that was not a good file name. + String fullFName = targetDir + AAIConstants.AAI_FILESEP + thisSnapPrefix; + String emsg = "oldSnapshotFile " + fullFName + "* could not be found."; + LOGGER.debug(emsg); + throw new Exception(emsg); + } + + return snapFilesArrList; + } + + + public static void verifyGraph(JanusGraph graph) { + + if (graph == null) { + String emsg = "Not able to get a graph object in DataSnapshot.java\n"; + LOGGER.debug(emsg); + AAISystemExitUtil.systemExitCloseAAIGraph(1); + } + + } + + +}
\ No newline at end of file diff --git a/src/main/java/org/onap/aai/datasnapshot/DataSnapshotTasks.java b/src/main/java/org/onap/aai/datasnapshot/DataSnapshotTasks.java new file mode 100644 index 0000000..cc9ca97 --- /dev/null +++ b/src/main/java/org/onap/aai/datasnapshot/DataSnapshotTasks.java @@ -0,0 +1,115 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.datasnapshot; + +import java.io.BufferedReader; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.text.SimpleDateFormat; +import java.util.*; + +import org.onap.aai.datagrooming.DataGrooming; +import org.onap.aai.datagrooming.DataGroomingTasks; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.logging.ErrorLogHelper; +import org.onap.aai.logging.LoggingContext; +import org.onap.aai.util.AAIConfig; +import org.springframework.context.annotation.PropertySource; +import org.springframework.scheduling.annotation.Scheduled; +import org.springframework.stereotype.Component; +import com.att.eelf.configuration.EELFLogger; +import com.att.eelf.configuration.EELFManager; + +@Component +@PropertySource("file:${server.local.startpath}/etc/appprops/datatoolscrons.properties") +public class DataSnapshotTasks { + + private static final EELFLogger LOGGER = EELFManager.getInstance().getLogger(DataSnapshotTasks.class); + private static final SimpleDateFormat dateFormat = new SimpleDateFormat("HH:mm:ss"); + + @Scheduled(cron = "${datasnapshottasks.cron}" ) + public void snapshotScheduleTask() throws AAIException, Exception { + + LoggingContext.init(); + LoggingContext.requestId(UUID.randomUUID().toString()); + LoggingContext.partnerName("AAI"); + LoggingContext.targetEntity("CronApp"); + LoggingContext.component("dataSnapshot"); + LoggingContext.serviceName("snapshotScheduleTask"); + LoggingContext.targetServiceName("snapshotScheduleTask"); + LoggingContext.statusCode(LoggingContext.StatusCode.COMPLETE); + + if(!"true".equals(AAIConfig.get("aai.disable.check.snapshot.running", "false"))){ + if(checkIfDataSnapshotIsRunning()){ + LOGGER.info("Data Snapshot is already running on the system"); + return; + } + } + + LOGGER.info("Started cron job dataSnapshot @ " + dateFormat.format(new Date())); + try { + if (AAIConfig.get("aai.cron.enable.dataSnapshot").equals("true")) { + DataSnapshot dataSnapshot = new DataSnapshot(); + String [] dataSnapshotParms = AAIConfig.get("aai.datasnapshot.params", "JUST_TAKE_SNAPSHOT").split("\\s+"); + LOGGER.info("DataSnapshot Params {}", Arrays.toString(dataSnapshotParms)); + dataSnapshot.main(dataSnapshotParms); + } + } + 
catch (Exception e) {
+			ErrorLogHelper.logError("AAI_4000", "Exception running cron job for DataSnapshot " + e.toString());
+			LOGGER.info("AAI_4000", "Exception running cron job for DataSnapshot " + e.toString());
+			throw e;
+		} finally {
+			LOGGER.info("Ended cron job dataSnapshot @ " + dateFormat.format(new Date()));
+			LoggingContext.clear();
+		}
+
+	}
+
+	private boolean checkIfDataSnapshotIsRunning(){
+
+		Process process = null;
+
+		int count = 0;
+		try {
+			// the "[D]" in the grep pattern keeps the grep command itself from
+			// matching its own entry in the ps output
+			process = new ProcessBuilder().command("bash", "-c", "ps -ef | grep '[D]ataSnapshot'").start();
+			InputStream is = process.getInputStream();
+			InputStreamReader isr = new InputStreamReader(is);
+			BufferedReader br = new BufferedReader(isr);
+
+			while (br.readLine() != null){
+				count++;
+			}
+
+			int exitVal = process.waitFor();
+			LOGGER.info("Exit value of the dataSnapshot check process: " + exitVal);
+		} catch (Exception e) {
+			e.printStackTrace();
+		}
+
+		// any matching ps line means another DataSnapshot process is running
+		return count > 0;
+	}
+}
+
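+// Illustrative configuration consumed above (example values, not shipped defaults):
+//   etc/appprops/datatoolscrons.properties:
+//     datasnapshottasks.cron=0 45 11 * * *
+//   AAIConfig properties:
+//     aai.cron.enable.dataSnapshot=true
+//     aai.datasnapshot.params=JUST_TAKE_SNAPSHOT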
\ No newline at end of file diff --git a/src/main/java/org/onap/aai/datasnapshot/PartialPropAndEdgeLoader.java b/src/main/java/org/onap/aai/datasnapshot/PartialPropAndEdgeLoader.java new file mode 100644 index 0000000..af858ae --- /dev/null +++ b/src/main/java/org/onap/aai/datasnapshot/PartialPropAndEdgeLoader.java @@ -0,0 +1,421 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.datasnapshot; + +import java.io.BufferedReader; +import java.io.FileReader; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.concurrent.Callable; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.janusgraph.core.JanusGraph; +import org.apache.tinkerpop.gremlin.structure.Edge; +import org.json.JSONArray; +import org.json.JSONObject; + +import com.att.eelf.configuration.EELFLogger; + + + + +public class PartialPropAndEdgeLoader implements Callable <ArrayList<String>>{ + + private EELFLogger LOGGER; + + private JanusGraph jg; + private String fName; + private Long edgeAddDelayMs; + private Long retryDelayMs; + private Long failureDelayMs; + private HashMap<String,String> old2NewVidMap; + private int maxAllowedErrors; + + + + public PartialPropAndEdgeLoader (JanusGraph graph, String fn, Long edgeDelay, Long failureDelay, Long retryDelay, + HashMap<String,String> vidMap, int maxErrors, EELFLogger elfLog ){ + jg = graph; + fName = fn; + edgeAddDelayMs = edgeDelay; + failureDelayMs = failureDelay; + retryDelayMs = retryDelay; + old2NewVidMap = vidMap; + maxAllowedErrors = maxErrors; + LOGGER = elfLog; + } + + + public ArrayList<String> call() throws Exception { + + // This is a partner to the "PartialVertexLoader" code. + // That code loads in vertex-id's/vertex-label's for a + // multi-file data snapshot. + // This code assumes that the all vertex-id's are now in the target db. + // This code loads vertex properties and edges for a + // multi-file data snapshot (the same one that loaded + // the vertex-ids). + // + + + // NOTE - We will be loading parameters and edges for one node at a time so that problems can be + // identified or ignored or re-tried instead of causing the entire load to fail. 
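+		//
+		//      Each input line is expected to hold one graphSON vertex, e.g.
+		//      (illustrative, truncated sample):
+		//        {"id":100995128,"label":"vertex","outE":{...},"properties":{...}}
+		//      The old "id" is translated through old2NewVidMap before any edges
+		//      or properties are attached (see translateThisVid() below).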
+ // + // Return an arrayList of Strings to give info on what nodes encountered problems + + int entryCount = 0; + int retryCount = 0; + int failureCount = 0; + int retryFailureCount = 0; + HashMap <String,String> failedAttemptHash = new HashMap <String,String> (); + ArrayList <String> failedAttemptInfo = new ArrayList <String> (); + + int passNum = 1; + try( BufferedReader br = new BufferedReader(new FileReader(fName))) { + // loop through the file lines and do PUT for each vertex or the edges depending on what the loadtype is + for(String origLine; (origLine = br.readLine()) != null; ) { + entryCount++; + Thread.sleep(edgeAddDelayMs); // Space the edge requests out a little + + String errInfoStr = processThisLine(origLine, passNum); + if( !errInfoStr.equals("") ){ + // There was a problem with this line + String vidStr = getTheVidForThisLine(origLine); + // We'll use the failedAttemptHash to reTry this item + failedAttemptHash.put(vidStr,origLine); + failedAttemptInfo.add(errInfoStr); + failureCount++; + if( failureCount > maxAllowedErrors ) { + LOGGER.debug(">>> Abandoning PartialPropAndEdgeLoader() because " + + "Max Allowed Error count was exceeded for this thread. (max = " + + maxAllowedErrors + ". "); + throw new Exception(" ERROR - Max Allowed Error count exceeded for this thread. (max = " + maxAllowedErrors + ". "); + } + Thread.sleep(failureDelayMs); // take a little nap if it failed + } + } // End of looping over each line + if( br != null ){ + br.close(); + } + } + catch (Exception e) { + LOGGER.debug(" --- Failed in the main loop for Buffered-Reader item # " + entryCount + + ", fName = " + fName ); + LOGGER.debug(" --- msg = " + e.getMessage() ); + throw e; + } + + // --------------------------------------------------------------------------- + // Now Re-Try any failed requests that might have Failed on the first pass. + // --------------------------------------------------------------------------- + passNum++; + try { + for (String failedVidStr : failedAttemptHash.keySet()) { + // Take a little nap, and retry this failed attempt + LOGGER.debug("DEBUG >> We will sleep for " + retryDelayMs + " and then RETRY any failed edge/property ADDs. "); + Thread.sleep(retryDelayMs); + retryCount++; + Long failedVidL = Long.parseLong(failedVidStr); + // When an Edge/Property Add fails, we store the whole (translated) graphSON line as the data in the failedAttemptHash + // We're really just doing a GET of this one vertex here... + String jsonLineToRetry = failedAttemptHash.get(failedVidStr); + String errInfoStr = processThisLine(jsonLineToRetry, passNum); + if( !errInfoStr.equals("") ){ + // There was a problem with this line + String translatedVidStr = getTheVidForThisLine(jsonLineToRetry); + failedAttemptHash.put(translatedVidStr,jsonLineToRetry); + failedAttemptInfo.add(errInfoStr); + retryFailureCount++; + if( retryFailureCount > maxAllowedErrors ) { + LOGGER.debug(">>> Abandoning PartialPropAndEdgeLoader() because " + + "Max Allowed Error count was exceeded while doing retries for this thread. (max = " + + maxAllowedErrors + ". "); + throw new Exception(" ERROR - Max Allowed Error count exceeded for this thread. (max = " + maxAllowedErrors + ". "); + } + Thread.sleep(failureDelayMs); // take a little nap if it failed + } + } // End of looping over each failed line + } + catch (Exception e) { + LOGGER.debug(" -- error in RETRY block. 
ErrorMsg = [" + e.getMessage() + "]" ); + throw e; + } + + LOGGER.debug(">>> After Processing in PartialPropAndEdgeLoader() " + + entryCount + " records processed. " + failureCount + " records failed. " + + retryCount + " RETRYs processed. " + retryFailureCount + " RETRYs failed. "); + + return failedAttemptInfo; + + }// end of call() + + + + private String translateThisVid(String oldVid) throws Exception { + + if( old2NewVidMap == null ){ + throw new Exception(" ERROR - null old2NewVidMap found in translateThisVid. "); + } + + if( old2NewVidMap.containsKey(oldVid) ){ + return old2NewVidMap.get(oldVid); + } + else { + throw new Exception(" ERROR - could not find VID translation for original VID = " + oldVid ); + } + } + + + private String getTheVidForThisLine(String graphSonLine) throws Exception { + + if( graphSonLine == null ){ + throw new Exception(" ERROR - null graphSonLine passed to getTheVidForThisLine. "); + } + + // We are assuming that the graphSonLine has the vertexId as the first ID: + // {"id":100995128,"label":"vertex","inE":{"hasPinterface":[{"id":"7lgg0e-2... etc... + + // The vertexId for this line is the numeric part after the initial {"id":xxxxx up to the first comma + int x = graphSonLine.indexOf(':') + 1; + int y = graphSonLine.indexOf(','); + String initialVid = graphSonLine.substring(x,y); + if( initialVid != null && !initialVid.isEmpty() && initialVid.matches("^[0-9]+$") ){ + return initialVid; + } + else { + throw new Exception(" ERROR - could not determine initial VID for graphSonLine: " + graphSonLine ); + } + } + + + private String processThisLine(String graphSonLine, int passNum){ + + String passInfo = ""; + if( passNum > 1 ) { + passInfo = " >> RETRY << pass # " + passNum + " "; + } + + JSONObject jObj = new JSONObject(); + String originalVid = ""; + + try{ + jObj = new JSONObject(graphSonLine); + originalVid = jObj.get("id").toString(); + } + catch ( Exception e ){ + LOGGER.debug(" -- Could not convert line to JsonObject [ " + graphSonLine + "]" ); + LOGGER.debug(" -- ErrorMsg = [" +e.getMessage() + "]"); + + return(" DEBUG -a- JSON translation exception when processing this line ---"); + //xxxxxDEBUGxxxxx I think we put some info on the return String and then return? + } + + // ----------------------------------------------------------------------------------------- + // Note - this assumes that any vertices referred to by an edge will already be in the DB. + // ----------------------------------------------------------------------------------------- + Vertex dbVtx = null; + + String newVidStr = ""; + Long newVidL = 0L; + try { + newVidStr = translateThisVid(originalVid); + newVidL = Long.parseLong(newVidStr); + } + catch ( Exception e ){ + LOGGER.debug(" -- " + passInfo + " translate VertexId before adding edges failed for this: vtxId = " + + originalVid + ". ErrorMsg = [" +e.getMessage() + "]"); + + return(" DEBUG -b- there VID-translation error when processing this line ---"); + //xxxxxDEBUGxxxxx I think we put some info on the return String and then return? + } + + + try { + dbVtx = getVertexFromDbForVid(newVidStr); + } + catch ( Exception e ){ + LOGGER.debug(" -- " + passInfo + " READ Vertex from DB before adding edges failed for this: vtxId = " + originalVid + + ", newVidId = " + newVidL + ". ErrorMsg = [" +e.getMessage() + "]"); + + return(" -- there was an error processing this line --- Line = [" + graphSonLine + "]"); + //xxxxxxDEBUGxxxx I think we put some info on the return String and then return? 
+ } + + + String edResStr = processEdgesForVtx( jObj, dbVtx, passInfo, originalVid ); + if( edResStr.equals("") ){ + // We will commit the edges by themselves in case the properties stuff below fails + try { + jg.tx().commit(); + } + catch ( Exception e ){ + LOGGER.debug(" -- " + passInfo + " COMMIT FAILED adding EDGES for this vertex: vtxId = " + + originalVid + ". ErrorMsg = [" +e.getMessage() + "]"); + //xxxxxxxxxx I think we put some info on the return String and then return? + return(" DEBUG -d- there was an error doing the commit while processing edges for this line ---"); + } + } + + // Add the properties that we didn't have when we added the 'bare-bones' vertex + String pResStr = processPropertiesForVtx( jObj, dbVtx, passInfo, originalVid ); + if( pResStr.equals("") ){ + try { + jg.tx().commit(); + return ""; + } + catch ( Exception e ){ + LOGGER.debug(" -- " + passInfo + " COMMIT FAILED adding Properties for this vertex: vtxId = " + + originalVid + ". ErrorMsg = [" +e.getMessage() + "]"); + //xxxxxxxxxx I think we put some info on the return String and then return? + return(" DEBUG -e- there was an error doing the commit while processing Properties for this line ---"); + } + } + else { + LOGGER.debug("DEBUG " + passInfo + " Error processing Properties for this vertex: vtxId = " + originalVid ); + + //xxxxxxxxxx I think we put some info on the return String and then return? + return(" DEBUG -f- there was an error while processing Properties for this line ---"); + } + } + + + private String processPropertiesForVtx( JSONObject jObj, Vertex dbVtx, String passInfo, String originalVid ){ + + try { + JSONObject propsOb = (JSONObject) jObj.get("properties"); + Iterator <String> propsItr = propsOb.keys(); + while( propsItr.hasNext() ){ + String pKey = propsItr.next(); + JSONArray propsDetArr = propsOb.getJSONArray(pKey); + for( int i=0; i< propsDetArr.length(); i++ ){ + JSONObject prop = propsDetArr.getJSONObject(i); + String val = prop.getString("value"); + dbVtx.property(pKey, val); //DEBUGjojo -- val is always String here.. which is not right -------------------DEBUG + } + } + + } + catch ( Exception e ){ + LOGGER.debug(" -- " + passInfo + " failure getting/setting properties for: vtxId = " + + originalVid + ". ErrorMsg = [" + e.getMessage() + "]"); + //xxxDEBUGxxxxxxx I think we put some info on the return String and then return? + return(" DEBUG -g- there was an error adding properties while processing this line ---"); + + } + + return ""; + } + + + private Vertex getVertexFromDbForVid( String vtxIdStr ) throws Exception { + Vertex thisVertex = null; + Long vtxIdL = 0L; + + try { + vtxIdL = Long.parseLong(vtxIdStr); + Iterator <Vertex> vItr = jg.vertices(vtxIdL); + // Note - we only expect to find one vertex found for this ID. + while( vItr.hasNext() ){ + thisVertex = vItr.next(); + } + } + catch ( Exception e ){ + String emsg = "Error finding vertex for vid = " + vtxIdStr + "[" + e.getMessage() + "]"; + throw new Exception ( emsg ); + } + + if( thisVertex == null ){ + String emsg = "Could not find vertex for passed vid = " + vtxIdStr; + throw new Exception ( emsg ); + } + + return thisVertex; + } + + + private String processEdgesForVtx( JSONObject jObj, Vertex dbVtx, String passInfo, String originalVid ){ + + // Process the edges for this vertex -- but, just the "OUT" ones so edges don't get added twice (once from + // each side of the edge). + JSONObject edOb = null; + try { + edOb = (JSONObject) jObj.get("outE"); + } + catch (Exception e){ + // There were no OUT edges. This is OK. 
+ return ""; + } + + try { + if( edOb == null ){ + // There were no OUT edges. This is OK. Not all nodes have out edges. + return ""; + } + Iterator <String> edItr = edOb.keys(); + while( edItr.hasNext() ){ + String eLabel = edItr.next(); + String inVid = ""; // Note - this should really be a Long? + JSONArray edArr = edOb.getJSONArray(eLabel); + for( int i=0; i< edArr.length(); i++ ){ + JSONObject eObj = edArr.getJSONObject(i); + String inVidStr = eObj.get("inV").toString(); + String translatedInVidStr = translateThisVid(inVidStr); + Vertex newInVertex = getVertexFromDbForVid(translatedInVidStr); + + // Note - addEdge automatically adds the edge in the OUT direction from the + // 'anchor' node that the call is being made from. + Edge tmpE = dbVtx.addEdge(eLabel, newInVertex); + JSONObject ePropsOb = null; + try { + ePropsOb = (JSONObject) eObj.get("properties"); + } + catch (Exception e){ + // NOTE - model definition related edges do not have edge properties. That is OK. + // Ie. when a model-element node has an "isA" edge to a "model-ver" node, that edge does + // not have edge properties on it. + } + if( ePropsOb != null ){ + Iterator <String> ePropsItr = ePropsOb.keys(); + while( ePropsItr.hasNext() ){ + String pKey = ePropsItr.next(); + tmpE.property(pKey, ePropsOb.getString(pKey)); + } + } + } + } + + } + catch ( Exception e ){ + String msg = " -- " + passInfo + " failure adding edge for: original vtxId = " + + originalVid + ". ErrorMsg = [" +e.getMessage() + "]"; + LOGGER.debug( " -- " + msg ); + //xxxxxxDEBUGxxxx I think we might need some better info on the return String to return? + LOGGER.debug(" -- now going to return/bail out of processEdgesForVtx" ); + return(" >> " + msg ); + + } + + return ""; + } + + +} + + diff --git a/src/main/java/org/onap/aai/datasnapshot/PartialVertexLoader.java b/src/main/java/org/onap/aai/datasnapshot/PartialVertexLoader.java new file mode 100644 index 0000000..387f45e --- /dev/null +++ b/src/main/java/org/onap/aai/datasnapshot/PartialVertexLoader.java @@ -0,0 +1,223 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.datasnapshot; + +import java.io.BufferedReader; +import java.io.FileReader; +import java.util.HashMap; +import java.util.concurrent.Callable; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONReader; +import org.janusgraph.core.JanusGraph; + +import com.att.eelf.configuration.EELFLogger; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; + + + +public class PartialVertexLoader implements Callable<HashMap<String,String>>{ + + private EELFLogger LOGGER; + + private JanusGraph jg; + private String fName; + private Long vertAddDelayMs; + private Long failurePauseMs; + private Long retryDelayMs; + private int maxAllowedErrors; + + public PartialVertexLoader (JanusGraph graph, String fn, Long vertDelay, Long failurePause, + Long retryDelay, int maxErrors, EELFLogger elfLog ){ + jg = graph; + fName = fn; + vertAddDelayMs = vertDelay; + failurePauseMs = failurePause; + retryDelayMs = retryDelay; + maxAllowedErrors = maxErrors; + LOGGER = elfLog; + } + + public HashMap<String,String> call() throws Exception { + + // NOTE - we will be loading one node at a time so that bad nodes can be ignored instead of causing the + // entire load to fail. + // + int entryCount = 0; + int retryCount = 0; + int failureCount = 0; + int retryFailureCount = 0; + HashMap <String,String> failedAttemptHash = new HashMap <String,String> (); + HashMap <String,String> old2NewVtxIdHash = new HashMap <String,String> (); + GraphSONReader gsr = GraphSONReader.build().create(); + + + // Read this file into a JSON object + JsonParser parser = new JsonParser(); + + try( BufferedReader br = new BufferedReader(new FileReader(fName))) { + // loop through the file lines and do PUT for each vertex or the edges depending on what the loadtype is + for(String line; (line = br.readLine()) != null; ) { + entryCount++; + Object ob = parser.parse(line); + JsonObject jObj = (JsonObject) ob; + // NOTE - we will need to keep track of how the newly generated vid's map + // to the old ones so we can aim the edges correctly later. + + // ---- Note -- This ONLY loads the vertexId and the label for each vertex ------------- + Thread.sleep(vertAddDelayMs); + + String oldVtxIdStr = jObj.get("id").getAsString(); + String vtxLabelStr = jObj.get("label").getAsString(); + try { + Vertex tmpV = jg.addVertex(vtxLabelStr); + String newVtxIdStr = tmpV.id().toString(); + old2NewVtxIdHash.put(oldVtxIdStr, newVtxIdStr); + } + catch ( Exception e ){ + failureCount++; + Thread.sleep(failurePauseMs); // Slow down if things are failing + LOGGER.debug(" >> addVertex FAILED for vtxId = " + oldVtxIdStr + ", label = [" + + vtxLabelStr + "]. ErrorMsg = [" + e.getMessage() + "]" ); + //e.printStackTrace(); + failedAttemptHash.put(oldVtxIdStr, vtxLabelStr); + if( failureCount > maxAllowedErrors ) { + LOGGER.debug(" >>> Abandoning PartialVertexLoader() because " + + "Max Allowed Error count was exceeded for this thread. (max = " + + maxAllowedErrors + ". "); + throw new Exception(" ERROR - Max Allowed Error count exceeded for this thread. (max = " + maxAllowedErrors + ". "); + } + else { + continue; + } + } + try { + jg.tx().commit(); + } + catch ( Exception e ){ + failureCount++; + Thread.sleep(failurePauseMs); // Slow down if things are failing + LOGGER.debug(" -- COMMIT FAILED for Vtx ADD for vtxId = " + oldVtxIdStr + ", label = [" + + vtxLabelStr + "]. 
ErrorMsg = [" +e.getMessage() + "]" ); + //e.printStackTrace(); + failedAttemptHash.put(oldVtxIdStr, vtxLabelStr); + if( failureCount > maxAllowedErrors ) { + LOGGER.debug(">>> Abandoning PartialVertexLoader() because " + + "Max Allowed Error count was exceeded for this thread. (max = " + + maxAllowedErrors + ". "); + throw new Exception(" ERROR - Max Allowed Error count exceeded for this thread. (max = " + maxAllowedErrors + ". "); + } + else { + continue; + } + } + + } // End of looping over each line + + if( br != null ){ + br.close(); + } + } + catch (Exception e) { + LOGGER.debug(" --- Failed in the main loop for Buffered-Reader item # " + entryCount + + ", fName = " + fName ); + LOGGER.debug(" --- msg = " + e.getMessage() ); + e.printStackTrace(); + throw e; + } + + // --------------------------------------------------------------------------- + // Now Re-Try any failed requests that might have Failed on the first pass. + // --------------------------------------------------------------------------- + try { + for (String failedVidStr : failedAttemptHash.keySet()) { + // Take a little nap, and retry this failed attempt + LOGGER.debug("DEBUG >> We will sleep for " + retryDelayMs + " and then RETRY any failed vertex ADDs. "); + Thread.sleep(retryDelayMs); + + retryCount++; + // When a vertex Add fails we store the label as the data in the failedAttemptHash. + String failedLabel = failedAttemptHash.get(failedVidStr); + LOGGER.debug("DEBUG >> RETRY << " + + failedVidStr + ", label = " + failedLabel ); + try { + Vertex tmpV = jg.addVertex(failedLabel); + String newVtxIdStr = tmpV.id().toString(); + old2NewVtxIdHash.put(failedVidStr, newVtxIdStr); + } + catch ( Exception e ){ + retryFailureCount++; + LOGGER.debug(" -- addVertex FAILED for RETRY for vtxId = " + + failedVidStr + ", label = [" + failedLabel + + "]. ErrorMsg = [" +e.getMessage() + "]" ); + e.printStackTrace(); + if( retryFailureCount > maxAllowedErrors ) { + LOGGER.debug(">>> Abandoning PartialVertexLoader() because " + + "Max Allowed Error count was exceeded for this thread. (max = " + + maxAllowedErrors + ". "); + throw new Exception(" ERROR - Max Allowed Error count exceeded for this thread. (max = " + maxAllowedErrors + ". "); + } + else { + continue; + } + } + try { + jg.tx().commit(); + // If this worked, we can take it off of the failed list + failedAttemptHash.remove(failedVidStr); + } + catch ( Exception e ){ + retryFailureCount++; + LOGGER.debug(" -- COMMIT FAILED for RETRY for vtxId = " + failedVidStr + + ", label = [" + failedLabel + "]. ErrorMsg = [" + e.getMessage() + "]" ); + e.printStackTrace(); + if( retryFailureCount > maxAllowedErrors ) { + LOGGER.debug(">>> Abandoning PartialVertexLoader() because " + + "Max Allowed Error count was exceeded for this thread. (max = " + + maxAllowedErrors + ". "); + throw new Exception(" ERROR - Max Allowed Error count exceeded for this thread. (max = " + maxAllowedErrors + ". "); + } + else { + continue; + } + } + } // End of looping over failed attempt hash and doing retries + + } + catch ( Exception e ){ + LOGGER.debug(" -- error in RETRY block. ErrorMsg = [" +e.getMessage() + "]" ); + e.printStackTrace(); + throw e; + } + + // This would need to be properly logged... + LOGGER.debug(">>> After Processing in PartialVertexLoader(): " + + entryCount + " records processed. " + failureCount + " records failed. " + + retryCount + " RETRYs processed. " + retryFailureCount + " RETRYs failed. 
"); + + return old2NewVtxIdHash; + + }// end of call() + + + +} + + diff --git a/src/main/java/org/onap/aai/datasnapshot/PrintVertexDetails.java b/src/main/java/org/onap/aai/datasnapshot/PrintVertexDetails.java new file mode 100644 index 0000000..791ae15 --- /dev/null +++ b/src/main/java/org/onap/aai/datasnapshot/PrintVertexDetails.java @@ -0,0 +1,107 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.datasnapshot; + +import java.io.FileOutputStream; +import java.util.ArrayList; +import java.util.Iterator; +import org.apache.tinkerpop.gremlin.structure.Direction; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.apache.tinkerpop.gremlin.structure.io.IoCore; +import org.janusgraph.core.JanusGraph; + + +public class PrintVertexDetails implements Runnable{ + + //private static EELFLogger LOGGER; + + private JanusGraph jg; + private String fname; + private ArrayList<Vertex> vtxList; + private Boolean debugOn; + private int debugDelayMs; + + public PrintVertexDetails (JanusGraph graph, String fn, ArrayList<Vertex> vL, Boolean debugFlag, int debugDelay){ + jg = graph; + fname = fn; + vtxList = vL; + debugOn = debugFlag; + debugDelayMs = debugDelay; + } + + public void run(){ + if( debugOn ){ + // This is much slower, but sometimes we need to find out which single line is causing a failure + try{ + int okCount = 0; + int failCount = 0; + Long debugDelayMsL = new Long(debugDelayMs); + FileOutputStream subFileStr = new FileOutputStream(fname); + Iterator <Vertex> vSubItr = vtxList.iterator(); + while( vSubItr.hasNext() ){ + Long vertexIdL = 0L; + String aaiNodeType = ""; + String aaiUri = ""; + String aaiUuid = ""; + try { + Vertex tmpV = vSubItr.next(); + vertexIdL = (Long) tmpV.id(); + aaiNodeType = (String) tmpV.property("aai-node-type").orElse(null); + aaiUri = (String) tmpV.property("aai-uri").orElse(null); + aaiUuid = (String) tmpV.property("aai-uuid").orElse(null); + + Thread.sleep(debugDelayMsL); // Make sure it doesn't bump into itself + jg.io(IoCore.graphson()).writer().create().writeVertex(subFileStr, tmpV, Direction.BOTH); + okCount++; + } + catch(Exception e) { + failCount++; + System.out.println(" >> DEBUG MODE >> Failed at: VertexId = [" + vertexIdL + + "], aai-node-type = [" + aaiNodeType + + "], aai-uuid = [" + aaiUuid + + "], aai-uri = [" + aaiUri + "]. 
" ); + e.printStackTrace(); + } + } + System.out.println(" -- Printed " + okCount + " vertexes out to " + fname + + ", with " + failCount + " failed."); + subFileStr.close(); + } + catch(Exception e){ + e.printStackTrace(); + } + } + else { + // Not in DEBUG mode, so we'll do all the nodes in one group + try{ + int count = vtxList.size(); + Iterator <Vertex> vSubItr = vtxList.iterator(); + FileOutputStream subFileStr = new FileOutputStream(fname); + jg.io(IoCore.graphson()).writer().create().writeVertices(subFileStr, vSubItr, Direction.BOTH); + subFileStr.close(); + System.out.println(" -- Printed " + count + " vertexes out to " + fname); + } + catch(Exception e){ + e.printStackTrace(); + } + } + } + +}
\ No newline at end of file diff --git a/src/main/java/org/onap/aai/db/schema/AuditDoc.java b/src/main/java/org/onap/aai/db/schema/AuditDoc.java new file mode 100644 index 0000000..2beec12 --- /dev/null +++ b/src/main/java/org/onap/aai/db/schema/AuditDoc.java @@ -0,0 +1,88 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.db.schema; + +import org.codehaus.jackson.annotate.JsonProperty; + +import java.util.List; + +public class AuditDoc { + + private List<DBProperty> properties; + private List<DBIndex> indexes; + private List<EdgeProperty> edgeLabels; + + /** + * Gets the properties. + * + * @return the properties + */ + public List<DBProperty> getProperties() { + return properties; + } + + /** + * Sets the properties. + * + * @param properties the new properties + */ + public void setProperties(List<DBProperty> properties) { + this.properties = properties; + } + + /** + * Gets the indexes. + * + * @return the indexes + */ + public List<DBIndex> getIndexes() { + return indexes; + } + + /** + * Sets the indexes. + * + * @param indexes the new indexes + */ + public void setIndexes(List<DBIndex> indexes) { + this.indexes = indexes; + } + + /** + * Gets the edge labels. + * + * @return the edge labels + */ + @JsonProperty("edge-labels") + public List<EdgeProperty> getEdgeLabels() { + return edgeLabels; + } + + /** + * Sets the edge labels. + * + * @param edgeLabels the new edge labels + */ + public void setEdgeLabels(List<EdgeProperty> edgeLabels) { + this.edgeLabels = edgeLabels; + } + + +} diff --git a/src/main/java/org/onap/aai/db/schema/AuditJanusGraph.java b/src/main/java/org/onap/aai/db/schema/AuditJanusGraph.java new file mode 100644 index 0000000..e49aa7f --- /dev/null +++ b/src/main/java/org/onap/aai/db/schema/AuditJanusGraph.java @@ -0,0 +1,121 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.db.schema; + + +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.janusgraph.core.EdgeLabel; +import org.janusgraph.core.JanusGraph; +import org.janusgraph.core.PropertyKey; +import org.janusgraph.core.schema.JanusGraphIndex; +import org.janusgraph.core.schema.JanusGraphManagement; + +import java.util.Iterator; +import java.util.LinkedHashSet; + +public class AuditJanusGraph extends Auditor { + + private final JanusGraph graph; + + /** + * Instantiates a new audit JanusGraph. + * + * @param g the g + */ + public AuditJanusGraph (JanusGraph g) { + this.graph = g; + buildSchema(); + } + + /** + * Builds the schema. + */ + private void buildSchema() { + populateProperties(); + populateIndexes(); + populateEdgeLabels(); + } + + /** + * Populate properties. + */ + private void populateProperties() { + JanusGraphManagement mgmt = graph.openManagement(); + Iterable<PropertyKey> iterable = mgmt.getRelationTypes(PropertyKey.class); + Iterator<PropertyKey> JanusGraphProperties = iterable.iterator(); + PropertyKey propKey; + while (JanusGraphProperties.hasNext()) { + propKey = JanusGraphProperties.next(); + DBProperty prop = new DBProperty(); + + prop.setName(propKey.name()); + prop.setCardinality(propKey.cardinality()); + prop.setTypeClass(propKey.dataType()); + + this.properties.put(prop.getName(), prop); + } + } + + /** + * Populate indexes. + */ + private void populateIndexes() { + JanusGraphManagement mgmt = graph.openManagement(); + Iterable<JanusGraphIndex> iterable = mgmt.getGraphIndexes(Vertex.class); + Iterator<JanusGraphIndex> JanusGraphIndexes = iterable.iterator(); + JanusGraphIndex JanusGraphIndex; + while (JanusGraphIndexes.hasNext()) { + JanusGraphIndex = JanusGraphIndexes.next(); + if (JanusGraphIndex.isCompositeIndex()) { + DBIndex index = new DBIndex(); + LinkedHashSet<DBProperty> dbProperties = new LinkedHashSet<>(); + index.setName(JanusGraphIndex.name()); + index.setUnique(JanusGraphIndex.isUnique()); + PropertyKey[] keys = JanusGraphIndex.getFieldKeys(); + for (PropertyKey key : keys) { + dbProperties.add(this.properties.get(key.name())); + } + index.setProperties(dbProperties); + index.setStatus(JanusGraphIndex.getIndexStatus(keys[0])); + this.indexes.put(index.getName(), index); + } + } + } + + /** + * Populate edge labels. 
+ */ + private void populateEdgeLabels() { + JanusGraphManagement mgmt = graph.openManagement(); + Iterable<EdgeLabel> iterable = mgmt.getRelationTypes(EdgeLabel.class); + Iterator<EdgeLabel> JanusGraphEdgeLabels = iterable.iterator(); + EdgeLabel edgeLabel; + while (JanusGraphEdgeLabels.hasNext()) { + edgeLabel = JanusGraphEdgeLabels.next(); + EdgeProperty edgeProperty = new EdgeProperty(); + + edgeProperty.setName(edgeLabel.name()); + edgeProperty.setMultiplicity(edgeLabel.multiplicity()); + + this.edgeLabels.put(edgeProperty.getName(), edgeProperty); + } + } + +} diff --git a/src/main/java/org/onap/aai/db/schema/AuditOXM.java b/src/main/java/org/onap/aai/db/schema/AuditOXM.java new file mode 100644 index 0000000..417824c --- /dev/null +++ b/src/main/java/org/onap/aai/db/schema/AuditOXM.java @@ -0,0 +1,227 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.db.schema; + +import com.att.eelf.configuration.EELFLogger; +import com.att.eelf.configuration.EELFManager; +import com.google.common.collect.Multimap; +import org.janusgraph.core.Cardinality; +import org.janusgraph.core.Multiplicity; +import org.janusgraph.core.schema.SchemaStatus; +import org.onap.aai.db.props.AAIProperties; +import org.onap.aai.edges.EdgeIngestor; +import org.onap.aai.edges.EdgeRule; +import org.onap.aai.edges.exceptions.EdgeRuleNotFoundException; +import org.onap.aai.introspection.Introspector; +import org.onap.aai.introspection.Loader; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.introspection.ModelType; +import org.onap.aai.introspection.exceptions.AAIUnknownObjectException; +import org.onap.aai.logging.LogFormatTools; +import org.onap.aai.schema.enums.ObjectMetadata; +import org.onap.aai.setup.SchemaVersion; + +import java.util.*; +import java.util.stream.Collectors; + +public class AuditOXM extends Auditor { + + private static final EELFLogger LOGGER = EELFManager.getInstance().getLogger(AuditOXM.class); + + private Set<Introspector> allObjects; + private EdgeIngestor ingestor; + + /** + * Instantiates a new audit OXM. 
+ * + * @param version the version + */ + public AuditOXM(LoaderFactory loaderFactory, SchemaVersion version) { + + Loader loader = loaderFactory.createLoaderForVersion(ModelType.MOXY, version); + Set<String> objectNames = getAllObjects(loader); + allObjects = new HashSet<>(); + for (String key : objectNames) { + try { + final Introspector temp = loader.introspectorFromName(key); + allObjects.add(temp); + this.createDBProperties(temp); + } catch (AAIUnknownObjectException e) { + LOGGER.warn("Skipping audit for object " + key + " (Unknown Object) " + LogFormatTools.getStackTop(e)); + } + } + for (Introspector temp : allObjects) { + this.createDBIndexes(temp); + } + try { + createEdgeLabels(); + } catch (EdgeRuleNotFoundException e) { + LOGGER.warn("Skipping audit for version " + version + " due to " + LogFormatTools.getStackTop(e)); + } + + } + + /** + * Gets the all objects. + * + * @param version the version + * @return the all objects + */ + private Set<String> getAllObjects(Loader loader) { + + Set<String> result = loader.getAllObjects().entrySet() + .stream() + .map(Map.Entry::getKey) + .collect(Collectors.toSet()); + + result.remove("EdgePropNames"); + return result; + } + + /** + * Creates the DB properties. + * + * @param temp the temp + */ + private void createDBProperties(Introspector temp) { + Set<String> objectProperties = temp.getProperties(); + + for (String prop : objectProperties) { + if (!properties.containsKey(prop)) { + DBProperty dbProperty = new DBProperty(); + dbProperty.setName(prop); + if (temp.isListType(prop)) { + dbProperty.setCardinality(Cardinality.SET); + if (temp.isSimpleGenericType(prop)) { + Class<?> clazz = null; + try { + clazz = Class.forName(temp.getGenericType(prop)); + } catch (ClassNotFoundException e) { + clazz = Object.class; + } + dbProperty.setTypeClass(clazz); + properties.put(prop, dbProperty); + } + } else { + dbProperty.setCardinality(Cardinality.SINGLE); + if (temp.isSimpleType(prop)) { + Class<?> clazz = null; + try { + clazz = Class.forName(temp.getType(prop)); + } catch (ClassNotFoundException e) { + clazz = Object.class; + } + dbProperty.setTypeClass(clazz); + properties.put(prop, dbProperty); + } + } + } + } + + } + + /** + * Creates the DB indexes. 
+	 *
+	 * @param temp the introspector whose indexed properties are examined
+	 */
+	private void createDBIndexes(Introspector temp) {
+		String uniqueProps = temp.getMetadata(ObjectMetadata.UNIQUE_PROPS);
+		String namespace = temp.getMetadata(ObjectMetadata.NAMESPACE);
+		if (uniqueProps == null) {
+			uniqueProps = "";
+		}
+		if (namespace == null) {
+			namespace = "";
+		}
+		// an object is treated as top-level when it declares a namespace
+		boolean isTopLevel = !namespace.isEmpty();
+		List<String> unique = Arrays.asList(uniqueProps.split(","));
+		Set<String> indexed = temp.getIndexedProperties();
+		Set<String> keys = temp.getKeys();
+
+		for (String prop : indexed) {
+			DBIndex dbIndex = new DBIndex();
+			LinkedHashSet<DBProperty> properties = new LinkedHashSet<>();
+			if (!this.indexes.containsKey(prop)) {
+				dbIndex.setName(prop);
+				dbIndex.setUnique(unique.contains(prop));
+				properties.add(this.properties.get(prop));
+				dbIndex.setProperties(properties);
+				dbIndex.setStatus(SchemaStatus.ENABLED);
+				this.indexes.put(prop, dbIndex);
+			}
+		}
+		if (keys.size() > 1 || isTopLevel) {
+			DBIndex dbIndex = new DBIndex();
+			LinkedHashSet<DBProperty> properties = new LinkedHashSet<>();
+			dbIndex.setName("key-for-" + temp.getDbName());
+			if (!this.indexes.containsKey(dbIndex.getName())) {
+				boolean isUnique = false;
+				if (isTopLevel) {
+					properties.add(this.properties.get(AAIProperties.NODE_TYPE));
+				}
+				for (String key : keys) {
+					properties.add(this.properties.get(key));
+
+					if (unique.contains(key) && !isUnique) {
+						isUnique = true;
+					}
+				}
+				dbIndex.setUnique(isUnique);
+				dbIndex.setProperties(properties);
+				dbIndex.setStatus(SchemaStatus.ENABLED);
+				this.indexes.put(dbIndex.getName(), dbIndex);
+			}
+		}
+
+	}
+
+	/**
+	 * Creates the edge labels.
+	 */
+	private void createEdgeLabels() throws EdgeRuleNotFoundException {
+		if (ingestor == null) {
+			// the EdgeIngestor is injected via setEdgeIngestor(); without it the
+			// edge-label audit cannot run, so skip it rather than fail here
+			LOGGER.warn("No EdgeIngestor available; skipping edge label audit.");
+			return;
+		}
+		Multimap<String, EdgeRule> edgeRules = ingestor.getAllCurrentRules();
+		for (String key : edgeRules.keySet()) {
+			Collection<EdgeRule> collection = edgeRules.get(key);
+			EdgeProperty prop = new EdgeProperty();
+			// there is only ever one rule per key here; EdgeRules uses a
+			// Multimap, which is broader than this code needs
+			String label = "";
+			for (EdgeRule item : collection) {
+				label = item.getLabel();
+			}
+			prop.setName(label);
+			prop.setMultiplicity(Multiplicity.MULTI);
+			this.edgeLabels.put(label, prop);
+		}
+	}
+
+	/**
+	 * Gets all introspectors.
+	 *
+	 * @return all introspectors
+	 */
+	public Set<Introspector> getAllIntrospectors() {
+		return this.allObjects;
+	}
+
+	public void setEdgeIngestor(EdgeIngestor ingestor){
+		this.ingestor = ingestor;
+	}
+}
diff --git a/src/main/java/org/onap/aai/db/schema/Auditor.java b/src/main/java/org/onap/aai/db/schema/Auditor.java
new file mode 100644
index 0000000..5dc8c6c
--- /dev/null
+++ b/src/main/java/org/onap/aai/db/schema/Auditor.java
@@ -0,0 +1,53 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.db.schema; + +import java.util.*; + +public abstract class Auditor { + + protected Map<String, DBProperty> properties = new HashMap<>(); + protected Map<String, DBIndex> indexes = new HashMap<>(); + protected Map<String, EdgeProperty> edgeLabels = new HashMap<>(); + + /** + * Gets the audit doc. + * + * @return the audit doc + */ + public AuditDoc getAuditDoc() { + AuditDoc doc = new AuditDoc(); + List<DBProperty> propertyList = new ArrayList<>(); + List<DBIndex> indexList = new ArrayList<>(); + List<EdgeProperty> edgeLabelList = new ArrayList<>(); + propertyList.addAll(this.properties.values()); + indexList.addAll(this.indexes.values()); + edgeLabelList.addAll(this.edgeLabels.values()); + Collections.sort(propertyList, new CompareByName()); + Collections.sort(indexList, new CompareByName()); + Collections.sort(edgeLabelList, new CompareByName()); + + doc.setProperties(propertyList); + doc.setIndexes(indexList); + doc.setEdgeLabels(edgeLabelList); + + return doc; + } +} diff --git a/src/main/java/org/onap/aai/db/schema/AuditorFactory.java b/src/main/java/org/onap/aai/db/schema/AuditorFactory.java new file mode 100644 index 0000000..6d96f29 --- /dev/null +++ b/src/main/java/org/onap/aai/db/schema/AuditorFactory.java @@ -0,0 +1,53 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.db.schema; + +import org.janusgraph.core.JanusGraph; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.setup.SchemaVersions; +import org.onap.aai.setup.SchemaVersion; + +public class AuditorFactory { + + private LoaderFactory loaderFactory; + + public AuditorFactory(LoaderFactory loaderFactory){ + this.loaderFactory = loaderFactory; + } + /** + * Gets the OXM auditor. + * + * @param v the v + * @return the OXM auditor + */ + public Auditor getOXMAuditor (SchemaVersion v) { + return new AuditOXM(loaderFactory, v); + } + + /** + * Gets the graph auditor. 
+ * + * @param g the g + * @return the graph auditor + */ + public Auditor getGraphAuditor (JanusGraph g) { + return new AuditJanusGraph(g); + } +} diff --git a/src/main/java/org/onap/aai/db/schema/CompareByName.java b/src/main/java/org/onap/aai/db/schema/CompareByName.java new file mode 100644 index 0000000..829239d --- /dev/null +++ b/src/main/java/org/onap/aai/db/schema/CompareByName.java @@ -0,0 +1,35 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.db.schema; + +import java.util.Comparator; + +public class CompareByName implements Comparator<Named>{ + + /** + * {@inheritDoc} + */ + @Override + public int compare(Named o1, Named o2) { + return o1.getName().compareTo(o2.getName()); + } + + +} diff --git a/src/main/java/org/onap/aai/db/schema/DBIndex.java b/src/main/java/org/onap/aai/db/schema/DBIndex.java new file mode 100644 index 0000000..754999c --- /dev/null +++ b/src/main/java/org/onap/aai/db/schema/DBIndex.java @@ -0,0 +1,104 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.db.schema; + +import org.janusgraph.core.schema.SchemaStatus; + +import java.util.LinkedHashSet; +import java.util.Set; + +public class DBIndex implements Named { + + private String name = null; + private boolean unique = false; + private LinkedHashSet<DBProperty> properties = new LinkedHashSet<>(); + private SchemaStatus status = null; + + /** + * Gets the name + */ + public String getName() { + return name; + } + + /** + * Sets the name. + * + * @param name the new name + */ + public void setName(String name) { + this.name = name; + } + + /** + * Checks if is unique. 
+ * + * @return true, if is unique + */ + public boolean isUnique() { + return unique; + } + + /** + * Sets the unique. + * + * @param unique the new unique + */ + public void setUnique(boolean unique) { + this.unique = unique; + } + + /** + * Gets the properties. + * + * @return the properties + */ + public Set<DBProperty> getProperties() { + return properties; + } + + /** + * Sets the properties. + * + * @param properties the new properties + */ + public void setProperties(LinkedHashSet<DBProperty> properties) { + this.properties = properties; + } + + /** + * Gets the status. + * + * @return the status + */ + public SchemaStatus getStatus() { + return status; + } + + /** + * Sets the status. + * + * @param status the new status + */ + public void setStatus(SchemaStatus status) { + this.status = status; + } + +} diff --git a/src/main/java/org/onap/aai/db/schema/DBProperty.java b/src/main/java/org/onap/aai/db/schema/DBProperty.java new file mode 100644 index 0000000..491331d --- /dev/null +++ b/src/main/java/org/onap/aai/db/schema/DBProperty.java @@ -0,0 +1,83 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.db.schema; + +import org.janusgraph.core.Cardinality; + +public class DBProperty implements Named { + + + private String name = null; + private Cardinality cardinality = null; + private Class<?> typeClass = null; + + /** + * Gets the name + */ + public String getName() { + return name; + } + + /** + * Sets the name. + * + * @param name the new name + */ + public void setName(String name) { + this.name = name; + } + + /** + * Gets the cardinality. + * + * @return the cardinality + */ + public Cardinality getCardinality() { + return cardinality; + } + + /** + * Sets the cardinality. + * + * @param cardinality the new cardinality + */ + public void setCardinality(Cardinality cardinality) { + this.cardinality = cardinality; + } + + /** + * Gets the type class. + * + * @return the type class + */ + public Class<?> getTypeClass() { + return typeClass; + } + + /** + * Sets the type class. 
+ * + * @param type the new type class + */ + public void setTypeClass(Class<?> type) { + this.typeClass = type; + } + +} diff --git a/src/main/java/org/onap/aai/db/schema/EdgeProperty.java b/src/main/java/org/onap/aai/db/schema/EdgeProperty.java new file mode 100644 index 0000000..f89bc8f --- /dev/null +++ b/src/main/java/org/onap/aai/db/schema/EdgeProperty.java @@ -0,0 +1,68 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.db.schema; + +import org.codehaus.jackson.annotate.JsonProperty; +import org.codehaus.jackson.annotate.JsonPropertyOrder; +import org.janusgraph.core.Multiplicity; + +@JsonPropertyOrder({ "label", "multiplicity" }) +public class EdgeProperty implements Named { + + private String name = null; + private Multiplicity multiplicity = null; + + /** + * Gets the name + */ + @JsonProperty("label") + public String getName() { + return name; + } + + /** + * Sets the name. + * + * @param name the new name + */ + @JsonProperty("label") + public void setName(String name) { + this.name = name; + } + + /** + * Gets the multiplicity. + * + * @return the multiplicity + */ + public Multiplicity getMultiplicity() { + return multiplicity; + } + + /** + * Sets the multiplicity. + * + * @param multiplicity the new multiplicity + */ + public void setMultiplicity(Multiplicity multiplicity) { + this.multiplicity = multiplicity; + } + +} diff --git a/src/main/java/org/onap/aai/db/schema/ManageJanusGraphSchema.java b/src/main/java/org/onap/aai/db/schema/ManageJanusGraphSchema.java new file mode 100644 index 0000000..dccc141 --- /dev/null +++ b/src/main/java/org/onap/aai/db/schema/ManageJanusGraphSchema.java @@ -0,0 +1,328 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.db.schema; + +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.janusgraph.core.JanusGraph; +import org.janusgraph.core.PropertyKey; +import org.janusgraph.core.schema.JanusGraphIndex; +import org.janusgraph.core.schema.JanusGraphManagement; +import org.janusgraph.core.schema.JanusGraphManagement.IndexBuilder; +import org.janusgraph.core.schema.SchemaStatus; +import org.onap.aai.setup.SchemaVersions; +import org.onap.aai.setup.SchemaVersion; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +public class ManageJanusGraphSchema { + + + private JanusGraphManagement graphMgmt; + private JanusGraph graph; + private List<DBProperty> aaiProperties; + private List<DBIndex> aaiIndexes; + private List<EdgeProperty> aaiEdgeProperties; + private Auditor oxmInfo = null; + private Auditor graphInfo = null; + + /** + * Instantiates a new manage JanusGraph schema. + * + * @param graph the graph + */ + public ManageJanusGraphSchema(final JanusGraph graph, AuditorFactory auditorFactory, SchemaVersions schemaVersions) { + this.graph = graph; + oxmInfo = auditorFactory.getOXMAuditor(schemaVersions.getDefaultVersion()); + graphInfo = auditorFactory.getGraphAuditor(graph); + } + + + /** + * Builds the schema. + */ + public void buildSchema() { + + this.graphMgmt = graph.openManagement(); + aaiProperties = new ArrayList<>(); + aaiEdgeProperties = new ArrayList<>(); + aaiIndexes = new ArrayList<>(); + aaiProperties.addAll(oxmInfo.getAuditDoc().getProperties()); + aaiIndexes.addAll(oxmInfo.getAuditDoc().getIndexes()); + aaiEdgeProperties.addAll(oxmInfo.getAuditDoc().getEdgeLabels()); + try { + createPropertyKeys(); + createIndexes(); + createEdgeLabels(); + } catch (Exception e) { + e.printStackTrace(); + graphMgmt.rollback(); + } + graphMgmt.commit(); + } + + /** + * Creates the property keys. + */ + private void createPropertyKeys() { + + + for (DBProperty prop : aaiProperties) { + + if (graphMgmt.containsPropertyKey(prop.getName())) { + PropertyKey key = graphMgmt.getPropertyKey(prop.getName()); + boolean isChanged = false; + if (!prop.getCardinality().equals(key.cardinality())) { + isChanged = true; + } + if (!prop.getTypeClass().equals(key.dataType())) { + isChanged = true; + } + if (isChanged) { + //must modify! + this.replaceProperty(prop); + } + } else { + //create a new property key + System.out.println("Key: " + prop.getName() + " not found - adding"); + graphMgmt.makePropertyKey(prop.getName()).dataType(prop.getTypeClass()).cardinality(prop.getCardinality()).make(); + } + } + + } + + /** + * Creates the indexes. 
+ */ + private void createIndexes() { + + for (DBIndex index : aaiIndexes) { + Set<DBProperty> props = index.getProperties(); + boolean isChanged = false; + boolean isNew = false; + List<PropertyKey> keyList = new ArrayList<>(); + for (DBProperty prop : props) { + keyList.add(graphMgmt.getPropertyKey(prop.getName())); + } + if (graphMgmt.containsGraphIndex(index.getName())) { + JanusGraphIndex JanusGraphIndex = graphMgmt.getGraphIndex(index.getName()); + PropertyKey[] dbKeys = JanusGraphIndex.getFieldKeys(); + if (dbKeys.length != keyList.size()) { + isChanged = true; + } else { + int i = 0; + for (PropertyKey key : keyList) { + if (!dbKeys[i].equals(key)) { + isChanged = true; + break; + } + i++; + } + } + } else { + isNew = true; + } + if (keyList.size() > 0) { + this.createIndex(graphMgmt, index.getName(), keyList, index.isUnique(), isNew, isChanged); + } + } + } + + // Use EdgeRules to make sure edgeLabels are defined in the db. NOTE: the multiplicty used here is + // always "MULTI". This is not the same as our internal "Many2Many", "One2One", "One2Many" or "Many2One" + // We use the same edge-label for edges between many different types of nodes and our internal + // multiplicty definitions depends on which two types of nodes are being connected. + /** + * Creates the edge labels. + */ + private void createEdgeLabels() { + + + for (EdgeProperty prop : aaiEdgeProperties) { + + if (graphMgmt.containsEdgeLabel(prop.getName())) { + // see what changed + } else { + graphMgmt.makeEdgeLabel(prop.getName()).multiplicity(prop.getMultiplicity()).make(); + } + + } + + + } + + /** + * Creates the property. + * + * @param mgmt the mgmt + * @param prop the prop + */ + private void createProperty(JanusGraphManagement mgmt, DBProperty prop) { + if (mgmt.containsPropertyKey(prop.getName())) { + PropertyKey key = mgmt.getPropertyKey(prop.getName()); + boolean isChanged = false; + if (!prop.getCardinality().equals(key.cardinality())) { + isChanged = true; + } + if (!prop.getTypeClass().equals(key.dataType())) { + isChanged = true; + } + if (isChanged) { + //must modify! + this.replaceProperty(prop); + } + } else { + //create a new property key + System.out.println("Key: " + prop.getName() + " not found - adding"); + mgmt.makePropertyKey(prop.getName()).dataType(prop.getTypeClass()).cardinality(prop.getCardinality()).make(); + } + } + + /** + * Creates the index. 
+ * + * @param mgmt the mgmt + * @param indexName the index name + * @param keys the keys + * @param isUnique the is unique + * @param isNew the is new + * @param isChanged the is changed + */ + private void createIndex(JanusGraphManagement mgmt, String indexName, List<PropertyKey> keys, boolean isUnique, boolean isNew, boolean isChanged) { + + /*if (isChanged) { + System.out.println("Changing index: " + indexName); + JanusGraphIndex oldIndex = mgmt.getGraphIndex(indexName); + mgmt.updateIndex(oldIndex, SchemaAction.DISABLE_INDEX); + mgmt.commit(); + //cannot remove indexes + //graphMgmt.updateIndex(oldIndex, SchemaAction.REMOVE_INDEX); + }*/ + if (isNew || isChanged) { + + if (isNew) { + IndexBuilder builder = mgmt.buildIndex(indexName,Vertex.class); + for (PropertyKey k : keys) { + builder.addKey(k); + } + if (isUnique) { + builder.unique(); + } + builder.buildCompositeIndex(); + System.out.println("Built index for " + indexName + " with keys: " + keys); + + //mgmt.commit(); + } + + //mgmt = graph.asAdmin().getManagementSystem(); + //mgmt.updateIndex(mgmt.getGraphIndex(indexName), SchemaAction.REGISTER_INDEX); + //mgmt.commit(); + + try { + //waitForCompletion(indexName); + //JanusGraphIndexRepair.hbaseRepair(AAIConstants.AAI_CONFIG_FILENAME, indexName, ""); + } catch (Exception e) { + // TODO Auto-generated catch block + graph.tx().rollback(); + graph.close(); + e.printStackTrace(); + } + + //mgmt = graph.asAdmin().getManagementSystem(); + //mgmt.updateIndex(mgmt.getGraphIndex(indexName), SchemaAction.REINDEX); + + //mgmt.updateIndex(mgmt.getGraphIndex(indexName), SchemaAction.ENABLE_INDEX); + + //mgmt.commit(); + + } + } + + /** + * Wait for completion. + * + * @param name the name + * @throws InterruptedException the interrupted exception + */ + private void waitForCompletion(String name) throws InterruptedException { + + boolean registered = false; + long before = System.currentTimeMillis(); + while (!registered) { + Thread.sleep(500L); + JanusGraphManagement mgmt = graph.openManagement(); + JanusGraphIndex idx = mgmt.getGraphIndex(name); + registered = true; + for (PropertyKey k : idx.getFieldKeys()) { + SchemaStatus s = idx.getIndexStatus(k); + registered &= s.equals(SchemaStatus.REGISTERED); + } + mgmt.rollback(); + } + System.out.println("Index REGISTERED in " + (System.currentTimeMillis() - before) + " ms"); + } + + /** + * Replace property. + * + * @param key the key + */ + private void replaceProperty(DBProperty key) { + + + + + } + + /** + * Update index. 
+ * + * @param index the index + */ + public void updateIndex(DBIndex index) { + + JanusGraphManagement mgmt = graph.openManagement(); + List<PropertyKey> keys = new ArrayList<>(); + boolean isNew = false; + boolean isChanged = false; + for (DBProperty prop : index.getProperties()) { + createProperty(mgmt, prop); + keys.add(mgmt.getPropertyKey(prop.getName())); + } + if (mgmt.containsGraphIndex(index.getName())) { + System.out.println("index already exists"); + isNew = false; + isChanged = true; + } else { + isNew = true; + isChanged = false; + } + this.createIndex(mgmt, index.getName(), keys, index.isUnique(), isNew, isChanged); + + mgmt.commit(); + + } + + + + + +} diff --git a/src/main/java/org/onap/aai/db/schema/Named.java b/src/main/java/org/onap/aai/db/schema/Named.java new file mode 100644 index 0000000..f12699b --- /dev/null +++ b/src/main/java/org/onap/aai/db/schema/Named.java @@ -0,0 +1,30 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.db.schema; + +public interface Named { + + /** + * Gets the name. + * + * @return the name + */ + public String getName(); +} diff --git a/src/main/java/org/onap/aai/db/schema/ScriptDriver.java b/src/main/java/org/onap/aai/db/schema/ScriptDriver.java new file mode 100644 index 0000000..dca8e83 --- /dev/null +++ b/src/main/java/org/onap/aai/db/schema/ScriptDriver.java @@ -0,0 +1,123 @@ +/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.db.schema;
+
+import java.io.IOException;
+import java.util.UUID;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.commons.configuration.ConfigurationException;
+import org.apache.commons.configuration.PropertiesConfiguration;
+import org.onap.aai.dbmap.AAIGraphConfig;
+import org.onap.aai.exceptions.AAIException;
+import org.onap.aai.setup.SchemaVersions;
+import org.onap.aai.logging.LoggingContext;
+import org.onap.aai.logging.LoggingContext.StatusCode;
+import org.onap.aai.util.AAIConfig;
+import com.beust.jcommander.JCommander;
+import com.beust.jcommander.Parameter;
+import org.janusgraph.core.JanusGraphFactory;
+import org.janusgraph.core.JanusGraph;
+import org.springframework.context.annotation.AnnotationConfigApplicationContext;
+
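+/**
+ * Command-line driver that audits the schema: depending on the -type argument
+ * it builds an AuditDoc either from the OXM models or from the live JanusGraph
+ * database, and prints it as pretty-printed JSON.
+ */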
+public class ScriptDriver {
+
+ /**
+ * The main method.
+ *
+ * @param args the arguments
+ * @throws AAIException the AAI exception
+ * @throws IOException Signals that an I/O exception has occurred.
+ * @throws ConfigurationException if the graph configuration cannot be read
+ */
+ public static void main (String[] args) throws AAIException, IOException, ConfigurationException {
+ CommandLineArgs cArgs = new CommandLineArgs();
+
+ LoggingContext.init();
+ LoggingContext.component("DBSchemaScriptDriver");
+ LoggingContext.partnerName("NA");
+ LoggingContext.targetEntity("AAI");
+ LoggingContext.requestId(UUID.randomUUID().toString());
+ LoggingContext.serviceName("AAI");
+ LoggingContext.targetServiceName("main");
+ LoggingContext.statusCode(StatusCode.COMPLETE);
+ LoggingContext.responseCode(LoggingContext.SUCCESS);
+
+ new JCommander(cArgs, args);
+
+ if (cArgs.help) {
+ System.out.println("-c [path to graph configuration] -type [what you want to audit - oxm or graph]");
+ System.exit(0);
+ }
+
+ AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext(
+ "org.onap.aai.config",
+ "org.onap.aai.setup"
+ );
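+ // The varargs constructor scans the named packages and refreshes the context
+ // in one step; an equivalent explicit sketch using the same Spring API:
+ // AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
+ // ctx.scan("org.onap.aai.config", "org.onap.aai.setup");
+ // ctx.refresh();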
+
+ AuditorFactory auditorFactory = ctx.getBean(AuditorFactory.class);
+ SchemaVersions schemaVersions = ctx.getBean(SchemaVersions.class);
+
+ String config = cArgs.config;
+ AAIConfig.init();
+
+ PropertiesConfiguration graphConfiguration = new AAIGraphConfig
+ .Builder(config)
+ .forService(ScriptDriver.class.getSimpleName())
+ .withGraphType("NA")
+ .buildConfiguration();
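+ // The -c argument points at a standard JanusGraph properties file; a minimal
+ // sketch (backend and host values are placeholders, not project defaults):
+ // storage.backend=cassandrathrift
+ // storage.hostname=localhost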
+
+ // Validate -type before opening the graph: System.exit inside the
+ // try-with-resources below would skip the graph's close().
+ if (!("oxm".equals(cArgs.type) || "graph".equals(cArgs.type))) {
+ System.out.println("type: " + cArgs.type + " not recognized.");
+ System.exit(1);
+ }
+
+ try (JanusGraph graph = JanusGraphFactory.open(graphConfiguration)) {
+
+ AuditDoc doc = null;
+ if ("oxm".equals(cArgs.type)) {
+ doc = auditorFactory.getOXMAuditor(schemaVersions.getDefaultVersion()).getAuditDoc();
+ } else if ("graph".equals(cArgs.type)) {
+ doc = auditorFactory.getGraphAuditor(graph).getAuditDoc();
+ }
+
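+ // The AuditDoc serialized below is a plain JSON object with three sorted
+ // arrays; an illustrative, hypothetical shape:
+ // { "properties" : [ { "name" : "...", ... } ],
+ // "indexes" : [ { "name" : "...", "unique" : false, ... } ],
+ // "edgeLabels" : [ { "label" : "...", "multiplicity" : "MULTI" } ] }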
+ ObjectMapper mapper = new ObjectMapper();
+
+ String json = mapper.writerWithDefaultPrettyPrinter().writeValueAsString(doc);
+ System.out.println(json);
+ }
+ }
+
+}
+
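+// Example invocation (jar name and config path are hypothetical):
+// java -cp aai-graphadmin.jar org.onap.aai.db.schema.ScriptDriver \
+// -c /opt/app/aai-graphadmin/resources/etc/appprops/janusgraph-realtime.properties -type oxm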
+class CommandLineArgs {
+
+ @Parameter(names = "--help", description = "Help")
+ public boolean help = false;
+
+ @Parameter(names = "-c", description = "Configuration", required=true)
+ public String config;
+
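+ // Note: because -type is required, the "graph" initializer below never acts
+ // as a default; it only documents one of the two accepted values (oxm|graph).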
+ @Parameter(names = "-type", description = "Type", required=true)
+ public String type = "graph";
+
+
+}
\ No newline at end of file diff --git a/src/main/java/org/onap/aai/dbgen/DupeTool.java b/src/main/java/org/onap/aai/dbgen/DupeTool.java new file mode 100644 index 0000000..7b7ef99 --- /dev/null +++ b/src/main/java/org/onap/aai/dbgen/DupeTool.java @@ -0,0 +1,1854 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.dbgen; + +import java.io.FileInputStream; +import java.io.InputStream; +import java.util.*; +import java.util.Map.Entry; + +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__; +import org.apache.tinkerpop.gremlin.structure.Direction; +import org.apache.tinkerpop.gremlin.structure.Edge; +import org.apache.tinkerpop.gremlin.structure.Graph; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.apache.tinkerpop.gremlin.structure.VertexProperty; +import org.onap.aai.db.props.AAIProperties; +import org.onap.aai.dbmap.AAIGraphConfig; +import org.onap.aai.dbmap.AAIGraph; +import org.onap.aai.edges.enums.EdgeProperty; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.introspection.Introspector; +import org.onap.aai.introspection.Loader; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.introspection.ModelType; +import org.onap.aai.logging.ErrorLogHelper; +import org.onap.aai.logging.LogFormatTools; +import org.onap.aai.logging.LoggingContext; +import org.onap.aai.logging.LoggingContext.StatusCode; +import org.onap.aai.edges.enums.AAIDirection; +import org.onap.aai.setup.SchemaVersions; +import org.onap.aai.util.AAIConfig; +import org.onap.aai.util.AAIConstants; +import org.slf4j.MDC; + +import com.att.eelf.configuration.Configuration; +import com.att.eelf.configuration.EELFLogger; +import com.att.eelf.configuration.EELFManager; +import org.janusgraph.core.JanusGraphFactory; +import org.janusgraph.core.JanusGraph; +import org.springframework.context.annotation.AnnotationConfigApplicationContext; + +public class DupeTool { + + private static final EELFLogger logger = EELFManager.getInstance().getLogger(DupeTool.class.getSimpleName()); + private static final String FROMAPPID = "AAI-DB"; + private static final String TRANSID = UUID.randomUUID().toString(); + + private static String graphType = "realdb"; + private final SchemaVersions schemaVersions; + + private boolean shouldExitVm = true; + + public void exit(int statusCode) { + if (this.shouldExitVm) { + System.exit(1); + } + } + + private 
LoaderFactory loaderFactory; + + public DupeTool(LoaderFactory loaderFactory, SchemaVersions schemaVersions){ + this(loaderFactory, schemaVersions, true); + } + + public DupeTool(LoaderFactory loaderFactory, SchemaVersions schemaVersions, boolean shouldExitVm){ + this.loaderFactory = loaderFactory; + this.schemaVersions = schemaVersions; + this.shouldExitVm = shouldExitVm; + } + + public void execute(String[] args){ + + String defVersion = "v12"; + try { + defVersion = AAIConfig.get(AAIConstants.AAI_DEFAULT_API_VERSION_PROP); + } catch (AAIException ae) { + String emsg = "Error trying to get default API Version property \n"; + System.out.println(emsg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logger.error(emsg); + exit(0); + } + + + Loader loader = null; + try { + loader = loaderFactory.createLoaderForVersion(ModelType.MOXY, schemaVersions.getDefaultVersion()); + } catch (Exception ex) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.UNKNOWN_ERROR); + logger.error("ERROR - Could not do the moxyMod.init() " + LogFormatTools.getStackTop(ex)); + exit(1); + } + JanusGraph graph1 = null; + JanusGraph graph2 = null; + Graph gt1 = null; + Graph gt2 = null; + + boolean specialTenantRule = false; + + try { + AAIConfig.init(); + int maxRecordsToFix = AAIConstants.AAI_DUPETOOL_DEFAULT_MAX_FIX; + int sleepMinutes = AAIConstants.AAI_DUPETOOL_DEFAULT_SLEEP_MINUTES; + int timeWindowMinutes = 0; // A value of 0 means that we will not have a time-window -- we will look + // at all nodes of the passed-in nodeType. + long windowStartTime = 0; // Translation of the window into a starting timestamp + + try { + String maxFixStr = AAIConfig.get("aai.dupeTool.default.max.fix"); + if (maxFixStr != null && !maxFixStr.equals("")) { + maxRecordsToFix = Integer.parseInt(maxFixStr); + } + String sleepStr = AAIConfig.get("aai.dupeTool.default.sleep.minutes"); + if (sleepStr != null && !sleepStr.equals("")) { + sleepMinutes = Integer.parseInt(sleepStr); + } + } catch (Exception e) { + // Don't worry, we'll just use the defaults that we got from AAIConstants + logger.warn("WARNING - could not pick up aai.dupeTool values from aaiconfig.properties file. Will use defaults. "); + } + + String nodeTypeVal = ""; + String userIdVal = ""; + String filterParams = ""; + Boolean skipHostCheck = false; + Boolean autoFix = false; + String argStr4Msg = ""; + Introspector obj = null; + + if (args != null && args.length > 0) { + // They passed some arguments in that will affect processing + for (int i = 0; i < args.length; i++) { + String thisArg = args[i]; + argStr4Msg = argStr4Msg + " " + thisArg; + + if (thisArg.equals("-nodeType")) { + i++; + if (i >= args.length) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error(" No value passed with -nodeType option. 
"); + exit(0); + } + nodeTypeVal = args[i]; + argStr4Msg = argStr4Msg + " " + nodeTypeVal; + } else if (thisArg.equals("-sleepMinutes")) { + i++; + if (i >= args.length) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error("No value passed with -sleepMinutes option."); + exit(0); + } + String nextArg = args[i]; + try { + sleepMinutes = Integer.parseInt(nextArg); + } catch (Exception e) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error("Bad value passed with -sleepMinutes option: [" + + nextArg + "]"); + exit(0); + } + argStr4Msg = argStr4Msg + " " + sleepMinutes; + } else if (thisArg.equals("-maxFix")) { + i++; + if (i >= args.length) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error("No value passed with -maxFix option."); + exit(0); + } + String nextArg = args[i]; + try { + maxRecordsToFix = Integer.parseInt(nextArg); + } catch (Exception e) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error("Bad value passed with -maxFix option: [" + + nextArg + "]"); + exit(0); + } + argStr4Msg = argStr4Msg + " " + maxRecordsToFix; + } else if (thisArg.equals("-timeWindowMinutes")) { + i++; + if (i >= args.length) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error("No value passed with -timeWindowMinutes option."); + exit(0); + } + String nextArg = args[i]; + try { + timeWindowMinutes = Integer.parseInt(nextArg); + } catch (Exception e) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error("Bad value passed with -timeWindowMinutes option: [" + + nextArg + "]"); + exit(0); + } + argStr4Msg = argStr4Msg + " " + timeWindowMinutes; + } else if (thisArg.equals("-skipHostCheck")) { + skipHostCheck = true; + } else if (thisArg.equals("-specialTenantRule")) { + specialTenantRule = true; + } else if (thisArg.equals("-autoFix")) { + autoFix = true; + } else if (thisArg.equals("-userId")) { + i++; + if (i >= args.length) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error(" No value passed with -userId option. "); + exit(0); + } + userIdVal = args[i]; + argStr4Msg = argStr4Msg + " " + userIdVal; + } else if (thisArg.equals("-params4Collect")) { + i++; + if (i >= args.length) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error(" No value passed with -params4Collect option. "); + exit(0); + } + filterParams = args[i]; + argStr4Msg = argStr4Msg + " " + filterParams; + } else { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error(" Unrecognized argument passed to DupeTool: [" + + thisArg + "]. "); + logger.error(" Valid values are: -action -userId -vertexId -edgeId -overRideProtection "); + exit(0); + } + } + } + + userIdVal = userIdVal.trim(); + if ((userIdVal.length() < 6) || userIdVal.toUpperCase().equals("AAIADMIN")) { + String emsg = "userId parameter is required. [" + userIdVal + "] passed to DupeTool(). 
userId must be not empty and not aaiadmin \n"; + System.out.println(emsg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error(emsg); + exit(0); + } + + nodeTypeVal = nodeTypeVal.trim(); + if (nodeTypeVal.equals("")) { + String emsg = " nodeType is a required parameter for DupeTool().\n"; + System.out.println(emsg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error(emsg); + exit(0); + } else { + obj = loader.introspectorFromName(nodeTypeVal); + } + + if (timeWindowMinutes > 0) { + // Translate the window value (ie. 30 minutes) into a unix timestamp like + // we use in the db - so we can select data created after that time. + windowStartTime = figureWindowStartTime(timeWindowMinutes); + } + + String msg = ""; + msg = "DupeTool called with these params: [" + argStr4Msg + "]"; + System.out.println(msg); + logger.info(msg); + + // Determine what the key fields are for this nodeType (and we want them ordered) + ArrayList<String> keyPropNamesArr = new ArrayList<String>(obj.getKeys()); + + // Determine what kinds of nodes (if any) this nodeType is dependent on for uniqueness + ArrayList<String> depNodeTypeList = new ArrayList<String>(); + Collection<String> depNTColl = obj.getDependentOn(); + Iterator<String> ntItr = depNTColl.iterator(); + while (ntItr.hasNext()) { + depNodeTypeList.add(ntItr.next()); + } + + // Based on the nodeType, window and filterData, figure out the vertices that we will be checking + System.out.println(" ---- NOTE --- about to open graph (takes a little while)--------\n"); + graph1 = setupGraph(logger); + gt1 = getGraphTransaction(graph1, logger); + ArrayList<Vertex> verts2Check = new ArrayList<Vertex>(); + try { + verts2Check = figureOutNodes2Check(TRANSID, FROMAPPID, gt1, + nodeTypeVal, windowStartTime, filterParams, logger); + } catch (AAIException ae) { + String emsg = "Error trying to get initial set of nodes to check. \n"; + System.out.println(emsg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logger.error(emsg); + exit(0); + } + + if (verts2Check == null || verts2Check.size() == 0) { + msg = " No vertices found to check. Used nodeType = [" + nodeTypeVal + + "], windowMinutes = " + timeWindowMinutes + + ", filterData = [" + filterParams + "]."; + logger.info(msg); + System.out.println(msg); + exit(0); + } else { + msg = " Found " + verts2Check.size() + " nodes of type " + nodeTypeVal + + " to check using passed filterParams and windowStartTime. "; + logger.info(msg); + System.out.println(msg); + } + + ArrayList<String> firstPassDupeSets = new ArrayList<String>(); + ArrayList<String> secondPassDupeSets = new ArrayList<String>(); + Boolean isDependentOnParent = false; + if (!obj.getDependentOn().isEmpty()) { + isDependentOnParent = true; + } + + if (isDependentOnParent) { + firstPassDupeSets = getDupeSets4DependentNodes(TRANSID, FROMAPPID, gt1, + defVersion, nodeTypeVal, verts2Check, keyPropNamesArr, loader, + specialTenantRule, logger); + } else { + firstPassDupeSets = getDupeSets4NonDepNodes(TRANSID, FROMAPPID, gt1, + defVersion, nodeTypeVal, verts2Check, keyPropNamesArr, + specialTenantRule, loader, logger); + } + + msg = " Found " + firstPassDupeSets.size() + " sets of duplicates for this request. 
"; + logger.info(msg); + System.out.println(msg); + if (firstPassDupeSets.size() > 0) { + msg = " Here is what they look like: "; + logger.info(msg); + System.out.println(msg); + for (int x = 0; x < firstPassDupeSets.size(); x++) { + msg = " Set " + x + ": [" + firstPassDupeSets.get(x) + "] "; + logger.info(msg); + System.out.println(msg); + showNodeDetailsForADupeSet(gt1, firstPassDupeSets.get(x), logger); + } + } + + boolean didSomeDeletesFlag = false; + ArrayList<String> dupeSetsToFix = new ArrayList<String>(); + if (autoFix && firstPassDupeSets.size() == 0) { + msg = "AutoFix option is on, but no dupes were found on the first pass. Nothing to fix."; + logger.info(msg); + System.out.println(msg); + } else if (autoFix) { + // We will try to fix any dupes that we can - but only after sleeping for a + // time and re-checking the list of duplicates using a seperate transaction. + try { + msg = "\n\n----------- About to sleep for " + sleepMinutes + " minutes." + + " -----------\n\n"; + logger.info(msg); + System.out.println(msg); + int sleepMsec = sleepMinutes * 60 * 1000; + Thread.sleep(sleepMsec); + } catch (InterruptedException ie) { + msg = "\n >>> Sleep Thread has been Interrupted <<< "; + logger.info(msg); + System.out.println(msg); + exit(0); + } + + graph2 = setupGraph(logger); + gt2 = getGraphTransaction(graph2, logger); + if (isDependentOnParent) { + secondPassDupeSets = getDupeSets4DependentNodes(TRANSID, FROMAPPID, gt2, + defVersion, nodeTypeVal, verts2Check, keyPropNamesArr, loader, + specialTenantRule, logger); + } else { + secondPassDupeSets = getDupeSets4NonDepNodes(TRANSID, FROMAPPID, gt2, + defVersion, nodeTypeVal, verts2Check, keyPropNamesArr, + specialTenantRule, loader, logger); + } + + dupeSetsToFix = figureWhichDupesStillNeedFixing(firstPassDupeSets, secondPassDupeSets, logger); + msg = "\nAfter running a second pass, there were " + dupeSetsToFix.size() + + " sets of duplicates that we think can be deleted. "; + logger.info(msg); + System.out.println(msg); + if (dupeSetsToFix.size() > 0) { + msg = " Here is what the sets look like: "; + logger.info(msg); + System.out.println(msg); + for (int x = 0; x < dupeSetsToFix.size(); x++) { + msg = " Set " + x + ": [" + dupeSetsToFix.get(x) + "] "; + logger.info(msg); + System.out.println(msg); + showNodeDetailsForADupeSet(gt2, dupeSetsToFix.get(x), logger); + } + } + + if (dupeSetsToFix.size() > 0) { + if (dupeSetsToFix.size() > maxRecordsToFix) { + String infMsg = " >> WARNING >> Dupe list size (" + + dupeSetsToFix.size() + + ") is too big. The maxFix we are using is: " + + maxRecordsToFix + + ". No nodes will be deleted. (use the" + + " -maxFix option to override this limit.)"; + System.out.println(infMsg); + logger.info(infMsg); + } else { + // Call the routine that fixes known dupes + didSomeDeletesFlag = deleteNonKeepers(gt2, dupeSetsToFix, logger); + } + } + if (didSomeDeletesFlag) { + gt2.tx().commit(); + } + } + + } catch (AAIException e) { + logger.error("Caught AAIException while running the dupeTool: " + LogFormatTools.getStackTop(e)); + ErrorLogHelper.logException(e); + } catch (Exception ex) { + logger.error("Caught exception while running the dupeTool: " + LogFormatTools.getStackTop(ex)); + ErrorLogHelper.logError("AAI_6128", ex.getMessage() + ", resolve and rerun the dupeTool. "); + } finally { + if (gt1 != null && gt1.tx().isOpen()) { + // We don't change any data with gt1 - so just roll it back so it knows we're done. 
+ try { + gt1.tx().rollback(); + } catch (Exception ex) { + // Don't throw anything because JanusGraph sometimes is just saying that the graph is already closed + logger.warn("WARNING from final gt1.rollback() " + LogFormatTools.getStackTop(ex)); + } + } + + if (gt2 != null && gt2.tx().isOpen()) { + // Any changes that worked correctly should have already done + // their commits. + try { + gt2.tx().rollback(); + } catch (Exception ex) { + // Don't throw anything because JanusGraph sometimes is just saying that the graph is already closed + logger.warn("WARNING from final gt2.rollback() " + LogFormatTools.getStackTop(ex)); + } + } + + try { + if (graph1 != null && graph1.isOpen()) { + closeGraph(graph1, logger); + } + } catch (Exception ex) { + // Don't throw anything because JanusGraph sometimes is just saying that the graph is already closed{ + logger.warn("WARNING from final graph1.shutdown() " + LogFormatTools.getStackTop(ex)); + } + + try { + if (graph2 != null && graph2.isOpen()) { + closeGraph(graph2, logger); + } + } catch (Exception ex) { + // Don't throw anything because JanusGraph sometimes is just saying that the graph is already closed{ + logger.warn("WARNING from final graph2.shutdown() " + LogFormatTools.getStackTop(ex)); + } + } + + exit(0); + } + + /** + * The main method. + * + * @param args the arguments + */ + public static void main(String[] args) { + + System.setProperty("aai.service.name", DupeTool.class.getSimpleName()); + // Set the logging file properties to be used by EELFManager + Properties props = System.getProperties(); + props.setProperty(Configuration.PROPERTY_LOGGING_FILE_NAME, "dupeTool-logback.xml"); + props.setProperty(Configuration.PROPERTY_LOGGING_FILE_PATH, AAIConstants.AAI_HOME_BUNDLECONFIG); + MDC.put("logFilenameAppender", DupeTool.class.getSimpleName()); + + LoggingContext.init(); + LoggingContext.partnerName(FROMAPPID); + LoggingContext.serviceName(AAIConstants.AAI_RESOURCES_MS); + LoggingContext.component("dupeTool"); + LoggingContext.targetEntity(AAIConstants.AAI_RESOURCES_MS); + LoggingContext.targetServiceName("main"); + LoggingContext.requestId(TRANSID); + LoggingContext.statusCode(StatusCode.COMPLETE); + LoggingContext.responseCode(LoggingContext.SUCCESS); + + AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext( + "org.onap.aai.config", + "org.onap.aai.setup" + ); + + LoaderFactory loaderFactory = ctx.getBean(LoaderFactory.class); + SchemaVersions schemaVersions = ctx.getBean(SchemaVersions.class); + DupeTool dupeTool = new DupeTool(loaderFactory, schemaVersions); + dupeTool.execute(args); + }// end of main() + + + /** + * Collect Duplicate Sets for nodes that are NOT dependent on parent nodes. + * + * @param transId the trans id + * @param fromAppId the from app id + * @param g the g + * @param version the version + * @param nType the n type + * @param passedVertList the passed vert list + * @param dbMaps the db maps + * @return the array list + */ + private ArrayList<String> getDupeSets4NonDepNodes(String transId, + String fromAppId, Graph g, String version, String nType, + ArrayList<Vertex> passedVertList, + ArrayList<String> keyPropNamesArr, + Boolean specialTenantRule, Loader loader, EELFLogger logger) { + + ArrayList<String> returnList = new ArrayList<String>(); + + // We've been passed a set of nodes that we want to check. + // They are all NON-DEPENDENT nodes meaning that they should be + // unique in the DB based on their KEY DATA alone. 
So, if + // we group them by their key data - if any key has more than one + // vertex mapped to it, those vertices are dupes. + // + // When we find duplicates, we return then as a String (there can be + // more than one duplicate for one set of key data): + // Each element in the returned arrayList might look like this: + // "1234|5678|keepVid=UNDETERMINED" (if there were 2 dupes, and we + // couldn't figure out which one to keep) + // or, "100017|200027|30037|keepVid=30037" (if there were 3 dupes and we + // thought the third one was the one that should survive) + + HashMap<String, ArrayList<String>> keyVals2VidHash = new HashMap<String, ArrayList<String>>(); + HashMap<String, Vertex> vtxHash = new HashMap<String, Vertex>(); + Iterator<Vertex> pItr = passedVertList.iterator(); + while (pItr.hasNext()) { + try { + Vertex tvx = pItr.next(); + String thisVid = tvx.id().toString(); + vtxHash.put(thisVid, tvx); + + // if there are more than one vertexId mapping to the same keyProps -- they are dupes + String hKey = getNodeKeyValString(tvx, keyPropNamesArr, logger); + if (keyVals2VidHash.containsKey(hKey)) { + // We've already seen this key + ArrayList<String> tmpVL = (ArrayList<String>) keyVals2VidHash.get(hKey); + tmpVL.add(thisVid); + keyVals2VidHash.put(hKey, tmpVL); + } else { + // First time for this key + ArrayList<String> tmpVL = new ArrayList<String>(); + tmpVL.add(thisVid); + keyVals2VidHash.put(hKey, tmpVL); + } + } catch (Exception e) { + logger.warn(" >>> Threw an error in getDupeSets4NonDepNodes - just absorb this error and move on. " + LogFormatTools.getStackTop(e)); + } + } + + for (Map.Entry<String, ArrayList<String>> entry : keyVals2VidHash.entrySet()) { + ArrayList<String> vidList = entry.getValue(); + try { + if (!vidList.isEmpty() && vidList.size() > 1) { + // There are more than one vertex id's using the same key info + String dupesStr = ""; + ArrayList<Vertex> vertList = new ArrayList<Vertex>(); + for (int i = 0; i < vidList.size(); i++) { + String tmpVid = vidList.get(i); + dupesStr = dupesStr + tmpVid + "|"; + vertList.add(vtxHash.get(tmpVid)); + } + + if (dupesStr != "") { + Vertex prefV = getPreferredDupe(transId, fromAppId, + g, vertList, version, specialTenantRule, loader, logger); + if (prefV == null) { + // We could not determine which duplicate to keep + dupesStr = dupesStr + "KeepVid=UNDETERMINED"; + returnList.add(dupesStr); + } else { + dupesStr = dupesStr + "KeepVid=" + prefV.id(); + returnList.add(dupesStr); + } + } + } + } catch (Exception e) { + logger.warn(" >>> Threw an error in getDupeSets4NonDepNodes - just absorb this error and move on. " + LogFormatTools.getStackTop(e)); + } + + } + return returnList; + + }// End of getDupeSets4NonDepNodes() + + + /** + * Collect Duplicate Sets for nodes that are dependent on parent nodes. 
+ * + * @param transId the trans id + * @param fromAppId the from app id + * @param g the g + * @param version the version + * @param nType the n type + * @param passedVertList the passed vert list + * @param dbMaps the db maps + * @param keyPropNamesArr Array (ordered) of keyProperty names + * @param specialTenantRule flag + * @param EELFLogger the logger + * @return the array list + */ + private ArrayList<String> getDupeSets4DependentNodes(String transId, + String fromAppId, Graph g, String version, String nType, + ArrayList<Vertex> passedVertList, + ArrayList<String> keyPropNamesArr, Loader loader, + Boolean specialTenantRule, EELFLogger logger) { + + // This is for nodeTypes that DEPEND ON A PARENT NODE FOR UNIQUNESS + + ArrayList<String> returnList = new ArrayList<String>(); + ArrayList<String> alreadyFoundDupeVidArr = new ArrayList<String>(); + + // We've been passed a set of nodes that we want to check. These are + // all nodes that ARE DEPENDENT on a PARENT Node for uniqueness. + // The first thing to do is to identify the key properties for the node-type + // and pull from the db just using those properties. + // Then, we'll check those nodes with their parent nodes to see if there + // are any duplicates. + // + // When we find duplicates, we return then as a String (there can be + // more than one duplicate for one set of key data): + // Each element in the returned arrayList might look like this: + // "1234|5678|keepVid=UNDETERMINED" (if there were 2 dupes, and we + // couldn't figure out which one to keep) + // or, "100017|200027|30037|keepVid=30037" (if there were 3 dupes and we + // thought the third one was the one that should survive) + HashMap<String, Object> checkVertHash = new HashMap<String, Object>(); + try { + Iterator<Vertex> pItr = passedVertList.iterator(); + while (pItr.hasNext()) { + Vertex tvx = pItr.next(); + String passedId = tvx.id().toString(); + if (!alreadyFoundDupeVidArr.contains(passedId)) { + // We haven't seen this one before - so we should check it. + HashMap<String, Object> keyPropValsHash = getNodeKeyVals(tvx, keyPropNamesArr, logger); + ArrayList<Vertex> tmpVertList = getNodeJustUsingKeyParams(transId, fromAppId, g, + nType, keyPropValsHash, version, logger); + + if (tmpVertList.size() <= 1) { + // Even without a parent node, this thing is unique so don't worry about it. + } else { + for (int i = 0; i < tmpVertList.size(); i++) { + Vertex tmpVtx = (tmpVertList.get(i)); + String tmpVid = tmpVtx.id().toString(); + alreadyFoundDupeVidArr.add(tmpVid); + + String hKey = getNodeKeyValString(tmpVtx, keyPropNamesArr, logger); + if (checkVertHash.containsKey(hKey)) { + // add it to an existing list + ArrayList<Vertex> tmpVL = (ArrayList<Vertex>) checkVertHash.get(hKey); + tmpVL.add(tmpVtx); + checkVertHash.put(hKey, tmpVL); + } else { + // First time for this key + ArrayList<Vertex> tmpVL = new ArrayList<Vertex>(); + tmpVL.add(tmpVtx); + checkVertHash.put(hKey, tmpVL); + } + } + } + } + } + + // More than one node have the same key fields since they may + // depend on a parent node for uniqueness. Since we're finding + // more than one, we want to check to see if any of the + // vertices that have this set of keys are also pointing at the + // same 'parent' node. + // Note: for a given set of key data, it is possible that there + // could be more than one set of duplicates. 
+ for (Entry<String, Object> lentry : checkVertHash.entrySet()) { + ArrayList<Vertex> thisIdSetList = (ArrayList<Vertex>) lentry.getValue(); + if (thisIdSetList == null || thisIdSetList.size() < 2) { + // Nothing to check for this set. + continue; + } + + HashMap<String, ArrayList<Vertex>> vertsGroupedByParentHash = groupVertsByDepNodes( + transId, fromAppId, g, version, nType, + thisIdSetList, loader); + for (Map.Entry<String, ArrayList<Vertex>> entry : vertsGroupedByParentHash + .entrySet()) { + ArrayList<Vertex> thisParentsVertList = entry + .getValue(); + if (thisParentsVertList.size() > 1) { + // More than one vertex found with the same key info + // hanging off the same parent/dependent node + String dupesStr = ""; + for (int i = 0; i < thisParentsVertList.size(); i++) { + dupesStr = dupesStr + + ((thisParentsVertList + .get(i))).id() + "|"; + } + if (dupesStr != "") { + Vertex prefV = getPreferredDupe(transId, + fromAppId, g, thisParentsVertList, + version, specialTenantRule, loader, logger); + + if (prefV == null) { + // We could not determine which duplicate to keep + dupesStr = dupesStr + "KeepVid=UNDETERMINED"; + returnList.add(dupesStr); + } else { + dupesStr = dupesStr + "KeepVid=" + + prefV.id().toString(); + returnList.add(dupesStr); + } + } + } + } + } + + } catch (Exception e) { + logger.warn(" >>> Threw an error in checkAndProcessDupes - just absorb this error and move on. " + LogFormatTools.getStackTop(e)); + } + + return returnList; + + }// End of getDupeSets4DependentNodes() + + + private Graph getGraphTransaction(JanusGraph graph, EELFLogger logger) { + + Graph gt = null; + try { + if (graph == null) { + String emsg = "could not get graph object in DupeTool. \n"; + System.out.println(emsg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.AVAILABILITY_TIMEOUT_ERROR); + logger.error(emsg); + exit(0); + } + gt = graph.newTransaction(); + if (gt == null) { + String emsg = "null graphTransaction object in DupeTool. \n"; + throw new AAIException("AAI_6101", emsg); + } + + } catch (AAIException e1) { + String msg = e1.getErrorObject().toString(); + System.out.println(msg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logger.error(msg); + exit(0); + } catch (Exception e2) { + String msg = e2.toString(); + System.out.println(msg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.UNKNOWN_ERROR); + logger.error(msg); + exit(0); + } + + return gt; + + }// End of getGraphTransaction() + + + public void showNodeInfo(EELFLogger logger, Vertex tVert, Boolean displayAllVidsFlag) { + + try { + Iterator<VertexProperty<Object>> pI = tVert.properties(); + String infStr = ">>> Found Vertex with VertexId = " + tVert.id() + ", properties: "; + System.out.println(infStr); + logger.info(infStr); + while (pI.hasNext()) { + VertexProperty<Object> tp = pI.next(); + infStr = " [" + tp.key() + "|" + tp.value() + "] "; + System.out.println(infStr); + logger.info(infStr); + } + + ArrayList<String> retArr = collectEdgeInfoForNode(logger, tVert, displayAllVidsFlag); + for (String infoStr : retArr) { + System.out.println(infoStr); + logger.info(infoStr); + } + } catch (Exception e) { + String warnMsg = " -- Error -- trying to display edge info. 
[" + e.getMessage() + "]"; + System.out.println(warnMsg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.UNKNOWN_ERROR); + logger.warn(warnMsg); + LoggingContext.statusCode(StatusCode.COMPLETE); + LoggingContext.responseCode(LoggingContext.SUCCESS); + } + + }// End of showNodeInfo() + + + public ArrayList<String> collectEdgeInfoForNode(EELFLogger logger, Vertex tVert, boolean displayAllVidsFlag) { + ArrayList<String> retArr = new ArrayList<String>(); + Direction dir = Direction.OUT; + for (int i = 0; i <= 1; i++) { + if (i == 1) { + // Second time through we'll look at the IN edges. + dir = Direction.IN; + } + Iterator<Edge> eI = tVert.edges(dir); + if (!eI.hasNext()) { + retArr.add("No " + dir + " edges were found for this vertex. "); + } + while (eI.hasNext()) { + Edge ed = eI.next(); + String lab = ed.label(); + Vertex vtx = null; + if (dir == Direction.OUT) { + // get the vtx on the "other" side + vtx = ed.inVertex(); + } else { + // get the vtx on the "other" side + vtx = ed.outVertex(); + } + if (vtx == null) { + retArr.add(" >>> COULD NOT FIND VERTEX on the other side of this edge edgeId = " + ed.id() + " <<< "); + } else { + String nType = vtx.<String>property("aai-node-type").orElse(null); + if (displayAllVidsFlag) { + // This should rarely be needed + String vid = vtx.id().toString(); + retArr.add("Found an " + dir + " edge (" + lab + ") between this vertex and a [" + nType + "] node with VtxId = " + vid); + } else { + // This is the normal case + retArr.add("Found an " + dir + " edge (" + lab + ") between this vertex and a [" + nType + "] node. "); + } + } + } + } + return retArr; + + }// end of collectEdgeInfoForNode() + + + private long figureWindowStartTime(int timeWindowMinutes) { + // Given a window size, calculate what the start-timestamp would be. + + if (timeWindowMinutes <= 0) { + // This just means that there is no window... + return 0; + } + long unixTimeNow = System.currentTimeMillis(); + long windowInMillis = timeWindowMinutes * 60 * 1000; + + long startTimeStamp = unixTimeNow - windowInMillis; + + return startTimeStamp; + } // End of figureWindowStartTime() + + + /** + * Gets the node(s) just using key params. + * + * @param transId the trans id + * @param fromAppId the from app id + * @param graph the graph + * @param nodeType the node type + * @param keyPropsHash the key props hash + * @param apiVersion the api version + * @return the node just using key params + * @throws AAIException the AAI exception + */ + public ArrayList<Vertex> getNodeJustUsingKeyParams(String transId, String fromAppId, Graph graph, String nodeType, + HashMap<String, Object> keyPropsHash, String apiVersion, EELFLogger logger) throws AAIException { + + ArrayList<Vertex> retVertList = new ArrayList<Vertex>(); + + // We assume that all NodeTypes have at least one key-property defined. + // Note - instead of key-properties (the primary key properties), a user could pass + // alternate-key values if they are defined for the nodeType. + ArrayList<String> kName = new ArrayList<String>(); + ArrayList<Object> kVal = new ArrayList<Object>(); + if (keyPropsHash == null || keyPropsHash.isEmpty()) { + throw new AAIException("AAI_6120", " NO key properties passed for this getNodeJustUsingKeyParams() request. NodeType = [" + nodeType + "]. 
"); + } + + int i = -1; + for (Map.Entry<String, Object> entry : keyPropsHash.entrySet()) { + i++; + kName.add(i, entry.getKey()); + kVal.add(i, entry.getValue()); + } + int topPropIndex = i; + Vertex tiV = null; + String propsAndValuesForMsg = ""; + Iterator<Vertex> verts = null; + GraphTraversalSource g = graph.traversal(); + try { + if (topPropIndex == 0) { + propsAndValuesForMsg = " (" + kName.get(0) + " = " + kVal.get(0) + ") "; + verts = g.V().has(kName.get(0), kVal.get(0)).has("aai-node-type", nodeType); + } else if (topPropIndex == 1) { + propsAndValuesForMsg = " (" + kName.get(0) + " = " + kVal.get(0) + ", " + + kName.get(1) + " = " + kVal.get(1) + ") "; + verts = g.V().has(kName.get(0), kVal.get(0)).has(kName.get(1), kVal.get(1)).has("aai-node-type", nodeType); + } else if (topPropIndex == 2) { + propsAndValuesForMsg = " (" + kName.get(0) + " = " + kVal.get(0) + ", " + + kName.get(1) + " = " + kVal.get(1) + ", " + + kName.get(2) + " = " + kVal.get(2) + ") "; + verts = g.V().has(kName.get(0), kVal.get(0)).has(kName.get(1), kVal.get(1)).has(kName.get(2), kVal.get(2)).has("aai-node-type", nodeType); + } else if (topPropIndex == 3) { + propsAndValuesForMsg = " (" + kName.get(0) + " = " + kVal.get(0) + ", " + + kName.get(1) + " = " + kVal.get(1) + ", " + + kName.get(2) + " = " + kVal.get(2) + ", " + + kName.get(3) + " = " + kVal.get(3) + ") "; + verts = g.V().has(kName.get(0), kVal.get(0)).has(kName.get(1), kVal.get(1)).has(kName.get(2), kVal.get(2)).has(kName.get(3), kVal.get(3)).has("aai-node-type", nodeType); + } else { + throw new AAIException("AAI_6114", " We only support 4 keys per nodeType for now \n"); + } + } catch (Exception ex) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logger.error(" ERROR trying to get node for: [" + propsAndValuesForMsg + "] " + LogFormatTools.getStackTop(ex)); + LoggingContext.statusCode(StatusCode.COMPLETE); + LoggingContext.responseCode(LoggingContext.SUCCESS); + } + + if (verts != null) { + while (verts.hasNext()) { + tiV = verts.next(); + retVertList.add(tiV); + } + } + + if (retVertList.size() == 0) { + logger.debug("DEBUG No node found for nodeType = [" + nodeType + + "], propsAndVal = " + propsAndValuesForMsg); + } + + return retVertList; + + }// End of getNodeJustUsingKeyParams() + + + /** + * Gets the node(s) just using key params. + * + * @param transId the trans id + * @param fromAppId the from app id + * @param graph the graph + * @param nodeType the node type + * @param windowStartTime the window start time + * @param propsHash the props hash + * @param apiVersion the api version + * @return the nodes + * @throws AAIException the AAI exception + */ + public ArrayList<Vertex> figureOutNodes2Check(String transId, String fromAppId, + Graph graph, String nodeType, long windowStartTime, + String propsString, EELFLogger logger) throws AAIException { + + ArrayList<Vertex> retVertList = new ArrayList<Vertex>(); + String msg = ""; + GraphTraversal<Vertex, Vertex> tgQ = graph.traversal().V().has("aai-node-type", nodeType); + String qStringForMsg = "graph.traversal().V().has(\"aai-node-type\"," + nodeType + ")"; + + if (propsString != null && !propsString.trim().equals("")) { + propsString = propsString.trim(); + int firstPipeLoc = propsString.indexOf("|"); + if (firstPipeLoc <= 0) { + msg = "Bad props4Collect passed: [" + propsString + "]. 
\n Expecting a format like, 'propName1|propVal1,propName2|propVal2'"; + System.out.println(msg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logger.error(msg); + exit(0); + } + + // Note - if they're only passing one parameter, there won't be any commas + String[] paramArr = propsString.split(","); + for (int i = 0; i < paramArr.length; i++) { + int pipeLoc = paramArr[i].indexOf("|"); + if (pipeLoc <= 0) { + msg = "Bad propsString passed: [" + propsString + "]. \n Expecting a format like, 'propName1|propVal1,propName2|propVal2'"; + System.out.println(msg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logger.error(msg); + exit(0); + } else { + String propName = paramArr[i].substring(0, pipeLoc); + String propVal = paramArr[i].substring(pipeLoc + 1); + tgQ = tgQ.has(propName, propVal); + qStringForMsg = qStringForMsg + ".has(" + propName + "," + propVal + ")"; + } + } + } + + if (tgQ == null) { + msg = "Bad JanusGraphQuery object. "; + System.out.println(msg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.AVAILABILITY_TIMEOUT_ERROR); + logger.error(msg); + exit(0); + } else { + Iterator<Vertex> vertItor = tgQ; + while (vertItor.hasNext()) { + Vertex tiV = vertItor.next(); + if (windowStartTime <= 0) { + // We're not applying a time-window + retVertList.add(tiV); + } else { + Object objTimeStamp = tiV.property("aai-created-ts").orElse(null); + if (objTimeStamp == null) { + // No timestamp - so just take it + retVertList.add(tiV); + } else { + long thisNodeCreateTime = (long) objTimeStamp; + if (thisNodeCreateTime > windowStartTime) { + // It is in our window, so we can take it + retVertList.add(tiV); + } + } + } + } + } + + if (retVertList.size() == 0) { + logger.debug("DEBUG No node found for: [" + qStringForMsg + "], with aai-created-ts > " + windowStartTime); + } + + return retVertList; + + }// End of figureOutNodes2Check() + + + /** + * Gets the preferred dupe. + * + * @param transId the trans id + * @param fromAppId the from app id + * @param g the g + * @param dupeVertexList the dupe vertex list + * @param ver the ver + * @param specialTenantRule the special tenant rule flag + * @param loader the loader + * @param logger the EELFLogger + * @return Vertex + * @throws AAIException the AAI exception + */ + public Vertex getPreferredDupe(String transId, + String fromAppId, Graph g, + ArrayList<Vertex> dupeVertexList, String ver, + Boolean specialTenantRule, Loader loader, EELFLogger logger) + throws AAIException { + + // This method assumes that it is being passed a List of vertex objects + // which violate our uniqueness constraints. + + Vertex nullVtx = null; + + if (dupeVertexList == null) { + return nullVtx; + } + int listSize = dupeVertexList.size(); + if (listSize == 0) { + return nullVtx; + } + if (listSize == 1) { + return (dupeVertexList.get(0)); + } + + Vertex vtxPreferred = null; + Vertex currentFaveVtx = dupeVertexList.get(0); + for (int i = 1; i < listSize; i++) { + Vertex vtxB = dupeVertexList.get(i); + vtxPreferred = pickOneOfTwoDupes(transId, fromAppId, g, + currentFaveVtx, vtxB, ver, specialTenantRule, loader, logger); + if (vtxPreferred == null) { + // We couldn't choose one + return nullVtx; + } else { + currentFaveVtx = vtxPreferred; + } + } + + return (currentFaveVtx); + + } // end of getPreferredDupe() + + + /** + * Pick one of two dupes. 
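+ * A keeper is only chosen when the two vertices agree on node type, key + * values and dependent (parent) node, and one vertex's edge-neighborhood + * contains the other's; otherwise null comes back and neither one is deleted.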
+ * + * @param transId the trans id + * @param fromAppId the from app id + * @param g the g + * @param vtxA the vtx A + * @param vtxB the vtx B + * @param ver the ver + * @param specialTenantRule the special tenant rule flag + * @param loader the loader + * @param logger the EELFLogger + * @return Vertex + * @throws AAIException the AAI exception + */ + public Vertex pickOneOfTwoDupes(String transId, + String fromAppId, Graph g, Vertex vtxA, + Vertex vtxB, String ver, Boolean specialTenantRule, Loader loader, EELFLogger logger) throws AAIException { + + Vertex nullVtx = null; + Vertex preferredVtx = null; + + Long vidA = Long.valueOf(vtxA.id().toString()); + Long vidB = Long.valueOf(vtxB.id().toString()); + + String vtxANodeType = ""; + String vtxBNodeType = ""; + Object obj = vtxA.<Object>property("aai-node-type").orElse(null); + if (obj != null) { + vtxANodeType = obj.toString(); + } + obj = vtxB.<Object>property("aai-node-type").orElse(null); + if (obj != null) { + vtxBNodeType = obj.toString(); + } + + if (vtxANodeType.equals("") || (!vtxANodeType.equals(vtxBNodeType))) { + // Either they're not really dupes or there's some bad data - so + // don't pick one + return nullVtx; + } + + // Check that node A and B both have the same key values (or else they + // are not dupes) + // (We'll check dep-node later) + Collection<String> keyProps = loader.introspectorFromName(vtxANodeType).getKeys(); + Iterator<String> keyPropI = keyProps.iterator(); + while (keyPropI.hasNext()) { + String propName = keyPropI.next(); + String vtxAKeyPropVal = ""; + obj = vtxA.<Object>property(propName).orElse(null); + if (obj != null) { + vtxAKeyPropVal = obj.toString(); + } + String vtxBKeyPropVal = ""; + obj = vtxB.<Object>property(propName).orElse(null); + if (obj != null) { + vtxBKeyPropVal = obj.toString(); + } + + if (vtxAKeyPropVal.equals("") + || (!vtxAKeyPropVal.equals(vtxBKeyPropVal))) { + // Either they're not really dupes or they are missing some key + // data - so don't pick one + return nullVtx; + } + } + + // Collect the vid's and aai-node-types of the vertices that each vertex + // (A and B) is connected to. 
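+ // nodeTypesConn2A/B map a neighbor's aai-node-type to its vertex id (used for + // the dependent-node and special-tenant-rule checks below); vtxIdsConn2A/B just + // collect the neighbor vertex ids for the edge-superset comparisons.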
+ ArrayList<String> vtxIdsConn2A = new ArrayList<String>(); + ArrayList<String> vtxIdsConn2B = new ArrayList<String>(); + HashMap<String, String> nodeTypesConn2A = new HashMap<String, String>(); + HashMap<String, String> nodeTypesConn2B = new HashMap<String, String>(); + + ArrayList<String> retArr = new ArrayList<String>(); + Iterator<Edge> eAI = vtxA.edges(Direction.BOTH); + while (eAI.hasNext()) { + Edge ed = eAI.next(); + Vertex tmpVtx; + if (vtxA.equals(ed.inVertex())) { + tmpVtx = ed.outVertex(); + } else { + tmpVtx = ed.inVertex(); + } + if (tmpVtx == null) { + retArr.add(" >>> COULD NOT FIND VERTEX on the other side of this edge edgeId = " + ed.id() + " <<< "); + } else { + String conVid = tmpVtx.id().toString(); + String nt = ""; + obj = tmpVtx.<Object>property("aai-node-type").orElse(null); + if (obj != null) { + nt = obj.toString(); + } + nodeTypesConn2A.put(nt, conVid); + vtxIdsConn2A.add(conVid); + } + } + + Iterator<Edge> eBI = vtxB.edges(Direction.BOTH); + while (eBI.hasNext()) { + Edge ed = eBI.next(); + Vertex tmpVtx; + + if (vtxB.equals(ed.inVertex())) { + tmpVtx = ed.outVertex(); + } else { + tmpVtx = ed.inVertex(); + } + if (tmpVtx == null) { + retArr.add(" >>> COULD NOT FIND VERTEX on the other side of this edge edgeId = " + ed.id() + " <<< "); + } else { + String conVid = tmpVtx.id().toString(); + String nt = ""; + obj = tmpVtx.<Object>property("aai-node-type").orElse(null); + if (obj != null) { + nt = obj.toString(); + } + nodeTypesConn2B.put(nt, conVid); + vtxIdsConn2B.add(conVid); + } + } + + // 1 - If this kind of node needs a dependent node for uniqueness, then + // verify that they both nodes point to the same dependent + // node (otherwise they're not really duplicates) + // Note - there are sometimes more than one dependent node type since + // one nodeType can be used in different ways. But for a + // particular node, it will only have one dependent node that + // it's connected to. + Collection<String> depNodeTypes = loader.introspectorFromName(vtxANodeType).getDependentOn(); + if (depNodeTypes.isEmpty()) { + // This kind of node is not dependent on any other. That is ok. + } else { + String depNodeVtxId4A = ""; + String depNodeVtxId4B = ""; + Iterator<String> iter = depNodeTypes.iterator(); + while (iter.hasNext()) { + String depNodeType = iter.next(); + if (nodeTypesConn2A.containsKey(depNodeType)) { + // This is the dependent node type that vertex A is using + depNodeVtxId4A = nodeTypesConn2A.get(depNodeType); + } + if (nodeTypesConn2B.containsKey(depNodeType)) { + // This is the dependent node type that vertex B is using + depNodeVtxId4B = nodeTypesConn2B.get(depNodeType); + } + } + if (depNodeVtxId4A.equals("") + || (!depNodeVtxId4A.equals(depNodeVtxId4B))) { + // Either they're not really dupes or there's some bad data - so + // don't pick either one + return nullVtx; + } + } + + if (vtxIdsConn2A.size() == vtxIdsConn2B.size()) { + // 2 - If they both have edges to all the same vertices, then return + // the one with the lower vertexId. + + // OR (2b)-- if this is the SPECIAL case -- of + // "tenant|vserver vs. tenant|service-subscription" + // then we pick/prefer the one that's connected to + // the service-subscription. 
AAI-8172 + boolean allTheSame = true; + Iterator<String> iter = vtxIdsConn2A.iterator(); + while (iter.hasNext()) { + String vtxIdConn2A = iter.next(); + if (!vtxIdsConn2B.contains(vtxIdConn2A)) { + allTheSame = false; + break; + } + } + + if (allTheSame) { + if (vidA < vidB) { + preferredVtx = vtxA; + } else { + preferredVtx = vtxB; + } + } else if (specialTenantRule) { + // They asked us to apply a special rule if it applies + if (vtxIdsConn2A.size() == 2 && vtxANodeType.equals("tenant")) { + // We're dealing with two tenant nodes which each just have + // two connections. One must be the parent (cloud-region) + // which we check in step 1 above. If one connects to + // a vserver and the other connects to a service-subscription, + // our special rule is to keep the one connected + // to the service-subscription. + if (nodeTypesConn2A.containsKey("vserver") && nodeTypesConn2B.containsKey("service-subscription")) { + String infMsg = " WARNING >>> we are using the special tenant rule to choose to " + + " delete tenant vtxId = " + vidA + ", and keep tenant vtxId = " + vidB; + System.out.println(infMsg); + logger.info(infMsg); + preferredVtx = vtxB; + } else if (nodeTypesConn2B.containsKey("vserver") && nodeTypesConn2A.containsKey("service-subscription")) { + String infMsg = " WARNING >>> we are using the special tenant rule to choose to " + + " delete tenant vtxId = " + vidB + ", and keep tenant vtxId = " + vidA; + System.out.println(infMsg); + logger.info(infMsg); + preferredVtx = vtxA; + } + } + } + } else if (vtxIdsConn2A.size() > vtxIdsConn2B.size()) { + // 3 - VertexA is connected to more things than vtxB. + // We'll pick VtxA if its edges are a superset of vtxB's edges. + boolean missingOne = false; + Iterator<String> iter = vtxIdsConn2B.iterator(); + while (iter.hasNext()) { + String vtxIdConn2B = iter.next(); + if (!vtxIdsConn2A.contains(vtxIdConn2B)) { + missingOne = true; + break; + } + } + if (!missingOne) { + preferredVtx = vtxA; + } + } else if (vtxIdsConn2B.size() > vtxIdsConn2A.size()) { + // 4 - VertexB is connected to more things than vtxA. + // We'll pick VtxB if its edges are a superset of vtxA's edges. + boolean missingOne = false; + Iterator<String> iter = vtxIdsConn2A.iterator(); + while (iter.hasNext()) { + String vtxIdConn2A = iter.next(); + if (!vtxIdsConn2B.contains(vtxIdConn2A)) { + missingOne = true; + break; + } + } + if (!missingOne) { + preferredVtx = vtxB; + } + } else { + preferredVtx = nullVtx; + } + + return (preferredVtx); + + } // end of pickOneOfTwoDupes() + + + /** + * Group vertices by their dependent (parent) nodes. + * + * @param transId the trans id + * @param fromAppId the from app id + * @param g the g + * @param version the version + * @param nType the n type + * @param passedVertList the passed vert list + * @param loader the loader + * @return the hash map + * @throws AAIException the AAI exception + */ + private HashMap<String, ArrayList<Vertex>> groupVertsByDepNodes( + String transId, String fromAppId, Graph g, String version, + String nType, ArrayList<Vertex> passedVertList, Loader loader) + throws AAIException { + + // Given a list of JanusGraph Vertices, group them together by dependent + // nodes. I.e. if given a list of ip address nodes (assumed to all + // have the same key info) they might sit under several different + // parent vertices. + // Under normal conditions, there would only be one per parent -- but + // we're trying to find duplicates - so we allow for the case + // where more than one is under the same parent node. 
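+ // The returned map is keyed by the parent vertex-id; e.g. (values illustrative + // only) three dupe ip-address vertices under parents 701 and 702 would come + // back as { "701" : [v1, v2], "702" : [v3] }.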
+ + HashMap<String, ArrayList<Vertex>> retHash = new HashMap<String, ArrayList<Vertex>>(); + GraphTraversalSource gts = g.traversal(); + if (passedVertList != null) { + Iterator<Vertex> iter = passedVertList.iterator(); + while (iter.hasNext()) { + Vertex thisVert = iter.next(); + Vertex parentVtx = getConnectedParent(gts, thisVert); + if (parentVtx != null) { + String parentVid = parentVtx.id().toString(); + if (retHash.containsKey(parentVid)) { + // add this vert to the list for this parent key + retHash.get(parentVid).add(thisVert); + } else { + // This is the first one we found on this parent + ArrayList<Vertex> vList = new ArrayList<Vertex>(); + vList.add(thisVert); + retHash.put(parentVid, vList); + } + } + } + } + return retHash; + + }// end of groupVertsByDepNodes() + + + private Vertex getConnectedParent(GraphTraversalSource g, + Vertex startVtx) throws AAIException { + + Vertex parentVtx = null; + // This traversal does not assume a parent/child edge direction + Iterator<Vertex> vertI = g.V(startVtx).union(__.inE().has(EdgeProperty.CONTAINS.toString(), AAIDirection.OUT.toString()).outV(), __.outE().has(EdgeProperty.CONTAINS.toString(), AAIDirection.IN.toString()).inV()); + while (vertI != null && vertI.hasNext()) { + // Note - there better only be one! + parentVtx = vertI.next(); + } + return parentVtx; + + }// End of getConnectedParent() + + + /** + * Delete non keepers if appropriate. + * + * @param g the g + * @param dupeInfoList the dupe info string + * @param logger the EELFLogger + * @return the boolean + */ + private Boolean deleteNonKeepers(Graph g, + ArrayList<String> dupeInfoList, EELFLogger logger) { + + // This assumes that each dupeInfoString is in the format of + // pipe-delimited vid's followed by either "keepVid=xyz" or "keepVid=UNDETERMINED" + // ie. "3456|9880|keepVid=3456" + + boolean didADelFlag = false; + for (int n = 0; n < dupeInfoList.size(); n++) { + String dupeInfoString = dupeInfoList.get(n); + boolean tmpFlag = deleteNonKeeperForOneSet(g, dupeInfoString, logger); + didADelFlag = tmpFlag | didADelFlag; + } + + return didADelFlag; + + }// end of deleteNonKeepers() + + + /** + * Delete non keepers if appropriate. + * + * @param g the g + * @param dupeSetStr the dupe string + * @param logger the EELFLogger + * @return the boolean + */ + private Boolean deleteNonKeeperForOneSet(Graph g, + String dupeInfoString, EELFLogger logger) { + + Boolean deletedSomething = false; + // This assumes that each dupeInfoString is in the format of + // pipe-delimited vid's followed by either "keepVid=xyz" or "keepVid=UNDETERMINED" + // ie. "3456|9880|keepVid=3456" + + + String[] dupeArr = dupeInfoString.split("\\|"); + ArrayList<String> idArr = new ArrayList<String>(); + int lastIndex = dupeArr.length - 1; + for (int i = 0; i <= lastIndex; i++) { + if (i < lastIndex) { + // This is not the last entry, it is one of the dupes, + String vidString = dupeArr[i]; + idArr.add(vidString); + } else { + // This is the last entry which should tell us if we have a + // preferred keeper + String prefString = dupeArr[i]; + if (prefString.equals("KeepVid=UNDETERMINED")) { + // They sent us a bad string -- nothing should be deleted if + // no dupe could be tagged as preferred. + return false; + } else { + // If we know which to keep, then the prefString should look + // like, "KeepVid=12345" + String[] prefArr = prefString.split("="); + if (prefArr.length != 2 || (!prefArr[0].equals("KeepVid"))) { + String emsg = "Bad format. 
Expecting KeepVid=999999"; + System.out.println(emsg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logger.error(emsg); + LoggingContext.statusCode(StatusCode.COMPLETE); + LoggingContext.responseCode(LoggingContext.SUCCESS); + return false; + } else { + String keepVidStr = prefArr[1]; + if (idArr.contains(keepVidStr)) { + idArr.remove(keepVidStr); + // So now, the idArr should just contain the vid's + // that we want to remove. + for (int x = 0; x < idArr.size(); x++) { + boolean okFlag = true; + String thisVid = idArr.get(x); + try { + long longVertId = Long.parseLong(thisVid); + Vertex vtx = g.traversal().V(longVertId).next(); + String msg = "--->>> We will delete node with VID = " + thisVid + " <<<---"; + System.out.println(msg); + logger.info(msg); + vtx.remove(); + } catch (Exception e) { + okFlag = false; + String emsg = "ERROR trying to delete VID = " + thisVid + ", [" + e + "]"; + System.out.println(emsg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logger.error(emsg); + LoggingContext.statusCode(StatusCode.COMPLETE); + LoggingContext.responseCode(LoggingContext.SUCCESS); + } + if (okFlag) { + String infMsg = " DELETED VID = " + thisVid; + logger.info(infMsg); + System.out.println(infMsg); + deletedSomething = true; + } + } + } else { + String emsg = "ERROR - Vertex Id to keep not found in list of dupes. dupeInfoString = [" + + dupeInfoString + "]"; + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logger.error(emsg); + LoggingContext.statusCode(StatusCode.COMPLETE); + LoggingContext.responseCode(LoggingContext.SUCCESS); + System.out.println(emsg); + return false; + } + } + }// else we know which one to keep + }// else last entry + }// for each vertex in a group + + return deletedSomething; + + }// end of deleteNonKeeperForOneSet() + + + /** + * Get values of the key properties for a node. 
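+ * A key property that is missing on the vertex shows up as a null value in the map.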
+ * + * @param tvx the vertex to pull the properties from + * @param keyPropNamesArr ArrayList (ordered) of key prop names + * @param logger the EELFLogger + * @return a hashMap of the propertyNames/values + */ + private HashMap<String, Object> getNodeKeyVals(Vertex tvx, + ArrayList<String> keyPropNamesArr, EELFLogger logger) { + + HashMap<String, Object> retHash = new HashMap<String, Object>(); + Iterator<String> propItr = keyPropNamesArr.iterator(); + while (propItr.hasNext()) { + String propName = propItr.next(); + if (tvx != null) { + Object propValObj = tvx.property(propName).orElse(null); + retHash.put(propName, propValObj); + } + } + return retHash; + + }// End of getNodeKeyVals() + + + /** + * Get values of the key properties for a node as a single string + * + * @param tvx the vertex to pull the properties from + * @param keyPropNamesArr collection of key prop names + * @param logger the EELFLogger + * @return a String of concatenated values + */ + private String getNodeKeyValString(Vertex tvx, + ArrayList<String> keyPropNamesArr, EELFLogger logger) { + + // -- NOTE -- for what we're using this for, we would need to + // guarantee that the properties are always in the same order + + String retString = ""; + Iterator<String> propItr = keyPropNamesArr.iterator(); + while (propItr.hasNext()) { + String propName = propItr.next(); + if (tvx != null) { + Object propValObj = tvx.property(propName).orElse(null); + // String concatenation renders a missing value as "null" instead of throwing + retString = " " + retString + propValObj; + } + } + return retString; + + }// End of getNodeKeyValString() + + + /** + * Find duplicate sets from two dupe runs. + * + * @param firstPassDupeSets from the first pass + * @param secondPassDupeSets from the second pass + * @param logger the EELFLogger + * @return commonDupeSets that are common to both passes and have a determined keeper + */ + private ArrayList<String> figureWhichDupesStillNeedFixing(ArrayList<String> firstPassDupeSets, + ArrayList<String> secondPassDupeSets, EELFLogger logger) { + + ArrayList<String> common2BothSet = new ArrayList<String>(); + + // We just want to look for entries from the first set which have identical (almost) + // entries in the secondary set. I say "almost" because the order of the + // vid's to delete may be in a different order, but we only want to use it if + // they have all the same values. Note also - we're just looking for + // the sets where we have a candidate to delete. + + // The duplicate-set Strings are in this format: + // "1234|5678|keepVid=UNDETERMINED" (if there were 2 dupes, and we + // couldn't figure out which one to keep) + // or, "100017|200027|30037|keepVid=30037" (if there were 3 dupes and we + // thought the third one was the one that should survive) + + if (firstPassDupeSets == null || firstPassDupeSets.isEmpty() + || secondPassDupeSets == null || secondPassDupeSets.isEmpty()) { + // If either set is empty, then our return list has to be empty too + return common2BothSet; + } + + boolean needToParse = false; + for (int x = 0; x < secondPassDupeSets.size(); x++) { + String secPassDupeSetStr = secondPassDupeSets.get(x); + if (secPassDupeSetStr.endsWith("UNDETERMINED")) { + // This is a set of dupes where we could not pick one + // to delete - so don't include it on our list for + // fixing. 
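+ // Intentionally empty - sets with no determined keeper are never auto-fixed.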
+ } else if (firstPassDupeSets.contains(secPassDupeSetStr)) { + // We have lucked out and do not even need to parse this since + // it was in the other array with any dupes listed in the same order + // This is actually the most common scenario since there is + // usually only one dupe, so order doesn't matter. + common2BothSet.add(secPassDupeSetStr); + } else { + // We'll need to do some parsing to check this one + needToParse = true; + } + } + + if (needToParse) { + // Make a hash from the first and second Pass data + // where the key is the vid to KEEP and the value is an + // array of (String) vids that would get deleted. + HashMap<String, ArrayList<String>> firstPassHash = makeKeeperHashOfDupeStrings(firstPassDupeSets, common2BothSet, logger); + + HashMap<String, ArrayList<String>> secPassHash = makeKeeperHashOfDupeStrings(secondPassDupeSets, common2BothSet, logger); + + // Loop through the secondPass data and keep the ones + // that check out against the firstPass set. + for (Map.Entry<String, ArrayList<String>> entry : secPassHash.entrySet()) { + boolean skipThisOne = false; + String secKey = entry.getKey(); + ArrayList<String> secList = entry.getValue(); + if (!firstPassHash.containsKey(secKey)) { + // The second pass found this delete candidate, but not the first pass + skipThisOne = true; + } else { + // They both think they should keep this VID, check the associated deletes for it. + ArrayList<String> firstList = firstPassHash.get(secKey); + for (int z = 0; z < secList.size(); z++) { + if (!firstList.contains(secList.get(z))) { + // The first pass did not think this needed to be deleted + skipThisOne = true; + } + } + } + if (!skipThisOne) { + // Put the string back together and pass it back + // Not beautiful, but no time to make it nice right now... + // Put it back in the format: "3456|9880|keepVid=3456" + String thisDelSetStr = ""; + for (int z = 0; z < secList.size(); z++) { + if (z == 0) { + thisDelSetStr = secList.get(z); + } else { + thisDelSetStr = thisDelSetStr + "|" + secList.get(z); + } + } + thisDelSetStr = thisDelSetStr + "|keepVid=" + secKey; + common2BothSet.add(thisDelSetStr); + } + } + + } + return common2BothSet; + + }// figureWhichDupesStillNeedFixing + + + private HashMap<String, ArrayList<String>> makeKeeperHashOfDupeStrings(ArrayList<String> dupeSets, + ArrayList<String> excludeSets, EELFLogger logger) { + + HashMap<String, ArrayList<String>> keeperHash = new HashMap<String, ArrayList<String>>(); + + for (int x = 0; x < dupeSets.size(); x++) { + String tmpSetStr = dupeSets.get(x); + if (excludeSets.contains(tmpSetStr)) { + // This isn't one of the ones we needed to parse. + continue; + } + + String[] dupeArr = tmpSetStr.split("\\|"); + ArrayList<String> delIdArr = new ArrayList<String>(); + int lastIndex = dupeArr.length - 1; + for (int i = 0; i <= lastIndex; i++) { + if (i < lastIndex) { + // This is not the last entry, it is one of the dupes + delIdArr.add(dupeArr[i]); + } else { + // This is the last entry which should tell us if we + // have a preferred keeper and how many dupes we had + String prefString = dupeArr[i]; + if (i == 1) { + // There was only one dupe, so if we were gonna find + // it, we would have found it above with no parsing. + } else if (prefString.equals("KeepVid=UNDETERMINED")) { + // This one had no determined keeper, so we don't + // want it. 
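+ // Nothing gets added to keeperHash for this set.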
+ } else { + // If we know which to keep, then the prefString + // should look like, "KeepVid=12345" + String[] prefArr = prefString.split("="); + if (prefArr.length != 2 + || (!prefArr[0].equals("KeepVid"))) { + String infMsg = "Bad format in makeKeeperHashOfDupeStrings(). Expecting " + " KeepVid=999999 but string looks like: [" + tmpSetStr + "]"; + System.out.println(infMsg); + logger.info(infMsg); + } else { + // Key the hash by the vid that should be kept + keeperHash.put(prefArr[1], delIdArr); + } + } + } + } + + return keeperHash; + + }// End of makeKeeperHashOfDupeStrings() + + + /** + * Show details for each node in one set of duplicates. + * + * @param g the g + * @param dupeInfoString the dupe info string + * @param logger the EELFLogger + */ + private void showNodeDetailsForADupeSet(Graph g, String dupeInfoString, EELFLogger logger) { + + // dang... parsing this string once again... + + String[] dupeArr = dupeInfoString.split("\\|"); + int lastIndex = dupeArr.length - 1; + for (int i = 0; i <= lastIndex; i++) { + if (i < lastIndex) { + // This is not the last entry, it is one of the dupes, + String vidString = dupeArr[i]; + long longVertId = Long.parseLong(vidString); + Vertex vtx = g.traversal().V(longVertId).next(); + showNodeInfo(logger, vtx, false); + } else { + // This is the last entry which should tell us if we have a + // preferred keeper + String prefString = dupeArr[i]; + if (prefString.equals("KeepVid=UNDETERMINED")) { + String msg = " Our algorithm cannot choose from among these, so they will all be kept. -------\n"; + System.out.println(msg); + logger.info(msg); + } else { + // If we know which to keep, then the prefString should look + // like, "KeepVid=12345" + String[] prefArr = prefString.split("="); + if (prefArr.length != 2 || (!prefArr[0].equals("KeepVid"))) { + String emsg = "Bad format. Expecting KeepVid=999999"; + System.out.println(emsg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logger.error(emsg); + LoggingContext.statusCode(StatusCode.COMPLETE); + LoggingContext.responseCode(LoggingContext.SUCCESS); + } else { + String keepVidStr = prefArr[1]; + String msg = " vid = " + keepVidStr + " is the one that we would KEEP. 
------\n"; + System.out.println(msg); + logger.info(msg); + } + } + } + } + + }// End of showNodeDetailsForADupeSet() + + private int graphIndex = 1; + + public JanusGraph setupGraph(EELFLogger logger) { + + JanusGraph JanusGraph = null; + + + try (InputStream inputStream = new FileInputStream(AAIConstants.REALTIME_DB_CONFIG);) { + + Properties properties = new Properties(); + properties.load(inputStream); + + if ("inmemory".equals(properties.get("storage.backend"))) { + JanusGraph = AAIGraph.getInstance().getGraph(); + graphType = "inmemory"; + } else { + JanusGraph = JanusGraphFactory.open(new AAIGraphConfig.Builder(AAIConstants.REALTIME_DB_CONFIG).forService(DupeTool.class.getSimpleName()).withGraphType("realtime" + graphIndex).buildConfiguration()); + graphIndex++; + } + } catch (Exception e) { + logger.error("Unable to open the graph", e); + } + + return JanusGraph; + } + + public void closeGraph(JanusGraph graph, EELFLogger logger) { + + try { + if ("inmemory".equals(graphType)) { + return; + } + if (graph != null && graph.isOpen()) { + graph.tx().close(); + graph.close(); + } + } catch (Exception ex) { + // Don't throw anything because JanusGraph sometimes is just saying that the graph is already closed{ + logger.warn("WARNING from final graph.shutdown()", ex); + } + } +} + diff --git a/src/main/java/org/onap/aai/dbgen/ForceDeleteTool.java b/src/main/java/org/onap/aai/dbgen/ForceDeleteTool.java new file mode 100644 index 0000000..790bfa1 --- /dev/null +++ b/src/main/java/org/onap/aai/dbgen/ForceDeleteTool.java @@ -0,0 +1,875 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.dbgen; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Properties; +import java.util.Scanner; +import java.util.UUID; + +import org.apache.commons.configuration.ConfigurationException; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__; +import org.apache.tinkerpop.gremlin.structure.Direction; +import org.apache.tinkerpop.gremlin.structure.Edge; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.apache.tinkerpop.gremlin.structure.VertexProperty; +import org.onap.aai.dbmap.AAIGraphConfig; +import org.onap.aai.dbmap.AAIGraph; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.logging.LogFormatTools; +import org.onap.aai.logging.LoggingContext; +import org.onap.aai.logging.LoggingContext.StatusCode; +import org.onap.aai.edges.enums.AAIDirection; +import org.onap.aai.edges.enums.EdgeProperty; +import org.onap.aai.util.AAIConfig; +import org.onap.aai.util.AAIConstants; +import org.slf4j.MDC; + +import com.att.eelf.configuration.Configuration; +import com.att.eelf.configuration.EELFLogger; +import com.att.eelf.configuration.EELFManager; +import org.janusgraph.core.JanusGraphFactory; +import org.janusgraph.core.JanusGraph; + + + +public class ForceDeleteTool { + private static final String FROMAPPID = "AAI-DB"; + private static final String TRANSID = UUID.randomUUID().toString(); + + private static String graphType = "realdb"; + + public static boolean SHOULD_EXIT_VM = true; + + public static int EXIT_VM_STATUS_CODE = -1; + + public static void exit(int statusCode){ + if(SHOULD_EXIT_VM){ + System.exit(1); + } + EXIT_VM_STATUS_CODE = statusCode; + } + + /* + * The main method. + * + * @param args the arguments + */ + public static void main(String[] args) { + + //SWGK 01/21/2016 - To suppress the warning message when the tool is run from the Terminal. 
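+ // Typical invocations look like the following (the script name and all + // values are illustrative only): + // forceDeleteTool.sh -action COLLECT_DATA -userId someuser -params4Collect 'tenant-id|tee4' + // forceDeleteTool.sh -action DELETE_NODE -userId someuser -vertexId 123456 + // forceDeleteTool.sh -action DELETE_EDGE -userId someuser -edgeId 'odd-edge-id'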
+ + System.setProperty("aai.service.name", ForceDelete.class.getSimpleName()); + // Set the logging file properties to be used by EELFManager + Properties props = System.getProperties(); + props.setProperty(Configuration.PROPERTY_LOGGING_FILE_NAME, AAIConstants.AAI_FORCE_DELETE_LOGBACK_PROPS); + props.setProperty(Configuration.PROPERTY_LOGGING_FILE_PATH, AAIConstants.AAI_HOME_BUNDLECONFIG); + EELFLogger logger = EELFManager.getInstance().getLogger(ForceDeleteTool.class.getSimpleName()); + MDC.put("logFilenameAppender", ForceDeleteTool.class.getSimpleName()); + + LoggingContext.init(); + LoggingContext.partnerName(FROMAPPID); + LoggingContext.serviceName(AAIConstants.AAI_RESOURCES_MS); + LoggingContext.component("forceDeleteTool"); + LoggingContext.targetEntity(AAIConstants.AAI_RESOURCES_MS); + LoggingContext.targetServiceName("main"); + LoggingContext.requestId(TRANSID); + LoggingContext.statusCode(StatusCode.COMPLETE); + LoggingContext.responseCode(LoggingContext.SUCCESS); + + String actionVal = ""; + String userIdVal = ""; + String dataString = ""; + Boolean displayAllVidsFlag = false; // Note - This should rarely be needed + Boolean overRideProtection = false; // This should rarely be used - it overrides all our new checking + long vertexIdLong = 0; + String edgeIdStr = ""; + String argStr4Msg = ""; + + if (args != null && args.length > 0) { + // They passed some arguments in that will affect processing + for (int i = 0; i < args.length; i++) { + String thisArg = args[i]; + argStr4Msg = argStr4Msg + " " + thisArg; + + if (thisArg.equals("-action")) { + i++; + if (i >= args.length) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error(" No value passed with -action option. "); + exit(0); + } + actionVal = args[i]; + argStr4Msg = argStr4Msg + " " + actionVal; + } + else if (thisArg.equals("-userId")) { + i++; + if (i >= args.length) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error(" No value passed with -userId option. "); + exit(0); + } + userIdVal = args[i]; + argStr4Msg = argStr4Msg + " " + userIdVal; + } + else if (thisArg.equals("-overRideProtection")) { + overRideProtection = true; + } + else if (thisArg.equals("-DISPLAY_ALL_VIDS")) { + displayAllVidsFlag = true; + } + else if (thisArg.equals("-vertexId")) { + i++; + if (i >= args.length) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error(" No value passed with -vertexId option. "); + exit(0); + } + String nextArg = args[i]; + argStr4Msg = argStr4Msg + " " + nextArg; + try { + vertexIdLong = Long.parseLong(nextArg); + } catch (Exception e) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error("Bad value passed with -vertexId option: [" + + nextArg + "]"); + exit(0); + } + } + else if (thisArg.equals("-params4Collect")) { + i++; + if (i >= args.length) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error(" No value passed with -params4Collect option. 
"); + exit(0); + } + dataString = args[i]; + argStr4Msg = argStr4Msg + " " + dataString; + } + else if (thisArg.equals("-edgeId")) { + i++; + if (i >= args.length) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error(" No value passed with -edgeId option. "); + exit(0); + } + String nextArg = args[i]; + argStr4Msg = argStr4Msg + " " + nextArg; + edgeIdStr = nextArg; + } + else { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error(" Unrecognized argument passed to ForceDeleteTool: [" + + thisArg + "]. "); + logger.error(" Valid values are: -action -userId -vertexId -edgeId -overRideProtection -params4Collect -DISPLAY_ALL_VIDS"); + exit(0); + } + } + } + + if( !actionVal.equals("COLLECT_DATA") && !actionVal.equals("DELETE_NODE") && !actionVal.equals("DELETE_EDGE")){ + String emsg = "Bad action parameter [" + actionVal + "] passed to ForceDeleteTool(). Valid values = COLLECT_DATA or DELETE_NODE or DELETE_EDGE\n"; + System.out.println(emsg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error(emsg); + exit(0); + } + + if( actionVal.equals("DELETE_NODE") && vertexIdLong == 0 ){ + String emsg = "ERROR: No vertex ID passed on DELETE_NODE request. \n"; + System.out.println(emsg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error(emsg); + exit(0); + } + else if( actionVal.equals("DELETE_EDGE") && edgeIdStr.equals("")){ + String emsg = "ERROR: No edge ID passed on DELETE_EDGE request. \n"; + System.out.println(emsg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error(emsg); + exit(0); + } + + + userIdVal = userIdVal.trim(); + if( (userIdVal.length() < 6) || userIdVal.toUpperCase().equals("AAIADMIN") ){ + String emsg = "Bad userId parameter [" + userIdVal + "] passed to ForceDeleteTool(). 
must be not empty and not aaiadmin \n"; + System.out.println(emsg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error(emsg); + exit(0); + } + + String msg = ""; + JanusGraph graph = null; + try { + AAIConfig.init(); + System.out.println(" ---- NOTE --- about to open graph (takes a little while)--------\n"); + graph = setupGraph(logger); + if( graph == null ){ + String emsg = "could not get graph object in ForceDeleteTool() \n"; + System.out.println(emsg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.AVAILABILITY_TIMEOUT_ERROR); + logger.error(emsg); + exit(0); + } + } + catch (AAIException e1) { + msg = e1.getErrorObject().toString(); + System.out.println(msg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.UNKNOWN_ERROR); + logger.error(msg); + exit(0); + } + catch (Exception e2) { + msg = e2.toString(); + System.out.println(msg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.UNKNOWN_ERROR); + logger.error(msg); + exit(0); + } + + msg = "ForceDelete called by: userId [" + userIdVal + "] with these params: [" + argStr4Msg + "]"; + System.out.println(msg); + logger.info(msg); + + ForceDelete fd = new ForceDelete(graph); + if( actionVal.equals("COLLECT_DATA") ){ + // When doing COLLECT_DATA, we expect them to either pass the vertexId or + // that the dataString string to be comma separated name value pairs like this: + // "propName1|propVal1,propName2|propVal2" etc. We will look for a node or nodes + // that have properties that ALL match what was passed in. + GraphTraversal<Vertex, Vertex> g = null; + String qStringForMsg = ""; + int resCount = 0; + if( vertexIdLong > 0 ){ + // They know which vertex they want to look at + qStringForMsg = "graph.vertices(" + vertexIdLong + ")"; + Iterator <Vertex> vtxItr = graph.vertices( vertexIdLong ); + if( vtxItr != null && vtxItr.hasNext() ) { + Vertex vtx = vtxItr.next(); + fd.showNodeInfo( logger, vtx, displayAllVidsFlag ); + resCount++; + } + } + else { + // we need to find the node or nodes based on the dataString + int firstPipeLoc = dataString.indexOf("|"); + if( firstPipeLoc <= 0 ){ + msg = "Must use the -params4Collect option when collecting data with data string in a format like: 'propName1|propVal1,propName2|propVal2'"; + System.out.println(msg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error(msg); + exit(0); + } + g = graph.traversal().V(); + qStringForMsg = " graph.traversal().V()"; + // Note - if they're only passing one parameter, there won't be any commas + String [] paramArr = dataString.split(","); + for( int i = 0; i < paramArr.length; i++ ){ + int pipeLoc = paramArr[i].indexOf("|"); + if( pipeLoc <= 0 ){ + msg = "Must use the -params4Collect option when collecting data with data string in a format like: 'propName1|propVal1,propName2|propVal2'"; + System.out.println(msg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error(msg); + exit(0); + } + else { + String propName = paramArr[i].substring(0,pipeLoc); + String propVal = paramArr[i].substring(pipeLoc + 1); + g = g.has(propName,propVal); + qStringForMsg = qStringForMsg + ".has(" + propName + "," + propVal + ")"; + } + } + + if( (g != null)){ + Iterator<Vertex> vertItor = g; + while( vertItor.hasNext() ){ 
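+ // For each matching vertex, dump its properties and edges and report + // how many descendant (contained) nodes sit below it.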
+ resCount++; + Vertex v = vertItor.next(); + fd.showNodeInfo( logger, v, displayAllVidsFlag ); + int descendantCount = fd.countDescendants( logger, v, 0 ); + String infMsg = " Found " + descendantCount + " descendant nodes \n"; + System.out.println( infMsg ); + logger.info( infMsg ); + } + } + else { + msg = "Bad JanusGraphQuery object. "; + System.out.println(msg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logger.error(msg); + exit(0); + } + } + + String infMsg = "\n\n Found: " + resCount + " nodes for this query: [" + qStringForMsg + "]\n"; + System.out.println( infMsg ); + logger.info( infMsg ); + } + else if( actionVal.equals("DELETE_NODE") ){ + Iterator <Vertex> vtxItr = graph.vertices( vertexIdLong ); + if( vtxItr != null && vtxItr.hasNext() ) { + Vertex vtx = vtxItr.next(); + fd.showNodeInfo( logger, vtx, displayAllVidsFlag ); + int descendantCount = fd.countDescendants( logger, vtx, 0 ); + String infMsg = " Found " + descendantCount + " descendant nodes. Note - forceDelete does not cascade to " + + " child nodes, but they may become unreachable after the delete. \n"; + System.out.println( infMsg ); + logger.info( infMsg ); + + int edgeCount = fd.countEdges( logger, vtx ); + + infMsg = " Found total of " + edgeCount + " edges incident on this node. \n"; + System.out.println( infMsg ); + logger.info( infMsg ); + + if( fd.getNodeDelConfirmation(logger, userIdVal, vtx, descendantCount, edgeCount, overRideProtection) ){ + vtx.remove(); + graph.tx().commit(); + infMsg = ">>>>>>>>>> Removed node with vertexId = " + vertexIdLong; + logger.info( infMsg ); + System.out.println(infMsg); + } + else { + infMsg = " Delete Cancelled. "; + System.out.println(infMsg); + logger.info( infMsg ); + } + } + else { + String infMsg = ">>>>>>>>>> Vertex with vertexId = " + vertexIdLong + " not found."; + System.out.println( infMsg ); + logger.info( infMsg ); + } + } + else if( actionVal.equals("DELETE_EDGE") ){ + Edge thisEdge = null; + Iterator <Edge> edItr = graph.edges( edgeIdStr ); + if( edItr != null && edItr.hasNext() ) { + thisEdge = edItr.next(); + } + + if( thisEdge == null ){ + String infMsg = ">>>>>>>>>> Edge with edgeId = " + edgeIdStr + " not found."; + logger.info( infMsg ); + System.out.println(infMsg); + exit(0); + } + + if( fd.getEdgeDelConfirmation(logger, userIdVal, thisEdge, overRideProtection) ){ + thisEdge.remove(); + graph.tx().commit(); + String infMsg = ">>>>>>>>>> Removed edge with edgeId = " + edgeIdStr; + logger.info( infMsg ); + System.out.println(infMsg); + } + else { + String infMsg = " Delete Cancelled. "; + System.out.println(infMsg); + logger.info( infMsg ); + } + exit(0); + } + else { + String emsg = "Unknown action parameter [" + actionVal + "] passed to ForceDeleteTool(). 
Valid values = COLLECT_DATA, DELETE_NODE or DELETE_EDGE \n"; + System.out.println(emsg); + logger.info( emsg ); + exit(0); + } + + closeGraph(graph, logger); + exit(0); + + }// end of main() + + public static class ForceDelete { + + private final int MAXDESCENDENTDEPTH = 15; + private final JanusGraph graph; + public ForceDelete(JanusGraph graph) { + this.graph = graph; + } + public void showNodeInfo(EELFLogger logger, Vertex tVert, Boolean displayAllVidsFlag ){ + + try { + Iterator<VertexProperty<Object>> pI = tVert.properties(); + String infStr = ">>> Found Vertex with VertexId = " + tVert.id() + ", properties: "; + System.out.println( infStr ); + logger.info(infStr); + while( pI.hasNext() ){ + VertexProperty<Object> tp = pI.next(); + infStr = " [" + tp.key() + "|" + tp.value() + "] "; + System.out.println( infStr ); + logger.info(infStr); + } + + ArrayList <String> retArr = collectEdgeInfoForNode( logger, tVert, displayAllVidsFlag ); + for( String infoStr : retArr ){ + System.out.println( infoStr ); + logger.info(infoStr); + } + } + catch (Exception e){ + String warnMsg = " -- Error -- trying to display edge info. [" + e.getMessage() + "]"; + System.out.println( warnMsg ); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logger.warn(warnMsg); + LoggingContext.successStatusFields(); + } + + }// End of showNodeInfo() + + + public void showPropertiesForEdge( EELFLogger logger, Edge tEd ){ + String infMsg = ""; + if( tEd == null ){ + infMsg = "null Edge object passed to showPropertiesForEdge()"; + System.out.print(infMsg); + logger.info(infMsg); + return; + } + + // Try to show the edge properties + try { + infMsg =" Label for this Edge = [" + tEd.label() + "] "; + System.out.print(infMsg); + logger.info(infMsg); + + infMsg =" EDGE Properties for edgeId = " + tEd.id() + ": "; + System.out.print(infMsg); + logger.info(infMsg); + Iterator <String> pI = tEd.keys().iterator(); + while( pI.hasNext() ){ + String propKey = pI.next(); + infMsg = "Prop: [" + propKey + "], val = [" + + tEd.property(propKey) + "] "; + System.out.print(infMsg); + logger.info(infMsg); + } + } + catch( Exception ex ){ + infMsg = " Could not retrieve properties for this edge. exMsg = [" + + ex.getMessage() + "] "; + System.out.println( infMsg ); + logger.info(infMsg); + } + + // Try to show what's connected to the IN side of this Edge + try { + infMsg = " Looking for the Vertex on the IN side of the edge: "; + System.out.print(infMsg); + logger.info(infMsg); + Vertex inVtx = tEd.inVertex(); + Iterator<VertexProperty<Object>> pI = inVtx.properties(); + String infStr = ">>> Found Vertex with VertexId = " + inVtx.id() + + ", properties: "; + System.out.println( infStr ); + logger.info(infStr); + while( pI.hasNext() ){ + VertexProperty<Object> tp = pI.next(); + infStr = " [" + tp.key() + "|" + tp.value() + "] "; + System.out.println( infStr ); + logger.info(infStr); + } + } + catch( Exception ex ){ + infMsg = " Could not retrieve vertex data for the IN side of " + + "the edge. 
exMsg = [" + ex.getMessage() + "] "; + System.out.println( infMsg ); + logger.info(infMsg); + } + + // Try to show what's connected to the OUT side of this Edge + try { + infMsg = " Looking for the Vertex on the OUT side of the edge: "; + System.out.print(infMsg); + logger.info(infMsg); + Vertex outVtx = tEd.outVertex(); + Iterator<VertexProperty<Object>> pI = outVtx.properties(); + String infStr = ">>> Found Vertex with VertexId = " + outVtx.id() + + ", properties: "; + System.out.println( infStr ); + logger.info(infStr); + while( pI.hasNext() ){ + VertexProperty<Object> tp = pI.next(); + infStr = " [" + tp.key() + "|" + tp.value() + "] "; + System.out.println( infStr ); + logger.info(infStr); + } + } + catch( Exception ex ){ + infMsg = " Could not retrieve vertex data for the OUT side of " + + "the edge. exMsg = [" + ex.getMessage() + "] "; + System.out.println( infMsg ); + logger.info(infMsg); + } + + }// end showPropertiesForEdge() + + + + public ArrayList <String> collectEdgeInfoForNode( EELFLogger logger, Vertex tVert, boolean displayAllVidsFlag ){ + ArrayList <String> retArr = new ArrayList <String> (); + Direction dir = Direction.OUT; + for ( int i = 0; i <= 1; i++ ){ + if( i == 1 ){ + // Second time through we'll look at the IN edges. + dir = Direction.IN; + } + Iterator <Edge> eI = tVert.edges(dir); + if( ! eI.hasNext() ){ + retArr.add("No " + dir + " edges were found for this vertex. "); + } + while( eI.hasNext() ){ + Edge ed = eI.next(); + String edId = ed.id().toString(); + String lab = ed.label(); + Vertex vtx = null; + if( dir == Direction.OUT ){ + // get the vtx on the "other" side + vtx = ed.inVertex(); + } + else { + // get the vtx on the "other" side + vtx = ed.outVertex(); + } + if( vtx == null ){ + retArr.add(" >>> COULD NOT FIND VERTEX on the other side of this edge edgeId = " + ed.id() + " <<< "); + } + else { + String nType = vtx.<String>property("aai-node-type").orElse(null); + if( displayAllVidsFlag ){ + // This should rarely be needed + String vid = vtx.id().toString(); + retArr.add("Found an " + dir + " edge (" + lab + ") with EDGE-ID = " + edId + + ", between this vertex and a [" + nType + "] node with VtxId = " + vid ); + } + else { + // This is the normal case + retArr.add("Found an " + dir + " edge (" + lab + ") between this vertex and a [" + nType + "] node. 
"); + } + } + } + } + return retArr; + + }// end of collectEdgeInfoForNode() + + + public int countEdges( EELFLogger logger, Vertex vtx ){ + int edgeCount = 0; + try { + Iterator<Edge> edgesItr = vtx.edges(Direction.BOTH); + while( edgesItr.hasNext() ){ + edgesItr.next(); + edgeCount++; + } + } + catch (Exception e) { + String wMsg = "-- ERROR -- Stopping the counting of edges because of Exception [" + e.getMessage() + "]"; + System.out.println( wMsg ); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logger.warn( wMsg ); + LoggingContext.successStatusFields(); + } + return edgeCount; + + }// end of countEdges() + + + public int countDescendants(EELFLogger logger, Vertex vtx, int levelVal ){ + int totalCount = 0; + int thisLevel = levelVal + 1; + + if( thisLevel > MAXDESCENDENTDEPTH ){ + String wMsg = "Warning -- Stopping the counting of descendents because we reached the max depth of " + MAXDESCENDENTDEPTH; + System.out.println( wMsg ); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logger.warn( wMsg ); + return totalCount; + } + + try { + Iterator <Vertex> vertI = graph.traversal().V(vtx).union(__.outE().has(EdgeProperty.CONTAINS.toString(), AAIDirection.OUT.toString()).inV(), __.inE().has(EdgeProperty.CONTAINS.toString(), AAIDirection.IN.toString()).outV()); + while( vertI != null && vertI.hasNext() ){ + totalCount++; + Vertex childVtx = vertI.next(); + totalCount = totalCount + countDescendants( logger, childVtx, thisLevel ); + } + } + catch (Exception e) { + String wMsg = "Error -- Stopping the counting of descendents because of Exception [" + e.getMessage() + "]"; + System.out.println( wMsg ); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logger.warn( wMsg ); + LoggingContext.successStatusFields(); + + } + + return totalCount; + }// end of countDescendants() + + + public boolean getEdgeDelConfirmation( EELFLogger logger, String uid, Edge ed, + Boolean overRideProtection ) { + + showPropertiesForEdge( logger, ed ); + System.out.print("\n Are you sure you want to delete this EDGE? (y/n): "); + Scanner s = new Scanner(System.in); + s.useDelimiter(""); + String confirm = s.next(); + s.close(); + + if (!confirm.equalsIgnoreCase("y")) { + String infMsg = " User [" + uid + "] has chosen to abandon this delete request. "; + System.out.println("\n" + infMsg); + logger.info(infMsg); + return false; + } + else { + String infMsg = " User [" + uid + "] has confirmed this delete request. "; + System.out.println("\n" + infMsg); + logger.info(infMsg); + return true; + } + + } // End of getEdgeDelConfirmation() + + + public boolean getNodeDelConfirmation( EELFLogger logger, String uid, Vertex vtx, int edgeCount, + int descendantCount, Boolean overRideProtection ) { + String thisNodeType = ""; + try { + thisNodeType = vtx.<String>property("aai-node-type").orElse(null); + } + catch ( Exception nfe ){ + // Let the user know something is going on - but they can confirm the delete if they want to. + String infMsg = " -- WARNING -- could not get an aai-node-type for this vertex. 
-- WARNING -- "; + System.out.println( infMsg ); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logger.warn( infMsg ); + LoggingContext.successStatusFields(); + } + + String ntListString = ""; + String maxDescString = ""; + String maxEdgeString = ""; + + int maxDescCount = 10; // default value + int maxEdgeCount = 10; // default value + ArrayList <String> protectedNTypes = new ArrayList <String> (); + protectedNTypes.add("cloud-region"); // default value + + try { + ntListString = AAIConfig.get("aai.forceDel.protected.nt.list"); + maxDescString = AAIConfig.get("aai.forceDel.protected.descendant.count"); + maxEdgeString = AAIConfig.get("aai.forceDel.protected.edge.count"); + } + catch ( Exception nfe ){ + // Don't worry, we will use default values + String infMsg = "-- WARNING -- could not get aai.forceDel.protected values from aaiconfig.properties -- will use default values. "; + System.out.println( infMsg ); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logger.warn( infMsg ); + LoggingContext.successStatusFields(); + } + + if( maxDescString != null && !maxDescString.equals("") ){ + try { + maxDescCount = Integer.parseInt(maxDescString); + } + catch ( Exception nfe ){ + // Don't worry, we will leave "maxDescCount" set to the default value + } + } + + if( maxEdgeString != null && !maxEdgeString.equals("") ){ + try { + maxEdgeCount = Integer.parseInt(maxEdgeString); + } + catch ( Exception nfe ){ + // Don't worry, we will leave "maxEdgeCount" set to the default value + } + } + + if( ntListString != null && !ntListString.trim().equals("") ){ + String [] nodeTypes = ntListString.split("\\|"); + for( int i = 0; i < nodeTypes.length; i++ ){ + protectedNTypes.add(nodeTypes[i]); + } + } + + boolean giveProtOverRideMsg = false; + boolean giveProtErrorMsg = false; + if( descendantCount > maxDescCount ){ + // They are trying to delete a node with a lot of descendants + String infMsg = " >> WARNING >> This node has more descendants than the max ProtectedDescendantCount: " + descendantCount + ". Max = " + + maxDescCount + ". It can be DANGEROUS to delete one of these. << WARNING << "; + System.out.println(infMsg); + logger.info(infMsg); + if( ! overRideProtection ){ + // They cannot delete this kind of node without using the override option + giveProtErrorMsg = true; + } + else { + giveProtOverRideMsg = true; + } + } + + if( edgeCount > maxEdgeCount ){ + // They are trying to delete a node with a lot of edges + String infMsg = " >> WARNING >> This node has more edges than the max ProtectedEdgeCount: " + edgeCount + ". Max = " + + maxEdgeCount + ". It can be DANGEROUS to delete one of these. << WARNING << "; + System.out.println(infMsg); + logger.info(infMsg); + if( ! overRideProtection ){ + // They cannot delete this kind of node without using the override option + giveProtErrorMsg = true; + } + else { + giveProtOverRideMsg = true; + } + } + + if( thisNodeType != null && !thisNodeType.equals("") && protectedNTypes.contains(thisNodeType) ){ + // They are trying to delete a protected Node Type + String infMsg = " >> WARNING >> This node is a PROTECTED NODE-TYPE (" + thisNodeType + "). " + + " It can be DANGEROUS to delete one of these. << WARNING << "; + System.out.println(infMsg); + logger.info(infMsg); + if( ! 
overRideProtection ){ + // They cannot delete this kind of node without using the override option + giveProtErrorMsg = true; + } + else { + giveProtOverRideMsg = true; + } + } + + if( giveProtOverRideMsg ){ + String infMsg = " !!>> WARNING >>!! you are using the overRideProtection parameter which will let you do this potentially dangerous delete."; + System.out.println("\n" + infMsg); + logger.info(infMsg); + } + else if( giveProtErrorMsg ) { + String errMsg = " ERROR >> this kind of node can only be deleted if you pass the overRideProtection parameter."; + System.out.println("\n" + errMsg); + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logger.error(errMsg); + LoggingContext.successStatusFields(); + return false; + } + + System.out.print("\n Are you sure you want to do this delete? (y/n): "); + Scanner s = new Scanner(System.in); + s.useDelimiter(""); + String confirm = s.next(); + s.close(); + + if (!confirm.equalsIgnoreCase("y")) { + String infMsg = " User [" + uid + "] has chosen to abandon this delete request. "; + System.out.println("\n" + infMsg); + logger.info(infMsg); + return false; + } + else { + String infMsg = " User [" + uid + "] has confirmed this delete request. "; + System.out.println("\n" + infMsg); + logger.info(infMsg); + return true; + } + + } // End of getNodeDelConfirmation() + } + + public static JanusGraph setupGraph(EELFLogger logger){ + + JanusGraph janusGraph = null; + + try (InputStream inputStream = new FileInputStream(AAIConstants.REALTIME_DB_CONFIG);){ + + Properties properties = new Properties(); + properties.load(inputStream); + + if("inmemory".equals(properties.get("storage.backend"))){ + janusGraph = AAIGraph.getInstance().getGraph(); + graphType = "inmemory"; + } else { + janusGraph = JanusGraphFactory.open( + new AAIGraphConfig.Builder(AAIConstants.REALTIME_DB_CONFIG) + .forService(ForceDeleteTool.class.getSimpleName()) + .withGraphType("realtime1") + .buildConfiguration() + ); + } + } catch (Exception e) { + logger.error("Unable to open the graph", LogFormatTools.getStackTop(e)); + } + + return janusGraph; + } + + public static void closeGraph(JanusGraph graph, EELFLogger logger){ + + try { + if("inmemory".equals(graphType)) { + return; + } + if( graph != null && graph.isOpen() ){ + graph.tx().close(); + graph.close(); + } + } catch (Exception ex) { + // Don't throw anything because JanusGraph sometimes is just saying that the graph is already closed{ + logger.warn("WARNING from final graph.shutdown()", ex); + } + } +} + diff --git a/src/main/java/org/onap/aai/dbgen/GraphMLTokens.java b/src/main/java/org/onap/aai/dbgen/GraphMLTokens.java new file mode 100644 index 0000000..d43b57f --- /dev/null +++ b/src/main/java/org/onap/aai/dbgen/GraphMLTokens.java @@ -0,0 +1,56 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.dbgen; + +/** + * A collection of tokens used for GraphML related data. + */ +public class GraphMLTokens { + public static final String GRAPHML = "graphml"; + public static final String XMLNS = "xmlns"; + public static final String GRAPHML_XMLNS = "http://graphml.graphdrawing.org/xmlns"; + public static final String G = "G"; + public static final String EDGEDEFAULT = "edgedefault"; + public static final String DIRECTED = "directed"; + public static final String KEY = "key"; + public static final String FOR = "for"; + public static final String ID = "id"; + public static final String ATTR_NAME = "attr.name"; + public static final String ATTR_TYPE = "attr.type"; + public static final String GRAPH = "graph"; + public static final String NODE = "node"; + public static final String EDGE = "edge"; + public static final String SOURCE = "source"; + public static final String TARGET = "target"; + public static final String DATA = "data"; + public static final String LABEL = "label"; + public static final String STRING = "string"; + public static final String FLOAT = "float"; + public static final String DOUBLE = "double"; + public static final String LONG = "long"; + public static final String BOOLEAN = "boolean"; + public static final String INT = "int"; + public static final String ARRAY = "array"; + public static final String SET = "set"; + public static final String LIST = "list"; + public static final String ITEM = "item"; + public static final String _DEFAULT = "_default"; + +} diff --git a/src/main/java/org/onap/aai/dbgen/schemamod/SchemaMod.java b/src/main/java/org/onap/aai/dbgen/schemamod/SchemaMod.java new file mode 100644 index 0000000..c0f8ee9 --- /dev/null +++ b/src/main/java/org/onap/aai/dbgen/schemamod/SchemaMod.java @@ -0,0 +1,177 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.dbgen.schemamod; + +import java.util.Properties; + +import org.onap.aai.dbmap.DBConnectionType; +import org.onap.aai.introspection.Loader; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.introspection.ModelType; +import org.onap.aai.setup.SchemaVersions; +import org.onap.aai.setup.SchemaVersion; +import org.onap.aai.logging.ErrorLogHelper; +import org.onap.aai.serialization.engines.QueryStyle; +import org.onap.aai.serialization.engines.JanusGraphDBEngine; +import org.onap.aai.serialization.engines.TransactionalGraphEngine; +import org.onap.aai.util.AAIConfig; +import org.onap.aai.util.AAIConstants; +import org.onap.aai.util.UniquePropertyCheck; +import org.slf4j.MDC; + +import com.att.eelf.configuration.Configuration; +import com.att.eelf.configuration.EELFLogger; +import com.att.eelf.configuration.EELFManager; +import org.springframework.context.annotation.AnnotationConfigApplicationContext; + +public class SchemaMod { + + private final LoaderFactory loaderFactory; + + private final SchemaVersions schemaVersions; + + public SchemaMod(LoaderFactory loaderFactory, SchemaVersions schemaVersions){ + this.loaderFactory = loaderFactory; + this.schemaVersions = schemaVersions; + } + + public void execute(String[] args) { + + // Set the logging file properties to be used by EELFManager + Properties props = System.getProperties(); + props.setProperty(Configuration.PROPERTY_LOGGING_FILE_NAME, AAIConstants.AAI_SCHEMA_MOD_LOGBACK_PROPS); + props.setProperty(Configuration.PROPERTY_LOGGING_FILE_PATH, AAIConstants.AAI_HOME_BUNDLECONFIG); + + EELFLogger logger = EELFManager.getInstance().getLogger(SchemaMod.class.getSimpleName()); + MDC.put("logFilenameAppender", SchemaMod.class.getSimpleName()); + + // NOTE -- We're just working with properties that are used for NODES + // for now. + String propName = ""; + String targetDataType = ""; + String targetIndexInfo = ""; + String preserveDataFlag = ""; + + String usageString = "Usage: SchemaMod propertyName targetDataType targetIndexInfo preserveDataFlag \n"; + if (args.length != 4) { + String emsg = "Four parameters are required. \n" + usageString; + logAndPrint(logger, emsg); + System.exit(1); + } else { + propName = args[0]; + targetDataType = args[1]; + targetIndexInfo = args[2]; + preserveDataFlag = args[3]; + } + + if (propName.equals("")) { + String emsg = "Bad parameter - propertyName cannot be empty. \n" + usageString; + logAndPrint(logger, emsg); + System.exit(1); + } else if (!targetDataType.equals("String") && !targetDataType.equals("Set<String>") + && !targetDataType.equals("Integer") && !targetDataType.equals("Long") + && !targetDataType.equals("Boolean")) { + String emsg = "Unsupported targetDataType. We only support String, Set<String>, Integer, Long or Boolean for now.\n" + + usageString; + logAndPrint(logger, emsg); + System.exit(1); + } else if (!targetIndexInfo.equals("uniqueIndex") && !targetIndexInfo.equals("index") + && !targetIndexInfo.equals("noIndex")) { + String emsg = "Unsupported IndexInfo. We only support: 'uniqueIndex', 'index' or 'noIndex'.\n" + + usageString; + logAndPrint(logger, emsg); + System.exit(1); + } + + try { + AAIConfig.init(); + ErrorLogHelper.loadProperties(); + } catch (Exception ae) { + String emsg = "Problem with either AAIConfig.init() or ErrorLogHelper.loadProperties(). 
"; + logAndPrint(logger, emsg + "[" + ae.getMessage() + "]"); + System.exit(1); + } + + // Give a big warning if the DbMaps.PropertyDataTypeMap value does not + // agree with what we're doing + String warningMsg = ""; + + if (!warningMsg.equals("")) { + logAndPrint(logger, "\n>>> WARNING <<<< "); + logAndPrint(logger, ">>> " + warningMsg + " <<<"); + } + + logAndPrint(logger, ">>> Processing will begin in 5 seconds (unless interrupted). <<<"); + try { + // Give them a chance to back out of this + Thread.sleep(5000); + } catch (java.lang.InterruptedException ie) { + logAndPrint(logger, " DB Schema Update has been aborted. "); + System.exit(1); + } + + logAndPrint(logger, " ---- NOTE --- about to open graph (takes a little while)\n"); + + SchemaVersion version = schemaVersions.getDefaultVersion(); + QueryStyle queryStyle = QueryStyle.TRAVERSAL; + ModelType introspectorFactoryType = ModelType.MOXY; + Loader loader = loaderFactory.createLoaderForVersion(introspectorFactoryType, version); + TransactionalGraphEngine engine = null; + try { + engine = new JanusGraphDBEngine(queryStyle, DBConnectionType.REALTIME, loader); + SchemaModInternal internal = new SchemaModInternal(engine, logger, propName, targetDataType, targetIndexInfo, new Boolean(preserveDataFlag)); + internal.execute(); + engine.startTransaction(); + engine.tx().close(); + logAndPrint(logger, "------ Completed the SchemaMod -------- "); + } catch (Exception e) { + String emsg = "Not able to complete the requested SchemaMod \n"; + logAndPrint(logger, e.getMessage()); + logAndPrint(logger, emsg); + System.exit(1); + } + } + /** + * Log and print. + * + * @param logger the logger + * @param msg the msg + */ + protected void logAndPrint(EELFLogger logger, String msg) { + System.out.println(msg); + logger.info(msg); + } + + public static void main(String[] args) { + + AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext( + "org.onap.aai.config", + "org.onap.aai.setup" + ); + + LoaderFactory loaderFactory = ctx.getBean(LoaderFactory.class); + SchemaVersions schemaVersions = ctx.getBean(SchemaVersions.class); + SchemaMod schemaMod = new SchemaMod(loaderFactory, schemaVersions); + schemaMod.execute(args); + + System.exit(0); + } + +}
\ No newline at end of file diff --git a/src/main/java/org/onap/aai/dbgen/schemamod/SchemaModInternal.java b/src/main/java/org/onap/aai/dbgen/schemamod/SchemaModInternal.java new file mode 100644 index 0000000..b5ce16b --- /dev/null +++ b/src/main/java/org/onap/aai/dbgen/schemamod/SchemaModInternal.java @@ -0,0 +1,317 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.dbgen.schemamod; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.UUID; + +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; +import org.apache.tinkerpop.gremlin.structure.Direction; +import org.apache.tinkerpop.gremlin.structure.Edge; +import org.apache.tinkerpop.gremlin.structure.Graph; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.apache.tinkerpop.gremlin.structure.VertexProperty; +import org.onap.aai.serialization.engines.TransactionalGraphEngine; +import org.onap.aai.util.FormatDate; +import org.onap.aai.util.UniquePropertyCheck; + +import com.att.eelf.configuration.EELFLogger; +import org.janusgraph.core.Cardinality; +import org.janusgraph.core.PropertyKey; +import org.janusgraph.core.schema.JanusGraphManagement; + +public class SchemaModInternal { + private static final String FROMAPPID = "AAI-UTILS"; + private final String TRANSID = UUID.randomUUID().toString(); + private final TransactionalGraphEngine engine; + private final String propName; + private final Class<?> type; + private final String indexType; + private final boolean preserveData; + private final Cardinality cardinality; + private final EELFLogger logger; + + public SchemaModInternal(TransactionalGraphEngine engine, EELFLogger logger, String propName, String type, String indexType, boolean preserveData) { + this.engine = engine; + this.propName = propName; + this.type = determineClass(type); + this.indexType = indexType; + this.preserveData = preserveData; + this.cardinality = determineCardinality(type); + this.logger = logger; + } + + + private Class<?> determineClass(String type) { + final Class<?> result; + if (type.equals("String")) { + result = String.class; + } else if (type.equals("Set<String>")) { + result = String.class; + } else if (type.equals("Integer")) { + result = Integer.class; + } else if (type.equals("Boolean")) { + result = Boolean.class; + } else if (type.equals("Character")) { + result = Character.class; + } else if (type.equals("Long")) { + result = Long.class; + } else if (type.equals("Float")) { + result = Float.class; + } else if (type.equals("Double")) { + result = Double.class; + } else { + 
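// No supported targetDataType name matched, so fail fast with a usage message. +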
String emsg = "Not able translate the targetDataType [" + type + "] to a Class variable.\n"; + logAndPrint(logger, emsg); + throw new RuntimeException(emsg); + } + + return result; + } + private Cardinality determineCardinality(String type) { + if (type.equals("Set<String>")) { + return Cardinality.SET; + } else { + return Cardinality.SINGLE; + } + } + public void execute() { + JanusGraphManagement graphMgt = null; + boolean success = false; + try { + // Make sure this property is in the DB. + graphMgt = engine.asAdmin().getManagementSystem(); + if (graphMgt == null) { + String emsg = "Not able to get a graph Management object in SchemaMod.java\n"; + logAndPrint(logger, emsg); + System.exit(1); + } + PropertyKey origPropKey = graphMgt.getPropertyKey(propName); + if (origPropKey == null) { + String emsg = "The propName = [" + propName + "] is not defined in our graph. "; + logAndPrint(logger, emsg); + System.exit(1); + } + + if (indexType.equals("uniqueIndex")) { + // Make sure the data in the property being changed can have a + // unique-index put on it. + // Ie. if there are duplicate values, we will not be able to + // migrate the data back into the property. + + + Graph grTmp = engine.tx(); + if( grTmp == null ){ + grTmp = engine.startTransaction(); + } + // This is good to know in the logs + logAndPrint(logger, "-- Starting UniquePropertyCheck. (this may take a loooong time) --"); + + Boolean foundDupesFlag = UniquePropertyCheck.runTheCheckForUniqueness(TRANSID, FROMAPPID, + grTmp, propName, logger); + if (foundDupesFlag) { + logAndPrint(logger, + "\n\n!!!!!! >> Cannot add a uniqueIndex for the property: [" + propName + + "] because duplicate values were found. See the log for details on which" + + " nodes have this value. \nThey will need to be resolved (by updating those values to new" + + " values or deleting unneeded nodes) using the standard REST-API \n"); + System.exit(1); + } + logAndPrint(logger, "-- Finished UniquePropertyCheck. 
"); // This is good to know in the logs + } + + + // ---- If we made it to here - we must be OK with making this change + + // Rename this property to a backup name (old name with "retired_" + // appended plus a dateStr) + FormatDate fd = new FormatDate("MMddHHmm", "GMT"); + String dteStr= fd.getDateTime(); + + String retiredName = propName + "-" + dteStr + "-RETIRED"; + graphMgt.changeName(origPropKey, retiredName); + + // Create a new property using the original property name and the + // targetDataType + PropertyKey freshPropKey = graphMgt.makePropertyKey(propName).dataType(type) + .cardinality(cardinality).make(); + + // Create the appropriate index (if any) + if (indexType.equals("uniqueIndex")) { + String freshIndexName = propName + dteStr; + graphMgt.buildIndex(freshIndexName, Vertex.class).addKey(freshPropKey).unique().buildCompositeIndex(); + } else if (indexType.equals("index")) { + String freshIndexName = propName + dteStr; + graphMgt.buildIndex(freshIndexName, Vertex.class).addKey(freshPropKey).buildCompositeIndex(); + } + + logAndPrint(logger, "Committing schema changes with graphMgt.commit()"); + graphMgt.commit(); + engine.commit(); + Graph grTmp2 = engine.startTransaction(); + + + // For each node that has this property, update the new from the old + // and then remove the + // old property from that node + Iterator<Vertex> verts = grTmp2.traversal().V().has(retiredName); + int vtxCount = 0; + ArrayList<String> alreadySeenVals = new ArrayList<String>(); + while (verts.hasNext()) { + vtxCount++; + Vertex tmpVtx = verts.next(); + String tmpVid = tmpVtx.id().toString(); + Object origVal = tmpVtx.<Object> property(retiredName).orElse(null); + if (preserveData) { + tmpVtx.property(propName, origVal); + if (indexType.equals("uniqueIndex")) { + // We're working on a property that is being used as a + // unique index + String origValStr = ""; + if (origVal != null) { + origValStr = origVal.toString(); + } + if (alreadySeenVals.contains(origValStr)) { + // This property is supposed to be unique, but we've + // already seen this value in this loop + // This should have been caught up in the first part + // of SchemaMod, but since it wasn't, we + // will just log the problem. + logAndPrint(logger, + "\n\n ---------- ERROR - could not migrate the old data [" + origValStr + + "] for propertyName [" + propName + + "] because this property is having a unique index put on it."); + showPropertiesAndEdges(TRANSID, FROMAPPID, tmpVtx, logger); + logAndPrint(logger, "-----------------------------------\n"); + } else { + // Ok to add this prop in as a unique value + tmpVtx.property(propName, origVal); + logAndPrint(logger, + "INFO -- just did the add of the freshPropertyKey and updated it with the orig value (" + + origValStr + ")"); + } + alreadySeenVals.add(origValStr); + } else { + // We are not working with a unique index + tmpVtx.property(propName, origVal); + logAndPrint(logger, + "INFO -- just did the add of the freshPropertyKey and updated it with the orig value (" + + origVal.toString() + ")"); + } + } else { + // existing nodes just won't have that property anymore + // Not sure if we'd ever actually want to do this -- maybe + // we'd do this if the new + // data type was not compatible with the old? + } + tmpVtx.property(retiredName).remove(); + logAndPrint(logger, "INFO -- just did the remove of the " + retiredName + " from this vertex. 
(vid=" + + tmpVid + ")"); + } + + success = true; + } catch (Exception ex) { + logAndPrint(logger, "Threw a regular Exception: "); + logAndPrint(logger, ex.getMessage()); + } finally { + if (graphMgt != null && graphMgt.isOpen()) { + // Any changes that worked correctly should have already done + // their commits. + graphMgt.rollback(); + } + if (engine != null) { + if (success) { + engine.commit(); + } else { + engine.rollback(); + } + } + } + } + + /** + * Show properties and edges. + * + * @param transId the trans id + * @param fromAppId the from app id + * @param tVert the t vert + * @param logger the logger + */ + private static void showPropertiesAndEdges(String transId, String fromAppId, Vertex tVert, EELFLogger logger) { + + if (tVert == null) { + logAndPrint(logger, "Null node passed to showPropertiesAndEdges."); + } else { + String nodeType = ""; + Object ob = tVert.<String> property("aai-node-type"); + if (ob == null) { + nodeType = "null"; + } else { + nodeType = ob.toString(); + } + + logAndPrint(logger, " AAINodeType/VtxID for this Node = [" + nodeType + "/" + tVert.id() + "]"); + logAndPrint(logger, " Property Detail: "); + Iterator<VertexProperty<Object>> pI = tVert.properties(); + while (pI.hasNext()) { + VertexProperty<Object> tp = pI.next(); + Object val = tp.value(); + logAndPrint(logger, "Prop: [" + tp.key() + "], val = [" + val + "] "); + } + + Iterator<Edge> eI = tVert.edges(Direction.BOTH); + if (!eI.hasNext()) { + logAndPrint(logger, "No edges were found for this vertex. "); + } + while (eI.hasNext()) { + Edge ed = eI.next(); + String lab = ed.label(); + Vertex vtx; + if (tVert.equals(ed.inVertex())) { + vtx = ed.outVertex(); + } else { + vtx = ed.inVertex(); + } + if (vtx == null) { + logAndPrint(logger, + " >>> COULD NOT FIND VERTEX on the other side of this edge edgeId = " + ed.id() + " <<< "); + } else { + String nType = vtx.<String> property("aai-node-type").orElse(null); + String vid = vtx.id().toString(); + logAndPrint(logger, "Found an edge (" + lab + ") from this vertex to a [" + nType + + "] node with VtxId = " + vid); + } + } + } + } // End of showPropertiesAndEdges() + + /** + * Log and print. + * + * @param logger the logger + * @param msg the msg + */ + protected static void logAndPrint(EELFLogger logger, String msg) { + System.out.println(msg); + logger.info(msg); + } + +} diff --git a/src/main/java/org/onap/aai/dbgen/tags/Command.java b/src/main/java/org/onap/aai/dbgen/tags/Command.java new file mode 100644 index 0000000..ac553f9 --- /dev/null +++ b/src/main/java/org/onap/aai/dbgen/tags/Command.java @@ -0,0 +1,25 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.dbgen.tags; + +@FunctionalInterface +interface Command { + public abstract void execute ( ) throws Exception; +} diff --git a/src/main/java/org/onap/aai/interceptors/AAIContainerFilter.java b/src/main/java/org/onap/aai/interceptors/AAIContainerFilter.java new file mode 100644 index 0000000..6fb7356 --- /dev/null +++ b/src/main/java/org/onap/aai/interceptors/AAIContainerFilter.java @@ -0,0 +1,41 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.interceptors; + +import org.onap.aai.util.FormatDate; + +import java.util.UUID; + +public abstract class AAIContainerFilter { + + protected String genDate() { + FormatDate fd = new FormatDate("yyMMdd-HH:mm:ss:SSS"); + return fd.getDateTime(); + } + + protected boolean isValidUUID(String transId) { + try { + UUID.fromString(transId); + } catch (IllegalArgumentException e) { + return false; + } + return true; + } +} diff --git a/src/main/java/org/onap/aai/interceptors/AAIHeaderProperties.java b/src/main/java/org/onap/aai/interceptors/AAIHeaderProperties.java new file mode 100644 index 0000000..6801aee --- /dev/null +++ b/src/main/java/org/onap/aai/interceptors/AAIHeaderProperties.java @@ -0,0 +1,39 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.interceptors; + +public final class AAIHeaderProperties { + + private AAIHeaderProperties(){} + + public static final String REQUEST_CONTEXT = "aai-request-context"; + + public static final String HTTP_METHOD_OVERRIDE = "X-HTTP-Method-Override"; + + public static final String TRANSACTION_ID = "X-TransactionId"; + + public static final String FROM_APP_ID = "X-FromAppId"; + + public static final String AAI_TX_ID = "X-AAI-TXID"; + + public static final String AAI_REQUEST = "X-REQUEST"; + + public static final String AAI_REQUEST_TS = "X-REQUEST-TS"; +} diff --git a/src/main/java/org/onap/aai/interceptors/package-info.java b/src/main/java/org/onap/aai/interceptors/package-info.java new file mode 100644 index 0000000..ee9c334 --- /dev/null +++ b/src/main/java/org/onap/aai/interceptors/package-info.java @@ -0,0 +1,36 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +/** + * The <b>interceptors</b> package is subdivided into pre and post interceptors. + * To add an additional interceptor, add its priority level to AAIRequestFilterPriority + * or AAIResponseFilterPriority; the value indicates the order in which the interceptor + * will be triggered. Then declare that value on the class, like here: + * + * <pre> + * <code> + * @Priority(AAIRequestFilterPriority.YOUR_PRIORITY) + * public class YourInterceptor extends AAIContainerFilter implements ContainerRequestFilter { + * + * } + * </code> + * </pre> + */ +package org.onap.aai.interceptors;
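A short sketch of the pattern the package-info above describes, assuming the filter lives in the pre-interceptor package; the class name is illustrative, and the @Priority value reuses an existing constant rather than a newly added YOUR_PRIORITY slot:

    package org.onap.aai.interceptors.pre;

    import org.onap.aai.interceptors.AAIContainerFilter;

    import javax.annotation.Priority;
    import javax.ws.rs.container.ContainerRequestContext;
    import javax.ws.rs.container.ContainerRequestFilter;
    import javax.ws.rs.container.PreMatching;
    import javax.ws.rs.ext.Provider;
    import java.io.IOException;

    @Provider
    @PreMatching
    @Priority(AAIRequestFilterPriority.HTTP_HEADER) // illustrative slot; add your own constant in practice
    public class YourInterceptor extends AAIContainerFilter implements ContainerRequestFilter {

        @Override
        public void filter(ContainerRequestContext requestContext) throws IOException {
            // No-op sketch; real logic would inspect or modify the incoming request here.
        }
    }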
\ No newline at end of file diff --git a/src/main/java/org/onap/aai/interceptors/post/AAIResponseFilterPriority.java b/src/main/java/org/onap/aai/interceptors/post/AAIResponseFilterPriority.java new file mode 100644 index 0000000..146f847 --- /dev/null +++ b/src/main/java/org/onap/aai/interceptors/post/AAIResponseFilterPriority.java @@ -0,0 +1,40 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.interceptors.post; + +/** + * Response filters are executed in reverse-sorted priority order, + * so in the following case the first response filter to run would be + * HEADER_MANIPULATION, followed by RESPONSE_TRANS_LOGGING, RESET_LOGGING_CONTEXT, + * and finally INVALID_RESPONSE_STATUS. + */ +public final class AAIResponseFilterPriority { + + private AAIResponseFilterPriority() {} + + public static final int INVALID_RESPONSE_STATUS = 1000; + + public static final int RESET_LOGGING_CONTEXT = 2000; + + public static final int RESPONSE_TRANS_LOGGING = 3000; + + public static final int HEADER_MANIPULATION = 4000; + +} diff --git a/src/main/java/org/onap/aai/interceptors/post/InvalidResponseStatus.java b/src/main/java/org/onap/aai/interceptors/post/InvalidResponseStatus.java new file mode 100644 index 0000000..7fd0b9c --- /dev/null +++ b/src/main/java/org/onap/aai/interceptors/post/InvalidResponseStatus.java @@ -0,0 +1,65 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.interceptors.post; + +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.interceptors.AAIContainerFilter; +import org.onap.aai.logging.ErrorLogHelper; + +import javax.annotation.Priority; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.container.ContainerResponseContext; +import javax.ws.rs.container.ContainerResponseFilter; +import javax.ws.rs.core.MediaType; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +@Priority(AAIResponseFilterPriority.INVALID_RESPONSE_STATUS) +public class InvalidResponseStatus extends AAIContainerFilter implements ContainerResponseFilter { + + @Override + public void filter(ContainerRequestContext requestContext, ContainerResponseContext responseContext) + throws IOException { + + if(responseContext.getStatus() == 405){ + + responseContext.setStatus(400); + AAIException e = new AAIException("AAI_3012"); + ArrayList<String> templateVars = new ArrayList<>(); + + List<MediaType> mediaTypeList = new ArrayList<>(); + + String contentType = responseContext.getHeaderString("Content-Type"); + + if (contentType == null) { + mediaTypeList.add(MediaType.APPLICATION_XML_TYPE); + } else { + mediaTypeList.add(MediaType.valueOf(contentType)); + } + + String message = ErrorLogHelper.getRESTAPIErrorResponse(mediaTypeList, e, templateVars); + + responseContext.setEntity(message); + } + + } + +} diff --git a/src/main/java/org/onap/aai/interceptors/post/ResetLoggingContext.java b/src/main/java/org/onap/aai/interceptors/post/ResetLoggingContext.java new file mode 100644 index 0000000..baf28ad --- /dev/null +++ b/src/main/java/org/onap/aai/interceptors/post/ResetLoggingContext.java @@ -0,0 +1,98 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.interceptors.post; + +import com.att.eelf.configuration.EELFLogger; +import com.att.eelf.configuration.EELFManager; +import org.onap.aai.interceptors.AAIContainerFilter; +import org.onap.aai.logging.LoggingContext; +import org.onap.aai.logging.LoggingContext.StatusCode; +import org.springframework.beans.factory.annotation.Autowired; + +import javax.annotation.Priority; +import javax.servlet.http.HttpServletRequest; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.container.ContainerResponseContext; +import javax.ws.rs.container.ContainerResponseFilter; +import javax.ws.rs.core.Response.Status; +import javax.ws.rs.core.Response.StatusType; +import java.io.IOException; + +@Priority(AAIResponseFilterPriority.RESET_LOGGING_CONTEXT) +public class ResetLoggingContext extends AAIContainerFilter implements ContainerResponseFilter { + + private static final EELFLogger LOGGER = EELFManager.getInstance().getLogger(ResetLoggingContext.class); + + @Autowired + private HttpServletRequest httpServletRequest; + + @Override + public void filter(ContainerRequestContext requestContext, ContainerResponseContext responseContext) + throws IOException { + + this.cleanLoggingContext(responseContext); + + } + + private void cleanLoggingContext(ContainerResponseContext responseContext) { + //String url = httpServletRequest.getRequestURL().toString(); + boolean success = true; + String uri = httpServletRequest.getRequestURI(); + String queryString = httpServletRequest.getQueryString(); + + if(queryString != null && !queryString.isEmpty()){ + uri = uri + "?" + queryString; + } + // For now, we use the HTTP status code; + // this may change once the requirements for response codes are defined. + + int httpStatusCode = responseContext.getStatus(); + if ( httpStatusCode < 100 || httpStatusCode > 599 ) { + httpStatusCode = Status.INTERNAL_SERVER_ERROR.getStatusCode(); + } + LoggingContext.responseCode(Integer.toString(httpStatusCode)); + + StatusType sType = responseContext.getStatusInfo(); + if ( sType != null ) { + Status.Family sFamily = sType.getFamily(); + if ( ! ( Status.Family.SUCCESSFUL.equals(sFamily) || + ( Status.NOT_FOUND.equals(Status.fromStatusCode(httpStatusCode)) ) ) ) { + success = false; + } + } + else { + if ( (httpStatusCode < 200 || httpStatusCode > 299) && ( ! ( Status.NOT_FOUND.equals(Status.fromStatusCode(httpStatusCode) ) ) ) ) { + success = false; + } + } + if (success) { + LoggingContext.statusCode(StatusCode.COMPLETE); + LOGGER.info(uri + " call succeeded"); + } + else { + LoggingContext.statusCode(StatusCode.ERROR); + LOGGER.error(uri + " call failed with responseCode=" + httpStatusCode); + } + LoggingContext.clear(); + + + } + +} diff --git a/src/main/java/org/onap/aai/interceptors/post/ResponseHeaderManipulation.java b/src/main/java/org/onap/aai/interceptors/post/ResponseHeaderManipulation.java new file mode 100644 index 0000000..9d4efe7 --- /dev/null +++ b/src/main/java/org/onap/aai/interceptors/post/ResponseHeaderManipulation.java @@ -0,0 +1,64 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. 
+ * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.interceptors.post; + +import org.onap.aai.interceptors.AAIContainerFilter; +import org.onap.aai.interceptors.AAIHeaderProperties; + +import javax.annotation.Priority; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.container.ContainerResponseContext; +import javax.ws.rs.container.ContainerResponseFilter; +import javax.ws.rs.core.MediaType; +import java.io.IOException; + +@Priority(AAIResponseFilterPriority.HEADER_MANIPULATION) +public class ResponseHeaderManipulation extends AAIContainerFilter implements ContainerResponseFilter { + + private static final String DEFAULT_XML_TYPE = MediaType.APPLICATION_XML; + + @Override + public void filter(ContainerRequestContext requestContext, ContainerResponseContext responseContext) + throws IOException { + + updateResponseHeaders(requestContext, responseContext); + + } + + private void updateResponseHeaders(ContainerRequestContext requestContext, + ContainerResponseContext responseContext) { + + responseContext.getHeaders().add(AAIHeaderProperties.AAI_TX_ID, requestContext.getProperty(AAIHeaderProperties.AAI_TX_ID)); + + String responseContentType = responseContext.getHeaderString("Content-Type"); + + if(responseContentType == null){ + String acceptType = requestContext.getHeaderString("Accept"); + + if(acceptType == null || "*/*".equals(acceptType)){ + responseContext.getHeaders().putSingle("Content-Type", DEFAULT_XML_TYPE); + } else { + responseContext.getHeaders().putSingle("Content-Type", acceptType); + } + } + + } + +} diff --git a/src/main/java/org/onap/aai/interceptors/post/ResponseTransactionLogging.java b/src/main/java/org/onap/aai/interceptors/post/ResponseTransactionLogging.java new file mode 100644 index 0000000..547a7c8 --- /dev/null +++ b/src/main/java/org/onap/aai/interceptors/post/ResponseTransactionLogging.java @@ -0,0 +1,123 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.interceptors.post; + +import com.att.eelf.configuration.EELFLogger; +import com.att.eelf.configuration.EELFManager; +import com.google.gson.JsonObject; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.interceptors.AAIContainerFilter; +import org.onap.aai.interceptors.AAIHeaderProperties; +import org.onap.aai.logging.ErrorLogHelper; +import org.onap.aai.util.AAIConfig; +import org.springframework.beans.factory.annotation.Autowired; + +import javax.annotation.Priority; +import javax.servlet.http.HttpServletResponse; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.container.ContainerResponseContext; +import javax.ws.rs.container.ContainerResponseFilter; +import java.io.IOException; +import java.util.Objects; +import java.util.Optional; + +@Priority(AAIResponseFilterPriority.RESPONSE_TRANS_LOGGING) +public class ResponseTransactionLogging extends AAIContainerFilter implements ContainerResponseFilter { + + private static final EELFLogger TRANSACTION_LOGGER = EELFManager.getInstance().getLogger(ResponseTransactionLogging.class); + + @Autowired + private HttpServletResponse httpServletResponse; + + @Override + public void filter(ContainerRequestContext requestContext, ContainerResponseContext responseContext) + throws IOException { + + this.transLogging(requestContext, responseContext); + + } + + private void transLogging(ContainerRequestContext requestContext, ContainerResponseContext responseContext) { + + String logValue; + String getValue; + String postValue; + + try { + logValue = AAIConfig.get("aai.transaction.logging"); + getValue = AAIConfig.get("aai.transaction.logging.get"); + postValue = AAIConfig.get("aai.transaction.logging.post"); + } catch (AAIException e) { + return; + } + + String transId = requestContext.getHeaderString(AAIHeaderProperties.TRANSACTION_ID); + String fromAppId = requestContext.getHeaderString(AAIHeaderProperties.FROM_APP_ID); + String fullUri = requestContext.getUriInfo().getRequestUri().toString(); + String requestTs = (String)requestContext.getProperty(AAIHeaderProperties.AAI_REQUEST_TS); + + String httpMethod = requestContext.getMethod(); + + String status = Integer.toString(responseContext.getStatus()); + + String request = (String)requestContext.getProperty(AAIHeaderProperties.AAI_REQUEST); + String response = this.getResponseString(responseContext); + + if (!Boolean.parseBoolean(logValue)) { + } else if (!Boolean.parseBoolean(getValue) && "GET".equals(httpMethod)) { + } else if (!Boolean.parseBoolean(postValue) && "POST".equals(httpMethod)) { + } else { + + JsonObject logEntry = new JsonObject(); + logEntry.addProperty("transactionId", transId); + logEntry.addProperty("status", status); + logEntry.addProperty("rqstDate", requestTs); + logEntry.addProperty("respDate", this.genDate()); + logEntry.addProperty("sourceId", fromAppId + ":" + transId); + logEntry.addProperty("resourceId", fullUri); + logEntry.addProperty("resourceType", httpMethod); + logEntry.addProperty("rqstBuf", Objects.toString(request, "")); + logEntry.addProperty("respBuf", Objects.toString(response, "")); + + try { + TRANSACTION_LOGGER.debug(logEntry.toString()); + } catch (Exception e) { + ErrorLogHelper.logError("AAI_4000", "Exception writing transaction log."); + } + } + + } + + private String getResponseString(ContainerResponseContext responseContext) { + JsonObject response = new JsonObject(); + response.addProperty("ID", 
responseContext.getHeaderString(AAIHeaderProperties.AAI_TX_ID)); + response.addProperty("Content-Type", this.httpServletResponse.getContentType()); + response.addProperty("Response-Code", responseContext.getStatus()); + response.addProperty("Headers", responseContext.getHeaders().toString()); + Optional<Object> entityOptional = Optional.ofNullable(responseContext.getEntity()); + if(entityOptional.isPresent()){ + response.addProperty("Entity", entityOptional.get().toString()); + } else { + response.addProperty("Entity", ""); + } + return response.toString(); + } + +} diff --git a/src/main/java/org/onap/aai/interceptors/pre/AAIRequestFilterPriority.java b/src/main/java/org/onap/aai/interceptors/pre/AAIRequestFilterPriority.java new file mode 100644 index 0000000..c3d9d3b --- /dev/null +++ b/src/main/java/org/onap/aai/interceptors/pre/AAIRequestFilterPriority.java @@ -0,0 +1,46 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.interceptors.pre; + +public final class AAIRequestFilterPriority { + + private AAIRequestFilterPriority() {} + + public static final int REQUEST_TRANS_LOGGING = 1000; + + public static final int HEADER_VALIDATION = 2000; + + public static final int SET_LOGGING_CONTEXT = 3000; + + public static final int HTTP_HEADER = 4000; + + public static final int LATEST = 4250; + + public static final int AUTHORIZATION = 4500; + + public static final int RETIRED_SERVICE = 5000; + + public static final int VERSION = 5500; + + public static final int HEADER_MANIPULATION = 6000; + + public static final int REQUEST_MODIFICATION = 7000; + +} diff --git a/src/main/java/org/onap/aai/interceptors/pre/HeaderValidation.java b/src/main/java/org/onap/aai/interceptors/pre/HeaderValidation.java new file mode 100644 index 0000000..afacf66 --- /dev/null +++ b/src/main/java/org/onap/aai/interceptors/pre/HeaderValidation.java @@ -0,0 +1,91 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.interceptors.pre; + +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.interceptors.AAIContainerFilter; +import org.onap.aai.interceptors.AAIHeaderProperties; +import org.onap.aai.logging.ErrorLogHelper; + +import javax.annotation.Priority; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.container.ContainerRequestFilter; +import javax.ws.rs.container.PreMatching; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.core.Response; +import javax.ws.rs.ext.Provider; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.UUID; + +@Provider +@PreMatching +@Priority(AAIRequestFilterPriority.HEADER_VALIDATION) +public class HeaderValidation extends AAIContainerFilter implements ContainerRequestFilter { + + @Override + public void filter(ContainerRequestContext requestContext) throws IOException { + + Optional<Response> oResp; + + MultivaluedMap<String, String> headersMap = requestContext.getHeaders(); + + String transId = headersMap.getFirst(AAIHeaderProperties.TRANSACTION_ID); + String fromAppId = headersMap.getFirst(AAIHeaderProperties.FROM_APP_ID); + + List<MediaType> acceptHeaderValues = requestContext.getAcceptableMediaTypes(); + + oResp = this.validateHeaderValuePresence(fromAppId, "AAI_4009", acceptHeaderValues); + if (oResp.isPresent()) { + requestContext.abortWith(oResp.get()); + return; + } + oResp = this.validateHeaderValuePresence(transId, "AAI_4010", acceptHeaderValues); + if (oResp.isPresent()) { + requestContext.abortWith(oResp.get()); + return; + } + + if (!this.isValidUUID(transId)) { + transId = UUID.randomUUID().toString(); + requestContext.getHeaders().get(AAIHeaderProperties.TRANSACTION_ID).clear(); + requestContext.getHeaders().add(AAIHeaderProperties.TRANSACTION_ID, transId); + } + + } + + private Optional<Response> validateHeaderValuePresence(String value, String errorCode, + List<MediaType> acceptHeaderValues) { + Response response = null; + AAIException aaie; + if (value == null) { + aaie = new AAIException(errorCode); + return Optional.of(Response.status(aaie.getErrorObject().getHTTPResponseCode()) + .entity(ErrorLogHelper.getRESTAPIErrorResponse(acceptHeaderValues, aaie, new ArrayList<>())) + .build()); + } + + return Optional.ofNullable(response); + } + +} diff --git a/src/main/java/org/onap/aai/interceptors/pre/HttpHeaderInterceptor.java b/src/main/java/org/onap/aai/interceptors/pre/HttpHeaderInterceptor.java new file mode 100644 index 0000000..94d8ca1 --- /dev/null +++ b/src/main/java/org/onap/aai/interceptors/pre/HttpHeaderInterceptor.java @@ -0,0 +1,55 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. 
+ * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.interceptors.pre; + +import org.onap.aai.interceptors.AAIContainerFilter; +import org.onap.aai.interceptors.AAIHeaderProperties; + +import javax.annotation.Priority; +import javax.ws.rs.HttpMethod; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.container.ContainerRequestFilter; +import javax.ws.rs.container.PreMatching; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.ext.Provider; +import java.io.IOException; + +/** + * The Class HttpHeaderInterceptor + */ +@Provider +@PreMatching +@Priority(AAIRequestFilterPriority.HTTP_HEADER) +public class HttpHeaderInterceptor extends AAIContainerFilter implements ContainerRequestFilter { + public static final String patchMethod = "PATCH"; + + @Override + public void filter(ContainerRequestContext containerRequestContext) throws IOException { + + MultivaluedMap<String, String> headersMap = containerRequestContext.getHeaders(); + String overrideMethod = headersMap.getFirst(AAIHeaderProperties.HTTP_METHOD_OVERRIDE); + String httpMethod = containerRequestContext.getMethod(); + + if (HttpMethod.POST.equalsIgnoreCase(httpMethod) && patchMethod.equalsIgnoreCase(overrideMethod)) { + containerRequestContext.setMethod(patchMethod); + } + } + +} diff --git a/src/main/java/org/onap/aai/interceptors/pre/OneWaySslAuthorization.java b/src/main/java/org/onap/aai/interceptors/pre/OneWaySslAuthorization.java new file mode 100644 index 0000000..6563e23 --- /dev/null +++ b/src/main/java/org/onap/aai/interceptors/pre/OneWaySslAuthorization.java @@ -0,0 +1,81 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.interceptors.pre; + +import org.onap.aai.Profiles; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.interceptors.AAIContainerFilter; +import org.onap.aai.logging.ErrorLogHelper; +import org.onap.aai.service.AuthorizationService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.annotation.Profile; + +import javax.annotation.Priority; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.container.ContainerRequestFilter; +import javax.ws.rs.container.PreMatching; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import javax.ws.rs.ext.Provider; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; + +@Provider +@Profile(Profiles.ONE_WAY_SSL) +@PreMatching +@Priority(AAIRequestFilterPriority.AUTHORIZATION) +public class OneWaySslAuthorization extends AAIContainerFilter implements ContainerRequestFilter { + + @Autowired + private AuthorizationService authorizationService; + + @Override + public void filter(ContainerRequestContext containerRequestContext) throws IOException + { + + String basicAuth = containerRequestContext.getHeaderString("Authorization"); + List<MediaType> acceptHeaderValues = containerRequestContext.getAcceptableMediaTypes(); + + if(basicAuth == null || !basicAuth.startsWith("Basic ")){ + Optional<Response> responseOptional = errorResponse("AAI_3300", acceptHeaderValues); + containerRequestContext.abortWith(responseOptional.get()); + return; + } + + basicAuth = basicAuth.replaceAll("Basic ", ""); + + if(!authorizationService.checkIfUserAuthorized(basicAuth)){ + Optional<Response> responseOptional = errorResponse("AAI_3300", acceptHeaderValues); + containerRequestContext.abortWith(responseOptional.get()); + return; + } + + } + + private Optional<Response> errorResponse(String errorCode, List<MediaType> acceptHeaderValues) { + AAIException aaie = new AAIException(errorCode); + return Optional.of(Response.status(aaie.getErrorObject().getHTTPResponseCode()) + .entity(ErrorLogHelper.getRESTAPIErrorResponse(acceptHeaderValues, aaie, new ArrayList<>())) + .build()); + + } +} diff --git a/src/main/java/org/onap/aai/interceptors/pre/RequestHeaderManipulation.java b/src/main/java/org/onap/aai/interceptors/pre/RequestHeaderManipulation.java new file mode 100644 index 0000000..ee4807e --- /dev/null +++ b/src/main/java/org/onap/aai/interceptors/pre/RequestHeaderManipulation.java @@ -0,0 +1,62 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.interceptors.pre; + +import org.onap.aai.interceptors.AAIContainerFilter; +import org.onap.aai.interceptors.AAIHeaderProperties; + +import javax.annotation.Priority; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.container.ContainerRequestFilter; +import javax.ws.rs.container.PreMatching; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.ext.Provider; +import java.util.Collections; +import java.util.regex.Matcher; + +@Provider +@PreMatching +@Priority(AAIRequestFilterPriority.HEADER_MANIPULATION) +public class RequestHeaderManipulation extends AAIContainerFilter implements ContainerRequestFilter { + + @Override + public void filter(ContainerRequestContext requestContext) { + + String uri = requestContext.getUriInfo().getPath(); + this.addRequestContext(uri, requestContext.getHeaders()); + + } + + private void addRequestContext(String uri, MultivaluedMap<String, String> requestHeaders) { + + String rc = ""; + + Matcher match = VersionInterceptor.EXTRACT_VERSION_PATTERN.matcher(uri); + if (match.find()) { + rc = match.group(1); + } + + if (requestHeaders.containsKey(AAIHeaderProperties.REQUEST_CONTEXT)) { + requestHeaders.remove(AAIHeaderProperties.REQUEST_CONTEXT); + } + requestHeaders.put(AAIHeaderProperties.REQUEST_CONTEXT, Collections.singletonList(rc)); + } + +} diff --git a/src/main/java/org/onap/aai/interceptors/pre/RequestModification.java b/src/main/java/org/onap/aai/interceptors/pre/RequestModification.java new file mode 100644 index 0000000..9c17ffc --- /dev/null +++ b/src/main/java/org/onap/aai/interceptors/pre/RequestModification.java @@ -0,0 +1,77 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.interceptors.pre; + +import org.onap.aai.interceptors.AAIContainerFilter; + +import javax.annotation.Priority; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.container.ContainerRequestFilter; +import javax.ws.rs.container.PreMatching; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.core.UriBuilder; +import javax.ws.rs.ext.Provider; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +@Provider +@PreMatching +@Priority(AAIRequestFilterPriority.REQUEST_MODIFICATION) +public class RequestModification extends AAIContainerFilter implements ContainerRequestFilter { + + @Override + public void filter(ContainerRequestContext requestContext) throws IOException { + + this.cleanDME2QueryParams(requestContext); + + } + + private void cleanDME2QueryParams(ContainerRequestContext request) { + UriBuilder builder = request.getUriInfo().getRequestUriBuilder(); + MultivaluedMap<String, String> queries = request.getUriInfo().getQueryParameters(); + + String[] blacklist = { "version", "envContext", "routeOffer" }; + Set<String> blacklistSet = Arrays.stream(blacklist).collect(Collectors.toSet()); + + boolean remove = true; + + for (String param : blacklistSet) { + if (!queries.containsKey(param)) { + remove = false; + break; + } + } + + if (remove) { + for (Map.Entry<String, List<String>> query : queries.entrySet()) { + String key = query.getKey(); + if (blacklistSet.contains(key)) { + builder.replaceQueryParam(key); + } + } + } + request.setRequestUri(builder.build()); + } + +} diff --git a/src/main/java/org/onap/aai/interceptors/pre/RequestTransactionLogging.java b/src/main/java/org/onap/aai/interceptors/pre/RequestTransactionLogging.java new file mode 100644 index 0000000..b770296 --- /dev/null +++ b/src/main/java/org/onap/aai/interceptors/pre/RequestTransactionLogging.java @@ -0,0 +1,136 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.interceptors.pre; + +import com.att.eelf.configuration.EELFLogger; +import com.att.eelf.configuration.EELFManager; +import com.google.gson.JsonObject; +import org.glassfish.jersey.message.internal.ReaderWriter; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.interceptors.AAIContainerFilter; +import org.onap.aai.interceptors.AAIHeaderProperties; +import org.onap.aai.logging.LogFormatTools; +import org.onap.aai.util.AAIConfig; +import org.onap.aai.util.AAIConstants; +import org.onap.aai.util.HbaseSaltPrefixer; +import org.springframework.beans.factory.annotation.Autowired; + +import javax.annotation.Priority; +import javax.servlet.http.HttpServletRequest; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.container.ContainerRequestFilter; +import javax.ws.rs.container.PreMatching; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.ext.Provider; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.security.SecureRandom; +import java.util.Random; +import java.util.UUID; + +@Provider +@PreMatching +@Priority(AAIRequestFilterPriority.REQUEST_TRANS_LOGGING) +public class RequestTransactionLogging extends AAIContainerFilter implements ContainerRequestFilter { + + private static final EELFLogger LOGGER = EELFManager.getInstance().getLogger(RequestTransactionLogging.class); + + @Autowired + private HttpServletRequest httpServletRequest; + + private static final String DEFAULT_CONTENT_TYPE = MediaType.APPLICATION_JSON; + private static final String DEFAULT_RESPONSE_TYPE = MediaType.APPLICATION_XML; + + private static final String CONTENT_TYPE = "Content-Type"; + private static final String ACCEPT = "Accept"; + private static final String TEXT_PLAIN = "text/plain"; + + @Override + public void filter(ContainerRequestContext requestContext) throws IOException { + + String currentTimeStamp = genDate(); + String fullId = this.getAAITxIdToHeader(currentTimeStamp); + this.addToRequestContext(requestContext, AAIHeaderProperties.AAI_TX_ID, fullId); + this.addToRequestContext(requestContext, AAIHeaderProperties.AAI_REQUEST, this.getRequest(requestContext, fullId)); + this.addToRequestContext(requestContext, AAIHeaderProperties.AAI_REQUEST_TS, currentTimeStamp); + this.addDefaultContentType(requestContext); + } + + private void addToRequestContext(ContainerRequestContext requestContext, String name, String aaiTxIdToHeader) { + requestContext.setProperty(name, aaiTxIdToHeader); + } + + private void addDefaultContentType(ContainerRequestContext requestContext) { + + MultivaluedMap<String, String> headersMap = requestContext.getHeaders(); + String contentType = headersMap.getFirst(CONTENT_TYPE); + String acceptType = headersMap.getFirst(ACCEPT); + + if(contentType == null || contentType.contains(TEXT_PLAIN)){ + requestContext.getHeaders().putSingle(CONTENT_TYPE, DEFAULT_CONTENT_TYPE); + } + + if(acceptType == null || acceptType.contains(TEXT_PLAIN)){ + requestContext.getHeaders().putSingle(ACCEPT, DEFAULT_RESPONSE_TYPE); + } + } + + private String getAAITxIdToHeader(String currentTimeStamp) { + String txId = UUID.randomUUID().toString(); + try { + Random rand = new SecureRandom(); + int number = rand.nextInt(99999); + txId = HbaseSaltPrefixer.getInstance().prependSalt(AAIConfig.get(AAIConstants.AAI_NODENAME) + "-" + + 
currentTimeStamp + "-" + number ); //new Random(System.currentTimeMillis()).nextInt(99999) + } catch (AAIException e) { + } + + return txId; + } + + private String getRequest(ContainerRequestContext requestContext, String fullId) { + + JsonObject request = new JsonObject(); + request.addProperty("ID", fullId); + request.addProperty("Http-Method", requestContext.getMethod()); + request.addProperty(CONTENT_TYPE, httpServletRequest.getContentType()); + request.addProperty("Headers", requestContext.getHeaders().toString()); + + ByteArrayOutputStream out = new ByteArrayOutputStream(); + InputStream in = requestContext.getEntityStream(); + + try { + if (in.available() > 0) { + ReaderWriter.writeTo(in, out); + byte[] requestEntity = out.toByteArray(); + request.addProperty("Payload", new String(requestEntity, "UTF-8")); + requestContext.setEntityStream(new ByteArrayInputStream(requestEntity)); + } + } catch (IOException ex) { + LOGGER.error("An exception occurred during the transaction logging: " + LogFormatTools.getStackTop(ex)); + } + + return request.toString(); + } + +} diff --git a/src/main/java/org/onap/aai/interceptors/pre/RetiredInterceptor.java b/src/main/java/org/onap/aai/interceptors/pre/RetiredInterceptor.java new file mode 100644 index 0000000..9a33b05 --- /dev/null +++ b/src/main/java/org/onap/aai/interceptors/pre/RetiredInterceptor.java @@ -0,0 +1,150 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.interceptors.pre; + +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.interceptors.AAIContainerFilter; +import org.onap.aai.logging.ErrorLogHelper; +import org.onap.aai.service.RetiredService; +import org.onap.aai.util.AAIConfig; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; + +import javax.annotation.Priority; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.container.ContainerRequestFilter; +import javax.ws.rs.container.PreMatching; +import javax.ws.rs.core.Response; +import javax.ws.rs.ext.Provider; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +// Can cache this so if the uri was already cached then it won't run the string +// matching each time but only does it for the first time + +@Provider +@PreMatching +@Priority(AAIRequestFilterPriority.RETIRED_SERVICE) +public class RetiredInterceptor extends AAIContainerFilter implements ContainerRequestFilter { + + private static final Pattern VERSION_PATTERN = Pattern.compile("v\\d+|latest"); + + private RetiredService retiredService; + + private String basePath; + + @Autowired + public RetiredInterceptor(RetiredService retiredService, @Value("${schema.uri.base.path}") String basePath){ + this.retiredService = retiredService; + this.basePath = basePath; + if(!basePath.endsWith("/")){ + this.basePath = basePath + "/"; + } + } + @Override + public void filter(ContainerRequestContext containerRequestContext) throws IOException { + + String requestURI = containerRequestContext.getUriInfo().getAbsolutePath().getPath(); + + String version = extractVersionFromPath(requestURI); + + List<Pattern> retiredAllVersionList = retiredService.getRetiredAllVersionList(); + + + if(checkIfUriRetired(containerRequestContext, retiredAllVersionList, version, requestURI, "")){ + return; + } + + List<Pattern> retiredVersionList = retiredService.getRetiredPatterns(); + + checkIfUriRetired(containerRequestContext, retiredVersionList, version, requestURI); + } + + public boolean checkIfUriRetired(ContainerRequestContext containerRequestContext, + List<Pattern> retiredPatterns, + String version, + String requestURI, + String message){ + + + for(Pattern retiredPattern : retiredPatterns){ + if(retiredPattern.matcher(requestURI).matches()){ + AAIException e; + + if(message == null){ + e = new AAIException("AAI_3007"); + } else { + e = new AAIException("AAI_3015"); + } + + ArrayList<String> templateVars = new ArrayList<>(); + + if (templateVars.isEmpty()) { + templateVars.add("PUT"); + if(requestURI != null){ + requestURI = requestURI.replaceAll(basePath, ""); + } + templateVars.add(requestURI); + if(message == null){ + templateVars.add(version); + templateVars.add(AAIConfig.get("aai.default.api.version", "")); + } + } + + Response response = Response + .status(e.getErrorObject().getHTTPResponseCode()) + .entity( + ErrorLogHelper + .getRESTAPIErrorResponse( + containerRequestContext.getAcceptableMediaTypes(), e, templateVars + ) + ) + .build(); + + containerRequestContext.abortWith(response); + + return true; + } + } + + return false; + } + + public boolean checkIfUriRetired(ContainerRequestContext containerRequestContext, + List<Pattern> retiredPatterns, + String version, + String requestURI){ + return 
checkIfUriRetired(containerRequestContext, retiredPatterns, version, requestURI, null); + } + + protected String extractVersionFromPath(String requestURI) { + Matcher versionMatcher = VERSION_PATTERN.matcher(requestURI); + String version = null; + + if(versionMatcher.find()){ + version = versionMatcher.group(0); + } + return version; + } + +} diff --git a/src/main/java/org/onap/aai/interceptors/pre/SetLoggingContext.java b/src/main/java/org/onap/aai/interceptors/pre/SetLoggingContext.java new file mode 100644 index 0000000..6c3a7fc --- /dev/null +++ b/src/main/java/org/onap/aai/interceptors/pre/SetLoggingContext.java @@ -0,0 +1,75 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.interceptors.pre; + +import org.onap.aai.interceptors.AAIContainerFilter; +import org.onap.aai.interceptors.AAIHeaderProperties; +import org.onap.aai.logging.LoggingContext; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.core.env.Environment; + +import javax.annotation.Priority; +import javax.servlet.http.HttpServletRequest; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.container.ContainerRequestFilter; +import javax.ws.rs.container.PreMatching; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.ext.Provider; +import java.io.IOException; + +@Provider +@PreMatching +@Priority(AAIRequestFilterPriority.SET_LOGGING_CONTEXT) +public class SetLoggingContext extends AAIContainerFilter implements ContainerRequestFilter { + + @Autowired + private Environment environment; + + @Autowired + private HttpServletRequest httpServletRequest; + + @Override + public void filter(ContainerRequestContext requestContext) throws IOException { + + String uri = httpServletRequest.getRequestURI(); + String queryString = httpServletRequest.getQueryString(); + + if(queryString != null && !queryString.isEmpty()){ + uri = uri + "?" 
+ queryString; + } + + String httpMethod = requestContext.getMethod(); + + MultivaluedMap<String, String> headersMap = requestContext.getHeaders(); + + String transId = headersMap.getFirst(AAIHeaderProperties.TRANSACTION_ID); + String fromAppId = headersMap.getFirst(AAIHeaderProperties.FROM_APP_ID); + + LoggingContext.init(); + LoggingContext.requestId(transId); + LoggingContext.partnerName(fromAppId); + LoggingContext.targetEntity(environment.getProperty("spring.application.name")); + LoggingContext.component(fromAppId); + LoggingContext.serviceName(httpMethod + " " + uri); + LoggingContext.targetServiceName(httpMethod + " " + uri); + LoggingContext.statusCode(LoggingContext.StatusCode.COMPLETE); + } + +} diff --git a/src/main/java/org/onap/aai/interceptors/pre/TwoWaySslAuthorization.java b/src/main/java/org/onap/aai/interceptors/pre/TwoWaySslAuthorization.java new file mode 100644 index 0000000..73b7877 --- /dev/null +++ b/src/main/java/org/onap/aai/interceptors/pre/TwoWaySslAuthorization.java @@ -0,0 +1,187 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.interceptors.pre; + +import org.onap.aai.auth.AAIAuthCore; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.interceptors.AAIContainerFilter; +import org.onap.aai.interceptors.AAIHeaderProperties; +import org.onap.aai.logging.ErrorLogHelper; +import org.onap.aai.restcore.HttpMethod; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.annotation.Profile; + +import javax.annotation.Priority; +import javax.security.auth.x500.X500Principal; +import javax.servlet.http.HttpServletRequest; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.container.ContainerRequestFilter; +import javax.ws.rs.container.PreMatching; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import javax.ws.rs.ext.Provider; +import java.security.cert.X509Certificate; +import java.util.*; +import java.util.stream.Collectors; + +@Provider +@PreMatching +@Priority(AAIRequestFilterPriority.AUTHORIZATION) +@Profile("two-way-ssl") +public class TwoWaySslAuthorization extends AAIContainerFilter implements ContainerRequestFilter { + + @Autowired + private HttpServletRequest httpServletRequest; + + @Autowired + private AAIAuthCore aaiAuthCore; + + @Override + public void filter(ContainerRequestContext requestContext) { + + Optional<Response> oResp; + + String uri = requestContext.getUriInfo().getAbsolutePath().getPath(); + String httpMethod = getHttpMethod(requestContext); + + List<MediaType> acceptHeaderValues = requestContext.getAcceptableMediaTypes(); + + Optional<String> authUser = getUser(this.httpServletRequest); + + if (authUser.isPresent()) { + oResp = this.authorize(uri, httpMethod, acceptHeaderValues, authUser.get(), + this.getHaProxyUser(this.httpServletRequest), getCertIssuer(this.httpServletRequest)); + if (oResp.isPresent()) { + requestContext.abortWith(oResp.get()); + return; + } + } else { + AAIException aaie = new AAIException("AAI_9107"); + requestContext + .abortWith(Response + .status(aaie.getErrorObject().getHTTPResponseCode()).entity(ErrorLogHelper + .getRESTAPIErrorResponseWithLogging(acceptHeaderValues, aaie, new ArrayList<>())) + .build()); + } + + } + + private String getCertIssuer(HttpServletRequest hsr) { + String issuer = hsr.getHeader("X-AAI-SSL-Issuer"); + if (issuer != null && !issuer.isEmpty()) { + // the haproxy header replaces the ', ' with '/' and reverses on the '/' need to undo that. 
+ List<String> broken = Arrays.asList(issuer.split("/")); + broken = broken.stream().filter(s -> !s.isEmpty()).collect(Collectors.toList()); + Collections.reverse(broken); + issuer = String.join(", ", broken); + } else { + if (hsr.getAttribute("javax.servlet.request.cipher_suite") != null) { + X509Certificate[] certChain = (X509Certificate[]) hsr.getAttribute("javax.servlet.request.X509Certificate"); + if (certChain != null && certChain.length > 0) { + X509Certificate clientCert = certChain[0]; + issuer = clientCert.getIssuerX500Principal().getName(); + } + } + } + return issuer; + } + + private String getHttpMethod(ContainerRequestContext requestContext) { + String httpMethod = requestContext.getMethod(); + if ("POST".equalsIgnoreCase(httpMethod) + && "PATCH".equals(requestContext.getHeaderString(AAIHeaderProperties.HTTP_METHOD_OVERRIDE))) { + httpMethod = HttpMethod.MERGE_PATCH.toString(); + } + if (httpMethod.equalsIgnoreCase(HttpMethod.MERGE_PATCH.toString()) || "patch".equalsIgnoreCase(httpMethod)) { + httpMethod = HttpMethod.PUT.toString(); + } + return httpMethod; + } + + private Optional<String> getUser(HttpServletRequest hsr) { + String authUser = null; + if (hsr.getAttribute("javax.servlet.request.cipher_suite") != null) { + X509Certificate[] certChain = (X509Certificate[]) hsr.getAttribute("javax.servlet.request.X509Certificate"); + + /* + * If the certificate is null or the certificate chain length is zero Then + * retrieve the authorization in the request header Authorization Check that it + * is not null and that it starts with Basic and then strip the basic portion to + * get the base64 credentials Check if this is contained in the AAIBasicAuth + * Singleton class If it is, retrieve the username associated with that + * credentials and set to authUser Otherwise, get the principal from certificate + * and use that authUser + */ + + if (certChain == null || certChain.length == 0) { + + String authorization = hsr.getHeader("Authorization"); + + if (authorization != null && authorization.startsWith("Basic ")) { + authUser = authorization.replace("Basic ", ""); + } + + } else { + X509Certificate clientCert = certChain[0]; + X500Principal subjectDN = clientCert.getSubjectX500Principal(); + authUser = subjectDN.toString().toLowerCase(); + } + } + + return Optional.ofNullable(authUser); + } + + private String getHaProxyUser(HttpServletRequest hsr) { + String haProxyUser; + if (Objects.isNull(hsr.getHeader("X-AAI-SSL-Client-CN")) + || Objects.isNull(hsr.getHeader("X-AAI-SSL-Client-OU")) + || Objects.isNull(hsr.getHeader("X-AAI-SSL-Client-O")) + || Objects.isNull(hsr.getHeader("X-AAI-SSL-Client-L")) + || Objects.isNull(hsr.getHeader("X-AAI-SSL-Client-ST")) + || Objects.isNull(hsr.getHeader("X-AAI-SSL-Client-C"))) { + haProxyUser = ""; + } else { + haProxyUser = String.format("CN=%s, OU=%s, O=\"%s\", L=%s, ST=%s, C=%s", + Objects.toString(hsr.getHeader("X-AAI-SSL-Client-CN"), ""), + Objects.toString(hsr.getHeader("X-AAI-SSL-Client-OU"), ""), + Objects.toString(hsr.getHeader("X-AAI-SSL-Client-O"), ""), + Objects.toString(hsr.getHeader("X-AAI-SSL-Client-L"), ""), + Objects.toString(hsr.getHeader("X-AAI-SSL-Client-ST"), ""), + Objects.toString(hsr.getHeader("X-AAI-SSL-Client-C"), "")).toLowerCase(); + } + return haProxyUser; + } + + private Optional<Response> authorize(String uri, String httpMethod, List<MediaType> acceptHeaderValues, + String authUser, String haProxyUser, String issuer) { + Response response = null; + try { + if (!aaiAuthCore.authorize(authUser, uri, httpMethod, 
haProxyUser, issuer)) { + throw new AAIException("AAI_9101", "Request on " + httpMethod + " " + uri + " status is not OK"); + } + } catch (AAIException e) { + response = Response.status(e.getErrorObject().getHTTPResponseCode()) + .entity(ErrorLogHelper.getRESTAPIErrorResponseWithLogging(acceptHeaderValues, e, new ArrayList<>())) + .build(); + } + return Optional.ofNullable(response); + } + +} diff --git a/src/main/java/org/onap/aai/interceptors/pre/VersionInterceptor.java b/src/main/java/org/onap/aai/interceptors/pre/VersionInterceptor.java new file mode 100644 index 0000000..f591120 --- /dev/null +++ b/src/main/java/org/onap/aai/interceptors/pre/VersionInterceptor.java @@ -0,0 +1,101 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.interceptors.pre; + +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.interceptors.AAIContainerFilter; +import org.onap.aai.logging.ErrorLogHelper; +import org.onap.aai.setup.SchemaVersion; +import org.onap.aai.setup.SchemaVersions; +import org.springframework.beans.factory.annotation.Autowired; + +import javax.annotation.Priority; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.container.ContainerRequestFilter; +import javax.ws.rs.container.PreMatching; +import javax.ws.rs.core.Response; +import java.util.ArrayList; +import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +@PreMatching +@Priority(AAIRequestFilterPriority.VERSION) +public class VersionInterceptor extends AAIContainerFilter implements ContainerRequestFilter { + + public static final Pattern EXTRACT_VERSION_PATTERN = Pattern.compile("^(v[1-9][0-9]*).*$"); + + private final Set<String> allowedVersions; + + private final SchemaVersions schemaVersions; + + @Autowired + public VersionInterceptor(SchemaVersions schemaVersions){ + this.schemaVersions = schemaVersions; + allowedVersions = schemaVersions.getVersions() + .stream() + .map(SchemaVersion::toString) + .collect(Collectors.toSet()); + + } + + @Override + public void filter(ContainerRequestContext requestContext) { + + String uri = requestContext.getUriInfo().getPath(); + + if (uri.startsWith("search") || uri.startsWith("util/echo") || uri.startsWith("tools")) { + return; + } + + Matcher matcher = EXTRACT_VERSION_PATTERN.matcher(uri); + + String version = null; + if(matcher.matches()){ + version = matcher.group(1); + } else { + requestContext.abortWith(createInvalidVersionResponse("AAI_3017", requestContext, version)); + return; + } + + 
if(!allowedVersions.contains(version)){ + requestContext.abortWith(createInvalidVersionResponse("AAI_3016", requestContext, version)); + } + } + + private Response createInvalidVersionResponse(String errorCode, ContainerRequestContext context, String version) { + AAIException e = new AAIException(errorCode); + ArrayList<String> templateVars = new ArrayList<>(); + + if (templateVars.isEmpty()) { + templateVars.add(context.getMethod()); + templateVars.add(context.getUriInfo().getPath()); + templateVars.add(version); + } + + String entity = ErrorLogHelper.getRESTAPIErrorResponse(context.getAcceptableMediaTypes(), e, templateVars); + + return Response + .status(e.getErrorObject().getHTTPResponseCode()) + .entity(entity) + .build(); + } +} diff --git a/src/main/java/org/onap/aai/interceptors/pre/VersionLatestInterceptor.java b/src/main/java/org/onap/aai/interceptors/pre/VersionLatestInterceptor.java new file mode 100644 index 0000000..61008b6 --- /dev/null +++ b/src/main/java/org/onap/aai/interceptors/pre/VersionLatestInterceptor.java @@ -0,0 +1,56 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.interceptors.pre; + +import org.onap.aai.interceptors.AAIContainerFilter; +import org.onap.aai.setup.SchemaVersions; +import org.springframework.beans.factory.annotation.Autowired; + +import javax.annotation.Priority; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.container.ContainerRequestFilter; +import javax.ws.rs.container.PreMatching; +import java.net.URI; + +@PreMatching +@Priority(AAIRequestFilterPriority.LATEST) +public class VersionLatestInterceptor extends AAIContainerFilter implements ContainerRequestFilter { + + private final SchemaVersions schemaVersions; + + @Autowired + public VersionLatestInterceptor(SchemaVersions schemaVersions){ + this.schemaVersions = schemaVersions; + } + + @Override + public void filter(ContainerRequestContext requestContext) { + + String uri = requestContext.getUriInfo().getPath(); + + if(uri.startsWith("latest")){ + String absolutePath = requestContext.getUriInfo().getAbsolutePath().toString(); + String latest = absolutePath.replaceFirst("latest", schemaVersions.getDefaultVersion().toString()); + requestContext.setRequestUri(URI.create(latest)); + return; + } + + } +} diff --git a/src/main/java/org/onap/aai/migration/EdgeMigrator.java b/src/main/java/org/onap/aai/migration/EdgeMigrator.java new file mode 100644 index 0000000..99b4896 --- /dev/null +++ b/src/main/java/org/onap/aai/migration/EdgeMigrator.java @@ -0,0 +1,145 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.migration; + +import java.util.List; + +import com.google.common.collect.Multimap; +import org.javatuples.Pair; + +import org.apache.tinkerpop.gremlin.structure.Edge; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.onap.aai.db.props.AAIProperties; +import org.onap.aai.edges.EdgeIngestor; +import org.onap.aai.edges.EdgeRuleQuery; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.serialization.db.EdgeSerializer; +import org.onap.aai.serialization.engines.TransactionalGraphEngine; +import org.onap.aai.edges.EdgeRule; +import org.onap.aai.setup.SchemaVersions; + +/** + * A migration template for migrating all edge properties between "from" and "to" node from the DbedgeRules.json + * + */ +@MigrationPriority(0) +@MigrationDangerRating(1) +public abstract class EdgeMigrator extends Migrator { + + private boolean success = true; + + public EdgeMigrator(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions) { + super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions); + } + + public EdgeMigrator(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions, List<Pair<String, String>> nodePairList) { + super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions); + } + + + /** + * Do not override this method as an inheritor of this class + */ + @Override + public void run() { + + executeModifyOperation(); + + } + + /** + * This is where inheritors should add their logic + */ + protected void executeModifyOperation() { + + changeEdgeProperties(); + + } + + protected void changeEdgeLabels() { + //TODO: when json file has edge label as well as edge property changes + } + + + + protected void changeEdgeProperties() { + try { + List<Pair<String, String>> nodePairList = this.getAffectedNodePairTypes(); + for (Pair<String, String> nodePair : nodePairList) { + + String NODE_A = nodePair.getValue0(); + String NODE_B = nodePair.getValue1(); + Multimap<String, EdgeRule> result = edgeIngestor.getRules(new EdgeRuleQuery.Builder(NODE_A, NODE_B).build()); + + GraphTraversal<Vertex, Vertex> g = this.engine.asAdmin().getTraversalSource().V(); + /* + * Find Out-Edges from Node A to Node B and change them + * Also Find Out-Edges from Node B to Node A and change them + */ + g.union(__.has(AAIProperties.NODE_TYPE, NODE_A).outE().where(__.inV().has(AAIProperties.NODE_TYPE, NODE_B)), + __.has(AAIProperties.NODE_TYPE, NODE_B).outE().where(__.inV().has(AAIProperties.NODE_TYPE, NODE_A))) + .sideEffect(t -> { + Edge e = t.get(); + try { + Vertex out = e.outVertex(); + Vertex in = e.inVertex(); + if (out == null || in == null) { + logger.error( + e.id() + " invalid because one vertex was null: out=" + out + " in=" + in); + } else { + if (result.containsKey(e.label())) { + EdgeRule rule = result.get(e.label()).iterator().next(); + e.properties().forEachRemaining(prop -> prop.remove()); + edgeSerializer.addProperties(e, rule); + } else { + logger.info("found vertices connected by unkwown label: out=" + out + " label=" + + e.label() + " in=" + in); + } + } + } catch (Exception e1) { + throw new RuntimeException(e1); + } 
+ }).iterate(); + } + + } catch (Exception e) { + logger.error("error encountered", e); + success = false; + } + } + + @Override + public Status getStatus() { + if (success) { + return Status.SUCCESS; + } else { + return Status.FAILURE; + } + } + + /** + * List of node pairs("from" and "to"), you would like EdgeMigrator to migrate from json files + * @return + */ + public abstract List<Pair<String, String>> getAffectedNodePairTypes() ; + +} diff --git a/src/main/java/org/onap/aai/migration/EdgeSwingMigrator.java b/src/main/java/org/onap/aai/migration/EdgeSwingMigrator.java new file mode 100644 index 0000000..616ff02 --- /dev/null +++ b/src/main/java/org/onap/aai/migration/EdgeSwingMigrator.java @@ -0,0 +1,288 @@ +/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.migration;
+
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import org.apache.tinkerpop.gremlin.structure.Edge;
+import org.apache.tinkerpop.gremlin.structure.Property;
+import org.apache.tinkerpop.gremlin.structure.Direction;
+import org.apache.tinkerpop.gremlin.structure.Vertex;
+import org.javatuples.Pair;
+import org.onap.aai.db.props.AAIProperties;
+import org.onap.aai.edges.EdgeIngestor;
+import org.onap.aai.introspection.LoaderFactory;
+import org.onap.aai.serialization.db.EdgeSerializer;
+import org.onap.aai.serialization.engines.TransactionalGraphEngine;
+import org.onap.aai.setup.SchemaVersions;
+
+/**
+ * A migration template for "swinging" edges from an old node to a new target node.
+ * That is, given an oldNode and a newNode, every edge that terminates on the oldNode
+ * is re-terminated on the newNode (in practice the old edge is dropped and a new one
+ * with the same label and properties is added).
+ *
+ * Three parameters restrict which edges get swung over:
+ * > otherEndNodeTypeRestriction: only swing an edge if the node at its other end
+ * is of this nodeType.
+ * > edgeLabelRestriction: only swing edges that have this edgeLabel.
+ * > edgeDirectionRestriction: only swing edges that go in this direction relative to
+ * the oldNode; this parameter is required and its valid values are BOTH, IN and OUT.
+ *
+ * A commented illustrative subclass sketch appears at the bottom of this class.
+ */
+@MigrationPriority(0)
+@MigrationDangerRating(1)
+public abstract class EdgeSwingMigrator extends Migrator {
+
+ private boolean success = true;
+ private String nodeTypeRestriction = null;
+ private String edgeLabelRestriction = null;
+ private String edgeDirRestriction = null;
+ private List<Pair<Vertex, Vertex>> nodePairList;
+
+
+ public EdgeSwingMigrator(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions) {
+ super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions);
+ }
+
+
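+ // run() is the template method: executeModifyOperation() swings the edges and
+ // stores the affected pairs in this.nodePairList, which is then handed to
+ // cleanupAsAppropriate() for optional removal of the drained old nodes.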
+ /**
+ * Do not override this method as an inheritor of this class
+ */
+ @Override
+ public void run() {
+ executeModifyOperation();
+ cleanupAsAppropriate(this.nodePairList);
+ }
+
+ /**
+ * Gathers the restrictions and affected node pairs from the abstract hooks below,
+ * then swings the edges for each pair. Inheritors supply their logic through those
+ * hooks rather than by overriding this method.
+ */
+ protected void executeModifyOperation() {
+
+ try {
+ this.nodeTypeRestriction = this.getNodeTypeRestriction();
+ this.edgeLabelRestriction = this.getEdgeLabelRestriction();
+ this.edgeDirRestriction = this.getEdgeDirRestriction();
+ nodePairList = this.getAffectedNodePairs();
+ for (Pair<Vertex, Vertex> nodePair : nodePairList) {
+ Vertex fromNode = nodePair.getValue0();
+ Vertex toNode = nodePair.getValue1();
+ this.swingEdges(fromNode, toNode,
+ this.nodeTypeRestriction, this.edgeLabelRestriction, this.edgeDirRestriction);
+ }
+ } catch (Exception e) {
+ logger.error("error encountered", e);
+ success = false;
+ }
+ }
+
+
+ protected void swingEdges(Vertex oldNode, Vertex newNode, String nodeTypeRestr, String edgeLabelRestr, String edgeDirRestr) {
+ try {
+ // If the old or new vertex is null, log an error and mark the migration failed
+ if( oldNode == null ){
+ logger.info ( "null oldNode passed to swingEdges() ");
+ success = false;
+ return;
+ }
+ else if( newNode == null ){
+ logger.info ( "null newNode passed to swingEdges() ");
+ success = false;
+ return;
+ }
+ else if( edgeDirRestr == null ||
+ (!edgeDirRestr.equals("BOTH")
+ && !edgeDirRestr.equals("IN")
+ && !edgeDirRestr.equals("OUT") )
+ ){
+ logger.info ( "invalid direction passed to swingEdges(). valid values are BOTH/IN/OUT ");
+ success = false;
+ return;
+ }
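+ // Normalize the optional restrictions independently of each other: a null, blank
+ // or "none" edgeLabelRestr means swing edges of any label, and a null or blank
+ // nodeTypeRestr defaults to "none" (no restriction on the other-end node type).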
+ if( edgeLabelRestr != null
+ && (edgeLabelRestr.trim().equals("none") || edgeLabelRestr.trim().equals("")) ){
+ edgeLabelRestr = null;
+ }
+ if( nodeTypeRestr == null || nodeTypeRestr.trim().equals("") ){
+ nodeTypeRestr = "none";
+ }
+
+ String oldNodeType = oldNode.value(AAIProperties.NODE_TYPE);
+ String oldUri = oldNode.<String> property("aai-uri").isPresent() ? oldNode.<String> property("aai-uri").value() : "URI Not present";
+
+ String newNodeType = newNode.value(AAIProperties.NODE_TYPE);
+ String newUri = newNode.<String> property("aai-uri").isPresent() ? newNode.<String> property("aai-uri").value() : "URI Not present";
+
+ // If the nodeTypes don't match, log an error and mark the migration failed
+ if( !oldNodeType.equals(newNodeType) ){
+ logger.error("Cannot swing edge from a [" + oldNodeType + "] node to a [" +
+ newNodeType + "] node.");
+ success = false;
+ return;
+ }
+
+ // Find and migrate any applicable OUT edges.
+ if( edgeDirRestr.equals("BOTH") || edgeDirRestr.equals("OUT") ){
+ Iterator <Edge> edgeOutIter = null;
+ if( edgeLabelRestr == null ) {
+ edgeOutIter = oldNode.edges(Direction.OUT);
+ }
+ else {
+ edgeOutIter = oldNode.edges(Direction.OUT, edgeLabelRestr);
+ }
+
+ while( edgeOutIter.hasNext() ){
+ Edge oldOutE = edgeOutIter.next();
+ String eLabel = oldOutE.label();
+ Vertex otherSideNode4ThisEdge = oldOutE.inVertex();
+ String otherSideNodeType = otherSideNode4ThisEdge.value(AAIProperties.NODE_TYPE);
+ if( nodeTypeRestr.equals("none") || nodeTypeRestr.toLowerCase().equals(otherSideNodeType) ){
+ Iterator <Property<Object>> propsIter = oldOutE.properties();
+ HashMap<String, String> propMap = new HashMap<String,String>();
+ while( propsIter.hasNext() ){
+ Property <Object> ep = propsIter.next();
+ propMap.put(ep.key(), ep.value().toString());
+ }
+
+ String otherSideUri = otherSideNode4ThisEdge.<String> property("aai-uri").isPresent() ? otherSideNode4ThisEdge.<String> property("aai-uri").value() : "URI Not present";
+ logger.info ( "\nSwinging [" + eLabel + "] OUT edge. \n >> Unchanged side is ["
+ + otherSideNodeType + "][" + otherSideUri + "] \n >> Edge used to go to [" + oldNodeType
+ + "][" + oldUri + "],\n >> now swung to [" + newNodeType + "][" + newUri + "]. ");
+ // remove the old edge
+ oldOutE.remove();
+
+ // add the new edge with properties that match the edge that was deleted. We don't want to
+ // change any edge properties - just swinging one end of the edge to a new node.
+ // NOTE - vertex.addEdge(label, other) creates an edge OUT of the vertex it is
+ // called on and IN to the vertex passed as a parameter, so we add from the newNode side.
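+ // For example, x.addEdge("lbl", y) creates the edge x --lbl--> y.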
+ Edge newOutE = newNode.addEdge(eLabel, otherSideNode4ThisEdge);
+
+ for (Map.Entry<String, String> pair : propMap.entrySet()) {
+ newOutE.property(pair.getKey(), pair.getValue());
+ }
+
+ }
+ }
+ }
+
+ // Find and migrate any applicable IN edges.
+ if( edgeDirRestr.equals("BOTH") || edgeDirRestr.equals("IN") ){
+ Iterator <Edge> edgeInIter = null;
+ if( edgeLabelRestr == null ) {
+ edgeInIter = oldNode.edges(Direction.IN);
+ }
+ else {
+ edgeInIter = oldNode.edges(Direction.IN, edgeLabelRestr);
+ }
+
+ while( edgeInIter.hasNext() ){
+ Edge oldInE = edgeInIter.next();
+ String eLabel = oldInE.label();
+ Vertex otherSideNode4ThisEdge = oldInE.outVertex();
+ String otherSideNodeType = otherSideNode4ThisEdge.value(AAIProperties.NODE_TYPE);
+ if( nodeTypeRestr.equals("none") || nodeTypeRestr.toLowerCase().equals(otherSideNodeType) ){
+ Iterator <Property<Object>> propsIter = oldInE.properties();
+ HashMap<String, String> propMap = new HashMap<String,String>();
+ while( propsIter.hasNext() ){
+ Property <Object> ep = propsIter.next();
+ propMap.put(ep.key(), ep.value().toString());
+ }
+
+ String otherSideUri = otherSideNode4ThisEdge.<String> property("aai-uri").isPresent() ? otherSideNode4ThisEdge.<String> property("aai-uri").value() : "URI Not present";
+ logger.info ( "\nSwinging [" + eLabel + "] IN edge. \n >> Unchanged side is ["
+ + otherSideNodeType + "][" + otherSideUri + "] \n >> Edge used to go to [" + oldNodeType
+ + "][" + oldUri + "],\n >> now swung to [" + newNodeType + "][" + newUri + "]. ");
+
+ // remove the old edge
+ oldInE.remove();
+
+ // add the new edge with properties that match the edge that was deleted. We don't want to
+ // change any edge properties - just swinging one end of the edge to a new node.
+ // NOTE - vertex.addEdge(label, other) creates an edge OUT of the vertex it is
+ // called on and IN to the vertex passed as a parameter, so we add from the node
+ // on the other end of the original edge and the newNode ends up with an IN edge.
+ Edge newInE = otherSideNode4ThisEdge.addEdge(eLabel, newNode);
+
+ for (Map.Entry<String, String> pair : propMap.entrySet()) {
+ newInE.property(pair.getKey(), pair.getValue());
+ }
+ }
+ }
+ }
+
+ } catch (Exception e) {
+ logger.error("error encountered", e);
+ success = false;
+ }
+ }
+
+ @Override
+ public Status getStatus() {
+ if (success) {
+ return Status.SUCCESS;
+ } else {
+ return Status.FAILURE;
+ }
+ }
+
+
+ /**
+ * Get the list of node pairs ("from" and "to") whose edges you would like EdgeSwingMigrator to swing
+ * @return the affected (oldNode, newNode) vertex pairs
+ */
+ public abstract List<Pair<Vertex, Vertex>> getAffectedNodePairs();
+
+
+ /**
+ * Get the nodeTypeRestriction that you want EdgeSwingMigrator to use
+ * @return the other-end nodeType to restrict to, or "none" (or null/blank) for no restriction
+ */
+ public abstract String getNodeTypeRestriction();
+
+
+ /**
+ * Get the edgeLabelRestriction that you want EdgeSwingMigrator to use
+ * @return the edge label to restrict to, or "none" (or blank) for no restriction
+ */
+ public abstract String getEdgeLabelRestriction();
+
+ /**
+ * Get the edgeDirRestriction that you want EdgeSwingMigrator to use
+ * @return the edge direction relative to the oldNode; required, one of BOTH, IN or OUT
+ */
+ public abstract String getEdgeDirRestriction();
+
+
+
+ /**
+ * Clean up (remove) the nodes that edges were moved off of, if appropriate
+ * @param nodePairL the (oldNode, newNode) pairs whose edges were swung
+ */
+ public abstract void cleanupAsAppropriate(List<Pair<Vertex, Vertex>> nodePairL);
+
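+ // Illustrative sketch only, not part of this change: a minimal, hypothetical
+ // subclass showing how the hooks above fit together. All class, host and node
+ // names below are made up; imports and any abstract methods inherited from
+ // Migrator itself are omitted.
+ //
+ // @Enabled
+ // @MigrationPriority(1)
+ // public class ExamplePserverSwingMigration extends EdgeSwingMigrator {
+ //
+ //     public ExamplePserverSwingMigration(TransactionalGraphEngine engine, LoaderFactory loaderFactory,
+ //             EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions) {
+ //         super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions);
+ //     }
+ //
+ //     @Override
+ //     public List<Pair<Vertex, Vertex>> getAffectedNodePairs() {
+ //         // Pair the hypothetical old pserver with its replacement, looked up by hostname.
+ //         Vertex oldV = this.engine.asAdmin().getTraversalSource().V()
+ //                 .has(AAIProperties.NODE_TYPE, "pserver").has("hostname", "old-host").next();
+ //         Vertex newV = this.engine.asAdmin().getTraversalSource().V()
+ //                 .has(AAIProperties.NODE_TYPE, "pserver").has("hostname", "new-host").next();
+ //         return Collections.singletonList(new Pair<>(oldV, newV));
+ //     }
+ //
+ //     @Override
+ //     public String getNodeTypeRestriction() { return "none"; } // any other-end node type
+ //
+ //     @Override
+ //     public String getEdgeLabelRestriction() { return "none"; } // any edge label
+ //
+ //     @Override
+ //     public String getEdgeDirRestriction() { return "BOTH"; } // required: BOTH, IN or OUT
+ //
+ //     @Override
+ //     public void cleanupAsAppropriate(List<Pair<Vertex, Vertex>> nodePairL) {
+ //         // This sketch keeps the drained old node; a real migration might remove it here.
+ //     }
+ // }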
+}
\ No newline at end of file diff --git a/src/main/java/org/onap/aai/migration/Enabled.java b/src/main/java/org/onap/aai/migration/Enabled.java new file mode 100644 index 0000000..1b7bba3 --- /dev/null +++ b/src/main/java/org/onap/aai/migration/Enabled.java @@ -0,0 +1,35 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.migration; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + + +/** + * Used to enable a migration to be picked up by the {@link org.onap.aai.migration.MigrationControllerInternal MigrationController} + */ +@Target(ElementType.TYPE) +@Retention(value = RetentionPolicy.RUNTIME) +public @interface Enabled { + +} diff --git a/src/main/java/org/onap/aai/migration/EventAction.java b/src/main/java/org/onap/aai/migration/EventAction.java new file mode 100644 index 0000000..830685b --- /dev/null +++ b/src/main/java/org/onap/aai/migration/EventAction.java @@ -0,0 +1,29 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.migration; + +/** + * Used to describe the type of DMaaP event you would like to create + */ +public enum EventAction { + CREATE, + UPDATE, + DELETE +} diff --git a/src/main/java/org/onap/aai/migration/MigrationController.java b/src/main/java/org/onap/aai/migration/MigrationController.java new file mode 100644 index 0000000..0e65745 --- /dev/null +++ b/src/main/java/org/onap/aai/migration/MigrationController.java @@ -0,0 +1,78 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.migration; + +import java.util.UUID; + +import org.onap.aai.dbmap.AAIGraph; +import org.onap.aai.edges.EdgeIngestor; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.logging.LoggingContext; +import org.onap.aai.logging.LoggingContext.StatusCode; +import org.onap.aai.serialization.db.EdgeSerializer; +import org.onap.aai.setup.SchemaVersions; +import org.onap.aai.util.AAIConstants; +import org.springframework.context.annotation.AnnotationConfigApplicationContext; + +/** + * Wrapper class to allow {@link org.onap.aai.migration.MigrationControllerInternal MigrationControllerInternal} + * to be run from a shell script + */ +public class MigrationController { + + /** + * The main method. 
+ * + * @param args + * the arguments + */ + public static void main(String[] args) { + + LoggingContext.init(); + LoggingContext.partnerName("Migration"); + LoggingContext.serviceName(AAIConstants.AAI_RESOURCES_MS); + LoggingContext.component("MigrationController"); + LoggingContext.targetEntity(AAIConstants.AAI_RESOURCES_MS); + LoggingContext.targetServiceName("main"); + LoggingContext.requestId(UUID.randomUUID().toString()); + LoggingContext.statusCode(StatusCode.COMPLETE); + LoggingContext.responseCode(LoggingContext.SUCCESS); + + AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext( + "org.onap.aai.config", + "org.onap.aai.setup" + ); + + LoaderFactory loaderFactory = ctx.getBean(LoaderFactory.class); + EdgeIngestor edgeIngestor = ctx.getBean(EdgeIngestor.class); + EdgeSerializer edgeSerializer = ctx.getBean(EdgeSerializer.class); + SchemaVersions schemaVersions = ctx.getBean(SchemaVersions.class); + + MigrationControllerInternal internal = new MigrationControllerInternal(loaderFactory, edgeIngestor, edgeSerializer, schemaVersions); + + try { + internal.run(args); + } catch (Exception e) { + e.printStackTrace(); + } + AAIGraph.getInstance().graphShutdown(); + System.exit(0); + } +} diff --git a/src/main/java/org/onap/aai/migration/MigrationControllerInternal.java b/src/main/java/org/onap/aai/migration/MigrationControllerInternal.java new file mode 100644 index 0000000..8ef0603 --- /dev/null +++ b/src/main/java/org/onap/aai/migration/MigrationControllerInternal.java @@ -0,0 +1,498 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.migration; + +import java.io.File; +import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Properties; +import java.util.Set; +import java.util.stream.Collectors; + +import org.apache.commons.configuration.ConfigurationException; +import org.apache.commons.configuration.PropertiesConfiguration; +import org.apache.commons.lang.exception.ExceptionUtils; +import org.apache.tinkerpop.gremlin.structure.Graph; +import org.apache.tinkerpop.gremlin.structure.io.IoCore; +import org.onap.aai.db.props.AAIProperties; +import org.onap.aai.dbmap.AAIGraph; +import org.onap.aai.dbmap.DBConnectionType; +import org.onap.aai.edges.EdgeIngestor; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.introspection.Loader; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.introspection.ModelType; +import org.onap.aai.serialization.db.EdgeSerializer; +import org.onap.aai.setup.SchemaVersions; +import org.onap.aai.setup.SchemaVersion; +import org.onap.aai.logging.LoggingContext; +import org.onap.aai.logging.LoggingContext.StatusCode; +import org.onap.aai.serialization.engines.QueryStyle; +import org.onap.aai.serialization.engines.JanusGraphDBEngine; +import org.onap.aai.serialization.engines.TransactionalGraphEngine; +import org.onap.aai.util.AAIConstants; +import org.onap.aai.util.FormatDate; +import org.reflections.Reflections; +import org.slf4j.MDC; + +import com.att.eelf.configuration.Configuration; +import com.att.eelf.configuration.EELFLogger; +import com.att.eelf.configuration.EELFManager; +import com.beust.jcommander.JCommander; +import com.beust.jcommander.Parameter; + +/** + * Runs a series of migrations from a defined directory based on the presence of + * the {@link org.onap.aai.migration.Enabled Enabled} annotation + * + * It will also write a record of the migrations run to the database. + */ +public class MigrationControllerInternal { + + private EELFLogger logger; + private final int DANGER_ZONE = 10; + public static final String VERTEX_TYPE = "migration-list-1707"; + private final List<String> resultsSummary = new ArrayList<>(); + private final List<NotificationHelper> notifications = new ArrayList<>(); + private static final String SNAPSHOT_LOCATION = AAIConstants.AAI_HOME + AAIConstants.AAI_FILESEP + "logs" + AAIConstants.AAI_FILESEP + "data" + AAIConstants.AAI_FILESEP + "migrationSnapshots"; + + private LoaderFactory loaderFactory; + private EdgeIngestor edgeIngestor; + private EdgeSerializer edgeSerializer; + private final SchemaVersions schemaVersions; + + public MigrationControllerInternal(LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions){ + this.loaderFactory = loaderFactory; + this.edgeIngestor = edgeIngestor; + this.edgeSerializer = edgeSerializer; + this.schemaVersions = schemaVersions; + } + + /** + * The main method. 
+ * + * @param args + * the arguments + */ + public void run(String[] args) { + // Set the logging file properties to be used by EELFManager + System.setProperty("aai.service.name", MigrationController.class.getSimpleName()); + Properties props = System.getProperties(); + props.setProperty(Configuration.PROPERTY_LOGGING_FILE_NAME, "migration-logback.xml"); + props.setProperty(Configuration.PROPERTY_LOGGING_FILE_PATH, AAIConstants.AAI_HOME_ETC_APP_PROPERTIES); + + logger = EELFManager.getInstance().getLogger(MigrationControllerInternal.class.getSimpleName()); + MDC.put("logFilenameAppender", MigrationController.class.getSimpleName()); + + boolean loadSnapshot = false; + + CommandLineArgs cArgs = new CommandLineArgs(); + + JCommander jCommander = new JCommander(cArgs, args); + jCommander.setProgramName(MigrationController.class.getSimpleName()); + + // Set the flag to load from a snapshot when a snapshot is provided and the + // graph storage backend is inmemory + if (cArgs.dataSnapshot != null && !cArgs.dataSnapshot.isEmpty()) { + try { + PropertiesConfiguration config = new PropertiesConfiguration(cArgs.config); + if (config.getString("storage.backend").equals("inmemory")) { + loadSnapshot = true; + System.setProperty("load.snapshot.file", "true"); + System.setProperty("snapshot.location", cArgs.dataSnapshot); + } + } catch (ConfigurationException e) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logAndPrint("ERROR: Could not load JanusGraph configuration.\n" + ExceptionUtils.getFullStackTrace(e)); + return; + } + } + System.setProperty("realtime.db.config", cArgs.config); + logAndPrint("\n\n---------- Connecting to Graph ----------"); + AAIGraph.getInstance(); + + logAndPrint("---------- Connection Established ----------"); + SchemaVersion version = schemaVersions.getDefaultVersion(); + QueryStyle queryStyle = QueryStyle.TRAVERSAL; + ModelType introspectorFactoryType = ModelType.MOXY; + Loader loader = loaderFactory.createLoaderForVersion(introspectorFactoryType, version); + TransactionalGraphEngine engine = new JanusGraphDBEngine(queryStyle, DBConnectionType.REALTIME, loader); + + if (cArgs.help) { + jCommander.usage(); + engine.rollback(); + return; + } + + Reflections reflections = new Reflections("org.onap.aai.migration"); + List<Class<? extends Migrator>> migratorClasses = new ArrayList<>(findClasses(reflections)); + // Displays the list of migration classes to be executed. Pass the flag "-l" followed by the class names + if (cArgs.list) { + listMigrationWithStatus(cArgs, migratorClasses, engine); + return; + } + + logAndPrint("---------- Looking for migration scripts to be executed. ----------"); + // Excludes migration classes when running migrations from the script. Pass the flag "-e" followed by the class names + if (!cArgs.excludeClasses.isEmpty()) { + migratorClasses = filterMigrationClasses(cArgs.excludeClasses, migratorClasses); + listMigrationWithStatus(cArgs, migratorClasses, engine); + } + List<Class<?
extends Migrator>> migratorClassesToRun = createMigratorList(cArgs, migratorClasses); + + sortList(migratorClassesToRun); + + if (!cArgs.scripts.isEmpty() && migratorClassesToRun.isEmpty()) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR); + logAndPrint("\tERROR: Failed to find migrations " + cArgs.scripts + "."); + logAndPrint("---------- Done ----------"); + LoggingContext.successStatusFields(); + } + + logAndPrint("\tFound " + migratorClassesToRun.size() + " migration scripts."); + logAndPrint("---------- Executing Migration Scripts ----------"); + + + if (!cArgs.skipPreMigrationSnapShot) { + takePreSnapshotIfRequired(engine, cArgs, migratorClassesToRun); + } + + for (Class<? extends Migrator> migratorClass : migratorClassesToRun) { + String name = migratorClass.getSimpleName(); + Migrator migrator; + if (migratorClass.isAnnotationPresent(Enabled.class)) { + + try { + engine.startTransaction(); + if (!cArgs.forced && hasAlreadyRun(name, engine)) { + logAndPrint("Migration " + name + " has already been run on this database and will not be executed again. Use -f to force execution"); + continue; + } + migrator = migratorClass + .getConstructor( + TransactionalGraphEngine.class, + LoaderFactory.class, + EdgeIngestor.class, + EdgeSerializer.class, + SchemaVersions.class + ).newInstance(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions); + } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException | SecurityException e) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logAndPrint("EXCEPTION caught initializing migration class " + migratorClass.getSimpleName() + ".\n" + ExceptionUtils.getFullStackTrace(e)); + LoggingContext.successStatusFields(); + engine.rollback(); + continue; + } + logAndPrint("\tRunning " + migratorClass.getSimpleName() + " migration script."); + logAndPrint("\t\t See " + System.getProperty("AJSC_HOME") + "/logs/migration/" + migratorClass.getSimpleName() + "/* for logs."); + MDC.put("logFilenameAppender", migratorClass.getSimpleName() + "/" + migratorClass.getSimpleName()); + + migrator.run(); + + commitChanges(engine, migrator, cArgs); + } else { + logAndPrint("\tSkipping " + migratorClass.getSimpleName() + " migration script because it has been disabled."); + } + } + MDC.put("logFilenameAppender", MigrationController.class.getSimpleName()); + for (NotificationHelper notificationHelper : notifications) { + try { + notificationHelper.triggerEvents(); + } catch (AAIException e) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.AVAILABILITY_TIMEOUT_ERROR); + logAndPrint("\tcould not trigger notification events"); + logger.error("could not trigger notification events", e); + LoggingContext.successStatusFields(); + } + } + logAndPrint("---------- Done ----------"); + + // Save a post-migration snapshot unless explicitly skipped + if (!cArgs.skipPostMigrationSnapShot) { + generateSnapshot(engine, "post"); + } + + outputResultsSummary(); + } + + /** + * Removes the classes excluded via the script command's "-e" flag from the + * list of migrations to run. + * + * @param excludeClasses + * : Classes to be excluded from migration + * @param migratorClasses + * : Classes to execute migration. + * @return the filtered list of migration classes + */ + private List<Class<? extends Migrator>> filterMigrationClasses( + List<String> excludeClasses, + List<Class<? extends Migrator>> migratorClasses) { + + List<Class<?
extends Migrator>> filteredMigratorClasses = migratorClasses + .stream() + .filter(migratorClass -> !excludeClasses.contains(migratorClass + .getSimpleName())).collect(Collectors.toList()); + + return filteredMigratorClasses; + } + + private void listMigrationWithStatus(CommandLineArgs cArgs, + List<Class<? extends Migrator>> migratorClasses, TransactionalGraphEngine engine) { + sortList(migratorClasses); + engine.startTransaction(); + System.out.println("---------- List of all migrations ----------"); + migratorClasses.forEach(migratorClass -> { + boolean enabledAnnotation = migratorClass.isAnnotationPresent(Enabled.class); + String enabled = enabledAnnotation ? "Enabled" : "Disabled"; + StringBuilder sb = new StringBuilder(); + sb.append(migratorClass.getSimpleName()); + sb.append(" in package "); + sb.append(migratorClass.getPackage().getName().substring(migratorClass.getPackage().getName().lastIndexOf('.')+1)); + sb.append(" is "); + sb.append(enabled); + sb.append(" "); + sb.append("[" + getDbStatus(migratorClass.getSimpleName(), engine) + "]"); + System.out.println(sb.toString()); + }); + engine.rollback(); + System.out.println("---------- Done ----------"); + } + + private String getDbStatus(String name, TransactionalGraphEngine engine) { + if (hasAlreadyRun(name, engine)) { + return "Already executed in this env"; + } + return "Will be run on next execution if Enabled"; + } + + private boolean hasAlreadyRun(String name, TransactionalGraphEngine engine) { + return engine.asAdmin().getReadOnlyTraversalSource().V().has(AAIProperties.NODE_TYPE, VERTEX_TYPE).has(name, true).hasNext(); + } + private Set<Class<? extends Migrator>> findClasses(Reflections reflections) { + Set<Class<? extends Migrator>> migratorClasses = reflections.getSubTypesOf(Migrator.class); + /* + * TODO: Change this to make sure only classes in the specific $release are added to the run list, + * or add an annotation like @Exclude, which folks would again need to remember to add. + */ + + migratorClasses.remove(PropertyMigrator.class); + migratorClasses.remove(EdgeMigrator.class); + return migratorClasses; + } + + + private void takePreSnapshotIfRequired(TransactionalGraphEngine engine, CommandLineArgs cArgs, List<Class<? extends Migrator>> migratorClassesToRun) { + + /*int sum = 0; + for (Class<? extends Migrator> migratorClass : migratorClassesToRun) { + if (migratorClass.isAnnotationPresent(Enabled.class)) { + sum += migratorClass.getAnnotation(MigrationPriority.class).value(); + } + } + + if (sum >= DANGER_ZONE) { + + logAndPrint("Entered Danger Zone. Taking snapshot."); + }*/ + + // always take a snapshot for now + + generateSnapshot(engine, "pre"); + + } + + + private List<Class<? extends Migrator>> createMigratorList(CommandLineArgs cArgs, + List<Class<? extends Migrator>> migratorClasses) { + List<Class<? extends Migrator>> migratorClassesToRun = new ArrayList<>(); + + for (Class<? extends Migrator> migratorClass : migratorClasses) { + if (cArgs.scripts.isEmpty() || cArgs.scripts.contains(migratorClass.getSimpleName())) { + migratorClassesToRun.add(migratorClass); + } + } + return migratorClassesToRun; + } + + + private void sortList(List<Class<?
extends Migrator>> migratorClasses) { + Collections.sort(migratorClasses, (m1, m2) -> { + try { + if (m1.getAnnotation(MigrationPriority.class).value() > m2.getAnnotation(MigrationPriority.class).value()) { + return 1; + } else if (m1.getAnnotation(MigrationPriority.class).value() < m2.getAnnotation(MigrationPriority.class).value()) { + return -1; + } else { + return m1.getSimpleName().compareTo(m2.getSimpleName()); + } + } catch (Exception e) { + return 0; + } + }); + } + + + private void generateSnapshot(TransactionalGraphEngine engine, String phase) { + + FormatDate fd = new FormatDate("yyyyMMddHHmm", "GMT"); + String dateStr= fd.getDateTime(); + String fileName = SNAPSHOT_LOCATION + File.separator + phase + "Migration." + dateStr + ".graphson"; + logAndPrint("Saving snapshot of graph " + phase + " migration to " + fileName); + Graph transaction = null; + try { + + Path pathToFile = Paths.get(fileName); + if (!pathToFile.toFile().exists()) { + Files.createDirectories(pathToFile.getParent()); + } + transaction = engine.startTransaction(); + transaction.io(IoCore.graphson()).writeGraph(fileName); + engine.rollback(); + } catch (IOException e) { + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.AVAILABILITY_TIMEOUT_ERROR); + logAndPrint("ERROR: Could not write in memory graph to " + phase + "Migration file. \n" + ExceptionUtils.getFullStackTrace(e)); + LoggingContext.successStatusFields(); + engine.rollback(); + } + + logAndPrint( phase + " migration snapshot saved to " + fileName); + } + /** + * Log and print. + * + * @param msg + * the msg + */ + protected void logAndPrint(String msg) { + System.out.println(msg); + logger.info(msg); + } + + /** + * Commit changes. + * + * @param engine + * the graph transaction + * @param migrator + * the migrator + * @param cArgs + */ + protected void commitChanges(TransactionalGraphEngine engine, Migrator migrator, CommandLineArgs cArgs) { + + String simpleName = migrator.getClass().getSimpleName(); + String message; + if (migrator.getStatus().equals(Status.FAILURE)) { + message = "Migration " + simpleName + " Failed. Rolling back."; + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logAndPrint("\t" + message); + LoggingContext.successStatusFields(); + migrator.rollback(); + } else if (migrator.getStatus().equals(Status.CHECK_LOGS)) { + message = "Migration " + simpleName + " encountered an anomaly, check logs. Rolling back."; + LoggingContext.statusCode(StatusCode.ERROR); + LoggingContext.responseCode(LoggingContext.DATA_ERROR); + logAndPrint("\t" + message); + LoggingContext.successStatusFields(); + migrator.rollback(); + } else { + MDC.put("logFilenameAppender", simpleName + "/" + simpleName); + + if (cArgs.commit) { + if (!engine.asAdmin().getTraversalSource().V().has(AAIProperties.NODE_TYPE, VERTEX_TYPE).hasNext()) { + engine.asAdmin().getTraversalSource().addV(AAIProperties.NODE_TYPE, VERTEX_TYPE).iterate(); + } + engine.asAdmin().getTraversalSource().V().has(AAIProperties.NODE_TYPE, VERTEX_TYPE) + .property(simpleName, true).iterate(); + MDC.put("logFilenameAppender", MigrationController.class.getSimpleName()); + notifications.add(migrator.getNotificationHelper()); + migrator.commit(); + message = "Migration " + simpleName + " Succeeded. Changes Committed."; + logAndPrint("\t"+ message +"\t"); + } else { + message = "--commit not specified. 
Not committing changes for " + simpleName + " to database."; + logAndPrint("\t" + message); + migrator.rollback(); + } + + } + + resultsSummary.add(message); + + } + + private void outputResultsSummary() { + logAndPrint("---------------------------------"); + logAndPrint("-------------Summary-------------"); + for (String result : resultsSummary) { + logAndPrint(result); + } + logAndPrint("---------------------------------"); + logAndPrint("---------------------------------"); + } + +} + +class CommandLineArgs { + + @Parameter(names = "--help", help = true) + public boolean help; + + @Parameter(names = "-c", description = "location of configuration file") + public String config; + + @Parameter(names = "-m", description = "names of migration scripts") + public List<String> scripts = new ArrayList<>(); + + @Parameter(names = "-l", description = "list the status of migrations") + public boolean list = false; + + @Parameter(names = "-d", description = "location of data snapshot", hidden = true) + public String dataSnapshot; + + @Parameter(names = "-f", description = "force migrations to be rerun") + public boolean forced = false; + + @Parameter(names = "--commit", description = "commit changes to graph") + public boolean commit = false; + + @Parameter(names = "-e", description = "exclude list of migrator classes") + public List<String> excludeClasses = new ArrayList<>(); + + @Parameter(names = "--skipPreMigrationSnapShot", description = "skips taking the PRE migration snapshot") + public boolean skipPreMigrationSnapShot = false; + + @Parameter(names = "--skipPostMigrationSnapShot", description = "skips taking the POST migration snapshot") + public boolean skipPostMigrationSnapShot = false; +} diff --git a/src/main/java/org/onap/aai/migration/MigrationDangerRating.java b/src/main/java/org/onap/aai/migration/MigrationDangerRating.java new file mode 100644 index 0000000..1d82dc3 --- /dev/null +++ b/src/main/java/org/onap/aai/migration/MigrationDangerRating.java @@ -0,0 +1,41 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
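The CommandLineArgs class above is bound by JCommander inside MigrationControllerInternal.run. A small sketch of that binding, assuming the same jcommander dependency already on the classpath; the argument values are illustrative:

    package org.onap.aai.migration;

    import com.beust.jcommander.JCommander;

    // Parse an argument vector into CommandLineArgs the same way
    // MigrationControllerInternal.run does.
    public class ArgsParseExample {
        public static void main(String[] args) {
            CommandLineArgs cArgs = new CommandLineArgs();
            new JCommander(cArgs, "-m", "MigrateFoo", "-e", "MigrateBar", "-f");
            System.out.println(cArgs.scripts);        // [MigrateFoo]
            System.out.println(cArgs.excludeClasses); // [MigrateBar]
            System.out.println(cArgs.forced);         // true
        }
    }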
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.migration; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + + +/** + * Used to rate the danger of a migration picked up by the {@link org.onap.aai.migration.MigrationControllerInternal MigrationController} + * + * The larger the number, the more dangerous the migration. + * + * Range is 0-10 + */ +@Target(ElementType.TYPE) +@Retention(value = RetentionPolicy.RUNTIME) +public @interface MigrationDangerRating { + + int value(); + +} diff --git a/src/main/java/org/onap/aai/migration/MigrationPriority.java b/src/main/java/org/onap/aai/migration/MigrationPriority.java new file mode 100644 index 0000000..d9e84b8 --- /dev/null +++ b/src/main/java/org/onap/aai/migration/MigrationPriority.java @@ -0,0 +1,41 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.migration; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + + +/** + * Sets the priority of a migration picked up by the {@link org.onap.aai.migration.MigrationControllerInternal MigrationController} + * + * The priority of the migration. + * + * A lower number means a higher priority. + */ +@Target(ElementType.TYPE) +@Retention(value = RetentionPolicy.RUNTIME) +public @interface MigrationPriority { + + int value(); + +} diff --git a/src/main/java/org/onap/aai/migration/Migrator.java b/src/main/java/org/onap/aai/migration/Migrator.java new file mode 100644 index 0000000..106d5e4 --- /dev/null +++ b/src/main/java/org/onap/aai/migration/Migrator.java @@ -0,0 +1,385 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
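The two annotations above drive run ordering in the controller's sortList: lower MigrationPriority values run first, with ties broken by class name. A runnable sketch of that ordering, using two hypothetical annotated classes:

    package org.onap.aai.migration;

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;

    // Demonstrates the ordering applied by MigrationControllerInternal.sortList
    // for two hypothetical annotated classes.
    public class PriorityOrderExample {
        @MigrationPriority(10) @MigrationDangerRating(1) static class LateMigration {}
        @MigrationPriority(1)  @MigrationDangerRating(1) static class EarlyMigration {}

        public static void main(String[] args) {
            List<Class<?>> classes = new ArrayList<>(Arrays.asList(LateMigration.class, EarlyMigration.class));
            classes.sort(Comparator
                    .comparingInt((Class<?> c) -> c.getAnnotation(MigrationPriority.class).value())
                    .thenComparing(Class::getSimpleName));
            System.out.println(classes); // EarlyMigration (priority 1) before LateMigration (priority 10)
        }
    }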
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.migration; + +import java.io.File; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Optional; +import java.util.UUID; + +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__; +import org.apache.tinkerpop.gremlin.structure.Direction; +import org.apache.tinkerpop.gremlin.structure.Edge; +import org.apache.tinkerpop.gremlin.structure.Property; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.apache.tinkerpop.gremlin.structure.VertexProperty; +import org.json.JSONException; +import org.json.JSONObject; +import org.onap.aai.edges.EdgeIngestor; +import org.onap.aai.edges.enums.EdgeType; +import org.onap.aai.edges.exceptions.AmbiguousRuleChoiceException; +import org.onap.aai.edges.exceptions.EdgeRuleNotFoundException; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.introspection.Introspector; +import org.onap.aai.introspection.Loader; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.introspection.ModelType; +import org.onap.aai.serialization.db.DBSerializer; +import org.onap.aai.serialization.db.EdgeSerializer; +import org.onap.aai.serialization.db.exceptions.NoEdgeRuleFoundException; +import org.onap.aai.serialization.engines.TransactionalGraphEngine; +import org.onap.aai.setup.SchemaVersion; +import org.onap.aai.setup.SchemaVersions; + +import com.att.eelf.configuration.EELFLogger; +import com.att.eelf.configuration.EELFManager; + +/** + * This class defines an A&AI Migration + */ +@MigrationPriority(0) +@MigrationDangerRating(0) +public abstract class Migrator implements Runnable { + + protected EELFLogger logger = null; + + protected DBSerializer serializer = null; + protected Loader loader = null; + + protected TransactionalGraphEngine engine; + protected NotificationHelper notificationHelper; + + protected EdgeSerializer edgeSerializer; + protected EdgeIngestor edgeIngestor; + + protected LoaderFactory loaderFactory; + protected SchemaVersions schemaVersions; + + protected static final String MIGRATION_ERROR = "Migration Error: "; + protected static final String MIGRATION_SUMMARY_COUNT = "Migration Summary Count: "; + + /** + * Instantiates a new migrator. + * + * @param engine the transactional graph engine + * @param loaderFactory the loader factory + * @param edgeIngestor the edge ingestor + * @param edgeSerializer the edge serializer + * @param schemaVersions the schema versions + */ + public Migrator(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions){ + this.engine = engine; + this.loaderFactory = loaderFactory; + this.edgeIngestor = edgeIngestor; + this.edgeSerializer = edgeSerializer; + this.schemaVersions = schemaVersions; + initDBSerializer(); + this.notificationHelper = new NotificationHelper(loader, serializer, loaderFactory, schemaVersions, engine, "AAI-MIGRATION", this.getMigrationName()); + logger = EELFManager.getInstance().getLogger(this.getClass().getSimpleName()); + logger.info("\tInitialization of " + this.getClass().getSimpleName() + " migration script complete."); + } + + /** + * Gets the status. + * + * @return the status + */ + public abstract Status getStatus(); + + /** + * Rollback.
+ */ + public void rollback() { + engine.rollback(); + } + + /** + * Commit. + */ + public void commit() { + engine.commit(); + } + + /** + * Creates files containing vertices for DMaaP event generation + * @param dmaapMsgList + */ + public void createDmaapFiles(List<String> dmaapMsgList) { + String fileName = getMigrationName() + "-" + UUID.randomUUID(); + String logDirectory = System.getProperty("AJSC_HOME") + "/logs/migration/dmaapEvents"; + + File f = new File(logDirectory); + f.mkdirs(); + + if (dmaapMsgList.size() > 0) { + try { + Files.write(Paths.get(logDirectory + "/" + fileName), (Iterable<String>) dmaapMsgList.stream()::iterator); + } catch (IOException e) { + logger.error("Unable to generate file with dmaap msgs for " + getMigrationName(), e); + } + } else { + logger.info("No dmaap msgs detected for " + getMigrationName()); + } + } + + /** + * Creates files containing data for DMaaP delete event generation + * @param dmaapDeleteIntrospectorList + */ + public void createDmaapFilesForDelete(List<Introspector> dmaapDeleteIntrospectorList) { + try { + logger.info("dmaapDeleteIntrospectorList :: " + dmaapDeleteIntrospectorList.size()); + String fileName = "DELETE-" + getMigrationName() + "-" + UUID.randomUUID(); + String logDirectory = System.getProperty("AJSC_HOME") + "/logs/migration/dmaapEvents/"; + File f = new File(logDirectory); + f.mkdirs(); + + try { + Files.createFile(Paths.get(logDirectory + "/" + fileName)); + } catch (Exception e) { + logger.error("Unable to create file for dmaap msgs for " + getMigrationName(), e); + } + + if (dmaapDeleteIntrospectorList.size() > 0) { + dmaapDeleteIntrospectorList.stream().forEach(svIntr -> { + try { + // each line carries the name, URI, and marshalled payload, separated by #@# + String str = svIntr.marshal(false); + String finalStr = svIntr.getName() + "#@#" + svIntr.getURI() + "#@#" + str + "\n"; + Files.write(Paths.get(logDirectory + "/" + fileName), finalStr.getBytes(), StandardOpenOption.APPEND); + } catch (Exception e) { + logger.error("Unable to generate file with dmaap msgs for " + getMigrationName(), e); + } + }); + } + } catch (Exception e) { + logger.error("Unable to generate file with dmaap msgs for " + getMigrationName(), e); + } + } + + /** + * As string. + * + * @param v the v + * @return the string + */ + protected String asString(Vertex v) { + final JSONObject result = new JSONObject(); + Iterator<VertexProperty<Object>> properties = v.properties(); + Property<Object> pk = null; + try { + while (properties.hasNext()) { + pk = properties.next(); + result.put(pk.key(), pk.value()); + } + } catch (JSONException e) { + logger.error("Warning error reading vertex: " + e); + } + + return result.toString(); + } + + /** + * As string.
+ * + * @param edge the edge + * @return the string + */ + protected String asString(Edge edge) { + final JSONObject result = new JSONObject(); + Iterator<Property<Object>> properties = edge.properties(); + Property<Object> pk = null; + try { + while (properties.hasNext()) { + pk = properties.next(); + result.put(pk.key(), pk.value()); + } + } catch (JSONException e) { + logger.error("Warning error reading edge: " + e); + } + + return result.toString(); + } + + /** + * + * @param v the vertex to print + * @param numLeadingTabs number of leading \t characters + * @return the formatted string + */ + protected String toStringForPrinting(Vertex v, int numLeadingTabs) { + String prefix = String.join("", Collections.nCopies(numLeadingTabs, "\t")); + if (v == null) { + return ""; + } + final StringBuilder sb = new StringBuilder(); + sb.append(prefix + v + "\n"); + v.properties().forEachRemaining(prop -> sb.append(prefix + prop + "\n")); + return sb.toString(); + } + + /** + * + * @param e the edge to print + * @param numLeadingTabs number of leading \t characters + * @return the formatted string + */ + protected String toStringForPrinting(Edge e, int numLeadingTabs) { + String prefix = String.join("", Collections.nCopies(numLeadingTabs, "\t")); + if (e == null) { + return ""; + } + final StringBuilder sb = new StringBuilder(); + sb.append(prefix + e + "\n"); + sb.append(prefix + e.label() + "\n"); + e.properties().forEachRemaining(prop -> sb.append(prefix + "\t" + prop + "\n")); + return sb.toString(); + } + + /** + * Checks for edge between. + * + * @param a the a vertex + * @param b the b vertex + * @param d the direction of the edge from a + * @param edgeLabel the edge label + * @return true, if successful + */ + protected boolean hasEdgeBetween(Vertex a, Vertex b, Direction d, String edgeLabel) { + + if (d.equals(Direction.OUT)) { + return engine.asAdmin().getReadOnlyTraversalSource().V(a).out(edgeLabel).where(__.otherV().hasId(b)).hasNext(); + } else { + return engine.asAdmin().getReadOnlyTraversalSource().V(a).in(edgeLabel).where(__.otherV().hasId(b)).hasNext(); + } + + } + + /** + * Creates the edge + * + * @param type the edge type - COUSIN or TREE + * @param out the out + * @param in the in + * @return the edge + */ + protected Edge createEdge(EdgeType type, Vertex out, Vertex in) throws AAIException { + Edge newEdge = null; + try { + if (type.equals(EdgeType.COUSIN)){ + newEdge = edgeSerializer.addEdge(this.engine.asAdmin().getTraversalSource(), out, in); + } else { + newEdge = edgeSerializer.addTreeEdge(this.engine.asAdmin().getTraversalSource(), out, in); + } + } catch (NoEdgeRuleFoundException e) { + throw new AAIException("AAI_6129", e); + } + return newEdge; + } + + /** + * Creates the private edge + * + * @param out the out + * @param in the in + * @return the edge + */ + protected Edge createPrivateEdge(Vertex out, Vertex in) throws AAIException { + Edge newEdge = null; + try { + newEdge = edgeSerializer.addPrivateEdge(this.engine.asAdmin().getTraversalSource(), out, in, null); + } catch (EdgeRuleNotFoundException | AmbiguousRuleChoiceException e) { + throw new AAIException("AAI_6129", e); + } + return newEdge; + } + + /** + * Creates the TREE edge + * + * @param out the out + * @param in the in + * @return the edge + */ + protected Edge createTreeEdge(Vertex out, Vertex in) throws AAIException { + Edge newEdge = createEdge(EdgeType.TREE, out, in); + return newEdge; + } + + /** + * Creates the COUSIN edge + * + * @param out the out + * @param in the in + * @return the edge + */ + protected Edge createCousinEdge(Vertex out, Vertex in) throws AAIException { + Edge newEdge =
createEdge(EdgeType.COUSIN, out, in); + return newEdge; + } + + private void initDBSerializer() { + SchemaVersion version = schemaVersions.getDefaultVersion(); + ModelType introspectorFactoryType = ModelType.MOXY; + loader = loaderFactory.createLoaderForVersion(introspectorFactoryType, version); + try { + this.serializer = new DBSerializer(version, this.engine, introspectorFactoryType, this.getMigrationName()); + } catch (AAIException e) { + throw new RuntimeException("could not create serializer", e); + } + } + + /** + * These are the node types you would like your traversal to process + * @return the affected node types, if any + */ + public abstract Optional<String[]> getAffectedNodeTypes(); + + /** + * Used as the "fromAppId" when modifying vertices + * @return the migration name + */ + public abstract String getMigrationName(); + + /** + * Updates all internal vertex properties + * @param v the vertex + * @param isNewVertex whether the vertex was newly created + */ + protected void touchVertexProperties(Vertex v, boolean isNewVertex) { + this.serializer.touchStandardVertexProperties(v, isNewVertex); + } + + public NotificationHelper getNotificationHelper() { + return this.notificationHelper; + } +} diff --git a/src/main/java/org/onap/aai/migration/NotificationHelper.java b/src/main/java/org/onap/aai/migration/NotificationHelper.java new file mode 100644 index 0000000..ff5c030 --- /dev/null +++ b/src/main/java/org/onap/aai/migration/NotificationHelper.java @@ -0,0 +1,118 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
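The abstract contract of the Migrator class above is easiest to see in a skeleton subclass. The class below is a hypothetical example, not part of the commit; the five-argument constructor signature matters because MigrationControllerInternal instantiates migrators reflectively with exactly those parameter types.

    package org.onap.aai.migration;

    import java.util.Optional;
    import org.onap.aai.edges.EdgeIngestor;
    import org.onap.aai.introspection.LoaderFactory;
    import org.onap.aai.serialization.db.EdgeSerializer;
    import org.onap.aai.serialization.engines.TransactionalGraphEngine;
    import org.onap.aai.setup.SchemaVersions;

    // Hypothetical do-nothing migration illustrating the Migrator contract.
    @Enabled
    @MigrationPriority(20)
    @MigrationDangerRating(1)
    public class MigrateNothing extends Migrator {

        private boolean success = true;

        public MigrateNothing(TransactionalGraphEngine engine, LoaderFactory loaderFactory,
                              EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer,
                              SchemaVersions schemaVersions) {
            super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions);
        }

        @Override
        public void run() {
            // a real migration would traverse engine.asAdmin().getTraversalSource() here
            logger.info("nothing to migrate");
        }

        @Override
        public Status getStatus() {
            return success ? Status.SUCCESS : Status.FAILURE;
        }

        @Override
        public Optional<String[]> getAffectedNodeTypes() {
            return Optional.empty();
        }

        @Override
        public String getMigrationName() {
            return "MigrateNothing";
        }
    }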
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.migration; + +import java.io.UnsupportedEncodingException; +import java.net.URI; +import java.util.HashMap; +import java.util.List; + +import javax.ws.rs.core.Response.Status; + +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.introspection.Introspector; +import org.onap.aai.introspection.Loader; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.introspection.exceptions.AAIUnknownObjectException; +import org.onap.aai.rest.ueb.UEBNotification; +import org.onap.aai.serialization.db.DBSerializer; +import org.onap.aai.serialization.engines.TransactionalGraphEngine; +import org.onap.aai.serialization.engines.query.QueryEngine; + +import com.att.eelf.configuration.EELFLogger; +import com.att.eelf.configuration.EELFManager; +import org.onap.aai.setup.SchemaVersions; + +/** + * Allows for DMaaP notifications from Migrations + */ +public class NotificationHelper { + + private static final EELFLogger LOGGER = EELFManager.getInstance().getLogger(NotificationHelper.class); + protected final DBSerializer serializer; + protected final Loader loader; + protected final TransactionalGraphEngine engine; + protected final String transactionId; + protected final String sourceOfTruth; + protected final UEBNotification notification; + + public NotificationHelper(Loader loader, DBSerializer serializer, LoaderFactory loaderFactory, SchemaVersions schemaVersions, TransactionalGraphEngine engine, String transactionId, String sourceOfTruth) { + this.loader = loader; + this.serializer = serializer; + this.engine = engine; + this.transactionId = transactionId; + this.sourceOfTruth = sourceOfTruth; + this.notification = new UEBNotification(loader, loaderFactory, schemaVersions); + } + + public void addEvent(Vertex v, Introspector obj, EventAction action, URI uri, String basePath) throws UnsupportedEncodingException, AAIException { + HashMap<String, Introspector> relatedObjects = new HashMap<>(); + Status status = mapAction(action); + + if (!obj.isTopLevel()) { + relatedObjects = this.getRelatedObjects(serializer, engine.getQueryEngine(), v); + } + notification.createNotificationEvent(transactionId, sourceOfTruth, status, uri, obj, relatedObjects, basePath); + + } + + public void addDeleteEvent(String transactionId, String sourceOfTruth, EventAction action, URI uri, Introspector obj, HashMap relatedObjects,String basePath) throws UnsupportedEncodingException, AAIException { + Status status = mapAction(action); + notification.createNotificationEvent(transactionId, sourceOfTruth, status, uri, obj, relatedObjects, basePath); + + } + + private HashMap<String, Introspector> getRelatedObjects(DBSerializer serializer, QueryEngine queryEngine, Vertex v) throws AAIException { + HashMap<String, Introspector> relatedVertices = new HashMap<>(); + List<Vertex> vertexChain = queryEngine.findParents(v); + for (Vertex vertex : vertexChain) { + try { + final Introspector vertexObj = serializer.getVertexProperties(vertex); + relatedVertices.put(vertexObj.getObjectId(), vertexObj); + } catch (AAIUnknownObjectException | UnsupportedEncodingException e) { + LOGGER.warn("Unable to get vertex properties, partial list of related vertices returned"); + } + + } + + return relatedVertices; + } + + private Status mapAction(EventAction action) { + if (EventAction.CREATE.equals(action)) { + return Status.CREATED; + } else if 
(EventAction.UPDATE.equals(action)) { + return Status.OK; + } else if (EventAction.DELETE.equals(action)) { + return Status.NO_CONTENT; + } else { + return Status.OK; + } + } + + public void triggerEvents() throws AAIException { + notification.triggerEvents(); + } + + public UEBNotification getNotifications() { + return this.notification; + } +} diff --git a/src/main/java/org/onap/aai/migration/PropertyMigrator.java b/src/main/java/org/onap/aai/migration/PropertyMigrator.java new file mode 100644 index 0000000..4599243 --- /dev/null +++ b/src/main/java/org/onap/aai/migration/PropertyMigrator.java @@ -0,0 +1,146 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.migration; + +import java.util.Optional; + +import org.apache.tinkerpop.gremlin.process.traversal.P; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.onap.aai.db.props.AAIProperties; +import org.onap.aai.edges.EdgeIngestor; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.serialization.db.EdgeSerializer; +import org.onap.aai.serialization.engines.TransactionalGraphEngine; + +import org.janusgraph.core.Cardinality; +import org.janusgraph.core.PropertyKey; +import org.janusgraph.core.schema.JanusGraphManagement; +import org.onap.aai.setup.SchemaVersions; + +/** + * A migration template for migrating a property from one name to another + */ +@MigrationPriority(0) +@MigrationDangerRating(1) +public abstract class PropertyMigrator extends Migrator { + + protected String OLD_FIELD; + protected String NEW_FIELD; + protected Integer changedVertexCount; + protected Class<?> fieldType; + protected Cardinality cardinality; + protected final JanusGraphManagement graphMgmt; + + + public PropertyMigrator(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions){ + super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions); + this.changedVertexCount = 0; + this.graphMgmt = engine.asAdmin().getManagementSystem(); + } + + public void initialize(String oldName, String newName, Class<?> type, Cardinality cardinality){ + this.OLD_FIELD = oldName; + this.NEW_FIELD = newName; + this.fieldType = type; + this.cardinality = cardinality; + } + + /** + * Do not override this method as an inheritor of this class + */ + @Override + public void run() { + logger.info("-------- Starting PropertyMigrator for node type " + P.within(this.getAffectedNodeTypes().get()) + + " from 
property " + OLD_FIELD + " to " + NEW_FIELD + " --------"); + modifySchema(); + executeModifyOperation(); + logger.info(Migrator.MIGRATION_SUMMARY_COUNT + changedVertexCount + " vertices modified."); + } + + protected void modifySchema() { + this.addIndex(this.addProperty()); + graphMgmt.commit(); + } + + /** + * This is where inheritors should add their logic + */ + protected void executeModifyOperation() { + changePropertyName(); + } + + protected void changePropertyName() { + GraphTraversal<Vertex, Vertex> g = this.engine.asAdmin().getTraversalSource().V(); + if (this.getAffectedNodeTypes().isPresent()) { + g.has(AAIProperties.NODE_TYPE, P.within(this.getAffectedNodeTypes().get())); + } + g.has(OLD_FIELD).sideEffect(t -> { + final Vertex v = t.get(); + logger.info("Migrating property for vertex " + v.toString()); + final String value = v.value(OLD_FIELD); + v.property(OLD_FIELD).remove(); + v.property(NEW_FIELD, value); + this.touchVertexProperties(v, false); + this.changedVertexCount += 1; + logger.info(v.toString() + " : Migrated property " + OLD_FIELD + " to " + NEW_FIELD + " with value = " + value); + }).iterate(); + } + + @Override + public Status getStatus() { + GraphTraversal<Vertex, Vertex> g = this.engine.asAdmin().getTraversalSource().V(); + if (this.getAffectedNodeTypes().isPresent()) { + g.has(AAIProperties.NODE_TYPE, P.within(this.getAffectedNodeTypes().get())); + } + long result = g.has(OLD_FIELD).count().next(); + if (result == 0) { + return Status.SUCCESS; + } else { + return Status.FAILURE; + } + } + + protected Optional<PropertyKey> addProperty() { + + if (!graphMgmt.containsPropertyKey(this.NEW_FIELD)) { + logger.info(" PropertyKey [" + this.NEW_FIELD + "] created in the DB. "); + return Optional.of(graphMgmt.makePropertyKey(this.NEW_FIELD).dataType(this.fieldType).cardinality(this.cardinality) + .make()); + } else { + logger.info(" PropertyKey [" + this.NEW_FIELD + "] already existed in the DB. "); + return Optional.empty(); + } + + } + + protected void addIndex(Optional<PropertyKey> key) { + if (isIndexed() && key.isPresent()) { + if (graphMgmt.containsGraphIndex(key.get().name())) { + logger.debug(" Index [" + key.get().name() + "] already existed in the DB. "); + } else { + logger.info("Add index for PropertyKey: [" + key.get().name() + "]"); + graphMgmt.buildIndex(key.get().name(), Vertex.class).addKey(key.get()).buildCompositeIndex(); + } + } + } + public abstract boolean isIndexed(); + +} diff --git a/src/main/java/org/onap/aai/migration/Status.java b/src/main/java/org/onap/aai/migration/Status.java new file mode 100644 index 0000000..0338594 --- /dev/null +++ b/src/main/java/org/onap/aai/migration/Status.java @@ -0,0 +1,29 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
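A hypothetical PropertyMigrator subclass wiring the template above; the property names and node type are illustrative. The subclass calls initialize with the old key, new key, value type, and JanusGraph cardinality, and supplies the pieces the template leaves abstract:

    package org.onap.aai.migration;

    import java.util.Optional;
    import org.janusgraph.core.Cardinality;
    import org.onap.aai.edges.EdgeIngestor;
    import org.onap.aai.introspection.LoaderFactory;
    import org.onap.aai.serialization.db.EdgeSerializer;
    import org.onap.aai.serialization.engines.TransactionalGraphEngine;
    import org.onap.aai.setup.SchemaVersions;

    // Hypothetical rename of "old-prop" to "new-prop" on pserver vertices.
    @Enabled
    @MigrationPriority(10)
    @MigrationDangerRating(1)
    public class MigrateExamplePropertyName extends PropertyMigrator {

        public MigrateExamplePropertyName(TransactionalGraphEngine engine, LoaderFactory loaderFactory,
                                          EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer,
                                          SchemaVersions schemaVersions) {
            super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions);
            // wire the template: old name, new name, value type, cardinality
            initialize("old-prop", "new-prop", String.class, Cardinality.SINGLE);
        }

        @Override
        public Optional<String[]> getAffectedNodeTypes() {
            return Optional.of(new String[]{"pserver"});
        }

        @Override
        public String getMigrationName() {
            return "MigrateExamplePropertyName";
        }

        @Override
        public boolean isIndexed() {
            return true; // build a composite index on the new property key
        }
    }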
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.migration; + +/** + * Defines the status of the completed migration + */ +public enum Status { + SUCCESS, + CHECK_LOGS, + FAILURE +} diff --git a/src/main/java/org/onap/aai/migration/ValueMigrator.java b/src/main/java/org/onap/aai/migration/ValueMigrator.java new file mode 100644 index 0000000..6d02563 --- /dev/null +++ b/src/main/java/org/onap/aai/migration/ValueMigrator.java @@ -0,0 +1,104 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.migration; + +import java.util.Map; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.onap.aai.db.props.AAIProperties; +import org.onap.aai.edges.EdgeIngestor; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.serialization.db.EdgeSerializer; +import org.onap.aai.serialization.engines.TransactionalGraphEngine; +import org.janusgraph.core.schema.JanusGraphManagement; +import org.onap.aai.setup.SchemaVersions; + +/** + * A migration template for filling in default values that are missing or are empty + */ +@MigrationPriority(0) +@MigrationDangerRating(1) +public abstract class ValueMigrator extends Migrator { + + protected final Map<String, Map<String, ?>> propertyValuePairByNodeType; + protected final Boolean updateExistingValues; + protected final JanusGraphManagement graphMgmt; + + /** + * + * @param engine + * @param propertyValuePairByNodeType - format {nodeType: { property: newValue}} + * @param updateExistingValues - if true, updates the value regardless if it already exists + */ + public ValueMigrator(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions, Map propertyValuePairByNodeType, Boolean updateExistingValues) { + super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions); + this.propertyValuePairByNodeType = propertyValuePairByNodeType; + this.updateExistingValues = updateExistingValues; + this.graphMgmt = engine.asAdmin().getManagementSystem(); + } + + /** + * Do not override this method as an inheritor of this class + */ + @Override + public void run() { + updateValues(); + } + + protected void updateValues() { + for (Map.Entry<String, Map<String, ?>> entry: propertyValuePairByNodeType.entrySet()) { + String nodeType = entry.getKey(); + 
Map<String, ?> propertyValuePair = entry.getValue(); + for (Map.Entry<String, ?> pair : propertyValuePair.entrySet()) { + String property = pair.getKey(); + Object newValue = pair.getValue(); + try { + GraphTraversal<Vertex, Vertex> g = this.engine.asAdmin().getTraversalSource().V() + .has(AAIProperties.NODE_TYPE, nodeType); + while (g.hasNext()) { + Vertex v = g.next(); + if (v.property(property).isPresent() && !updateExistingValues) { + String propertyValue = v.property(property).value().toString(); + if (propertyValue.isEmpty()) { + v.property(property, newValue); + logger.info(String.format("Node Type %s: Property %s is empty, adding value %s", + nodeType, property, newValue.toString())); + this.touchVertexProperties(v, false); + } else { + logger.info(String.format("Node Type %s: Property %s value already exists - skipping", + nodeType, property)); + } + } else { + logger.info(String.format("Node Type %s: Property %s does not exist or " + + "updateExistingValues flag is set to True - adding the property with value %s", + nodeType, property, newValue.toString())); + v.property(property, newValue); + this.touchVertexProperties(v, false); + } + } + } catch (Exception e) { + logger.error(String.format("caught exception updating aai-node-type %s's property %s's value to " + + "%s: %s", nodeType, property, newValue.toString(), e.getMessage())); + logger.error(e.getMessage()); + } + } + } + } +} diff --git a/src/main/java/org/onap/aai/migration/VertexMerge.java b/src/main/java/org/onap/aai/migration/VertexMerge.java new file mode 100644 index 0000000..abf19be --- /dev/null +++ b/src/main/java/org/onap/aai/migration/VertexMerge.java @@ -0,0 +1,255 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
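A hypothetical ValueMigrator subclass of the template above, defaulting in-maint to false on two node types; the names and values are illustrative. The controller still instantiates it through the standard five-argument constructor, which forwards the value map and overwrite flag to super:

    package org.onap.aai.migration;

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Optional;
    import org.onap.aai.edges.EdgeIngestor;
    import org.onap.aai.introspection.LoaderFactory;
    import org.onap.aai.serialization.db.EdgeSerializer;
    import org.onap.aai.serialization.engines.TransactionalGraphEngine;
    import org.onap.aai.setup.SchemaVersions;

    // Hypothetical default-value migration: {nodeType: {property: newValue}}.
    @Enabled
    @MigrationPriority(15)
    @MigrationDangerRating(1)
    public class MigrateExampleDefaults extends ValueMigrator {

        private static final Map<String, Map<String, ?>> DEFAULTS = new HashMap<>();
        static {
            Map<String, Boolean> inMaint = new HashMap<>();
            inMaint.put("in-maint", Boolean.FALSE);
            DEFAULTS.put("pserver", inMaint);
            DEFAULTS.put("vserver", inMaint);
        }

        public MigrateExampleDefaults(TransactionalGraphEngine engine, LoaderFactory loaderFactory,
                                      EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer,
                                      SchemaVersions schemaVersions) {
            // false: do not overwrite values that already exist
            super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions, DEFAULTS, false);
        }

        @Override
        public Status getStatus() {
            return Status.SUCCESS; // ValueMigrator logs failures rather than tracking them
        }

        @Override
        public Optional<String[]> getAffectedNodeTypes() {
            return Optional.of(new String[]{"pserver", "vserver"});
        }

        @Override
        public String getMigrationName() {
            return "MigrateExampleDefaults";
        }
    }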
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.migration; + +import java.io.UnsupportedEncodingException; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.Optional; +import java.util.Set; + +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.apache.tinkerpop.gremlin.structure.Vertex; + +import org.onap.aai.db.props.AAIProperties; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.introspection.Introspector; +import org.onap.aai.introspection.Loader; +import org.onap.aai.introspection.exceptions.AAIUnknownObjectException; +import org.onap.aai.serialization.db.DBSerializer; +import org.onap.aai.edges.enums.EdgeType; +import org.onap.aai.serialization.db.EdgeSerializer; +import org.onap.aai.serialization.engines.TransactionalGraphEngine; +import com.att.eelf.configuration.EELFLogger; +import com.att.eelf.configuration.EELFManager; + +/** + * This class recursively merges two vertices passed in. + * <br> + * You can start with any two vertices; their children are then merged based on the equality of their keys. + * + */ +public class VertexMerge { + + private final EELFLogger logger = EELFManager.getInstance().getLogger(this.getClass().getSimpleName()); + + private final GraphTraversalSource g; + private final TransactionalGraphEngine engine; + private final DBSerializer serializer; + private final EdgeSerializer edgeSerializer; + private final Loader loader; + private final NotificationHelper notificationHelper; + private final boolean hasNotifications; + + private VertexMerge(Builder builder) { + this.engine = builder.getEngine(); + this.serializer = builder.getSerializer(); + this.g = engine.asAdmin().getTraversalSource(); + this.edgeSerializer = builder.getEdgeSerializer(); + this.loader = builder.getLoader(); + this.notificationHelper = builder.getHelper(); + this.hasNotifications = builder.isHasNotifications(); + } + + /** + * Merges vertices.
forceCopy is a map of the form [{aai-node-type}:{set of properties}] + * @param primary + * @param secondary + * @param forceCopy + * @throws AAIException + * @throws UnsupportedEncodingException + */ + public void performMerge(Vertex primary, Vertex secondary, Map<String, Set<String>> forceCopy, String basePath) throws AAIException, UnsupportedEncodingException { + final Optional<Introspector> secondarySnapshot; + if (this.hasNotifications) { + secondarySnapshot = Optional.of(serializer.getLatestVersionView(secondary)); + } else { + secondarySnapshot = Optional.empty(); + } + mergeProperties(primary, secondary, forceCopy); + + Collection<Vertex> secondaryChildren = this.engine.getQueryEngine().findChildren(secondary); + Collection<Vertex> primaryChildren = this.engine.getQueryEngine().findChildren(primary); + + mergeChildren(primary, secondary, primaryChildren, secondaryChildren, forceCopy); + + Collection<Vertex> secondaryCousins = this.engine.getQueryEngine().findCousinVertices(secondary); + Collection<Vertex> primaryCousins = this.engine.getQueryEngine().findCousinVertices(primary); + + secondaryCousins.removeAll(primaryCousins); + logger.info("removing vertex after merge: " + secondary ); + if (this.hasNotifications && secondarySnapshot.isPresent()) { + this.notificationHelper.addEvent(secondary, secondarySnapshot.get(), EventAction.DELETE, this.serializer.getURIForVertex(secondary, false), basePath); + } + secondary.remove(); + for (Vertex v : secondaryCousins) { + this.edgeSerializer.addEdgeIfPossible(g, v, primary); + } + if (this.hasNotifications) { + final Introspector primarySnapshot = serializer.getLatestVersionView(primary); + this.notificationHelper.addEvent(primary, primarySnapshot, EventAction.UPDATE, this.serializer.getURIForVertex(primary, false), basePath); + } + } + + /** + * This method may go away if we choose to event on each modification performed + * @param primary + * @param secondary + * @param forceCopy + * @throws AAIException + * @throws UnsupportedEncodingException + */ + protected void performMergeHelper(Vertex primary, Vertex secondary, Map<String, Set<String>> forceCopy) throws AAIException, UnsupportedEncodingException { + mergeProperties(primary, secondary, forceCopy); + + Collection<Vertex> secondaryChildren = this.engine.getQueryEngine().findChildren(secondary); + Collection<Vertex> primaryChildren = this.engine.getQueryEngine().findChildren(primary); + + mergeChildren(primary, secondary, primaryChildren, secondaryChildren, forceCopy); + + Collection<Vertex> secondaryCousins = this.engine.getQueryEngine().findCousinVertices(secondary); + Collection<Vertex> primaryCousins = this.engine.getQueryEngine().findCousinVertices(primary); + + secondaryCousins.removeAll(primaryCousins); + secondary.remove(); + for (Vertex v : secondaryCousins) { + this.edgeSerializer.addEdgeIfPossible(g, v, primary); + } + } + + private String getURI(Vertex v) throws UnsupportedEncodingException, AAIException { + Introspector obj = loader.introspectorFromName(v.<String>property(AAIProperties.NODE_TYPE).orElse("")); + this.serializer.dbToObject(Collections.singletonList(v), obj, 0, true, "false"); + return obj.getURI(); + + } + private void mergeChildren(Vertex primary, Vertex secondary, Collection<Vertex> primaryChildren, Collection<Vertex> secondaryChildren, Map<String, Set<String>> forceCopy) throws UnsupportedEncodingException, AAIException { + Map<String, Vertex> primaryMap = uriMap(primaryChildren); + Map<String, Vertex> secondaryMap = uriMap(secondaryChildren); + 
Set<String> primaryKeys = new HashSet<>(primaryMap.keySet()); + Set<String> secondaryKeys = new HashSet<>(secondaryMap.keySet()); + primaryKeys.retainAll(secondaryKeys); + final Set<String> mergeItems = new HashSet<>(primaryKeys); + primaryKeys = new HashSet<>(primaryMap.keySet()); + secondaryKeys = new HashSet<>(secondaryMap.keySet()); + secondaryKeys.removeAll(primaryKeys); + final Set<String> copyItems = new HashSet<>(secondaryKeys); + + for (String key : mergeItems) { + this.performMergeHelper(primaryMap.get(key), secondaryMap.get(key), forceCopy); + } + + for (String key : copyItems) { + this.edgeSerializer.addTreeEdgeIfPossible(g, secondaryMap.get(key), primary); + this.serializer.getEdgeBetween(EdgeType.TREE, secondary, secondaryMap.get(key)).remove(); + } + + } + + private Map<String, Vertex> uriMap(Collection<Vertex> vertices) throws UnsupportedEncodingException, AAIException { + final Map<String, Vertex> result = new HashMap<>(); + for (Vertex v : vertices) { + result.put(getURI(v), v); + } + return result; + } + + private void mergeProperties(Vertex primary, Vertex secondary, Map<String, Set<String>> forceCopy) throws AAIUnknownObjectException { + final String primaryType = primary.<String>property(AAIProperties.NODE_TYPE).orElse(""); + final String secondaryType = secondary.<String>property(AAIProperties.NODE_TYPE).orElse(""); + + final Introspector secondaryObj = loader.introspectorFromName(secondaryType); + secondary.properties().forEachRemaining(prop -> { + if (!primary.property(prop.key()).isPresent() || forceCopy.getOrDefault(primaryType, new HashSet<String>()).contains(prop.key())) { + primary.property(prop.key(), prop.value()); + } + if (primary.property(prop.key()).isPresent() && secondary.property(prop.key()).isPresent() && secondaryObj.isListType(prop.key())) { + mergeCollection(primary, prop.key(), secondary.values(prop.key())); + } + }); + } + private void mergeCollection(Vertex primary, String propName, Iterator<Object> secondaryValues) { + secondaryValues.forEachRemaining(item -> { + primary.property(propName, item); + }); + } + + + public static class Builder { + private final TransactionalGraphEngine engine; + + private final DBSerializer serializer; + private EdgeSerializer edgeSerializer; + + private final Loader loader; + private NotificationHelper helper = null; + private boolean hasNotifications = false; + public Builder(Loader loader, TransactionalGraphEngine engine, DBSerializer serializer) { + this.loader = loader; + this.engine = engine; + this.serializer = serializer; + } + + public Builder addNotifications(NotificationHelper helper) { + this.helper = helper; + this.hasNotifications = true; + return this; + } + + public Builder edgeSerializer(EdgeSerializer edgeSerializer){ + this.edgeSerializer = edgeSerializer; + return this; + } + + public EdgeSerializer getEdgeSerializer(){ + return edgeSerializer; + } + + public VertexMerge build() { + return new VertexMerge(this); + } + + protected TransactionalGraphEngine getEngine() { + return engine; + } + + protected DBSerializer getSerializer() { + return serializer; + } + + protected Loader getLoader() { + return loader; + } + + protected NotificationHelper getHelper() { + return helper; + } + + protected boolean isHasNotifications() { + return hasNotifications; + } + + } + +} diff --git a/src/main/java/org/onap/aai/migration/v12/ContainmentDeleteOtherVPropertyMigration.java b/src/main/java/org/onap/aai/migration/v12/ContainmentDeleteOtherVPropertyMigration.java new file mode 100644 index 0000000..361e8bc 
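A sketch of how a migration might use the VertexMerge builder above to collapse a duplicate vertex. All collaborators are assumed to come from the calling Migrator's fields, and the "/aai" base path is illustrative:

    package org.onap.aai.migration;

    import java.io.UnsupportedEncodingException;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;
    import org.apache.tinkerpop.gremlin.structure.Vertex;
    import org.onap.aai.exceptions.AAIException;
    import org.onap.aai.introspection.Loader;
    import org.onap.aai.serialization.db.DBSerializer;
    import org.onap.aai.serialization.db.EdgeSerializer;
    import org.onap.aai.serialization.engines.TransactionalGraphEngine;

    // Folds the properties, children, and cousin edges of "secondary" into
    // "primary", then removes "secondary".
    public class VertexMergeExample {
        public static void merge(Loader loader, TransactionalGraphEngine engine,
                                 DBSerializer serializer, EdgeSerializer edgeSerializer,
                                 Vertex primary, Vertex secondary)
                throws AAIException, UnsupportedEncodingException {
            VertexMerge merger = new VertexMerge.Builder(loader, engine, serializer)
                    .edgeSerializer(edgeSerializer)
                    .build();
            Map<String, Set<String>> forceCopy = new HashMap<>(); // no forced property copies
            merger.performMerge(primary, secondary, forceCopy, "/aai");
        }
    }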
--- /dev/null +++ b/src/main/java/org/onap/aai/migration/v12/ContainmentDeleteOtherVPropertyMigration.java @@ -0,0 +1,106 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.migration.v12; + +import java.util.Optional; + +import org.apache.commons.lang.exception.ExceptionUtils; +import org.apache.tinkerpop.gremlin.structure.Edge; +import org.onap.aai.edges.EdgeIngestor; +import org.onap.aai.edges.enums.EdgeProperty; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.migration.MigrationDangerRating; +import org.onap.aai.migration.MigrationPriority; +import org.onap.aai.migration.Migrator; +import org.onap.aai.migration.Status; +import org.onap.aai.edges.enums.AAIDirection; +import org.onap.aai.serialization.db.EdgeSerializer; +import org.onap.aai.serialization.engines.TransactionalGraphEngine; +import org.onap.aai.setup.SchemaVersions; + + +//@Enabled +@MigrationPriority(-100) +@MigrationDangerRating(10) +public class ContainmentDeleteOtherVPropertyMigration extends Migrator { + + private boolean success = true; + + public ContainmentDeleteOtherVPropertyMigration(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions) { + super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions); + } + + //just for testing using test edge rule files + public ContainmentDeleteOtherVPropertyMigration(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions, String edgeRulesFile) { + super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions); + } + + @Override + public void run() { + try { + engine.asAdmin().getTraversalSource().E().sideEffect(t -> { + Edge e = t.get(); + logger.info("out vertex: " + e.outVertex().property("aai-node-type").value() + + " in vertex: " + e.inVertex().property("aai-node-type").value() + + " label : " + e.label()); + if (e.property(EdgeProperty.CONTAINS.toString()).isPresent() && + e.property(EdgeProperty.DELETE_OTHER_V.toString()).isPresent()) { + //in case of orphans + if (!("constrained-element-set".equals(e.inVertex().property("aai-node-type").value()) + && "model-element".equals(e.outVertex().property("aai-node-type").value()))) { + //skip the weird horrible problem child edge + String containment = (String) e.property(EdgeProperty.CONTAINS.toString()).value(); + if (AAIDirection.OUT.toString().equalsIgnoreCase(containment) || + 
AAIDirection.IN.toString().equalsIgnoreCase(containment) || + AAIDirection.BOTH.toString().equalsIgnoreCase(containment)) { + logger.info("updating delete-other-v property"); + e.property(EdgeProperty.DELETE_OTHER_V.toString(), containment); + } + } + } + }).iterate(); + } catch (Exception e) { + logger.info("error encountered " + e.getClass() + " " + e.getMessage() + " " + ExceptionUtils.getFullStackTrace(e)); + logger.error("error encountered " + e.getClass() + " " + e.getMessage() + " " + ExceptionUtils.getFullStackTrace(e)); + success = false; + } + + } + + @Override + public Status getStatus() { + if (success) { + return Status.SUCCESS; + } else { + return Status.FAILURE; + } + } + + @Override + public Optional<String[]> getAffectedNodeTypes() { + return Optional.empty(); + } + + @Override + public String getMigrationName() { + return "migrate-containment-delete-other-v"; + } + +} diff --git a/src/main/java/org/onap/aai/migration/v12/DeletePInterface.java b/src/main/java/org/onap/aai/migration/v12/DeletePInterface.java new file mode 100644 index 0000000..1089b2f --- /dev/null +++ b/src/main/java/org/onap/aai/migration/v12/DeletePInterface.java @@ -0,0 +1,131 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.migration.v12;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Optional;
+
+import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
+import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
+import org.apache.tinkerpop.gremlin.structure.Vertex;
+import org.onap.aai.db.props.AAIProperties;
+import org.onap.aai.edges.EdgeIngestor;
+import org.onap.aai.exceptions.AAIException;
+import org.onap.aai.introspection.LoaderFactory;
+import org.onap.aai.migration.MigrationDangerRating;
+import org.onap.aai.migration.MigrationPriority;
+import org.onap.aai.migration.Migrator;
+import org.onap.aai.migration.Status;
+import org.onap.aai.edges.enums.EdgeType;
+import org.onap.aai.serialization.db.EdgeSerializer;
+import org.onap.aai.serialization.engines.TransactionalGraphEngine;
+import org.onap.aai.setup.SchemaVersions;
+
+@MigrationPriority(0)
+@MigrationDangerRating(0)
+public class DeletePInterface extends Migrator {
+	private boolean success = true;
+	private final GraphTraversalSource g;
+	public DeletePInterface(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions) {
+		super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions);
+		this.g = this.engine.asAdmin().getTraversalSource();
+	}
+
+	@Override
+	public void run() {
+		int count = 0;
+		int skipCount = 0;
+		int errorCount = 0;
+		logger.info("---------- Start deleting p-interfaces ----------");
+		List<Vertex> pIntfList;
+		try {
+			pIntfList = g.V().has(AAIProperties.NODE_TYPE, "p-interface").has("source-of-truth", "AAI-CSVP-INSTARAMS")
+					.where(this.engine.getQueryBuilder().createEdgeTraversal(EdgeType.TREE, "p-interface", "pnf")
+					.<GraphTraversal<?, ?>>getQuery()).toList();
+
+			if (pIntfList != null && !pIntfList.isEmpty()) {
+				for (Vertex pInterfV : pIntfList) {
+					try {
+						// only p-interfaces with no cousins and no children are deleted
+						Collection<Vertex> cousins = this.engine.getQueryEngine().findCousinVertices(pInterfV);
+
+						Collection<Vertex> children = this.engine.getQueryEngine().findChildren(pInterfV);
+						if (cousins == null || cousins.isEmpty()) {
+							if (children == null || children.isEmpty()) {
+								logger.info("Delete p-interface: " + getVertexURI(pInterfV));
+								pInterfV.remove();
+								count++;
+							} else {
+								skipCount++;
+								logger.info("skip p-interface " + getVertexURI(pInterfV) + " due to an existing relationship");
+							}
+						} else {
+							skipCount++;
+							logger.info("skip p-interface " + getVertexURI(pInterfV) + " due to an existing relationship");
+						}
+					} catch (Exception e) {
+						success = false;
+						errorCount++;
+						logger.error("error occurred in deleting p-interface " + getVertexURI(pInterfV) + ", " + e);
+					}
+				}
+				logger.info("\n \n ******* Final Summary for deleting p-interfaces Migration ********* \n");
+				logger.info("Number of p-interfaces removed: " + count + "\n");
+				logger.info("Number of p-interfaces skipped: " + skipCount + "\n");
+				logger.info("Number of p-interfaces failed to delete due to error: " + errorCount + "\n");
+			}
+		} catch (AAIException e) {
+			success = false;
+			logger.error("error occurred in deleting p-interfaces " + e);
+		}
+	}
+
+	private String getVertexURI(Vertex v) {
+		if (v != null) {
+			if (v.property("aai-uri").isPresent()) {
+				return v.property("aai-uri").value().toString();
+			} else {
+				return "Vertex ID: " + v.id().toString();
+			}
+		} else {
+			return "";
+		}
+	}
+
+	@Override
+	public
Status getStatus() { + if (success) { + return Status.SUCCESS; + } else { + return Status.FAILURE; + } + } + + @Override + public Optional<String[]> getAffectedNodeTypes() { + return Optional.of(new String[] { "p-interface" }); + } + + @Override + public String getMigrationName() { + return "DeletePInterface"; + } + +} diff --git a/src/main/java/org/onap/aai/migration/v12/EdgeReportForToscaMigration.java b/src/main/java/org/onap/aai/migration/v12/EdgeReportForToscaMigration.java new file mode 100644 index 0000000..1bdddf3 --- /dev/null +++ b/src/main/java/org/onap/aai/migration/v12/EdgeReportForToscaMigration.java @@ -0,0 +1,162 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.migration.v12; +/*- + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright (C) 2017 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ + + +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.apache.tinkerpop.gremlin.structure.Edge; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.onap.aai.edges.EdgeIngestor; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.migration.*; +import org.onap.aai.serialization.db.EdgeSerializer; +import org.onap.aai.serialization.engines.TransactionalGraphEngine; +import org.onap.aai.setup.SchemaVersions; + +import java.util.*; + +@MigrationPriority(0) +@MigrationDangerRating(0) +public class EdgeReportForToscaMigration extends Migrator { + + private boolean success = true; + + public EdgeReportForToscaMigration(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions){ + super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions); + } + + @Override + public Status getStatus() { + if (success) { + return Status.SUCCESS; + } else { + return Status.FAILURE; + } + } + + @Override + public void run() { + Vertex out = null; + Vertex in = null; + String label = ""; + String outURI = ""; + String inURI = ""; + String parentCousinIndicator = "NONE"; + String oldEdgeString = null; + List<String> edgeMissingParentProperty = new ArrayList<>(); + StringBuilder sb = new StringBuilder(); + Set<String> noURI = new HashSet<>(); + sb.append("----------EDGES----------\n"); + + GraphTraversalSource g = engine.asAdmin().getTraversalSource(); + + try { + Set<Edge> edges = g.E().toSet(); + for (Edge edge : edges) { + out = edge.outVertex(); + in = edge.inVertex(); + label = edge.label(); + outURI = this.getVertexURI(out); + inURI = this.getVertexURI(in); + parentCousinIndicator = "NONE"; + oldEdgeString = this.toStringForPrinting(edge, 1); + + if (!outURI.startsWith("/")) { + noURI.add(outURI); + } + if (!inURI.startsWith("/")) { + noURI.add(inURI); + } + + if (out == null || in == null) { + logger.error(edge.id() + " invalid because one vertex was null: out=" + edge.outVertex() + " in=" + edge.inVertex()); + } else { + + if (edge.property("contains-other-v").isPresent()) { + parentCousinIndicator = edge.property("contains-other-v").value().toString(); + } else if (edge.property("isParent").isPresent()) { + if ((Boolean)edge.property("isParent").value()) { + parentCousinIndicator = "OUT"; + } else if (edge.property("isParent-REV").isPresent() && (Boolean)edge.property("isParent-REV").value()) { + parentCousinIndicator = "IN"; + } + } else { + edgeMissingParentProperty.add(this.toStringForPrinting(edge, 1)); + } + + sb.append(outURI + "|" + label + "|" + inURI + "|" + parentCousinIndicator + "\n"); + } + } + } catch(Exception ex){ + logger.error("exception occurred during migration, failing: out=" + out + " in=" + in + "edge=" + oldEdgeString, ex); + success = false; + } + sb.append("--------EDGES END--------\n"); + + logger.info(sb.toString()); + edgeMissingParentProperty.forEach(s -> logger.warn("Edge Missing Parent Property: " + s)); + logger.info("Edge Missing Parent Property Count: " + edgeMissingParentProperty.size()); + logger.info("Vertex Missing URI Property Count: " + noURI.size()); + + } + + private String getVertexURI(Vertex v) { + if (v.property("aai-uri").isPresent()) { + return v.property("aai-uri").value().toString(); + } else { + return v.id().toString() + "(" + v.property("aai-node-type").value().toString() + ")"; + } + 
} + + @Override + public Optional<String[]> getAffectedNodeTypes() { + return Optional.empty(); + } + + @Override + public String getMigrationName() { + return "edge-report-for-tosca-migration"; + } + + @Override + public void commit() { + engine.rollback(); + } + +} diff --git a/src/main/java/org/onap/aai/migration/v12/MigrateModelVerDistriubutionStatusProperty.java b/src/main/java/org/onap/aai/migration/v12/MigrateModelVerDistriubutionStatusProperty.java new file mode 100644 index 0000000..c09643f --- /dev/null +++ b/src/main/java/org/onap/aai/migration/v12/MigrateModelVerDistriubutionStatusProperty.java @@ -0,0 +1,85 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.migration.v12; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.onap.aai.db.props.AAIProperties; +import org.onap.aai.edges.EdgeIngestor; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.migration.*; +import org.onap.aai.serialization.db.EdgeSerializer; +import org.onap.aai.serialization.engines.TransactionalGraphEngine; +import org.onap.aai.setup.SchemaVersions; + +import java.util.Optional; + +@MigrationPriority(20) +@MigrationDangerRating(2) +public class MigrateModelVerDistriubutionStatusProperty extends Migrator{ + + private final String PARENT_NODE_TYPE = "model-ver"; + private boolean success = true; + + public MigrateModelVerDistriubutionStatusProperty(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions) { + super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions); + } + + + + @Override + public void run() { + + + GraphTraversal<Vertex, Vertex> f = this.engine.asAdmin().getTraversalSource().V().has(AAIProperties.NODE_TYPE,"model-ver"); + + while(f.hasNext()) { + Vertex v = f.next(); + try { + v.property("distribution-status", "DISTRIBUTION_COMPLETE_OK"); + logger.info("changed model-ver.distribution-status property value for model-version-id: " + v.property("model-version-id").value()); + + } catch (Exception e) { + e.printStackTrace(); + success = false; + logger.error("encountered exception for model-version-id:" + v.property("model-version-id").value(), e); + } + } + } + + + @Override + public Status getStatus() { + if (success) { + return Status.SUCCESS; + } else { + return Status.FAILURE; + } + } + @Override + public Optional<String[]> getAffectedNodeTypes() { + return Optional.of(new String[]{PARENT_NODE_TYPE}); + } + 
+ @Override + public String getMigrationName() { + return "MigrateModelVerDistriubutionStatusProperty"; + } + +} diff --git a/src/main/java/org/onap/aai/migration/v12/MigrateServiceInstanceToConfiguration.java b/src/main/java/org/onap/aai/migration/v12/MigrateServiceInstanceToConfiguration.java new file mode 100644 index 0000000..b4208af --- /dev/null +++ b/src/main/java/org/onap/aai/migration/v12/MigrateServiceInstanceToConfiguration.java @@ -0,0 +1,193 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.migration.v12; + +import java.io.UnsupportedEncodingException; +import java.util.Iterator; +import java.util.Optional; +import java.util.UUID; + +import org.apache.tinkerpop.gremlin.process.traversal.P; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; +import org.apache.tinkerpop.gremlin.structure.Direction; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.onap.aai.db.props.AAIProperties; +import org.onap.aai.edges.EdgeIngestor; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.introspection.Introspector; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.introspection.exceptions.AAIUnknownObjectException; +import org.onap.aai.migration.MigrationDangerRating; +import org.onap.aai.migration.MigrationPriority; +import org.onap.aai.migration.Migrator; +import org.onap.aai.migration.Status; +import org.onap.aai.edges.enums.EdgeType; +import org.onap.aai.serialization.db.EdgeSerializer; +import org.onap.aai.serialization.engines.TransactionalGraphEngine; +import org.onap.aai.setup.SchemaVersions; + +//@Enabled +@MigrationPriority(10) +@MigrationDangerRating(10) +public class MigrateServiceInstanceToConfiguration extends Migrator { + + private boolean success = true; + private final String CONFIGURATION_NODE_TYPE = "configuration"; + private final String SERVICE_INSTANCE_NODE_TYPE = "service-instance"; + private Introspector configObj; + + public MigrateServiceInstanceToConfiguration(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions) { + super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions); + try { + this.configObj = this.loader.introspectorFromName(CONFIGURATION_NODE_TYPE); + } catch (AAIUnknownObjectException e) { + this.configObj = null; + } + } + + @Override + public void run() { + Vertex serviceInstance = null; + Vertex configuration = null; + String serviceInstanceId = "", tunnelBandwidth = ""; + String bandwidthTotal, 
configType, nodeType;
+		GraphTraversal<Vertex, Vertex> serviceInstanceItr;
+		Iterator<Vertex> configurationItr;
+
+		try {
+			serviceInstanceItr = this.engine.asAdmin().getTraversalSource().V()
+					.has(AAIProperties.NODE_TYPE, P.within(getAffectedNodeTypes().get()))
+					.where(this.engine.getQueryBuilder()
+							.createEdgeTraversal(EdgeType.TREE, "service-instance", "service-subscription")
+							.getVerticesByProperty("service-type", "DHV")
+							.<GraphTraversal<?, ?>>getQuery());
+
+			if (serviceInstanceItr == null || !serviceInstanceItr.hasNext()) {
+				logger.info("No service-instance nodes found with service-type of DHV");
+				return;
+			}
+
+			// iterate through all service instances of service-type DHV
+			while (serviceInstanceItr.hasNext()) {
+				serviceInstance = serviceInstanceItr.next();
+
+				if (serviceInstance != null && serviceInstance.property("bandwidth-total").isPresent()) {
+					serviceInstanceId = serviceInstance.value("service-instance-id");
+					logger.info("Processing service instance with id=" + serviceInstanceId);
+					bandwidthTotal = serviceInstance.value("bandwidth-total");
+
+					if (bandwidthTotal != null && !bandwidthTotal.isEmpty()) {
+
+						// check for existing edges to configuration nodes
+						configurationItr = serviceInstance.vertices(Direction.OUT, "has");
+
+						// create new configuration node if service-instance does not have existing ones
+						if (!configurationItr.hasNext()) {
+							logger.info(serviceInstanceId + " has no existing configuration nodes, creating new node");
+							createConfigurationNode(serviceInstance, bandwidthTotal);
+							continue;
+						}
+
+						// in case configuration nodes exist, but none are DHV
+						boolean hasDHVConfig = false;
+
+						// service-instance has existing configuration nodes
+						while (configurationItr.hasNext()) {
+							configuration = configurationItr.next();
+							// null-check the vertex before dereferencing it
+							if (configuration == null) {
+								continue;
+							}
+							nodeType = configuration.value("aai-node-type").toString();
+
+							if ("configuration".equalsIgnoreCase(nodeType)) {
+								logger.info("Processing configuration node with id=" + configuration.property("configuration-id").value());
+								configType = configuration.value("configuration-type");
+								logger.info("Configuration type: " + configType);
+
+								// if configuration-type is DHV, update tunnel-bandwidth to bandwidth-total value
+								if ("DHV".equalsIgnoreCase(configType)) {
+									if (configuration.property("tunnel-bandwidth").isPresent()) {
+										tunnelBandwidth = configuration.value("tunnel-bandwidth");
+									} else {
+										tunnelBandwidth = "";
+									}
+
+									logger.info("Existing tunnel-bandwidth: " + tunnelBandwidth);
+									configuration.property("tunnel-bandwidth", bandwidthTotal);
+									touchVertexProperties(configuration, false);
+									logger.info("Updated tunnel-bandwidth: " + configuration.value("tunnel-bandwidth"));
+									hasDHVConfig = true;
+								}
+							}
+						}
+
+						// create new configuration node if none of the existing config nodes are of type DHV
+						if (!hasDHVConfig) {
+							logger.info(serviceInstanceId + " has existing configuration nodes, but none are DHV, create new node");
+							createConfigurationNode(serviceInstance, bandwidthTotal);
+						}
+					}
+				}
+			}
+		} catch (AAIException | UnsupportedEncodingException e) {
+			logger.error("Caught exception while processing service instance with id=" + serviceInstanceId + " | " + e.toString());
+			success = false;
+		}
+	}
+
+	private void createConfigurationNode(Vertex serviceInstance, String bandwidthTotal) throws UnsupportedEncodingException, AAIException {
+		// create new vertex
+		Vertex configurationNode = serializer.createNewVertex(configObj);
+
+		// configuration-id: UUID format
+		String configurationUUID =
UUID.randomUUID().toString(); + configObj.setValue("configuration-id", configurationUUID); + + // configuration-type: DHV + configObj.setValue("configuration-type", "DHV"); + + // migrate the bandwidth-total property from the service-instance to the + // tunnel-bandwidth property of the related configuration object + configObj.setValue("tunnel-bandwidth", bandwidthTotal); + + // create edge between service instance and configuration: cousinEdge(out, in) + createCousinEdge(serviceInstance, configurationNode); + + // serialize edge & vertex, takes care of everything + serializer.serializeSingleVertex(configurationNode, configObj, "migrations"); + logger.info("Created configuration node with uuid=" + configurationUUID + ", tunnel-bandwidth=" + bandwidthTotal); + } + + @Override + public Status getStatus() { + if (success) { + return Status.SUCCESS; + } else { + return Status.FAILURE; + } + } + + @Override + public Optional<String[]> getAffectedNodeTypes() { + return Optional.of(new String[] {SERVICE_INSTANCE_NODE_TYPE}); + } + + @Override + public String getMigrationName() { + return "service-instance-to-configuration"; + } +} diff --git a/src/main/java/org/onap/aai/migration/v12/SDWANSpeedChangeMigration.java b/src/main/java/org/onap/aai/migration/v12/SDWANSpeedChangeMigration.java new file mode 100644 index 0000000..b420c57 --- /dev/null +++ b/src/main/java/org/onap/aai/migration/v12/SDWANSpeedChangeMigration.java @@ -0,0 +1,258 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.migration.v12;
+
+import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
+import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__;
+import org.apache.tinkerpop.gremlin.structure.Vertex;
+import org.onap.aai.edges.EdgeIngestor;
+import org.onap.aai.exceptions.AAIException;
+import org.onap.aai.introspection.LoaderFactory;
+import org.onap.aai.migration.*;
+import org.onap.aai.edges.enums.EdgeType;
+import org.onap.aai.serialization.db.EdgeSerializer;
+import org.onap.aai.serialization.engines.TransactionalGraphEngine;
+import org.onap.aai.setup.SchemaVersions;
+
+import java.util.*;
+
+@MigrationPriority(1)
+@MigrationDangerRating(1)
+//@Enabled
+public class SDWANSpeedChangeMigration extends Migrator {
+
+	private final String PARENT_NODE_TYPE = "allotted-resource";
+	private boolean success = true;
+
+	Vertex allottedRsrcVertex;
+
+	Map<String, String> bandwidthMap = new HashMap<>();
+	Set<String> bandWidthSet = new HashSet<>();
+
+	GraphTraversal<Vertex, Vertex> allottedRsrcTraversal;
+	GraphTraversal<Vertex, Vertex> tunnelXConnectTraversal;
+	GraphTraversal<Vertex, Vertex> pinterfaceTraversal;
+	GraphTraversal<Vertex, Vertex> plinkTraversal;
+
+	public SDWANSpeedChangeMigration(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions) {
+		super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions);
+		bandWidthSet.add("bandwidth-up-wan1");
+		bandWidthSet.add("bandwidth-down-wan1");
+		bandWidthSet.add("bandwidth-up-wan2");
+		bandWidthSet.add("bandwidth-down-wan2");
+	}
+
+
+	@Override
+	public void run() {
+
+		logger.info("Started the migration " + getMigrationName());
+
+		try {
+
+			allottedRsrcTraversal = this.engine.asAdmin().getTraversalSource().V()
+					.has("aai-node-type", "service-subscription")
+					.has("service-type", "DHV")
+					.in("org.onap.relationships.inventory.BelongsTo")
+					.has("aai-node-type", "service-instance")
+					.out("org.onap.relationships.inventory.Uses")
+					.has("aai-node-type", "allotted-resource")
+					.where(
+							this.engine.getQueryBuilder()
+									.createEdgeTraversal(EdgeType.TREE, "allotted-resource", "service-instance")
+									.createEdgeTraversal(EdgeType.TREE, "service-instance", "service-subscription")
+									.<GraphTraversal<Vertex, Vertex>>getQuery()
+									.has("service-type", "VVIG")
+					);
+
+			if (!(allottedRsrcTraversal.hasNext())) {
+
+				logger.info("unable to find an allotted-resource used by a DHV service-instance and parented under a VVIG service-instance");
+			}
+
+			while (allottedRsrcTraversal.hasNext()) {
+				bandwidthMap.clear();
+
+				allottedRsrcVertex = allottedRsrcTraversal.next();
+				String allottedResourceId = allottedRsrcVertex.property("id").value().toString();
+				logger.info("Found an allotted resource with id " + allottedResourceId);
+
+				tunnelXConnectTraversal = this.engine.asAdmin().getTraversalSource()
+						.V(allottedRsrcVertex)
+						.in("org.onap.relationships.inventory.BelongsTo")
+						.has("aai-node-type", "tunnel-xconnect");
+
+				if (tunnelXConnectTraversal != null && tunnelXConnectTraversal.hasNext()) {
+					Vertex xConnect = tunnelXConnectTraversal.next();
+					String tunnelId = xConnect.property("id").value().toString();
+					logger.info("Found a tunnel-xconnect object with id " + tunnelId);
+					extractBandwidthProps(xConnect);
+					modifyPlink(allottedRsrcVertex);
+				} else {
+					logger.info("Unable to find the tunnel-xconnect for the current allotted-resource");
+				}
+
+			}
+		} catch (AAIException e) {
+			e.printStackTrace();
+			success = false;
+		}
+
+		if (success) {
+			logger.info("Successfully finished the " + getMigrationName());
+		}
+	}
+
+	public void extractBandwidthProps(Vertex vertex) {
+		logger.info("Trying to extract bandwidth props");
+		bandWidthSet.stream().forEach((key) -> {
+			if (vertex.property(key).isPresent()) {
+				bandwidthMap.put(key, vertex.property(key).value().toString());
+			}
+		});
+		logger.info("Extracted bandwidth props for tunnelXConnect " + vertex.value("id"));
+	}
+
+	public void modifyPlink(Vertex v) {
+
+		try {
+			pinterfaceTraversal = this.engine.asAdmin().getTraversalSource().V(v)
+					.in("org.onap.relationships.inventory.Uses").has("aai-node-type", "service-instance")
+					.where(
+							__.out("org.onap.relationships.inventory.BelongsTo")
+									.has("aai-node-type", "service-subscription")
+									.has("service-type", "DHV")
+					)
+					.out("org.onap.relationships.inventory.ComposedOf").has("aai-node-type", "generic-vnf")
+					.out("tosca.relationships.HostedOn").has("aai-node-type", "vserver")
+					.out("tosca.relationships.HostedOn").has("aai-node-type", "pserver")
+					.in("tosca.relationships.network.BindsTo").has("aai-node-type", "p-interface");
+		} catch (Exception e) {
+			logger.info("error trying to find p-interfaces");
+			// traversal was never assigned; bail out rather than NPE below
+			return;
+		}
+
+
+		while (pinterfaceTraversal.hasNext()) {
+
+			Vertex pInterfaceVertex = pinterfaceTraversal.next();
+
+			String pinterfaceName = pInterfaceVertex.property("interface-name").value().toString();
+			logger.info("p-interface " + pinterfaceName + " found from traversal from allotted-resource " + v.value("id"));
+			String[] parts = pinterfaceName.split("/");
+
+			if (parts[parts.length - 1].equals("10")) {
+
+				logger.info("Found the p-interface with the interface name ending with /10");
+
+				try {
+					plinkTraversal = this.engine.asAdmin().getTraversalSource()
+							.V(pInterfaceVertex)
+							.out("tosca.relationships.network.LinksTo")
+							.has("aai-node-type", "physical-link");
+				} catch (Exception e) {
+					logger.info("error trying to find the p-link for /10");
+				}
+				if (plinkTraversal != null && plinkTraversal.hasNext()) {
+					Vertex pLink = plinkTraversal.next();
+
+
+					if (bandwidthMap.containsKey("bandwidth-up-wan1")
+							&& bandwidthMap.containsKey("bandwidth-down-wan1")
+							&& !(("").equals(bandwidthMap.get("bandwidth-up-wan1").replaceAll("[^0-9]", "").trim()))
+							&& !(("").equals(bandwidthMap.get("bandwidth-down-wan1").replaceAll("[^0-9]", "").trim()))) {
+
+						pLink.property("service-provider-bandwidth-up-value", Integer.valueOf(bandwidthMap.get("bandwidth-up-wan1").replaceAll("[^0-9]", "").trim()));
+						pLink.property("service-provider-bandwidth-up-units", "Mbps");
+						pLink.property("service-provider-bandwidth-down-value", Integer.valueOf(bandwidthMap.get("bandwidth-down-wan1").replaceAll("[^0-9]", "").trim()));
+						pLink.property("service-provider-bandwidth-down-units", "Mbps");
+						logger.info("Successfully modified the plink with link name " + pLink.property("link-name").value().toString());
+						this.touchVertexProperties(pLink, false);
+					} else {
+						logger.info("missing up and down vals for the plink with link name " + pLink.property("link-name").value().toString());
+					}
+
+
+				} else {
+					logger.info("missing plink for p-interface " + pinterfaceName);
+				}
+
+			}
+
+			if (parts[parts.length - 1].equals("11")) {
+
+				logger.info("Found the p-interface with the interface name ending with /11");
+				try {
+					plinkTraversal = this.engine.asAdmin()
+							.getTraversalSource()
+							.V(pInterfaceVertex)
+							.out("tosca.relationships.network.LinksTo")
+							.has("aai-node-type", "physical-link");
+				} catch (Exception
e) {
+					logger.info("error trying to find the p-link for /11");
+				}
+
+				if (plinkTraversal != null && plinkTraversal.hasNext()) {
+					Vertex pLink = plinkTraversal.next();
+
+
+					if (bandwidthMap.containsKey("bandwidth-up-wan2")
+							&& bandwidthMap.containsKey("bandwidth-down-wan2")
+							&& !(("").equals(bandwidthMap.get("bandwidth-up-wan2").replaceAll("[^0-9]", "").trim()))
+							&& !(("").equals(bandwidthMap.get("bandwidth-down-wan2").replaceAll("[^0-9]", "").trim()))) {
+						pLink.property("service-provider-bandwidth-up-value", Integer.valueOf(bandwidthMap.get("bandwidth-up-wan2").replaceAll("[^0-9]", "").trim()));
+						pLink.property("service-provider-bandwidth-up-units", "Mbps");
+						pLink.property("service-provider-bandwidth-down-value", Integer.valueOf(bandwidthMap.get("bandwidth-down-wan2").replaceAll("[^0-9]", "").trim()));
+						pLink.property("service-provider-bandwidth-down-units", "Mbps");
+						logger.info("Successfully modified the plink with link name " + pLink.property("link-name").value().toString());
+						this.touchVertexProperties(pLink, false);
+					} else {
+						logger.error("missing up and down vals for the plink with link name " + pLink.property("link-name").value().toString());
+					}
+
+				} else {
+					logger.info("missing plink for p-interface " + pinterfaceName);
+				}
+			}
+		}
+	}
+
+
+	@Override
+	public Status getStatus() {
+		if (success) {
+			return Status.SUCCESS;
+		} else {
+			return Status.FAILURE;
+		}
+	}
+
+	@Override
+	public Optional<String[]> getAffectedNodeTypes() {
+
+		return Optional.of(new String[]{PARENT_NODE_TYPE});
+	}
+
+	@Override
+	public String getMigrationName() {
+		return "SDWANSpeedChangeMigration";
+	}
+
+
+}
diff --git a/src/main/java/org/onap/aai/migration/v12/UpdateAaiUriIndexMigration.java b/src/main/java/org/onap/aai/migration/v12/UpdateAaiUriIndexMigration.java new file mode 100644 index 0000000..33689b5 --- /dev/null +++ b/src/main/java/org/onap/aai/migration/v12/UpdateAaiUriIndexMigration.java @@ -0,0 +1,328 @@ +/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.migration.v12; + +import org.janusgraph.core.Cardinality; +import org.janusgraph.core.PropertyKey; +import org.janusgraph.core.schema.SchemaAction; +import org.janusgraph.core.schema.SchemaStatus; +import org.janusgraph.core.schema.JanusGraphIndex; +import org.janusgraph.core.schema.JanusGraphManagement; +import org.janusgraph.graphdb.database.management.ManagementSystem; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.onap.aai.db.props.AAIProperties; +import org.onap.aai.dbmap.AAIGraph; +import org.onap.aai.edges.EdgeIngestor; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.introspection.ModelType; +import org.onap.aai.serialization.db.EdgeSerializer; +import org.onap.aai.migration.*; +import org.onap.aai.serialization.engines.TransactionalGraphEngine; +import org.onap.aai.setup.SchemaVersion; +import org.onap.aai.setup.SchemaVersions; + +import java.time.temporal.ChronoUnit; +import java.util.*; + +/** + * Remove old aai-uri index per + * https://github.com/JanusGraph/janusgraph/wiki/Indexing + */ + +@Enabled + +@MigrationPriority(500) +@MigrationDangerRating(1000) +public class UpdateAaiUriIndexMigration extends Migrator { + + private final SchemaVersion version; + private final ModelType introspectorFactoryType; + private GraphTraversalSource g; + private JanusGraphManagement graphMgmt; + private Status status = Status.SUCCESS; + + private String retiredName = AAIProperties.AAI_URI + "-RETIRED-" + System.currentTimeMillis(); + + /** + * Instantiates a new migrator. 
+ * + * @param engine + */ + public UpdateAaiUriIndexMigration(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions) throws AAIException { + super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions); + version = schemaVersions.getDefaultVersion(); + introspectorFactoryType = ModelType.MOXY; + loader = loaderFactory.createLoaderForVersion(introspectorFactoryType, version); + g = this.engine.asAdmin().getTraversalSource(); + this.engine.rollback(); + graphMgmt = engine.asAdmin().getManagementSystem(); + + } + + @Override + public Status getStatus() { + return status; + } + + @Override + public Optional<String[]> getAffectedNodeTypes() { + return Optional.empty(); + } + + @Override + public String getMigrationName() { + return "UpdateAaiUriIndex"; + } + + @Override + public void run() { + + // close all but current open titan instances + closeAllButCurrentInstances(); + + // get all indexes containing aai-uri + Set<IndexDetails> indexes = getIndexesWithAaiUri(); + logger.info("Found " + indexes.size() + " aai uri index."); + indexes.stream().map(s -> "\t" + s.getIndexName() + " : " + s.getPropertyName() + " : " + s.getStatus() ).forEach(System.out::println); + + renameAaiUriIndex(indexes); + + // remove all of the aai-uri indexes that are in the list + removeIndexes(indexes); + + //retire old property + verifyGraphManagementIsOpen(); + PropertyKey aaiUri = graphMgmt.getPropertyKey(AAIProperties.AAI_URI); + if (aaiUri != null) { + graphMgmt.changeName(aaiUri, retiredName); + } + graphMgmt.commit(); + + //remove all aai uri keys + logger.info("Remove old keys."); + dropAllKeyProperties(indexes); + + // add aai-uri unique index + logger.info("Create new unique aai-uri index"); + createUniqueAaiUriIndex(); + + + // change index status to ENABLED STATE + logger.info("Enable index"); + enableIndex(); + + this.engine.startTransaction(); + + logger.info("Checking and dropping retired properties."); + g = this.engine.asAdmin().getTraversalSource(); + g.V().has(retiredName).properties(retiredName).drop().iterate(); + logger.info("Done."); + } + + + protected void createUniqueAaiUriIndex() { + verifyGraphManagementIsOpen(); + // create new aaiuri property + PropertyKey aaiUriProperty = graphMgmt.getPropertyKey(AAIProperties.AAI_URI); + if (aaiUriProperty == null) { + logger.info("Creating new aai-uri property."); + aaiUriProperty = graphMgmt.makePropertyKey(AAIProperties.AAI_URI).dataType(String.class) + .cardinality(Cardinality.SINGLE).make(); + } + logger.info("Creating new aai-uri index."); + graphMgmt.buildIndex(AAIProperties.AAI_URI, Vertex.class).addKey(aaiUriProperty).unique().buildCompositeIndex(); + graphMgmt.commit(); + } + + private void dropAllKeyProperties(Set<IndexDetails> indexes) { + indexes.stream().map(e -> e.getPropertyName()).distinct().forEach(p -> { + verifyGraphManagementIsOpen(); + if (graphMgmt.getPropertyKey(p) != null) { + graphMgmt.getPropertyKey(p).remove(); + } + graphMgmt.commit(); + }); + } + + private void renameAaiUriIndex(Set<IndexDetails> indexes) { + verifyGraphManagementIsOpen(); + indexes.stream().filter(s -> s.getIndexName().equals(AAIProperties.AAI_URI)).forEach( s -> { + JanusGraphIndex index = graphMgmt.getGraphIndex(s.getIndexName()); + graphMgmt.changeName(index, retiredName); + s.setIndexName(retiredName); + }); + graphMgmt.commit(); + } + + private void removeIndexes(Set<IndexDetails> indexes) { + + for (IndexDetails index : indexes) { + 
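+			// JanusGraph will not drop an active index directly: each index is
+			// DISABLEd first, the DISABLED status is awaited, and only then is
+			// REMOVE_INDEX issued (per the JanusGraph index lifecycle).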
verifyGraphManagementIsOpen(); + + JanusGraphIndex aaiUriIndex = graphMgmt.getGraphIndex(index.getIndexName()); + + if (!index.getStatus().equals(SchemaStatus.DISABLED)) { + logger.info("Disabling index: " + index.getIndexName()); + logger.info("\tCurrent state: " + aaiUriIndex.getIndexStatus(graphMgmt.getPropertyKey(index.getPropertyName()))); + + graphMgmt.updateIndex(aaiUriIndex, SchemaAction.DISABLE_INDEX); + graphMgmt.commit(); + try { + ManagementSystem.awaitGraphIndexStatus(AAIGraph.getInstance().getGraph(), index.getIndexName()) + .timeout(10, ChronoUnit.MINUTES) + .status(SchemaStatus.DISABLED) + .call(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + verifyGraphManagementIsOpen(); + aaiUriIndex = graphMgmt.getGraphIndex(index.getIndexName()); + if (aaiUriIndex.getIndexStatus(graphMgmt.getPropertyKey(index.getPropertyName())).equals(SchemaStatus.DISABLED)) { + logger.info("Removing index: " + index.getIndexName()); + graphMgmt.updateIndex(aaiUriIndex, SchemaAction.REMOVE_INDEX); + graphMgmt.commit(); + } + if(graphMgmt.isOpen()) { + graphMgmt.commit(); + } + } + + } + + protected Set<IndexDetails> getIndexesWithAaiUri() { + verifyGraphManagementIsOpen(); + Set<IndexDetails> aaiUriIndexName = new HashSet<>(); + + Iterator<JanusGraphIndex> titanIndexes = graphMgmt.getGraphIndexes(Vertex.class).iterator(); + JanusGraphIndex titanIndex; + while (titanIndexes.hasNext()) { + titanIndex = titanIndexes.next(); + if (titanIndex.name().contains(AAIProperties.AAI_URI) && titanIndex.getFieldKeys().length > 0) { + logger.info("Found aai-uri index: " + titanIndex.name()); + aaiUriIndexName.add(new IndexDetails(titanIndex.name(), titanIndex.getIndexStatus(titanIndex.getFieldKeys()[0]), titanIndex.getFieldKeys()[0].name())); + } + } + graphMgmt.rollback(); + return aaiUriIndexName; + } + + private void closeAllButCurrentInstances() { + verifyGraphManagementIsOpen(); + logger.info("Closing all but current titan instances."); + graphMgmt.getOpenInstances().stream().filter(s -> !s.contains("(current)")).forEach(s -> { + logger.info("\t"+s); + graphMgmt.forceCloseInstance(s); + }); + graphMgmt.commit(); + } + + + private void verifyGraphManagementIsOpen() { + if (!graphMgmt.isOpen()) { + graphMgmt = this.engine.asAdmin().getManagementSystem(); + } + } + + private void enableIndex() { + verifyGraphManagementIsOpen(); + JanusGraphIndex aaiUriIndex = graphMgmt.getGraphIndex(AAIProperties.AAI_URI); + SchemaStatus schemaStatus = aaiUriIndex.getIndexStatus(graphMgmt.getPropertyKey(AAIProperties.AAI_URI)); + if (schemaStatus.equals(SchemaStatus.INSTALLED)) { + logger.info("Registering index: " + AAIProperties.AAI_URI); + logger.info("\tCurrent state: " + schemaStatus); + + graphMgmt.updateIndex(aaiUriIndex, SchemaAction.REGISTER_INDEX); + graphMgmt.commit(); + try { + ManagementSystem.awaitGraphIndexStatus(AAIGraph.getInstance().getGraph(), AAIProperties.AAI_URI) + .timeout(10, ChronoUnit.MINUTES) + .status(SchemaStatus.REGISTERED) + .call(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + verifyGraphManagementIsOpen(); + aaiUriIndex = graphMgmt.getGraphIndex(AAIProperties.AAI_URI); + schemaStatus = aaiUriIndex.getIndexStatus(graphMgmt.getPropertyKey(AAIProperties.AAI_URI)); + if (schemaStatus.equals(SchemaStatus.REGISTERED)) { + logger.info("Enabling index: " + AAIProperties.AAI_URI); + logger.info("\tCurrent state: " + schemaStatus); + + graphMgmt.updateIndex(aaiUriIndex, SchemaAction.ENABLE_INDEX); + graphMgmt.commit(); + try { + 
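+				// block (up to 10 minutes) until every instance reports the
+				// index as ENABLED before treating the rebuild as complete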
ManagementSystem.awaitGraphIndexStatus(AAIGraph.getInstance().getGraph(), AAIProperties.AAI_URI) + .timeout(10, ChronoUnit.MINUTES) + .status(SchemaStatus.ENABLED) + .call(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + verifyGraphManagementIsOpen(); + aaiUriIndex = graphMgmt.getGraphIndex(AAIProperties.AAI_URI); + schemaStatus = aaiUriIndex.getIndexStatus(graphMgmt.getPropertyKey(AAIProperties.AAI_URI)); + logger.info("Final state: " + schemaStatus); + graphMgmt.rollback(); + } + + private class IndexDetails { + private String indexName; + private SchemaStatus status; + private String propertyName; + + public IndexDetails(String indexName, SchemaStatus status, String propertyName) { + this.indexName = indexName; + this.status = status; + this.propertyName = propertyName; + } + + public String getIndexName() { + return indexName; + } + + public SchemaStatus getStatus() { + return status; + } + + public String getPropertyName() { + return propertyName; + } + + public void setIndexName(String indexName) { + this.indexName = indexName; + } + + public void setStatus(SchemaStatus status) { + this.status = status; + } + + public void setPropertyName(String propertyName) { + this.propertyName = propertyName; + } + } +} diff --git a/src/main/java/org/onap/aai/migration/v12/UriMigration.java b/src/main/java/org/onap/aai/migration/v12/UriMigration.java new file mode 100644 index 0000000..cb0926e --- /dev/null +++ b/src/main/java/org/onap/aai/migration/v12/UriMigration.java @@ -0,0 +1,180 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.migration.v12; + +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.onap.aai.db.props.AAIProperties; +import org.onap.aai.edges.EdgeIngestor; +import org.onap.aai.edges.enums.EdgeProperty; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.introspection.ModelType; +import org.onap.aai.serialization.db.EdgeSerializer; +import org.onap.aai.introspection.exceptions.AAIUnknownObjectException; +import org.onap.aai.migration.*; +import org.onap.aai.edges.enums.AAIDirection; +import org.onap.aai.serialization.db.DBSerializer; +import org.onap.aai.serialization.engines.TransactionalGraphEngine; +import org.onap.aai.setup.SchemaVersion; +import org.onap.aai.setup.SchemaVersions; +import org.springframework.web.util.UriUtils; + +import javax.ws.rs.core.UriBuilder; +import java.io.UnsupportedEncodingException; +import java.util.*; +import java.util.stream.Collectors; + +@Enabled + +@MigrationPriority(1000) +@MigrationDangerRating(1000) +public class UriMigration extends Migrator { + + private final SchemaVersion version; + private final ModelType introspectorFactoryType; + private GraphTraversalSource g; + + private Map<String, UriBuilder> nodeTypeToUri; + private Map<String, Set<String>> nodeTypeToKeys; + + protected Set<Object> seen = new HashSet<>(); + + /** + * Instantiates a new migrator. + * + * @param engine + */ + public UriMigration(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions) throws AAIException { + super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions); + version = schemaVersions.getDefaultVersion(); + introspectorFactoryType = ModelType.MOXY; + loader = loaderFactory.createLoaderForVersion(introspectorFactoryType, version); + g = this.engine.asAdmin().getTraversalSource(); + this.serializer = new DBSerializer(version, this.engine, introspectorFactoryType, this.getMigrationName()); + + } + + @Override + public void run() { + long start = System.currentTimeMillis(); + nodeTypeToUri = loader.getAllObjects().entrySet().stream().filter(e -> e.getValue().getGenericURI().contains("{")).collect( + Collectors.toMap( + e -> e.getKey(), + e -> UriBuilder.fromPath(e.getValue().getFullGenericURI().replaceAll("\\{"+ e.getKey() + "-", "{")) + )); + + nodeTypeToKeys = loader.getAllObjects().entrySet().stream().filter(e -> e.getValue().getGenericURI().contains("{")).collect( + Collectors.toMap( + e -> e.getKey(), + e -> e.getValue().getKeys() + )); + + Set<String> topLevelNodeTypes = loader.getAllObjects().entrySet().stream() + .filter(e -> e.getValue().isTopLevel()).map(Map.Entry::getKey) + .collect(Collectors.toSet()); + + logger.info("Top level count : " + topLevelNodeTypes.size()); + topLevelNodeTypes.stream().forEach(topLevelNodeType -> { + Set<Vertex> parentSet = g.V().has(AAIProperties.NODE_TYPE, topLevelNodeType).toSet(); + logger.info(topLevelNodeType + " : " + parentSet.size()); + try { + this.verifyOrAddUri("", parentSet); + } catch (AAIUnknownObjectException e) { + e.printStackTrace(); + } catch (AAIException e) { + e.printStackTrace(); + } catch (UnsupportedEncodingException e) { + e.printStackTrace(); + } + }); + 
logger.info("RUNTIME: " + (System.currentTimeMillis() - start)); + logger.info("NO URI: " + g.V().hasNot(AAIProperties.AAI_URI).count().next()); + logger.info("NUM VERTEXES SEEN: " + seen.size()); + seen = new HashSet<>(); + + } + + protected void verifyOrAddUri(String parentUri, Set<Vertex> vertexSet) throws UnsupportedEncodingException, AAIException { + String correctUri; + for (Vertex v : vertexSet) { + seen.add(v.id()); + //if there is an issue generating the uri catch, log and move on; + try { + correctUri = parentUri + this.getUriForVertex(v); + } catch (Exception e) { + logger.error("Vertex has issue generating uri " + e.getMessage() + "\n\t" + this.asString(v)); + continue; + } + try { + v.property(AAIProperties.AAI_URI, correctUri); + } catch (Exception e) { + logger.info(e.getMessage() + "\n\t" + this.asString(v)); + } + if (!v.property(AAIProperties.AAI_UUID).isPresent()) { + v.property(AAIProperties.AAI_UUID, UUID.randomUUID().toString()); + } + this.verifyOrAddUri(correctUri, getChildren(v)); + } + } + + protected Set<Vertex> getChildren(Vertex v) { + + Set<Vertex> children = g.V(v).bothE().not(__.has(EdgeProperty.CONTAINS.toString(), AAIDirection.NONE.toString())).otherV().toSet(); + + return children.stream().filter(child -> !seen.contains(child.id())).collect(Collectors.toSet()); + } + + protected String getUriForVertex(Vertex v) { + String aaiNodeType = v.property(AAIProperties.NODE_TYPE).value().toString(); + + + Map<String, String> parameters = this.nodeTypeToKeys.get(aaiNodeType).stream().collect(Collectors.toMap( + key -> key, + key -> encodeProp(v.property(key).value().toString()) + )); + + return this.nodeTypeToUri.get(aaiNodeType).buildFromEncodedMap(parameters).toString(); + } + + private static String encodeProp(String s) { + try { + return UriUtils.encode(s, "UTF-8"); + } catch (UnsupportedEncodingException e) { + return ""; + } + } + + @Override + public Status getStatus() { + return Status.SUCCESS; + } + + @Override + public Optional<String[]> getAffectedNodeTypes() { + return Optional.empty(); + } + + @Override + public String getMigrationName() { + return UriMigration.class.getSimpleName(); + } +} diff --git a/src/main/java/org/onap/aai/migration/v13/MigrateBooleanDefaultsToFalse.java b/src/main/java/org/onap/aai/migration/v13/MigrateBooleanDefaultsToFalse.java new file mode 100644 index 0000000..89a9459 --- /dev/null +++ b/src/main/java/org/onap/aai/migration/v13/MigrateBooleanDefaultsToFalse.java @@ -0,0 +1,114 @@ +/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.migration.v13;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+
+import org.onap.aai.edges.EdgeIngestor;
+import org.onap.aai.introspection.LoaderFactory;
+import org.onap.aai.serialization.db.EdgeSerializer;
+import org.onap.aai.serialization.engines.TransactionalGraphEngine;
+import org.onap.aai.migration.MigrationDangerRating;
+import org.onap.aai.migration.MigrationPriority;
+import org.onap.aai.migration.Status;
+import org.onap.aai.migration.ValueMigrator;
+import org.onap.aai.setup.SchemaVersions;
+
+
+@MigrationPriority(1)
+@MigrationDangerRating(1)
+public class MigrateBooleanDefaultsToFalse extends ValueMigrator {
+ protected static final String VNF_NODE_TYPE = "generic-vnf";
+ protected static final String VSERVER_NODE_TYPE = "vserver";
+ protected static final String VNFC_NODE_TYPE = "vnfc";
+ protected static final String L3NETWORK_NODE_TYPE = "l3-network";
+ protected static final String SUBNET_NODE_TYPE = "subnet";
+ protected static final String LINTERFACE_NODE_TYPE = "l-interface";
+ protected static final String VFMODULE_NODE_TYPE = "vf-module";
+ protected static final String VLAN_NODE_TYPE = "vlan";
+
+ public MigrateBooleanDefaultsToFalse(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions) {
+ super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions, setBooleanDefaultsToFalse(), false);
+
+ }
+
+ private static Map<String, Map> setBooleanDefaultsToFalse() {
+ Map<String, Map> map = new HashMap<>();
+ Map<String, Boolean> pair1 = new HashMap<>();
+ Map<String, Boolean> pair2 = new HashMap<>();
+ Map<String, Boolean> pair3 = new HashMap<>();
+ Map<String, Boolean> pair4 = new HashMap<>();
+ Map<String, Boolean> pair5 = new HashMap<>();
+ Map<String, Boolean> pair6 = new HashMap<>();
+
+ pair1.put("is-closed-loop-disabled", false);
+ map.put("generic-vnf", pair1);
+ map.put("vnfc", pair1);
+ map.put("vserver", pair1);
+
+ pair2.put("is-bound-to-vpn", false);
+ pair2.put("is-provider-network", false);
+ pair2.put("is-shared-network", false);
+ pair2.put("is-external-network", false);
+ map.put("l3-network", pair2);
+
+ pair3.put("dhcp-enabled", false);
+ map.put("subnet", pair3);
+
+ pair4.put("is-port-mirrored", false);
+ pair4.put("is-ip-unnumbered", false);
+ map.put("l-interface", pair4);
+
+ pair5.put("is-base-vf-module", false);
+ map.put("vf-module", pair5);
+
+ pair6.put("is-ip-unnumbered", false);
+ map.put("vlan", pair6);
+
+ return map;
+ }
+
+ @Override
+ public Status getStatus() {
+ return Status.SUCCESS;
+ }
+
+ @Override
+ public Optional<String[]> getAffectedNodeTypes() {
+ return Optional.of(new String[]{VNF_NODE_TYPE, VSERVER_NODE_TYPE, VNFC_NODE_TYPE, L3NETWORK_NODE_TYPE, SUBNET_NODE_TYPE, LINTERFACE_NODE_TYPE, VFMODULE_NODE_TYPE, VLAN_NODE_TYPE});
+ }
+
+ @Override
+ public String getMigrationName() {
+ return "MigrateBooleanDefaultsToFalse";
+ }
+
+}
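The defaults map built above is consumed by the ValueMigrator base class, which is not part of this changeset. A minimal, self-contained sketch of how such a node-type-to-defaults map can be applied with plain TinkerPop follows; it assumes (as the final false argument to super() suggests, though ValueMigrator itself is outside this diff) that existing property values are left untouched. The TinkerGraph usage and the DefaultsSketch name are illustrative only, not part of the migration.

import java.util.HashMap;
import java.util.Map;

import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.apache.tinkerpop.gremlin.structure.Vertex;
import org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerGraph;

public class DefaultsSketch {
    public static void main(String[] args) {
        GraphTraversalSource g = TinkerGraph.open().traversal();
        // one subnet missing the flag, one where an operator already set it
        g.addV().property("aai-node-type", "subnet").next();
        Vertex alreadySet = g.addV().property("aai-node-type", "subnet")
                .property("dhcp-enabled", true).next();

        // same shape as the map built by setBooleanDefaultsToFalse()
        Map<String, Map<String, Boolean>> defaults = new HashMap<>();
        Map<String, Boolean> subnetDefaults = new HashMap<>();
        subnetDefaults.put("dhcp-enabled", false);
        defaults.put("subnet", subnetDefaults);

        // set each default only where the property is entirely absent
        defaults.forEach((nodeType, props) ->
                props.forEach((prop, value) ->
                        g.V().has("aai-node-type", nodeType).hasNot(prop)
                                .forEachRemaining(v -> v.property(prop, value))));

        System.out.println(g.V().has("dhcp-enabled", false).count().next()); // 1
        System.out.println(alreadySet.value("dhcp-enabled").toString());     // true, untouched
    }
}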
\ No newline at end of file
diff --git a/src/main/java/org/onap/aai/migration/v13/MigrateInMaintDefaultToFalse.java b/src/main/java/org/onap/aai/migration/v13/MigrateInMaintDefaultToFalse.java
new file mode 100644
index 0000000..1773038
--- /dev/null
+++ b/src/main/java/org/onap/aai/migration/v13/MigrateInMaintDefaultToFalse.java
@@ -0,0 +1,98 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.migration.v13;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+
+import org.onap.aai.edges.EdgeIngestor;
+import org.onap.aai.introspection.LoaderFactory;
+import org.onap.aai.serialization.db.EdgeSerializer;
+import org.onap.aai.serialization.engines.TransactionalGraphEngine;
+import org.onap.aai.migration.MigrationDangerRating;
+import org.onap.aai.migration.MigrationPriority;
+import org.onap.aai.migration.Status;
+import org.onap.aai.migration.ValueMigrator;
+import org.onap.aai.setup.SchemaVersions;
+
+
+@MigrationPriority(1)
+@MigrationDangerRating(1)
+public class MigrateInMaintDefaultToFalse extends ValueMigrator {
+
+ protected static final String VNF_NODE_TYPE = "generic-vnf";
+ protected static final String LINTERFACE_NODE_TYPE = "l-interface";
+ protected static final String LAG_INTERFACE_NODE_TYPE = "lag-interface";
+ protected static final String LOGICAL_LINK_NODE_TYPE = "logical-link";
+ protected static final String PINTERFACE_NODE_TYPE = "p-interface";
+ protected static final String VLAN_NODE_TYPE = "vlan";
+ protected static final String VNFC_NODE_TYPE = "vnfc";
+ protected static final String VSERVER_NODE_TYPE = "vserver";
+ protected static final String PSERVER_NODE_TYPE = "pserver";
+ protected static final String PNF_NODE_TYPE = "pnf";
+ protected static final String NOS_SERVER_NODE_TYPE = "nos-server";
+
+ public MigrateInMaintDefaultToFalse(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions) {
+ super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions, setInMaintToFalse(), false);
+ }
+
+ private static Map<String, Map> setInMaintToFalse() {
+ Map<String, Map> map = new HashMap<>();
+ Map<String, Boolean> pair = new HashMap<>();
+
+ pair.put("in-maint", false);
+
+ map.put("generic-vnf", pair);
+ map.put("l-interface", pair);
+ map.put("lag-interface", pair);
+ map.put("logical-link", pair);
+ map.put("p-interface", pair);
+ map.put("vlan", pair);
+ map.put("vnfc", pair);
+ map.put("vserver", pair);
+ map.put("pserver", pair);
+ map.put("pnf", pair);
+ map.put("nos-server", pair);
+
+ return map;
+ }
+
+ @Override
+ public Status getStatus() {
+ return Status.SUCCESS;
+ }
+
+ @Override
+ public Optional<String[]> getAffectedNodeTypes() {
+ return Optional.of(new String[]{VNF_NODE_TYPE, LINTERFACE_NODE_TYPE, LAG_INTERFACE_NODE_TYPE, LOGICAL_LINK_NODE_TYPE, PINTERFACE_NODE_TYPE, VLAN_NODE_TYPE, VNFC_NODE_TYPE, VSERVER_NODE_TYPE, PSERVER_NODE_TYPE, PNF_NODE_TYPE, NOS_SERVER_NODE_TYPE});
+ }
+
+ @Override
+ public String getMigrationName() {
+ return "MigrateInMaintDefaultToFalse";
+ }
+
+}
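Every node type above shares the same mutable pair instance, so a later mutation of one entry would silently change the default for all eleven types at once. A slightly safer construction is sketched below; InMaintDefaults is a hypothetical helper, not part of this changeset, and it keeps the raw Map<String, Map> shape on the assumption that this is what the ValueMigrator constructor expects.

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

final class InMaintDefaults {
    private InMaintDefaults() {}

    // a single immutable default entry, shared safely by every node type
    static Map<String, Map> build() {
        Map<String, Boolean> inMaintFalse =
                Collections.unmodifiableMap(Collections.singletonMap("in-maint", Boolean.FALSE));
        Map<String, Map> defaults = new HashMap<>();
        for (String nodeType : Arrays.asList("generic-vnf", "l-interface", "lag-interface",
                "logical-link", "p-interface", "vlan", "vnfc", "vserver",
                "pserver", "pnf", "nos-server")) {
            defaults.put(nodeType, inMaintFalse);
        }
        return defaults;
    }
}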
\ No newline at end of file
diff --git a/src/main/java/org/onap/aai/migration/v13/MigrateInstanceGroupModelInvariantId.java b/src/main/java/org/onap/aai/migration/v13/MigrateInstanceGroupModelInvariantId.java
new file mode 100644
index 0000000..1244c59
--- /dev/null
+++ b/src/main/java/org/onap/aai/migration/v13/MigrateInstanceGroupModelInvariantId.java
@@ -0,0 +1,85 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.migration.v13;
+
+
+import org.janusgraph.core.Cardinality;
+import org.onap.aai.edges.EdgeIngestor;
+import org.onap.aai.introspection.LoaderFactory;
+import org.onap.aai.migration.Enabled;
+import org.onap.aai.migration.MigrationDangerRating;
+import org.onap.aai.migration.MigrationPriority;
+import org.onap.aai.migration.PropertyMigrator;
+import org.onap.aai.serialization.db.EdgeSerializer;
+import org.onap.aai.serialization.engines.TransactionalGraphEngine;
+import org.onap.aai.setup.SchemaVersions;
+
+import java.util.Optional;
+
+
+@MigrationPriority(19)
+@MigrationDangerRating(2)
+@Enabled
+public class MigrateInstanceGroupModelInvariantId extends PropertyMigrator {
+
+ private static final String INSTANCE_GROUP_NODE_TYPE = "instance-group";
+ private static final String INSTANCE_GROUP_MODEL_INVARIANT_ID_PROPERTY = "model-invariant-id";
+ private static final String INSTANCE_GROUP_MODEL_INVARIANT_ID_LOCAL_PROPERTY = "model-invariant-id-local";
+
+ public MigrateInstanceGroupModelInvariantId(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions) {
+ super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions);
+ this.initialize(INSTANCE_GROUP_MODEL_INVARIANT_ID_PROPERTY, INSTANCE_GROUP_MODEL_INVARIANT_ID_LOCAL_PROPERTY,
+ String.class, Cardinality.SINGLE);
+ }
+
+ @Override
+ public Optional<String[]> getAffectedNodeTypes() {
+ return Optional.of(new String[]{INSTANCE_GROUP_NODE_TYPE});
+ }
+
+ @Override
+ public String getMigrationName() {
+ return "MigrateInstanceGroupModelInvariantId";
+ }
+
+ @Override
+ public boolean isIndexed() {
+ return true;
+ }
+}
diff --git a/src/main/java/org/onap/aai/migration/v13/MigrateInstanceGroupModelVersionId.java b/src/main/java/org/onap/aai/migration/v13/MigrateInstanceGroupModelVersionId.java
new file mode 100644
index 0000000..64341ba
--- /dev/null
+++ b/src/main/java/org/onap/aai/migration/v13/MigrateInstanceGroupModelVersionId.java
@@ -0,0 +1,84 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.migration.v13;
+
+
+import java.util.Optional;
+import org.janusgraph.core.Cardinality;
+import org.onap.aai.edges.EdgeIngestor;
+import org.onap.aai.introspection.LoaderFactory;
+import org.onap.aai.migration.Enabled;
+import org.onap.aai.migration.MigrationDangerRating;
+import org.onap.aai.migration.MigrationPriority;
+import org.onap.aai.migration.PropertyMigrator;
+import org.onap.aai.serialization.db.EdgeSerializer;
+import org.onap.aai.serialization.engines.TransactionalGraphEngine;
+import org.onap.aai.setup.SchemaVersions;
+
+
+@MigrationPriority(19)
+@MigrationDangerRating(2)
+@Enabled
+public class MigrateInstanceGroupModelVersionId extends PropertyMigrator {
+
+ private static final String INSTANCE_GROUP_NODE_TYPE = "instance-group";
+ private static final String INSTANCE_GROUP_MODEL_VERSION_ID_PROPERTY = "model-version-id";
+ private static final String INSTANCE_GROUP_MODEL_VERSION_ID_LOCAL_PROPERTY = "model-version-id-local";
+
+ public MigrateInstanceGroupModelVersionId(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions) {
+ super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions);
+ this.initialize(INSTANCE_GROUP_MODEL_VERSION_ID_PROPERTY, INSTANCE_GROUP_MODEL_VERSION_ID_LOCAL_PROPERTY,
+ String.class, Cardinality.SINGLE);
+ }
+
+ @Override
+ public Optional<String[]> getAffectedNodeTypes() {
+ return Optional.of(new String[]{INSTANCE_GROUP_NODE_TYPE});
+ }
+
+ @Override
+ public String getMigrationName() {
+ return "MigrateInstanceGroupModelVersionId";
+ }
+
+ @Override
+ public boolean isIndexed() {
+ return true;
+ }
+}
diff --git a/src/main/java/org/onap/aai/migration/v13/MigrateInstanceGroupSubType.java b/src/main/java/org/onap/aai/migration/v13/MigrateInstanceGroupSubType.java
new file mode 100644
index 0000000..6823da8
--- /dev/null
+++ b/src/main/java/org/onap/aai/migration/v13/MigrateInstanceGroupSubType.java
@@ -0,0 +1,64 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.migration.v13;
+
+import java.util.Optional;
+
+import org.onap.aai.edges.EdgeIngestor;
+import org.onap.aai.introspection.LoaderFactory;
+import org.onap.aai.migration.Enabled;
+import org.onap.aai.migration.MigrationDangerRating;
+import org.onap.aai.migration.MigrationPriority;
+import org.onap.aai.migration.PropertyMigrator;
+import org.onap.aai.serialization.db.EdgeSerializer;
+import org.onap.aai.serialization.engines.TransactionalGraphEngine;
+
+import org.janusgraph.core.Cardinality;
+import org.onap.aai.setup.SchemaVersions;
+
+@MigrationPriority(20)
+@MigrationDangerRating(2)
+@Enabled
+public class MigrateInstanceGroupSubType extends PropertyMigrator {
+
+ protected static final String SUB_TYPE_PROPERTY = "sub-type";
+ protected static final String INSTANCE_GROUP_ROLE_PROPERTY = "instance-group-role";
+ protected static final String INSTANCE_GROUP_NODE_TYPE = "instance-group";
+
+ public MigrateInstanceGroupSubType(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions) {
+ super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions);
+ this.initialize(SUB_TYPE_PROPERTY, INSTANCE_GROUP_ROLE_PROPERTY, String.class, Cardinality.SINGLE);
+ }
+
+ @Override
+ public Optional<String[]> getAffectedNodeTypes() {
+ return Optional.of(new String[]{INSTANCE_GROUP_NODE_TYPE});
+ }
+
+ @Override
+ public String getMigrationName() {
+ return "MigrateInstanceGroupSubType";
+ }
+
+ @Override
+ public boolean isIndexed() {
+ return true;
+ }
+
+}
diff --git a/src/main/java/org/onap/aai/migration/v13/MigrateInstanceGroupType.java b/src/main/java/org/onap/aai/migration/v13/MigrateInstanceGroupType.java
new file mode 100644
index 0000000..f3cd669
--- /dev/null
+++ b/src/main/java/org/onap/aai/migration/v13/MigrateInstanceGroupType.java
@@ -0,0 +1,64 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.migration.v13;
+
+import java.util.Optional;
+
+import org.onap.aai.edges.EdgeIngestor;
+import org.onap.aai.introspection.LoaderFactory;
+import org.onap.aai.migration.Enabled;
+import org.onap.aai.migration.MigrationDangerRating;
+import org.onap.aai.migration.MigrationPriority;
+import org.onap.aai.migration.PropertyMigrator;
+import org.onap.aai.serialization.db.EdgeSerializer;
+import org.onap.aai.serialization.engines.TransactionalGraphEngine;
+
+import org.janusgraph.core.Cardinality;
+import org.onap.aai.setup.SchemaVersions;
+
+@MigrationPriority(20)
+@MigrationDangerRating(2)
+@Enabled
+public class MigrateInstanceGroupType extends PropertyMigrator {
+
+ protected static final String TYPE_PROPERTY = "type";
+ protected static final String INSTANCE_GROUP_TYPE_PROPERTY = "instance-group-type";
+ protected static final String INSTANCE_GROUP_NODE_TYPE = "instance-group";
+
+ public MigrateInstanceGroupType(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions) {
+ super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions);
+ this.initialize(TYPE_PROPERTY, INSTANCE_GROUP_TYPE_PROPERTY, String.class, Cardinality.SINGLE);
+ }
+
+ @Override
+ public Optional<String[]> getAffectedNodeTypes() {
+ return Optional.of(new String[]{INSTANCE_GROUP_NODE_TYPE});
+ }
+
+ @Override
+ public String getMigrationName() {
+ return "MigrateInstanceGroupType";
+ }
+
+ @Override
+ public boolean isIndexed() {
+ return true;
+ }
+
+}
diff --git a/src/main/java/org/onap/aai/migration/v13/MigrateModelVer.java b/src/main/java/org/onap/aai/migration/v13/MigrateModelVer.java
new file mode 100644
index 0000000..7bc9a7d
--- /dev/null
+++ b/src/main/java/org/onap/aai/migration/v13/MigrateModelVer.java
@@ -0,0 +1,229 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.migration.v13; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.onap.aai.db.props.AAIProperties; +import org.onap.aai.edges.EdgeIngestor; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.migration.Enabled; +import org.onap.aai.migration.MigrationDangerRating; +import org.onap.aai.migration.MigrationPriority; +import org.onap.aai.migration.Migrator; +import org.onap.aai.migration.Status; +import org.onap.aai.serialization.db.EdgeSerializer; +import org.onap.aai.serialization.engines.TransactionalGraphEngine; +import org.onap.aai.setup.SchemaVersions; + +@MigrationPriority(20) +@MigrationDangerRating(2) +@Enabled +public class MigrateModelVer extends Migrator{ + + protected static final String MODELINVARIANTID = "model-invariant-id"; + protected static final String MODELVERSIONID = "model-version-id"; + protected static final String MODELINVARIANTIDLOCAL = "model-invariant-id-local"; + protected static final String MODELVERSIONIDLOCAL = "model-version-id-local"; + + protected static final String MODELVER = "model-ver"; + protected static final String MODEL = "model"; + + protected static final String CONNECTOR_NODETYPE = "connector"; + protected static final String SERVICEINSTANCE_NODETYPE = "service-instance"; + protected static final String CONFIGURATION_NODETYPE = "configuration"; + protected static final String LOGICALLINK_NODETYPE = "logical-link"; + protected static final String VNFC_NODETYPE = "vnfc"; + protected static final String L3NETWORK_NODETYPE = "l3-network"; + protected static final String GENERICVNF_NODETYPE = "generic-vnf"; + protected static final String PNF_NODETYPE = "pnf"; + protected static final String VFMODULE_NODETYPE = "vf-module"; + protected static final String INSTANCEGROUP_NODETYPE = "instance-group"; + protected static final String ALLOTTEDRESOURCE_NODETYPE = "allotted-resource"; + protected static final String COLLECTION_NODETYPE = "collection"; + + private boolean success = true; + + private static Map<String, String> NODETYPEKEYMAP = new HashMap<String, String>(); + + static { + NODETYPEKEYMAP.put(CONNECTOR_NODETYPE,"resource-instance-id"); + NODETYPEKEYMAP.put(SERVICEINSTANCE_NODETYPE,"service-instance-id"); + NODETYPEKEYMAP.put(CONFIGURATION_NODETYPE, "configuration-id"); + NODETYPEKEYMAP.put(LOGICALLINK_NODETYPE,"link-name"); + NODETYPEKEYMAP.put(VNFC_NODETYPE, "vnfc-name"); + NODETYPEKEYMAP.put(L3NETWORK_NODETYPE, "network-id"); + NODETYPEKEYMAP.put(GENERICVNF_NODETYPE,"vnf-id"); + NODETYPEKEYMAP.put(PNF_NODETYPE,"pnf-name"); + NODETYPEKEYMAP.put(VFMODULE_NODETYPE,"vf-module-id"); + NODETYPEKEYMAP.put(INSTANCEGROUP_NODETYPE,"id"); + NODETYPEKEYMAP.put(ALLOTTEDRESOURCE_NODETYPE,"id"); + NODETYPEKEYMAP.put(COLLECTION_NODETYPE,"collection-id"); + } + + public MigrateModelVer(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions) { + super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions); + } + + @Override + public void run() { + + List<Vertex> vertextList = this.engine.asAdmin().getTraversalSource().V().has(AAIProperties.NODE_TYPE, CONNECTOR_NODETYPE).has(MODELINVARIANTIDLOCAL).has(MODELVERSIONIDLOCAL).toList(); + createEdges(vertextList, CONNECTOR_NODETYPE); + + 
vertextList = this.engine.asAdmin().getTraversalSource().V().has(AAIProperties.NODE_TYPE, SERVICEINSTANCE_NODETYPE).has(MODELINVARIANTIDLOCAL).has(MODELVERSIONIDLOCAL).toList(); + createEdges(vertextList, SERVICEINSTANCE_NODETYPE); + + vertextList = this.engine.asAdmin().getTraversalSource().V().has(AAIProperties.NODE_TYPE, CONFIGURATION_NODETYPE).has(MODELINVARIANTIDLOCAL).has(MODELVERSIONIDLOCAL).toList(); + createEdges(vertextList, CONFIGURATION_NODETYPE); + + vertextList = this.engine.asAdmin().getTraversalSource().V().has(AAIProperties.NODE_TYPE, LOGICALLINK_NODETYPE).has(MODELINVARIANTIDLOCAL).has(MODELVERSIONIDLOCAL).toList(); + createEdges(vertextList, LOGICALLINK_NODETYPE); + + vertextList = this.engine.asAdmin().getTraversalSource().V().has(AAIProperties.NODE_TYPE, VNFC_NODETYPE).has(MODELINVARIANTIDLOCAL).has(MODELVERSIONIDLOCAL).toList(); + createEdges(vertextList, VNFC_NODETYPE); + + vertextList = this.engine.asAdmin().getTraversalSource().V().has(AAIProperties.NODE_TYPE, L3NETWORK_NODETYPE).has(MODELINVARIANTIDLOCAL).has(MODELVERSIONIDLOCAL).toList(); + createEdges(vertextList, L3NETWORK_NODETYPE); + + vertextList = this.engine.asAdmin().getTraversalSource().V().has(AAIProperties.NODE_TYPE, GENERICVNF_NODETYPE).has(MODELINVARIANTIDLOCAL).has(MODELVERSIONIDLOCAL).toList(); + createEdges(vertextList, GENERICVNF_NODETYPE); + + vertextList = this.engine.asAdmin().getTraversalSource().V().has(AAIProperties.NODE_TYPE, PNF_NODETYPE).has(MODELINVARIANTIDLOCAL).has(MODELVERSIONIDLOCAL).toList(); + createEdges(vertextList, PNF_NODETYPE); + + vertextList = this.engine.asAdmin().getTraversalSource().V().has(AAIProperties.NODE_TYPE, VFMODULE_NODETYPE).has(MODELINVARIANTIDLOCAL).has(MODELVERSIONIDLOCAL).toList(); + createEdges(vertextList, VFMODULE_NODETYPE); + + vertextList = this.engine.asAdmin().getTraversalSource().V().has(AAIProperties.NODE_TYPE, INSTANCEGROUP_NODETYPE).has(MODELINVARIANTIDLOCAL).has(MODELVERSIONIDLOCAL).toList(); + createEdges(vertextList, INSTANCEGROUP_NODETYPE); + + vertextList = this.engine.asAdmin().getTraversalSource().V().has(AAIProperties.NODE_TYPE, ALLOTTEDRESOURCE_NODETYPE).has(MODELINVARIANTIDLOCAL).has(MODELVERSIONIDLOCAL).toList(); + createEdges(vertextList, ALLOTTEDRESOURCE_NODETYPE); + + vertextList = this.engine.asAdmin().getTraversalSource().V().has(AAIProperties.NODE_TYPE, COLLECTION_NODETYPE).has(MODELINVARIANTIDLOCAL).has(MODELVERSIONIDLOCAL).toList(); + createEdges(vertextList, COLLECTION_NODETYPE); + } + + private void createEdges(List<Vertex> sourceVertexList, String nodeTypeString) + { + int modelVerEdgeCount = 0; + int modelVerEdgeErrorCount = 0; + + logger.info("---------- Start Creating an Edge for " + nodeTypeString + " nodes with Model Invariant Id and Model Version Id to the model-ver ----------"); + Map<String, Vertex> modelVerUriVtxIdMap = new HashMap<String, Vertex>(); + for (Vertex vertex : sourceVertexList) { + String currentValueModelVersionID = null; + String currrentValueModelInvariantID = null; + try { + currentValueModelVersionID = getModelVersionIdNodeValue(vertex); + currrentValueModelInvariantID = getModelInvariantIdNodeValue(vertex); + + String uri = String.format("/service-design-and-creation/models/model/%s/model-vers/model-ver/%s", currrentValueModelInvariantID, currentValueModelVersionID); + String propertyKey = NODETYPEKEYMAP.get(nodeTypeString); + String propertyValue = vertex.value(propertyKey).toString(); + logger.info("Processing "+nodeTypeString+ " vertex with key "+vertex.value(propertyKey).toString()); + Vertex 
modelVerVertex = null; + + if (modelVerUriVtxIdMap.containsKey(uri)){ + modelVerVertex = modelVerUriVtxIdMap.get(uri); + } else { + List<Vertex> modelverList = this.engine.asAdmin().getTraversalSource().V().has(MODELINVARIANTID,currrentValueModelInvariantID).has(AAIProperties.NODE_TYPE, MODEL).in() + .has(AAIProperties.NODE_TYPE, "model-ver" ).has("aai-uri", uri).toList(); + if (modelverList != null && !modelverList.isEmpty()) { + modelVerVertex = modelverList.get(0); + modelVerUriVtxIdMap.put(uri, modelVerVertex); + } + } + + if (modelVerVertex != null && modelVerVertex.property("model-version-id").isPresent() ) { + boolean edgePresent = false; + //Check if edge already exists for each of the source vertex + List<Vertex> outVertexList = this.engine.asAdmin().getTraversalSource().V(modelVerVertex).in().has("aai-node-type", nodeTypeString).has(propertyKey, propertyValue).toList(); + Iterator<Vertex> vertexItr = outVertexList.iterator(); + if (outVertexList != null && !outVertexList.isEmpty() && vertexItr.hasNext()){ + logger.info("\t Edge already exists from " + nodeTypeString + " node to models-ver with model-invariant-id :" + currrentValueModelInvariantID + " and model-version-id :" + currentValueModelVersionID); + edgePresent = true; + continue; + } + // Build edge from vertex to modelVerVertex + if (!edgePresent) { + this.createPrivateEdge(vertex, modelVerVertex); + modelVerEdgeCount++; + } + } else + { + modelVerEdgeErrorCount++; + logger.info("\t" + MIGRATION_ERROR + "Unable to create edge. No model-ver vertex found with model-invariant-id :" + currrentValueModelInvariantID + " and model-version-id :" + currentValueModelVersionID); + + } + } catch (Exception e) { + success = false; + modelVerEdgeErrorCount++; + logger.error("\t" + MIGRATION_ERROR + "encountered exception from " + nodeTypeString + " node when trying to create edge to models-ver with model-invariant-id :" + currrentValueModelInvariantID + " and model-version-id :" + currentValueModelVersionID, e); + } + } + + logger.info ("\n \n ******* Summary " + nodeTypeString + " Nodes: Finished creating an Edge for " + nodeTypeString + " nodes with Model Invariant Id and Model Version Id to the model-ver Migration ********* \n"); + logger.info(MIGRATION_SUMMARY_COUNT+"Number of ModelVer edge created from " + nodeTypeString + " nodes: " + modelVerEdgeCount +"\n"); + logger.info(MIGRATION_SUMMARY_COUNT+"Number of ModelVer edge failed to create the edge from the " + nodeTypeString + " nodes due to error : "+ modelVerEdgeErrorCount +"\n"); + + + } + private String getModelInvariantIdNodeValue(Vertex vertex) { + String propertyValue = ""; + if(vertex != null && vertex.property(MODELINVARIANTIDLOCAL).isPresent()){ + propertyValue = vertex.value(MODELINVARIANTIDLOCAL).toString(); + } + return propertyValue; + } + + private String getModelVersionIdNodeValue(Vertex vertex) { + String propertyValue = ""; + if(vertex != null && vertex.property(MODELVERSIONIDLOCAL).isPresent()){ + propertyValue = vertex.value(MODELVERSIONIDLOCAL).toString(); + } + return propertyValue; + } + + @Override + public Status getStatus() { + if (success) { + return Status.SUCCESS; + } else { + return Status.FAILURE; + } + } + + @Override + public Optional<String[]> getAffectedNodeTypes() { + return Optional.of(new String[]{MODELVER}); + } + + @Override + public String getMigrationName() { + return "MigrateModelVer"; + } + +} diff --git a/src/main/java/org/onap/aai/migration/v13/MigratePserverAndPnfEquipType.java 
b/src/main/java/org/onap/aai/migration/v13/MigratePserverAndPnfEquipType.java
new file mode 100644
index 0000000..6788d7f
--- /dev/null
+++ b/src/main/java/org/onap/aai/migration/v13/MigratePserverAndPnfEquipType.java
@@ -0,0 +1,157 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.migration.v13;
+import java.util.List;
+import java.util.Optional;
+
+import org.apache.tinkerpop.gremlin.structure.Vertex;
+import org.onap.aai.db.props.AAIProperties;
+import org.onap.aai.edges.EdgeIngestor;
+import org.onap.aai.introspection.LoaderFactory;
+import org.onap.aai.migration.Enabled;
+import org.onap.aai.migration.MigrationDangerRating;
+import org.onap.aai.migration.MigrationPriority;
+import org.onap.aai.migration.Migrator;
+import org.onap.aai.migration.Status;
+import org.onap.aai.serialization.db.EdgeSerializer;
+import org.onap.aai.serialization.engines.TransactionalGraphEngine;
+import org.onap.aai.setup.SchemaVersions;
+
+@MigrationPriority(20)
+@MigrationDangerRating(2)
+@Enabled
+public class MigratePserverAndPnfEquipType extends Migrator{
+
+ protected static final String EQUIP_TYPE_PROPERTY = "equip-type";
+ protected static final String HOSTNAME_PROPERTY = "hostname";
+ protected static final String PNF_NAME_PROPERTY = "pnf-name";
+ protected static final String PNF_NODE_TYPE = "pnf";
+ protected static final String PSERVER_NODE_TYPE = "pserver";
+ private boolean success = true;
+
+ public MigratePserverAndPnfEquipType(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions) {
+ super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions);
+ }
+
+
+ @Override
+ public void run() {
+ int pserverCount = 0;
+ int pnfCount = 0;
+ int pserverErrorCount = 0;
+ int pnfErrorCount = 0;
+ logger.info("---------- Start Updating equip-type for Pserver and Pnf ----------");
+
+ List<Vertex> pserverList = this.engine.asAdmin().getTraversalSource().V().has(AAIProperties.NODE_TYPE, PSERVER_NODE_TYPE).toList();
+ List<Vertex> pnfList = this.engine.asAdmin().getTraversalSource().V().has(AAIProperties.NODE_TYPE, PNF_NODE_TYPE).toList();
+
+ for (Vertex vertex : pserverList) {
+ String currentValueOfEquipType = null;
+ String hostName = null;
+ try {
+ currentValueOfEquipType = getEquipTypeNodeValue(vertex);
+ hostName = getHostNameNodeValue(vertex);
+ if ("Server".equals(currentValueOfEquipType) || "server".equals(currentValueOfEquipType)) {
+ vertex.property(EQUIP_TYPE_PROPERTY, "SERVER");
+ this.touchVertexProperties(vertex, false);
+ logger.info("Changed pserver equip-type from " + currentValueOfEquipType + " to SERVER for hostname: " + hostName);
+ pserverCount++;
+ }
+ } catch (Exception e) {
+ success = false;
+ pserverErrorCount++;
+ logger.error(MIGRATION_ERROR + "encountered exception for equip-type: " + currentValueOfEquipType + " with hostname: " + hostName, e);
+ }
+ }
+
+ for (Vertex vertex : pnfList) {
+ String currentValueOfEquipType = null;
+ String pnfName = null;
+ try {
+ currentValueOfEquipType = getEquipTypeNodeValue(vertex);
+ pnfName = getPnfNameNodeValue(vertex);
+ if ("Switch".equals(currentValueOfEquipType) || "switch".equals(currentValueOfEquipType)) {
+ vertex.property(EQUIP_TYPE_PROPERTY, "SWITCH");
+ this.touchVertexProperties(vertex, false);
+ logger.info("Changed pnf equip-type from " + currentValueOfEquipType + " to SWITCH for pnf-name: " + pnfName);
+ pnfCount++;
+ }
+
+ } catch (Exception e) {
+ success = false;
+ pnfErrorCount++;
+ logger.error(MIGRATION_ERROR + "encountered exception for equip-type: " + currentValueOfEquipType + " with pnf-name: " + pnfName, e);
+ }
+ }
+
+ logger.info("\n\n ******* Final Summary: Updated equip-type for Pserver and Pnf Migration ********* \n");
+ logger.info(MIGRATION_SUMMARY_COUNT + "Number of pservers updated: " + pserverCount + "\n");
+ logger.info(MIGRATION_SUMMARY_COUNT + "Number of pservers that failed to update due to errors: " + pserverErrorCount + "\n");
+
+ logger.info(MIGRATION_SUMMARY_COUNT + "Number of pnfs updated: " + pnfCount + "\n");
+ logger.info(MIGRATION_SUMMARY_COUNT + "Number of pnfs that failed to update due to errors: " + pnfErrorCount + "\n");
+
+ }
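The two scans above load every pserver and pnf into memory and filter in Java. If volume ever becomes a concern, the case check could be pushed into the traversal itself with a within predicate. The sketch below uses only the standard TinkerPop API; EquipTypeScan is a hypothetical helper, not part of the migration.

import java.util.List;

import org.apache.tinkerpop.gremlin.process.traversal.P;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.apache.tinkerpop.gremlin.structure.Vertex;

final class EquipTypeScan {
    private EquipTypeScan() {}

    // pull back only the pservers whose equip-type actually needs normalizing
    static List<Vertex> pserversToFix(GraphTraversalSource g) {
        return g.V()
                .has("aai-node-type", "pserver")
                .has("equip-type", P.within("Server", "server"))
                .toList();
    }
}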
+
+ private String getEquipTypeNodeValue(Vertex vertex) {
+ String propertyValue = "";
+ if(vertex != null && vertex.property(EQUIP_TYPE_PROPERTY).isPresent()){
+ propertyValue = vertex.property(EQUIP_TYPE_PROPERTY).value().toString();
+ }
+ return propertyValue;
+ }
+
+ private String getHostNameNodeValue(Vertex vertex) {
+ String propertyValue = "";
+ if(vertex != null && vertex.property(HOSTNAME_PROPERTY).isPresent()){
+ propertyValue = vertex.property(HOSTNAME_PROPERTY).value().toString();
+ }
+ return propertyValue;
+ }
+
+ private String getPnfNameNodeValue(Vertex vertex) {
+ String propertyValue = "";
+ if(vertex != null && vertex.property(PNF_NAME_PROPERTY).isPresent()){
+ propertyValue = vertex.property(PNF_NAME_PROPERTY).value().toString();
+ }
+ return propertyValue;
+ }
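The three getters above are identical except for the property key. If the pattern spreads to more properties, they could collapse into a single null-safe helper along these lines; this is an optional refactor sketch, not applied in this changeset, and VertexProps is a hypothetical name.

import org.apache.tinkerpop.gremlin.structure.Vertex;

final class VertexProps {
    private VertexProps() {}

    // one null-safe string getter instead of three near-identical copies
    static String stringValue(Vertex vertex, String key) {
        if (vertex != null && vertex.property(key).isPresent()) {
            return vertex.property(key).value().toString();
        }
        return "";
    }
}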
+
+ @Override
+ public Status getStatus() {
+ if (success) {
+ return Status.SUCCESS;
+ } else {
+ return Status.FAILURE;
+ }
+ }
+
+ @Override
+ public Optional<String[]> getAffectedNodeTypes() {
+ return Optional.of(new String[]{PSERVER_NODE_TYPE,PNF_NODE_TYPE});
+ }
+
+ @Override
+ public String getMigrationName() {
+ return "MigratePserverAndPnfEquipType";
+ }
+
+}
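Because this migration rewrites data in place, a cheap in-memory check is worth running before pointing it at a real JanusGraph. The sketch below exercises only the equivalent normalization rule against a TinkerGraph, since constructing MigratePserverAndPnfEquipType itself requires the full engine and loader wiring; the class name and hostname values are made up for the example.

import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerGraph;

public class EquipTypeSmokeTest {
    public static void main(String[] args) {
        GraphTraversalSource g = TinkerGraph.open().traversal();
        g.addV().property("aai-node-type", "pserver")
                .property("hostname", "h1").property("equip-type", "server").next();
        g.addV().property("aai-node-type", "pserver")
                .property("hostname", "h2").property("equip-type", "BladeServer").next();

        // the same rule as the migration: only the exact spellings "Server"/"server" change
        g.V().has("aai-node-type", "pserver").forEachRemaining(v -> {
            Object current = v.property("equip-type").orElse("");
            if ("Server".equals(current) || "server".equals(current)) {
                v.property("equip-type", "SERVER");
            }
        });

        System.out.println(g.V().has("hostname", "h1").values("equip-type").next()); // SERVER
        System.out.println(g.V().has("hostname", "h2").values("equip-type").next()); // BladeServer, untouched
    }
}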
diff --git a/src/main/java/org/onap/aai/migration/v13/MigrateVnfcModelInvariantId.java b/src/main/java/org/onap/aai/migration/v13/MigrateVnfcModelInvariantId.java
new file mode 100644
index 0000000..a643842
--- /dev/null
+++ b/src/main/java/org/onap/aai/migration/v13/MigrateVnfcModelInvariantId.java
@@ -0,0 +1,84 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.migration.v13;
+
+
+import org.janusgraph.core.Cardinality;
+import org.onap.aai.edges.EdgeIngestor;
+import org.onap.aai.introspection.LoaderFactory;
+import org.onap.aai.migration.Enabled;
+import org.onap.aai.migration.MigrationDangerRating;
+import org.onap.aai.migration.MigrationPriority;
+import org.onap.aai.migration.PropertyMigrator;
+import org.onap.aai.serialization.db.EdgeSerializer;
+import org.onap.aai.serialization.engines.TransactionalGraphEngine;
+import org.onap.aai.setup.SchemaVersions;
+
+import java.util.Optional;
+
+
+@MigrationPriority(19)
+@MigrationDangerRating(2)
+@Enabled
+public class MigrateVnfcModelInvariantId extends PropertyMigrator {
+
+ private static final String VNFC_NODE_TYPE = "vnfc";
+ private static final String VNFC_MODEL_INVARIANT_ID_PROPERTY = "model-invariant-id";
+ private static final String VNFC_MODEL_INVARIANT_ID_LOCAL_PROPERTY = "model-invariant-id-local";
+
+ public MigrateVnfcModelInvariantId(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions) {
+ super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions);
+ this.initialize(VNFC_MODEL_INVARIANT_ID_PROPERTY, VNFC_MODEL_INVARIANT_ID_LOCAL_PROPERTY, String.class, Cardinality.SINGLE);
+ }
+
+ @Override
+ public Optional<String[]> getAffectedNodeTypes() {
+ return Optional.of(new String[]{VNFC_NODE_TYPE});
+ }
+
+ @Override
+ public String getMigrationName() {
+ return "MigrateVnfcModelInvariantId";
+ }
+
+ @Override
+ public boolean isIndexed() {
+ return true;
+ }
+}
diff --git a/src/main/java/org/onap/aai/migration/v13/MigrateVnfcModelVersionId.java b/src/main/java/org/onap/aai/migration/v13/MigrateVnfcModelVersionId.java
new file mode 100644
index 0000000..13cdb80
--- /dev/null
+++ b/src/main/java/org/onap/aai/migration/v13/MigrateVnfcModelVersionId.java
@@ -0,0 +1,83 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.migration.v13;
+
+
+import java.util.Optional;
+import org.janusgraph.core.Cardinality;
+import org.onap.aai.edges.EdgeIngestor;
+import org.onap.aai.introspection.LoaderFactory;
+import org.onap.aai.migration.Enabled;
+import org.onap.aai.migration.MigrationDangerRating;
+import org.onap.aai.migration.MigrationPriority;
+import org.onap.aai.migration.PropertyMigrator;
+import org.onap.aai.serialization.db.EdgeSerializer;
+import org.onap.aai.serialization.engines.TransactionalGraphEngine;
+import org.onap.aai.setup.SchemaVersions;
+
+
+@MigrationPriority(19)
+@MigrationDangerRating(2)
+@Enabled
+public class MigrateVnfcModelVersionId extends PropertyMigrator {
+
+ private static final String VNFC_NODE_TYPE = "vnfc";
+ private static final String VNFC_MODEL_VERSION_ID_PROPERTY = "model-version-id";
+ private static final String VNFC_MODEL_VERSION_ID_LOCAL_PROPERTY = "model-version-id-local";
+
+ public MigrateVnfcModelVersionId(TransactionalGraphEngine engine, LoaderFactory loaderFactory, EdgeIngestor edgeIngestor, EdgeSerializer edgeSerializer, SchemaVersions schemaVersions) {
+ super(engine, loaderFactory, edgeIngestor, edgeSerializer, schemaVersions);
+ this.initialize(VNFC_MODEL_VERSION_ID_PROPERTY, VNFC_MODEL_VERSION_ID_LOCAL_PROPERTY, String.class, Cardinality.SINGLE);
+ }
+
+ @Override
+ public Optional<String[]> getAffectedNodeTypes() {
+ return Optional.of(new String[]{VNFC_NODE_TYPE});
+ }
+
+ @Override
+ public String getMigrationName() {
+ return "MigrateVnfcModelVersionId";
+ }
+
+ @Override
+ public boolean isIndexed() {
+ return true;
+ }
+}
diff --git a/src/main/java/org/onap/aai/rest/ExceptionHandler.java b/src/main/java/org/onap/aai/rest/ExceptionHandler.java
new file mode 100644
index 0000000..14c45da
--- /dev/null
+++ b/src/main/java/org/onap/aai/rest/ExceptionHandler.java
@@ -0,0 +1,127 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.rest; + +import com.fasterxml.jackson.core.JsonParseException; +import com.fasterxml.jackson.databind.JsonMappingException; +import com.sun.istack.SAXParseException2; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.logging.ErrorLogHelper; + +import javax.servlet.http.HttpServletRequest; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import javax.ws.rs.ext.ExceptionMapper; +import javax.ws.rs.ext.Provider; +import java.util.ArrayList; +import java.util.List; + +/** + * The Class ExceptionHandler. + */ +@Provider +public class ExceptionHandler implements ExceptionMapper<Exception> { + + @Context + private HttpServletRequest request; + + @Context + private HttpHeaders headers; + + /** + * @{inheritDoc} + */ + @Override + public Response toResponse(Exception exception) { + + Response response = null; + ArrayList<String> templateVars = new ArrayList<String>(); + + //the general case is that cxf will give us a WebApplicationException + //with a linked exception + if (exception instanceof WebApplicationException) { + WebApplicationException e = (WebApplicationException) exception; + if (e.getCause() != null) { + if (e.getCause() instanceof SAXParseException2) { + templateVars.add("UnmarshalException"); + AAIException ex = new AAIException("AAI_4007", exception); + response = Response + .status(400) + .entity(ErrorLogHelper.getRESTAPIErrorResponse(headers.getAcceptableMediaTypes(), ex, templateVars)) + .build(); + } + } + } else if (exception instanceof JsonParseException) { + //jackson does it differently so we get the direct JsonParseException + templateVars.add("JsonParseException"); + AAIException ex = new AAIException("AAI_4007", exception); + response = Response + .status(400) + .entity(ErrorLogHelper.getRESTAPIErrorResponse(headers.getAcceptableMediaTypes(), ex, templateVars)) + .build(); + } else if (exception instanceof JsonMappingException) { + //jackson does it differently so we get the direct JsonParseException + templateVars.add("JsonMappingException"); + AAIException ex = new AAIException("AAI_4007", exception); + response = Response + .status(400) + .entity(ErrorLogHelper.getRESTAPIErrorResponse(headers.getAcceptableMediaTypes(), ex, templateVars)) + .build(); + } + + // it didn't get set above, we wrap a general fault here + if (response == null) { + + Exception actual_e = exception; + if (exception instanceof WebApplicationException) { + WebApplicationException e = (WebApplicationException) exception; + response = e.getResponse(); + } else { + templateVars.add(request.getMethod()); + templateVars.add("unknown"); + AAIException ex = new AAIException("AAI_4000", actual_e); + List<MediaType> mediaTypes = headers.getAcceptableMediaTypes(); + int setError = 0; + + for (MediaType mediaType : mediaTypes) { + if (MediaType.APPLICATION_XML_TYPE.isCompatible(mediaType)) { + response = Response + .status(400) + .type(MediaType.APPLICATION_XML_TYPE) + .entity(ErrorLogHelper.getRESTAPIErrorResponse(headers.getAcceptableMediaTypes(), ex, templateVars)) + .build(); + setError = 1; + } + } + if (setError == 0) { + response = Response + .status(400) + .type(MediaType.APPLICATION_JSON_TYPE) + .entity(ErrorLogHelper.getRESTAPIErrorResponse(headers.getAcceptableMediaTypes(), ex, templateVars)) + .build(); + } + } + } + return 
response; + } +} diff --git a/src/main/java/org/onap/aai/rest/QueryConsumer.java b/src/main/java/org/onap/aai/rest/QueryConsumer.java new file mode 100644 index 0000000..85665da --- /dev/null +++ b/src/main/java/org/onap/aai/rest/QueryConsumer.java @@ -0,0 +1,217 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.rest; + +import com.att.eelf.configuration.EELFLogger; +import com.att.eelf.configuration.EELFManager; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; +import org.onap.aai.concurrent.AaiCallable; +import org.onap.aai.dbmap.DBConnectionType; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.introspection.ModelType; +import org.onap.aai.rest.dsl.DslQueryProcessor; +import org.onap.aai.logging.LoggingContext; +import org.onap.aai.logging.StopWatch; +import org.onap.aai.rest.db.HttpEntry; +import org.onap.aai.rest.search.GenericQueryProcessor; +import org.onap.aai.rest.search.QueryProcessorType; +import org.onap.aai.restcore.HttpMethod; +import org.onap.aai.restcore.RESTAPI; +import org.onap.aai.serialization.db.DBSerializer; +import org.onap.aai.serialization.engines.TransactionalGraphEngine; +import org.onap.aai.serialization.queryformats.Format; +import org.onap.aai.serialization.queryformats.FormatFactory; +import org.onap.aai.serialization.queryformats.Formatter; +import org.onap.aai.serialization.queryformats.SubGraphStyle; +import org.onap.aai.setup.SchemaVersion; +import org.onap.aai.setup.SchemaVersions; +import org.onap.aai.util.AAIConstants; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.stereotype.Component; + +import javax.servlet.http.HttpServletRequest; +import javax.ws.rs.*; +import javax.ws.rs.core.*; +import javax.ws.rs.core.Response.Status; +import java.util.List; +import java.util.concurrent.TimeUnit; + +@Component +@Path("{version: v[1-9][0-9]*|latest}/dbquery") +public class QueryConsumer extends RESTAPI { + + /** The introspector factory type. 
*/ + private ModelType introspectorFactoryType = ModelType.MOXY; + + private QueryProcessorType processorType = QueryProcessorType.LOCAL_GROOVY; + + private static final String TARGET_ENTITY = "DB"; + private static final EELFLogger LOGGER = EELFManager.getInstance().getLogger(QueryConsumer.class); + + private HttpEntry traversalUriHttpEntry; + + private DslQueryProcessor dslQueryProcessor; + + private SchemaVersions schemaVersions; + + private String basePath; + + @Autowired + public QueryConsumer( + HttpEntry traversalUriHttpEntry, + DslQueryProcessor dslQueryProcessor, + SchemaVersions schemaVersions, + @Value("${schema.uri.base.path}") String basePath + ){ + this.traversalUriHttpEntry = traversalUriHttpEntry; + this.dslQueryProcessor = dslQueryProcessor; + this.basePath = basePath; + this.schemaVersions = schemaVersions; + } + + + @PUT + @Consumes({ MediaType.APPLICATION_JSON}) + @Produces({ MediaType.APPLICATION_JSON}) + public Response executeQuery(String content, @PathParam("version")String versionParam, @PathParam("uri") @Encoded String uri, @DefaultValue("graphson") @QueryParam("format") String queryFormat,@DefaultValue("no_op") @QueryParam("subgraph") String subgraph, @Context HttpHeaders headers, @Context UriInfo info, @Context HttpServletRequest req){ + return runner(AAIConstants.AAI_GRAPHADMIN_TIMEOUT_ENABLED, + AAIConstants.AAI_GRAPHADMIN_TIMEOUT_APP, + AAIConstants.AAI_GRAPHADMIN_TIMEOUT_LIMIT, + headers, + info, + HttpMethod.GET, + new AaiCallable<Response>() { + @Override + public Response process() { + return processExecuteQuery(content, versionParam, uri, queryFormat, subgraph, headers, info, req); + } + } + ); + } + + public Response processExecuteQuery(String content, @PathParam("version")String versionParam, @PathParam("uri") @Encoded String uri, @DefaultValue("graphson") @QueryParam("format") String queryFormat,@DefaultValue("no_op") @QueryParam("subgraph") String subgraph, @Context HttpHeaders headers, @Context UriInfo info, @Context HttpServletRequest req) { + + String methodName = "executeQuery"; + String sourceOfTruth = headers.getRequestHeaders().getFirst("X-FromAppId"); + String realTime = headers.getRequestHeaders().getFirst("Real-Time"); + String queryProcessor = headers.getRequestHeaders().getFirst("QueryProcessor"); + QueryProcessorType processorType = this.processorType; + Response response = null; + TransactionalGraphEngine dbEngine = null; + try { + LoggingContext.save(); + this.checkQueryParams(info.getQueryParameters()); + Format format = Format.getFormat(queryFormat); + if (queryProcessor != null) { + processorType = QueryProcessorType.valueOf(queryProcessor); + } + SubGraphStyle subGraphStyle = SubGraphStyle.valueOf(subgraph); + JsonParser parser = new JsonParser(); + + JsonObject input = parser.parse(content).getAsJsonObject(); + + JsonElement gremlinElement = input.get("gremlin"); + JsonElement dslElement = input.get("dsl"); + String queryURI = ""; + String gremlin = ""; + String dsl = ""; + + SchemaVersion version = new SchemaVersion(versionParam); + DBConnectionType type = this.determineConnectionType(sourceOfTruth, realTime); + traversalUriHttpEntry.setHttpEntryProperties(version, type); + dbEngine = traversalUriHttpEntry.getDbEngine(); + + if (gremlinElement != null) { + gremlin = gremlinElement.getAsString(); + } + if (dslElement != null) { + dsl = dslElement.getAsString(); + } + GenericQueryProcessor processor = null; + + LoggingContext.targetEntity(TARGET_ENTITY); + LoggingContext.targetServiceName(methodName); + LoggingContext.startTime(); 
+			StopWatch.conditionalStart();
+
+			if (!dsl.equals("")) {
+				processor = new GenericQueryProcessor.Builder(dbEngine)
+						.queryFrom(dsl, "dsl")
+						.queryProcessor(dslQueryProcessor)
+						.processWith(processorType).create();
+			} else {
+				processor = new GenericQueryProcessor.Builder(dbEngine)
+						.queryFrom(gremlin, "gremlin")
+						.processWith(processorType).create();
+			}
+
+			String result = "";
+			List<Object> vertices = processor.execute(subGraphStyle);
+
+			DBSerializer serializer = new DBSerializer(version, dbEngine, introspectorFactoryType, sourceOfTruth);
+			FormatFactory ff = new FormatFactory(traversalUriHttpEntry.getLoader(), serializer, schemaVersions, basePath);
+
+			Formatter formatter = ff.get(format, info.getQueryParameters());
+
+			result = formatter.output(vertices).toString();
+
+			double msecs = StopWatch.stopIfStarted();
+			LoggingContext.elapsedTime((long) msecs, TimeUnit.MILLISECONDS);
+			LoggingContext.successStatusFields();
+			LOGGER.info("Completed");
+
+			response = Response.status(Status.OK)
+					.type(MediaType.APPLICATION_JSON)
+					.entity(result).build();
+
+		} catch (AAIException e) {
+			response = consumerExceptionResponseGenerator(headers, info, HttpMethod.GET, e);
+		} catch (Exception e) {
+			AAIException ex = new AAIException("AAI_4000", e);
+			response = consumerExceptionResponseGenerator(headers, info, HttpMethod.GET, ex);
+		} finally {
+			LoggingContext.restoreIfPossible();
+			LoggingContext.successStatusFields();
+			if (dbEngine != null) {
+				dbEngine.rollback();
+			}
+		}
+
+		return response;
+	}
+
+	public void checkQueryParams(MultivaluedMap<String, String> params) throws AAIException {
+		// only a depth of 0 or 1 is accepted here; anything deeper is rejected
+		if (params.containsKey("depth") && params.getFirst("depth").matches("\\d+")) {
+			String depth = params.getFirst("depth");
+			int i = Integer.parseInt(depth);
+			if (i > 1) {
+				throw new AAIException("AAI_3303");
+			}
+		}
+	}
+
+}
diff --git a/src/main/java/org/onap/aai/rest/dsl/DslListener.java b/src/main/java/org/onap/aai/rest/dsl/DslListener.java new file mode 100644 index 0000000..e41a946 --- /dev/null +++ b/src/main/java/org/onap/aai/rest/dsl/DslListener.java @@ -0,0 +1,314 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.rest.dsl;
+
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+import org.antlr.v4.runtime.tree.TerminalNode;
+import org.onap.aai.AAIDslBaseListener;
+import org.onap.aai.AAIDslParser;
+import org.onap.aai.edges.EdgeIngestor;
+import org.onap.aai.edges.EdgeRule;
+import org.onap.aai.edges.EdgeRuleQuery;
+import org.onap.aai.edges.exceptions.AmbiguousRuleChoiceException;
+import org.onap.aai.edges.exceptions.EdgeRuleNotFoundException;
+import org.springframework.beans.factory.annotation.Autowired;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * The Class DslListener.
+ */
+public class DslListener extends AAIDslBaseListener {
+
+    private static final EELFLogger LOGGER = EELFManager.getInstance().getLogger(DslQueryProcessor.class);
+    private final EdgeIngestor edgeRules;
+
+    // TODO: use a StringBuilder to build the query rather than concatenating strings
+    String query = "";
+
+    Map<Integer, String> unionMap = new HashMap<>();
+    Map<String, String> flags = new HashMap<>();
+
+    String currentNode = "";
+    String prevsNode = "";
+    int commas = 0;
+
+    int unionKey = 0;
+    int unionMembers = 0;
+    boolean isUnionBeg = false;
+    boolean isUnionTraversal = false;
+
+    boolean isTraversal = false;
+    boolean isWhereTraversal = false;
+    String whereTraversalNode = "";
+
+    String limitQuery = "";
+    boolean isNot = false;
+
+    /**
+     * Instantiates a new DslListener.
+     */
+    @Autowired
+    public DslListener(EdgeIngestor edgeIngestor) {
+        this.edgeRules = edgeIngestor;
+    }
+
+    @Override
+    public void enterAaiquery(AAIDslParser.AaiqueryContext ctx) {
+        query += "builder";
+    }
+
+    @Override
+    public void enterDslStatement(AAIDslParser.DslStatementContext ctx) {
+        /*
+         * This block of code is entered for every query statement
+         */
+        if (isUnionBeg) {
+            isUnionBeg = false;
+            isUnionTraversal = true;
+        } else if (unionMembers > 0) {
+            unionMembers--;
+            query += ",builder.newInstance()";
+            isUnionTraversal = true;
+        }
+    }
+
+    @Override
+    public void exitDslStatement(AAIDslParser.DslStatementContext ctx) {
+        // Nothing to be done here for now
+    }
+
+    @Override
+    public void exitAaiquery(AAIDslParser.AaiqueryContext ctx) {
+        /*
+         * dedup is applied by default to all queries; if the query has a
+         * limit clause, append that as well
+         */
+        query += ".cap('x').unfold().dedup()" + limitQuery;
+    }
+
+    /*
+     * TODO: the step contexts are not inherited from a single parent in
+     * AAIDslParser; need to find a way to do that
+     */
+    @Override
+    public void enterSingleNodeStep(AAIDslParser.SingleNodeStepContext ctx) {
+        prevsNode = currentNode;
+        currentNode = ctx.NODE().getText();
+
+        this.generateQuery();
+        if (ctx.STORE() != null && ctx.STORE().getText().equals("*")) {
+            flags.put(currentNode, "store");
+        }
+    }
+
+    @Override
+    public void enterSingleQueryStep(AAIDslParser.SingleQueryStepContext ctx) {
+        prevsNode = currentNode;
+        currentNode = ctx.NODE().getText();
+        this.generateQuery();
+
+        if (ctx.STORE() != null && ctx.STORE().getText().equals("*")) {
+            flags.put(currentNode, "store");
+        }
+    }
+
+    @Override
+    public void enterMultiQueryStep(AAIDslParser.MultiQueryStepContext ctx) {
+        prevsNode = currentNode;
+        currentNode = ctx.NODE().getText();
+        this.generateQuery();
+
+        if (ctx.STORE() != null && ctx.STORE().getText().equals("*")) {
+            flags.put(currentNode, "store");
+        }
+    }
+
+    /*
+     * Generates the QueryBuilder syntax for the dsl query
+     */
+    private void generateQuery() {
+        String edgeType = "";
+
+        if (isUnionTraversal || isTraversal || isWhereTraversal) {
+            String previousNode = prevsNode;
+            if (isUnionTraversal) {
+                previousNode = unionMap.get(unionKey);
+                isUnionTraversal = false;
+            }
+
+            EdgeRuleQuery edgeRuleQuery = new EdgeRuleQuery.Builder(previousNode, currentNode).build();
+            EdgeRule edgeRule = null;
+
+            try {
+                edgeRule = edgeRules.getRule(edgeRuleQuery);
+            } catch (EdgeRuleNotFoundException | AmbiguousRuleChoiceException e) {
+                // no usable edge rule found; fall through and treat the edge as a cousin edge
+            }
+
+            if (edgeRule == null || "none".equalsIgnoreCase(edgeRule.getContains())) {
+                edgeType = "EdgeType.COUSIN";
+            } else {
+                edgeType = "EdgeType.TREE";
+            }
+
+            query += ".createEdgeTraversal(" + edgeType + ", '" + previousNode + "','" + currentNode + "')";
+        } else {
+            query += ".getVerticesByProperty('aai-node-type', '" + currentNode + "')";
+        }
+    }
+
+    @Override
+    public void exitSingleNodeStep(AAIDslParser.SingleNodeStepContext ctx) {
+        generateExitStep();
+    }
+
+    @Override
+    public void exitSingleQueryStep(AAIDslParser.SingleQueryStepContext ctx) {
+        generateExitStep();
+    }
+
+    @Override
+    public void exitMultiQueryStep(AAIDslParser.MultiQueryStepContext ctx) {
+        generateExitStep();
+    }
+
+    private void generateExitStep() {
+        if (flags.containsKey(currentNode)) {
+            String storeFlag = flags.get(currentNode);
+            if (storeFlag != null && storeFlag.equals("store")) {
+                query += ".store('x')";
+            }
+            flags.remove(currentNode);
+        }
+    }
+
+    @Override
+    public void enterUnionQueryStep(AAIDslParser.UnionQueryStepContext ctx) {
+        isUnionBeg = true;
+
+        unionKey++;
+        unionMap.put(unionKey, currentNode);
+        query += ".union(builder.newInstance()";
+
+        // each comma in the union step introduces one additional member
+        List<TerminalNode> commaNodes = ctx.COMMA();
+        unionMembers += commaNodes.size();
+    }
+
+    @Override
+    public void exitUnionQueryStep(AAIDslParser.UnionQueryStepContext ctx) {
+        isUnionBeg = false;
+        unionMap.remove(unionKey);
+
+        query += ")";
+        unionKey--;
+    }
+
+    @Override
+    public void enterFilterTraverseStep(AAIDslParser.FilterTraverseStepContext ctx) {
+        isWhereTraversal = true;
+        whereTraversalNode = currentNode;
+        query += ".where(builder.newInstance()";
+    }
+
+    @Override
+    public void exitFilterTraverseStep(AAIDslParser.FilterTraverseStepContext ctx) {
+        query += ")";
+        isWhereTraversal = false;
+        currentNode = whereTraversalNode;
+    }
+
+    @Override
+    public void enterFilterStep(AAIDslParser.FilterStepContext ctx) {
+        if (ctx.NOT() != null && ctx.NOT().getText().equals("!")) {
+            isNot = true;
+        }
+
+        List<TerminalNode> nodes = ctx.KEY();
+        String key = ctx.KEY(0).getText();
+
+        if (isNot) {
+            query += ".getVerticesExcludeByProperty(";
+            isNot = false;
+        } else {
+            query += ".getVerticesByProperty(";
+        }
+
+        if (nodes.size() == 2) {
+            query += key + "," + ctx.KEY(1).getText();
+            query += ")";
+        }
+
+        if (nodes.size() > 2) {
+            // more than one value for the key: emit one key/value pair per value
+            for (TerminalNode node : nodes) {
+                if (node.getText().equals(key)) {
+                    continue;
+                }
+                query += key + "," + node.getText();
+                query += ")";
+            }
+        }
+    }
+
+    @Override
+    public void exitFilterStep(AAIDslParser.FilterStepContext ctx) {
+        // For now do nothing
+    }
+
+    @Override
+    public void enterTraverseStep(AAIDslParser.TraverseStepContext ctx) {
+        isTraversal = true;
+    }
+
+    @Override
+    public void exitTraverseStep(AAIDslParser.TraverseStepContext ctx) {
+        isTraversal = false;
+    }
+
+    @Override
+    public void
enterLimitStep(AAIDslParser.LimitStepContext ctx) { + String value = ctx.NODE().getText(); + limitQuery += ".limit(" + value + ")"; + } +} diff --git a/src/main/java/org/onap/aai/rest/dsl/DslQueryProcessor.java b/src/main/java/org/onap/aai/rest/dsl/DslQueryProcessor.java new file mode 100644 index 0000000..582f0ea --- /dev/null +++ b/src/main/java/org/onap/aai/rest/dsl/DslQueryProcessor.java @@ -0,0 +1,85 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.rest.dsl; + +import com.att.eelf.configuration.EELFLogger; +import com.att.eelf.configuration.EELFManager; +import org.antlr.v4.runtime.CharStreams; +import org.antlr.v4.runtime.CommonTokenStream; +import org.antlr.v4.runtime.tree.ParseTree; +import org.antlr.v4.runtime.tree.ParseTreeWalker; +import org.onap.aai.AAIDslLexer; +import org.onap.aai.AAIDslParser; +import org.springframework.beans.factory.annotation.Autowired; + +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; + +/** + * The Class DslQueryProcessor. 
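+ * Parses an AAI DSL query with the ANTLR-generated lexer and parser, then
+ * walks the resulting parse tree with a DslListener to build the equivalent
+ * gremlin query-builder expression.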
+ */
+public class DslQueryProcessor {
+
+    private static final EELFLogger LOGGER = EELFManager.getInstance().getLogger(DslQueryProcessor.class);
+
+    private DslListener dslListener;
+
+    @Autowired
+    public DslQueryProcessor(DslListener dslListener) {
+        this.dslListener = dslListener;
+    }
+
+    public String parseAaiQuery(String aaiQuery) {
+        try {
+            // Create an input stream that reads our query string
+            InputStream stream = new ByteArrayInputStream(aaiQuery.getBytes(StandardCharsets.UTF_8));
+
+            // Create a lexer from the input CharStream
+            AAIDslLexer lexer = new AAIDslLexer(CharStreams.fromStream(stream, StandardCharsets.UTF_8));
+
+            // Get a list of tokens pulled from the lexer
+            CommonTokenStream tokens = new CommonTokenStream(lexer);
+
+            // Parser that feeds off of the tokens buffer
+            AAIDslParser parser = new AAIDslParser(tokens);
+
+            // Specify our entry point
+            ParseTree ptree = parser.aaiquery();
+            LOGGER.info("QUERY-interim: " + ptree.toStringTree(parser));
+
+            // Walk it and attach our listener
+            ParseTreeWalker walker = new ParseTreeWalker();
+            walker.walk(dslListener, ptree);
+            LOGGER.info("Final QUERY: " + dslListener.query);
+
+            /*
+             * TODO - visitor pattern:
+             * QueryDslVisitor visitor = new QueryDslVisitor();
+             * String query = visitor.visit(ptree);
+             */
+            return dslListener.query;
+        } catch (Exception e) {
+            LOGGER.error("Error while processing the query: " + e.getMessage());
+        }
+        return "";
+    }
+}
diff --git a/src/main/java/org/onap/aai/rest/search/GenericQueryProcessor.java b/src/main/java/org/onap/aai/rest/search/GenericQueryProcessor.java new file mode 100644 index 0000000..2431d11 --- /dev/null +++ b/src/main/java/org/onap/aai/rest/search/GenericQueryProcessor.java @@ -0,0 +1,226 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.rest.search; + +import org.apache.tinkerpop.gremlin.process.traversal.P; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__; +import org.apache.tinkerpop.gremlin.structure.Graph; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.javatuples.Pair; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.rest.dsl.DslQueryProcessor; +import org.onap.aai.restcore.search.GroovyQueryBuilderSingleton; +import org.onap.aai.restcore.util.URITools; +import org.onap.aai.serialization.engines.TransactionalGraphEngine; +import org.onap.aai.serialization.queryformats.SubGraphStyle; + +import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.MultivaluedMap; +import java.io.FileNotFoundException; +import java.net.URI; +import java.util.*; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public abstract class GenericQueryProcessor { + + protected final Optional<URI> uri; + protected final MultivaluedMap<String, String> queryParams; + protected final Optional<Collection<Vertex>> vertices; + protected static Pattern p = Pattern.compile("query/(.*+)"); + protected Optional<String> gremlin; + protected final TransactionalGraphEngine dbEngine; + protected static GroovyQueryBuilderSingleton queryBuilderSingleton = GroovyQueryBuilderSingleton.getInstance(); + protected final boolean isGremlin; + protected Optional<DslQueryProcessor> dslQueryProcessorOptional; + /* dsl parameters to store dsl query and to check + * if this is a DSL request + */ + protected Optional<String> dsl; + protected final boolean isDsl ; + + protected GenericQueryProcessor(Builder builder) { + this.uri = builder.getUri(); + this.dbEngine = builder.getDbEngine(); + this.vertices = builder.getVertices(); + this.gremlin = builder.getGremlin(); + this.isGremlin = builder.isGremlin(); + this.dsl = builder.getDsl(); + this.isDsl = builder.isDsl(); + this.dslQueryProcessorOptional = builder.getDslQueryProcessor(); + + if (uri.isPresent()) { + queryParams = URITools.getQueryMap(uri.get()); + } else { + queryParams = new MultivaluedHashMap<>(); + } + } + + protected abstract GraphTraversal<?,?> runQuery(String query, Map<String, Object> params); + + protected List<Object> processSubGraph(SubGraphStyle style, GraphTraversal<?,?> g) { + final List<Object> resultVertices = new Vector<>(); + g.store("y"); + + if (SubGraphStyle.prune.equals(style) || SubGraphStyle.star.equals(style)) { + g.barrier().bothE(); + if (SubGraphStyle.prune.equals(style)) { + g.where(__.otherV().where(P.within("y"))); + } + g.dedup().subgraph("subGraph").cap("subGraph").map(x -> (Graph)x.get()).next().traversal().V().forEachRemaining(x -> { + resultVertices.add(x); + }); + } else { + resultVertices.addAll(g.toList()); + } + return resultVertices; + } + + public List<Object> execute(SubGraphStyle style) throws FileNotFoundException, AAIException { + final List<Object> resultVertices; + + Pair<String, Map<String, Object>> tuple = this.createQuery(); + String query = tuple.getValue0(); + Map<String, Object> params = tuple.getValue1(); + + if (query.equals("") && (vertices.isPresent() && vertices.get().isEmpty())) { + //nothing to do, just exit + return new ArrayList<>(); + } + GraphTraversal<?,?> g = this.runQuery(query, params); + + resultVertices = this.processSubGraph(style, g); + + return resultVertices; + } + + 
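+    /*
+     * Illustrative usage sketch (assumed caller-side wiring, not part of this
+     * class): dbEngine and dslQueryProcessor would be supplied by the caller,
+     * as QueryConsumer does when it handles a request.
+     *
+     *   GenericQueryProcessor processor = new GenericQueryProcessor.Builder(dbEngine)
+     *           .queryFrom(dslQuery, "dsl")
+     *           .queryProcessor(dslQueryProcessor)
+     *           .processWith(QueryProcessorType.LOCAL_GROOVY)
+     *           .create();
+     *   List<Object> vertices = processor.execute(SubGraphStyle.no_op);
+     */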
protected Pair<String, Map<String, Object>> createQuery() throws AAIException { + Map<String, Object> params = new HashMap<>(); + String query = ""; + if (this.isGremlin) { + query = gremlin.get(); + + }else if (this.isDsl) { + String dslUserQuery = dsl.get(); + if(dslQueryProcessorOptional.isPresent()){ + String dslQuery = dslQueryProcessorOptional.get().parseAaiQuery(dslUserQuery); + query = queryBuilderSingleton.executeTraversal(dbEngine, dslQuery, params); + String startPrefix = "g.V()"; + query = startPrefix + query; + } + } + + return new Pair<>(query, params); + } + + public static class Builder { + + private final TransactionalGraphEngine dbEngine; + private Optional<URI> uri = Optional.empty(); + private Optional<String> gremlin = Optional.empty(); + private boolean isGremlin = false; + private Optional<Collection<Vertex>> vertices = Optional.empty(); + private QueryProcessorType processorType = QueryProcessorType.GREMLIN_SERVER; + + private Optional<String> dsl = Optional.empty(); + private boolean isDsl = false; + private DslQueryProcessor dslQueryProcessor; + + public Builder(TransactionalGraphEngine dbEngine) { + this.dbEngine = dbEngine; + } + + public Builder queryFrom(URI uri) { + this.uri = Optional.of(uri); + this.isGremlin = false; + return this; + } + + public Builder startFrom(Collection<Vertex> vertices) { + this.vertices = Optional.of(vertices); + return this; + } + + public Builder queryFrom( String query, String queryType) { + + if(queryType.equals("gremlin")){ + this.gremlin = Optional.of(query); + this.isGremlin = true; + } + if(queryType.equals("dsl")){ + this.dsl = Optional.of(query); + this.isDsl = true; + } + return this; + } + + public Builder processWith(QueryProcessorType type) { + this.processorType = type; + return this; + } + + public Builder queryProcessor(DslQueryProcessor dslQueryProcessor){ + this.dslQueryProcessor = dslQueryProcessor; + return this; + } + + public Optional<DslQueryProcessor> getDslQueryProcessor(){ + return Optional.ofNullable(this.dslQueryProcessor); + } + + public TransactionalGraphEngine getDbEngine() { + return dbEngine; + } + + public Optional<URI> getUri() { + return uri; + } + + public Optional<String> getGremlin() { + return gremlin; + } + + public boolean isGremlin() { + return isGremlin; + } + + public Optional<String> getDsl() { + return dsl; + } + + public boolean isDsl() { + return isDsl; + } + + public Optional<Collection<Vertex>> getVertices() { + return vertices; + } + + public QueryProcessorType getProcessorType() { + return processorType; + } + + public GenericQueryProcessor create() { + return new GroovyShellImpl(this); + } + + } +} diff --git a/src/main/java/org/onap/aai/rest/search/GroovyShellImpl.java b/src/main/java/org/onap/aai/rest/search/GroovyShellImpl.java new file mode 100644 index 0000000..3db4301 --- /dev/null +++ b/src/main/java/org/onap/aai/rest/search/GroovyShellImpl.java @@ -0,0 +1,45 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.rest.search; + +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; +import org.onap.aai.restcore.search.GremlinGroovyShellSingleton; + +import java.util.Map; + +public class GroovyShellImpl extends GenericQueryProcessor { + + protected GroovyShellImpl(Builder builder) { + super(builder); + } + + @Override + protected GraphTraversal<?,?> runQuery(String query, Map<String, Object> params) { + + params.put("g", this.dbEngine.asAdmin().getTraversalSource()); + + GremlinGroovyShellSingleton shell = GremlinGroovyShellSingleton.getInstance(); + + return shell.executeTraversal(query, params); + } + +} + + diff --git a/src/main/java/org/onap/aai/rest/search/QueryProcessorType.java b/src/main/java/org/onap/aai/rest/search/QueryProcessorType.java new file mode 100644 index 0000000..c8e1d14 --- /dev/null +++ b/src/main/java/org/onap/aai/rest/search/QueryProcessorType.java @@ -0,0 +1,26 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.rest.search; + +public enum QueryProcessorType { + + GREMLIN_SERVER, + LOCAL_GROOVY +} diff --git a/src/main/java/org/onap/aai/rest/util/EchoResponse.java b/src/main/java/org/onap/aai/rest/util/EchoResponse.java new file mode 100644 index 0000000..05ff38e --- /dev/null +++ b/src/main/java/org/onap/aai/rest/util/EchoResponse.java @@ -0,0 +1,122 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.rest.util;
+
+import org.onap.aai.exceptions.AAIException;
+import org.onap.aai.logging.ErrorLogHelper;
+import org.onap.aai.restcore.RESTAPI;
+import org.springframework.stereotype.Component;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+import java.util.ArrayList;
+import java.util.HashMap;
+
+/**
+ * The Class EchoResponse.
+ */
+@Component
+@Path("/util")
+public class EchoResponse extends RESTAPI {
+
+    protected static String authPolicyFunctionName = "util";
+
+    public static final String echoPath = "/util/echo";
+
+    /**
+     * Simple health-check API that echoes back the X-FromAppId and X-TransactionId to clients.
+     * If there is a query string, a transaction gets logged into hbase, proving the application is connected to the data store.
+     * If there is no query string, no transaction logging is done to hbase.
+     *
+     * @param headers the request headers
+     * @param req the HTTP servlet request
+     * @param myAction if it exists, causes the transaction to be logged to hbase
+     * @return the response
+     */
+    @GET
+    @Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
+    @Path("/echo")
+    public Response echoResult(@Context HttpHeaders headers, @Context HttpServletRequest req,
+            @QueryParam("action") String myAction) {
+        Response response = null;
+
+        AAIException ex = null;
+        String fromAppId = null;
+        String transId = null;
+
+        try {
+            fromAppId = getFromAppId(headers);
+            transId = getTransId(headers);
+        } catch (AAIException e) {
+            ArrayList<String> templateVars = new ArrayList<String>();
+            templateVars.add("PUT uebProvider");
+            templateVars.add("addTopic");
+            return Response
+                    .status(e.getErrorObject().getHTTPResponseCode())
+                    .entity(ErrorLogHelper.getRESTAPIErrorResponse(headers.getAcceptableMediaTypes(), e, templateVars))
+                    .build();
+        }
+
+        try {
+            HashMap<AAIException, ArrayList<String>> exceptionList = new HashMap<AAIException, ArrayList<String>>();
+
+            ArrayList<String> templateVars = new ArrayList<String>();
+            templateVars.add(fromAppId);
+            templateVars.add(transId);
+
+            exceptionList.put(new AAIException("AAI_0002", "OK"), templateVars);
+
+            response = Response.status(Status.OK)
+                    .entity(ErrorLogHelper.getRESTAPIInfoResponse(
+                            headers.getAcceptableMediaTypes(), exceptionList))
+                    .build();
+
+        } catch (Exception e) {
+            ex = new AAIException("AAI_4000", e);
+            ArrayList<String> templateVars = new ArrayList<String>();
+            templateVars.add(Action.GET.name());
+            templateVars.add(fromAppId + " " + transId);
+
+            response = Response
+                    .status(Status.INTERNAL_SERVER_ERROR)
+                    .entity(ErrorLogHelper.getRESTAPIErrorResponse(
+                            headers.getAcceptableMediaTypes(), ex,
+                            templateVars)).build();
+
+        } finally {
+            if (ex != null) {
+                ErrorLogHelper.logException(ex);
+            }
+        }
+
+        return response;
+    }
+
+}
diff --git a/src/main/java/org/onap/aai/schema/GenTester.java b/src/main/java/org/onap/aai/schema/GenTester.java new file mode 100644 index 0000000..812c7b0 --- /dev/null +++ b/src/main/java/org/onap/aai/schema/GenTester.java @@ -0,0 +1,162 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.schema;
+
+import com.att.eelf.configuration.Configuration;
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+import org.janusgraph.core.JanusGraph;
+import org.janusgraph.core.schema.JanusGraphManagement;
+import org.onap.aai.dbgen.SchemaGenerator;
+import org.onap.aai.dbmap.AAIGraph;
+import org.onap.aai.logging.ErrorLogHelper;
+import org.onap.aai.logging.LoggingContext;
+import org.onap.aai.logging.LoggingContext.StatusCode;
+import org.onap.aai.util.AAIConfig;
+import org.onap.aai.util.AAIConstants;
+import org.springframework.context.annotation.AnnotationConfigApplicationContext;
+
+import java.util.Properties;
+import java.util.UUID;
+
+
+public class GenTester {
+
+    private static EELFLogger LOGGER;
+
+    /**
+     * The main method.
+     *
+     * @param args the arguments
+     */
+    public static void main(String[] args) {
+
+        JanusGraph graph = null;
+        System.setProperty("aai.service.name", GenTester.class.getSimpleName());
+        // Set the logging file properties to be used by EELFManager
+        Properties props = System.getProperties();
+        props.setProperty(Configuration.PROPERTY_LOGGING_FILE_NAME, AAIConstants.AAI_LOGBACK_PROPS);
+        props.setProperty(Configuration.PROPERTY_LOGGING_FILE_PATH, AAIConstants.AAI_HOME_BUNDLECONFIG);
+        LOGGER = EELFManager.getInstance().getLogger(GenTester.class);
+        boolean addDefaultCR = true;
+
+        LoggingContext.init();
+        LoggingContext.component("DBGenTester");
+        LoggingContext.partnerName("AAI-TOOLS");
+        LoggingContext.targetEntity("AAI");
+        LoggingContext.requestId(UUID.randomUUID().toString());
+        LoggingContext.serviceName("AAI");
+        LoggingContext.targetServiceName("main");
+        LoggingContext.statusCode(StatusCode.COMPLETE);
+        LoggingContext.responseCode(LoggingContext.SUCCESS);
+
+        AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext(
+                "org.onap.aai.config",
+                "org.onap.aai.setup"
+        );
+
+        try {
+            AAIConfig.init();
+            if (args != null && args.length > 0) {
+                if ("genDbRulesOnly".equals(args[0])) {
+                    ErrorLogHelper.logError("AAI_3100",
+                            " This option is no longer supported. What was in DbRules is now derived from the OXM files. ");
+                    return;
+                } else if ("GEN_DB_WITH_NO_SCHEMA".equals(args[0])) {
+                    // Note this is done to create an empty DB with no schema so that
+                    // an HBase copyTable can be used to set up a copy of the db.
+                    String imsg = " ---- NOTE --- about to load a graph without doing any schema processing (takes a little while) -------- ";
+                    System.out.println(imsg);
+                    LOGGER.info(imsg);
+                    graph = AAIGraph.getInstance().getGraph();
+
+                    if (graph == null) {
+                        ErrorLogHelper.logError("AAI_5102", "Error creating JanusGraph graph.");
+                        return;
+                    } else {
+                        String amsg = "Successfully loaded a JanusGraph graph without doing any schema work. ";
+                        System.out.println(amsg);
+                        LOGGER.auditEvent(amsg);
+                        return;
+                    }
+                } else if ("GEN_DB_WITH_NO_DEFAULT_CR".equals(args[0])) {
+                    addDefaultCR = false;
+                } else {
+                    ErrorLogHelper.logError("AAI_3000", "Unrecognized argument passed to GenTester.java: [" + args[0] + "]. ");
+
+                    String emsg = "Unrecognized argument passed to GenTester.java: [" + args[0] + "]. ";
+                    System.out.println(emsg);
+                    LoggingContext.statusCode(StatusCode.ERROR);
+                    LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR);
+                    LOGGER.error(emsg);
+
+                    emsg = "Either pass no argument for normal processing, or use 'GEN_DB_WITH_NO_SCHEMA' or 'GEN_DB_WITH_NO_DEFAULT_CR'.";
+                    System.out.println(emsg);
+                    LOGGER.error(emsg);
+
+                    return;
+                }
+            }
+
+            ErrorLogHelper.loadProperties();
+            String imsg = " ---- NOTE --- about to open graph (takes a little while) -------- ";
+            System.out.println(imsg);
+            LOGGER.info(imsg);
+            graph = AAIGraph.getInstance().getGraph();
+
+            if (graph == null) {
+                ErrorLogHelper.logError("AAI_5102", "Error creating JanusGraph graph. ");
+                return;
+            }
+
+            // Load the propertyKeys, indexes and edge-Labels into the DB
+            JanusGraphManagement graphMgt = graph.openManagement();
+
+            imsg = "-- Loading new schema elements into JanusGraph --";
+            System.out.println(imsg);
+            LOGGER.info(imsg);
+            SchemaGenerator.loadSchemaIntoJanusGraph(graph, graphMgt, null);
+        } catch (Exception ex) {
+            ErrorLogHelper.logError("AAI_4000", ex.getMessage());
+        }
+
+        if (graph != null) {
+            String imsg = "-- graph commit";
+            System.out.println(imsg);
+            LOGGER.info(imsg);
+            graph.tx().commit();
+
+            imsg = "-- graph shutdown ";
+            System.out.println(imsg);
+            LOGGER.info(imsg);
+            graph.close();
+        }
+
+        LOGGER.auditEvent("-- all done; if the program does not exit, please kill it.");
+        System.exit(0);
+    }
+
+}
+
diff --git a/src/main/java/org/onap/aai/service/AuthorizationService.java b/src/main/java/org/onap/aai/service/AuthorizationService.java new file mode 100644 index 0000000..d2597d0 --- /dev/null +++ b/src/main/java/org/onap/aai/service/AuthorizationService.java @@ -0,0 +1,109 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.service;
+
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+import org.eclipse.jetty.util.security.Password;
+import org.onap.aai.Profiles;
+import org.onap.aai.util.AAIConstants;
+import org.springframework.context.annotation.Profile;
+import org.springframework.stereotype.Service;
+
+import javax.annotation.PostConstruct;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.Base64;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.stream.Stream;
+
+@Profile(Profiles.ONE_WAY_SSL)
+@Service
+public class AuthorizationService {
+
+    private static final EELFLogger logger = EELFManager.getInstance().getLogger(AuthorizationService.class);
+
+    private final Map<String, String> authorizedUsers = new HashMap<>();
+
+    private static final Base64.Encoder ENCODER = Base64.getEncoder();
+
+    @PostConstruct
+    public void init() {
+
+        String basicAuthFile = getBasicAuthFilePath();
+
+        try (Stream<String> stream = Files.lines(Paths.get(basicAuthFile))) {
+            stream.filter(line -> !line.startsWith("#")).forEach(str -> {
+
+                String usernamePassword = null;
+                String accessType = null;
+
+                try {
+                    String[] userAccessType = str.split(",");
+
+                    if (userAccessType == null || userAccessType.length != 2) {
+                        throw new RuntimeException("Please check the realm.properties file as it is not conforming to the basic auth");
+                    }
+
+                    usernamePassword = userAccessType[0];
+                    accessType = userAccessType[1];
+
+                    String[] usernamePasswordArray = usernamePassword.split(":");
+
+                    if (usernamePasswordArray == null || usernamePasswordArray.length != 3) {
+                        throw new RuntimeException("Not a valid entry for the realm.properties entry: " + usernamePassword);
+                    }
+
+                    String username = usernamePasswordArray[0];
+                    String password = null;
+
+                    if (str.contains("OBF:")) {
+                        password = usernamePasswordArray[1] + ":" + usernamePasswordArray[2];
+                        password = Password.deobfuscate(password);
+                    }
+
+                    // key the map by the Base64 form of "username:password", the form
+                    // in which an HTTP Basic Authorization header arrives
+                    byte[] bytes = ENCODER.encode((username + ":" + password).getBytes("UTF-8"));
+
+                    authorizedUsers.put(new String(bytes), accessType);
+
+                } catch (UnsupportedEncodingException e) {
+                    logger.error("Unable to support the encoding of the file " + basicAuthFile);
+                }
+            });
+        } catch (IOException e) {
+            logger.error("IO Exception occurred during the reading of realm.properties", e);
+        }
+    }
+
+    public boolean checkIfUserAuthorized(String authorization) {
+        return authorizedUsers.containsKey(authorization) && "admin".equals(authorizedUsers.get(authorization));
+    }
+
+    public String getBasicAuthFilePath() {
+        return AAIConstants.AAI_HOME_ETC_AUTH + AAIConstants.AAI_FILESEP + "realm.properties";
+    }
+}
diff --git a/src/main/java/org/onap/aai/service/RetiredService.java b/src/main/java/org/onap/aai/service/RetiredService.java new file mode 100644 index 0000000..5989e31 --- /dev/null +++ b/src/main/java/org/onap/aai/service/RetiredService.java @@ -0,0 +1,67 @@
+/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property.
All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.service; + +import org.springframework.beans.factory.annotation.Value; +import org.springframework.context.annotation.PropertySource; +import org.springframework.stereotype.Service; + +import javax.annotation.PostConstruct; +import java.util.Arrays; +import java.util.List; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +@Service +@PropertySource("classpath:retired.properties") +@PropertySource(value = "file:${server.local.startpath}/retired.properties") +public class RetiredService { + + private String retiredPatterns; + + private String retiredAllVersions; + + private List<Pattern> retiredPatternsList; + private List<Pattern> retiredAllVersionList; + + @PostConstruct + public void initialize(){ + this.retiredPatternsList = Arrays.stream(retiredPatterns.split(",")).map(Pattern::compile).collect(Collectors.toList()); + this.retiredAllVersionList = Arrays.stream(retiredAllVersions.split(",")).map(Pattern::compile).collect(Collectors.toList()); + } + + @Value("${retired.api.pattern.list}") + public void setRetiredPatterns(String retiredPatterns){ + this.retiredPatterns = retiredPatterns; + } + + public List<Pattern> getRetiredPatterns(){ + return retiredPatternsList; + } + + @Value("${retired.api.all.versions}") + public void setRetiredAllVersions(String retiredPatterns){ + this.retiredAllVersions = retiredPatterns; + } + + public List<Pattern> getRetiredAllVersionList(){ + return retiredAllVersionList; + } +} diff --git a/src/main/java/org/onap/aai/util/PositiveNumValidator.java b/src/main/java/org/onap/aai/util/PositiveNumValidator.java new file mode 100644 index 0000000..ee58f55 --- /dev/null +++ b/src/main/java/org/onap/aai/util/PositiveNumValidator.java @@ -0,0 +1,35 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.util; + +import com.beust.jcommander.IParameterValidator; +import com.beust.jcommander.ParameterException; + +public class PositiveNumValidator implements IParameterValidator { + + @Override + public void validate(String name, String value) throws ParameterException { + int num = Integer.parseInt(value); + + if(num < 0) { + throw new ParameterException("Parameter " + name + " should be >= 0"); + } + } +}
\ No newline at end of file diff --git a/src/main/java/org/onap/aai/util/SendDeleteMigrationNotifications.java b/src/main/java/org/onap/aai/util/SendDeleteMigrationNotifications.java new file mode 100644 index 0000000..d9615b0 --- /dev/null +++ b/src/main/java/org/onap/aai/util/SendDeleteMigrationNotifications.java @@ -0,0 +1,183 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.util; + +import com.att.eelf.configuration.Configuration; +import com.att.eelf.configuration.EELFLogger; +import com.att.eelf.configuration.EELFManager; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.onap.aai.db.props.AAIProperties; +import org.onap.aai.dbmap.AAIGraph; +import org.onap.aai.dbmap.DBConnectionType; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.introspection.*; +import org.onap.aai.migration.EventAction; +import org.onap.aai.migration.NotificationHelper; +import org.onap.aai.rest.ueb.UEBNotification; +import org.onap.aai.serialization.db.DBSerializer; +import org.onap.aai.serialization.engines.QueryStyle; +import org.onap.aai.serialization.engines.JanusGraphDBEngine; +import org.onap.aai.serialization.engines.TransactionalGraphEngine; +import org.onap.aai.setup.SchemaVersions; +import org.onap.aai.setup.SchemaVersion; +import org.slf4j.MDC; + +import java.io.IOException; +import java.net.URI; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.*; +import java.util.concurrent.atomic.AtomicInteger; + +import javax.ws.rs.core.Response.Status; + +public class SendDeleteMigrationNotifications { + + protected EELFLogger logger = EELFManager.getInstance().getLogger(SendDeleteMigrationNotifications.class.getSimpleName()); + + private String config; + private String path; + private Set<String> notifyOn; + long sleepInMilliSecs; + int numToBatch; + private String requestId; + private EventAction eventAction; + private String eventSource; + + protected QueryStyle queryStyle = QueryStyle.TRAVERSAL; + protected ModelType introspectorFactoryType = ModelType.MOXY; + protected Loader loader = null; + protected TransactionalGraphEngine engine = null; + protected NotificationHelper notificationHelper = null; + protected DBSerializer serializer = null; + protected final LoaderFactory loaderFactory; + protected final SchemaVersions schemaVersions; + protected final SchemaVersion version; + + public SendDeleteMigrationNotifications(LoaderFactory loaderFactory, SchemaVersions schemaVersions, String config, String 
path, Set<String> notifyOn, int sleepInMilliSecs, int numToBatch, String requestId, EventAction eventAction, String eventSource) {
+        System.setProperty("aai.service.name", SendDeleteMigrationNotifications.class.getSimpleName());
+        Properties props = System.getProperties();
+        props.setProperty(Configuration.PROPERTY_LOGGING_FILE_NAME, "migration-logback.xml");
+        props.setProperty(Configuration.PROPERTY_LOGGING_FILE_PATH, AAIConstants.AAI_HOME_ETC_APP_PROPERTIES);
+
+        MDC.put("logFilenameAppender", SendDeleteMigrationNotifications.class.getSimpleName());
+
+        this.config = config;
+        this.path = path;
+        this.notifyOn = notifyOn;
+        this.sleepInMilliSecs = sleepInMilliSecs;
+        this.numToBatch = numToBatch;
+        this.requestId = requestId;
+        this.eventAction = eventAction;
+        this.eventSource = eventSource;
+        this.loaderFactory = loaderFactory;
+        this.schemaVersions = schemaVersions;
+        this.version = schemaVersions.getDefaultVersion();
+
+        initGraph();
+
+        initFields();
+    }
+
+    public void process(String basePath) throws Exception {
+
+        try {
+            Map<Integer, String> deleteDataMap = processFile();
+            int count = 0;
+            for (Map.Entry<Integer, String> entry : deleteDataMap.entrySet()) {
+                logger.info("Processing " + entry.getKey() + " :: Data :: " + entry.getValue());
+                String data = entry.getValue();
+                Introspector obj = null;
+                // each input line is expected to look like: object-name#@#uri#@#payload
+                if (data.contains("#@#")) {
+                    String[] splitLine = data.split("#@#");
+                    if (splitLine.length == 3) {
+                        obj = loader.unmarshal(splitLine[0], splitLine[2]);
+                        this.notificationHelper.addDeleteEvent(UUID.randomUUID().toString(), splitLine[0], eventAction,
+                                URI.create(splitLine[1]), obj, new HashMap<>(), basePath);
+                    }
+                }
+                count++;
+                if (count >= this.numToBatch) {
+                    trigger();
+                    logger.info("Triggered " + entry.getKey());
+                    count = 0;
+                    Thread.sleep(this.sleepInMilliSecs);
+                }
+            }
+            if (count > 0) {
+                trigger();
+            }
+            cleanup();
+        } catch (Exception e) {
+            logAndPrint("Error while sending delete migration notifications: " + e.getMessage());
+            e.printStackTrace();
+        }
+    }
+
+    protected void trigger() throws AAIException {
+        this.notificationHelper.triggerEvents();
+    }
+
+    private Map<Integer, String> processFile() throws IOException {
+        List<String> lines = Files.readAllLines(Paths.get(path));
+        final Map<Integer, String> data = new LinkedHashMap<>();
+        AtomicInteger counter = new AtomicInteger(0);
+        lines.stream().forEach(line -> {
+            if (line.contains("#@#")) {
+                data.put(counter.incrementAndGet(), line);
+            }
+        });
+        return data;
+    }
+
+    protected void cleanup() {
+        logAndPrint("Events sent, closing graph connections");
+        engine.rollback();
+        AAIGraph.getInstance().graphShutdown();
+        logAndPrint("---------- Done ----------");
+    }
+
+    private void initFields() {
+        this.loader = loaderFactory.createLoaderForVersion(introspectorFactoryType, version);
+        this.engine = new JanusGraphDBEngine(queryStyle, DBConnectionType.REALTIME, loader);
+        try {
+            this.serializer = new DBSerializer(version, this.engine, introspectorFactoryType, this.eventSource);
+        } catch (AAIException e) {
+            throw new RuntimeException("could not create serializer", e);
+        }
+        this.notificationHelper = new NotificationHelper(loader, serializer, loaderFactory, schemaVersions, engine, requestId, this.eventSource);
+    }
+
+    protected void initGraph() {
+        System.setProperty("realtime.db.config", this.config);
+        logAndPrint("\n\n---------- Connecting to Graph ----------");
+        AAIGraph.getInstance();
+        logAndPrint("---------- Connection Established ----------");
+    }
+
+    protected void logAndPrint(String msg) {
+        System.out.println(msg);
+        logger.info(msg);
+    }
+
+}
\ No newline at end of file diff --git a/src/main/java/org/onap/aai/util/SendDeleteMigrationNotificationsMain.java b/src/main/java/org/onap/aai/util/SendDeleteMigrationNotificationsMain.java new file mode 100644 index 0000000..ad96efe --- /dev/null +++ b/src/main/java/org/onap/aai/util/SendDeleteMigrationNotificationsMain.java @@ -0,0 +1,105 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============LICENSE_END========================================================= + */ +package org.onap.aai.util; + +import com.beust.jcommander.JCommander; +import com.beust.jcommander.Parameter; +import org.onap.aai.dbmap.AAIGraph; +import org.onap.aai.introspection.LoaderFactory; +import org.onap.aai.logging.LoggingContext; +import org.onap.aai.migration.EventAction; +import org.onap.aai.setup.SchemaVersions; +import org.springframework.context.annotation.AnnotationConfigApplicationContext; + +import java.util.*; + +public class SendDeleteMigrationNotificationsMain { + + public static void main(String[] args) { + + Arrays.asList(args).stream().forEach(System.out::println); + + String requestId = UUID.randomUUID().toString(); + LoggingContext.init(); + LoggingContext.partnerName("Migration"); + LoggingContext.serviceName(AAIConstants.AAI_RESOURCES_MS); + LoggingContext.component("SendMigrationNotifications"); + LoggingContext.targetEntity(AAIConstants.AAI_RESOURCES_MS); + LoggingContext.targetServiceName("main"); + LoggingContext.requestId(requestId); + LoggingContext.statusCode(LoggingContext.StatusCode.COMPLETE); + LoggingContext.responseCode(LoggingContext.SUCCESS); + + AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext( + "org.onap.aai.config", + "org.onap.aai.setup" + ); + + LoaderFactory loaderFactory = ctx.getBean(LoaderFactory.class); + SchemaVersions schemaVersions = ctx.getBean(SchemaVersions.class); + String basePath = ctx.getEnvironment().getProperty("schema.uri.base.path"); + + CommandLineDeleteArgs cArgs = new CommandLineDeleteArgs(); + + JCommander jCommander = new JCommander(cArgs, args); + jCommander.setProgramName(SendDeleteMigrationNotificationsMain.class.getSimpleName()); + + EventAction action = EventAction.valueOf(cArgs.eventAction.toUpperCase()); + + SendDeleteMigrationNotifications internal = new SendDeleteMigrationNotifications(loaderFactory, schemaVersions, cArgs.config, cArgs.file, new HashSet<>(cArgs.notifyOn), cArgs.sleepInMilliSecs, cArgs.numToBatch, requestId, action, cArgs.eventSource); + + try { + internal.process(basePath); + } catch (Exception e) { + e.printStackTrace(); + } + AAIGraph.getInstance().graphShutdown(); + System.exit(0); + } +} + +class CommandLineDeleteArgs { + + 
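+    /*
+     * Example invocation (hypothetical paths and values, for illustration only):
+     *
+     *   SendDeleteMigrationNotificationsMain -c /opt/app/aai-graphadmin/resources/etc/appprops/janusgraph-realtime.properties \
+     *       --inputFile /opt/data/deleteEvents.txt --numToBatch 50 --sleepInMilliSecs 1000
+     */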
@Parameter(names = "--help", help = true) + public boolean help; + + @Parameter(names = "-c", description = "location of configuration file", required = true) + public String config; + + @Parameter(names = "--inputFile", description = "path to input file", required = true) + public String file; + + @Parameter (names = "--notifyOn", description = "path to input file") + public List<String> notifyOn = new ArrayList<>(); + + @Parameter (names = "--sleepInMilliSecs", description = "how long to sleep between sending in seconds", validateWith = PositiveNumValidator.class) + public Integer sleepInMilliSecs = 0; + + @Parameter (names = "--numToBatch", description = "how many to batch before sending", validateWith = PositiveNumValidator.class) + public Integer numToBatch = 1; + + @Parameter (names = "-a", description = "event action type for dmaap event: CREATE, UPDATE, or DELETE") + public String eventAction = EventAction.DELETE.toString(); + + @Parameter (names = "--eventSource", description = "source of truth for notification, defaults to DMAAP-LOAD") + public String eventSource = "DMAAP-LOAD"; +} + + diff --git a/src/main/java/org/onap/aai/util/SendMigrationNotifications.java b/src/main/java/org/onap/aai/util/SendMigrationNotifications.java new file mode 100644 index 0000000..577f577 --- /dev/null +++ b/src/main/java/org/onap/aai/util/SendMigrationNotifications.java @@ -0,0 +1,189 @@ +/** + * ============LICENSE_START======================================================= + * org.onap.aai + * ================================================================================ + * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. + * ================================================================================ + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============LICENSE_END========================================================= + */ +package org.onap.aai.util; + +import com.att.eelf.configuration.Configuration; +import com.att.eelf.configuration.EELFLogger; +import com.att.eelf.configuration.EELFManager; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.onap.aai.db.props.AAIProperties; +import org.onap.aai.dbmap.AAIGraph; +import org.onap.aai.dbmap.DBConnectionType; +import org.onap.aai.exceptions.AAIException; +import org.onap.aai.introspection.*; +import org.onap.aai.migration.EventAction; +import org.onap.aai.migration.NotificationHelper; +import org.onap.aai.serialization.db.DBSerializer; +import org.onap.aai.serialization.engines.QueryStyle; +import org.onap.aai.serialization.engines.JanusGraphDBEngine; +import org.onap.aai.serialization.engines.TransactionalGraphEngine; +import org.onap.aai.setup.SchemaVersions; +import org.onap.aai.setup.SchemaVersion; +import org.slf4j.MDC; + +import java.io.IOException; +import java.net.URI; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.*; + +public class SendMigrationNotifications { + + protected EELFLogger logger = EELFManager.getInstance().getLogger(SendMigrationNotifications.class.getSimpleName()); + + private String config; + private String path; + private Set<String> notifyOn; + long sleepInMilliSecs; + int numToBatch; + private String requestId; + private EventAction eventAction; + private String eventSource; + + protected QueryStyle queryStyle = QueryStyle.TRAVERSAL; + protected ModelType introspectorFactoryType = ModelType.MOXY; + protected Loader loader = null; + protected TransactionalGraphEngine engine = null; + protected NotificationHelper notificationHelper = null; + protected DBSerializer serializer = null; + protected final LoaderFactory loaderFactory; + protected final SchemaVersions schemaVersions; + protected final SchemaVersion version; + + public SendMigrationNotifications(LoaderFactory loaderFactory, SchemaVersions schemaVersions, String config, String path, Set<String> notifyOn, int sleepInMilliSecs, int numToBatch, String requestId, EventAction eventAction, String eventSource) { + System.setProperty("aai.service.name", SendMigrationNotifications.class.getSimpleName()); + Properties props = System.getProperties(); + props.setProperty(Configuration.PROPERTY_LOGGING_FILE_NAME, "migration-logback.xml"); + props.setProperty(Configuration.PROPERTY_LOGGING_FILE_PATH, AAIConstants.AAI_HOME_ETC_APP_PROPERTIES); + + MDC.put("logFilenameAppender", SendMigrationNotifications.class.getSimpleName()); + + this.config = config; + this.path = path; + this.notifyOn = notifyOn; + this.sleepInMilliSecs = sleepInMilliSecs; + this.numToBatch = numToBatch; + this.requestId = requestId; + this.eventAction = eventAction; + this.eventSource = eventSource; + this.loaderFactory = loaderFactory; + this.schemaVersions = schemaVersions; + this.version = schemaVersions.getDefaultVersion(); + + initGraph(); + + initFields(); + } + + public void process(String basePath) throws Exception { + + Map<String, String> vertexIds = processFile(); + engine.startTransaction(); + GraphTraversalSource g = engine.asAdmin().getReadOnlyTraversalSource(); + List<Vertex> vertexes; + URI uri; + Vertex v; + int count = 0; + for (Map.Entry<String, String> entry : vertexIds.entrySet()) { + vertexes = g.V(entry.getKey()).toList(); + if (vertexes == null || vertexes.isEmpty()) { + 
logAndPrint("Vertex " + entry.getKey() + " no longer exists." ); + continue; + } else if (vertexes.size() > 1) { + logAndPrint("Vertex " + entry.getKey() + " query returned " + vertexes.size() + " vertexes." ); + continue; + } else { + logger.info("Processing " + entry.getKey() + "resource-version " + entry.getValue()); + v = vertexes.get(0); + if (notifyOn.isEmpty() || notifyOn.contains(v.value(AAIProperties.NODE_TYPE).toString())) { + if (entry.getValue().equals(v.value(AAIProperties.RESOURCE_VERSION).toString())) { + Introspector introspector = serializer.getLatestVersionView(v); + uri = this.serializer.getURIForVertex(v, false); + this.notificationHelper.addEvent(v, introspector, eventAction, uri, basePath); + count++; + if (count >= this.numToBatch) { + trigger(); + logger.info("Triggered " + entry.getKey()); + count = 0; + Thread.sleep(this.sleepInMilliSecs); + } + } + } + } + } + + if (count > 0) { + trigger(); + } + + cleanup(); + } + + protected void trigger() throws AAIException { + this.notificationHelper.triggerEvents(); + } + + private Map<String, String> processFile() throws IOException { + List<String> lines = Files.readAllLines(Paths.get(path)); + final Map<String,String> vertexIds = new LinkedHashMap<>(); + lines.stream().forEach(line -> { + if (line.contains("_")) { + String[] splitLine = line.split("_"); + if (splitLine.length == 2) { + vertexIds.put(splitLine[0], splitLine[1]); + } + } + }); + return vertexIds; + } + + protected void cleanup() { + logAndPrint("Events sent, closing graph connections"); + engine.rollback(); + AAIGraph.getInstance().graphShutdown(); + logAndPrint("---------- Done ----------"); + } + + private void initFields() { + this.loader = loaderFactory.createLoaderForVersion(introspectorFactoryType, version); + this.engine = new JanusGraphDBEngine(queryStyle, DBConnectionType.REALTIME, loader); + try { + this.serializer = new DBSerializer(version, this.engine, introspectorFactoryType, this.eventSource); + } catch (AAIException e) { + throw new RuntimeException("could not create serializer", e); + } + this.notificationHelper = new NotificationHelper(loader, serializer, loaderFactory, schemaVersions, engine, requestId, this.eventSource); + } + + protected void initGraph() { + System.setProperty("realtime.db.config", this.config); + logAndPrint("\n\n---------- Connecting to Graph ----------"); + AAIGraph.getInstance(); + logAndPrint("---------- Connection Established ----------"); + } + + protected void logAndPrint(String msg) { + System.out.println(msg); + logger.info(msg); + } + + +}
diff --git a/src/main/java/org/onap/aai/util/SendMigrationNotificationsMain.java b/src/main/java/org/onap/aai/util/SendMigrationNotificationsMain.java new file mode 100644 index 0000000..29eb1da --- /dev/null +++ b/src/main/java/org/onap/aai/util/SendMigrationNotificationsMain.java @@ -0,0 +1,105 @@ +/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.util;
+
+import com.beust.jcommander.JCommander;
+import com.beust.jcommander.Parameter;
+import org.onap.aai.dbmap.AAIGraph;
+import org.onap.aai.introspection.LoaderFactory;
+import org.onap.aai.logging.LoggingContext;
+import org.onap.aai.migration.EventAction;
+import org.onap.aai.setup.SchemaVersions;
+import org.springframework.context.annotation.AnnotationConfigApplicationContext;
+
+import java.util.*;
+
+public class SendMigrationNotificationsMain {
+
+    public static void main(String[] args) {
+
+        Arrays.asList(args).stream().forEach(System.out::println);
+
+        String requestId = UUID.randomUUID().toString();
+        LoggingContext.init();
+        LoggingContext.partnerName("Migration");
+        LoggingContext.serviceName(AAIConstants.AAI_RESOURCES_MS);
+        LoggingContext.component("SendMigrationNotifications");
+        LoggingContext.targetEntity(AAIConstants.AAI_RESOURCES_MS);
+        LoggingContext.targetServiceName("main");
+        LoggingContext.requestId(requestId);
+        LoggingContext.statusCode(LoggingContext.StatusCode.COMPLETE);
+        LoggingContext.responseCode(LoggingContext.SUCCESS);
+
+        AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext(
+                "org.onap.aai.config",
+                "org.onap.aai.setup"
+        );
+
+        LoaderFactory loaderFactory = ctx.getBean(LoaderFactory.class);
+        SchemaVersions schemaVersions = ctx.getBean(SchemaVersions.class);
+        String basePath = ctx.getEnvironment().getProperty("schema.uri.base.path");
+
+        CommandLineArgs cArgs = new CommandLineArgs();
+
+        JCommander jCommander = new JCommander(cArgs, args);
+        jCommander.setProgramName(SendMigrationNotificationsMain.class.getSimpleName());
+
+        EventAction action = EventAction.valueOf(cArgs.eventAction.toUpperCase());
+
+        SendMigrationNotifications internal = new SendMigrationNotifications(loaderFactory, schemaVersions, cArgs.config, cArgs.file, new HashSet<>(cArgs.notifyOn), cArgs.sleepInMilliSecs, cArgs.numToBatch, requestId, action, cArgs.eventSource);
+
+        try {
+            internal.process(basePath);
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+        AAIGraph.getInstance().graphShutdown();
+        System.exit(0);
+    }
+}
+
+class CommandLineArgs {
+
+    @Parameter(names = "--help", help = true)
+    public boolean help;
+
+ @Parameter(names = "-c", description = "location of configuration file", required = true) + public String config; + + @Parameter(names = "--inputFile", description = "path to input file", required = true) + public String file; + + @Parameter (names = "--notifyOn", description = "path to input file") + public List<String> notifyOn = new ArrayList<>(); + + @Parameter (names = "--sleepInMilliSecs", description = "how long to sleep between sending in seconds", validateWith = PositiveNumValidator.class) + public Integer sleepInMilliSecs = 0; + + @Parameter (names = "--numToBatch", description = "how many to batch before sending", validateWith = PositiveNumValidator.class) + public Integer numToBatch = 1; + + @Parameter (names = "-a", description = "event action type for dmaap event: CREATE, UPDATE, or DELETE") + public String eventAction = EventAction.CREATE.toString(); + + @Parameter (names = "--eventSource", description = "source of truth for notification, defaults to DMAAP-LOAD") + public String eventSource = "DMAAP-LOAD"; +} + + diff --git a/src/main/java/org/onap/aai/util/UniquePropertyCheck.java b/src/main/java/org/onap/aai/util/UniquePropertyCheck.java new file mode 100644 index 0000000..e96c252 --- /dev/null +++ b/src/main/java/org/onap/aai/util/UniquePropertyCheck.java @@ -0,0 +1,288 @@ +/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.util;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Properties;
+import java.util.UUID;
+
+import org.apache.tinkerpop.gremlin.structure.Direction;
+import org.apache.tinkerpop.gremlin.structure.Edge;
+import org.apache.tinkerpop.gremlin.structure.Graph;
+import org.apache.tinkerpop.gremlin.structure.Vertex;
+import org.apache.tinkerpop.gremlin.structure.VertexProperty;
+import org.onap.aai.GraphAdminApp;
+import org.onap.aai.exceptions.AAIException;
+import org.onap.aai.logging.LoggingContext;
+import org.onap.aai.logging.LoggingContext.StatusCode;
+import org.slf4j.MDC;
+
+import com.att.eelf.configuration.Configuration;
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+
+import org.janusgraph.core.JanusGraphFactory;
+import org.janusgraph.core.JanusGraph;
+import org.onap.aai.dbmap.AAIGraphConfig;
+
+public class UniquePropertyCheck {
+
+
+ private static final String FROMAPPID = "AAI-UTILS";
+ private static final String TRANSID = UUID.randomUUID().toString();
+ private static final String COMPONENT = "UniquePropertyCheck";
+
+ /**
+ * The main method.
+ *
+ * @param args the arguments
+ */
+ public static void main(String[] args) {
+
+
+ Properties props = System.getProperties();
+ props.setProperty(Configuration.PROPERTY_LOGGING_FILE_NAME, AAIConstants.AAI_LOGBACK_PROPS);
+ props.setProperty(Configuration.PROPERTY_LOGGING_FILE_PATH, AAIConstants.AAI_HOME_BUNDLECONFIG);
+ EELFLogger logger = EELFManager.getInstance().getLogger(UniquePropertyCheck.class.getSimpleName());
+
+ LoggingContext.init();
+ LoggingContext.partnerName(FROMAPPID);
+ LoggingContext.serviceName(GraphAdminApp.APP_NAME);
+ LoggingContext.component(COMPONENT);
+ LoggingContext.targetEntity(GraphAdminApp.APP_NAME);
+ LoggingContext.targetServiceName("main");
+ LoggingContext.requestId(TRANSID);
+ LoggingContext.statusCode(StatusCode.COMPLETE);
+ LoggingContext.responseCode(LoggingContext.SUCCESS);
+
+ MDC.put("logFilenameAppender", UniquePropertyCheck.class.getSimpleName());
+
+ if( args == null || args.length != 1 ){
+ String msg = "usage: UniquePropertyCheck propertyName \n";
+ System.out.println(msg);
+ LoggingContext.statusCode(StatusCode.ERROR);
+ LoggingContext.responseCode(LoggingContext.BUSINESS_PROCESS_ERROR);
+ logAndPrint(logger, msg );
+ System.exit(1);
+ }
+ String propertyName = args[0];
+ Graph graph = null;
+
+ try {
+ AAIConfig.init();
+ System.out.println(" ---- NOTE --- about to open graph (takes a little while)--------\n");
+ JanusGraph tGraph = JanusGraphFactory.open(new AAIGraphConfig.Builder(AAIConstants.REALTIME_DB_CONFIG).forService(UniquePropertyCheck.class.getSimpleName()).withGraphType("realtime").buildConfiguration());
+
+ if( tGraph == null ) {
+ LoggingContext.statusCode(StatusCode.ERROR);
+ LoggingContext.responseCode(LoggingContext.AVAILABILITY_TIMEOUT_ERROR);
+ logAndPrint(logger, " Error: Could not get JanusGraph ");
+ System.exit(1);
+ }
+
+ graph = tGraph.newTransaction();
+ if( graph == null ){
+ LoggingContext.statusCode(StatusCode.ERROR);
+ LoggingContext.responseCode(LoggingContext.AVAILABILITY_TIMEOUT_ERROR);
+ logAndPrint(logger, "could not get graph object in UniquePropertyCheck() \n");
+ System.exit(1);
+ }
+ }
+ catch (AAIException e1) {
+ String msg = "Threw Exception: [" + e1.toString() + "]";
+ LoggingContext.statusCode(StatusCode.ERROR);
+ LoggingContext.responseCode(LoggingContext.UNKNOWN_ERROR);
+ logAndPrint(logger, msg);
+ System.exit(1);
+ }
+ catch (Exception e2) {
+ String msg = "Threw Exception: [" + e2.toString() + "]";
+ LoggingContext.statusCode(StatusCode.ERROR);
+ LoggingContext.responseCode(LoggingContext.UNKNOWN_ERROR);
+ logAndPrint(logger, msg);
+ System.exit(1);
+ }
+
+ runTheCheckForUniqueness( TRANSID, FROMAPPID, graph, propertyName, logger );
+ System.exit(0);
+
+ }// End main()
+
+
+ /**
+ * Run the check for uniqueness.
+ *
+ * @param transId the trans id
+ * @param fromAppId the from app id
+ * @param graph the graph
+ * @param propertyName the property name
+ * @param logger the logger
+ * @return the boolean
+ */
+ public static Boolean runTheCheckForUniqueness( String transId, String fromAppId, Graph graph,
+ String propertyName, EELFLogger logger ){
+
+ // Note - property can be found in more than one nodetype
+ // our uniqueness constraints are always across the entire db - so this
+ // tool looks across all nodeTypes that the property is found in.
+ Boolean foundDupesFlag = false;
+
+ HashMap<String, String> valuesAndVidHash = new HashMap<>();
+ HashMap<String, String> dupeHash = new HashMap<>();
+
+ int propCount = 0;
+ int dupeCount = 0;
+ Iterator<Vertex> vertItor = graph.traversal().V().has(propertyName);
+ while( vertItor.hasNext() ){
+ propCount++;
+ Vertex v = vertItor.next();
+ String thisVid = v.id().toString();
+ Object val = (v.<Object>property(propertyName)).orElse(null);
+ if( valuesAndVidHash.containsKey(val.toString()) ){
+ // We've seen this one before- track it in our dupe hash
+ dupeCount++;
+ if( dupeHash.containsKey(val.toString()) ){
+ // This is not the first one being added to the dupe hash for this value
+ String updatedDupeList = dupeHash.get(val.toString()) + "|" + thisVid;
+ dupeHash.put(val.toString(), updatedDupeList);
+ }
+ else {
+ // This is the first time we see this value repeating
+ String firstTwoVids = valuesAndVidHash.get(val.toString()) + "|" + thisVid;
+ dupeHash.put(val.toString(), firstTwoVids);
+ }
+ }
+ else {
+ valuesAndVidHash.put(val.toString(), thisVid);
+ }
+ }
+
+
+ String info = "\n Found this property [" + propertyName + "] " + propCount + " times in our db.";
+ logAndPrint(logger, info);
+ info = " Found " + dupeCount + " cases of duplicate values for this property.\n\n";
+ logAndPrint(logger, info);
+
+ try {
+ if( ! dupeHash.isEmpty() ){
+ Iterator<Map.Entry<String, String>> dupeItr = dupeHash.entrySet().iterator();
+ while( dupeItr.hasNext() ){
+ foundDupesFlag = true;
+ Map.Entry<String, String> pair = dupeItr.next();
+ String dupeValue = pair.getKey();
+ String vidsStr = pair.getValue();
+ String[] vidArr = vidsStr.split("\\|");
+ logAndPrint(logger, "\n\n -------------- Found " + vidArr.length
+ + " nodes with " + propertyName + " of this value: [" + dupeValue + "]. Node details: ");
+
+ for( int i = 0; i < vidArr.length; i++ ){
+ String vidString = vidArr[i];
+ Long idLong = Long.valueOf(vidString);
+ Vertex tvx = graph.traversal().V(idLong).next();
+ showPropertiesAndEdges( transId, fromAppId, tvx, logger );
+ }
+ }
+ }
+ }
+ catch( Exception e2 ){
+ LoggingContext.statusCode(StatusCode.ERROR);
+ LoggingContext.responseCode(LoggingContext.DATA_ERROR);
+ logAndPrint(logger, "Threw Exception: [" + e2.toString() + "]");
+ }
+
+
+ return foundDupesFlag;
+
+ }// end of runTheCheckForUniqueness()
+
+
+ /**
+ * Show properties and edges.
+ *
+ * @param transId the trans id
+ * @param fromAppId the from app id
+ * @param tVert the t vert
+ * @param logger the logger
+ */
+ private static void showPropertiesAndEdges( String transId, String fromAppId, Vertex tVert,
+ EELFLogger logger ){
+
+ if( tVert == null ){
+ logAndPrint(logger, "Null node passed to showPropertiesAndEdges.");
+ }
+ else {
+ String nodeType = "";
+ Object ob = tVert.<String>property("aai-node-type").orElse(null);
+ if( ob == null ){
+ nodeType = "null";
+ }
+ else{
+ nodeType = ob.toString();
+ }
+
+ logAndPrint(logger, " AAINodeType/VtxID for this Node = [" + nodeType + "/" + tVert.id() + "]");
+ logAndPrint(logger, " Property Detail: ");
+ Iterator<VertexProperty<Object>> pI = tVert.properties();
+ while( pI.hasNext() ){
+ VertexProperty<Object> tp = pI.next();
+ Object val = tp.value();
+ logAndPrint(logger, "Prop: [" + tp.key() + "], val = [" + val + "] ");
+ }
+
+ Iterator <Edge> eI = tVert.edges(Direction.BOTH);
+ if( ! eI.hasNext() ){
+ logAndPrint(logger, "No edges were found for this vertex. ");
+ }
+ while( eI.hasNext() ){
+ Edge ed = eI.next();
+ String lab = ed.label();
+ Vertex vtx;
+ if (tVert.equals(ed.inVertex())) {
+ vtx = ed.outVertex();
+ } else {
+ vtx = ed.inVertex();
+ }
+ if( vtx == null ){
+ logAndPrint(logger, " >>> COULD NOT FIND VERTEX on the other side of this edge edgeId = " + ed.id() + " <<< ");
+ }
+ else {
+ String nType = vtx.<String>property("aai-node-type").orElse(null);
+ String vid = vtx.id().toString();
+ logAndPrint(logger, "Found an edge (" + lab + ") from this vertex to a [" + nType + "] node with VtxId = " + vid);
+ }
+ }
+ }
+ } // End of showPropertiesAndEdges()
+
+
+ /**
+ * Log and print.
+ *
+ * @param logger the logger
+ * @param msg the msg
+ */
+ protected static void logAndPrint(EELFLogger logger, String msg) {
+ System.out.println(msg);
+ logger.info(msg);
+ }
+
+}
+
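For orientation, the core of runTheCheckForUniqueness() above is a value-to-vertex-id grouping pass. A minimal, self-contained sketch of the same technique against an in-memory TinkerGraph (class name and sample data invented; assumes the tinkergraph-gremlin artifact is on the classpath):

import java.util.HashMap;
import java.util.Map;

import org.apache.tinkerpop.gremlin.structure.Graph;
import org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerGraph;

public class UniquePropertyCheckSketch {

    public static void main(String[] args) {
        Graph graph = TinkerGraph.open();
        graph.addVertex("aai-node-type", "pserver", "hostname", "host-a");
        graph.addVertex("aai-node-type", "pserver", "hostname", "host-b");
        graph.addVertex("aai-node-type", "vserver", "hostname", "host-a"); // duplicate value, different node type

        Map<String, String> firstSeen = new HashMap<>(); // value -> first vertex id
        Map<String, String> dupes = new HashMap<>();     // value -> "vid|vid|..."
        graph.traversal().V().has("hostname").forEachRemaining(v -> {
            String val = v.value("hostname").toString();
            String vid = v.id().toString();
            if (firstSeen.containsKey(val)) {
                // second and later sightings: start or extend the dupe list
                dupes.merge(val, firstSeen.get(val) + "|" + vid,
                        (existing, ignored) -> existing + "|" + vid);
            } else {
                firstSeen.put(val, vid);
            }
        });
        dupes.forEach((val, vids) ->
                System.out.println("Duplicate value [" + val + "] on vertices " + vids));
    }
}

As in the tool itself, the check deliberately ignores aai-node-type: uniqueness is enforced across the whole database.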
diff --git a/src/main/java/org/onap/aai/web/JerseyConfiguration.java b/src/main/java/org/onap/aai/web/JerseyConfiguration.java new file mode 100644 index 0000000..436946c --- /dev/null +++ b/src/main/java/org/onap/aai/web/JerseyConfiguration.java @@ -0,0 +1,137 @@ +/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.web;
+
+import org.glassfish.jersey.filter.LoggingFilter;
+import org.glassfish.jersey.server.ResourceConfig;
+import org.glassfish.jersey.servlet.ServletProperties;
+import org.onap.aai.rest.QueryConsumer;
+import org.onap.aai.rest.util.EchoResponse;
+import org.reflections.Reflections;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.context.annotation.Profile;
+import org.springframework.core.env.Environment;
+import org.springframework.stereotype.Component;
+
+import javax.annotation.Priority;
+import javax.ws.rs.container.ContainerRequestFilter;
+import javax.ws.rs.container.ContainerResponseFilter;
+import java.util.List;
+import java.util.Set;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
+
+@Component
+public class JerseyConfiguration extends ResourceConfig {
+
+    private static final Logger log = Logger.getLogger(JerseyConfiguration.class.getName());
+
+    private Environment env;
+
+    @Autowired
+    public JerseyConfiguration(Environment env) {
+
+        this.env = env;
+
+        register(QueryConsumer.class);
+
+        register(EchoResponse.class);
+
+        //Request Filters
+        registerFiltersForRequests();
+        // Response Filters
+        registerFiltersForResponses();
+
+        property(ServletProperties.FILTER_FORWARD_ON_404, true);
+
+        // Following registers the request headers and response headers
+        // If the LoggingFilter second argument is set to true, it will print response value as well
+        if ("true".equalsIgnoreCase(env.getProperty("aai.request.logging.enabled"))) {
+            register(new LoggingFilter(log, false));
+        }
+    }
+
+    public void registerFiltersForRequests() {
+
+        // Find all the classes within the interceptors package
+        Reflections reflections = new Reflections("org.onap.aai.interceptors");
+        // Collect every ContainerRequestFilter implementation found there
+        Set<Class<? extends ContainerRequestFilter>> filters = reflections.getSubTypesOf(ContainerRequestFilter.class);
+
+        // Ensure each filter carries the @Priority annotation; fail fast at startup if not
+        for (Class<?> filterClass : filters) {
+            if (filterClass.getAnnotation(Priority.class) == null) {
+                throw new RuntimeException("Container filter " + filterClass.getName() + " does not have @Priority annotation");
+            }
+        }
+
+        // Keep only the filters whose @Profile (if any) matches the active environment
+        List<Class<? extends ContainerRequestFilter>> filtersList = filters
+                .stream()
+                .filter(f -> !f.isAnnotationPresent(Profile.class)
+                        || env.acceptsProfiles(f.getAnnotation(Profile.class).value()))
+                .collect(Collectors.toList());
+
+        // Sort them by their priority level values
+        filtersList.sort((c1, c2) -> Integer.valueOf(c1.getAnnotation(Priority.class).value()).compareTo(c2.getAnnotation(Priority.class).value()));
+
+        // Then register each filter with the jersey application
+        filtersList.forEach(this::register);
+    }
+
+    public void registerFiltersForResponses() {
+
+        // Find all the classes within the interceptors package
+        Reflections reflections = new Reflections("org.onap.aai.interceptors");
+        // Collect every ContainerResponseFilter implementation found there
+        Set<Class<? extends ContainerResponseFilter>> filters = reflections.getSubTypesOf(ContainerResponseFilter.class);
+
+        // Ensure each filter carries the @Priority annotation; fail fast at startup if not
+        for (Class<?> filterClass : filters) {
+            if (filterClass.getAnnotation(Priority.class) == null) {
+                throw new RuntimeException("Container filter " + filterClass.getName() + " does not have @Priority annotation");
+            }
+        }
+
+        // Keep only the filters whose @Profile (if any) matches the active environment
+        List<Class<? extends ContainerResponseFilter>> filtersList = filters.stream()
+                .filter(f -> !f.isAnnotationPresent(Profile.class)
+                        || env.acceptsProfiles(f.getAnnotation(Profile.class).value()))
+                .collect(Collectors.toList());
+
+        // Sort them by their priority level values
+        filtersList.sort((c1, c2) -> Integer.valueOf(c1.getAnnotation(Priority.class).value()).compareTo(c2.getAnnotation(Priority.class).value()));
+
+        // Then register each filter with the jersey application
+        filtersList.forEach(this::register);
+    }
+}
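To make the reflection-based registration above concrete: any class under org.onap.aai.interceptors implementing one of the two filter interfaces is discovered, validated, and registered in ascending @Priority order. A hypothetical filter that would be picked up (name and header are invented; assumes the javax.ws.rs and javax.annotation APIs used above):

package org.onap.aai.interceptors; // the package scanned by the Reflections lookup above

import java.io.IOException;

import javax.annotation.Priority;
import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.container.ContainerRequestFilter;

// Must carry @Priority, or JerseyConfiguration throws at startup;
// lower values run earlier because of the sort above.
@Priority(1000)
public class HypotheticalAuditFilter implements ContainerRequestFilter {

    @Override
    public void filter(ContainerRequestContext requestContext) throws IOException {
        requestContext.getHeaders().add("X-Audit-Seen", "true"); // illustrative side effect
    }
}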
diff --git a/src/main/java/org/onap/aai/web/LocalHostAccessLog.java b/src/main/java/org/onap/aai/web/LocalHostAccessLog.java new file mode 100644 index 0000000..4e28562 --- /dev/null +++ b/src/main/java/org/onap/aai/web/LocalHostAccessLog.java @@ -0,0 +1,67 @@ +/**
+ * ============LICENSE_START=======================================================
+ * org.onap.aai
+ * ================================================================================
+ * Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.aai.web;
+
+import ch.qos.logback.access.jetty.RequestLogImpl;
+import org.eclipse.jetty.server.handler.HandlerCollection;
+import org.eclipse.jetty.server.handler.RequestLogHandler;
+import org.eclipse.jetty.util.thread.QueuedThreadPool;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.context.embedded.EmbeddedServletContainerFactory;
+import org.springframework.boot.context.embedded.jetty.JettyEmbeddedServletContainerFactory;
+import org.springframework.boot.context.embedded.jetty.JettyServerCustomizer;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+import java.util.Arrays;
+
+@Configuration
+public class LocalHostAccessLog {
+
+    @Bean
+    public EmbeddedServletContainerFactory jettyConfigBean(
+            @Value("${jetty.threadPool.maxThreads:200}") final String maxThreads,
+            @Value("${jetty.threadPool.minThreads:8}") final String minThreads
+    ){
+        JettyEmbeddedServletContainerFactory jef = new JettyEmbeddedServletContainerFactory();
+        jef.addServerCustomizers((JettyServerCustomizer) server -> {
+
+            HandlerCollection handlers = new HandlerCollection();
+
+            Arrays.stream(server.getHandlers()).forEach(handlers::addHandler);
+
+            RequestLogHandler requestLogHandler = new RequestLogHandler();
+            requestLogHandler.setServer(server);
+
+            RequestLogImpl requestLogImpl = new RequestLogImpl();
+            requestLogImpl.setResource("/localhost-access-logback.xml");
+            requestLogImpl.start();
+
+            requestLogHandler.setRequestLog(requestLogImpl);
+            handlers.addHandler(requestLogHandler);
+            server.setHandler(handlers);
+
+            final QueuedThreadPool threadPool = server.getBean(QueuedThreadPool.class);
+            threadPool.setMaxThreads(Integer.valueOf(maxThreads));
+            threadPool.setMinThreads(Integer.valueOf(minThreads));
+        });
+        return jef;
+    }
+}
diff --git a/src/main/resources/antlr4/org/onap/aai/AAIDsl.g4 b/src/main/resources/antlr4/org/onap/aai/AAIDsl.g4 new file mode 100644 index 0000000..2713677 --- /dev/null +++ b/src/main/resources/antlr4/org/onap/aai/AAIDsl.g4 @@ -0,0 +1,66 @@ +/**
+ * Define a grammar called AAIDsl
+ */
+grammar AAIDsl;
+
+
+aaiquery: dslStatement;
+
+dslStatement: (queryStep) (traverseStep | unionTraverseStep)* limitStep*;
+
+queryStep : (singleNodeStep |singleQueryStep | multiQueryStep);
+
+unionQueryStep: LBRACKET dslStatement ( COMMA (dslStatement))* RBRACKET;
+
+traverseStep: (TRAVERSE ( queryStep | unionQueryStep));
+
+unionTraverseStep: TRAVERSE unionQueryStep;
+
+singleNodeStep: NODE STORE? ;
+singleQueryStep: NODE STORE? (filterStep | filterTraverseStep);
+multiQueryStep: NODE STORE? (filterStep | filterTraverseStep) (filterStep)+;
+
+filterStep: NOT? (LPAREN KEY COMMA KEY (COMMA KEY)*RPAREN);
+filterTraverseStep: (LPAREN traverseStep* RPAREN);
+
+limitStep: LIMIT NODE;
+
+LIMIT: 'LIMIT';
+NODE: ID;
+
+KEY: ['] ID ['] ;
+
+AND: [&];
+
+STORE: [*];
+
+OR: [|];
+
+TRAVERSE: [>] ;
+
+LPAREN: [(];
+
+RPAREN: [)];
+
+COMMA: [,] ;
+
+EQUAL: [=];
+
+LBRACKET: [[];
+
+RBRACKET: [\]];
+
+NOT: [!];
+
+VALUE: DIGIT;
+
+fragment LOWERCASE : [a-z] ;
+fragment UPPERCASE : [A-Z] ;
+fragment DIGIT : [0-9] ;
+ID
+   : ( LOWERCASE | UPPERCASE | DIGIT) ( LOWERCASE | UPPERCASE | DIGIT | '-' |'.' |'_')*
+   ;
+
+WS : [ \t\r\n]+ -> skip ; // skip spaces, tabs, newlines
+
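For orientation, two queries the grammar above would accept (node types, keys and values are hypothetical):

    cloud-region*('cloud-owner','onap-cloud') > tenant > vserver LIMIT 10
    pserver('hostname','host-a') > [ cloud-region, lag-interface ]

Quoted tokens are KEY lexemes used as filter key/value pairs, `*` marks a stored step, `>` traverses an edge, `[ ... ]` unions sub-statements, and LIMIT caps the result count.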
diff --git a/src/main/resources/application.properties b/src/main/resources/application.properties new file mode 100644 index 0000000..d636bb6 --- /dev/null +++ b/src/main/resources/application.properties @@ -0,0 +1,64 @@ +
+spring.application.name=GraphAdmin
+
+server.contextPath=/
+spring.autoconfigure.exclude=org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration,org.springframework.boot.autoconfigure.orm.jpa.HibernateJpaAutoConfiguration
+
+spring.profiles.active=production,one-way-ssl
+
+spring.jersey.application-path=${schema.uri.base.path}
+
+# This property is used to set the Tomcat connector attributes. Developers can define multiple attributes separated by a comma
+#tomcat.connector.attributes=allowTrace-true
+# The max number of active threads in this pool
+jetty.threadPool.maxThreads=200
+# The minimum number of threads always kept alive
+jetty.threadPool.minThreads=8
+# The number of milliseconds before an idle thread shuts down, unless the number of active threads is less than or equal to the minimum
+server.tomcat.max-idle-time=60000
+
+# If you get an application startup failure because the port is already taken, change server.port above
+# If that's not it, please check that the key-store file path below makes sense
+server.local.startpath=src/main/resources/
+server.basic.auth.location=${server.local.startpath}etc/auth/realm.properties
+
+server.port=8449
+server.ssl.enabled-protocols=TLSv1.1,TLSv1.2
+server.ssl.key-store=${server.local.startpath}etc/auth/aai_keystore
+server.ssl.key-store-password=password(OBF:1vn21ugu1saj1v9i1v941sar1ugw1vo0)
+server.ssl.trust-store=${server.local.startpath}etc/auth/aai_keystore
+server.ssl.trust-store-password=password(OBF:1vn21ugu1saj1v9i1v941sar1ugw1vo0)
+server.ssl.client-auth=want
+server.ssl.key-store-type=JKS
+
+# JMS bind address host port
+jms.bind.address=tcp://localhost:61450
+dmaap.ribbon.listOfServers=localhost:3904
+
+# Schema related attributes for the oxm and edges
+# Any additional schema related attributes should start with prefix schema
+schema.configuration.location=N/A
+schema.source.name=onap
+schema.nodes.location=${server.local.startpath}/schema/${schema.source.name}/oxm/
+schema.edges.location=${server.local.startpath}/schema/${schema.source.name}/dbedgerules/
+
+schema.ingest.file=${server.local.startpath}/application.properties
+
+# Schema Version Related Attributes
+
+schema.uri.base.path=/aai
+# Lists all of the versions in the schema
+schema.version.list=v8,v9,v10,v11,v12,v13,v14
+# Specifies from which version the depth parameter should default to zero
+schema.version.depth.start=v9
+# Specifies from which version the related link should be displayed in the response payload
+schema.version.related.link.start=v10
+# Specifies from which version the client should see only the uri excluding host info
+# Before this version the server base will also be included
+schema.version.app.root.start=v11
+
+schema.version.namespace.change.start=v12
+# Specifies from which version the client should start seeing the edge label in the payload
+schema.version.edge.label.start=v12
+# Specifies the version that the application should default to
+schema.version.api.default=v14
name="dupeToollog" class="ch.qos.logback.classic.sift.SiftingAppender"> + <filter class="ch.qos.logback.classic.filter.LevelFilter"> + <level>INFO</level> + <onMatch>ACCEPT</onMatch> + <onMismatch>DENY</onMismatch> + </filter> + <!-- This is MDC value --> + <!-- We will assign a value to 'logFilenameAppender' via Java code --> + <discriminator> + <key>logFilenameAppender</key> + <defaultValue>console</defaultValue> + </discriminator> + <sift> + <!-- A standard RollingFileAppender, the log file is based on 'logFileName' + at runtime --> + <appender name="FILE-${logFilenameAppender}" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/dupeTool/${logFilenameAppender}.log</file> + <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy"> + <fileNamePattern>${logDirectory}/dupeTool/${logFilenameAppender}.log.%d{yyyy-MM-dd} + </fileNamePattern> + </rollingPolicy> + <encoder> + <pattern>%d{yyyy-MM-dd'T'HH:mm:ss.SSSXXX}|%m%n</pattern> + </encoder> + </appender> + </sift> + </appender> + + <logger name="org.reflections" level="ERROR" additivity="false"> + <appender-ref ref="dupeToollog" /> + </logger> + <logger name="org.apache.zookeeper" level="ERROR" additivity="false"> + <appender-ref ref="dupeToollog" /> + </logger> + <logger name="org.apache.hadoop" level="ERROR" additivity="false"> + <appender-ref ref="dupeToollog" /> + </logger> + <logger name="org.janusgraph" level="ERROR" additivity="false"> + <appender-ref ref="dupeToollog" /> + </logger> + <logger name="ch.qos.logback.classic" level="ERROR" additivity="false"> + <appender-ref ref="dupeToollog" /> + </logger> + <logger name="ch.qos.logback.core" level="ERROR" additivity="false"> + <appender-ref ref="dupeToollog" /> + </logger> + <logger name="com.att.eelf" level="ERROR" additivity="false"> + <appender-ref ref="dupeToollog" /> + </logger> + <logger name="org.onap.aai" level="ERROR" additivity="false"> + <appender-ref ref="dupeToollog" /> + </logger> + + + <root level="INFO"> + <appender-ref ref="dupeToollog" /> + </root> +</configuration>
diff --git a/src/main/resources/dynamicPayloadGenerator-logback.xml b/src/main/resources/dynamicPayloadGenerator-logback.xml new file mode 100644 index 0000000..d788a87 --- /dev/null +++ b/src/main/resources/dynamicPayloadGenerator-logback.xml @@ -0,0 +1,80 @@ +<!--
+
+    ============LICENSE_START=======================================================
+    org.onap.aai
+    ================================================================================
+    Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+    ================================================================================
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+    ============LICENSE_END=========================================================
+
+    ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+-->
+<configuration>
+	<property name="logDirectory" value="${AJSC_HOME}/logs" />
+
+	<appender name="dynamicPayloadGeneratorlog" class="ch.qos.logback.classic.sift.SiftingAppender">
+		<!-- This is MDC value -->
+		<!-- We will assign a value to 'logFilenameAppender' via Java code -->
+		<discriminator>
+			<key>logFilenameAppender</key>
+			<defaultValue>undefined</defaultValue>
+		</discriminator>
+		<sift>
+			<!-- A standard RollingFileAppender, the log file is based on 'logFileName' at runtime -->
+			<appender name="FILE-${logFilenameAppender}" class="ch.qos.logback.core.rolling.RollingFileAppender">
+				<file>${logDirectory}/dynamicPayloadGenerator/${logFilenameAppender}.log</file>
+				<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+					<fileNamePattern>${logDirectory}/dynamicPayloadGenerator/${logFilenameAppender}.log.%d{yyyy-MM-dd}</fileNamePattern>
+				</rollingPolicy>
+				<encoder>
+					<pattern>%d{yyyy-MM-dd'T'HH:mm:ss.SSSXXX}|%m%n</pattern>
+				</encoder>
+			</appender>
+		</sift>
+	</appender>
+
+	<logger name="org.reflections" level="ERROR" additivity="false">
+		<appender-ref ref="dynamicPayloadGeneratorlog" />
+	</logger>
+	<logger name="org.apache.zookeeper" level="ERROR" additivity="false">
+		<appender-ref ref="dynamicPayloadGeneratorlog" />
+	</logger>
+	<logger name="org.apache.hadoop" level="ERROR" additivity="false">
+		<appender-ref ref="dynamicPayloadGeneratorlog" />
+	</logger>
+	<logger name="org.janusgraph" level="ERROR" additivity="false">
+		<appender-ref ref="dynamicPayloadGeneratorlog" />
+	</logger>
+	<logger name="ch.qos.logback.classic" level="ERROR" additivity="false">
+		<appender-ref ref="dynamicPayloadGeneratorlog" />
+	</logger>
+	<logger name="ch.qos.logback.core" level="ERROR" additivity="false">
+		<appender-ref ref="dynamicPayloadGeneratorlog" />
+	</logger>
+	<logger name="com.att.eelf" level="ERROR" additivity="false">
+		<appender-ref ref="dynamicPayloadGeneratorlog" />
+	</logger>
+	<logger name="org.onap.aai" level="INFO" additivity="false">
+		<appender-ref ref="dynamicPayloadGeneratorlog" />
+	</logger>
+
+
+	<root level="INFO">
+		<appender-ref ref="dynamicPayloadGeneratorlog" />
+	</root>
+</configuration>
\ No newline at end of file
diff --git a/src/main/resources/etc/appprops/aaiEventDMaaPPublisher.properties b/src/main/resources/etc/appprops/aaiEventDMaaPPublisher.properties new file mode 100644 index 0000000..a8f5e95 --- /dev/null +++ b/src/main/resources/etc/appprops/aaiEventDMaaPPublisher.properties @@ -0,0 +1,4 @@ +topic=AAI-EVENT
+partition=AAI
+maxBatchSize=100
+maxAgeMs=250
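The two knobs above bound a publish: events accumulate until maxBatchSize messages are queued or the batch is maxAgeMs old, then the batch is flushed to the AAI-EVENT topic. A minimal sketch of that size-or-age policy (illustrative only, not the actual DMaaP client, which also flushes aged batches from a background thread rather than only on the next publish call):

import java.util.ArrayList;
import java.util.List;

public class BatchingPublisherSketch {

    private static final int MAX_BATCH_SIZE = 100; // maxBatchSize
    private static final long MAX_AGE_MS = 250;    // maxAgeMs

    private final List<String> pending = new ArrayList<>();
    private long batchStartedAt = -1;

    public synchronized void publish(String message) {
        if (pending.isEmpty()) {
            batchStartedAt = System.currentTimeMillis();
        }
        pending.add(message);
        boolean full = pending.size() >= MAX_BATCH_SIZE;
        boolean aged = System.currentTimeMillis() - batchStartedAt >= MAX_AGE_MS;
        if (full || aged) {
            flush();
        }
    }

    private void flush() {
        // a real publisher would POST the batch to the AAI-EVENT topic here
        System.out.println("flushing " + pending.size() + " event(s) to topic AAI-EVENT, partition AAI");
        pending.clear();
    }
}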
diff --git a/src/main/resources/etc/appprops/aaiconfig.properties b/src/main/resources/etc/appprops/aaiconfig.properties new file mode 100644 index 0000000..8613d93 --- /dev/null +++ b/src/main/resources/etc/appprops/aaiconfig.properties @@ -0,0 +1,144 @@ +#
+# ============LICENSE_START=======================================================
+# org.onap.aai
+# ================================================================================
+# Copyright © 2017-18 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+####################################################################
+# REMEMBER TO THINK ABOUT ENVIRONMENTAL DIFFERENCES AND CHANGE THE
+# TEMPLATE AND *ALL* DATAFILES
+####################################################################
+
+aai.config.checktime=1000
+
+# this could come from siteconfig.pl?
+aai.config.nodename=AutomaticallyOverwritten
+
+aai.transaction.logging=true
+aai.transaction.logging.get=true
+aai.transaction.logging.post=true
+
+aai.server.url.base=https://localhost:8443/aai/
+aai.server.url=https://localhost:8443/aai/v14/
+aai.oldserver.url.base=https://localhost:8443/aai/servers/
+aai.oldserver.url=https://localhost:8443/aai/servers/v2/
+aai.global.callback.url=https://localhost:8443/aai/
+
+# Start of INTERNAL Specific Properties
+
+aai.truststore.filename=aai_keystore
+aai.truststore.passwd.x=OBF:1vn21ugu1saj1v9i1v941sar1ugw1vo0
+aai.keystore.filename=aai-client-cert.p12
+aai.keystore.passwd.x=OBF:1vn21ugu1saj1v9i1v941sar1ugw1vo0
+
+aai.realtime.clients=RO,SDNC,MSO,SO
+
+# End of INTERNAL Specific Properties
+
+aai.notification.current.version=v14
+aai.notificationEvent.default.status=UNPROCESSED
+aai.notificationEvent.default.eventType=AAI-EVENT
+aai.notificationEvent.default.domain=devINT1
+aai.notificationEvent.default.sourceName=aai
+aai.notificationEvent.default.sequenceNumber=0
+aai.notificationEvent.default.severity=NORMAL
+aai.notificationEvent.default.version=v14
+# This one lets us enable/disable resource-version checking on updates/deletes
+aai.resourceversion.enableflag=true
+aai.logging.maxStackTraceEntries=10
+aai.default.api.version=v14
+
+# Used by Model-processing code
+aai.model.delete.sleep.per.vtx.msec=500
+aai.model.query.resultset.maxcount=50
+aai.model.query.timeout.sec=90
+
+# Used by Data Grooming
+aai.grooming.default.max.fix=150
+aai.grooming.default.sleep.minutes=7
+
+# Used by DupeTool
+aai.dupeTool.default.max.fix=25
+aai.dupeTool.default.sleep.minutes=7
+
+aai.model.proc.max.levels=50
+aai.edgeTag.proc.max.levels=50
+
+# Used by the ForceDelete tool
+aai.forceDel.protected.nt.list=cloud-region
+aai.forceDel.protected.edge.count=10
+aai.forceDel.protected.descendant.count=10
+
+# Used for CTAG-Pool generation
+aai.ctagPool.rangeString.vplsPe1=2001-2500
+aai.ctagPool.rangeString.vplsPe2=2501-3000
+
+aai.jms.enable=false
+
+#used by the dataGrooming and dataSnapshot cleanup tasks
+aai.cron.enable.datagroomingcleanup=true
+aai.cron.enable.datasnapshotcleanup=true
+aai.datagrooming.agezip=5
+aai.datagrooming.agedelete=30
+aai.datasnapshot.agezip=5
+aai.datasnapshot.agedelete=30
+
+#used by the dataSnapshot and dataGrooming tasks
+aai.cron.enable.dataSnapshot=true
+aai.cron.enable.dataGrooming=true
+
+#used by the dataGrooming tasks
+aai.datagrooming.enableautofix=true
+aai.datagrooming.enabledupefixon=true
+aai.datagrooming.enabledontfixorphans=true
+aai.datagrooming.enabletimewindowminutes=true
+aai.datagrooming.enableskiphostcheck=false
+aai.datagrooming.enablesleepminutes=false
+aai.datagrooming.enableedgesonly=false
+aai.datagrooming.enableskipedgechecks=false
+aai.datagrooming.enablemaxfix=false
+aai.datagrooming.enablesinglecommits=false
+aai.datagrooming.enabledupecheckoff=false
+aai.datagrooming.enableghost2checkoff=false
+aai.datagrooming.enableghost2fixon=false
+aai.datagrooming.enablef=false
+
+# used by the dataGrooming to set values
+aai.datagrooming.timewindowminutesvalue=10500
+aai.datagrooming.sleepminutesvalue=100
+aai.datagrooming.maxfixvalue=10
+aai.datagrooming.fvalue=10
+
+# Flag to enable/disable the graphadmin request timeout
+aai.graphadmin.timeoutenabled=true
+
+# App-specific timeout overrides: -1 bypasses the timeout for that app id; a whole number overrides the timeout with that value (in ms); see the parsing sketch after this file
+aai.graphadmin.timeout.appspecific=JUNITTESTAPP1,1|JUNITTESTAPP2,-1|DCAE-CCS,-1|DCAES,-1|AAI-FILEGEN-GFPIP,-1|FitNesse-Test-PS2418,-1|FitNesse-Test-jenkins,-1|FitNesse-Test-ps2418,-1|FitNesse-Relationship-Test-PS2418,-1|FitNesse-Relationship-Test-ps2418,-1|FitNesse-Relationship-Test-jenkins,-1|VPESAT,-1|AAIRctFeed,-1|NewvceCreator,-1|IANewvceCreator,-1|AAI-CSIOVALS,-1
+
+#default timeout limit added for graphadmin if not overridden (in ms)
+aai.graphadmin.timeoutlimit=180000
+
+# Disable the process checks which are oriented towards the linux OS
+# These props should only be true when running locally on windows
+aai.disable.check.snapshot.running=true
+aai.disable.check.grooming.running=true
+
+# Specify the params listed right here that you would have sent to the dataSnapshot shell script
+# JUST_TAKE_SNAPSHOT
+# THREADED_SNAPSHOT 2 DEBUG
+# THREADED_SNAPSHOT 2
+aai.datasnapshot.params=JUST_TAKE_SNAPSHOT
+
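For orientation, the aai.graphadmin.timeout.appspecific value above is a |-separated list of appId,override pairs. A standalone sketch of how such a string decomposes (the spec below is a shortened copy of the property; this is not the code that actually consumes it):

import java.util.HashMap;
import java.util.Map;

public class TimeoutOverrideSketch {

    public static void main(String[] args) {
        String spec = "JUNITTESTAPP1,1|JUNITTESTAPP2,-1|DCAE-CCS,-1";
        long defaultLimitMs = 180000; // aai.graphadmin.timeoutlimit

        Map<String, Long> overrides = new HashMap<>();
        for (String pair : spec.split("\\|")) {
            String[] appAndValue = pair.split(",");
            overrides.put(appAndValue[0], Long.valueOf(appAndValue[1]));
        }

        String appId = "DCAE-CCS";
        long effective = overrides.getOrDefault(appId, defaultLimitMs);
        System.out.println(appId + " -> " + (effective < 0 ? "timeout bypassed" : effective + " ms"));
    }
}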
diff --git a/src/main/resources/etc/appprops/datatoolscrons.properties b/src/main/resources/etc/appprops/datatoolscrons.properties new file mode 100644 index 0000000..74b3c9e --- /dev/null +++ b/src/main/resources/etc/appprops/datatoolscrons.properties @@ -0,0 +1,12 @@ +#Cron expressions
+#please note these must be in Quartz cron syntax
+#column key: seconds minutes hours dayOfMonth month dayOfWeek
+#note: dayOfWeek is optional, the rest are mandatory
+#for more information refer to http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html
+#this site can generate new expressions for you: http://www.cronmaker.com/
+#BUT you must omit the last (seventh) column when you copy its output (spring expects exactly 6 fields and doesn't allow the seventh optional one)
+datagroomingcleanup.cron=0 06 0 * * ?
+datasnapshotcleanup.cron=0 17 0 * * ?
+datasnapshottasks.cron=0 45 * * * ?
+datagroomingtasks.cron=0 10 1,5,9,13,17,21 * * ?
+dataexporttask.cron=0 02 3 * * ?
\ No newline at end of file
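These six-field expressions (second minute hour day month weekday) are resolved by Spring's scheduler; for instance, datagroomingcleanup.cron fires daily at 00:06:00. A sketch of typical wiring (illustrative only; the class name is invented and the actual scheduled task classes in this codebase may differ):

import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

@Component
public class DataGroomingCleanupScheduleSketch {

    // Spring resolves the placeholder against the property defined above
    @Scheduled(cron = "${datagroomingcleanup.cron}")
    public void cleanupOldGroomingFiles() {
        // cleanup work would go here
    }
}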
diff --git a/src/main/resources/etc/appprops/dynamic.properties b/src/main/resources/etc/appprops/dynamic.properties new file mode 100644 index 0000000..38e1bda --- /dev/null +++ b/src/main/resources/etc/appprops/dynamic.properties @@ -0,0 +1,34 @@ +#
+# ============LICENSE_START=======================================================
+# org.onap.aai
+# ================================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+query.fast-property=true
+# the following parameters are not reloaded automatically and require a manual bounce
+storage.backend=inmemory
+
+# Kept the below in case we need to change from in-memory to a dynamic instance
+storage.hostname=localhost
+
+#caching on
+cache.db-cache = true
+cache.db-cache-clean-wait = 20
+cache.db-cache-time = 180000
+cache.db-cache-size = 0.3
+load.snapshot.file=false
diff --git a/src/main/resources/etc/appprops/error.properties b/src/main/resources/etc/appprops/error.properties new file mode 100644 index 0000000..708fb1f --- /dev/null +++ b/src/main/resources/etc/appprops/error.properties @@ -0,0 +1,178 @@ +# Adding comment trying to trigger a build
+#------------------------------------------------------------------------------- ----------
+#Key=Disposition:Category:Severity:Error Code:HTTP ResponseCode:RESTError Code:Error Message
+#------------------------------------------------------------------------------- ----------
+# testing code, please don't change unless error utility source code changes
+AAI_TESTING=5:2:WARN:0000:400:0001:Error code for testing
+
+# General success
+AAI_0000=0:0:INFO:0000:200:0000:Success
+
+# health check success
+AAI_0001=0:0:INFO:0001:200:0001:Success X-FromAppId=%1 X-TransactionId=%2
+AAI_0002=0:0:INFO:0002:200:0001:Successful health check
+
+# Success with additional info
+AAI_0003=0:3:INFO:0003:202:0003:Success with additional info performing %1 on %2. Added %3 with key %4
+AAI_0004=0:3:INFO:0004:202:0003:Added prerequisite object to db
+
+#--- aairest: 3000-3299
+# svc errors
+AAI_3000=5:2:INFO:3000:400:3000:Invalid input performing %1 on %2
+AAI_3001=5:6:INFO:3001:404:3001:Resource not found for %1 using id %2
+AAI_3002=5:1:WARN:3002:400:3002:Error writing output performing %1 on %2
+AAI_3003=5:1:WARN:3003:400:3003:Failed to make edge to missing target node of type %3 with keys %4 performing %1 on %2
+AAI_3005=5:6:WARN:3005:404:3001:Node cannot be directly accessed for read, must be accessed via ancestor(s)
+AAI_3006=5:6:WARN:3006:404:3001:Node cannot be directly accessed for write, must be accessed via ancestor(s)
+AAI_3007=5:6:INFO:3007:410:3007:This version (%1) of the API is retired, please migrate to %2
+AAI_3008=5:6:WARN:3008:400:3008:URI is not encoded in UTF-8
+AAI_3009=5:6:WARN:3009:400:3002:Malformed URL
+AAI_3010=5:6:WARN:3010:400:3002:Cannot write via this URL
+AAI_3011=5:6:WARN:3011:400:3000:Unknown XML namespace used in payload
+AAI_3012=5:6:WARN:3012:400:3012:Unrecognized AAI function
+AAI_3013=5:6:WARN:3013:400:3013:Query payload missing required parameters %1
+AAI_3014=5:6:WARN:3014:400:3014:Query payload is invalid %1
+# pol errors
+AAI_3100=5:1:WARN:3100:400:3100:Unsupported operation %1
+AAI_3101=5:1:WARN:3101:403:3101:Attempt by client %1 to execute API %2
+AAI_3102=5:1:WARN:3102:400:3102:Error parsing input performing %1 on %2
+AAI_3300=5:1:WARN:3300:403:3300:Unauthorized
+AAI_3301=5:1:WARN:3301:401:3301:Stale credentials
+AAI_3302=5:1:WARN:3302:401:3301:Not authenticated
+AAI_3303=5:1:WARN:3303:403:3300:Too many objects would be returned by this request, please refine your request and retry
+
+#--- aaigen: 4000-4099
+AAI_4000=5:4:ERROR:4000:500:3002:Internal Error
+AAI_4001=5:4:FATAL:4001:500:3002:Configuration file not found
+AAI_4002=5:4:FATAL:4002:500:3002:Error reading Configuration file
+AAI_4003=5:4:ERROR:4003:500:3002:Error writing to log file
+AAI_4004=5:4:FATAL:4004:500:3002:Error reading/parsing the error properties file
+AAI_4005=5:4:FATAL:4005:500:3002:Missing or invalid configuration parameter
+AAI_4006=5:4:FATAL:4006:500:3002:Unexpected error in service
+AAI_4007=5:4:WARN:4007:500:3102:Input parsing error
+AAI_4008=5:4:ERROR:4008:500:3002:Output parsing error
+AAI_4009=4:0:WARN:4009:400:3000:Invalid X-FromAppId in header
+AAI_4010=4:0:WARN:4010:400:3000:Invalid X-TransactionId in header
+AAI_4011=5:4:ERROR:4011:500:3002:Missing data for REST error response
+AAI_4014=4:0:WARN:4014:400:3000:Invalid Accept header
+AAI_4015=4:0:WARN:4015:400:3000:You must provide at least one indexed property
+AAI_4016=4:0:WARN:4016:400:3000:The depth parameter must be a number or the string "all"
+AAI_4017=5:2:INFO:4017:400:3000:Could not set property
+AAI_4018=5:2:WARN:4018:400:3000:Unable to convert the string to integer
+#--- aaidbmap: 5102-5199
+AAI_5102=5:4:FATAL:5102:500:3002:Graph database is null after open
+AAI_5105=5:4:ERROR:5105:500:3002:Unexpected error reading/updating database
+AAI_5106=5:4:WARN:5106:404:3001:Node not found
+AAI_5107=5:2:WARN:5107:400:3000:Required information missing
+AAI_5108=5:2:WARN:5108:200:0:Unexpected information in request being ignored
+
+#--- aaidbgen: 6101-6199
+AAI_6101=5:4:ERROR:6101:500:3002:null JanusGraph object passed
+AAI_6102=5:4:WARN:6102:400:3000:Passed-in property is not valid for this nodeType
+AAI_6103=5:4:WARN:6103:400:3000:Required Node-property not found in input data
+AAI_6104=5:4:WARN:6104:400:3000:Required Node-property was passed with no data
+AAI_6105=5:4:WARN:6105:400:3000:Node-Key-Property not defined in DbMaps
+AAI_6106=5:4:WARN:6106:400:3000:Passed-in property is not valid for this edgeType
+AAI_6107=5:4:WARN:6107:400:3000:Required Edge-property not found in input data
+AAI_6108=5:4:WARN:6108:400:3000:Required Edge-property was passed with no data
+AAI_6109=5:4:WARN:6109:400:3000:Bad dependent Node value
+AAI_6110=5:4:ERROR:6110:400:3100:Node cannot be deleted
+AAI_6111=5:4:WARN:6111:400:3000:JSON processing error
+AAI_6112=5:4:ERROR:6112:400:3000:More than one node found by getUniqueNode()
+AAI_6114=5:4:INFO:6114:404:3001:Node Not Found
+AAI_6115=5:4:ERROR:6115:400:3000:Unrecognized NodeType
+AAI_6116=5:4:ERROR:6116:400:3000:Unrecognized Property
+AAI_6117=5:4:ERROR:6117:400:3000:Uniqueness constraint violated
+AAI_6118=5:4:WARN:6118:400:3000:Required Field not passed.
+AAI_6120=5:4:WARN:6120:400:3000:Bad Parameter Passed
+AAI_6121=5:4:ERROR:6121:400:3000:Problem with internal AAI reference data
+AAI_6122=5:4:ERROR:6122:400:3000:Data Set not complete in DB for this request
+AAI_6123=5:4:ERROR:6123:500:3000:Bad Data found by DataGrooming Tool - Investigate
+AAI_6124=5:4:ERROR:6124:500:3000:File read/write error
+AAI_6125=5:4:WARN:6125:500:3000:Problem Pulling Data Set
+AAI_6126=5:4:ERROR:6126:400:3000:Edge cannot be deleted
+AAI_6127=5:4:INFO:6127:404:3001:Edge Not Found
+AAI_6128=5:4:INFO:6128:500:3000:Unexpected error
+AAI_6129=5:4:INFO:6129:404:3003:Error making edge to target node
+AAI_6130=5:4:WARN:6130:412:3000:Precondition Required
+AAI_6131=5:4:WARN:6131:412:3000:Precondition Failed
+AAI_6132=5:4:WARN:6132:400:3000:Bad Model Definition
+AAI_6133=5:4:WARN:6133:400:3000:Bad Named Query Definition
+AAI_6134=5:4:ERROR:6134:500:6134:Could not persist transaction to storage back end. Exhausted retry amount
+AAI_6135=5:4:WARN:6135:412:3000:Resource version specified on create
+AAI_6136=5:4:ERROR:6136:400:3000:Object cannot hold multiple items
+AAI_6137=5:4:ERROR:6137:400:3000:Cannot perform writes on multiple vertices
+AAI_6138=5:4:ERROR:6138:400:3000:Cannot delete multiple vertices
+AAI_6139=5:4:ERROR:6139:404:3000:Attempted to add edge to vertex that does not exist
+AAI_6140=5:4:ERROR:6140:400:3000:Edge multiplicity violated
+AAI_6141=5:4:WARN:6141:400:3000:Please Refine Query
+AAI_6142=5:4:INFO:6142:400:3000:Retrying transaction
+AAI_6143=5:4:INFO:6143:400:3000:Ghost vertex found
+AAI_6144=5:4:WARN:6144:400:3000:Cycle found in graph
+AAI_6145=5:4:ERROR:6145:400:3000:Cannot create a nested/containment edge via relationship
+AAI_6146=5:4:ERROR:6146:400:3000:Ambiguous identity map found, use a URI instead
+AAI_6147=5:4:ERROR:6147:400:3000:Payload Limit Reached, reduce payload
+
+#--- aaicsvp: 7101-7199
+AAI_7101=5:4:ERROR:7101:500:3002:Unexpected error in CSV file processing
+AAI_7102=5:4:ERROR:7102:500:3002:Error in cleanup temporary directory
+#AAI_7103=4:2:ERROR:7103:500:3002:Unsupported user
+AAI_7104=5:4:ERROR:7104:500:3002:Failed to create directory
+AAI_7105=5:4:ERROR:7105:500:3002:Temporary directory exists
+AAI_7106=5:4:ERROR:7106:500:3002:Cannot delete
+AAI_7107=5:4:ERROR:7107:500:3002:Input file does not exist
+AAI_7108=5:4:ERROR:7108:500:3002:Output file does not exist
+AAI_7109=5:4:ERROR:7109:500:3002:Error closing file
+AAI_7110=5:4:ERROR:7110:500:3002:Error loading/reading properties file
+AAI_7111=5:4:ERROR:7111:500:3002:Error executing shell script
+AAI_7112=5:4:ERROR:7112:500:3002:Error creating output file
+AAI_7113=5:4:ERROR:7113:500:3002:Trailer record error
+AAI_7114=5:4:ERROR:7114:500:3002:Input file error
+AAI_7115=5:4:ERROR:7115:500:3002:Unexpected error
+AAI_7116=5:4:ERROR:7116:500:3002:Request error
+AAI_7117=5:4:ERROR:7117:500:3002:Error in get http client object
+AAI_7118=5:4:ERROR:7118:500:3002:Script Error
+AAI_7119=5:4:ERROR:7119:500:3002:Unknown host
+
+#--- aaisdnc: 7201-7299
+AAI_7202=5:4:ERROR:7202:500:3002:Error getting connection to odl
+AAI_7203=5:4:ERROR:7203:500:3002:Unexpected error calling DataChangeNotification API
+AAI_7204=5:4:ERROR:7204:500:3002:Error returned by DataChangeNotification API
+AAI_7205=5:4:ERROR:7205:500:3002:Unexpected error running notifySDNCOnUpdate
+#AAI_7206=5:4:ERROR:7206:500:3002:Invalid data returned from ODL
+
+#--- NotificationEvent, using UEB space
+AAI_7350=5:4:ERROR:7305:500:3002:Notification event creation failed
+
+#--- aairestctlr: 7401-7499
+AAI_7401=5:4:ERROR:7401:500:3002:Error connecting to AAI REST API
+AAI_7402=5:4:ERROR:7402:500:3002:Unexpected error
+AAI_7403=5:4:WARN:7403:400:3001:Request error
+AAI_7404=5:4:INFO:7404:404:3001:Node not found
+AAI_7405=5:4:WARN:7405:200:0:UUID not formatted correctly, generating UUID
+AAI_7406=5:4:ERROR:7406:400:7406:Request Timed Out
+
+#--- aaicsiovals: 7501-7599
+#AAI_7501=5:4:WARN:7501:500:3002:Error getting connection to CSI-OVALS
+AAI_7502=5:4:WARN:7502:500:3002:Bad parameter when trying to build request for CSI-OVALS
+AAI_7503=5:4:WARN:7503:500:3002:Error returned by CSI-OVALS
+
+#-- dataexport: 8001-8099
+AAI_8001=5:4:WARN:8001:500:3002:Unable to find data snapshots
+AAI_8002=5:4:ERROR:8002:500:3002:Script Error
+AAI_8003=5:4:ERROR:8003:500:3002:Dynamic Payload Generator Error
+#--- aaiauth: 9101-9199
+AAI_9101=5:0:WARN:9101:403:3300:User is not authorized to perform function
+#AAI_9102=5:0:WARN:9102:401:3301:Refresh credentials from source
+#AAI_9103=5:0:WARN:9103:403:3300:User not found
+#AAI_9104=5:0:WARN:9104:401:3302:Authentication error
+#AAI_9105=5:0:WARN:9105:403:3300:Authorization error
+#AAI_9106=5:0:WARN:9106:403:3300:Invalid AppId
+#AAI_9107=5:0:WARN:9107:403:3300:No Username in Request
+AAI_9107=5:0:WARN:9107:403:3300:SSL is not provided in request, please contact admin
+AAI_9108=5:0:WARN:9107:403:3300:Basic auth credentials are not provided in the request
+
+#--- aaiinstar: 9201-9299
+#AAI_9201=5:4:ERROR:9201:500:3002:Unable to send notification
+AAI_9202=5:4:ERROR:9202:500:3002:Unable to start a thread
+
diff --git a/src/main/resources/etc/appprops/janusgraph-cached.properties b/src/main/resources/etc/appprops/janusgraph-cached.properties new file mode 100644 index 0000000..c90816d --- /dev/null +++ b/src/main/resources/etc/appprops/janusgraph-cached.properties @@ -0,0 +1,36 @@ +#
+# ============LICENSE_START=======================================================
+# org.onap.aai
+# ================================================================================
+# Copyright © 2017-18 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+query.fast-property=true
+# the following parameters are not reloaded automatically and require a manual bounce
+storage.backend=inmemory
+storage.hostname=localhost
+
+#schema.default=none
+storage.lock.wait-time=300
+storage.hbase.table=aaigraph-dev1.dev
+storage.hbase.ext.zookeeper.znode.parent=/hbase-unsecure
+#caching on
+cache.db-cache = true
+cache.db-cache-clean-wait = 20
+cache.db-cache-time = 180000
+cache.db-cache-size = 0.3
+
+#load graphson file on startup
+load.snapshot.file=false
\ No newline at end of file
\ No newline at end of file
diff --git a/src/main/resources/etc/appprops/janusgraph-realtime.properties b/src/main/resources/etc/appprops/janusgraph-realtime.properties
new file mode 100644
index 0000000..ccbe5ba
--- /dev/null
+++ b/src/main/resources/etc/appprops/janusgraph-realtime.properties
@@ -0,0 +1,33 @@
+#
+# ============LICENSE_START=======================================================
+# org.onap.aai
+# ================================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+query.fast-property=true
+# the following parameters are not reloaded automatically and require a manual bounce
+storage.backend=inmemory
+storage.hostname=localhost
+
+#schema.default=none
+storage.lock.wait-time=300
+storage.hbase.table=aaigraph-dev1.dev
+storage.hbase.ext.zookeeper.znode.parent=/hbase-unsecure
+# Setting db-cache to false ensures the fastest propagation of changes across servers
+cache.db-cache = false
+
+#load graphson file on startup
+load.snapshot.file=false
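The two JanusGraph profiles above differ only in their cache settings: janusgraph-realtime.properties disables the database cache so committed writes are visible to every server on the next read, while janusgraph-cached.properties accepts reads up to db-cache-time (180000 ms, about three minutes) stale in exchange for faster repeated lookups. A minimal sketch of opening each profile with the standard JanusGraphFactory API follows; the relative file paths are assumptions, and graphadmin's own wiring may differ:

import org.janusgraph.core.JanusGraph;
import org.janusgraph.core.JanusGraphFactory;
import org.janusgraph.core.JanusGraphTransaction;

public class GraphProfilesDemo {
    public static void main(String[] args) {
        // Realtime profile: cache.db-cache=false, so a committed write is seen
        // by the next read on any server sharing the storage backend.
        JanusGraph realtime = JanusGraphFactory.open("etc/appprops/janusgraph-realtime.properties");
        JanusGraphTransaction tx = realtime.newTransaction();
        tx.addVertex();   // write through the realtime graph
        tx.commit();
        realtime.close();

        // Cached profile: cache.db-cache=true with db-cache-time=180000, so a
        // read may be served from a cache that is up to ~3 minutes stale.
        JanusGraph cached = JanusGraphFactory.open("etc/appprops/janusgraph-cached.properties");
        Long count = cached.traversal().V().count().next();
        System.out.println("vertex count (possibly stale): " + count);
        cached.close();
    }
}

Note that with the storage.backend=inmemory setting shipped here, each open() creates an isolated store; the cross-server propagation trade-off only comes into play once a shared backend such as HBase or Cassandra is configured.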
\ No newline at end of file diff --git a/src/main/resources/etc/appprops/logging.properties b/src/main/resources/etc/appprops/logging.properties new file mode 100644 index 0000000..e029cc4 --- /dev/null +++ b/src/main/resources/etc/appprops/logging.properties @@ -0,0 +1,128 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +handlers = 1catalina.org.apache.juli.FileHandler, 2localhost.org.apache.juli.FileHandler, 3manager.org.apache.juli.FileHandler, 4host-manager.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler + +.handlers = 1catalina.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler + +############################################################ +# Handler specific properties. +# Describes specific configuration info for Handlers. +############################################################ + +# this is where we will limit logging on components +org.apache.hadoop.level=WARNING +org.apache.zookeeper.level=WARNING +org.reflections.level=WARNING +com.thinkaurelius.level=WARNING + +1catalina.org.apache.juli.FileHandler.level = FINE +1catalina.org.apache.juli.FileHandler.directory = ${catalina.base}/logs +1catalina.org.apache.juli.FileHandler.prefix = catalina. + +2localhost.org.apache.juli.FileHandler.level = FINE +2localhost.org.apache.juli.FileHandler.directory = ${catalina.base}/logs +2localhost.org.apache.juli.FileHandler.prefix = localhost. + +3manager.org.apache.juli.FileHandler.level = FINE +3manager.org.apache.juli.FileHandler.directory = ${catalina.base}/logs +3manager.org.apache.juli.FileHandler.prefix = manager. + +4host-manager.org.apache.juli.FileHandler.level = FINE +4host-manager.org.apache.juli.FileHandler.directory = ${catalina.base}/logs +4host-manager.org.apache.juli.FileHandler.prefix = host-manager. + +java.util.logging.ConsoleHandler.level = INFO +java.util.logging.ConsoleHandler.formatter = java.util.logging.SimpleFormatter + + + +############################################################ +# Facility specific properties. +# Provides extra control for each logger. 
+############################################################ + +org.apache.catalina.core.ContainerBase.[Catalina].[localhost].level = INFO +org.apache.catalina.core.ContainerBase.[Catalina].[localhost].handlers = 2localhost.org.apache.juli.FileHandler + +org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/manager].level = INFO +org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/manager].handlers = 3manager.org.apache.juli.FileHandler + +org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/host-manager].level = INFO +org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/host-manager].handlers = 4host-manager.org.apache.juli.FileHandler + +# For example, set the org.apache.catalina.util.LifecycleBase logger to log +# each component that extends LifecycleBase changing state: +#org.apache.catalina.util.LifecycleBase.level = FINE + +# To see debug messages in TldLocationsCache, uncomment the following line: +#org.apache.jasper.compiler.TldLocationsCache.level = FINE + + +################################ +# OpenEJB/TomEE specific loggers +################################ +# +# ACTIVATE LEVEL/HANDLERS YOU WANT +# IF YOU ACTIVATE 5tomee.org.apache.juli.FileHandler +# ADD IT TO handlers LINE LIKE: +# +# handlers = 1catalina.org.apache.juli.FileHandler, 2localhost.org.apache.juli.FileHandler, 3manager.org.apache.juli.FileHandler, 4host-manager.org.apache.juli.FileHandler, 5tomee.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler +# +# LEVELS: +# ======= +# +# OpenEJB.level = WARNING +# OpenEJB.options.level = INFO +# OpenEJB.server.level = INFO +# OpenEJB.startup.level = INFO +# OpenEJB.startup.service.level = WARNING +# OpenEJB.startup.config.level = INFO +# OpenEJB.hsql.level = INFO +# CORBA-Adapter.level = WARNING +# Transaction.level = WARNING +# org.apache.activemq.level = SEVERE +# org.apache.geronimo.level = SEVERE +# openjpa.level = WARNING +# OpenEJB.cdi.level = INFO +# org.apache.webbeans.level = INFO +# org.apache.openejb.level = FINE +# +# HANDLERS: +# ========= +# +# OpenEJB.handlers = 5tomee.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler +# OpenEJB.options.handlers = 5tomee.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler +# OpenEJB.server.handlers = 5tomee.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler +# OpenEJB.startup.handlers = 5tomee.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler +# OpenEJB.startup.service.handlers = 5tomee.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler +# OpenEJB.startup.config.handlers = 5tomee.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler +# OpenEJB.hsql.handlers = 5tomee.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler +# CORBA-Adapter.handlers = 5tomee.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler +# Transaction.handlers = 5tomee.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler +# org.apache.activemq.handlers = 5tomee.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler +# org.apache.geronimo.handlers = 5tomee.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler +# openjpa.handlers = 5tomee.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler +# OpenEJB.cdi.handlers = 5tomee.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler +# org.apache.webbeans.handlers = 5tomee.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler +# org.apache.openejb.handlers = 5tomee.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler +# +# 
TOMEE HANDLER SAMPLE:
+# =====================
+#
+# 5tomee.org.apache.juli.FileHandler.level = FINEST
+# 5tomee.org.apache.juli.FileHandler.directory = ${catalina.base}/logs
+# 5tomee.org.apache.juli.FileHandler.prefix = tomee.
+
diff --git a/src/main/resources/etc/auth/aai_keystore b/src/main/resources/etc/auth/aai_keystore
Binary files differ
new file mode 100644
index 0000000..16d93a7
--- /dev/null
+++ b/src/main/resources/etc/auth/aai_keystore
diff --git a/src/main/resources/etc/auth/realm.properties b/src/main/resources/etc/auth/realm.properties
new file mode 100644
index 0000000..f0e0172
--- /dev/null
+++ b/src/main/resources/etc/auth/realm.properties
@@ -0,0 +1,13 @@
+# format : username: password[,rolename ...]
+# default username/password: AAI/AAI, MSO/MSO, ModelLoader/ModelLoader...
+AAI:OBF:1gfr1ev31gg7,admin
+MSO:OBF:1jzx1lz31k01,admin
+SDNC:OBF:1itr1i0l1i151isv,admin
+DCAE:OBF:1g8u1f9d1f991g8w,admin
+POLICY:OBF:1mk61i171ima1im41i0j1mko,admin
+ASDC:OBF:1f991j0u1j001f9d,admin
+VID:OBF:1jm91i0v1jl9,admin
+APPC:OBF:1f991ksf1ksf1f9d,admin
+ModelLoader:OBF:1qvu1v2h1sov1sar1wfw1j7j1wg21saj1sov1v1x1qxw,admin
+AaiUI:OBF:1gfr1p571unz1p4j1gg7,admin
+OOF:OBF:1img1ke71ily,admin
diff --git a/src/main/resources/etc/scriptdata/addmanualdata/README b/src/main/resources/etc/scriptdata/addmanualdata/README
new file mode 100644
index 0000000..662f35b
--- /dev/null
+++ b/src/main/resources/etc/scriptdata/addmanualdata/README
@@ -0,0 +1,17 @@
+To add manual data, populate two files in the release directory under this folder.
+If the release directory does not exist, create it.
+
+The addManualData.sh script requires the release to be passed
+as a parameter. It finds and applies manual data changes under
+the release folder matching the parameter.
+
+The script is expected to be run for each installation. It uses
+the PutTool and flags the PUT to ignore the 412 errors produced
+when the resource already exists.
+
+100-<file>.txt contains the resource to be PUT.
+100-<file>.json is the JSON payload passed to the PutTool.
+
+
+bundleconfig/etc/scriptdata/addmanualdata/1610/<file>.txt
+bundleconfig/etc/scriptdata/addmanualdata/1610/<file>.json
diff --git a/src/main/resources/etc/scriptdata/addmanualdata/tenant_isolation/README b/src/main/resources/etc/scriptdata/addmanualdata/tenant_isolation/README
new file mode 100644
index 0000000..16510a0
--- /dev/null
+++ b/src/main/resources/etc/scriptdata/addmanualdata/tenant_isolation/README
@@ -0,0 +1 @@
+The tenant_isolation directory is used to store the payload files created by the dynamic payload generator.
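The realm.properties file above stores Jetty-obfuscated credentials: OBF: values are a reversible encoding rather than a hash, which is why the header comment can list the default username/password pairs. A small sketch using Jetty's Password utility (assumes jetty-util on the classpath) for generating and decoding such entries:

import org.eclipse.jetty.util.security.Password;

public class ObfDemo {
    public static void main(String[] args) {
        // Encode a credential for realm.properties; per the default-credentials
        // comment above, this should reproduce the AAI entry's OBF:1gfr1ev31gg7.
        String obf = Password.obfuscate("AAI");
        // Decode it again; OBF is obfuscation, not encryption.
        String clear = Password.deobfuscate(obf);
        System.out.println(obf + " <-> " + clear);
    }
}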
diff --git a/src/main/resources/etc/scriptdata/tenant_isolation/inputFilters.json b/src/main/resources/etc/scriptdata/tenant_isolation/inputFilters.json new file mode 100644 index 0000000..07ee9c4 --- /dev/null +++ b/src/main/resources/etc/scriptdata/tenant_isolation/inputFilters.json @@ -0,0 +1,104 @@ +{ + "cloud-region" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "2.5|3.0|aic3.0|3.6" + }, + { + "property": "cloud-region-id", + "regex": "dyh1b|DHY1A|mtn23a|mtn23b|mdt25b|mtn6|au7tx" + } + ] + }, + "complex" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "2.5|3.0|aic3.0|3.6" + }, + { + "property": "cloud-region-id", + "regex": "dyh1b|DHY1A|mtn23a|mtn23b|mdt25b|mtn6|au7tx" + } + ] + }, + "availability-zone" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "2.5|3.0|aic3.0|3.6" + }, + { + "property": "cloud-region-id", + "regex": "dyh1b|DHY1A|mtn23a|mtn23b|mdt25b|mtn6|au7tx" + } + ] + }, + "pserver" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "2.5|3.0|aic3.0|3.6" + }, + { + "property": "cloud-region-id", + "regex": "dyh1b|DHY1A|mtn23a|mtn23b|mdt25b|mtn6|au7tx" + } + ] + }, + "zone" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "2.5|3.0|aic3.0|3.6" + }, + { + "property": "cloud-region-id", + "regex": "dyh1b|DHY1A|mtn23a|mtn23b|mdt25b|mtn6|au7tx" + } + ] + }, + "tenant" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "2.5|3.0|aic3.0|3.6" + }, + { + "property": "cloud-region-id", + "regex": "dyh1b|DHY1A|mtn23a|mtn23b|mdt25b|mtn6|au7tx" + } + ] + } +}
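Each entry in inputFilters.json above names a filtered-node-type and a list of property/regex filters; a cloud-region qualifies only when every filter's regex matches the corresponding property, so the alternations act as allow-lists of owners, versions, and region ids. A hypothetical sketch of that matching semantics, assuming full-match regex evaluation (illustrative only, not the actual dynamic payload generator code):

import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;

public class FilterCheck {
    /** True when every filter's regex fully matches the node's property value. */
    static boolean matches(Map<String, String> node, List<Map<String, String>> filters) {
        return filters.stream().allMatch(f ->
                Pattern.matches(f.get("regex"), node.getOrDefault(f.get("property"), "")));
    }

    public static void main(String[] args) {
        Map<String, String> cloudRegion = Map.of(
                "cloud-owner", "att-aic",
                "cloud-region-version", "aic3.0",
                "cloud-region-id", "mtn23a");
        List<Map<String, String>> filters = List.of(
                Map.of("property", "cloud-owner", "regex", "att-aic"),
                Map.of("property", "cloud-region-version", "regex", "2.5|3.0|aic3.0|3.6"),
                Map.of("property", "cloud-region-id", "regex", "dyh1b|DHY1A|mtn23a|mtn23b|mdt25b|mtn6|au7tx"));
        System.out.println(matches(cloudRegion, filters)); // true
    }
}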
\ No newline at end of file diff --git a/src/main/resources/etc/scriptdata/tenant_isolation/inputFiltersAllzones.json b/src/main/resources/etc/scriptdata/tenant_isolation/inputFiltersAllzones.json new file mode 100644 index 0000000..026759d --- /dev/null +++ b/src/main/resources/etc/scriptdata/tenant_isolation/inputFiltersAllzones.json @@ -0,0 +1,78 @@ +{ + "cloud-region" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "2.5|3.0" + }, + { + "property": "cloud-region-id", + "regex": "m.*" + } + ] + }, + "complex" : { + "filtered-node-type": "complex", + "filters": [] + }, + "availability-zone" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "2.5|3.0" + }, + { + "property": "cloud-region-id", + "regex": "m.*" + } + ] + }, + "pserver" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "2.5|3.0" + }, + { + "property": "cloud-region-id", + "regex": "m.*" + } + ] + }, + "zone" : { + "filtered-node-type": "zone", + "filters": [] + }, + "tenant" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "2.5|3.0" + }, + { + "property": "cloud-region-id", + "regex": "m.*" + } + ] + } +} diff --git a/src/main/resources/etc/scriptdata/tenant_isolation/inputFilters_E2E.json b/src/main/resources/etc/scriptdata/tenant_isolation/inputFilters_E2E.json new file mode 100644 index 0000000..24ee80d --- /dev/null +++ b/src/main/resources/etc/scriptdata/tenant_isolation/inputFilters_E2E.json @@ -0,0 +1,78 @@ +{ + "cloud-region" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "3.0|aic3.0|3.6" + }, + { + "property": "cloud-region-id", + "regex": "RDM3|RDM5a|RDM5b|RDM6a|RDM6b|DPA2a|DPA2b" + } + ] + }, + "complex" : { + "filtered-node-type": "complex", + "filters": [] + }, + "availability-zone" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "3.0|aic3.0|3.6" + }, + { + "property": "cloud-region-id", + "regex": "RDM3|RDM5a|RDM5b|RDM6a|RDM6b|DPA2a|DPA2b" + } + ] + }, + "pserver" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "3.0|aic3.0|3.6" + }, + { + "property": "cloud-region-id", + "regex": "RDM3|RDM5a|RDM5b|RDM6a|RDM6b|DPA2a|DPA2b" + } + ] + }, + "zone" : { + "filtered-node-type": "zone", + "filters": [] + }, + "tenant" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "3.0|aic3.0|3.6" + }, + { + "property": "cloud-region-id", + "regex": "RDM3|RDM5a|RDM5b|RDM6a|RDM6b|DPA2a|DPA2b" + } + ] + } +} diff --git a/src/main/resources/etc/scriptdata/tenant_isolation/inputFilters_IST.json b/src/main/resources/etc/scriptdata/tenant_isolation/inputFilters_IST.json new file mode 100644 index 0000000..8ebbf6c --- /dev/null +++ 
b/src/main/resources/etc/scriptdata/tenant_isolation/inputFilters_IST.json @@ -0,0 +1,78 @@ +{ + "cloud-region" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "3.0|aic3.0|3.6" + }, + { + "property": "cloud-region-id", + "regex": "dyh1b|DHY1A|mtn23a|mtn23b|mdt25b|mtn6|au7tx" + } + ] + }, + "complex" : { + "filtered-node-type": "complex", + "filters": [] + }, + "availability-zone" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "3.0|aic3.0|3.6" + }, + { + "property": "cloud-region-id", + "regex": "dyh1b|DHY1A|mtn23a|mtn23b|mdt25b|mtn6|au7tx" + } + ] + }, + "pserver" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "3.0|aic3.0|3.6" + }, + { + "property": "cloud-region-id", + "regex": "dyh1b|DHY1A|mtn23a|mtn23b|mdt25b|mtn6|au7tx" + } + ] + }, + "zone" : { + "filtered-node-type": "zone", + "filters": [] + }, + "tenant" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "3.0|aic3.0|3.6" + }, + { + "property": "cloud-region-id", + "regex": "dyh1b|DHY1A|mtn23a|mtn23b|mdt25b|mtn6|au7tx" + } + ] + } +} diff --git a/src/main/resources/etc/scriptdata/tenant_isolation/inputFilters_PROD.json b/src/main/resources/etc/scriptdata/tenant_isolation/inputFilters_PROD.json new file mode 100644 index 0000000..24ee80d --- /dev/null +++ b/src/main/resources/etc/scriptdata/tenant_isolation/inputFilters_PROD.json @@ -0,0 +1,78 @@ +{ + "cloud-region" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "3.0|aic3.0|3.6" + }, + { + "property": "cloud-region-id", + "regex": "RDM3|RDM5a|RDM5b|RDM6a|RDM6b|DPA2a|DPA2b" + } + ] + }, + "complex" : { + "filtered-node-type": "complex", + "filters": [] + }, + "availability-zone" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "3.0|aic3.0|3.6" + }, + { + "property": "cloud-region-id", + "regex": "RDM3|RDM5a|RDM5b|RDM6a|RDM6b|DPA2a|DPA2b" + } + ] + }, + "pserver" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "3.0|aic3.0|3.6" + }, + { + "property": "cloud-region-id", + "regex": "RDM3|RDM5a|RDM5b|RDM6a|RDM6b|DPA2a|DPA2b" + } + ] + }, + "zone" : { + "filtered-node-type": "zone", + "filters": [] + }, + "tenant" : { + "filtered-node-type": "cloud-region", + "filters": [ + { + "property": "cloud-owner", + "regex": "att-aic" + }, + { + "property": "cloud-region-version", + "regex": "3.0|aic3.0|3.6" + }, + { + "property": "cloud-region-id", + "regex": "RDM3|RDM5a|RDM5b|RDM6a|RDM6b|DPA2a|DPA2b" + } + ] + } +} diff --git a/src/main/resources/etc/scriptdata/tenant_isolation/nodes.json b/src/main/resources/etc/scriptdata/tenant_isolation/nodes.json new file mode 100644 index 0000000..1bfc62b --- /dev/null +++ b/src/main/resources/etc/scriptdata/tenant_isolation/nodes.json @@ -0,0 +1,26 @@ +{ + "cloud-region": { + "cousins" : ["complex","zone"], + 
"parents" : [] + }, + "availability-zone": { + "cousins" : ["complex"], + "parents":["cloud-region"] + }, + "pserver" : { + "cousins" : ["zone", "complex", "availability-zone","cloud-region"], + "parents":[] + }, + "complex" : { + "cousins":[], + "parents":[] + }, + "tenant" : { + "cousins":[], + "parents":["cloud-region"] + }, + "zone" : { + "cousins":["complex"], + "parents":[] + } +} diff --git a/src/main/resources/etc/scriptdata/tenant_isolation/nodesAZCloud.json b/src/main/resources/etc/scriptdata/tenant_isolation/nodesAZCloud.json new file mode 100644 index 0000000..b955757 --- /dev/null +++ b/src/main/resources/etc/scriptdata/tenant_isolation/nodesAZCloud.json @@ -0,0 +1,22 @@ +{ + "cloud-region": { + "cousins" : [], + "parents":[] + }, + "availability-zone": { + "cousins" : [], + "parents":["cloud-region"] + }, + "pserver" : { + "cousins" : ["zone", "complex", "availability-zone"], + "parents":[] + }, + "complex" : { + "cousins":[], + "parents":[] + }, + "zone" : { + "cousins":["complex"], + "parents":[] + } +} diff --git a/src/main/resources/etc/scriptdata/tenant_isolation/nodesIncremental.json b/src/main/resources/etc/scriptdata/tenant_isolation/nodesIncremental.json new file mode 100644 index 0000000..0816bc4 --- /dev/null +++ b/src/main/resources/etc/scriptdata/tenant_isolation/nodesIncremental.json @@ -0,0 +1,10 @@ +{ + "pserver" : { + "cousins" : ["zone", "complex", "availability-zone","cloud-region"], + "parents":[] + }, + "tenant" : { + "cousins":[], + "parents":["cloud-region"] + } +} diff --git a/src/main/resources/etc/scriptdata/tenant_isolation/nodesNoAZ.json b/src/main/resources/etc/scriptdata/tenant_isolation/nodesNoAZ.json new file mode 100644 index 0000000..a0dfae5 --- /dev/null +++ b/src/main/resources/etc/scriptdata/tenant_isolation/nodesNoAZ.json @@ -0,0 +1,14 @@ +{ + "pserver" : { + "cousins" : ["zone", "complex"], + "parents":[] + }, + "complex" : { + "cousins":[], + "parents":[] + }, + "zone" : { + "cousins":["complex"], + "parents":[] + } +} diff --git a/src/main/resources/forceDelete-logback.xml b/src/main/resources/forceDelete-logback.xml new file mode 100644 index 0000000..5a3b2e2 --- /dev/null +++ b/src/main/resources/forceDelete-logback.xml @@ -0,0 +1,85 @@ +<!-- + + ============LICENSE_START======================================================= + org.onap.aai + ================================================================================ + Copyright © 2017 AT&T Intellectual Property. All rights reserved. + ================================================================================ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ============LICENSE_END========================================================= + + ECOMP is a trademark and service mark of AT&T Intellectual Property. 
+ +--> +<configuration> + <property name="logDirectory" value="${AJSC_HOME}/logs" /> + + <appender name="forceDeletelog" class="ch.qos.logback.classic.sift.SiftingAppender"> + <filter class="ch.qos.logback.classic.filter.LevelFilter"> + <level>INFO</level> + <onMatch>ACCEPT</onMatch> + <onMismatch>DENY</onMismatch> + </filter> + <!-- This is MDC value --> + <!-- We will assign a value to 'logFilenameAppender' via Java code --> + <discriminator> + <key>logFilenameAppender</key> + <defaultValue>console</defaultValue> + </discriminator> + <sift> + <!-- A standard RollingFileAppender, the log file is based on 'logFileName' + at runtime --> + <appender name="FILE-${logFilenameAppender}" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/forceDelete/${logFilenameAppender}.log</file> + <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy"> + <fileNamePattern>${logDirectory}/forceDelete/${logFilenameAppender}.log.%d{yyyy-MM-dd} + </fileNamePattern> + </rollingPolicy> + <encoder> + <pattern>%d{yyyy-MM-dd'T'HH:mm:ss.SSSXXX}|%m%n</pattern> + </encoder> + </appender> + </sift> + </appender> + + <logger name="org.reflections" level="ERROR" additivity="false"> + <appender-ref ref="forceDeletelog" /> + </logger> + <logger name="org.apache.zookeeper" level="ERROR" additivity="false"> + <appender-ref ref="forceDeletelog" /> + </logger> + <logger name="org.apache.hadoop" level="ERROR" additivity="false"> + <appender-ref ref="forceDeletelog" /> + </logger> + <logger name="org.janusgraph" level="ERROR" additivity="false"> + <appender-ref ref="forceDeletelog" /> + </logger> + <logger name="ch.qos.logback.classic" level="ERROR" additivity="false"> + <appender-ref ref="forceDeletelog" /> + </logger> + <logger name="ch.qos.logback.core" level="ERROR" additivity="false"> + <appender-ref ref="forceDeletelog" /> + </logger> + <logger name="com.att.eelf" level="ERROR" additivity="false"> + <appender-ref ref="forceDeletelog" /> + </logger> + <logger name="org.onap.aai" level="ERROR" additivity="false"> + <appender-ref ref="forceDeletelog" /> + </logger> + + + <root level="INFO"> + <appender-ref ref="forceDeletelog" /> + </root> +</configuration>
\ No newline at end of file
diff --git a/src/main/resources/localhost-access-logback.xml b/src/main/resources/localhost-access-logback.xml
new file mode 100644
index 0000000..a318796
--- /dev/null
+++ b/src/main/resources/localhost-access-logback.xml
@@ -0,0 +1,62 @@
+<!--
+
+    ============LICENSE_START=======================================================
+    org.onap.aai
+    ================================================================================
+    Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+    ================================================================================
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+    ============LICENSE_END=========================================================
+
+    ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+-->
+<configuration>
+    <property name="AJSC_HOME" value="${AJSC_HOME:-.}" />
+    <appender name="ACCESS"
+        class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <file>${AJSC_HOME}/logs/ajsc-jetty/localhost_access.log</file>
+        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+            <fileNamePattern>${AJSC_HOME}/logs/ajsc-jetty/localhost_access.log.%d{yyyy-MM-dd}
+            </fileNamePattern>
+        </rollingPolicy>
+        <encoder class="org.onap.aai.logging.CustomLogPatternLayoutEncoder">
+            <Pattern>%a %u %z [%t] "%m %U%q" %s %b %y %i{X-TransactionId} %i{X-FromAppId} %i{X-Forwarded-For} %i{X-AAI-SSL-Client-CN} %i{X-AAI-SSL-Client-OU} %i{X-AAI-SSL-Client-O} %i{X-AAI-SSL-Client-L} %i{X-AAI-SSL-Client-ST} %i{X-AAI-SSL-Client-C} %i{X-AAI-SSL-Client-NotBefore} %i{X-AAI-SSL-Client-NotAfter} %i{X-AAI-SSL-Client-DN} %D</Pattern>
+        </encoder>
+    </appender>
+    <appender-ref ref="ACCESS" />
+</configuration>
+
+<!--
+%a - Remote IP address
+%A - Local IP address
+%b - Bytes sent, excluding HTTP headers, or '-' if no bytes were sent
+%B - Bytes sent, excluding HTTP headers
+%h - Remote host name
+%H - Request protocol
+%l - Remote logical username from identd (always returns '-')
+%m - Request method
+%p - Local port
+%q - Query string (prepended with a '?' if it exists, otherwise an empty string)
+%r - First line of the request
+%s - HTTP status code of the response
+%S - User session ID
+%t - Date and time, in Common Log Format
+%u - Remote user that was authenticated
+%U - Requested URL path
+%v - Local server name
+%I - current request thread name (can compare later with stacktraces)
+
+%z - Custom pattern that parses the cert for the subject
+%y - Custom pattern that determines rest or dme2
+ -->
\ No newline at end of file diff --git a/src/main/resources/logback.xml b/src/main/resources/logback.xml new file mode 100644 index 0000000..8f40031 --- /dev/null +++ b/src/main/resources/logback.xml @@ -0,0 +1,701 @@ +<!-- + + ============LICENSE_START======================================================= + org.onap.aai + ================================================================================ + Copyright 2017 AT&T Intellectual Property. All rights reserved. + ================================================================================ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ============LICENSE_END========================================================= + + ECOMP is a trademark and service mark of AT&T Intellectual Property. + +--> +<configuration scan="true" scanPeriod="60 seconds" debug="false"> + <statusListener class="ch.qos.logback.core.status.NopStatusListener" /> + + <property resource="application.properties" /> + + <property name="namespace" value="graph-admin"/> + + <property name="AJSC_HOME" value="${AJSC_HOME:-.}" /> + + <property name="logDirectory" value="${AJSC_HOME}/logs" /> + <property name="eelfLogPattern" value="%ecompStartTime|%date{yyyy-MM-dd'T'HH:mm:ss.SSSZ, UTC}|%X{requestId}|%X{serviceInstanceId}|%-10t|%X{serverName}|%X{serviceName}|%X{partnerName}|%ecompStatusCode|%X{responseCode}|%replace(%replace(%X{responseDescription}){'\\|', '!'}){'\r|\n', '^'}|%X{instanceUUID}|%level|%X{severity}|%X{serverIpAddress}|%ecompElapsedTime|%X{server}|%X{clientIpAddress}|%eelfClassOfCaller|%X{unused}|%X{processKey}|%X{customField1}|%X{customField2}|%X{customField3}|%X{customField4}|co=%X{component}:%replace(%replace(%m){'\\|', '!'}){'\r|\n', '^'}%n"/> + <property name="eelfAuditLogPattern" value="%ecompStartTime|%date{yyyy-MM-dd'T'HH:mm:ss.SSSZ, UTC}|%X{requestId}|%X{serviceInstanceId}|%-10t|%X{serverName}|%X{serviceName}|%X{partnerName}|%ecompStatusCode|%X{responseCode}|%replace(%replace(%X{responseDescription}){'\\|', '!'}){'\r|\n|\r\n', '^'}|%X{instanceUUID}|%level|%X{severity}|%X{serverIpAddress}|%ecompElapsedTime|%X{server}|%X{clientIpAddress}|%eelfClassOfCaller|%X{unused}|%X{processKey}|%X{customField1}|%X{customField2}|%X{customField3}|%X{customField4}|co=%X{component}:%replace(%replace(%m){'\\|', '!'}){'\r|\n', '^'}%n"/> + <property name="eelfMetricLogPattern" value="%ecompStartTime|%date{yyyy-MM-dd'T'HH:mm:ss.SSSZ, UTC}|%X{requestId}|%X{serviceInstanceId}|%-10t|%X{serverName}|%X{serviceName}|%X{partnerName}|%X{targetEntity}|%X{targetServiceName}|%ecompStatusCode|%X{responseCode}|%replace(%replace(%X{responseDescription}){'\\|', '!'}){'\r|\n', '^'}|%X{instanceUUID}|%level|%X{severity}|%X{serverIpAddress}|%ecompElapsedTime|%X{server}|%X{clientIpAddress}|%eelfClassOfCaller|%X{unused}|%X{processKey}|%X{targetVirtualEntity}|%X{customField1}|%X{customField2}|%X{customField3}|%X{customField4}|co=%X{component}:%replace(%replace(%m){'\\|', '!'}){'\r|\n', '^'}%n"/> + <!-- <property name="eelfErrorLogPattern" 
value="%ecompStartTime|%X{requestId}|%-10t|%X{serviceName}|%X{partnerName}|%X{targetEntity}|%X{targetServiceName}|%ecompErrorCategory|%X{responseCode}|%replace(%replace(%X{responseDescription}){'\\|', '!'}){'\r|\n|\r\n', '^'}|co=%X{component}:%replace(%replace(%m){'\\|', '!'}){'\r|\n', '^'}%n"/> --> + <property name="eelfErrorLogPattern" value="%ecompStartTime|%X{requestId}|%-10t|%X{serviceName}|%X{partnerName}|%X{targetEntity}|%X{targetServiceName}|%ecompErrorCategory|%ecompResponseCode|%ecompResponseDescription|co=%X{component}:%replace(%replace(%m){'\\|', '!'}){'\r|\n', '^'}%n"/> + <property name="eelfTransLogPattern" value="%ecompStartTime|%date{yyyy-MM-dd'T'HH:mm:ss.SSSZ, UTC}|%X{requestId}|%X{serviceInstanceId}|%-10t|%X{serverName}|%X{serviceName}|%X{partnerName}|%ecompStatusCode|%X{responseCode}|%replace(%replace(%X{responseDescription}){'\\|', '!'}){'\r|\n', '^'}|%X{instanceUUID}|%level|%X{severity}|%X{serverIpAddress}|%ecompElapsedTime|%X{server}|%X{clientIpAddress}|%eelfClassOfCaller|%X{unused}|%X{processKey}|%X{customField1}|%X{customField2}|%X{customField3}|%X{customField4}|co=%X{partnerName}:%m%n"/> + + <conversionRule conversionWord="clr" converterClass="org.springframework.boot.logging.logback.ColorConverter" /> + <conversionRule conversionWord="wex" converterClass="org.springframework.boot.logging.logback.WhitespaceThrowableProxyConverter" /> + <conversionRule conversionWord="wEx" converterClass="org.springframework.boot.logging.logback.ExtendedWhitespaceThrowableProxyConverter" /> + <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender"> + <encoder> + <pattern> + %clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %clr(%-40.40logger{39}){cyan} %clr(:){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx} + </pattern> + </encoder> + </appender> + + <appender name="SANE" class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/rest/sane.log</file> + <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy"> + <fileNamePattern>${logDirectory}/rest/sane.log.%d{yyyy-MM-dd}</fileNamePattern> + </rollingPolicy> + <encoder> + <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} - %msg%n + </pattern> + </encoder> + </appender> + + <appender name="asyncSANE" class="ch.qos.logback.classic.AsyncAppender"> + <queueSize>1000</queueSize> + <includeCallerData>true</includeCallerData> + <appender-ref ref="SANE" /> + </appender> + + <appender name="METRIC" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <filter class="ch.qos.logback.classic.filter.LevelFilter"> + <level>INFO</level> + <onMatch>ACCEPT</onMatch> + <onMismatch>DENY</onMismatch> + </filter> + <file>${logDirectory}/rest/metrics.log</file> + <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy"> + <fileNamePattern>${logDirectory}/rest/metrics.log.%d{yyyy-MM-dd} + </fileNamePattern> + </rollingPolicy> + <encoder class="org.onap.aai.logging.EcompEncoder"> + <pattern>${eelfMetricLogPattern}</pattern> + </encoder> + </appender> + <appender name="asyncMETRIC" class="ch.qos.logback.classic.AsyncAppender"> + <queueSize>1000</queueSize> + <includeCallerData>true</includeCallerData> + <appender-ref ref="METRIC" /> + </appender> + + <appender name="DEBUG" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <filter class="ch.qos.logback.classic.filter.LevelFilter"> + <level>DEBUG</level> + <onMatch>ACCEPT</onMatch> + <onMismatch>DENY</onMismatch> + </filter> + 
<file>${logDirectory}/rest/debug.log</file>
+        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+            <fileNamePattern>${logDirectory}/rest/debug.log.%d{yyyy-MM-dd}
+            </fileNamePattern>
+        </rollingPolicy>
+        <encoder class="org.onap.aai.logging.EcompEncoder">
+            <pattern>${eelfLogPattern}</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="asyncDEBUG" class="ch.qos.logback.classic.AsyncAppender">
+        <queueSize>1000</queueSize>
+        <includeCallerData>true</includeCallerData>
+        <appender-ref ref="DEBUG" />
+    </appender>
+
+    <appender name="ERROR"
+        class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+            <level>WARN</level>
+        </filter>
+        <file>${logDirectory}/rest/error.log</file>
+        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+            <fileNamePattern>${logDirectory}/rest/error.log.%d{yyyy-MM-dd}
+            </fileNamePattern>
+        </rollingPolicy>
+        <encoder class="org.onap.aai.logging.EcompEncoder">
+            <pattern>${eelfErrorLogPattern}</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="asyncERROR" class="ch.qos.logback.classic.AsyncAppender">
+        <queueSize>1000</queueSize>
+        <includeCallerData>true</includeCallerData>
+        <appender-ref ref="ERROR" />
+    </appender>
+
+    <appender name="AUDIT"
+        class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <file>${logDirectory}/rest/audit.log</file>
+        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+            <fileNamePattern>${logDirectory}/rest/audit.log.%d{yyyy-MM-dd}
+            </fileNamePattern>
+        </rollingPolicy>
+        <encoder class="org.onap.aai.logging.EcompEncoder">
+            <pattern>${eelfAuditLogPattern}</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="asyncAUDIT" class="ch.qos.logback.classic.AsyncAppender">
+        <queueSize>1000</queueSize>
+        <includeCallerData>true</includeCallerData>
+        <appender-ref ref="AUDIT" />
+    </appender>
+
+    <appender name="translog"
+        class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <filter class="ch.qos.logback.classic.filter.LevelFilter">
+            <level>DEBUG</level>
+            <onMatch>ACCEPT</onMatch>
+            <onMismatch>DENY</onMismatch>
+        </filter>
+        <file>${logDirectory}/rest/translog.log</file>
+        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+            <fileNamePattern>${logDirectory}/rest/translog.log.%d{yyyy-MM-dd}
+            </fileNamePattern>
+        </rollingPolicy>
+        <encoder class="org.onap.aai.logging.EcompEncoder">
+            <pattern>${eelfTransLogPattern}</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="asynctranslog" class="ch.qos.logback.classic.AsyncAppender">
+        <queueSize>1000</queueSize>
+        <includeCallerData>true</includeCallerData>
+        <appender-ref ref="translog" />
+    </appender>
+
+    <appender name="dmaapAAIEventConsumer"
+        class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+            <level>WARN</level>
+        </filter>
+        <File>${logDirectory}/dmaapAAIEventConsumer/error.log</File>
+        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+            <fileNamePattern>${logDirectory}/dmaapAAIEventConsumer/error.log.%d{yyyy-MM-dd}
+            </fileNamePattern>
+        </rollingPolicy>
+        <encoder class="org.onap.aai.logging.EcompEncoder">
+            <pattern>${eelfErrorLogPattern}</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="dmaapAAIEventConsumerDebug"
+        class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <filter class="ch.qos.logback.classic.filter.LevelFilter">
+            <level>DEBUG</level>
+            <onMatch>ACCEPT</onMatch>
+
<onMismatch>DENY</onMismatch> + </filter> + <File>${logDirectory}/dmaapAAIEventConsumer/debug.log</File> + <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy"> + <fileNamePattern>${logDirectory}/dmaapAAIEventConsumer/debug.log.%d{yyyy-MM-dd} + </fileNamePattern> + </rollingPolicy> + <encoder class="org.onap.aai.logging.EcompEncoder"> + <pattern>${eelfLogPattern}</pattern> + </encoder> + </appender> + <appender name="dmaapAAIEventConsumerMetric" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <filter class="ch.qos.logback.classic.filter.LevelFilter"> + <level>INFO</level> + <onMatch>ACCEPT</onMatch> + <onMismatch>DENY</onMismatch> + </filter> + <File>${logDirectory}/dmaapAAIEventConsumer/metrics.log</File> + <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy"> + <fileNamePattern>${logDirectory}/dmaapAAIEventConsumer/metrics.log.%d{yyyy-MM-dd} + </fileNamePattern> + </rollingPolicy> + <encoder class="org.onap.aai.logging.EcompEncoder"> + <pattern>${eelfMetricLogPattern}</pattern> + </encoder> + </appender> + <appender name="external" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <filter class="ch.qos.logback.classic.filter.ThresholdFilter"> + <level>WARN</level> + </filter> + <file>${logDirectory}/external/external.log</file> + <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy"> + <fileNamePattern>${logDirectory}/external/external.log.%d{yyyy-MM-dd} + </fileNamePattern> + </rollingPolicy> + <encoder class="org.onap.aai.logging.EcompEncoder"> + <pattern>${eelfLogPattern}</pattern> + </encoder> + </appender> + + <!-- DataGrooming logs started --> + <appender name="dataGrooming" class="ch.qos.logback.core.rolling.RollingFileAppender"> + <filter class="ch.qos.logback.classic.filter.ThresholdFilter"> + <level>WARN</level> + </filter> + <File>${logDirectory}/dataGrooming/error.log</File> + <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy"> + <fileNamePattern>${logDirectory}/dataGrooming/error.log.%d{yyyy-MM-dd}</fileNamePattern> + </rollingPolicy> + <encoder class="org.onap.aai.logging.EcompEncoder"> + <pattern>${eelfErrorLogPattern}</pattern> + </encoder> + </appender> + + <appender name="dataGroomingdebug" class="ch.qos.logback.core.rolling.RollingFileAppender"> + <filter class="ch.qos.logback.classic.filter.LevelFilter"> + <level>DEBUG</level> + <onMatch>ACCEPT</onMatch> + <onMismatch>DENY</onMismatch> + </filter> + <File>${logDirectory}/dataGrooming/debug.log</File> + <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy"> + <fileNamePattern>${logDirectory}/dataGrooming/debug.log.%d{yyyy-MM-dd}</fileNamePattern> + </rollingPolicy> + <encoder class="org.onap.aai.logging.EcompEncoder"> + <pattern>${eelfLogPattern}</pattern> + </encoder> + </appender> + + <appender name="dataGroomingmetric" class="ch.qos.logback.core.rolling.RollingFileAppender"> + <filter class="ch.qos.logback.classic.filter.LevelFilter"> + <level>INFO</level> + <onMatch>ACCEPT</onMatch> + <onMismatch>DENY</onMismatch> + </filter> + <File>${logDirectory}/dataGrooming/metrics.log</File> + <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy"> + <fileNamePattern>${logDirectory}/dataGrooming/metrics.log.%d{yyyy-MM-dd}</fileNamePattern> + </rollingPolicy> + <encoder class="org.onap.aai.logging.EcompEncoder"> + <pattern>${eelfMetricLogPattern}</pattern> + </encoder> + </appender> + + <!-- DataGrooming logs ended --> + + <!-- DataSnapshot logs started --> + <appender 
name="dataSnapshot" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+            <level>WARN</level>
+        </filter>
+        <File>${logDirectory}/dataSnapshot/error.log</File>
+        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+            <fileNamePattern>${logDirectory}/dataSnapshot/error.log.%d{yyyy-MM-dd}</fileNamePattern>
+        </rollingPolicy>
+        <encoder class="org.onap.aai.logging.EcompEncoder">
+            <pattern>${eelfErrorLogPattern}</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="dataSnapshotdebug" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <filter class="ch.qos.logback.classic.filter.LevelFilter">
+            <level>DEBUG</level>
+            <onMatch>ACCEPT</onMatch>
+            <onMismatch>DENY</onMismatch>
+        </filter>
+        <File>${logDirectory}/dataSnapshot/debug.log</File>
+        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+            <fileNamePattern>${logDirectory}/dataSnapshot/debug.log.%d{yyyy-MM-dd}</fileNamePattern>
+        </rollingPolicy>
+        <encoder class="org.onap.aai.logging.EcompEncoder">
+            <pattern>${eelfLogPattern}</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="dataSnapshotmetric" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <filter class="ch.qos.logback.classic.filter.LevelFilter">
+            <level>INFO</level>
+            <onMatch>ACCEPT</onMatch>
+            <onMismatch>DENY</onMismatch>
+        </filter>
+        <File>${logDirectory}/dataSnapshot/metrics.log</File>
+        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+            <fileNamePattern>${logDirectory}/dataSnapshot/metrics.log.%d{yyyy-MM-dd}</fileNamePattern>
+        </rollingPolicy>
+        <encoder class="org.onap.aai.logging.EcompEncoder">
+            <pattern>${eelfMetricLogPattern}</pattern>
+        </encoder>
+    </appender>
+
+    <!-- DataSnapshot logs ended -->
+
+    <!-- CreateDBSchema logs started -->
+    <appender name="createDBSchema" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+            <level>WARN</level>
+        </filter>
+        <File>${logDirectory}/createDBSchema/error.log</File>
+        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+            <fileNamePattern>${logDirectory}/createDBSchema/error.log.%d{yyyy-MM-dd}</fileNamePattern>
+        </rollingPolicy>
+        <encoder class="org.onap.aai.logging.EcompEncoder">
+            <pattern>${eelfErrorLogPattern}</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="createDBSchemadebug" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <filter class="ch.qos.logback.classic.filter.LevelFilter">
+            <level>DEBUG</level>
+            <onMatch>ACCEPT</onMatch>
+            <onMismatch>DENY</onMismatch>
+        </filter>
+        <File>${logDirectory}/createDBSchema/debug.log</File>
+        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+            <fileNamePattern>${logDirectory}/createDBSchema/debug.log.%d{yyyy-MM-dd}</fileNamePattern>
+        </rollingPolicy>
+        <encoder class="org.onap.aai.logging.EcompEncoder">
+            <pattern>${eelfLogPattern}</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="createDBSchemametric" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <filter class="ch.qos.logback.classic.filter.LevelFilter">
+            <level>INFO</level>
+            <onMatch>ACCEPT</onMatch>
+            <onMismatch>DENY</onMismatch>
+        </filter>
+        <File>${logDirectory}/createDBSchema/metrics.log</File>
+        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+            <fileNamePattern>${logDirectory}/createDBSchema/metrics.log.%d{yyyy-MM-dd}</fileNamePattern>
+        </rollingPolicy>
+        <encoder class="org.onap.aai.logging.EcompEncoder">
+            <pattern>${eelfMetricLogPattern}</pattern>
+        </encoder>
+    </appender>
+    <!-- CreateDBSchema logs ended -->
+
+    <!-- DataCleanupTasks logs started -->
+    <appender name="dataCleanuperror" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+            <level>WARN</level>
+        </filter>
+        <File>${logDirectory}/misc/error.log</File>
+        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+            <fileNamePattern>${logDirectory}/misc/error.log.%d{yyyy-MM-dd}</fileNamePattern>
+        </rollingPolicy>
+        <encoder class="org.onap.aai.logging.EcompEncoder">
+            <pattern>${eelfErrorLogPattern}</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="dataCleanupdebug" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <filter class="ch.qos.logback.classic.filter.LevelFilter">
+            <level>DEBUG</level>
+            <onMatch>ACCEPT</onMatch>
+            <onMismatch>DENY</onMismatch>
+        </filter>
+        <File>${logDirectory}/misc/debug.log</File>
+        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+            <fileNamePattern>${logDirectory}/misc/debug.log.%d{yyyy-MM-dd}</fileNamePattern>
+        </rollingPolicy>
+        <encoder class="org.onap.aai.logging.EcompEncoder">
+            <pattern>${eelfLogPattern}</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="dataCleanupmetric" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <filter class="ch.qos.logback.classic.filter.LevelFilter">
+            <level>INFO</level>
+            <onMatch>ACCEPT</onMatch>
+            <onMismatch>DENY</onMismatch>
+        </filter>
+        <File>${logDirectory}/misc/metrics.log</File>
+        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+            <fileNamePattern>${logDirectory}/misc/metrics.log.%d{yyyy-MM-dd}</fileNamePattern>
+        </rollingPolicy>
+        <encoder class="org.onap.aai.logging.EcompEncoder">
+            <pattern>${eelfMetricLogPattern}</pattern>
+        </encoder>
+    </appender>
+    <!-- DataCleanupTasks logs ended -->
+
+    <!-- pullInvData logs started -->
+    <appender name="pullInvData" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+            <level>WARN</level>
+        </filter>
+        <File>${logDirectory}/pullInvData/error.log</File>
+        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+            <fileNamePattern>${logDirectory}/pullInvData/error.log.%d{yyyy-MM-dd}</fileNamePattern>
+        </rollingPolicy>
+        <encoder class="org.onap.aai.logging.EcompEncoder">
+            <pattern>${eelfErrorLogPattern}</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="pullInvDatadebug" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <filter class="ch.qos.logback.classic.filter.LevelFilter">
+            <level>DEBUG</level>
+            <onMatch>ACCEPT</onMatch>
+            <onMismatch>DENY</onMismatch>
+        </filter>
+        <File>${logDirectory}/pullInvData/debug.log</File>
+        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+            <fileNamePattern>${logDirectory}/pullInvData/debug.log.%d{yyyy-MM-dd}</fileNamePattern>
+        </rollingPolicy>
+        <encoder class="org.onap.aai.logging.EcompEncoder">
+            <pattern>${eelfLogPattern}</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="pullInvDatametric" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <filter class="ch.qos.logback.classic.filter.LevelFilter">
+            <level>INFO</level>
+            <onMatch>ACCEPT</onMatch>
+            <onMismatch>DENY</onMismatch>
+        </filter>
+        <File>${logDirectory}/pullInvData/metrics.log</File>
+        <rollingPolicy
class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy"> + <fileNamePattern>${logDirectory}/pullInvData/metrics.log.%d{yyyy-MM-dd}</fileNamePattern> + </rollingPolicy> + <encoder class="org.onap.aai.logging.EcompEncoder"> + <pattern>${eelfMetricLogPattern}</pattern> + </encoder> + </appender> + <!-- pullInvData logs ended --> + <!-- DataGrooming logs started --> + <appender name="dataExportError" class="ch.qos.logback.core.rolling.RollingFileAppender"> + <filter class="ch.qos.logback.classic.filter.ThresholdFilter"> + <level>WARN</level> + </filter> + <File>${logDirectory}/dataExport/error.log</File> + <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy"> + <fileNamePattern>${logDirectory}/dataExport/error.log.%d{yyyy-MM-dd}</fileNamePattern> + </rollingPolicy> + <encoder class="org.onap.aai.logging.EcompEncoder"> + <pattern>${eelfErrorLogPattern}</pattern> + </encoder> + </appender> + + <appender name="dataExportDebug" class="ch.qos.logback.core.rolling.RollingFileAppender"> + <filter class="ch.qos.logback.classic.filter.LevelFilter"> + <level>DEBUG</level> + <onMatch>ACCEPT</onMatch> + <onMismatch>DENY</onMismatch> + </filter> + <File>${logDirectory}/dataExport/debug.log</File> + <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy"> + <fileNamePattern>${logDirectory}/dataExport/debug.log.%d{yyyy-MM-dd}</fileNamePattern> + </rollingPolicy> + <encoder class="org.onap.aai.logging.EcompEncoder"> + <pattern>${eelfLogPattern}</pattern> + </encoder> + </appender> + + <appender name="dataExportMetric" class="ch.qos.logback.core.rolling.RollingFileAppender"> + <filter class="ch.qos.logback.classic.filter.LevelFilter"> + <level>INFO</level> + <onMatch>ACCEPT</onMatch> + <onMismatch>DENY</onMismatch> + </filter> + <File>${logDirectory}/dataExport/metrics.log</File> + <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy"> + <fileNamePattern>${logDirectory}/dataExport/metrics.log.%d{yyyy-MM-dd}</fileNamePattern> + </rollingPolicy> + <encoder class="org.onap.aai.logging.EcompEncoder"> + <pattern>${eelfMetricLogPattern}</pattern> + </encoder> + </appender> + + <logger name="org.onap.aai" level="DEBUG" additivity="false"> + <appender-ref ref="asyncDEBUG" /> + <appender-ref ref="asyncERROR" /> + <appender-ref ref="asyncMETRIC" /> + <appender-ref ref="asyncSANE" /> + </logger> + + <!-- Spring related loggers --> + <logger name="org.springframework" level="WARN" /> + <logger name="org.springframework.beans" level="WARN" /> + <logger name="org.springframework.web" level="WARN" /> + <logger name="com.blog.spring.jms" level="WARN" /> + <logger name="com.jayway.jsonpath" level="WARN" /> + + <!-- AJSC Services (bootstrap services) --> + <logger name="ajsc" level="WARN" /> + <logger name="ajsc.RouteMgmtService" level="WARN" /> + <logger name="ajsc.ComputeService" level="WARN" /> + <logger name="ajsc.VandelayService" level="WARN" /> + <logger name="ajsc.FilePersistenceService" level="WARN" /> + <logger name="ajsc.UserDefinedJarService" level="WARN" /> + <logger name="ajsc.UserDefinedBeansDefService" level="WARN" /> + <logger name="ajsc.LoggingConfigurationService" level="WARN" /> + + <!-- AJSC related loggers (DME2 Registration, csi logging, restlet, servlet + logging) --> + <logger name="org.codehaus.groovy" level="WARN" /> + <logger name="com.att.scamper" level="WARN" /> + <logger name="ajsc.utils" level="WARN" /> + <logger name="ajsc.utils.DME2Helper" level="WARN" /> + <logger name="ajsc.filters" level="WARN" /> + <logger 
name="ajsc.beans.interceptors" level="WARN" />
+    <logger name="ajsc.restlet" level="WARN" />
+    <logger name="ajsc.servlet" level="WARN" />
+    <logger name="com.att.ajsc" level="WARN" />
+    <logger name="com.att.ajsc.csi.logging" level="WARN" />
+    <logger name="com.att.ajsc.filemonitor" level="WARN" />
+    <logger name="com.netflix.loadbalancer" level="WARN" />
+
+    <logger name="org.apache.zookeeper" level="WARN" />
+
+    <!-- Other Loggers that may help troubleshoot -->
+    <logger name="net.sf" level="WARN" />
+    <logger name="org.apache.commons.httpclient" level="WARN" />
+    <logger name="org.apache.commons" level="WARN" />
+    <logger name="org.apache.coyote" level="WARN" />
+    <logger name="org.apache.jasper" level="WARN" />
+
+    <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging.
+        May aid in troubleshooting) -->
+    <logger name="org.apache.camel" level="WARN" />
+    <logger name="org.apache.cxf" level="WARN" />
+    <logger name="org.apache.camel.processor.interceptor" level="WARN" />
+    <logger name="org.apache.cxf.jaxrs.interceptor" level="WARN" />
+    <logger name="org.apache.cxf.service" level="WARN" />
+    <logger name="org.restlet" level="WARN" />
+    <logger name="org.apache.camel.component.restlet" level="WARN" />
+
+    <logger name="org.hibernate.validator" level="WARN" />
+    <logger name="org.hibernate" level="WARN" />
+    <logger name="org.hibernate.ejb" level="OFF" />
+
+    <!-- logback internals logging -->
+    <logger name="ch.qos.logback.classic" level="WARN" />
+    <logger name="ch.qos.logback.core" level="WARN" />
+
+    <logger name="org.eclipse.jetty" level="WARN" />
+
+    <!-- logback jms appenders & loggers definition starts here -->
+    <appender name="auditLogs"
+        class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <filter class="ch.qos.logback.classic.filter.ThresholdFilter" />
+        <file>${logDirectory}/perf-audit/Audit-${lrmRVer}-${lrmRO}-${Pid}.log
+        </file>
+        <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+            <fileNamePattern>${logDirectory}/perf-audit/Audit-${lrmRVer}-${lrmRO}-${Pid}.%i.log.zip
+            </fileNamePattern>
+            <minIndex>1</minIndex>
+            <maxIndex>9</maxIndex>
+        </rollingPolicy>
+        <triggeringPolicy
+            class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+            <maxFileSize>5MB</maxFileSize>
+        </triggeringPolicy>
+        <encoder>
+            <pattern>${eelfAuditLogPattern}</pattern>
+        </encoder>
+    </appender>
+    <appender name="perfLogs"
+        class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <filter class="ch.qos.logback.classic.filter.ThresholdFilter" />
+        <file>${logDirectory}/perf-audit/Perform-${lrmRVer}-${lrmRO}-${Pid}.log
+        </file>
+        <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+            <fileNamePattern>${logDirectory}/perf-audit/Perform-${lrmRVer}-${lrmRO}-${Pid}.%i.log.zip
+            </fileNamePattern>
+            <minIndex>1</minIndex>
+            <maxIndex>9</maxIndex>
+        </rollingPolicy>
+        <triggeringPolicy
+            class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+            <maxFileSize>5MB</maxFileSize>
+        </triggeringPolicy>
+        <encoder>
+            <pattern>%d [%thread] %-5level %logger{1024} - %msg%n</pattern>
+        </encoder>
+    </appender>
+    <logger name="AuditRecord" level="INFO" additivity="false">
+        <appender-ref ref="auditLogs" />
+    </logger>
+    <logger name="AuditRecord_DirectCall" level="INFO" additivity="false">
+        <appender-ref ref="auditLogs" />
+    </logger>
+    <logger name="PerfTrackerRecord" level="INFO" additivity="false">
+        <appender-ref ref="perfLogs" />
+    </logger>
+    <!-- logback jms appenders & loggers definition ends here -->
+
+    <logger
name="org.onap.aai.interceptors.post" level="DEBUG" + additivity="false"> + <appender-ref ref="asynctranslog" /> + </logger> + + <logger name="org.onap.aai.interceptors.pre.SetLoggingContext" level="DEBUG"> + <appender-ref ref="asyncAUDIT"/> + </logger> + + <logger name="org.onap.aai.interceptors.post.ResetLoggingContext" level="DEBUG"> + <appender-ref ref="asyncAUDIT"/> + </logger> + + <logger name="org.onap.aai.dmaap" level="DEBUG" additivity="false"> + <appender-ref ref="dmaapAAIEventConsumer" /> + <appender-ref ref="dmaapAAIEventConsumerDebug" /> + <appender-ref ref="dmaapAAIEventConsumerMetric" /> + </logger> + + <logger name="org.onap.aai.datasnapshot" level="DEBUG" additivity="false"> + <appender-ref ref="dataSnapshot"/> + <appender-ref ref="dataSnapshotdebug"/> + <appender-ref ref="dataSnapshotmetric"/> + <appender-ref ref="STDOUT"/> + </logger> + + <logger name="org.onap.aai.datagrooming" level="DEBUG" additivity="false"> + <appender-ref ref="dataGrooming"/> + <appender-ref ref="dataGroomingdebug"/> + <appender-ref ref="dataGroomingmetric"/> + <appender-ref ref="STDOUT"/> + </logger> + + <logger name="org.onap.aai.schema" level="DEBUG" additivity="false"> + <appender-ref ref="createDBSchema"/> + <appender-ref ref="createDBSchemadebug"/> + <appender-ref ref="createDBSchemametric"/> + </logger> + + <logger name="org.onap.aai.dbgen.PullInvData" level="DEBUG" additivity="false"> + <appender-ref ref="pullInvData"/> + <appender-ref ref="pullInvDatadebug"/> + <appender-ref ref="pullInvDatametric"/> + </logger> + + <logger name="org.onap.aai.datacleanup" level="INFO" additivity="false"> + <appender-ref ref="dataCleanuperror" /> + <appender-ref ref="dataCleanupdebug" /> + <appender-ref ref="dataCleanupmetric" /> + <appender-ref ref="STDOUT"/> + </logger> + <logger name="org.onap.aai.dataexport" level="DEBUG" additivity="false"> + <appender-ref ref="dataExportError"/> + <appender-ref ref="dataExportDebug"/> + <appender-ref ref="dataExportMetric"/> + <appender-ref ref="STDOUT"/> + </logger> + <logger name="org.apache" level="WARN" /> + <logger name="org.zookeeper" level="WARN" /> + <logger name="com.netflix" level="WARN" /> + <logger name="org.janusgraph" level="WARN" /> + <logger name="com.att.aft.dme2" level="WARN" /> + + <!-- ============================================================================ --> + <!-- General EELF logger --> + <!-- ============================================================================ --> + <logger name="com.att.eelf" level="WARN" additivity="false"> + <appender-ref ref="asyncDEBUG" /> + <appender-ref ref="asyncERROR" /> + <appender-ref ref="asyncMETRIC" /> + </logger> + + <root level="DEBUG"> + <appender-ref ref="external" /> + </root> +</configuration> diff --git a/src/main/resources/migration-logback.xml b/src/main/resources/migration-logback.xml new file mode 100644 index 0000000..ff56f57 --- /dev/null +++ b/src/main/resources/migration-logback.xml @@ -0,0 +1,84 @@ +<!-- + + ============LICENSE_START======================================================= + org.onap.aai + ================================================================================ + Copyright © 2017 AT&T Intellectual Property. All rights reserved. + ================================================================================ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ============LICENSE_END========================================================= + + ECOMP is a trademark and service mark of AT&T Intellectual Property. + +--> +<configuration> + <property name="logDirectory" value="${AJSC_HOME}/logs" /> + + <appender name="migrationlog" class="ch.qos.logback.classic.sift.SiftingAppender"> + <filter class="ch.qos.logback.classic.filter.LevelFilter"> + <level>INFO</level> + <onMatch>ACCEPT</onMatch> + <onMismatch>DENY</onMismatch> + </filter> + <!-- This is MDC value --> + <!-- We will assign a value to 'logFilenameAppender' via Java code --> + <discriminator> + <key>logFilenameAppender</key> + <defaultValue>undefined</defaultValue> + </discriminator> + <sift> + <!-- A standard RollingFileAppender, the log file is based on 'logFileName' + at runtime --> + <appender name="FILE-${logFilenameAppender}" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/migration/${logFilenameAppender}.log</file> + <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy"> + <fileNamePattern>${logDirectory}/migration/${logFilenameAppender}.log.%d{yyyy-MM-dd} + </fileNamePattern> + </rollingPolicy> + <encoder> + <pattern>%d{yyyy-MM-dd'T'HH:mm:ss.SSSXXX}|%m%n</pattern> + </encoder> + </appender> + </sift> + </appender> + + <logger name="org.reflections" level="ERROR" additivity="false"> + <appender-ref ref="migrationlog" /> + </logger> + <logger name="org.apache.zookeeper" level="ERROR" additivity="false"> + <appender-ref ref="migrationlog" /> + </logger> + <logger name="org.apache.hadoop" level="INFO" additivity="false"> + <appender-ref ref="migrationlog" /> + </logger> + <logger name="org.janusgraph" level="ERROR" additivity="false"> + <appender-ref ref="migrationlog" /> + </logger> + <logger name="ch.qos.logback.classic" level="ERROR" additivity="false"> + <appender-ref ref="migrationlog" /> + </logger> + <logger name="ch.qos.logback.core" level="ERROR" additivity="false"> + <appender-ref ref="migrationlog" /> + </logger> + <logger name="com.att.eelf" level="ERROR" additivity="false"> + <appender-ref ref="migrationlog" /> + </logger> + <logger name="org.onap.aai" level="ERROR" additivity="false"> + <appender-ref ref="migrationlog" /> + </logger> + + <root level="INFO"> + <appender-ref ref="migrationlog" /> + </root> +</configuration>
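The sifting appender above creates one log file per value of the logFilenameAppender MDC key, which the Java migration driver sets at runtime. As a rough illustration (the migration name MyMigration and the AJSC_HOME value are assumed here, not taken from the source), output would land in per-migration files such as:

    /opt/app/aai-graphadmin/logs/migration/MyMigration.log
    /opt/app/aai-graphadmin/logs/migration/MyMigration.log.2018-03-27   (daily roll from the TimeBasedRollingPolicy)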
\ No newline at end of file diff --git a/src/main/resources/retired.properties b/src/main/resources/retired.properties new file mode 100644 index 0000000..940a358 --- /dev/null +++ b/src/main/resources/retired.properties @@ -0,0 +1,6 @@ +# Retired patterns specifying that a version is retired +retired.api.pattern.list=\ + ^/aai/v[2-6]+/.*$ + +# Retired patterns specifying that all versions of the api are retired +retired.api.all.versions=
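To make the retirement regex concrete, here is a quick shell check against two sample URIs (both URIs are invented for illustration):

    echo '/aai/v4/network/generic-vnfs' | grep -E '^/aai/v[2-6]+/.*$'    # matches: v4 is a retired version
    echo '/aai/v11/network/generic-vnfs' | grep -E '^/aai/v[2-6]+/.*$'   # no match: v11 is still served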
\ No newline at end of file diff --git a/src/main/resources/schemaMod-logback.xml b/src/main/resources/schemaMod-logback.xml new file mode 100644 index 0000000..d99c7dc --- /dev/null +++ b/src/main/resources/schemaMod-logback.xml @@ -0,0 +1,62 @@ +<configuration> + <property name="logDirectory" value="${AJSC_HOME}/logs" /> + + <appender name="schemaModlog" class="ch.qos.logback.classic.sift.SiftingAppender"> + <filter class="ch.qos.logback.classic.filter.LevelFilter"> + <level>INFO</level> + <onMatch>ACCEPT</onMatch> + <onMismatch>DENY</onMismatch> + </filter> + <!-- This is MDC value --> + <!-- We will assign a value to 'logFilenameAppender' via Java code --> + <discriminator> + <key>logFilenameAppender</key> + <defaultValue>undefined</defaultValue> + </discriminator> + <sift> + <!-- A standard RollingFileAppender, the log file is based on 'logFileName' + at runtime --> + <appender name="FILE-${logFilenameAppender}" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/schemaMod/${logFilenameAppender}.log</file> + <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy"> + <fileNamePattern>${logDirectory}/schemaMod/${logFilenameAppender}.log.%d{yyyy-MM-dd} + </fileNamePattern> + </rollingPolicy> + <encoder> + <pattern>%d{yyyy-MM-dd'T'HH:mm:ss.SSSXXX}|%m%n</pattern> + </encoder> + </appender> + </sift> + </appender> + + <logger name="org.reflections" level="WARN" additivity="false"> + <appender-ref ref="schemaModlog" /> + </logger> + <logger name="org.apache.zookeeper" level="ERROR" additivity="false"> + <appender-ref ref="schemaModlog" /> + </logger> + <logger name="org.apache.hadoop" level="ERROR" additivity="false"> + <appender-ref ref="schemaModlog" /> + </logger> + <logger name="com.thinkaurelius" level="WARN" additivity="false"> + <appender-ref ref="schemaModlog" /> + </logger> + <logger name="ch.qos.logback.classic" level="WARN" additivity="false"> + <appender-ref ref="schemaModlog" /> + </logger> + <logger name="ch.qos.logback.core" level="WARN" additivity="false"> + <appender-ref ref="schemaModlog" /> + </logger> + <logger name="com.att.eelf" level="WARN" additivity="false"> + <appender-ref ref="schemaModlog" /> + </logger> + <logger name="org.onap.aai" level="INFO" additivity="false"> + <appender-ref ref="schemaModlog" /> + </logger> + + + <root level="INFO"> + <appender-ref ref="schemaModlog" /> + </root> +</configuration>
\ No newline at end of file diff --git a/src/main/resources/uniquePropertyCheck-logback.xml b/src/main/resources/uniquePropertyCheck-logback.xml new file mode 100644 index 0000000..ca0c2c7 --- /dev/null +++ b/src/main/resources/uniquePropertyCheck-logback.xml @@ -0,0 +1,62 @@ +<configuration> + <property name="logDirectory" value="${AJSC_HOME}/logs" /> + + <appender name="uniquePropertyChecklog" class="ch.qos.logback.classic.sift.SiftingAppender"> + <filter class="ch.qos.logback.classic.filter.LevelFilter"> + <level>INFO</level> + <onMatch>ACCEPT</onMatch> + <onMismatch>DENY</onMismatch> + </filter> + <!-- This is MDC value --> + <!-- We will assign a value to 'logFilenameAppender' via Java code --> + <discriminator> + <key>logFilenameAppender</key> + <defaultValue>undefined</defaultValue> + </discriminator> + <sift> + <!-- A standard RollingFileAppender, the log file is based on 'logFileName' + at runtime --> + <appender name="FILE-${logFilenameAppender}" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/uniquePropertyCheck/${logFilenameAppender}.log</file> + <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy"> + <fileNamePattern>${logDirectory}/uniquePropertyCheck/${logFilenameAppender}.log.%d{yyyy-MM-dd} + </fileNamePattern> + </rollingPolicy> + <encoder> + <pattern>%d{yyyy-MM-dd'T'HH:mm:ss.SSSXXX}|%m%n</pattern> + </encoder> + </appender> + </sift> + </appender> + + <logger name="org.reflections" level="WARN" additivity="false"> + <appender-ref ref="uniquePropertyChecklog" /> + </logger> + <logger name="org.apache.zookeeper" level="ERROR" additivity="false"> + <appender-ref ref="uniquePropertyChecklog" /> + </logger> + <logger name="org.apache.hadoop" level="ERROR" additivity="false"> + <appender-ref ref="uniquePropertyChecklog" /> + </logger> + <logger name="com.thinkaurelius" level="WARN" additivity="false"> + <appender-ref ref="uniquePropertyChecklog" /> + </logger> + <logger name="ch.qos.logback.classic" level="WARN" additivity="false"> + <appender-ref ref="uniquePropertyChecklog" /> + </logger> + <logger name="ch.qos.logback.core" level="WARN" additivity="false"> + <appender-ref ref="uniquePropertyChecklog" /> + </logger> + <logger name="com.att.eelf" level="WARN" additivity="false"> + <appender-ref ref="uniquePropertyChecklog" /> + </logger> + <logger name="org.onap.aai" level="INFO" additivity="false"> + <appender-ref ref="uniquePropertyChecklog" /> + </logger> + + + <root level="INFO"> + <appender-ref ref="uniquePropertyChecklog" /> + </root> +</configuration>
\ No newline at end of file
diff --git a/src/main/scripts/audit_schema.sh b/src/main/scripts/audit_schema.sh
new file mode 100644
index 0000000..686dd49
--- /dev/null
+++ b/src/main/scripts/audit_schema.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+#
+# ============LICENSE_START=======================================================
+# org.onap.aai
+# ================================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+#
+COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P )
+. ${COMMON_ENV_PATH}/common_functions.sh
+
+start_date;
+check_user;
+source_profile;
+execute_spring_jar org.onap.aai.db.schema.ScriptDriver "/opt/app/aai-graphadmin/resources/logback.xml" "$@"
+end_date;
+exit 0
diff --git a/src/main/scripts/common_functions.sh b/src/main/scripts/common_functions.sh
new file mode 100644
index 0000000..ed795fe
--- /dev/null
+++ b/src/main/scripts/common_functions.sh
@@ -0,0 +1,65 @@
+#!/bin/ksh
+#
+# Common functions that can be used throughout multiple scripts
+# In order to call these functions, this file needs to be sourced
+
+# Checks that the user currently running this script is aaiadmin
+check_user(){
+
+ userid=$( id | cut -f2 -d"(" | cut -f1 -d")" )
+
+ if [ "${userid}" != "aaiadmin" ]; then
+ echo "You must be aaiadmin to run $0. The id used was $userid."
+ exit 1
+ fi
+}
+
+# Sources the profile and sets the project home
+source_profile(){
+ . /etc/profile.d/aai.sh
+ PROJECT_HOME=/opt/app/aai-graphadmin
+}
+
+# Runs the spring boot jar, given the main class
+# to execute and the logback file to use for that class
+execute_spring_jar(){
+
+ className=$1;
+ logbackFile=$2;
+
+ shift 2;
+
+ EXECUTABLE_JAR=$(ls ${PROJECT_HOME}/lib/*.jar);
+
+ JAVA_OPTS="${JAVA_PRE_OPTS} -DAJSC_HOME=$PROJECT_HOME";
+ JAVA_OPTS="$JAVA_OPTS -DBUNDLECONFIG_DIR=resources";
+ JAVA_OPTS="$JAVA_OPTS -Daai.home=$PROJECT_HOME ";
+ JAVA_OPTS="$JAVA_OPTS -Dhttps.protocols=TLSv1.1,TLSv1.2";
+ JAVA_OPTS="$JAVA_OPTS -Dloader.main=${className}";
+ JAVA_OPTS="$JAVA_OPTS -Dloader.path=${PROJECT_HOME}/resources";
+ JAVA_OPTS="$JAVA_OPTS -Dlogback.configurationFile=${logbackFile}";
+
+ export SOURCE_NAME=$(grep '^schema.source.name=' ${PROJECT_HOME}/resources/application.properties | cut -d"=" -f2-);
+ # Needed for the schema ingest library beans
+ eval $(grep '^schema\.' ${PROJECT_HOME}/resources/application.properties | \
+ sed 's/^\(.*\)$/JAVA_OPTS="$JAVA_OPTS -D\1"/g' | \
+ sed 's/${server.local.startpath}/${PROJECT_HOME}\/resources/g'| \
+ sed 's/${schema.source.name}/'${SOURCE_NAME}'/g'\
+ )
+
+ JAVA_OPTS="${JAVA_OPTS} ${JAVA_POST_OPTS}";
+
+ ${JAVA_HOME}/bin/java ${JVM_OPTS} ${JAVA_OPTS} -jar ${EXECUTABLE_JAR} "$@"
+}
+
+# Prints the start date and the script that the user called
+start_date(){
+ echo
+ echo `date` " Starting $0"
+}
+
+# Prints the end date and the script that the user called
+end_date(){
+ echo
+ echo `date` " Done $0"
+}
diff --git a/src/main/scripts/createDBSchema.sh b/src/main/scripts/createDBSchema.sh
new file mode 100644
index 0000000..01fef07
--- /dev/null
+++ b/src/main/scripts/createDBSchema.sh
@@ -0,0 +1,44 @@
+#!/bin/ksh
+#
+# ============LICENSE_START=======================================================
+# org.onap.aai
+# ================================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+# The script invokes the GenTester java class to create the DB schema
+#
+# NOTE: you can pass an option GEN_DB_WITH_NO_SCHEMA if you want it to create an instance of
+# the graph - but with no schema (this is useful when using the HBase copyTable to
+# copy our database to different environments).
+# Ie. createDBSchema.sh GEN_DB_WITH_NO_SCHEMA
+#
+#
+#
+#
+
+COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P )
+. ${COMMON_ENV_PATH}/common_functions.sh
+start_date;
+check_user;
+source_profile;
+if [ -z "$1" ]; then
+ execute_spring_jar org.onap.aai.schema.GenTester ${PROJECT_HOME}/resources/logback.xml
+else
+ execute_spring_jar org.onap.aai.schema.GenTester ${PROJECT_HOME}/resources/logback.xml "$1"
+fi;
+end_date;
+exit 0
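For a sense of what execute_spring_jar assembles, the createDBSchema.sh invocation above ends up running roughly the command below. The paths follow the defaults used throughout these scripts; the jar name and the extra schema.* properties pulled out of application.properties are placeholders and will vary per build:

    /usr/lib/jvm/java-8-openjdk-amd64/bin/java \
        -DAJSC_HOME=/opt/app/aai-graphadmin \
        -DBUNDLECONFIG_DIR=resources \
        -Daai.home=/opt/app/aai-graphadmin \
        -Dhttps.protocols=TLSv1.1,TLSv1.2 \
        -Dloader.main=org.onap.aai.schema.GenTester \
        -Dloader.path=/opt/app/aai-graphadmin/resources \
        -Dlogback.configurationFile=/opt/app/aai-graphadmin/resources/logback.xml \
        -jar /opt/app/aai-graphadmin/lib/aai-graphadmin-<version>.jar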
\ No newline at end of file
diff --git a/src/main/scripts/dataGrooming.sh b/src/main/scripts/dataGrooming.sh
new file mode 100644
index 0000000..a6b5f4f
--- /dev/null
+++ b/src/main/scripts/dataGrooming.sh
@@ -0,0 +1,116 @@
+#!/bin/ksh
+#
+# The script invokes the dataGrooming java class to run some tests and generate a report and
+# potentially do some auto-deleting.
+#
+# Here are the allowed parameters. Note - they are all optional and can be mixed and matched.
+#
+# -f oldFileName (see note below)
+# -autoFix
+# -sleepMinutes nn
+# -edgesOnly
+# -skipEdgeChecks
+# -timeWindowMinutes nn
+# -dontFixOrphans
+# -maxFix
+# -skipHostCheck
+# -singleCommits
+# -dupeCheckOff
+# -dupeFixOn
+# -ghost2CheckOff
+# -ghost2FixOn
+#
+#
+#
+#
+# NOTES:
+# -f The name of a previous report can optionally be passed in with the "-f" option.
+# Just the filename -- ie. "dataGrooming.sh -f dataGrooming.201504272106.out"
+# The file will be assumed to be in the directory that it was created in.
+# If a filename is passed, then the "deleteCandidate" vertex-id's and bad edges
+# listed inside that report file will be deleted on this run if they are encountered as
+# bad nodes/edges again.
+#
+# -autoFix If you don't use the "-f" option, you could choose to use "-autoFix" which will
+# automatically run the script twice: once to look for problems, then after
+# sleeping for a few minutes, it will re-run with the initial run's output as
+# an input file.
+#
+# -maxFix When using autoFix, you might want to limit how many 'bad' records get fixed.
+# This is a safeguard against accidentally deleting too many records automatically.
+# It has a default value set in AAIConstants: AAI_GROOMING_DEFAULT_MAX_FIX = 15;
+# If there are more than maxFix candidates found -- then none will be deleted (ie.
+# someone needs to look into it)
+#
+# -sleepMinutes When using autoFix, this defines how many minutes we sleep before the second run.
+# It has a default value set in AAIConstants: AAI_GROOMING_DEFAULT_SLEEP_MINUTES = 7;
+# The reason we sleep at all between runs is that our DB is "eventually consistent", so
+# we want to give it time to resolve itself if possible.
+#
+# -edgesOnly Can be used any time you want to limit this tool so it only looks at edges.
+# Note - as of 1710, we have not been seeing many purely bad edges,
+# (ie. not associated with a phantom node) so this option is not used often.
+#
+# -skipEdgeChecks Use it to bypass checks for bad Edges (which are pretty rare).
+#
+# -timeWindowMinutes Use it to limit the nodes looked at to ones whose update-timestamp tells us that
+# they were last updated less than this many minutes ago. Note this is usually used along with the skipEdgeChecks option.
+#
+# -dontFixOrphans Since there can sometimes be a lot of orphan nodes, and they don't
+# harm processing as much as phantom-nodes or bad-edges, it is useful to be
+# able to ignore them when fixing things.
+#
+# -skipHostCheck By default, the grooming tool will check to see that it is running
+# on the host that is the first one in the list found in:
+# aaiconfig.properties aai.primary.filetransfer.serverlist
+# This is so that when run from the cron, it only runs on one machine.
+# This option lets you turn that checking off.
+#
+# -singleCommits By default, the grooming tool will do all of its processing and then do
+# a commit of all the changes at once.
+# This option (which perhaps could have been named better) lets the user override
+# the default behavior and do a commit for each individual 'remove' one by one
+# as they are encountered by the grooming logic.
+# NOTE - this only applies when using either the "-f" or "-autoFix" options since
+# those are the only two that make changes to the database.
+#
+# -dupeCheckOff By default, we will check all of our nodes for duplicates. This parameter lets
+# us turn this check off if we don't want to do it for some reason.
+#
+# -dupeFixOn When we're fixing data, by default we will NOT fix duplicates. This parameter lets us turn
+# that fixing ON when we are comfortable that it can pick the correct duplicate to preserve.
+#
+# -ghost2CheckOff By default, we will check for the "new" kind of ghost that we saw on
+# Production in early February 2016. This parameter lets us turn this check off if we
+# don't want to do it for some reason.
+#
+# -ghost2FixOn When we're fixing data, by default we will NOT try to fix the "new" ghost nodes.
+# This parameter lets us turn that fixing ON if we want to try to fix them.
+#
+#
+COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P )
+. ${COMMON_ENV_PATH}/common_functions.sh
+
+# TODO: There is a better way where you can pass in the function
+# and then let the common functions check if the function exist and invoke it
+# So this all can be templated out
+start_date;
+check_user;
+
+# Make sure that it's not already running
+processStat=$(ps -ef | grep '[D]ataGrooming');
+if [ "$processStat" != "" ]
+ then
+ echo "Found dataGrooming is already running: " $processStat
+ exit 1
+fi
+
+processStat=`ps -ef|grep aaiadmin|grep -E "org.onap.aai.datagrooming.DataGrooming"|grep -v grep`
+if [ "$processStat" != "" ]
+ then
+ echo "Found dataGrooming is already running: " $processStat
+ exit 1
+fi
+
+source_profile;
+execute_spring_jar org.onap.aai.datagrooming.DataGrooming $PROJECT_HOME/resources/logback.xml "$@"
+end_date;
+exit 0
diff --git a/src/main/scripts/dataRestoreFromSnapshot.sh b/src/main/scripts/dataRestoreFromSnapshot.sh
new file mode 100644
index 0000000..405a667
--- /dev/null
+++ b/src/main/scripts/dataRestoreFromSnapshot.sh
@@ -0,0 +1,50 @@
+#!/bin/ksh
+#
+# This script uses the dataSnapshot and SchemaGenerator (via GenTester) java classes to restore
+# data to a database by doing three things:
+# 1) clear out whatever data and schema are currently in the db
+# 2) rebuild the schema (using the SchemaGenerator)
+# 3) reload data from the passed-in datafile (which must be found in the dataSnapShots directory and
+# contain an xml view of the db data).
+#
+
+COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P )
+. ${COMMON_ENV_PATH}/common_functions.sh
+
+start_date;
+check_user;
+
+if [ "$#" -lt 1 ]; then
+ echo "Illegal number of parameters"
+ echo "usage: $0 previous_snapshot_filename"
+ exit 1
+fi
+
+source_profile;
+export JAVA_PRE_OPTS=${JAVA_PRE_OPTS:--Xms6g -Xmx8g};
+
+#### Step 1) clear out the database
+execute_spring_jar org.onap.aai.datasnapshot.DataSnapshot ${PROJECT_HOME}/resources/logback.xml "CLEAR_ENTIRE_DATABASE" "$1" "$2"
+if [ "$?" -ne "0" ]; then
+ echo "Problem clearing out database."
+ exit 1
+fi
+
+#### Step 2) rebuild the db-schema
+execute_spring_jar org.onap.aai.schema.GenTester ${PROJECT_HOME}/resources/logback.xml "GEN_DB_WITH_NO_DEFAULT_CR"
+if [ "$?" -ne "0" ]; then
+ echo "Problem rebuilding the schema (SchemaGenerator)."
+ exit 1 +fi + +#### Step 3) reload the data from a snapshot file + +execute_spring_jar org.onap.aai.datasnapshot.DataSnapshot ${PROJECT_HOME}/resources/logback.xml "RELOAD_DATA" "$1" +if [ "$?" -ne "0" ]; then + echo "Problem reloading data into the database." + end_date; + exit 1 +fi + +end_date; +exit 0 diff --git a/src/main/scripts/dataSnapshot.sh b/src/main/scripts/dataSnapshot.sh new file mode 100644 index 0000000..f380e85 --- /dev/null +++ b/src/main/scripts/dataSnapshot.sh @@ -0,0 +1,28 @@ +#!/bin/ksh +# +# This script invokes the dataSnapshot java class passing an option to tell it to take +# a snapshot of the database and store it as a single-line XML file. +# +# +# +# +# +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh + +processStat=$(ps -ef | grep '[D]ataSnapshot'); +if [ "$processStat" != "" ] + then + echo "Found dataSnapshot is already running: " $processStat + exit 1 +fi + +# TODO: There is a better way where you can pass in the function +# and then let the common functions check if the function exist and invoke it +# So this all can be templated out +start_date; +check_user; +source_profile; +execute_spring_jar org.onap.aai.datasnapshot.DataSnapshot $PROJECT_HOME/resources/logback.xml "$@" +end_date; +exit 0 diff --git a/src/main/scripts/dupeTool.sh b/src/main/scripts/dupeTool.sh new file mode 100644 index 0000000..350b0bd --- /dev/null +++ b/src/main/scripts/dupeTool.sh @@ -0,0 +1,73 @@ +#!/bin/ksh + +### +# ============LICENSE_START======================================================= +# org.onap.aai +# ================================================================================ +# Copyright (C) 2017 AT&T Intellectual Property. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +### +# +# +# dupeTool.sh -- This tool is used to look at or fix duplicate nodes for one nodeType +# at a time and can be used to limit what it's looking at to just nodes created +# within a recent time window. +# It is made to deal with situations (like we have in 1610/1702) where one type +# of node keeps needing to have duplicates cleaned up (tenant nodes). +# It is needed because DataGrooming cannot be run often and cannot be focused just +# on duplicates or just on one nodeType. +# +# Parameters: +# +# -userId (required) must be followed by a userid +# -nodeType (required) must be followed by a valid nodeType +# -timeWindowMinutes (optional) by default we would look at all nodes of the +# given nodeType, but if a window is given, then we will only look at +# nodes created that many (or fewer) minutes ago. 
+# -autoFix (optional) use this if you want duplicates fixed automatically (if we
+# can figure out which to delete)
+# -maxFix (optional) like with dataGrooming lets you override the default maximum
+# number of dupes that can be processed at one time
+# -skipHostCheck (optional) By default, the dupe tool will check to see that it is running
+# on the host that is the first one in the list found in:
+# aaiconfig.properties aai.primary.filetransfer.serverlist
+# This is so that when run from the cron, it only runs on one machine.
+# This option lets you turn that checking off.
+# -sleepMinutes (optional) like with DataGrooming, you can override the
+# sleep time done when doing autoFix between first and second checks of the data.
+# -params4Collect (optional) followed by a string to tell what properties/values to use
+# to limit the nodes being looked at. Must be in the format
+# of "propertyName|propValue". Use commas to separate if more
+# than one name/value pair is being passed.
+# -specialTenantRule (optional) turns on extra logic that can figure
+# out which tenant node can be deleted in a common scenario.
+#
+#
+# For example (there are many valid ways to use it):
+#
+# dupeTool.sh -userId am8383 -nodeType tenant -timeWindowMinutes 60 -autoFix
+# or
+# dupeTool.sh -userId am8383 -nodeType tenant -specialTenantRule -autoFix -maxFix 100
+#
+
+COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P )
+. ${COMMON_ENV_PATH}/common_functions.sh
+
+start_date;
+check_user;
+source_profile;
+execute_spring_jar org.onap.aai.dbgen.DupeTool ${PROJECT_HOME}/resources/dupeTool-logback.xml "$@"
+end_date;
+exit 0
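One more illustrative invocation, showing the -params4Collect format described above (the property name and value here are made up for the example):

    dupeTool.sh -userId am8383 -nodeType tenant -timeWindowMinutes 120 -params4Collect "tenant-name|testTenant01"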
\ No newline at end of file
diff --git a/src/main/scripts/dynamicPayloadArchive.sh b/src/main/scripts/dynamicPayloadArchive.sh
new file mode 100644
index 0000000..87cce13
--- /dev/null
+++ b/src/main/scripts/dynamicPayloadArchive.sh
@@ -0,0 +1,75 @@
+#!/bin/ksh
+#
+###
+# ============LICENSE_START=======================================================
+# org.onap.aai
+# ================================================================================
+# Copyright (C) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+###

+#
+# The script is called to tar and gzip the files under /opt/app/aai-graphadmin/resources/etc/scriptdata/addmanualdata/tenant_isolation/payload
+# which contains the payload files created by the dynamicPayloadGenerator.sh tool.
+# /opt/app/aai-graphadmin/resources/etc/scriptdata/addmanualdata/tenant_isolation is mounted into the docker container
+#

+COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P )
+. ${COMMON_ENV_PATH}/common_functions.sh
+
+. /etc/profile.d/aai.sh
+PROJECT_HOME=/opt/app/aai-graphadmin
+
+PROGNAME=$(basename $0)
+
+TS=$(date "+%Y_%m_%d_%H_%M_%S")
+
+CHECK_USER="aaiadmin"
+userid=$( id | cut -f2 -d"(" | cut -f1 -d")" )
+if [ "${userid}" != $CHECK_USER ]; then
+ echo "You must be $CHECK_USER to run $0. The id used was $userid."
+ exit 1
+fi
+PAYLOAD_DIRECTORY=${PROJECT_HOME}/resources/etc/scriptdata/addmanualdata/tenant_isolation/payload
+ARCHIVE_DIRECTORY=${PROJECT_HOME}/resources/etc/scriptdata/addmanualdata/tenant_isolation/archive
+if [ ! -d ${PAYLOAD_DIRECTORY} ]
+then
+ echo " ${PAYLOAD_DIRECTORY} doesn't exist"
+ exit 1
+fi
+if [ ! -d ${ARCHIVE_DIRECTORY} ]
+then
+ mkdir -p ${ARCHIVE_DIRECTORY}
+ chown aaiadmin:aaiadmin ${ARCHIVE_DIRECTORY}
+ chmod u+w ${ARCHIVE_DIRECTORY}
+fi
+cd ${PAYLOAD_DIRECTORY}
+tar c * -f ${ARCHIVE_DIRECTORY}/dynamicPayloadArchive_${TS}.tar --exclude=payload
+if [ $? -ne 0 ]
+then
+ echo " Unable to tar ${PAYLOAD_DIRECTORY}"
+ exit 1
+fi
+
+cd ${ARCHIVE_DIRECTORY}
+gzip ${ARCHIVE_DIRECTORY}/dynamicPayloadArchive_${TS}.tar
+
+if [ $? -ne 0 ]
+then
+ echo " Unable to gzip ${ARCHIVE_DIRECTORY}/dynamicPayloadArchive_${TS}.tar"
+ exit 1
+fi
+echo "Completed successfully: ${ARCHIVE_DIRECTORY}/dynamicPayloadArchive_${TS}.tar.gz"
+exit 0
diff --git a/src/main/scripts/dynamicPayloadGenerator.sh b/src/main/scripts/dynamicPayloadGenerator.sh
new file mode 100644
index 0000000..3d30790
--- /dev/null
+++ b/src/main/scripts/dynamicPayloadGenerator.sh
@@ -0,0 +1,155 @@
+#!/bin/ksh
+#
+###
+# ============LICENSE_START=======================================================
+# org.onap.aai
+# ================================================================================
+# Copyright (C) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+###

+#
+# dynamicPayloadGenerator.sh -- This tool is used to dynamically load payloads from snapshots
+# It is used to load a snapshot into memory and generate payloads for any input nodes
+#
+#
+# Parameters:
+#
+# -d (required) name of the fully qualified Datasnapshot file that you need to load
+# -s (optional) true or false to enable or disable schema validation. By default it is true for production;
+# you can change it to false if the snapshot has duplicates
+# -c (optional) config file to use for loading snapshot into memory.
+# -o (required) output directory to store the data files
+# -f (optional) PAYLOAD or DMAAP-MR
+# -n (optional) input file for the script
+#
+#
+# For example (there are many valid ways to use it):
+#
+# dynamicPayloadGenerator.sh -d '/opt/app/snapshots/snapshot.graphSON' -o '/opt/app/aai-graphadmin/resources/etc/scriptdata/addmanualdata/payload_dir/'
+#
+# or
+# dynamicPayloadGenerator.sh -d '/opt/app/snapshots/snapshot.graphSON' -s false -c '/opt/app/aai-graphadmin/resources/etc/appprops/dynamic.properties'
+# -o '/opt/app/aai-graphadmin/resources/etc/scriptdata/addmanualdata/payload_dir/' -f PAYLOAD -n '/opt/app/aai-graphadmin/resources/etc/scriptdata/nodes.json'
+#


+echo
+echo `date` " Starting $0"
+
+display_usage() {
+ cat <<EOF
+ Usage: $0 [options]
+
+ 1. Usage: dynamicPayloadGenerator.sh -d <graphsonPath> -o <output-path>
+ 2. This script has 2 arguments that are required.
+ a. -d (required) Name of the fully qualified Datasnapshot file that you need to load
+ b. -o (required) output directory to store the data files
+ 3. Optional Parameters:
+ a. -s (optional) true or false to enable or disable schema validation. By default it is true for production.
+ b. -c (optional) config file to use for loading snapshot into memory. By default it is set to /opt/app/aai-graphadmin/resources/etc/appprops/dynamic.properties
+ c. -f (optional) PAYLOAD or DMAAP-MR
+ d. -n (optional) input file specifying the nodes and relationships to export. Default: /opt/app/aai-graphadmin/scriptdata/tenant_isolation/nodes.json
+ e. -m (optional) true or false to read multiple snapshots or not, by default it is false
+ f. -i (optional) the file containing the input filters based on node property and regex/value. By default, it is: /opt/app/aai-graphadmin/scriptdata/tenant_isolation/inputFilters.json
+ 4. For example (there are many valid ways to use it):
+ dynamicPayloadGenerator.sh -d '/opt/app/snapshots/snapshot.graphSON' -o '/opt/app/aai-graphadmin/resources/etc/scriptdata/addmanualdata/tenant_isolation/'
+
+ dynamicPayloadGenerator.sh -d '/opt/app/snapshots/snapshot.graphSON' -s false -c '/opt/app/aai-graphadmin/resources/etc/appprops/dynamic.properties'
+ -o '/opt/app/aai-graphadmin/resources/etc/scriptdata/addmanualdata/tenant_isolation/' -f PAYLOAD -n '/opt/app/aai-graphadmin/resources/etc/scriptdata/tenant_isolation/nodes.json'
+ -m false -i '/opt/app/aai-graphadmin/resources/etc/scriptdata/tenant_isolation/inputFilters.json'
+
+EOF
+}
+if [ $# -eq 0 ]; then
+ display_usage
+ exit 1
+fi
+
+COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P )
+. ${COMMON_ENV_PATH}/common_functions.sh
+
+start_date;
+check_user;
+source_profile;
+export JVM_OPTS="-Xmx9000m -Xms9000m"
+
+while getopts ":f:s:d:n:c:i:m:o:p:" opt; do
+ case ${opt} in
+ f )
+ PAYLOAD=$OPTARG
+ echo ${opt}
+ ;;
+ s )
+ VALIDATE_SCHEMA=$OPTARG
+ echo ${opt}
+ ;;
+ d )
+ INPUT_DATASNAPSHOT_FILE=$OPTARG
+ echo ${opt}
+ ;;
+ n )
+ NODE_CONFIG_FILE=$OPTARG
+ echo ${opt}
+ ;;
+ c )
+ DYNAMIC_CONFIG_FILE=$OPTARG
+ echo ${opt}
+ ;;
+ i )
+ INPUT_FILTER_FILE=$OPTARG
+ echo ${opt}
+ ;;
+ m )
+ MULTIPLE_SNAPSHOTS=$OPTARG
+ echo ${opt}
+ ;;
+ p )
+ PARTIAL=$OPTARG
+ echo ${opt}
+ ;;
+ o )
+ OUTPUT_DIR=$OPTARG
+ echo ${opt}
+ ;;
+ \? )
+ echo "Invalid Option: -$OPTARG" 1>&2
+ ;;
+ : )
+ echo "Invalid Option: -$OPTARG requires an argument" 1>&2
+ ;;
+ esac
+ done
+ shift $((OPTIND -1))
+
+echo 'Done'
+
+set -A nodes pserver cloud-region availability-zone tenant zone complex
+
+#Create empty partial file
+ > $INPUT_DATASNAPSHOT_FILE".partial"
+
+for nodeType in ${nodes[@]}
+ do
+ grep "aai-node-type.*\"value\":\"$nodeType\"" $INPUT_DATASNAPSHOT_FILE >>$INPUT_DATASNAPSHOT_FILE'.partial'
+ done


+execute_spring_jar org.onap.aai.dbgen.DynamicPayloadGenerator ${PROJECT_HOME}/resources/dynamicPayloadGenerator-logback.xml -s ${VALIDATE_SCHEMA} \
+ -f ${PAYLOAD} -o ${OUTPUT_DIR} -c ${DYNAMIC_CONFIG_FILE} -i ${INPUT_FILTER_FILE} -m ${MULTIPLE_SNAPSHOTS} \
+ -d ${INPUT_DATASNAPSHOT_FILE} -n ${NODE_CONFIG_FILE} ;
+
+end_date;
+exit 0
diff --git a/src/main/scripts/dynamicPayloadPartial.sh b/src/main/scripts/dynamicPayloadPartial.sh
new file mode 100644
index 0000000..8021aa6
--- /dev/null
+++ b/src/main/scripts/dynamicPayloadPartial.sh
@@ -0,0 +1,13 @@
+#!/bin/ksh
+
+#Create empty partial snapshot file
+INPUT_DATASNAPSHOT_FILE=$1
+
+set -A nodes pserver cloud-region availability-zone tenant zone complex
+ > $INPUT_DATASNAPSHOT_FILE".partial"
+
+for nodeType in ${nodes[@]}
+ do
+ grep "aai-node-type.*\"value\":\"$nodeType\"" $INPUT_DATASNAPSHOT_FILE >>$INPUT_DATASNAPSHOT_FILE'.partial'
+ done
+exit 0
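A quick usage sketch of the partial-snapshot helper above (the snapshot path here is hypothetical):

    dynamicPayloadPartial.sh /opt/app/snapshots/snapshot.graphSON
    # writes /opt/app/snapshots/snapshot.graphSON.partial, keeping only the snapshot
    # lines whose aai-node-type value is pserver, cloud-region, availability-zone,
    # tenant, zone or complex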
\ No newline at end of file
diff --git a/src/main/scripts/forceDeleteTool.sh b/src/main/scripts/forceDeleteTool.sh
new file mode 100644
index 0000000..2d42fda
--- /dev/null
+++ b/src/main/scripts/forceDeleteTool.sh
@@ -0,0 +1,84 @@
+#!/bin/ksh
+#
+# ============LICENSE_START=======================================================
+# org.onap.aai
+# ================================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+#
+#
+# forceDeleteTool.sh -- This tool is used to delete nodes that cannot be deleted using
+# the normal REST API because of internal DB problems. For example, Phantom nodes
+# and duplicate nodes cause errors to happen in "normal" REST API codes and must
+# be deleted using this tool.
+# Since it is not using the "normal" REST logic, it is also not invoking the "normal"
+# edge rules that we use to cascade deletes to "child" nodes. So - this tool can be dangerous.
+# Ie. if you accidentally delete a parent node (like a cloud-region) that has many dependent
+# child nodes, there will be no way to get to any of those child-nodes after the cloud-region
+# has been deleted.
+# There are several properties defined in aaiconfig.properties to help minimize errors like that.
+# aai.forceDel.protected.nt.list=cloud-region
+# aai.forceDel.protected.edge.count=10
+# aai.forceDel.protected.descendant.count=10
+#
+# Parameters:
+#
+# -action (required) valid values: COLLECT_DATA or DELETE_NODE or DELETE_EDGE
+# -userId (required) must be followed by a userid
+# -params4Collect followed by a string to tell what properties/values to use
+# as part of a COLLECT_DATA request. Must be in the format
+# of "propertyName|propValue". Use commas to separate if more
+# than one name/value pair is being passed.
+# -vertexId - required for a DELETE_NODE request
+# -edgeId - required for a DELETE_EDGE request
+# -overRideProtection --- WARNING - This overrides the protections we introduced!
+# It will let you override a protected vertex or vertex that has more
+# than the allowed number of edges or descendants.
+# -DISPLAY_ALL_VIDS (optional) - in the rare case when you want to see the
+# vertex-ids (vids) of all the CONNECTED vertices, you can use this. By
+# default, we do not show them.
+#
+#
+# For example:
+#
+# forceDeleteTool.sh -action COLLECT_DATA -userId am8383 -params4Collect "tenant-id|junk tenant01 ID 0224"
+#
+# forceDeleteTool.sh -action COLLECT_DATA -userId am8383 -params4Collect "cloud-owner|junkTesterCloudOwner 0224,cloud-region-id|junkTesterCloud REgion ID 0224"
+#
+# forceDeleteTool.sh -action DELETE_NODE -userId am8383 -vertexId 1234567
+#
+# forceDeleteTool.sh -action DELETE_EDGE -userId am8383 -edgeId 9876543
+#
+#

+COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P )
+. ${COMMON_ENV_PATH}/common_functions.sh
+
+start_date;
+
+echo " NOTE - if you are deleting data, please run the dataSnapshot.sh script first or "
+echo " at least make a note of the details of the node that you are deleting. "
+
+check_user;
+source_profile;
+
+execute_spring_jar org.onap.aai.dbgen.ForceDeleteTool ${PROJECT_HOME}/resources/forceDelete-logback.xml "$@"
+
+end_date;
+
+exit 0
diff --git a/src/main/scripts/migration_verification.sh b/src/main/scripts/migration_verification.sh
new file mode 100644
index 0000000..1e1b228
--- /dev/null
+++ b/src/main/scripts/migration_verification.sh
@@ -0,0 +1,61 @@
+#!/bin/ksh
+
+###
+# ============LICENSE_START=======================================================
+# org.onap.aai
+# ================================================================================
+# Copyright (C) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+###

+#
+# migration_verification.sh -- This tool is used to provide a summary of migration logs
+# This searches for the pre-defined strings "Migration Error" and "Migration Summary Count" in log files and outputs those lines.
+#

+display_usage() {
+ cat << EOF
+ Usage: $0 [options]
+
+ 1. Usage: migration_verification.sh <last_modified> <logs_path>
+ 2. The <logs_path> should be a directory containing all of the logs. If empty, the default path is /opt/app/aai-graphadmin/logs/migration.
+ 3. The <last_modified> parameter should be an integer: only log files modified within the last <last_modified> minutes are parsed.
+ 4. Example: migration_verification.sh 60 /opt/app/aai-graphadmin/logs/migration
+EOF
+}
+
+if [ $# -eq 0 ]; then
+ display_usage
+ exit 1
+fi
+
+LOGS_DIRECTORY=${2:-/opt/app/aai-graphadmin/logs/migration/}
+MTIME=$1
+
+echo
+echo 'Running migration summary:'
+print "Logs directory: $LOGS_DIRECTORY"
+print "Searching log files modified within last $MTIME minutes: \n"
+echo
+
+for i in $(find -L $LOGS_DIRECTORY -mmin -$MTIME -name '*.log' );
+do
+ echo "Checking Log File: $i"
+ grep "Migration Error:" $i
+ grep "Migration Summary Count:" $i
+ echo
+done
+
+echo 'Done'
diff --git a/src/main/scripts/run_Migrations.sh b/src/main/scripts/run_Migrations.sh
new file mode 100644
index 0000000..2b0f5c5
--- /dev/null
+++ b/src/main/scripts/run_Migrations.sh
@@ -0,0 +1,49 @@
+#!/bin/sh

+###
+# ============LICENSE_START=======================================================
+# org.onap.aai
+# ================================================================================
+# Copyright (C) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+###

+COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P )
+. ${COMMON_ENV_PATH}/common_functions.sh
+
+# TODO: There is a better way where you can pass in the function
+# and then let the common functions check if the function exist and invoke it
+# So this all can be templated out
+start_date;
+check_user;
+source_profile;
+
+ARGS="-c ${PROJECT_HOME}/resources/etc/appprops/janusgraph-realtime.properties";
+
+if [ -f "$PROJECT_HOME/resources/application.properties" ]; then
+ # Get the application properties file and look for all lines
+ # starting with either jms dmaap or niws
+ # Turn them into system properties and export JAVA_PRE_OPTS so
+ # execute spring jar will get those values
+ # This is only needed since dmaap is used by run_migrations
+ JAVA_PRE_OPTS="-Xms8g -Xmx8g";
+ JMS_PROPS=$(egrep '^jms.bind.address' $PROJECT_HOME/resources/application.properties | cut -d"=" -f2- | sed 's/^\(.*\)$/-Dactivemq.tcp.url=\1/g' | tr '\n' ' ');
+ JAVA_PRE_OPTS="${JAVA_PRE_OPTS} ${JMS_PROPS}";
+ export JAVA_PRE_OPTS;
+fi;
+
+execute_spring_jar org.onap.aai.migration.MigrationController ${PROJECT_HOME}/resources/migration-logback.xml ${ARGS} "$@"
+end_date;
+exit 0
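The property-to-JVM-option handoff above is mechanical: the egrep/sed pipeline rewrites the jms.bind.address value into an ActiveMQ system property. Assuming application.properties contained the (hypothetical) line jms.bind.address=tcp://localhost:61647, JAVA_PRE_OPTS would come out as roughly:

    JAVA_PRE_OPTS="-Xms8g -Xmx8g -Dactivemq.tcp.url=tcp://localhost:61647"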
\ No newline at end of file diff --git a/src/main/scripts/run_SendDeleteMigrationNotification.sh b/src/main/scripts/run_SendDeleteMigrationNotification.sh new file mode 100644 index 0000000..ebd8677 --- /dev/null +++ b/src/main/scripts/run_SendDeleteMigrationNotification.sh @@ -0,0 +1,65 @@ +#!/bin/ksh + +### +# ============LICENSE_START======================================================= +# org.onap.aai +# ================================================================================ +# Copyright (C) 2017 AT&T Intellectual Property. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +### + +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh + + +start_date; +check_user; +source_profile; + +INPUT_PATH=$1 + +if [ ! -d "$INPUT_PATH" ]; then + echo "Input directory $INPUT_PATH does not exist!!"; + exit +fi + +if [ $(ls ${INPUT_PATH}/* 2> /dev/null | wc -l) -eq 0 ]; then + echo "Input directory $INPUT_PATH does not contain any migration files!!"; + exit +fi + +INPUT_DIR_FOR_JAVA=${INPUT_PATH}/deleteevents +mkdir -p "$INPUT_DIR_FOR_JAVA" +INPUT_FILE_FOR_JAVA=${INPUT_DIR_FOR_JAVA}/dmaap_delete_files.txt +#sort --numeric-sort -k 1 -t '_' $(find ${INPUT_PATH}/DELETE-* -maxdepth 0 -type f) | awk -F '_' '{ print $2"_"$3; }' > $INPUT_FILE_FOR_JAVA +find ${INPUT_PATH} -type f -name 'DELETE-*' -exec cat {} + > $INPUT_FILE_FOR_JAVA + +shift + +ARGS="-c ${PROJECT_HOME}/resources/etc/appprops/janusgraph-realtime.properties --inputFile $INPUT_FILE_FOR_JAVA" + +if [ -f "$PROJECT_HOME/resources/application.properties" ]; then + # Get the application properties file and look for all lines + # starting with either jms dmaap or niws + # Turn them into system properties and export JAVA_PRE_OPTS so + # execute spring jar will get those values + # This is only needed since dmaap is used by run_migrations + JAVA_PRE_OPTS=$(egrep '^jms.bind.address' $PROJECT_HOME/resources/application.properties | cut -d"=" -f2- | sed 's/^\(.*\)$/-Dactivemq.tcp.url=\1/g' | tr '\n' ' '); + export JAVA_PRE_OPTS; +fi; + +execute_spring_jar org.onap.aai.util.SendDeleteMigrationNotificationsMain ${PROJECT_HOME}/resources/migration-logback.xml ${ARGS} "$@" +end_date; +exit 0
\ No newline at end of file diff --git a/src/main/scripts/run_SendMigrationNotification.sh b/src/main/scripts/run_SendMigrationNotification.sh new file mode 100644 index 0000000..4bcc0d9 --- /dev/null +++ b/src/main/scripts/run_SendMigrationNotification.sh @@ -0,0 +1,64 @@ +#!/bin/ksh + +### +# ============LICENSE_START======================================================= +# org.onap.aai +# ================================================================================ +# Copyright (C) 2017 AT&T Intellectual Property. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +### + +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh + + +start_date; +check_user; +source_profile; + +INPUT_PATH=$1 + +if [ ! -d "$INPUT_PATH" ]; then + echo "Input directory $INPUT_PATH does not exist!!"; + exit +fi + +if [ $(ls ${INPUT_PATH}/* 2> /dev/null | wc -l) -eq 0 ]; then + echo "Input directory $INPUT_PATH does not contain any migration files!!"; + exit +fi + +INPUT_DIR_FOR_JAVA=${INPUT_PATH}/combined +mkdir -p "$INPUT_DIR_FOR_JAVA" +INPUT_FILE_FOR_JAVA=${INPUT_DIR_FOR_JAVA}/sorted_dmaap_files.txt +sort --numeric-sort -k 1 -t '_' $(find ${INPUT_PATH}/* -maxdepth 0 -type f) | awk -F '_' '{ print $2"_"$3; }' > $INPUT_FILE_FOR_JAVA + +shift + +ARGS="-c ${PROJECT_HOME}/resources/etc/appprops/janusgraph-realtime.properties --inputFile $INPUT_FILE_FOR_JAVA" + +if [ -f "$PROJECT_HOME/resources/application.properties" ]; then + # Get the application properties file and look for all lines + # starting with either jms dmaap or niws + # Turn them into system properties and export JAVA_PRE_OPTS so + # execute spring jar will get those values + # This is only needed since dmaap is used by run_migrations + JAVA_PRE_OPTS=$(egrep '^jms.bind.address' $PROJECT_HOME/resources/application.properties | cut -d"=" -f2- | sed 's/^\(.*\)$/-Dactivemq.tcp.url=\1/g' | tr '\n' ' '); + export JAVA_PRE_OPTS; +fi; + +execute_spring_jar org.onap.aai.util.SendMigrationNotificationsMain ${PROJECT_HOME}/resources/migration-logback.xml ${ARGS} "$@" +end_date; +exit 0
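The sort/awk step above assumes each line in the input event files starts with a numeric sequence followed by two more underscore-separated fields. With two invented input lines, 2_1516000000_eventB and 10_1517000000_eventA, the numeric sort on field 1 orders them 2 then 10, and sorted_dmaap_files.txt would contain:

    1516000000_eventB
    1517000000_eventA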
\ No newline at end of file
diff --git a/src/main/scripts/schemaMod.sh b/src/main/scripts/schemaMod.sh
new file mode 100644
index 0000000..d1fb009
--- /dev/null
+++ b/src/main/scripts/schemaMod.sh
@@ -0,0 +1,50 @@
+#!/bin/ksh
+#
+# This script is used to correct mistakes made in the database schema.
+# It currently just allows you to change the dataType and/or indexType on properties used by nodes.
+#
+# NOTE - Titan is not elegant in 0.5.3 about making changes to the schema. Bad properties never
+# actually leave the database, they just get renamed and stop getting used. So it is
+# really worthwhile to get indexes and dataTypes correct the first time around.
+# Note also - This script just makes changes to the schema that is currently live.
+# If you were to create a new schema in a brand-new environment, it would look like
+# whatever ex5.json (as of June 2015) told it to look like. So, part of making a
+# change to the db schema should always first be to make the change in ex5.json so that
+# future environments will have the change. This script is just to change existing
+# instances of the schema since schemaGenerator (as of June 2015) does not update things - it
+# just does the initial creation.
+#
+# Boy, this is getting to be a big comment section...
+#
+# To use this script, you need to pass four parameters:
+# propertyName -- the name of the property that you need to change either the index or dataType on
+# targetDataType -- whether it's changing or not, you need to give it: String, Integer, Boolean or Long
+# targetIndexInfo -- whether it's changing or not, you need to give it: index, noIndex or uniqueIndex
+# preserveDataFlag -- true or false. The only reason I can think of why you'd ever want to
+# set this to false would be maybe if you were changing to an incompatible dataType so didn't
+# want it to try to use the old data (and fail). But 99% of the time this will just be 'true'.
+#
+# Ie. schemaMod.sh flavor-id String index true
+#

+COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P )
+. ${COMMON_ENV_PATH}/common_functions.sh
+start_date;
+check_user;
+
+if [ "$#" -ne 4 ]; then
+ echo "Illegal number of parameters"
+ echo "usage: $0 propertyName targetDataType targetIndexInfo preserveDataFlag"
+ exit 1
+fi
+
+source_profile;
+execute_spring_jar org.onap.aai.dbgen.schemamod.SchemaMod ${PROJECT_HOME}/resources/schemaMod-logback.xml "$1" "$2" "$3" "$4"
+if [ "$?" -ne "0" ]; then
+ echo "Problem executing schemaMod "
+ end_date;
+ exit 1
+fi
+
+end_date;
+exit 0
diff --git a/src/main/scripts/uniquePropertyCheck.sh b/src/main/scripts/uniquePropertyCheck.sh
new file mode 100644
index 0000000..c3c92bf
--- /dev/null
+++ b/src/main/scripts/uniquePropertyCheck.sh
@@ -0,0 +1,24 @@
+#!/bin/ksh
+#
+# The script invokes the UniquePropertyCheck java class to see if the passed property is unique in the db and if
+# not, to display where duplicate values are found.
+#
+# For example: uniquePropertyCheck.sh subscriber-name
+#

+COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P )
+. ${COMMON_ENV_PATH}/common_functions.sh
+start_date;
+check_user;
+source_profile;
+
+execute_spring_jar org.onap.aai.util.UniquePropertyCheck ${PROJECT_HOME}/resources/uniquePropertyCheck-logback.xml "$@"
+ret_code=$?
+if [ $ret_code != 0 ]; then
+ end_date;
+ exit $ret_code
+fi
+
+end_date;
+exit 0
\ No newline at end of file diff --git a/src/main/scripts/updatePem.sh b/src/main/scripts/updatePem.sh new file mode 100644 index 0000000..e43a2eb --- /dev/null +++ b/src/main/scripts/updatePem.sh @@ -0,0 +1,38 @@ +#!/bin/ksh + +### +# ============LICENSE_START======================================================= +# org.onap.aai +# ================================================================================ +# Copyright (C) 2017 AT&T Intellectual Property. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +### +# +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh + +start_date; +check_user; +source_profile; + +CERTPATH=$PROJECT_HOME/resources/etc/auth/ +KEYNAME=aaiClientPrivateKey.pem +CERTNAME=aaiClientPublicCert.pem + +pw=$(execute_spring_jar org.onap.aai.util.AAIConfigCommandLinePropGetter "" "aai.keystore.passwd" 2> /dev/null | tail -1) +openssl pkcs12 -in ${CERTPATH}/aai-client-cert.p12 -out $CERTPATH$CERTNAME -clcerts -nokeys -passin pass:$pw +openssl pkcs12 -in ${CERTPATH}/aai-client-cert.p12 -out $CERTPATH$KEYNAME -nocerts -nodes -passin pass:$pw +end_date; +exit 0
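As a quick sanity check after running the script above, the extracted files can be inspected with standard openssl commands (illustrative only; assumes the client key is RSA):

    openssl x509 -in /opt/app/aai-graphadmin/resources/etc/auth/aaiClientPublicCert.pem -noout -subject -dates
    openssl rsa -in /opt/app/aai-graphadmin/resources/etc/auth/aaiClientPrivateKey.pem -check -noout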
\ No newline at end of file