diff options
Diffstat (limited to 'src/main/scripts')
-rw-r--r-- | src/main/scripts/add_vertex_label.sh | 15 | ||||
-rw-r--r-- | src/main/scripts/common_functions.sh | 4 | ||||
-rw-r--r-- | src/main/scripts/dynamicPayloadGenerator.sh | 63 | ||||
-rw-r--r-- | src/main/scripts/historyCreateDBSchema.sh | 37 | ||||
-rw-r--r-- | src/main/scripts/historyDbInitialLoad.sh | 54 | ||||
-rw-r--r-- | src/main/scripts/historySchemaMod.sh | 44 | ||||
-rw-r--r-- | src/main/scripts/historyTruncateDb.sh | 53 | ||||
-rw-r--r-- | src/main/scripts/resend-dmaap-events.sh | 362 | ||||
-rw-r--r-- | src/main/scripts/schemaMod.sh | 29 | ||||
-rw-r--r-- | src/main/scripts/updatePropertyTool.sh | 58 |
10 files changed, 683 insertions, 36 deletions
diff --git a/src/main/scripts/add_vertex_label.sh b/src/main/scripts/add_vertex_label.sh new file mode 100644 index 0000000..f026bd0 --- /dev/null +++ b/src/main/scripts/add_vertex_label.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +filename=$1; + +if [ -z "${filename}" ]; then + echo "Please provide a graphson file"; + exit 1; +fi; + +if [ ! -f "${filename}" ]; then + echo "Unable to find the graphson file ${filename}"; + exit 1; +fi; + +sed 's/"label":"vertex"\(.*\)"aai-node-type":\[{"id":"\([^"]*\)","value":"\([^"]*\)"/"label":"\3"\1"aai-node-type":[{"id":"\2","value":"\3"/g' ${filename} > "with_label_${filename}";
\ No newline at end of file diff --git a/src/main/scripts/common_functions.sh b/src/main/scripts/common_functions.sh index 55fb516..f279334 100644 --- a/src/main/scripts/common_functions.sh +++ b/src/main/scripts/common_functions.sh @@ -41,7 +41,7 @@ execute_spring_jar(){ export SOURCE_NAME=$(grep '^schema.source.name=' ${PROJECT_HOME}/resources/application.properties | cut -d"=" -f2-); # Needed for the schema ingest library beans - eval $(grep '^schema\.' ${PROJECT_HOME}/resources/application.properties | \ + eval $(egrep '^(schema|server|history)\.' ${PROJECT_HOME}/resources/application.properties | \ sed 's/^\(.*\)$/JAVA_OPTS="$JAVA_OPTS -D\1"/g' | \ sed 's/${server.local.startpath}/${PROJECT_HOME}\/resources/g'| \ sed 's/${schema.source.name}/'${SOURCE_NAME}'/g'\ @@ -49,7 +49,7 @@ execute_spring_jar(){ JAVA_OPTS="${JAVA_OPTS} ${JAVA_POST_OPTS}"; - ${JAVA_HOME}/bin/java ${JVM_OPTS} ${JAVA_OPTS} -jar ${EXECUTABLE_JAR} "$@" || { + "${JAVA_HOME}/bin/java" ${JVM_OPTS} ${JAVA_OPTS} -jar ${EXECUTABLE_JAR} "$@" || { echo "Failed to run the tool $0 successfully"; exit 1; } diff --git a/src/main/scripts/dynamicPayloadGenerator.sh b/src/main/scripts/dynamicPayloadGenerator.sh index 2140354..323b161 100644 --- a/src/main/scripts/dynamicPayloadGenerator.sh +++ b/src/main/scripts/dynamicPayloadGenerator.sh @@ -31,7 +31,7 @@ # -s (optional) true or false to enable or disable schema, By default it is true for production, # you can change to false if the snapshot has duplicates # -c (optional) config file to use for loading snapshot into memory. -# -o (required) output file to store the data files +# -o (optional) output file to store the data files # -f (optional) PAYLOAD or DMAAP-MR # -n (optional) input file for the script # @@ -54,22 +54,22 @@ display_usage() { Usage: $0 [options] 1. Usage: dynamicPayloadGenerator -d <graphsonPath> -o <output-path> - 2. This script has 2 arguments that are required. + 2. This script has 1 argument that is required. a. 
-d (required) Name of the fully qualified Datasnapshot file that you need to load - b. -o (required) output file to store the data files + 3. Optional Parameters: a. -s (optional) true or false to enable or disable schema, By default it is true for production, b. -c (optional) config file to use for loading snapshot into memory. By default it is set to /opt/app/aai-graphadmin/resources/etc/appprops/dynamic.properties c. -f (optional) PAYLOAD or DMAAP-MR d. -n (optional) input file specifying the nodes and relationships to export. Default: /opt/app/aai-graphadmin/scriptdata/tenant_isolation/nodes.json - e. -m (optional) true or false to read multiple snapshots or not, by default is false - f. -i (optional) the file containing the input filters based on node property and regex/value. By default, it is: /opt/app/aai-graphadmin/scriptdata/tenant_isolation/inputFilters.json + e. -i (optional) the file containing the input filters based on node property and regex/value. By default, it is: /opt/app/aai-graphadmin/scriptdata/tenant_isolation/inputFilters.json + f. -o (optional) output directory to store the data files 4. 
For example (there are many valid ways to use it): dynamicPayloadGenerator.sh -d '/opt/app/snapshots/snaphot.graphSON' -o '/opt/app/aai-graphadmin/resources/etc/scriptdata/addmanualdata/tenant_isolation/' dynamicPayloadGenerator.sh -d '/opt/app/snapshots/snaphot.graphSON' -s false -c '/opt/app/aai-graphadmin/resources/etc/appprops/dynamic.properties' -o '/opt/app/aai-graphadmin/resources/etc/scriptdata/addmanualdata/tenant_isolation/' -f PAYLOAD -n '/opt/app/aai-graphadmin/resources/etc/scriptdata/tenant_isolation/nodes.json' - -m false -i '/opt/app/aai-graphadmin/resources/etc/scriptdata/tenant_isolation/inputFilters.json' + -i '/opt/app/aai-graphadmin/resources/etc/scriptdata/tenant_isolation/inputFilters.json' EOF } @@ -86,7 +86,7 @@ check_user; source_profile; export JVM_OPTS="-Xmx9000m -Xms9000m" -while getopts ":f:s:d:n:c:i:m:o:p:" opt; do +while getopts ":f:s:d:n:c:i:o:" opt; do case ${opt} in f ) PAYLOAD=$OPTARG @@ -112,14 +112,6 @@ while getopts ":f:s:d:n:c:i:m:o:p:" opt; do INPUT_FILTER_FILE=$OPTARG echo ${opt} ;; - m ) - MULTIPLE_SNAPSHOTS=$OPTARG - echo ${opt} - ;; - p ) - PARTIAL=$OPTARG - echo ${opt} - ;; o ) OUTPUT_DIR=$OPTARG echo ${opt} @@ -145,11 +137,42 @@ for nodeType in ${nodes[@]} grep "aai-node-type.*\"value\":\"$nodeType\"" $INPUT_DATASNAPSHOT_FILE'.P'* >>$INPUT_DATASNAPSHOT_FILE'.out' cat $INPUT_DATASNAPSHOT_FILE'.out' | cut -d':' -f2- > $INPUT_DATASNAPSHOT_FILE'.partial' done +if [ -z ${OUTPUT_DIR} ] +then + OUTPUT_DIR=${PROJECT_HOME}/data/scriptdata/addmanualdata/tenant_isolation/payload +fi - -execute_spring_jar org.onap.aai.dbgen.DynamicPayloadGenerator ${PROJECT_HOME}/resources/dynamicPayloadGenerator-logback.xml -s ${VALIDATE_SCHEMA} \ - -f ${PAYLOAD} -o ${OUTPUT_DIR} -c ${DYNAMIC_CONFIG_FILE} -i ${INPUT_FILTER_FILE} -m ${MULTIPLE_SNAPSHOTS} \ - -d ${INPUT_DATASNAPSHOT_FILE} -n ${NODE_CONFIG_FILE} ; - +# Build the command +COMMAND="execute_spring_jar org.onap.aai.dbgen.DynamicPayloadGenerator 
${PROJECT_HOME}/resources/dynamicPayloadGenerator-logback.xml" +if [ ! -z ${VALIDATE_SCHEMA} ] +then + COMMAND="${COMMAND} -s ${VALIDATE_SCHEMA}" +fi +if [ ! -z ${PAYLOAD} ] +then + COMMAND="${COMMAND} -f ${PAYLOAD}" +fi +if [ ! -z ${INPUT_FILTER_FILE} ] +then + COMMAND="${COMMAND} -i ${INPUT_FILTER_FILE}" +fi +if [ ! -z ${NODE_CONFIG_FILE} ] +then + COMMAND="${COMMAND} -n ${NODE_CONFIG_FILE}" +fi +if [ ! -z ${INPUT_DATASNAPSHOT_FILE} ] +then + COMMAND="${COMMAND} -d ${INPUT_DATASNAPSHOT_FILE}" +else + display_usage + exit 1 +fi +# Removing the multiple snapshot option because there is just one .partial file +# (-m ${MULTIPLE_SNAPSHOTS}) +# The class only needs to read the ".partial" file and the default value for multiple snapshots is false if you don't pass it +#execute_spring_jar org.onap.aai.dbgen.DynamicPayloadGenerator ${PROJECT_HOME}/resources/dynamicPayloadGenerator-logback.xml -s ${VALIDATE_SCHEMA} \ +# -f ${PAYLOAD} -o ${OUTPUT_DIR} -c ${DYNAMIC_CONFIG_FILE} -i ${INPUT_FILTER_FILE} -m ${MULTIPLE_SNAPSHOTS} \ +# -d ${INPUT_DATASNAPSHOT_FILE} -n ${NODE_CONFIG_FILE} ; +${COMMAND}; end_date; exit 0 diff --git a/src/main/scripts/historyCreateDBSchema.sh b/src/main/scripts/historyCreateDBSchema.sh new file mode 100644 index 0000000..7a08a68 --- /dev/null +++ b/src/main/scripts/historyCreateDBSchema.sh @@ -0,0 +1,37 @@ +#!/bin/ksh +# +# ============LICENSE_START======================================================= +# org.onap.aai +# ================================================================================ +# Copyright © 2017 AT&T Intellectual Property. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +# +# ECOMP is a trademark and service mark of AT&T Intellectual Property. +# The script invokes GenTester java class to create the DB schema +# +# + +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh +start_date; +check_user; +source_profile; +if [ -z "$1" ]; then + execute_spring_jar org.onap.aai.schema.GenTester4Hist ${PROJECT_HOME}/resources/logback.xml +else + execute_spring_jar org.onap.aai.schema.GenTester4Hist ${PROJECT_HOME}/resources/logback.xml "$1" +fi; +end_date; +exit 0
\ No newline at end of file diff --git a/src/main/scripts/historyDbInitialLoad.sh b/src/main/scripts/historyDbInitialLoad.sh new file mode 100644 index 0000000..1341b86 --- /dev/null +++ b/src/main/scripts/historyDbInitialLoad.sh @@ -0,0 +1,54 @@ +#!/bin/ksh +# +# This script uses "history" versions of dataSnapshot and SchemaGenerator (via genTester) +# java classes to do the INITIAL load of a history database based on regular dataSnapShot +# files (assumed to be 'clean') from an existing non-history database. +# Steps: +# 1) Make sure the db is empty: clear out any existing data and schema. +# 2) rebuild the schema (using the SchemaGenerator4Hist) +# 3) reload data from the passed-in datafiles (which must found in the dataSnapShots directory and +# contain a json view of the db data). +# + +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh + +start_date; +check_user; + +if [ "$#" -lt 1 ]; then + echo "Illegal number of parameters" + echo "usage: $0 base_snapshot_filename" + exit 1 +fi + +source_profile; +export JAVA_PRE_OPTS=${JAVA_PRE_OPTS:--Xms6g -Xmx8g}; + +#### Step 1) Make sure the target database is cleared +echo "---- First Step: clear the db ----" +execute_spring_jar org.onap.aai.datasnapshot.DataSnapshot4HistInit ${PROJECT_HOME}/resources/logback.xml "-c" "CLEAR_ENTIRE_DATABASE" "-f" "$1" +if [ "$?" -ne "0" ]; then + echo "Problem clearing out database." + exit 1 +fi + +#### Step 2) rebuild the db-schema +echo "---- Second Step: rebuild the db schema ----" +execute_spring_jar org.onap.aai.schema.GenTester4Hist ${PROJECT_HOME}/resources/logback.xml "GEN_DB_WITH_NO_DEFAULT_CR" +if [ "$?" -ne "0" ]; then + echo "Problem rebuilding the schema (SchemaGenerator4Hist)." 
+ exit 1 +fi + +#### Step 3) load the data from snapshot files +echo "---- Third Step: Load data from snapshot files ----" +execute_spring_jar org.onap.aai.datasnapshot.DataSnapshot4HistInit ${PROJECT_HOME}/resources/logback.xml "-c" "MULTITHREAD_RELOAD" "-f" "$1" +if [ "$?" -ne "0" ]; then + echo "Problem reloading data into the database." + end_date; + exit 1 +fi + +end_date; +exit 0 diff --git a/src/main/scripts/historySchemaMod.sh b/src/main/scripts/historySchemaMod.sh new file mode 100644 index 0000000..c098f0e --- /dev/null +++ b/src/main/scripts/historySchemaMod.sh @@ -0,0 +1,44 @@ +#!/bin/ksh +# +# This script is used to correct mistakes made in the database schema. +# It currently just allows you to change either the dataType and/or indexType on properties used by nodes. +# +# NOTE - This script is for the History db. That is different than the +# regular schemaMod in these two ways: 1) it will never create a unique index. +# Indexes can be created, but they will never be defined as unique. +# 2) the last parameter (preserveDataFlag) is ignored since for history, we do +# not want to 'migrate' old data. Old data should not disappear or change. +# +# +# To use this script, you need to pass four parameters: +# propertyName -- the name of the property that you need to change either the index or dataType on +# targetDataType -- whether it's changing or not, you need to give it: String, Integer, Boolean or Long +# targetIndexInfo -- whether it's changing or not, you need to give it: index, noIndex or uniqueIndex +# preserveDataFlag -- true or false. The only reason I can think of why you'd ever want to +# set this to false would be maybe if you were changing to an incompatible dataType so didn't +# want it to try to use the old data (and fail). But 99% of the time this will just be 'true'. +# +# Ie. historySchemaMod flavor-id String index true +# + +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. 
${COMMON_ENV_PATH}/common_functions.sh +start_date; +check_user; + +if [ "$#" -ne 4 ]; then + echo "Illegal number of parameters" + echo "usage: $0 propertyName targetDataType targetIndexInfo preserveDataFlag" + exit 1 +fi + +source_profile; +execute_spring_jar org.onap.aai.dbgen.schemamod.SchemaMod4Hist ${PROJECT_HOME}/resources/schemaMod-logback.xml "$1" "$2" "$3" "$4" +if [ "$?" -ne "0" ]; then + echo "Problem executing schemaMod " + end_date; + exit 1 +fi + +end_date; +exit 0 diff --git a/src/main/scripts/historyTruncateDb.sh b/src/main/scripts/historyTruncateDb.sh new file mode 100644 index 0000000..b0ad39e --- /dev/null +++ b/src/main/scripts/historyTruncateDb.sh @@ -0,0 +1,53 @@ +#!/bin/ksh + +### +# ============LICENSE_START======================================================= +# org.onap.aai +# ================================================================================ +# Copyright (C) 2017 AT&T Intellectual Property. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +### +# +# +# historyTruncateDb.sh -- This tool is usually run from a cron. +# It uses the application.property "history.truncate.window.days" to look for +# and delete nodes and edges that have an end-ts earlier than the truncate window. 
+# Or, that can be over-ridden using the command line param, "-truncateWindowDays". +# That is - they were deleted from the 'real' database before the window. +# So, if the window is set to 90 days, we will delete all nodes and edges +# from the history db that were deleted from the real db more than 90 days ago. +# +# It also uses the property, "history.truncate.mode". Can be over-ridden using +# the command line property "-truncateMode" +# "LOG_ONLY" - look for candidate nodes/edges, but just log them (no deleting) +# "DELETE_AND_LOG" - like it says... does the deletes and logs what +# it deleted (node and edge properties) +# "SILENT_DELETE" - not entirely silent, but will pare the logs way back to +# just recording vertex and edge ids that are deleted. +# +# Ie. historyTruncateDb.sh -truncateWindowDays 60 -truncateMode LOG_ONLY +# +# + +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh + +start_date; +check_user; +source_profile; +execute_spring_jar org.onap.aai.historytruncate.HistoryTruncate ${PROJECT_HOME}/resources/logback.xml "$@" +end_date; + +exit 0
\ No newline at end of file diff --git a/src/main/scripts/resend-dmaap-events.sh b/src/main/scripts/resend-dmaap-events.sh new file mode 100644 index 0000000..2afa3a7 --- /dev/null +++ b/src/main/scripts/resend-dmaap-events.sh @@ -0,0 +1,362 @@ +#!/bin/bash + +### +# ============LICENSE_START======================================================= +# org.onap.aai +# ================================================================================ +# Copyright (C) 2017-18 AT&T Intellectual Property. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +### + +# +# resend-dmaap-events.sh -- This tool is used to resend dmaap events. 
+# In certain scenarios, due to DNS or other networking issues, if A&AI fails to publish events +# We need a mechanism to resend the dmaap events for objects that haven't been modified since +# So if a pserver object event was supposed to be sent but got lost and a later dmaap event +# was sent out then we shouldn't be sending dmaap messages +# It identifies if a dmaap message was already sent by looking at the resource version +# of the dmaap object that failed to send and checks the snapshot and see if they are the same +# +# Parameters: +# +# -b, (required) <string> the base url for the dmaap server +# -e, (required) <file> filename containing the missed events +# -l, (optional) indicating that the script should be run in debug mode +# it will not send the dmaap messages to dmaap server +# but it will write to a file named resend_dmaap_server.out +# -p, (required) <string> the password for the dmaap server +# -s, (required) <file> containing the data snapshot graphson file to compare the resource versions against +# -u, (required) <string> the username for the dmaap server +# +# An example of how to use the tool: +# Please use right credentials and right dmaap server in the cluster +# +# ./resend-dmaap-events.sh -e example_events.txt -s dataSnapshot.graphSON.201808091545 -u username -p example_pass -b https://localhost:3905 +# +# For each dmaap message in the example_events.txt, it will check +# against graphson and try to send it to the dmaap server +# If the example_events.txt contains two events one that wasn't sent to dmaap +# and the other that was already updated by another PUT/DELETE +# and the output of the run will look something like this: +# +# Output: +# Dmaap Event with Id 7f7d8a7b-4034-46f3-a969-d7e5cbcbf75f was sent +# Dmaap Event with Id 7f7d8a7b-4034-46f3-a969-d7e5cbcbf75f not sent +# +# If, let's say, there was a username password issue, you will see something like this: +# Dmaap Event with Id 7f7d8a7b-4034-46f3-a969-d7e5cbcbf75f was not sent due to 
dmaap error, please check logs +# Dmaap Event with Id 7f7d8a7b-4034-46f3-a969-d7e5cbcbf75f not sent +# +# From the directory in which you are executing the script (not where the script is located) +# You can have it be located and executed in the same place +# Check for a file called resend_dmaap_error.log as it will give you more details on the error +# +# For testing purposes, if you are trying to run this script and don't want to actually +# send it to a dmaap server, then you can run either of the following: +# +# ./resend-dmaap-events.sh -l -e example_events.txt -s dataSnapshot.graphSON.201808091545 +# or +# ./resend-dmaap-events.sh -l -e example_events.txt -s dataSnapshot.graphSON.201808091545 -u username -p example_pass -b https://localhost:3905 +# +# Following will output what would have been sent out based on checking the datasnapshot with example_events.txt +# +# Output: +# Dmaap Event with Id 7f7d8a7b-4034-46f3-a969-d7e5cbcbf75f was sent +# Dmaap Event with Id 7f7d8a7b-4034-46f3-a969-d7e5cbcbf75f not sent +# +# Also it will write the dmaap events to a file called dmaap_messages.out that +# would have been sent out in the current directory where you are executing this script +# + +current_directory=$( cd "$(dirname "$0")" ; pwd -P ); +resend_error_log=${current_directory}/resend_dmaap_error.log +resend_output=${current_directory}/dmaap_messages.out + +# Prints the usage of the shell script +usage(){ + echo "Usage $0 [options...]"; + echo; + echo " -b, <string> the base url for the dmaap server"; + echo " -e, <file> filename containing the missed events"; + echo " -l, (optional) indicating that the script should be run it debug mode" + echo " it will not send the dmaap messages to dmaap server " + echo " but it will write to a file named resend_dmaap_server.out" + echo " -p, <string> the password for the dmaap server"; + echo " -s, <file> containing the data snapshot graphson file to compare the resource versions against"; + echo " -u, <string> the 
username for the dmaap server"; + echo; + echo; + echo " An example of how to use the tool:"; + echo " Please use right credentials and right dmaap server in the cluster"; + echo; + echo " ./resend-dmaap-events.sh -e example_events.txt -s dataSnapshot.graphSON.201808091545 -u username -p example_pass -b https://localhost:3905"; + echo; + echo " For each dmaap message in the example_events.txt, it will check "; + echo " against graphson and try to send it to the dmaap server"; + echo " If the example_events.txt contains two events one that wasn't sent to dmaap"; + echo " and the other that was already updated by another PUT/DELETE"; + echo " and the output of the run will look something like this:"; + echo; + echo " Output:"; + echo " Dmaap Event with Id 7f7d8a7b-4034-46f3-a969-d7e5cbcbf75f was sent"; + echo " Dmaap Event with Id 7f7d8a7b-4034-46f3-a969-d7e5cbcbf75f not sent"; + echo " "; + echo " If lets say, there was a username password issue, you will see something like this:"; + echo " Dmaap Event with Id 7f7d8a7b-4034-46f3-a969-d7e5cbcbf75f was not sent due to dmaap error, please check logs"; + echo " Dmaap Event with Id 7f7d8a7b-4034-46f3-a969-d7e5cbcbf75f not sent"; + echo; + echo " From the directory in which you are executing the script (not where the script is located)"; + echo " You can have it be located and executed in the same place "; + echo " Check for a file called resend_dmaap_error.log as it will give you more details on the error"; + echo; + echo " For testing purposes, if you are trying to run this script and don't want to actually"; + echo " send it to a dmaap server, then you can run either of the following:"; + echo; + echo " ./resend-dmaap-events.sh -l -e example_events.txt -s dataSnapshot.graphSON.201808091545"; + echo " or"; + echo " ./resend-dmaap-events.sh -l -e example_events.txt -s dataSnapshot.graphSON.201808091545 -u username -p example_pass -b https://localhost:3905"; + echo; + echo " Following will output what would have been sent 
out based on checking the datasnapshot with example_events.txt"; + echo; + echo " Output:"; + echo " Dmaap Event with Id 7f7d8a7b-4034-46f3-a969-d7e5cbcbf75f was sent"; + echo " Dmaap Event with Id 7f7d8a7b-4034-46f3-a969-d7e5cbcbf75f not sent"; + echo; + echo " Also it will write the dmaap events to a file called dmaap_messages.out that "; + echo " would have been sent out in the current directory where you are executing this script"; + exit; +} + +# Validate the arguments being passed by user +# Checks if the argument of the string is greater than zero +# Also check if the file actually exists +validate(){ + local type_of_file=$1; + + if [ $# -eq 0 ]; then + echo "Error expecting the validate method to have at least one argument indicating what type"; + exit -1; + fi; + + shift; + + local arg=$1; + + if [ -z "$arg" ]; then + echo "Error missing the expected argument for ${type_of_file}"; + exit -1; + fi; + + if [ ! -f "$arg" ]; then + echo "Error: file $arg cannot be found, please check the file again"; + exit -1; + fi; +} + +# Checks if the resource version in dmaap message passed for an aai-uri +# is the same as the value in the snapshot file for that version +# If the resource version is the same it will return 0 for success +# Otherwise it will return non zero to indicate that this method failed +resource_version_matches_snapshot_file(){ + + local snapshot_file=$1; + local entity_link=$2; + local resource_version=$3; + local action=$4; + + if [ -z ${resource_version} ]; then + echo "Missing the parameter resource version to be passed"; + return -1; + fi + + # Modify the entity link passed to remove the /aai/v[0-9]+ + aai_uri=$(echo $entity_link | sed 's/\/aai\/v[0-9][0-9]*//g'); + + local line=$(grep '"value":"'${aai_uri}'"' ${snapshot_file} 2> /dev/null); + + if [ -z "${line}" ] ; then + if [ "${action}" = "DELETE" ]; then + return 0; + else + return -1; + fi; + fi; + + cnt=$(echo $line | grep -o 
'"resource-version":\[{"id":"[^"]*","value":"'$resource_version'"}\]' | wc -l); + + if [ $cnt -eq 1 ]; then + return 0; + else + return -1; + fi; +} + +# From a array being passed, it will determine the smallest element +# and return the index of the smallest element +# If the array length is zero, then it will return -1 +retrieve_smallest_index(){ + + local elements=("${@}"); + + if [ ${#elements} -eq 0 ]; then + return -1; + fi; + + local smallest_element=${elements[0]}; + + local index=0; + local smallest_index=0; + + for element in ${elements[@]}; do + if [ $element -lt $smallest_element ]; then + smallest_index=${index}; + fi; + index=$((index+1)); + done; + + return ${smallest_index}; +} + +# Send the dmaap event to the host based on +# the line that was send to the function +send_dmaap(){ + + local local_mode=$1; + local line=$2; + local username=$3; + local password=$4; + local baseurl=$5; + local resp_code=0; + + generated_file=$(uuidgen); + + local json_file=/tmp/${generated_file}.json; + local curl_output=/tmp/${generated_file}.txt; + + echo ${line} > ${json_file}; + > ${curl_output}; + id=$(echo $line | grep -o '"id":"[^"]*"' | cut -d":" -f2- | sed 's/"//g'); + + if [ "$local_mode" = true ]; then + echo $line >> ${resend_output}; + else + + response_code=$(curl \ + -k -o ${curl_output} -s -w "%{http_code}\n" \ + -u "${username}:${password}" \ + -X POST \ + -H "Content-Type: application/json" \ + -d "@${json_file}" \ + "${baseurl}/events/AAI-EVENT"\ + ); + + if [ "$response_code" -ne "200" ]; then + echo -n "Response failure for dmaap message with id ${id}," >> ${resend_error_log}; + echo " code: ${response_code} body: $(cat ${curl_output})" >> ${resend_error_log}; + resp_code=-1; + fi; + fi; + + if [ -f "${json_file}" ]; then + rm $json_file; + fi; + + if [ -f "${curl_output}" ]; then + rm $curl_output; + fi; + + return ${resp_code}; +} + +# Validates the events file and the snapshot file +# Goes through each line in the missed events file +# Gets all 
the resource versions there are +# Finds the smallest resource version there +# checks if the smallest resource version for the aai uri +# is what is currently in the last snapshot file provided by user +# If it is, it will send an dmaap event out + +main(){ + + if [ "${#}" -eq 0 ]; then + usage; + fi; + + # Get the first character of the first command line argument + # If the character doesn't start with dash (-) + # Then fail the script and display usage + + if [ "${1:0:1}" != "-" ]; then + echo "Invalid option: $1" >&2 + usage; + fi; + + while getopts ":e:s:u:lp:b:h" opt; do + case ${opt} in + l ) # Specify that the application will not send messages to dmaap but save it a file + local local_mode=true + ;; + e ) # Specify the file for missed events + local missed_events_file=$OPTARG + ;; + s ) # Specify the file for snapshot + local snapshot_file=$OPTARG + ;; + u ) # Specify the username to dmaap + local username=$OPTARG + ;; + p ) # Specify the password to dmaap + local password=$OPTARG + ;; + b ) # Specify the baseurl to dmaap + local hostname=$OPTARG + ;; + h ) + usage; + ;; + \? ) + echo "Invalid option: -$OPTARG" >&2 + usage; + ;; + esac + done; + + validate "events_file" $missed_events_file; + validate "snapshot_file" $snapshot_file; + + if [ "$local_mode" = true ]; then + > ${resend_output}; + fi; + + while read dmaap_event; do + entity_link=$(echo $dmaap_event | grep -o '"entity-link":"[^"]*"' | cut -d":" -f2- | sed 's/"//g'); + id=$(echo $dmaap_event | grep -o '"id":"[^"]*"' | cut -d":" -f2- | sed 's/"//g'); + action=$(echo $dmaap_event | grep -o '"action":"[^"]*"' | cut -d":" -f2- | sed 's/"//g'); + smallest_resource_version=$(echo $dmaap_event | jq -M '.' 
| grep 'resource-version' | sort | tail -1 | sed 's/[^0-9]//g'); + resource_version_matches_snapshot_file "${snapshot_file}" "${entity_link}" "${smallest_resource_version}" "${action}" && { + send_dmaap "${local_mode}" "$dmaap_event" "$username" "$password" "$hostname" && { + echo "Dmaap Event with Id $id was sent"; + } || { + echo "Dmaap Event with Id $id was not sent due to dmaap error, please check logs"; + } + } || { + echo "Dmaap Event with Id $id not sent"; + } + + done < ${missed_events_file}; + +} + +main $@ diff --git a/src/main/scripts/schemaMod.sh b/src/main/scripts/schemaMod.sh index d1fb009..c7b8ce9 100644 --- a/src/main/scripts/schemaMod.sh +++ b/src/main/scripts/schemaMod.sh @@ -3,20 +3,17 @@ # This script is used to correct mistakes made in the database schema. # It currently just allows you to change either the dataType and/or indexType on properties used by nodes. # -# NOTE - Titan is not elegant in 0.5.3 about making changes to the schema. Bad properties never -# actually leave the database, they just get renamed and stop getting used. So it is -# really worthwhile to get indexes and dataTypes correct the first time around. +# NOTE - JanusGraph is not particularly elegant in about making changes to the schema. +# So it is really worthwhile to get indexes and dataTypes correct the first time around. # Note also - This script just makes changes to the schema that is currently live. # If you were to create a new schema in a brandy-new environment, it would look like -# whatever ex5.json (as of June 2015) told it to look like. So, part of making a -# change to the db schema should Always first be to make the change in ex5.json so that -# future environments will have the change. This script is just to change existing -# instances of the schema since schemaGenerator (as of June 2015) does not update things - it -# just does the initial creation. +# whatever our OXM files told it to look like. 
So, part of making a +# change to the live db schema should Always first be to make the change in the appropriate +# OXM schema file so that future environments will have the change. This script is +# just to change existing instances of the schema since schemaGenerator does not +# update things - it just does the initial creation. # -# Boy, this is getting to be a big comment section... -# -# To use this script, you need to pass four parameters: +# To use this script, there are 4 required parameters, and one optional: # propertyName -- the name of the property that you need to change either the index or dataType on # targetDataType -- whether it's changing or not, you need to give it: String, Integer, Boolean or Long # targetIndexInfo -- whether it's changing or not, you need to give it: index, noIndex or uniqueIndex @@ -24,7 +21,11 @@ # set this to false would be maybe if you were changing to an incompatible dataType so didn't # want it to try to use the old data (and fail). But 99% of the time this will just be 'true'. # +# commitBlockSize -- OPTIONAL -- how many updates to commit at once. +# Default will be used if no value is passed. +# # Ie. schemaMod flavor-id String index true +# or, schemaMod flavor-id String noIndex true 50000 # COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) @@ -32,14 +33,14 @@ COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) start_date; check_user; -if [ "$#" -ne 4 ]; then +if [ "$#" -ne 4 ] && [ "$#" -ne 5 ]; then echo "Illegal number of parameters" - echo "usage: $0 propertyName targetDataType targetIndexInfo preserveDataFlag" + echo "usage: $0 propertyName targetDataType targetIndexInfo preserveDataFlag [blockSize]" exit 1 fi source_profile; -execute_spring_jar org.onap.aai.dbgen.schemamod.SchemaMod ${PROJECT_HOME}/resources/schemaMod-logback.xml "$1" "$2" "$3" "$4" +execute_spring_jar org.onap.aai.dbgen.schemamod.SchemaMod ${PROJECT_HOME}/resources/schemaMod-logback.xml "$@" if [ "$?" 
-ne "0" ]; then echo "Problem executing schemaMod " end_date; diff --git a/src/main/scripts/updatePropertyTool.sh b/src/main/scripts/updatePropertyTool.sh new file mode 100644 index 0000000..7e53a3f --- /dev/null +++ b/src/main/scripts/updatePropertyTool.sh @@ -0,0 +1,58 @@ +#!/bin/ksh +# +# ============LICENSE_START======================================================= +# org.onap.aai +# ================================================================================ +# Copyright © 2017 AT&T Intellectual Property. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +# +# ECOMP is a trademark and service mark of AT&T Intellectual Property. +# +# updatePropertyTool.sh -- This tool is used to update properties in corrupt vertices +# in the event that an update or delete occurs to a node simultaneously, resulting +# in inconsistent data. Updating the aai-uri can reset the index and restore +# the GET information on the node. +# +# Parameters: +# +# At least one of following two parameters are required +# The following values are needed to identify the node(s) to be updated +# --filename, -f filename of a .txt extension required with a list of vertexIds. Vertex Ids must be separated line after line in text file. 
+# --vertexId, -v option that may occur multiple times as entries of a list +# +# --property, -p (required) value to be updated in the corrupted node +# --help, -h (optional) used to display help on usage of the function +# +# +# For example: +# +# updatePropertyTool.sh --filename myFile.txt --vertexId 123 --property myProperty +# updatePropertyTool.sh --filename myFile.txt --vertexId 123 --vertexId 456 --property myProperty +# updatePropertyTool.sh -f myFile.txt --vertexId 123 -v 456 -p myProperty +# updatePropertyTool.sh -f myFile.txt -p -myProperty +# updatePropertyTool.sh -v 123 -v 456 -p -myProperty +# updatePropertyTool.sh -v 123 -p -myProperty +# +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh + +start_date; + +check_user; +source_profile; +execute_spring_jar org.onap.aai.dbgen.UpdatePropertyTool ${PROJECT_HOME}/resources/updatePropertyTool-logback.xml "$@" +end_date; + +exit 0 |