author    Kajur, Harish (vk250x) <vk250x@att.com>    2018-08-13 05:32:35 -0400
committer Kajur, Harish (vk250x) <vk250x@att.com>    2018-08-13 14:09:01 -0400
commit    16700753dcf0e3600f600b0b769c89eb2273f1d6 (patch)
tree      f1d2d285f0be3a739135b4cc6b5c3eb2774c7573 /src/main/scripts
parent    3889e9be653a7c284986b2830aa02252fb6485fe (diff)
Initial seed code for graphadmin
Issue-ID: AAI-1469
Change-Id: Ic170c326ad1fe4b43960de674797766f6f7b94bf
Signed-off-by: Kajur, Harish (vk250x) <vk250x@att.com>
Diffstat (limited to 'src/main/scripts')
-rw-r--r--  src/main/scripts/audit_schema.sh                           32
-rw-r--r--  src/main/scripts/common_functions.sh                       65
-rw-r--r--  src/main/scripts/createDBSchema.sh                         44
-rw-r--r--  src/main/scripts/dataGrooming.sh                          116
-rw-r--r--  src/main/scripts/dataRestoreFromSnapshot.sh                50
-rw-r--r--  src/main/scripts/dataSnapshot.sh                           28
-rw-r--r--  src/main/scripts/dupeTool.sh                               73
-rw-r--r--  src/main/scripts/dynamicPayloadArchive.sh                  75
-rw-r--r--  src/main/scripts/dynamicPayloadGenerator.sh               155
-rw-r--r--  src/main/scripts/dynamicPayloadPartial.sh                  13
-rw-r--r--  src/main/scripts/forceDeleteTool.sh                        84
-rw-r--r--  src/main/scripts/migration_verification.sh                 61
-rw-r--r--  src/main/scripts/run_Migrations.sh                         49
-rw-r--r--  src/main/scripts/run_SendDeleteMigrationNotification.sh    65
-rw-r--r--  src/main/scripts/run_SendMigrationNotification.sh          64
-rw-r--r--  src/main/scripts/schemaMod.sh                              50
-rw-r--r--  src/main/scripts/uniquePropertyCheck.sh                    24
-rw-r--r--  src/main/scripts/updatePem.sh                              38
18 files changed, 1086 insertions, 0 deletions
diff --git a/src/main/scripts/audit_schema.sh b/src/main/scripts/audit_schema.sh new file mode 100644 index 0000000..686dd49 --- /dev/null +++ b/src/main/scripts/audit_schema.sh @@ -0,0 +1,32 @@ +#!/bin/sh +# +# ============LICENSE_START======================================================= +# org.onap.aai +# ================================================================================ +# Copyright © 2017 AT&T Intellectual Property. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +# +# ECOMP is a trademark and service mark of AT&T Intellectual Property. +# +# +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh + +start_date; +check_user; +source_profile; +execute_spring_jar org.onap.aai.db.schema.ScriptDriver "/opt/app/aai-graphadmin/resources/logback.xml" "$@" +end_date; +exit 0 diff --git a/src/main/scripts/common_functions.sh b/src/main/scripts/common_functions.sh new file mode 100644 index 0000000..ed795fe --- /dev/null +++ b/src/main/scripts/common_functions.sh @@ -0,0 +1,65 @@ +#!/bin/ksh +# +# Common functions that can be used throughout multiple scripts +# In order to call these functions, this file needs to be sourced + +# Checks if the user that is currently running is aaiadmin +check_user(){ + + userid=$( id | cut -f2 -d"(" | cut -f1 -d")" ) + + if [ "${userid}" != "aaiadmin" ]; then + echo "You must be aaiadmin to run $0. The id used $userid." + exit 1 + fi +} + +# Sources the profile and sets the project home +source_profile(){ + . /etc/profile.d/aai.sh + PROJECT_HOME=/opt/app/aai-graphadmin +} + +# Runs the spring boot jar based on which main class +# to execute and which logback file to use for that class +execute_spring_jar(){ + + className=$1; + logbackFile=$2; + + shift 2; + + EXECUTABLE_JAR=$(ls ${PROJECT_HOME}/lib/*.jar); + + JAVA_OPTS="${JAVA_PRE_OPTS} -DAJSC_HOME=$PROJECT_HOME"; + JAVA_OPTS="$JAVA_OPTS -DBUNDLECONFIG_DIR=resources"; + JAVA_OPTS="$JAVA_OPTS -Daai.home=$PROJECT_HOME "; + JAVA_OPTS="$JAVA_OPTS -Dhttps.protocols=TLSv1.1,TLSv1.2"; + JAVA_OPTS="$JAVA_OPTS -Dloader.main=${className}"; + JAVA_OPTS="$JAVA_OPTS -Dloader.path=${PROJECT_HOME}/resources"; + JAVA_OPTS="$JAVA_OPTS -Dlogback.configurationFile=${logbackFile}"; + + export SOURCE_NAME=$(grep '^schema.source.name=' ${PROJECT_HOME}/resources/application.properties | cut -d"=" -f2-); + # Needed for the schema ingest library beans + eval $(grep '^schema\.' 
${PROJECT_HOME}/resources/application.properties | \ + sed 's/^\(.*\)$/JAVA_OPTS="$JAVA_OPTS -D\1"/g' | \ + sed 's/${server.local.startpath}/${PROJECT_HOME}\/resources/g'| \ + sed 's/${schema.source.name}/'${SOURCE_NAME}'/g'\ + ) + + JAVA_OPTS="${JAVA_OPTS} ${JAVA_POST_OPTS}"; + + ${JAVA_HOME}/bin/java ${JVM_OPTS} ${JAVA_OPTS} -jar ${EXECUTABLE_JAR} "$@" +} + +# Prints the start date and the script that the user called +start_date(){ + echo + echo `date` " Starting $0" +} + +# Prints the end date and the script that the user called +end_date(){ + echo + echo `date` " Done $0" +} diff --git a/src/main/scripts/createDBSchema.sh b/src/main/scripts/createDBSchema.sh new file mode 100644 index 0000000..01fef07 --- /dev/null +++ b/src/main/scripts/createDBSchema.sh @@ -0,0 +1,44 @@ +#!/bin/ksh +# +# ============LICENSE_START======================================================= +# org.onap.aai +# ================================================================================ +# Copyright © 2017 AT&T Intellectual Property. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +# +# ECOMP is a trademark and service mark of AT&T Intellectual Property. +# The script invokes GenTester java class to create the DB schema +# +# NOTE: you can pass an option GEN_DB_WITH_NO_SCHEMA if you want it to create an instance of +# the graph - but with no schema (this is useful when using the Hbase copyTable to +# copy our database to different environments). +# Ie. createDbSchema.sh GEN_DB_WITH_NO_SCHEMA +# +# +# +# + +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh +start_date; +check_user; +source_profile; +if [ -z "$1" ]; then + execute_spring_jar org.onap.aai.schema.GenTester ${PROJECT_HOME}/resources/logback.xml +else + execute_spring_jar org.onap.aai.schema.GenTester ${PROJECT_HOME}/resources/logback.xml "$1" +fi; +end_date; +exit 0
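As a side note on common_functions.sh above: the eval/sed pipeline inside execute_spring_jar is what turns the schema.* entries of application.properties into -D system properties for the Spring Boot jar. The sketch below is illustrative only -- the properties file content and /tmp paths are made up, it skips the schema.source.name substitution, and PROJECT_HOME is assumed to match the install layout these scripts use.

    # Minimal sketch of the property-to-JVM-option translation done by execute_spring_jar
    PROJECT_HOME=/opt/app/aai-graphadmin            # assumed install path
    printf 'schema.source.name=onap\nschema.nodes.location=${server.local.startpath}/schema/oxm\n' \
        > /tmp/application.properties               # hypothetical property values

    JAVA_OPTS=""
    eval $(grep '^schema\.' /tmp/application.properties | \
           sed 's/^\(.*\)$/JAVA_OPTS="$JAVA_OPTS -D\1"/g' | \
           sed 's/${server.local.startpath}/${PROJECT_HOME}\/resources/g')
    echo $JAVA_OPTS
    # -> roughly: -Dschema.source.name=onap -Dschema.nodes.location=/opt/app/aai-graphadmin/resources/schema/oxm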
\ No newline at end of file diff --git a/src/main/scripts/dataGrooming.sh b/src/main/scripts/dataGrooming.sh new file mode 100644 index 0000000..a6b5f4f --- /dev/null +++ b/src/main/scripts/dataGrooming.sh @@ -0,0 +1,116 @@ +#!/bin/ksh +# +# The script invokes the dataGrooming java class to run some tests and generate a report and +# potentially do some auto-deleting. +# +# Here are the allowed Parameters. Note - they are all optional and can be mixed and matched. +# +# -f oldFileName (see note below) +# -autoFix +# -sleepMinutes nn +# -edgesOnly +# -skipEdges +# -timeWindowMinutes nn +# -dontFixOrphans +# -maxFix +# -skipHostCheck +# -singleCommits +# -dupeCheckOff +# -dupeFixOn +# -ghost2CheckOff +# -ghost2FixOn +# +# +# +# +# NOTES: +# -f The name of a previous report can optionally be passed in with the "-f" option. +# Just the filename -- ie. "dataGrooming.sh -f dataGrooming.201504272106.out" +# The file will be assumed to be in the directory that it was created in. +# If a filename is passed, then the "deleteCandidate" vertex-id's and bad edges +# listed inside that report file will be deleted on this run if they are encountered as +# bad nodes/edges again. +# +# -autoFix If you don't use the "-f" option, you could choose to use "-autofix" which will +# automatically run the script twice: once to look for problems, then after +# sleeping for a few minutes, it will re-run with the initial-run's output as +# an input file. +# +# -maxFix When using autoFix, you might want to limit how many 'bad' records get fixed. +# This is a safeguard against accidentally deleting too many records automatically. +# It has a default value set in AAIConstants: AAI_GROOMING_DEFAULT_MAX_FIX = 15; +# If there are more than maxFix candidates found -- then none will be deleted (ie. +# someone needs to look into it) +# +# -sleepMinutes When using autoFix, this defines how many minutes we sleep before the second run. +# It has a default value set in AAIConstants: AAI_GROOMING_DEFAULT_SLEEP_MINUTES = 7; +# The reason we sleep at all between runs is that our DB is "eventually consistent", so +# we want to give it time to resolve itself if possible. +# +# -edgesOnly Can be used any time you want to limit this tool so it only looks at edges. +# Note - as of 1710, we have not been seeing many purely bad edges, +# (ie. not associated with a phantom node) so this option is not used often. +# +# -skipEdgeChecks Use it to bypass checks for bad Edges (which are pretty rare). +# +# -timeWindowMinutes Use it to limit the nodes looked at to ones whose update-timestamp tells us that it was last updated less than this many minutes ago. Note this is usually used along with the skipEdgeChecks option. +# +# -dontFixOrphans Since there can sometimes be a lot of orphan nodes, and they don't +# harm processing as much as phantom-nodes or bad-edges, it is useful to be +# able to ignore them when fixing things. +# +# -skipHostCheck By default, the grooming tool will check to see that it is running +# on the host that is the first one in the list found in: +# aaiconfig.properties aai.primary.filetransfer.serverlist +# This is so that when run from the cron, it only runs on one machine. +# This option lets you turn that checking off. +# +# -singleCommits By default, the grooming tool will do all of its processing and then do +# a commit of all the changes at once.
This option (maybe could have been named better) +# is letting the user override the default behavior and do a commit for each +# individual 'remove' one by one as they are encountered by the grooming logic. +# NOTE - this only applies when using either the "-f" or "-autoFix" options since +# those are the only two that make changes to the database. +# +# -dupeCheckOff By default, we will check all of our nodes for duplicates. This parameter lets +# us turn this check off if we don't want to do it for some reason. +# +# -dupeFixOn When we're fixing data, by default we will NOT fix duplicates. This parameter lets us turn +# that fixing ON when we are comfortable that it can pick the correct duplicate to preserve. +# +# -ghost2CheckOff By default, we will check for the "new" kind of ghost that we saw on +# Production in early February 2016. This parameter lets us turn this check off if we +# don't want to do it for some reason. +# +# -ghost2FixOn When we're fixing data, by default we will NOT try to fix the "new" ghost nodes. +# This parameter lets us turn that fixing ON if we want to try to fix them. +# +# +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh + +# TODO: There is a better way where you can pass in the function +# and then let the common functions check if the function exist and invoke it +# So this all can be templated out +start_date; +check_user; + +processStat=$(ps -ef | grep '[D]ataGrooming'); +if [ "$processStat" != "" ] + then + echo "Found dataGrooming is already running: " $processStat + exit 1 +fi + +# Make sure that it's not already running +processStat=`ps -ef|grep aaiadmin|grep -E "org.onap.aai.dbgen.DataGrooming"|grep -v grep` +if [ "$processStat" != "" ] + then + echo "Found dataGrooming is already running: " $processStat + exit 1 +fi + +source_profile; +execute_spring_jar org.onap.aai.datagrooming.DataGrooming $PROJECT_HOME/resources/logback.xml "$@" +end_date; +exit 0 diff --git a/src/main/scripts/dataRestoreFromSnapshot.sh b/src/main/scripts/dataRestoreFromSnapshot.sh new file mode 100644 index 0000000..405a667 --- /dev/null +++ b/src/main/scripts/dataRestoreFromSnapshot.sh @@ -0,0 +1,50 @@ +#!/bin/ksh +# +# This script uses the dataSnapshot and SchemaGenerator (via GenTester) java classes to restore +# data to a database by doing three things: +# 1) clear out whatever data and schema are currently in the db +# 2) rebuild the schema (using the SchemaGenerator) +# 3) reload data from the passed-in datafile (which must be found in the dataSnapShots directory and +# contain an xml view of the db data). +# + +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh + +start_date; +check_user; + +if [ "$#" -lt 1 ]; then + echo "Illegal number of parameters" + echo "usage: $0 previous_snapshot_filename" + exit 1 +fi + +source_profile; +export PRE_JAVA_OPTS=${PRE_JAVA_OPTS:--Xms6g -Xmx8g}; + +#### Step 1) clear out the database +execute_spring_jar org.onap.aai.datasnapshot.DataSnapshot ${PROJECT_HOME}/resources/logback.xml "CLEAR_ENTIRE_DATABASE" "$1" "$2" +if [ "$?" -ne "0" ]; then + echo "Problem clearing out database." + exit 1 +fi + +#### Step 2) rebuild the db-schema +execute_spring_jar org.onap.aai.schema.GenTester ${PROJECT_HOME}/resources/logback.xml "GEN_DB_WITH_NO_DEFAULT_CR" +if [ "$?" -ne "0" ]; then + echo "Problem rebuilding the schema (SchemaGenerator)."
+ exit 1 +fi + +#### Step 3) reload the data from a snapshot file + +execute_spring_jar org.onap.aai.datasnapshot.DataSnapshot ${PROJECT_HOME}/resources/logback.xml "RELOAD_DATA" "$1" +if [ "$?" -ne "0" ]; then + echo "Problem reloading data into the database." + end_date; + exit 1 +fi + +end_date; +exit 0 diff --git a/src/main/scripts/dataSnapshot.sh b/src/main/scripts/dataSnapshot.sh new file mode 100644 index 0000000..f380e85 --- /dev/null +++ b/src/main/scripts/dataSnapshot.sh @@ -0,0 +1,28 @@ +#!/bin/ksh +# +# This script invokes the dataSnapshot java class passing an option to tell it to take +# a snapshot of the database and store it as a single-line XML file. +# +# +# +# +# +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh + +processStat=$(ps -ef | grep '[D]ataSnapshot'); +if [ "$processStat" != "" ] + then + echo "Found dataSnapshot is already running: " $processStat + exit 1 +fi + +# TODO: There is a better way where you can pass in the function +# and then let the common functions check if the function exist and invoke it +# So this all can be templated out +start_date; +check_user; +source_profile; +execute_spring_jar org.onap.aai.datasnapshot.DataSnapshot $PROJECT_HOME/resources/logback.xml "$@" +end_date; +exit 0 diff --git a/src/main/scripts/dupeTool.sh b/src/main/scripts/dupeTool.sh new file mode 100644 index 0000000..350b0bd --- /dev/null +++ b/src/main/scripts/dupeTool.sh @@ -0,0 +1,73 @@ +#!/bin/ksh + +### +# ============LICENSE_START======================================================= +# org.onap.aai +# ================================================================================ +# Copyright (C) 2017 AT&T Intellectual Property. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +### +# +# +# dupeTool.sh -- This tool is used to look at or fix duplicate nodes for one nodeType +# at a time and can be used to limit what it's looking at to just nodes created +# within a recent time window. +# It is made to deal with situations (like we have in 1610/1702) where one type +# of node keeps needing to have duplicates cleaned up (tenant nodes). +# It is needed because DataGrooming cannot be run often and cannot be focused just +# on duplicates or just on one nodeType. +# +# Parameters: +# +# -userId (required) must be followed by a userid +# -nodeType (required) must be followed by a valid nodeType +# -timeWindowMinutes (optional) by default we would look at all nodes of the +# given nodeType, but if a window is given, then we will only look at +# nodes created that many (or fewer) minutes ago. 
+# -autoFix (optional) use this if you want duplicates fixed automatically (if we +# can figure out which to delete) +# -maxFix (optional) like with dataGrooming lets you override the default maximum +# number of dupes that can be processed at one time +# -skipHostCheck (optional) By default, the dupe tool will check to see that it is running +# on the host that is the first one in the list found in: +# aaiconfig.properties aai.primary.filetransfer.serverlist +# This is so that when run from the cron, it only runs on one machine. +# This option lets you turn that checking off. +# -sleepMinutes (optional) like with DataGrooming, you can override the +# sleep time done when doing autoFix between first and second checks of the data. +# -params4Collect (optional) followed by a string to tell what properties/values to use +# to limit the nodes being looked at. Must be in the format +# of "propertyName|propValue" use commas to separate if there +# are more than one name/value being passed. +# -specialTenantRule (optional) turns on logic which will use extra logic to figure +# out which tenant node can be deleted in a common scenario. +# +# +# For example (there are many valid ways to use it): +# +# dupeTool.sh -userId am8383 -nodeType tenant -timeWindowMinutes 60 -autoFix +# or +# dupeTool.sh -userId am8383 -nodeType tenant -specialTenantRule -autoFix -maxFix 100 +# + +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh + +start_date; +check_user; +source_profile; +execute_spring_jar org.onap.aai.dbgen.DupeTool ${PROJECT_HOME}/resources/dupeTool-logback.xml "$@" +end_date; +exit 0
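One supplementary note on -params4Collect, since the format trips people up: each entry is a "propertyName|propValue" pair and several pairs can be comma-separated in one string. The invocations below are illustrative only -- the userId, node types and property values are made up and should be replaced with real ones.

    # Hypothetical examples of the -params4Collect format
    dupeTool.sh -userId am8383 -nodeType tenant -params4Collect "tenant-name|test-tenant-01"
    dupeTool.sh -userId am8383 -nodeType cloud-region \
        -params4Collect "cloud-owner|test-owner,cloud-region-id|test-region-01" -timeWindowMinutes 120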
\ No newline at end of file diff --git a/src/main/scripts/dynamicPayloadArchive.sh b/src/main/scripts/dynamicPayloadArchive.sh new file mode 100644 index 0000000..87cce13 --- /dev/null +++ b/src/main/scripts/dynamicPayloadArchive.sh @@ -0,0 +1,75 @@ +#!/bin/ksh +# +### +# ============LICENSE_START======================================================= +# org.onap.aai +# ================================================================================ +# Copyright (C) 2017 AT&T Intellectual Property. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +### + +# +# The script is called to tar and gzip the files under /opt/app/aai-graphadmin/data/scriptdata/addmanualdata/tenant_isolation/payload +# which contains the payload files created by the dynamicPayloadGenerator.sh tool. +# /opt/app/aai-graphadmin/data/scriptdata/addmanualdata/tenant_isolation is mounted to the docker container +# + +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh + +. /etc/profile.d/aai.sh +PROJECT_HOME=/opt/app/aai-graphadmin + +PROGNAME=$(basename $0) + +TS=$(date "+%Y_%m_%d_%H_%M_%S") + +CHECK_USER="aaiadmin" +userid=$( id | cut -f2 -d"(" | cut -f1 -d")" ) +if [ "${userid}" != $CHECK_USER ]; then + echo "You must be $CHECK_USER to run $0. The id used $userid." + exit 1 +fi +PAYLOAD_DIRECTORY=${PROJECT_HOME}/resources/etc/scriptdata/addmanualdata/tenant_isolation/payload +ARCHIVE_DIRECTORY=${PROJECT_HOME}/resources/etc/scriptdata/addmanualdata/tenant_isolation/archive +if [ ! -d ${PAYLOAD_DIRECTORY} ] +then + echo " ${PAYLOAD_DIRECTORY} doesn't exist" + exit 1 +fi +if [ ! -d ${ARCHIVE_DIRECTORY} ] +then + mkdir -p ${ARCHIVE_DIRECTORY} + chown aaiadmin:aaiadmin ${ARCHIVE_DIRECTORY} + chmod u+w ${ARCHIVE_DIRECTORY} +fi +cd ${PAYLOAD_DIRECTORY} +tar c * -f ${ARCHIVE_DIRECTORY}/dynamicPayloadArchive_${TS}.tar --exclude=payload +if [ $? -ne 0 ] +then + echo " Unable to tar ${PAYLOAD_DIRECTORY}" + exit 1 +fi + +cd ${ARCHIVE_DIRECTORY} +gzip ${ARCHIVE_DIRECTORY}/dynamicPayloadArchive_${TS}.tar + +if [ $? -ne 0 ] +then + echo " Unable to gzip ${ARCHIVE_DIRECTORY}/dynamicPayloadArchive_${TS}.tar" + exit 1 +fi +echo "Completed successfully: ${ARCHIVE_DIRECTORY}/dynamicPayloadArchive_${TS}.tar" +exit 0 diff --git a/src/main/scripts/dynamicPayloadGenerator.sh b/src/main/scripts/dynamicPayloadGenerator.sh new file mode 100644 index 0000000..3d30790 --- /dev/null +++ b/src/main/scripts/dynamicPayloadGenerator.sh @@ -0,0 +1,155 @@ +#!/bin/ksh +# +### +# ============LICENSE_START======================================================= +# org.onap.aai +# ================================================================================ +# Copyright (C) 2017 AT&T Intellectual Property. All rights reserved. 
+# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +### + +# +# dynamicPayloadGenerator.sh -- This tool is used to dynamically load payloads from snapshots +# It is used to load a snapshot into memory and generate payloads for any input nodes +# +# +# Parameters: +# +# -d (required) name of the fully qualified Datasnapshot file that you need to load +# -s (optional) true or false to enable or disable schema, By default it is true for production, +# you can change to false if the snapshot has duplicates +# -c (optional) config file to use for loading snapshot into memory. +# -o (required) output file to store the data files +# -f (optional) PAYLOAD or DMAAP-MR +# -n (optional) input file for the script +# +# +# For example (there are many valid ways to use it): +# +# dynamicPayloadGenerator.sh -d '/opt/app/snapshots/snaphot.graphSON' -o '/opt/app/aai-graphadmin/resources/etc/scriptdata/addmanualdata/payload_dir/' +# +# or +# dynamicPayloadGenerator.sh -d '/opt/app/snapshots/snaphot.graphSON' -s false -c '/opt/app/aai-graphadmin/resources/etc/appprops/dynamic.properties' +# -o '/opt/app/aai-graphadmin/resources/etc/scriptdata/addmanualdata/payload_dir/' -f PAYLOAD -n '/opt/app/aai-graphadmin/resources/etc/scriptdata/nodes.json' +# + + +echo +echo `date` " Starting $0" + +display_usage() { + cat <<EOF + Usage: $0 [options] + + 1. Usage: dynamicPayloadGenerator -d <graphsonPath> -o <output-path> + 2. This script has 2 arguments that are required. + a. -d (required) Name of the fully qualified Datasnapshot file that you need to load + b. -o (required) output file to store the data files + 3. Optional Parameters: + a. -s (optional) true or false to enable or disable schema, By default it is true for production, + b. -c (optional) config file to use for loading snapshot into memory. By default it is set to /opt/app/aai-graphadmin/resources/etc/appprops/dynamic.properties + c. -f (optional) PAYLOAD or DMAAP-MR + d. -n (optional) input file specifying the nodes and relationships to export. Default: /opt/app/aai-graphadmin/scriptdata/tenant_isolation/nodes.json + e. -m (optional) true or false to read multiple snapshots or not, by default is false + f. -i (optional) the file containing the input filters based on node property and regex/value. By default, it is: /opt/app/aai-graphadmin/scriptdata/tenant_isolation/inputFilters.json + 4. 
For example (there are many valid ways to use it): + dynamicPayloadGenerator.sh -d '/opt/app/snapshots/snaphot.graphSON' -o '/opt/app/aai-graphadmin/resources/etc/scriptdata/addmanualdata/tenant_isolation/' + + dynamicPayloadGenerator.sh -d '/opt/app/snapshots/snaphot.graphSON' -s false -c '/opt/app/aai-graphadmin/resources/etc/appprops/dynamic.properties' + -o '/opt/app/aai-graphadmin/resources/etc/scriptdata/addmanualdata/tenant_isolation/' -f PAYLOAD -n '/opt/app/aai-graphadmin/resources/etc/scriptdata/tenant_isolation/nodes.json' + -m false -i '/opt/app/aai-graphadmin/resources/etc/scriptdata/tenant_isolation/inputFilters.json' + +EOF +} +if [ $# -eq 0 ]; then + display_usage + exit 1 +fi + +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh + +start_date; +check_user; +source_profile; +export JVM_OPTS="-Xmx9000m -Xms9000m" + +while getopts ":f:s:d:n:c:i:m:o:p:" opt; do + case ${opt} in + f ) + PAYLOAD=$OPTARG + echo ${opt} + ;; + s ) + VALIDATE_SCHEMA=$OPTARG + echo ${opt} + ;; + d ) + INPUT_DATASNAPSHOT_FILE=$OPTARG + echo ${opt} + ;; + n ) + NODE_CONFIG_FILE=$OPTARG + echo ${opt} + ;; + c ) + DYNAMIC_CONFIG_FILE=$OPTARG + echo ${opt} + ;; + i ) + INPUT_FILTER_FILE=$OPTARG + echo ${opt} + ;; + m ) + MULTIPLE_SNAPSHOTS=$OPTARG + echo ${opt} + ;; + p ) + PARTIAL=$OPTARG + echo ${opt} + ;; + o ) + OUTPUT_DIR=$OPTARG + echo ${opt} + ;; + \? ) + echo "Invalid Option: -$OPTARG" 1>&2 + ;; + : ) + echo "Invalid Option: -$OPTARG requires an argument" 1>&2 + ;; + esac + done + shift $((OPTIND -1)) + +echo 'Done' + +set -A nodes pserver cloud-region availability-zone tenant zone complex + +#Create empty partial file + > $INPUT_DATASNAPSHOT_FILE".partial" + +for nodeType in ${nodes[@]} + do + grep "aai-node-type.*\"value\":\"$nodeType\"" $INPUT_DATASNAPSHOT_FILE >>$INPUT_DATASNAPSHOT_FILE'.partial' + done + + +execute_spring_jar org.onap.aai.dbgen.DynamicPayloadGenerator ${PROJECT_HOME}/resources/dynamicPayloadGenerator-logback.xml -s ${VALIDATE_SCHEMA} \ + -f ${PAYLOAD} -o ${OUTPUT_DIR} -c ${DYNAMIC_CONFIG_FILE} -i ${INPUT_FILTER_FILE} -m ${MULTIPLE_SNAPSHOTS} \ + -d ${INPUT_DATASNAPSHOT_FILE} -n ${NODE_CONFIG_FILE} ; + +end_date; +exit 0 diff --git a/src/main/scripts/dynamicPayloadPartial.sh b/src/main/scripts/dynamicPayloadPartial.sh new file mode 100644 index 0000000..8021aa6 --- /dev/null +++ b/src/main/scripts/dynamicPayloadPartial.sh @@ -0,0 +1,13 @@ +#!/bin/ksh + +#Create empty partial snapshot file +INPUT_DATASNAPSHOT_FILE=$1 + +set -A nodes pserver cloud-region availability-zone tenant zone complex + > $INPUT_DATASNAPSHOT_FILE".partial" + +for nodeType in ${nodes[@]} + do + grep "aai-node-type.*\"value\":\"$nodeType\"" $INPUT_DATASNAPSHOT_FILE >>$INPUT_DATASNAPSHOT_FILE'.partial' + done +exit 0
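A quick sketch of what the grep filter in dynamicPayloadPartial.sh (and the identical loop above) keeps: it is a plain text match on the aai-node-type property inside each graphSON vertex line. The snapshot content below is invented and heavily abbreviated; real graphSON vertices are much larger.

    # Two fake, abbreviated snapshot lines -- only the pserver line survives the filter
    printf '%s\n' \
      '{"id":101,"label":"vertex","properties":{"aai-node-type":[{"id":"1","value":"pserver"}]}}' \
      '{"id":102,"label":"vertex","properties":{"aai-node-type":[{"id":"2","value":"vserver"}]}}' \
      > /tmp/example.graphSON

    > /tmp/example.graphSON.partial
    for nodeType in pserver cloud-region availability-zone tenant zone complex
    do
        grep "aai-node-type.*\"value\":\"$nodeType\"" /tmp/example.graphSON >> /tmp/example.graphSON.partial
    done
    # /tmp/example.graphSON.partial now holds just the pserver vertex (id 101)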
\ No newline at end of file diff --git a/src/main/scripts/forceDeleteTool.sh b/src/main/scripts/forceDeleteTool.sh new file mode 100644 index 0000000..2d42fda --- /dev/null +++ b/src/main/scripts/forceDeleteTool.sh @@ -0,0 +1,84 @@ +#!/bin/ksh +# +# ============LICENSE_START======================================================= +# org.onap.aai +# ================================================================================ +# Copyright © 2017 AT&T Intellectual Property. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +# +# ECOMP is a trademark and service mark of AT&T Intellectual Property. +# +# +# +# forceDeleteTool.sh -- This tool is used to delete nodes that cannot be deleted using +# the normal REST API because of internal DB problems. For example, Phantom nodes +# and duplicate nodes cause errors to happen in "normal" REST API codes and must +# be deleted using this tool. +# Since it is not using the "normal" REST logic, it is also not invoking the "normal" +# edge rules that we use to cascade deletes to "child" nodes. So - this tool can be dangerous. +# Ie. if you accidentally delete a parent node (like a cloud-region) that has many dependent +# child nodes, there will be no way to get to any of those child-nodes after the cloud-region +# has been deleted. +# There are several environment variables defined in aaiconfig.properties to help minimize errors like that. +# aai.forceDel.protected.nt.list=cloud-region +# aai.forceDel.protected.edge.count=10 +# aai.forceDel.protected.descendant.count=10 +# +# Parameters: +# +# -action (required) valid values: COLLECT_DATA or DELETE_NODE or DELETE_EDGE +# -userId (required) must be followed by a userid +# -params4Collect followed by a string to tell what properties/values to use +# as part of a COLLECT_DATA request. Must be in the format +# of "propertyName|propValue" use commas to separate if there +# are more than one name/value being passed. +# -vertexId - required for a DELETE_NODE request +# -edgeId - required for a DELETE_EDGE request +# -overRideProtection --- WARNING -- This over-rides the protections we introduced! +# It will let you override a protected vertex or vertex that has more +# than the allowed number of edges or descendants. +# -DISPLAY_ALL_VIDS (optional) - in the rare case when you want to see the +# vertex-ids (vids) of all the CONNECTED vertices, you can use this. By +# default, we do not show them.
+# +# +# For example: +# +# forceDeleteTool.sh -action COLLECT_DATA -userId am8383 -params4Collect "tenant-id|junk tenant01 ID 0224" +# +# forceDeleteTool.sh -action COLLECT_DATA -userId am8383 -params4Collect "cloud-owner|junkTesterCloudOwner 0224,cloud-region-id|junkTesterCloud REgion ID 0224" +# +# forceDeleteTool.sh -action DELETE_NODE -userId am8383 -vertexId 1234567 +# +# forceDeleteTool.sh -action DELETE_EDGE -userId am8383 -edgeId 9876543 +# +# + +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh + +start_date; + +echo " NOTE - if you are deleting data, please run the dataSnapshot.sh script first or " +echo " at least make a note the details of the node that you are deleting. " + +check_user; +source_profile; + +execute_spring_jar org.onap.aai.dbgen.ForceDeleteTool ${PROJECT_HOME}/resources/forceDelete-logback.xml "$@" + +end_date; + +exit 0 diff --git a/src/main/scripts/migration_verification.sh b/src/main/scripts/migration_verification.sh new file mode 100644 index 0000000..1e1b228 --- /dev/null +++ b/src/main/scripts/migration_verification.sh @@ -0,0 +1,61 @@ +#!/bin/ksh + +### +# ============LICENSE_START======================================================= +# org.onap.aai +# ================================================================================ +# Copyright (C) 2018 AT&T Intellectual Property. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +### + +# +# migration_verification.sh -- This tool is used to provide a summary of migration logs +# This searches for pre-defined strings "Migration Error" and "Migration Summary Count" in log files and outputs those lines. +# + +display_usage() { + cat << EOF + Usage: $0 [options] + + 1. Usage: migration_verification.sh <last_modified> <logs_path> + 2. The <logs_path> should be a directory containing all of the logs. If empty, default path is /opt/app/aai-graphadmin/logs/migration. + 3. The <last_modified> parameter should be an integer for up to how many minutes ago a log file should be parsed. + 4. 
Example: migration_verification.sh 60 /opt/app/aai-graphadmin/logs/migration +EOF +} + +if [ $# -eq 0 ]; then + display_usage + exit 1 +fi + +LOGS_DIRECTORY=${2:-/opt/app/aai-graphadmin/logs/migration/} +MTIME=$1 + +echo +echo 'Running migration summary:' +print "Logs directory: $LOGS_DIRECTORY" +print "Searching log files modified within last $MTIME minutes: \n" +echo + +for i in $(find -L $LOGS_DIRECTORY -mtime -$MTIME -name '*.log' ); +do + echo "Checking Log File: $i" + grep "Migration Error:" $i + grep "Migration Summary Count:" $i + echo +done + +echo 'Done' diff --git a/src/main/scripts/run_Migrations.sh b/src/main/scripts/run_Migrations.sh new file mode 100644 index 0000000..2b0f5c5 --- /dev/null +++ b/src/main/scripts/run_Migrations.sh @@ -0,0 +1,49 @@ +#!/bin/sh + +### +# ============LICENSE_START======================================================= +# org.onap.aai +# ================================================================================ +# Copyright (C) 2017-2018 AT&T Intellectual Property. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +### + +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh + +# TODO: There is a better way where you can pass in the function +# and then let the common functions check if the function exist and invoke it +# So this all can be templated out +start_date; +check_user; +source_profile; + +ARGS="-c ${PROJECT_HOME}/resources/etc/appprops/janusgraph-realtime.properties"; + +if [ -f "$PROJECT_HOME/resources/application.properties" ]; then + # Get the application properties file and look for all lines + # starting with either jms dmaap or niws + # Turn them into system properties and export JAVA_PRE_OPTS so + # execute spring jar will get those values + # This is only needed since dmaap is used by run_migrations + JAVA_PRE_OPTS="-Xms8g -Xmx8g"; + JMS_PROPS=$(egrep '^jms.bind.address' $PROJECT_HOME/resources/application.properties | cut -d"=" -f2- | sed 's/^\(.*\)$/-Dactivemq.tcp.url=\1/g' | tr '\n' ' '); + JAVA_PRE_OPTS="${JAVA_PRE_OPTS} ${JMS_PROPS}"; + export JAVA_PRE_OPTS; +fi; + +execute_spring_jar org.onap.aai.migration.MigrationController ${PROJECT_HOME}/resources/migration-logback.xml ${ARGS} "$@" +end_date; +exit 0
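On the JAVA_PRE_OPTS handling in run_Migrations.sh above: the egrep/cut/sed chain simply copies the jms.bind.address property into a -Dactivemq.tcp.url system property. A tiny illustration with a made-up address (the real value lives in the deployed application.properties):

    # Hypothetical jms.bind.address value, run through the same pipeline as run_Migrations.sh
    echo 'jms.bind.address=tcp://localhost:61647' | \
        egrep '^jms.bind.address' | cut -d"=" -f2- | \
        sed 's/^\(.*\)$/-Dactivemq.tcp.url=\1/g' | tr '\n' ' '
    # prints: -Dactivemq.tcp.url=tcp://localhost:61647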
\ No newline at end of file diff --git a/src/main/scripts/run_SendDeleteMigrationNotification.sh b/src/main/scripts/run_SendDeleteMigrationNotification.sh new file mode 100644 index 0000000..ebd8677 --- /dev/null +++ b/src/main/scripts/run_SendDeleteMigrationNotification.sh @@ -0,0 +1,65 @@ +#!/bin/ksh + +### +# ============LICENSE_START======================================================= +# org.onap.aai +# ================================================================================ +# Copyright (C) 2017 AT&T Intellectual Property. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +### + +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh + + +start_date; +check_user; +source_profile; + +INPUT_PATH=$1 + +if [ ! -d "$INPUT_PATH" ]; then + echo "Input directory $INPUT_PATH does not exist!!"; + exit +fi + +if [ $(ls ${INPUT_PATH}/* 2> /dev/null | wc -l) -eq 0 ]; then + echo "Input directory $INPUT_PATH does not contain any migration files!!"; + exit +fi + +INPUT_DIR_FOR_JAVA=${INPUT_PATH}/deleteevents +mkdir -p "$INPUT_DIR_FOR_JAVA" +INPUT_FILE_FOR_JAVA=${INPUT_DIR_FOR_JAVA}/dmaap_delete_files.txt +#sort --numeric-sort -k 1 -t '_' $(find ${INPUT_PATH}/DELETE-* -maxdepth 0 -type f) | awk -F '_' '{ print $2"_"$3; }' > $INPUT_FILE_FOR_JAVA +find ${INPUT_PATH} -type f -name 'DELETE-*' -exec cat {} + > $INPUT_FILE_FOR_JAVA + +shift + +ARGS="-c ${PROJECT_HOME}/resources/etc/appprops/janusgraph-realtime.properties --inputFile $INPUT_FILE_FOR_JAVA" + +if [ -f "$PROJECT_HOME/resources/application.properties" ]; then + # Get the application properties file and look for all lines + # starting with either jms dmaap or niws + # Turn them into system properties and export JAVA_PRE_OPTS so + # execute spring jar will get those values + # This is only needed since dmaap is used by run_migrations + JAVA_PRE_OPTS=$(egrep '^jms.bind.address' $PROJECT_HOME/resources/application.properties | cut -d"=" -f2- | sed 's/^\(.*\)$/-Dactivemq.tcp.url=\1/g' | tr '\n' ' '); + export JAVA_PRE_OPTS; +fi; + +execute_spring_jar org.onap.aai.util.SendDeleteMigrationNotificationsMain ${PROJECT_HOME}/resources/migration-logback.xml ${ARGS} "$@" +end_date; +exit 0
\ No newline at end of file diff --git a/src/main/scripts/run_SendMigrationNotification.sh b/src/main/scripts/run_SendMigrationNotification.sh new file mode 100644 index 0000000..4bcc0d9 --- /dev/null +++ b/src/main/scripts/run_SendMigrationNotification.sh @@ -0,0 +1,64 @@ +#!/bin/ksh + +### +# ============LICENSE_START======================================================= +# org.onap.aai +# ================================================================================ +# Copyright (C) 2017 AT&T Intellectual Property. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +### + +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh + + +start_date; +check_user; +source_profile; + +INPUT_PATH=$1 + +if [ ! -d "$INPUT_PATH" ]; then + echo "Input directory $INPUT_PATH does not exist!!"; + exit +fi + +if [ $(ls ${INPUT_PATH}/* 2> /dev/null | wc -l) -eq 0 ]; then + echo "Input directory $INPUT_PATH does not contain any migration files!!"; + exit +fi + +INPUT_DIR_FOR_JAVA=${INPUT_PATH}/combined +mkdir -p "$INPUT_DIR_FOR_JAVA" +INPUT_FILE_FOR_JAVA=${INPUT_DIR_FOR_JAVA}/sorted_dmaap_files.txt +sort --numeric-sort -k 1 -t '_' $(find ${INPUT_PATH}/* -maxdepth 0 -type f) | awk -F '_' '{ print $2"_"$3; }' > $INPUT_FILE_FOR_JAVA + +shift + +ARGS="-c ${PROJECT_HOME}/resources/etc/appprops/janusgraph-realtime.properties --inputFile $INPUT_FILE_FOR_JAVA" + +if [ -f "$PROJECT_HOME/resources/application.properties" ]; then + # Get the application properties file and look for all lines + # starting with either jms dmaap or niws + # Turn them into system properties and export JAVA_PRE_OPTS so + # execute spring jar will get those values + # This is only needed since dmaap is used by run_migrations + JAVA_PRE_OPTS=$(egrep '^jms.bind.address' $PROJECT_HOME/resources/application.properties | cut -d"=" -f2- | sed 's/^\(.*\)$/-Dactivemq.tcp.url=\1/g' | tr '\n' ' '); + export JAVA_PRE_OPTS; +fi; + +execute_spring_jar org.onap.aai.util.SendMigrationNotificationsMain ${PROJECT_HOME}/resources/migration-logback.xml ${ARGS} "$@" +end_date; +exit 0
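For clarity on the sort/awk step above: the migration event files are concatenated, their lines are sorted numerically on the first '_'-separated field, and only the second and third fields are written to sorted_dmaap_files.txt. The input lines below are invented purely to show the mechanics.

    # Fabricated event lines in the assumed <sequence>_<field2>_<field3> form
    printf '20_vertexB_rv2\n3_vertexA_rv1\n' > /tmp/ev_one
    printf '7_vertexC_rv3\n' > /tmp/ev_two

    sort --numeric-sort -k 1 -t '_' /tmp/ev_one /tmp/ev_two | awk -F '_' '{ print $2"_"$3; }'
    # prints, in sequence order:
    #   vertexA_rv1
    #   vertexC_rv3
    #   vertexB_rv2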
\ No newline at end of file diff --git a/src/main/scripts/schemaMod.sh b/src/main/scripts/schemaMod.sh new file mode 100644 index 0000000..d1fb009 --- /dev/null +++ b/src/main/scripts/schemaMod.sh @@ -0,0 +1,50 @@ +#!/bin/ksh +# +# This script is used to correct mistakes made in the database schema. +# It currently just allows you to change either the dataType and/or indexType on properties used by nodes. +# +# NOTE - Titan is not elegant in 0.5.3 about making changes to the schema. Bad properties never +# actually leave the database, they just get renamed and stop getting used. So it is +# really worthwhile to get indexes and dataTypes correct the first time around. +# Note also - This script just makes changes to the schema that is currently live. +# If you were to create a new schema in a brandy-new environment, it would look like +# whatever ex5.json (as of June 2015) told it to look like. So, part of making a +# change to the db schema should Always first be to make the change in ex5.json so that +# future environments will have the change. This script is just to change existing +# instances of the schema since schemaGenerator (as of June 2015) does not update things - it +# just does the initial creation. +# +# Boy, this is getting to be a big comment section... +# +# To use this script, you need to pass four parameters: +# propertyName -- the name of the property that you need to change either the index or dataType on +# targetDataType -- whether it's changing or not, you need to give it: String, Integer, Boolean or Long +# targetIndexInfo -- whether it's changing or not, you need to give it: index, noIndex or uniqueIndex +# preserveDataFlag -- true or false. The only reason I can think of why you'd ever want to +# set this to false would be maybe if you were changing to an incompatible dataType so didn't +# want it to try to use the old data (and fail). But 99% of the time this will just be 'true'. +# +# Ie. schemaMod flavor-id String index true +# + +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh +start_date; +check_user; + +if [ "$#" -ne 4 ]; then + echo "Illegal number of parameters" + echo "usage: $0 propertyName targetDataType targetIndexInfo preserveDataFlag" + exit 1 +fi + +source_profile; +execute_spring_jar org.onap.aai.dbgen.schemamod.SchemaMod ${PROJECT_HOME}/resources/schemaMod-logback.xml "$1" "$2" "$3" "$4" +if [ "$?" -ne "0" ]; then + echo "Problem executing schemaMod " + end_date; + exit 1 +fi + +end_date; +exit 0 diff --git a/src/main/scripts/uniquePropertyCheck.sh b/src/main/scripts/uniquePropertyCheck.sh new file mode 100644 index 0000000..c3c92bf --- /dev/null +++ b/src/main/scripts/uniquePropertyCheck.sh @@ -0,0 +1,24 @@ +#!/bin/ksh +# +# The script invokes UniqueProperty java class to see if the passed property is unique in the db and if +# not, to display where duplicate values are found. +# +# For example: uniquePropertyCheck.sh subscriber-name +# + +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh +start_date; +check_user; +source_profile; + +#execute_spring_jar org.onap.aai.util.UniquePropertyCheck ${PROJECT_HOME}/resources/uniquePropertyCheck-logback.xml "$@" +execute_spring_jar org.onap.aai.util.UniquePropertyCheck ${PROJECT_HOME}/resources/uniquePropertyCheck-logback.xml "$@" +ret_code=$? +if [ $ret_code != 0 ]; then + end_date; + exit $ret_code +fi + +end_date; +exit 0
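A couple of extra illustrative invocations for the two tools above; the property names here are examples only and should be checked against the current schema before running anything against a real database.

    # Report everywhere a supposedly-unique property actually has duplicate values
    uniquePropertyCheck.sh subscriber-name

    # Change an existing property to dataType String with a unique index, preserving data
    schemaMod.sh service-instance-id String uniqueIndex true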
\ No newline at end of file diff --git a/src/main/scripts/updatePem.sh b/src/main/scripts/updatePem.sh new file mode 100644 index 0000000..e43a2eb --- /dev/null +++ b/src/main/scripts/updatePem.sh @@ -0,0 +1,38 @@ +#!/bin/ksh + +### +# ============LICENSE_START======================================================= +# org.onap.aai +# ================================================================================ +# Copyright (C) 2017 AT&T Intellectual Property. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +### +# +COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P ) +. ${COMMON_ENV_PATH}/common_functions.sh + +start_date; +check_user; +source_profile; + +CERTPATH=$PROJECT_HOME/resources/etc/auth/ +KEYNAME=aaiClientPrivateKey.pem +CERTNAME=aaiClientPublicCert.pem + +pw=$(execute_spring_jar org.onap.aai.util.AAIConfigCommandLinePropGetter "" "aai.keystore.passwd" 2> /dev/null | tail -1) +openssl pkcs12 -in ${CERTPATH}/aai-client-cert.p12 -out $CERTPATH$CERTNAME -clcerts -nokeys -passin pass:$pw +openssl pkcs12 -in ${CERTPATH}/aai-client-cert.p12 -out $CERTPATH$KEYNAME -nocerts -nodes -passin pass:$pw +end_date; +exit 0
\ No newline at end of file
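After updatePem.sh has regenerated the PEM files, the output can be sanity-checked with stock openssl commands. This is only a suggested check; the paths assume the same install layout the scripts above use.

    CERTPATH=/opt/app/aai-graphadmin/resources/etc/auth
    # Show the subject and expiry of the extracted public cert
    openssl x509 -in ${CERTPATH}/aaiClientPublicCert.pem -noout -subject -enddate
    # Verify the extracted private key is well formed (prints "RSA key ok" on success)
    openssl rsa -in ${CERTPATH}/aaiClientPrivateKey.pem -check -noout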