Diffstat (limited to 'kubernetes')
-rw-r--r--  kubernetes/aaf/components/aaf-cass/values.yaml                                             |  2
-rw-r--r--  kubernetes/aai/components/aai-traversal/resources/config/application-keycloak.properties   | 13
-rw-r--r--  kubernetes/aai/components/aai-traversal/templates/configmap.yaml                           |  1
-rw-r--r--  kubernetes/aai/components/aai-traversal/templates/deployment.yaml                          |  3
-rw-r--r--  kubernetes/aai/components/aai-traversal/values.yaml                                        | 16
-rw-r--r--  kubernetes/appc/components/appc-cdt/values.yaml                                            | 32
-rwxr-xr-x  kubernetes/appc/resources/config/appc/opt/onap/appc/bin/startODL.sh                        |  2
-rw-r--r--  kubernetes/common/cassandra/values.yaml                                                    |  2
-rwxr-xr-x  kubernetes/helm/plugins/deploy/deploy.sh                                                   | 14
-rwxr-xr-x  kubernetes/helm/plugins/undeploy/undeploy.sh                                               |  2
-rw-r--r--  kubernetes/portal/components/portal-mariadb/resources/config/mariadb/docker-entrypoint.sh  | 10
-rwxr-xr-x  kubernetes/robot/instantiate-k8s.sh                                                        |  8
12 files changed, 63 insertions(+), 42 deletions(-)
diff --git a/kubernetes/aaf/components/aaf-cass/values.yaml b/kubernetes/aaf/components/aaf-cass/values.yaml
index 525674434e..a1a1abe55a 100644
--- a/kubernetes/aaf/components/aaf-cass/values.yaml
+++ b/kubernetes/aaf/components/aaf-cass/values.yaml
@@ -93,4 +93,4 @@ persistence:
mountSubPath: "cass"
volumeReclaimPolicy: Retain
accessMode: ReadWriteOnce
- size: 20Gi
+ size: 5Gi
diff --git a/kubernetes/aai/components/aai-traversal/resources/config/application-keycloak.properties b/kubernetes/aai/components/aai-traversal/resources/config/application-keycloak.properties
new file mode 100644
index 0000000000..dd1956b63f
--- /dev/null
+++ b/kubernetes/aai/components/aai-traversal/resources/config/application-keycloak.properties
@@ -0,0 +1,13 @@
+spring.autoconfigure.exclude=\
+ org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration,\
+ org.springframework.boot.autoconfigure.orm.jpa.HibernateJpaAutoConfiguration
+
+multi.tenancy.enabled={{ .Values.config.keycloak.multiTenancy.enabled }}
+keycloak.auth-server-url=http://{{ .Values.config.keycloak.host }}:{{ .Values.config.keycloak.port }}/auth
+keycloak.realm={{ .Values.config.keycloak.realm }}
+keycloak.resource={{ .Values.config.keycloak.resource }}
+keycloak.public-client=false
+keycloak.principal-attribute=preferred_username
+
+keycloak.ssl-required=external
+keycloak.bearer-only=true
diff --git a/kubernetes/aai/components/aai-traversal/templates/configmap.yaml b/kubernetes/aai/components/aai-traversal/templates/configmap.yaml
index c0bcb3b491..8f1bd2ddc8 100644
--- a/kubernetes/aai/components/aai-traversal/templates/configmap.yaml
+++ b/kubernetes/aai/components/aai-traversal/templates/configmap.yaml
@@ -32,6 +32,7 @@ data:
{{ tpl (.Files.Glob "resources/config/janusgraph-cached.properties").AsConfig . | indent 2 }}
{{ tpl (.Files.Glob "resources/config/aaiconfig.properties").AsConfig . | indent 2 }}
{{ tpl (.Files.Glob "resources/config/application.properties").AsConfig . | indent 2 }}
+{{ tpl (.Files.Glob "resources/config/application-keycloak.properties").AsConfig . | indent 2 }}
{{ tpl (.Files.Glob "resources/config/realm.properties").AsConfig . | indent 2 }}
---
apiVersion: v1
diff --git a/kubernetes/aai/components/aai-traversal/templates/deployment.yaml b/kubernetes/aai/components/aai-traversal/templates/deployment.yaml
index 7e54f1d432..dc1c010261 100644
--- a/kubernetes/aai/components/aai-traversal/templates/deployment.yaml
+++ b/kubernetes/aai/components/aai-traversal/templates/deployment.yaml
@@ -188,6 +188,9 @@ spec:
- mountPath: /opt/app/aai-traversal/resources/application.properties
name: {{ include "common.fullname" . }}-config
subPath: application.properties
+ - mountPath: /opt/app/aai-traversal/resources/application-keycloak.properties
+ name: {{ include "common.fullname" . }}-config
+ subPath: application-keycloak.properties
ports:
- containerPort: {{ .Values.service.internalPort }}
- containerPort: {{ .Values.service.internalPort2 }}
diff --git a/kubernetes/aai/components/aai-traversal/values.yaml b/kubernetes/aai/components/aai-traversal/values.yaml
index 38011a0c98..ad4279a543 100644
--- a/kubernetes/aai/components/aai-traversal/values.yaml
+++ b/kubernetes/aai/components/aai-traversal/values.yaml
@@ -59,7 +59,7 @@ global: # global defaults
# Active spring profiles for the resources microservice
profiles:
- active: production,dmaap,aaf-auth
+ active: production,dmaap,aaf-auth #,keycloak
# Notification event specific properties
notification:
@@ -168,6 +168,20 @@ aai_enpoints:
# application configuration
config:
+ # configure keycloak according to your environment.
+ # don't forget to add keycloak in active profiles above (global.config.profiles)
+ keycloak:
+ host: keycloak.your.domain
+ port: 8180
+ # Specifies a set of users, credentials, roles, and groups
+ realm: aai-traversal
+ # Used by any client application for enabling fine-grained authorization for their protected resources
+ resource: aai-traversal-app
+ # If set to true, additional criteria will be added into traversal query to returns all the vertices that match
+ # the data-owner property with the given role to the user in keycloak
+ multiTenancy:
+ enabled: true
+
# Specifies timeout information such as application specific and limits
timeout:
# If set to true application will timeout for queries taking longer than limit
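The keycloak block added above ships disabled: the keycloak profile stays commented out in global.config.profiles.active. Below is a minimal override sketch for switching it on at deploy time; the release name, the local/onap chart reference and the aai.aai-traversal key prefix for the umbrella chart are assumptions for illustration, not part of this change.

# Hypothetical overrides file; the keys mirror the values.yaml hunk above.
cat > /tmp/keycloak-overrides.yaml <<'EOF'
aai:
  aai-traversal:
    global:
      config:
        profiles:
          active: production,dmaap,aaf-auth,keycloak
    config:
      keycloak:
        host: keycloak.example.org   # illustrative hostname
        port: 8180
        realm: aai-traversal
        resource: aai-traversal-app
        multiTenancy:
          enabled: true
EOF
helm deploy onap local/onap --namespace onap -f /tmp/keycloak-overrides.yaml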
diff --git a/kubernetes/appc/components/appc-cdt/values.yaml b/kubernetes/appc/components/appc-cdt/values.yaml
index 3b1ff47116..5765d3482d 100644
--- a/kubernetes/appc/components/appc-cdt/values.yaml
+++ b/kubernetes/appc/components/appc-cdt/values.yaml
@@ -38,27 +38,17 @@ certInitializer:
cadi_longitude: "-72.0"
credsPath: /opt/app/osaaf/local
aaf_add_config: |
- echo "*** retrieving password for keystore"
- export $(/opt/app/aaf_config/bin/agent.sh local showpass \
- {{.Values.fqi}} {{ .Values.fqdn }} | grep '^c' | xargs -0)
- if [ -z "$cadi_keystore_password_p12" ]
- then
- echo " /!\ certificates retrieval failed"
- exit 1
- else
- cd {{ .Values.credsPath }};
- mkdir -p certs;
- echo "*** transform AAF certs into pem files"
- mkdir -p {{ .Values.credsPath }}/certs
- openssl pkcs12 -in {{ .Values.credsPath }}/{{ .Values.fqi_namespace }}.p12 \
- -nokeys -out {{ .Values.credsPath }}/certs/cert.pem \
- -passin pass:$cadi_keystore_password_p12 \
- -passout pass:$cadi_keystore_password_p12
- echo "*** copy key file"
- cp {{ .Values.fqi_namespace }}.key certs/key.pem;
- echo "*** change ownership of certificates to targeted user"
- chown -R 1000 {{ .Values.credsPath }}
- fi
+ echo "*** transform AAF certs into pem files"
+ mkdir -p {{ .Values.credsPath }}/certs
+ openssl pkcs12 -in {{ .Values.credsPath }}/{{ .Values.fqi_namespace }}.p12 \
+ -nokeys -out {{ .Values.credsPath }}/certs/cert.pem \
+ -passin pass:$cadi_keystore_password_p12 \
+ -passout pass:$cadi_keystore_password_p12
+ echo "*** copy key file"
+ cp {{ .Values.credsPath }}/{{ .Values.fqi_namespace }}.key \
+ {{ .Values.credsPath }}/certs/key.pem
+ echo "*** change ownership of certificates to targeted user"
+ chown -R 1000 {{ .Values.credsPath }}
#################################################################
# Application configuration defaults.
diff --git a/kubernetes/appc/resources/config/appc/opt/onap/appc/bin/startODL.sh b/kubernetes/appc/resources/config/appc/opt/onap/appc/bin/startODL.sh
index 2fd6db1360..85f5aac246 100755
--- a/kubernetes/appc/resources/config/appc/opt/onap/appc/bin/startODL.sh
+++ b/kubernetes/appc/resources/config/appc/opt/onap/appc/bin/startODL.sh
@@ -65,7 +65,7 @@ DBINIT_DIR=${DBINIT_DIR:-/opt/opendaylight/current/daexim}
# Wait for database to init properly
#
echo "Waiting for mariadbgalera"
-until mysql -h {{.Values.config.mariadbGaleraSVCName}}.{{.Release.Namespace}} -u root -p${MYSQL_PASSWD} mysql &> /dev/null
+until mysql -h {{.Values.config.mariadbGaleraSVCName}}.{{.Release.Namespace}} -u root -p${MYSQL_PASSWD} mysql >/dev/null 2>&1
do
printf "."
sleep 1
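The redirection change above replaces a bash-only shortcut with the POSIX form. A minimal sketch of the difference, using a throwaway ls call rather than the mysql command from the script:

#!/bin/sh
# '&> /dev/null' is a bash extension; a strictly POSIX shell (dash, BusyBox sh)
# parses it as '&' (run in background) plus '> /dev/null', which is not the
# intended "discard stdout and stderr".
# Portable equivalent: redirect stdout, then duplicate stderr onto it.
ls /nonexistent >/dev/null 2>&1 || echo "ls failed quietly, nothing else printed"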
diff --git a/kubernetes/common/cassandra/values.yaml b/kubernetes/common/cassandra/values.yaml
index 5a50d8e9e8..9f19bf5c14 100644
--- a/kubernetes/common/cassandra/values.yaml
+++ b/kubernetes/common/cassandra/values.yaml
@@ -126,7 +126,7 @@ persistence:
## storageClass: "-"
## Not set as it depends of the backup enabledment or not.
accessMode: ReadWriteOnce
- size: 2Gi
+ size: 10Gi
mountPath: /dockerdata-nfs
mountSubPath: cassandra
storageType: local
diff --git a/kubernetes/helm/plugins/deploy/deploy.sh b/kubernetes/helm/plugins/deploy/deploy.sh
index 6267a35312..44e8e56aa5 100755
--- a/kubernetes/helm/plugins/deploy/deploy.sh
+++ b/kubernetes/helm/plugins/deploy/deploy.sh
@@ -2,7 +2,7 @@
usage() {
cat << EOF
-Install (or upgrade) an umbrella Helm Chart, and its subcharts, as separate Helm Releases
+Install (or upgrade) an umbrella Helm Chart, and its subcharts, as separate Helm Releases
The umbrella Helm Chart is broken apart into a parent release and subchart releases.
Subcharts the are disabled (<chart>.enabled=false) will not be installed or upgraded.
@@ -54,7 +54,7 @@ generate_overrides() {
cat $COMPUTED_OVERRIDES | sed '/common:/,/consul:/d' \
| sed -n '/^'"$START"'/,/'log:'/p' | sed '1d;$d' >> $GLOBAL_OVERRIDES
else
- SUBCHART_DIR="$CACHE_SUBCHART_DIR/$(cut -d':' -f1 <<<"$START")"
+ SUBCHART_DIR="$CACHE_SUBCHART_DIR/$(echo "$START" |cut -d':' -f1)"
if [[ -d "$SUBCHART_DIR" ]]; then
if [[ -z "$END" ]]; then
cat $COMPUTED_OVERRIDES | sed -n '/^'"$START"'/,/'"$END"'/p' \
@@ -96,8 +96,8 @@ deploy() {
RELEASE=$1
CHART_URL=$2
FLAGS=${@:3}
- CHART_REPO="$(cut -d'/' -f1 <<<"$CHART_URL")"
- CHART_NAME="$(cut -d'/' -f2 <<<"$CHART_URL")"
+ CHART_REPO="$(echo "$CHART_URL" |cut -d'/' -f1)"
+ CHART_NAME="$(echo "$CHART_URL" |cut -d'/' -f2)"
if [[ $HELM_VER = "v3."* ]]; then
CACHE_DIR=~/.local/share/helm/plugins/deploy/cache
else
@@ -146,9 +146,9 @@ deploy() {
DEPLOY_FLAGS=$(resolve_deploy_flags "$FLAGS")
# determine if upgrading individual subchart or entire parent + subcharts
- SUBCHART_RELEASE="$(cut -d'-' -f2 <<<"$RELEASE")"
+ SUBCHART_RELEASE="$(echo "$RELEASE" |cut -d'-' -f2)"
# update specified subchart without parent
- RELEASE="$(cut -d'-' -f1 <<<"$RELEASE")"
+ RELEASE="$(echo "$RELEASE" |cut -d'-' -f1)"
if [[ $SUBCHART_RELEASE = $RELEASE ]]; then
SUBCHART_RELEASE=
fi
@@ -257,7 +257,7 @@ deploy() {
n=${#array[*]}
for (( i = n-1; i >= 0; i-- )); do
if [[ $HELM_VER = "v3."* ]]; then
- helm del "${array[i]}"
+ helm del "${array[i]}"
else
helm del "${array[i]}" --purge
fi
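deploy.sh (and the mariadb entrypoint further down) also swaps bash here-strings for plain echo pipes. A minimal sketch of the practical difference, reusing the CHART_URL split from the hunk above with an illustrative value:

#!/bin/sh
# '<<<"$VAR"' (a here-string) is a bash/ksh/zsh feature and a syntax error in a
# POSIX shell such as dash; piping an echo feeds the same input portably.
CHART_URL="local/onap"
CHART_REPO="$(echo "$CHART_URL" | cut -d'/' -f1)"   # -> local
CHART_NAME="$(echo "$CHART_URL" | cut -d'/' -f2)"   # -> onap
echo "repo=$CHART_REPO name=$CHART_NAME"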
diff --git a/kubernetes/helm/plugins/undeploy/undeploy.sh b/kubernetes/helm/plugins/undeploy/undeploy.sh
index 8191174314..e5c0c12711 100755
--- a/kubernetes/helm/plugins/undeploy/undeploy.sh
+++ b/kubernetes/helm/plugins/undeploy/undeploy.sh
@@ -2,7 +2,7 @@
usage() {
cat << EOF
-Delete an umbrella Helm Chart, and its subcharts, that was previously deployed using 'Helm deploy'.
+Delete an umbrella Helm Chart, and its subcharts, that was previously deployed using 'Helm deploy'.
Example of deleting all Releases that have the prefix 'demo'.
$ helm undeploy demo
diff --git a/kubernetes/portal/components/portal-mariadb/resources/config/mariadb/docker-entrypoint.sh b/kubernetes/portal/components/portal-mariadb/resources/config/mariadb/docker-entrypoint.sh
index 390241fa1d..c4a21b927f 100644
--- a/kubernetes/portal/components/portal-mariadb/resources/config/mariadb/docker-entrypoint.sh
+++ b/kubernetes/portal/components/portal-mariadb/resources/config/mariadb/docker-entrypoint.sh
@@ -107,7 +107,7 @@ docker_temp_server_start() {
if [ -z "$DATABASE_ALREADY_EXISTS" ]; then
extraArgs+=( '--dont-use-mysql-root-password' )
fi
- if docker_process_sql "${extraArgs[@]}" --database=mysql <<<'SELECT 1' &> /dev/null; then
+ if echo 'SELECT 1' |docker_process_sql "${extraArgs[@]}" --database=mysql >/dev/null 2>&1; then
break
fi
sleep 1
@@ -263,19 +263,19 @@ docker_setup_db() {
# Creates a custom database and user if specified
if [ -n "$MYSQL_DATABASE" ]; then
mysql_note "Creating database ${MYSQL_DATABASE}"
- docker_process_sql --database=mysql <<<"CREATE DATABASE IF NOT EXISTS \`$MYSQL_DATABASE\` ;"
+ echo "CREATE DATABASE IF NOT EXISTS \`$MYSQL_DATABASE\` ;" |docker_process_sql --database=mysql
fi
if [ -n "$MYSQL_USER" ] && [ -n "$MYSQL_PASSWORD" ]; then
mysql_note "Creating user ${MYSQL_USER}"
- docker_process_sql --database=mysql <<<"CREATE USER '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD' ;"
+ echo "CREATE USER '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD' ;" |docker_process_sql --database=mysql
if [ -n "$MYSQL_DATABASE" ]; then
mysql_note "Giving user ${MYSQL_USER} access to schema ${MYSQL_DATABASE}"
- docker_process_sql --database=mysql <<<"GRANT ALL ON \`${MYSQL_DATABASE//_/\\_}\`.* TO '$MYSQL_USER'@'%' ;"
+ echo "GRANT ALL ON \`${MYSQL_DATABASE//_/\\_}\`.* TO '$MYSQL_USER'@'%' ;" |docker_process_sql --database=mysql
fi
- docker_process_sql --database=mysql <<<"FLUSH PRIVILEGES ;"
+ echo "FLUSH PRIVILEGES ;" |docker_process_sql --database=mysql
fi
}
diff --git a/kubernetes/robot/instantiate-k8s.sh b/kubernetes/robot/instantiate-k8s.sh
index f10ad7e493..f4f6b04e4c 100755
--- a/kubernetes/robot/instantiate-k8s.sh
+++ b/kubernetes/robot/instantiate-k8s.sh
@@ -111,9 +111,9 @@ kubectl --namespace $NAMESPACE cp $FOLDER ${POD}:/tmp/vnfdata.${BUILDNUM}
echo "Executing instantiation..."
if [ $POLL = 1 ]; then
- kubectl --namespace $NAMESPACE exec ${POD} -- bash -c "${ETEHOME}/runTags.sh ${VARIABLEFILES} ${VARIABLES} -d /share/logs/${OUTPUT_FOLDER} ${TAGS} --listener ${ETEHOME}/testsuite/eteutils/robotframework-onap/listeners/OVPListener.py --display $DISPLAY_NUM > /tmp/vnf_instantiation.$BUILDNUM.log 2>&1 &"
+ kubectl --namespace $NAMESPACE exec ${POD} -- bash -c "${ETEHOME}/runTags.sh ${VARIABLEFILES} ${VARIABLES} -d /share/logs/${OUTPUT_FOLDER} ${TAGS} --listener ${ETEHOME}/testsuite/eteutils/robotframework-onap/listeners/OVPListener.py --display $DISPLAY_NUM > /tmp/vnf_instantiation.$BUILDNUM.log 2>&1 &"
- pid=`kubectl --namespace $NAMESPACE exec ${POD} -- bash -c "pgrep runTags.sh -n"`
+ pid=`kubectl --namespace $NAMESPACE exec ${POD} -- bash -c "pgrep runTags.sh -n"`
if [ -z "$pid" ]; then
echo "robot testsuite unable to start"
@@ -123,10 +123,10 @@ if [ $POLL = 1 ]; then
kubectl --namespace $NAMESPACE exec ${POD} -- bash -c "while ps -p \"$pid\" --no-headers | grep -v defunct; do echo \$'\n\n'; echo \"Testsuite still running \"\`date\`; echo \"LOG FILE: \"; tail -10 /tmp/vnf_instantiation.$BUILDNUM.log; sleep 30; done"
else
- kubectl --namespace $NAMESPACE exec ${POD} -- bash -c "${ETEHOME}/runTags.sh ${VARIABLEFILES} ${VARIABLES} -d /share/logs/${OUTPUT_FOLDER} ${TAGS} --listener ${ETEHOME}/testsuite/eteutils/robotframework-onap/listeners/OVPListener.py --display $DISPLAY_NUM"
+ kubectl --namespace $NAMESPACE exec ${POD} -- bash -c "${ETEHOME}/runTags.sh ${VARIABLEFILES} ${VARIABLES} -d /share/logs/${OUTPUT_FOLDER} ${TAGS} --listener ${ETEHOME}/testsuite/eteutils/robotframework-onap/listeners/OVPListener.py --display $DISPLAY_NUM"
fi
-set +x
+set +x
echo "testsuite has finished"