summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorsendil kumar Jaya kumar <sendil.kumar@wipro.com>2022-03-03 15:15:05 +0530
committersendil kumar Jaya kumar <sendil.kumar@wipro.com>2023-03-31 12:32:53 +0530
commit93d9a28ba317475083a367d040ab606d6576fb03 (patch)
treec231446eb527ceb9397bce857c8b3dad10b617ce
parent5f69c24ad78121a2840b5299583791e557f8b535 (diff)
Add ml-prediction-ms with basic build infrastruct.
Added offline training code, along with ml slice prediction model, The main ml-prediction-ms component code changes are added to perform prediction on single slice. Issue-ID: DCAEGEN2-3067 Signed-off-by: sendil kumar Jaya kumar <sendil.kumar@wipro.com> Change-Id: Ia2e50dcbd97d03a96884a2bf5417edbf2fa04b4a
-rw-r--r--components/ml-prediction-ms/.gitignore100
-rwxr-xr-xcomponents/ml-prediction-ms/Changelog.md9
-rwxr-xr-xcomponents/ml-prediction-ms/Dockerfile47
-rwxr-xr-xcomponents/ml-prediction-ms/LICENSE.txt18
-rwxr-xr-xcomponents/ml-prediction-ms/README.md114
-rwxr-xr-xcomponents/ml-prediction-ms/conftest.py18
-rwxr-xr-xcomponents/ml-prediction-ms/docker-compose.yaml69
-rwxr-xr-xcomponents/ml-prediction-ms/log_config.yaml45
-rwxr-xr-xcomponents/ml-prediction-ms/ml-prediction-ms.config37
-rwxr-xr-xcomponents/ml-prediction-ms/model/best_model.h5bin0 -> 72704 bytes
-rwxr-xr-xcomponents/ml-prediction-ms/pom.xml247
-rwxr-xr-xcomponents/ml-prediction-ms/pyproject.toml6
-rwxr-xr-xcomponents/ml-prediction-ms/requirements.txt29
-rwxr-xr-xcomponents/ml-prediction-ms/setup.py30
-rwxr-xr-xcomponents/ml-prediction-ms/src/__init__.py24
-rwxr-xr-xcomponents/ml-prediction-ms/src/run.py680
-rwxr-xr-xcomponents/ml-prediction-ms/tests/__init__.py22
-rwxr-xr-xcomponents/ml-prediction-ms/tests/unit/__init__.py18
-rw-r--r--components/ml-prediction-ms/tests/unit/sample.json1
-rwxr-xr-xcomponents/ml-prediction-ms/tests/unit/test.xlsxbin0 -> 18133 bytes
-rwxr-xr-xcomponents/ml-prediction-ms/tests/unit/test_predict_unittest.py232
-rwxr-xr-xcomponents/ml-prediction-ms/tox.ini41
-rwxr-xr-xcomponents/ml-prediction-ms/train/ExampleSample_train_data_s1.xlsxbin0 -> 3773850 bytes
-rwxr-xr-xcomponents/ml-prediction-ms/train/ExampleSample_train_data_s1_smaller.xlsxbin0 -> 5921 bytes
-rwxr-xr-xcomponents/ml-prediction-ms/train/ParseTrain.py336
-rwxr-xr-xcomponents/ml-prediction-ms/version.properties26
26 files changed, 2149 insertions, 0 deletions
diff --git a/components/ml-prediction-ms/.gitignore b/components/ml-prediction-ms/.gitignore
new file mode 100644
index 00000000..98ba212b
--- /dev/null
+++ b/components/ml-prediction-ms/.gitignore
@@ -0,0 +1,100 @@
+mvn-phase-lib.sh
+tox-local.ini
+*.wgn
+.pytest_cache/
+xunit-results.xml
+.DS_Store
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+venv-tox/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*,cover
+.hypothesis/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# IPython Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# dotenv
+.env
+
+# virtualenv
+venv/
+ENV/
+
+# Spyder project settings
+.spyderproject
+
+# Rope project settings
+.ropeproject
+
+# Test report
+xunit-reports
+coverage-reports
diff --git a/components/ml-prediction-ms/Changelog.md b/components/ml-prediction-ms/Changelog.md
new file mode 100755
index 00000000..25bb5351
--- /dev/null
+++ b/components/ml-prediction-ms/Changelog.md
@@ -0,0 +1,9 @@
+# Change Log
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/)
+and this project adheres to [Semantic Versioning](http://semver.org/).
+
+## [1.0.0] - 2022/03/03
+
+- [DCAEGEN2-3067](https://jira.onap.org/browse/DCAEGEN2-3067) - Added Training and Prediction code for ml-prediction-ms
diff --git a/components/ml-prediction-ms/Dockerfile b/components/ml-prediction-ms/Dockerfile
new file mode 100755
index 00000000..f2233a05
--- /dev/null
+++ b/components/ml-prediction-ms/Dockerfile
@@ -0,0 +1,47 @@
+# LICENSE_START=======================================================
+# ml-prediction-ms
+# ================================================================================
+# Copyright (C) 2023 Wipro Limited
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+
+
+FROM nexus3.onap.org:10001/onap/integration-python:10.1.0 as build
+
+USER root
+RUN set -eux; \
+ apk add \
+ build-base \
+ python3-dev
+
+ARG user=onap
+ARG group=onap
+USER $user
+
+FROM python
+
+ADD requirements.txt requirements.txt
+RUN pip3 install -r ./requirements.txt
+
+ADD src src
+ADD model model
+ADD ml-prediction-ms.config ml-prediction-ms.config
+ADD tests tests
+
+EXPOSE 5000
+
+RUN chmod 755 /src/run.py
+
+CMD ["/src/run.py"]
diff --git a/components/ml-prediction-ms/LICENSE.txt b/components/ml-prediction-ms/LICENSE.txt
new file mode 100755
index 00000000..766adc61
--- /dev/null
+++ b/components/ml-prediction-ms/LICENSE.txt
@@ -0,0 +1,18 @@
+# ============LICENSE_START=======================================================
+# ml-prediction-ms
+# ================================================================================
+# Copyright (C) 2023 Wipro Limited
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
diff --git a/components/ml-prediction-ms/README.md b/components/ml-prediction-ms/README.md
new file mode 100755
index 00000000..11a50aab
--- /dev/null
+++ b/components/ml-prediction-ms/README.md
@@ -0,0 +1,114 @@
+# LICENSE_START=======================================================
+# ml-prediction-ms
+# ================================================================================
+# Copyright (C) 2023 Wipro Limited
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+This Project aims at training an ML model to predict the 'maxNumberOfConns' for each cell. The model is trained against the 'Performance Measurement' data to predict the output. This model will be later used to predict the real time configurations of cells and slices in ml-prediction-ms.
+
+We are using a stacked LSTM model to perform predictions of the slices and cell configurations.
+
+Assumption
+----------
+The training module is offline code; it is not part of the ml-prediction-ms runtime.
+
+Also note that a pretrained model file is shared along with the source code at /ml-prediction-ms/model/best_model.h5; this is a generic trained model file that can be used for live prediction.
+
+However, if you choose to retrain the model, please refer to Wiki step 1 (ML Offline Training) for a reference on training the machine learning model:
+
+https://wiki.onap.org/display/DW/Smart+Intent+Guarantee+based+on+Closed-loop+-+ML+MS+Enhancements
+
+The offline training code is located at /ml-prediction-ms/train/ParseTrain.py. This ML training code consumes or prepares the training data from the following prerequisite topics of RANSim and ConfigDb.
+
+Hereby we share a sample training file that needs to be prepared using the below RANSim topics and the ConfigDB application:
+/services/components/ml-prediction-ms/train/ExampleSample_train_data_s1
+
+Prerequisites Topics to run both prediction module and Training code:
+-------------------------------------------------------------
+1. Training module requires following RANSim Topics. To setup RANsim, refer
+ https://wiki.onap.org/pages/viewpage.action?pageId=93002871
+
+ -'http://<<ransim>>:8081/ransim/api/GenerateIntelligentSlicingPmData'- Start data generation.
+ -'http://<<ransim>>:8081/ransim/api/stopIntelligentSlicingPmData'- Stop data generation.
+ -'http://message-router:3904/events/unauthenticated.PERFORMANCE_MEASUREMENTS/mlms-cg/mlms-cid'- To receive the PM data (3 feature vectors). If there's any error, check and restart the code.
+
+ ConfigDb application reference https://wiki.onap.org/plugins/servlet/mobile?contentId=1015829#content/view/93002873
+ -'http://<<config-db>>:8086/api/sdnc-config-db/v4/nrcellcu-configdata/'+cell_id+'/snssai/01-B989BD' - To get the saved config details (For 4th and 5th Feature)
+
+ Please refer this wiki link for the configuration and relevant component deployment.
+ https://wiki.onap.org/display/DW/Smart+Intent+Guarantee+based+on+Closed-loop
+
+
+ Please note :
+ - As configdb is onap service, above <<config-db>> should be switched to corresponding servicename or IP address in ml-prediction-ms.config file
+ - As ransim is onap service, above <<ransim>> should be switched to corresponding servicename or IP address in ml-prediction-ms.config file
+ - In standalone mode we need to set the IP address and port number for message-router service
+
+
+
+
+
+HOW TO TRAIN:
+-------------
+In order to complete the training process, we need at least 0.1 million samples of PM data. These are gathered from one slice. These samples shall be gathered from the simulation environment. In the simulation environment, each time-instance data point is generated for all cells at an interval of 3 seconds, whereas in the actual environment, each time-instance data point is generated every 15 minutes. Therefore, we used the RAN PM data simulation environment to generate the data needed for training. The required predicted data for each time instance for the cell are synthetically generated and used in supervised learning.
+
+To run the training module by using the RAN PM data simulation
+move to folder location 'ml-prediction-ms' then Run python3 train/ParseTrain.py
+
+The above module acquires the training data from the RAN PM data simulation. Once the data are acquired via the topics, the module performs training.
+
+However, if the data are acquired from the real-time RAN environment, where each sample is generated every 15 minutes, we need to adjust the sleep time at line 146 in ParseTrain.py to generate 0.1 million samples (counted across all cells and slices).
+
+#Wait for 2 hrs for data generation
+time.sleep(3600*2)
+
+In order to train the ML model to acquire large amount of slice and cell data in small duration
+we have made changes in the config to generate time series data for all slices and cells for every 300ms instead of 10 sec duration,
+This is done to speed up the training time period.
+
+More Reference https://wiki.onap.org/display/DW/Smart+Intent+Guarantee+based+on+Closed-loop+-+ML+MS+Enhancements
+contains the design details and requirements for training.
+
+
+
+HOW TO LAUNCH Prediction:
+-------------------------
+To Run Prediction move to folder location 'ml-prediction-ms' then Run python3 src/run.py
+
+HOW TO LAUNCH Training:
+move to folder location 'ml-prediction-ms' then Run python3 train/ParseTrain.py in the command line
+
+
+Training Approach that is followed to build the Machine Learning model
+1. The data are acquired in offline mode.
+2. The data acquisition for training will take around 4-5 hours to have enough samples for training.
+3. The time series data generated by the RAN Simulator using the PERFORMANCE_MEASUREMENTS topic has an interval of one second between two consecutive instance data points.
+4. After this data acquisition process the model will take around 15 minutes to run for 250 epochs with 512 batch size.
+5. The training model is designed to auto select the best hyperparameters for the machine learning model generation.
+
+The functionality in Training module
+1. Get Slices/Cell Data: The module performs start and stop on Topics to acquire the data.
+The Topics should be configured to generate single-slice data only. In the future we will support multiple slices. However, there is no limitation on the number of cells.
+2. Process Slices/Cells Data: The data are acquired from the DMaaP topic, then preprocessed and parsed. Here, we are synthetically generating the 5th feature, i.e. ‘Predicted_maxNumberOfConns’, based on the percent increase in the ‘Failure/Total session requested’ ratios.
+3. We train the model on a single slice, so we append all the cells one after the other as part of the training strategy.
+- Now we have the data in the form of single cell.
+- We have increased the number of training samples.
+
+
+In Train functions following activities are performed.
+1. The data is divided into 2 categories: a)Training set (Used for the training). b)Test Data (Used for test/validation). After this step, the data is ready to train a model.
+2. Train: This will take the training set created in the last step, normalize it using MinMaxScaler and then 'series_to_supervised' method converts the timeseries into Forecast series to be fed to the LSTM.
+3. After some more pre-processing, the data is fed to the Keras sequential model which contains 20 stacked LSTMs and 1 dense layer at the end.
+4. The best model is chosen after each checkpoints based on the 'validation loss' and archived to be used for predictions.
diff --git a/components/ml-prediction-ms/conftest.py b/components/ml-prediction-ms/conftest.py
new file mode 100755
index 00000000..9870baf2
--- /dev/null
+++ b/components/ml-prediction-ms/conftest.py
@@ -0,0 +1,18 @@
+# ===================LICENSE_START=======================================================
+# ml-prediction-ms
+# ================================================================================
+# Copyright (C) 2023 Wipro Limited
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
diff --git a/components/ml-prediction-ms/docker-compose.yaml b/components/ml-prediction-ms/docker-compose.yaml
new file mode 100755
index 00000000..fcdaa628
--- /dev/null
+++ b/components/ml-prediction-ms/docker-compose.yaml
@@ -0,0 +1,69 @@
+# ============LICENSE_START=======================================================
+# ml-prediction-ms
+# ================================================================================
+# Copyright (C) 2020 Wipro Limited.
+# ==============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+version: '2.4'
+networks:
+ ml-prediction-ms-default:
+ driver: bridge
+ driver_opts:
+ com.docker.network.driver.mtu: 1400
+services:
+ ### if kafka is not required comment out zookeeper and kafka ###
+ zookeeper:
+ image: confluentinc/cp-zookeeper:6.2.1
+ container_name: zookeeper
+ ports:
+ - '2181:2181'
+ environment:
+ ZOOKEEPER_CLIENT_PORT: 2181
+ networks:
+ - ml-prediction-ms-default
+
+ kafka:
+ image: confluentinc/cp-kafka:6.2.1
+ container_name: kafka
+ ports:
+ - "19092:19092"
+ depends_on:
+ - zookeeper
+ environment:
+ KAFKA_BROKER_ID: 1
+ KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+ KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONNECTIONS_FROM_HOST:PLAINTEXT
+ KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ depends_on:
+ - zookeeper
+ networks:
+ - ml-prediction-ms-default
+
+ ml-prediction-ms:
+ container_name: ml-prediction-ms
+ build:
+ context: ./
+ ports:
+ - "5000:5000"
+ environment:
+ - DEBUG=True
+ depends_on:
+ - zookeeper
+ - kafka
+ networks:
+ - ml-prediction-ms-default
diff --git a/components/ml-prediction-ms/log_config.yaml b/components/ml-prediction-ms/log_config.yaml
new file mode 100755
index 00000000..a129113e
--- /dev/null
+++ b/components/ml-prediction-ms/log_config.yaml
@@ -0,0 +1,45 @@
+# ============LICENSE_START=======================================================
+# ml-prediction-ms
+# ================================================================================
+# Copyright (C) 2023 Wipro Limited.
+# ==============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+
+version: 1
+
+disable_existing_loggers: true
+
+loggers:
+ onap_logger:
+ level: INFO
+ handlers: [onap_log_handler, stdout_handler]
+ propagate: false
+handlers:
+ onap_log_handler:
+ class: logging.handlers.RotatingFileHandler
+ filename: /var/log/ONAP/dcaegen2/services/ml-prediction-ms/application.log
+ mode: a
+ maxBytes: 10000000
+ backupCount: 10
+ formatter: mdcFormatter
+ stdout_handler:
+ class: logging.StreamHandler
+ formatter: mdcFormatter
+formatters:
+ mdcFormatter:
+ format: '%(asctime)s - %(levelname)s - %(levelno)s - %(process)d - %(name)s | %(message)s'
+ mdcfmt: '{ServiceName} | {RequestID} | {InvocationID}'
+ datefmt: '%Y-%m-%dT%H:%M:%S%z'
+ (): onaplogging.mdcformatter.MDCFormatter
diff --git a/components/ml-prediction-ms/ml-prediction-ms.config b/components/ml-prediction-ms/ml-prediction-ms.config
new file mode 100755
index 00000000..8673540c
--- /dev/null
+++ b/components/ml-prediction-ms/ml-prediction-ms.config
@@ -0,0 +1,37 @@
+# ===================LICENSE_START=======================================================
+# ml-prediction-ms
+# ================================================================================
+# Copyright (C) 2023 Wipro Limited
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+
+[PM DATA TOPICS]
+PathToStartPMData = http://<<ransim>>:8081/ransim/api/GenerateIntelligentSlicingPmData
+PathToStopPMData = http://<<ransim>>:8081/ransim/api/stopIntelligentSlicingPmData
+#Verify Topic Name
+#http://10.31.4.44:3904/events/unauthenticated.PERFORMANCE_MEASUREMENTS/mlms-cg/mlms-cid
+GetDataTopic = PERFORMANCE_MEASUREMENTS
+
+
+[CONFIG DATA TOPICS]
+PathToGetConfigData = http://10.31.4.45:8086/execute/11/get-nrcellcu-configdata
+
+[POLICY UPDATE]
+GetPolicyTopic = ML_RESPONSE_TOPIC
+#Verify Topic Name
+#PathToDmapTopic = http://10.31.4.44:3904/events/unauthenticated.ML_RESPONSE_TOPIC
+
+[SERVER NAME]
+ServerName = cucpserver1
diff --git a/components/ml-prediction-ms/model/best_model.h5 b/components/ml-prediction-ms/model/best_model.h5
new file mode 100755
index 00000000..af26e5d0
--- /dev/null
+++ b/components/ml-prediction-ms/model/best_model.h5
Binary files differ
diff --git a/components/ml-prediction-ms/pom.xml b/components/ml-prediction-ms/pom.xml
new file mode 100755
index 00000000..0140e245
--- /dev/null
+++ b/components/ml-prediction-ms/pom.xml
@@ -0,0 +1,247 @@
+<?xml version="1.0"?>
+<!--
+#############################################################################
+# ============LICENSE_START=======================================================
+# ml-prediction-ms
+# ================================================================================
+# Copyright (C) 2023 Wipro Limited
+# ==============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+###############################################################################
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.onap.oparent</groupId>
+ <artifactId>oparent</artifactId>
+ <version>3.0.0</version>
+ </parent>
+ <!-- CHANGE THE FOLLOWING 3 OBJECTS for your own repo -->
+ <groupId>org.onap.dcaegen2.services</groupId>
+ <artifactId>ml-prediction-ms</artifactId>
+ <name>dcaegen2-services-ml-prediction-ms</name>
+ <version>1.0.0-SNAPSHOT</version>
+ <properties>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <sonar.sources>.</sonar.sources>
+ <sonar.junit.reportsPath>xunit-results.xml</sonar.junit.reportsPath>
+ <sonar.python.coverage.reportPaths>coverage.xml</sonar.python.coverage.reportPaths>
+ <sonar.python.xunit.reportPath>xunit-results.xml</sonar.python.xunit.reportPath>
+ <sonar.language>py</sonar.language>
+ <sonar.pluginname>python</sonar.pluginname>
+ <sonar.inclusions>**/*.py</sonar.inclusions>
+ <sonar.exclusions>target/**,tests/**,setup.py,**/__init__.py</sonar.exclusions>
+ <exec-mvn-plugin-version>1.2.1</exec-mvn-plugin-version>
+ </properties>
+ <build>
+ <finalName>${project.artifactId}-${project.version}</finalName>
+ <pluginManagement>
+ <plugins>
+ <!-- the following plugins are invoked from oparent, we do not need them -->
+ <plugin>
+ <groupId>org.sonatype.plugins</groupId>
+ <artifactId>nexus-staging-maven-plugin</artifactId>
+ <configuration>
+ <skipNexusStagingDeployMojo>true</skipNexusStagingDeployMojo>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-deploy-plugin</artifactId>
+ <!-- This version supports the "deployAtEnd" parameter -->
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-enforcer-plugin</artifactId>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ <!-- first disable the default Java plugins at various stages -->
+ <!-- maven-resources-plugin is called during "*resource" phases by default behavior. it prepares the resources
+ dir. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-resources-plugin</artifactId>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ <!-- maven-compiler-plugin is called during "compile" phases by default behavior. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ <!-- maven-jar-plugin is called during "compile" phase by default behavior. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-jar-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>default-jar</id>
+ </execution>
+ </executions>
+ </plugin>
+ <!-- maven-install-plugin is called during "install" phase by default behavior. it tries to copy stuff under
+ target dir to ~/.m2. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-install-plugin</artifactId>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ <!-- maven-surefire-plugin is called during "test" phase by default behavior. it triggers junit test.
+ we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <skipTests>true</skipTests>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <version>${exec-mvn-plugin-version}</version>
+ <configuration>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ <MVN_DOCKERREGISTRY_DAILY>${onap.nexus.dockerregistry.daily}</MVN_DOCKERREGISTRY_DAILY>
+ <MVN_DOCKERREGISTRY_SNAPSHOT>${onap.nexus.dockerregistry.daily}</MVN_DOCKERREGISTRY_SNAPSHOT>
+ <MVN_DOCKERREGISTRY_RELEASE>${onap.nexus.dockerregistry.release}</MVN_DOCKERREGISTRY_RELEASE>
+ </environmentVariables>
+ </configuration>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ <plugins>
+ <!-- now we configure custom action (calling a script) at various lifecycle phases -->
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <version>${exec-mvn-plugin-version}</version>
+ <executions>
+ <execution>
+ <id>clean phase script</id>
+ <phase>clean</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>__</argument>
+ <argument>clean</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>generate-sources script</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>__</argument>
+ <argument>generate-sources</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>compile script</id>
+ <phase>compile</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>__</argument>
+ <argument>compile</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>package script</id>
+ <phase>package</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>__</argument>
+ <argument>package</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>test script</id>
+ <phase>test</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>__</argument>
+ <argument>test</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>install script</id>
+ <phase>install</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>__</argument>
+ <argument>install</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>deploy script</id>
+ <phase>deploy</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>__</argument>
+ <argument>deploy</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/components/ml-prediction-ms/pyproject.toml b/components/ml-prediction-ms/pyproject.toml
new file mode 100755
index 00000000..bf54bd47
--- /dev/null
+++ b/components/ml-prediction-ms/pyproject.toml
@@ -0,0 +1,6 @@
+[build-system]
+requires = [
+ "setuptools >= 35.0.2",
+ "setuptools_scm >= 2.0.0, <3"
+]
+build-backend = "setuptools.build_meta"
diff --git a/components/ml-prediction-ms/requirements.txt b/components/ml-prediction-ms/requirements.txt
new file mode 100755
index 00000000..57e60f78
--- /dev/null
+++ b/components/ml-prediction-ms/requirements.txt
@@ -0,0 +1,29 @@
+ # requirements.txt
+ #
+ # installs dependencies from ./setup.py, and the package itself,
+ # in editable mode
+ # -e .
+
+ # (the -e above is optional). you could also just install the package
+ # normally with just the line below (after uncommenting)
+
+ flask==1.1.2
+ requests==2.24.0
+ responses==0.16.0
+ pandas==1.1.5
+ tensorflow
+ scikit-learn
+ scipy
+ matplotlib
+ pytest-cov
+ xlrd
+ mock==4.0.3
+ openpyxl==3.0.10
+ configparser
+ pyyaml
+ pytest
+ requests-mock
+ numpy==1.23.1
+ confluent_kafka
+
+
diff --git a/components/ml-prediction-ms/setup.py b/components/ml-prediction-ms/setup.py
new file mode 100755
index 00000000..1db0a537
--- /dev/null
+++ b/components/ml-prediction-ms/setup.py
@@ -0,0 +1,30 @@
+# LICENSE_START=======================================================
+# ml-prediction-ms
+# ================================================================================
+# Copyright (C) 2023 Wipro Limited
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+from setuptools import setup, find_packages
+
+# Packaging metadata for the ml-prediction-ms component.
+# Packages are discovered automatically via the __init__.py files in src/ and tests/.
+setup(
+    name="ml-prediction-ms",
+    version="1.0.0",
+    author="sendil.kumar@wipro.com",
+    author_email="sendil.kumar@wipro.com",
+    license='Apache 2',
+    description="Slice Intelligence Machine Learning Prediction",
+    url="https://gerrit.onap.org/r/gitweb?p=dcaegen2/services.git;a=tree;f=components;hb=HEAD",
+    packages=find_packages()
+)
diff --git a/components/ml-prediction-ms/src/__init__.py b/components/ml-prediction-ms/src/__init__.py
new file mode 100755
index 00000000..321d9d7a
--- /dev/null
+++ b/components/ml-prediction-ms/src/__init__.py
@@ -0,0 +1,24 @@
+# ==============LICENSE_START=====================================================
+# ml-prediction-ms
+# ================================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (C) 2023 Wipro Limited
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+# empty __init__.py so that pytest can add correct path to coverage report,
+# -- per pytest best practice guideline
+
+# Make sibling modules in src/ importable regardless of the directory the
+# interpreter (or pytest) is started from.
+import pathlib, sys
+sys.path.append(str(pathlib.Path(__file__).parent))
diff --git a/components/ml-prediction-ms/src/run.py b/components/ml-prediction-ms/src/run.py
new file mode 100755
index 00000000..752355d8
--- /dev/null
+++ b/components/ml-prediction-ms/src/run.py
@@ -0,0 +1,680 @@
+#!/usr/bin/env python3
+# ============LICENSE_START=======================================================
+# ml-prediction-ms
+# ================================================================================
+# Copyright (C) 2023 Wipro Limited
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+import os
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}
+
+import pandas as pd
+from pandas import DataFrame, read_excel,concat
+from tensorflow.keras.models import load_model
+from sklearn.preprocessing import MinMaxScaler
+from numpy import concatenate
+from datetime import datetime
+import json
+import time
+import requests
+from requests.auth import HTTPBasicAuth
+import logging
+import configparser
+import uuid
+
+from confluent_kafka import Consumer
+from confluent_kafka import Producer
+import socket
+
+# Number of past time instances the LSTM looks back over (its input window).
+global window_size  # NOTE(review): 'global' at module scope is a no-op; kept from the original
+window_size=4
+
+# Create and configure logger
+import io, os, sys
+try:
+    # Python 3: reopen stdout unbuffered (binary) and wrap with write-through
+    # so container log output appears immediately.
+    sys.stdout = io.TextIOWrapper(open(sys.stdout.fileno(), 'wb', 0), write_through=True)
+    # If flushing on newlines is sufficient, as of 3.7 you can instead just call:
+    sys.stdout.reconfigure(line_buffering=True)
+except TypeError:
+    # older version of Python
+    pass
+
+
+
+# File logger; filemode='w' truncates the log on every service restart.
+logging.basicConfig(filename="IntelligentSliceMl.log",
+                    format='%(asctime)s - %(levelname)s - %(levelno)s - %(process)d - %(name)s - %(message)s',
+                    filemode='w')
+
+# Creating an object
+logger = logging.getLogger('ml_ms_prediction')
+logger.setLevel(logging.DEBUG)
+
+logger.info("traceID-%s : Start Prediction", str(uuid.uuid4())[:8])
+
+class Config:
+
+ def __init__(self):
+ config = configparser.ConfigParser()
+ config.readfp(open(r'ml-prediction-ms.config'))
+
+ self.pathToStartPMData = config.get('PM DATA TOPICS', 'PathToStartPMData')
+ self.pathToStopPMData = config.get('PM DATA TOPICS', 'PathToStopPMData')
+ self.getDataTopic = config.get('PM DATA TOPICS', 'GetDataTopic')
+
+ self.pathToGetConfigData = config.get('CONFIG DATA TOPICS', 'PathToGetConfigData')
+ self.getPolicyTopic = config.get('POLICY UPDATE', 'GetPolicyTopic')
+
+ self.serverName = config.get('SERVER NAME', 'ServerName')
+
+ def get_pathToStartPMData(self):
+ return self.pathToStartPMData
+
+ def get_pathToStopPMData(self):
+ return self.pathToStopPMData
+
+ def get_DataTopic(self):
+ return self.getDataTopic
+
+ def get_pathToGetConfigData(self):
+ return self.pathToGetConfigData
+
+ def get_PolicyTopic(self):
+ return self.getPolicyTopic
+
+ def get_serverName(self):
+ return self.serverName
+
+class Parser:
+
+ def __init__(self):
+ self.Config_Object=Config()
+
+ def Data_Parser(self, data_val,data_dic,features, slice_name):
+ """
+ Perform Data Parser
+ READ THE ACTUAL PARAMETERS FROM THE topic MESSAGE AND ADDS IT INTO A DICTIONARY
+ Args:
+ cells_data_list: Cell data list object
+ data_dic: The Parsed data on cell data contained in dictionary
+ features: Data featurs (PM Metrics)
+ slice_name : Slice name
+ Returns:
+ data_dic: none
+ Raises:
+ RuntimeError: Error while Process Slices Cells Data.
+ """
+ try:
+ len_data_val=len(data_val)
+ for i in range(len_data_val):
+ cell_id = data_val[i]['measObjInstId']
+ response = requests.get(self.Config_Object.get_pathToGetConfigData() + cell_id + '/snssai/'+ slice_name, timeout=3)
+ config_data=response.json()['maxNumberOfConns']
+
+ results= data_val[i]['measResults']
+ len_results=len(results)
+ for j in range(len_results):
+ p=int(results[j]['p'])
+ value=int(results[j]['sValue'])
+ key = slice_name+'_'+cell_id +'_'+features[p-1].split('-')[0]
+ if key not in data_dic:
+ data_dic[key]=[value]
+ else:
+ data_dic[key].append(value)
+ #We are normalising the prediction, we are have the actual prediction starting from the 5th time instance
+ #so for the first 4 time instances we are generating synthetic data generation for prediction result
+ #this is done as a softmax correction, essential for better accuracy
+ #After the first 4 time instances the predicted values are used and and taken forward.
+ for j in range(3,5):
+ key = slice_name+'_'+cell_id +features[j]
+ if key not in data_dic:
+ data_dic[key]=[config_data]
+ elif j==3:
+ data_dic[key].append(config_data)
+ elif j==4:
+ change = (
+ data_dic[slice_name + "_" + cell_id + "_SM.PDUSessionSetupFail.0"][-1]
+ / data_dic[slice_name + "_" + cell_id + "_SM.PDUSessionSetupReq.01"][-1]
+ - data_dic[slice_name + "_" + cell_id + "_SM.PDUSessionSetupFail.0"][-2]
+ / data_dic[slice_name + "_" + cell_id + "_SM.PDUSessionSetupReq.01"][-2]
+ )
+ data_dic[key].append(change*config_data+config_data)
+ except Exception as e:
+ logger.error("traceID-%s Error in Parser Slices Cells Data:\n%s", str(uuid.uuid4())[:8], e)
+ except requests.Timeout as error:
+ logger.critical("traceId-%s Timeout from Get CONFIG DATA topic :\n%s",str(uuid.uuid4())[:8], error)
+ return data_dic
+
+class Prediction:
+
+    # Time Series Prediction using the LSTM Model; applies the post-processing
+    # Logic to give the final predicted output.
+    # The model is loaded once at class-definition time and shared by all
+    # instances; compile=False because it is used for inference only.
+    modelfile = 'model/best_model.h5'
+    model= load_model(modelfile, compile=False)
+
+    def __init__(self):
+        # Configuration accessor (topics, server name).
+        self.Config_Object=Config()
+
+ def IsPolicyUpdate_url_Exist(self):
+ """
+ Get the status of the Policy response topic by checking its execution status for unit test module.
+
+ Args:
+ none: none
+ Returns:
+ status:Bool status of Topic
+
+ """
+ status = True
+ try:
+ Post_Message ={}
+ json_object = json.dumps(Post_Message, indent = 4)
+ conf = {'bootstrap.servers': "kafka:9092",'client.id': socket.gethostname()}
+
+ producer = Producer(conf)
+ producer.produce(self.Config_Object.get_PolicyTopic(), value=json_object.encode('utf-8'))
+ producer.poll(1)
+ except Exception as e:
+ status = False
+ except requests.Timeout as error:
+ status = False
+ return status
+
+ def series_to_supervised(self, data, n_in=1, n_out=1, dropnan=True):
+ """
+ Convert the timeseries into Forecast series
+ Args:
+ data: Input time series data to be processed
+ n_in: Input window size for time series to carry previous nth time instance value.
+ n_out: output future n time instance value to be forecasted against the timeseries
+ dropnan : Flag to drop nan values
+
+ Returns:
+ agg (list): Aggregated list of past time series inputs as per the input window and the time series of the predicted future time instance.
+
+ Raises:
+ RuntimeError: Error Pre Processing Slices Cells Data.
+ """
+ try:
+ n_vars = 1 if type(data) is list else data.shape[1]
+ df = DataFrame(data)
+ cols, names = [],[]
+ # input sequence (t-n, ... t-1)
+ for i in range(n_in, 0, -1):
+ cols.append(df.shift(i))
+ names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
+ # forecast sequence (t, t+1, ... t+n)
+ for i in range(0, n_out):
+ cols.append(df.shift(-i))
+ if i == 0:
+ names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
+ else:
+ names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
+ # put it all together
+ agg = concat(cols, axis=1)
+ agg.columns = names
+
+ # drop rows with NaN values
+ if dropnan:
+ agg.dropna(inplace=True)
+
+ except Exception as e:
+ logger.error("traceID-%s Error Pre Processing Slices Cells Data f:\n%s",str(uuid.uuid4())[:8], e)
+ return agg
+
+    def Predict_Model(self, test):
+        """
+        Does the Actual Prediction on the Input data
+
+        Args:
+            test: Input data to model with current and last 4 time instances (window_size)
+        Returns:
+            inv_yhat: A 2-D array with predicted results ([] when prediction failed).
+        Raises:
+            RuntimeError: Error in Prediction.
+        """
+
+        inv_yhat =[]
+
+        try:
+            # Scale into the model's training range before framing.
+            scaler = MinMaxScaler(feature_range=(-1, 1))
+            scaled = scaler.fit_transform(test)
+            reframed = self.series_to_supervised(scaled,window_size, 1)
+            test=reframed.values
+            # Keep all lag columns plus all-but-the-last column of the current step.
+            test_X= test[:, :window_size*scaled.shape[1] + scaled.shape[1]-1]
+            # Reshape to [samples, timesteps, features] for the LSTM
+            # -- assumes the model was trained with timesteps=1; TODO confirm.
+            test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
+            yhat = self.model.predict(test_X)
+            test_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))
+            # Re-attach the last 4 feature columns so inverse_transform sees
+            # the same column width the scaler was fitted on.
+            inv_yhat = concatenate((test_X[:,-4:],yhat), axis=1)
+            inv_yhat = scaler.inverse_transform(inv_yhat)
+        except Exception as e:
+            logger.critical("traceId-%s Error in Prediction:\n%s",str(uuid.uuid4())[:8], e)
+        return inv_yhat
+
+ def Logic(self, Prev_Thresh,Current_Thresh):
+ """
+ Post prediction, Applies the post processing Logic i.e
+ (+-)10% cap on Slice config and (+-)25% on cell config against the predicted values
+
+ Args:
+ Prev_Thresh: List of Previous Configuration('maxumberOfConns') as read from config DB
+ Current_Thresh: List of Predicted Configuration('maxumberOfConns')
+ Returns:
+ Final_Pred_Val= List of Final Configuration('maxumberOfConns')
+ Raises:
+ RuntimeError: Error in Prediction.
+ """
+ try:
+ Sum_Prev_Thresh=sum(Prev_Thresh)
+ Sum_Pred_Thresh= sum(Current_Thresh)
+ Global_change= abs(sum(Current_Thresh)-sum(Prev_Thresh))/sum(Prev_Thresh)
+ #logger.info('Global_change',Global_change)
+ Final_Pred_Val=[]
+ Percent_Change=[]
+ sum_Pred_thresh_change=0
+ # Rule 1 is applied to compute cell based Min /Max (-25%, 25%)
+ len_Prev_Thresh=len(Prev_Thresh)
+ for cell_instance, prev_t in enumerate(Prev_Thresh):
+ if (Current_Thresh[cell_instance]-prev_t)/prev_t > 0.25:
+ Rule_based_Percent = 0.25 # rule bases total percentage
+ elif (Current_Thresh[cell_instance]-prev_t)/prev_t <-0.25:
+ Rule_based_Percent = -0.25
+ else:
+ Rule_based_Percent=(Current_Thresh[cell_instance]-prev_t)/prev_t
+
+ Percent_Change.append(Rule_based_Percent)
+ # predicted sum of threshold change for all cells
+ sum_Pred_thresh_change=sum_Pred_thresh_change+Rule_based_Percent
+
+
+ if Global_change <= 0.10:
+ for cell_instance, prev_t in enumerate(Prev_Thresh):
+ Final_Pred_Val.append(prev_t+prev_t*Percent_Change[cell_instance])
+ else:
+ #Rule 2 - to distribut global threshold to all cells based on only 10% increase in slice
+ Thresh_Rule_2 = []
+ extra = 0.1*Sum_Prev_Thresh
+
+ for i in range(len_Prev_Thresh):
+ new_val = Prev_Thresh[i]+extra*Percent_Change[i]/abs(sum_Pred_thresh_change)
+ if abs(extra*Percent_Change[i]/abs(sum_Pred_thresh_change))> abs(Percent_Change[i]*Prev_Thresh[i]):
+ new_val = Prev_Thresh[i]+Prev_Thresh[i]*Percent_Change[i]
+ Final_Pred_Val.append(new_val)
+ except Exception as e:
+ logger.error("traceId-%s Error in Post_Prediction_Logic:\n%s", str(uuid.uuid4())[:8], e)
+ return Final_Pred_Val
+
+ def acked(err, msg):
+ """
+ Function to format the error in case of exception being None
+
+ Args:
+ err: Exception object
+ msg: Error message
+ Returns:
+ None: None
+
+ """
+ if err is not None:
+ logger.error("traceId-%s Failed to deliver message: %s",str(uuid.uuid4())[:8], str(err))
+ else:
+ logger.info('traceId-%s %s',str(uuid.uuid4())[:8], (str(msg)))
+
+
+ def Final_Post_Method(self, Predicted_Results, Previous_Results, slices, server_name):
+ """
+ Posts the final predicted output (Final Output of the ML Service)
+ Args:
+ Predicted_Results: Contains Predicted results w.r.t the cell ID
+ Previous_Results: Contains Previous Configured values w.r.t the cell ID
+ slices: Slice name
+ Server_name: contains server name
+ Returns:
+ status: True on post with content success, False on post failure.
+ Raises:
+ RuntimeError: Error Posting the Final Output
+ """
+ status = True
+ try:
+ Post_Message ={}
+ Post_Message["snssai"]= slices
+ Post_Message['data'] = []
+ #for server in server_info.keys():
+ temp_dict_1 = {}
+ temp_dict_1['gNBCUName'] = server_name
+ temp_dict_1['cellCUList'] = []
+ for key, Predicted_Result in Predicted_Results.items():
+ temp_dict_2={}
+ temp_dict_2['cellLocalId'] = key
+ temp_dict_2['configData'] = {}
+ temp_dict_2['configData']['maxNumberofConns'] = int(Previous_Results[key])
+ temp_dict_2['configData']['predictedMaxNumberofConns'] = int(Predicted_Result)
+ now = datetime.now()
+ dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
+ temp_dict_2['configData']['lastUpdatedTS'] = dt_string
+ temp_dict_1['cellCUList'].append(temp_dict_2)
+ Post_Message['data'].append(temp_dict_1)
+
+ json_object = json.dumps(Post_Message, indent = 4)
+ response = []
+
+ try:
+ conf = {'bootstrap.servers': "kafka:9092",'client.id': socket.gethostname()}
+ producer = Producer(conf)
+ producer.produce(self.Config_Object.get_PolicyTopic(), value=json_object.encode('utf-8'))
+
+ producer.poll(1)
+
+ except requests.Timeout as error:
+ status = False
+ logger.error("traceId-%s Posting the Final Output To Dmap Topic:\n%s",str(uuid.uuid4())[:8], error)
+ #print(response)
+ logger.info('traceId-%s %s',str(uuid.uuid4())[:8], Post_Message)
+ except Exception as e:
+ status = False
+ logger.error("traceId-%s Posting the Final Output:\n%s",str(uuid.uuid4())[:8], e)
+
+ return status
+
+class Controller:
+
+    def __init__(self):
+        # Accumulated parsed PM time series, written out with the prediction results.
+        self.data_dic={}
+        self.Parser_Object=Parser()
+        self.Predict_Object=Prediction()
+        self.Config_Object=Config()
+
+
+ def GetData(self, consumer):
+ """
+ Get the data generation from ransim topic on PM Data for Intelligenct slicing
+
+ Args:
+ consumer: Consumer Topic instance to get the PM Data
+ Returns:
+ pm_data: Slices and cells PM data
+
+ Raises:
+ RuntimeError: Error while Get Data from topic.
+ """
+ pm_data = []
+ try:
+ msg = consumer.poll(timeout=-1)
+
+ if msg is None:
+ # Initial message consumption may take up to
+ # `session.timeout.ms` for the consumer group to
+ # rebalance and start consuming
+ #print("Waiting...")
+ logger.info("traceId-%s PM Get Data from topic Waiting...: ",str(uuid.uuid4())[:8])
+ elif msg.error():
+ #print("ERROR: %s".format(msg.error()))
+ logger.critical("traceId-%s Error while PM Get Data from topic :\n%s",str(uuid.uuid4())[:8], msg.error())
+ else:
+ # Extract the (optional) key and value, and print.
+ #print('Received message: {}'.format(msg.value().decode('utf-8')))
+
+ pm_data = msg.value().decode('utf-8')
+
+
+ except Exception as e:
+ logger.critical("traceId-%s Error while Get Data from topic :\n%s",str(uuid.uuid4())[:8], e)
+ pm_data = []
+ except requests.Timeout as error:
+ logger.critical("traceId-%s Timeout from Get Data topic :\n%s",str(uuid.uuid4())[:8], error)
+ pm_data = []
+ return pm_data
+
+    def simulatedTestDataToReplaceTopic(self):
+        """
+        Simulate the test PM data, build the required time series and perform
+        prediction.  This function helps to quickly check the TensorFlow
+        environment that is used in the prediction and training process.
+
+        Args:
+            none: none
+        Returns:
+            status: Execution status of the prediction task
+
+        Raises:
+            RuntimeError: Error while executing prediction task.
+        """
+
+        #with pd.ExcelFile('test.xlsx', engine="openpyxl") as excel:
+        #    df = pd.read_excel(excel)
+        status = True
+
+        try:
+            df = pd.read_excel('tests/unit/test.xlsx', engine='openpyxl')
+
+            new_columns1=[]
+            serverName = self.Config_Object.get_serverName()
+            #new_columns2=[]
+            # Prefix every metric column with a cell id so column naming
+            # matches what Data_Parser produces for live data.
+            len_dfcolumns=len(df.columns)
+            for i in range(len_dfcolumns):
+                new_columns1.append('01-B989BD_'+df.columns[i])
+            df.columns=new_columns1
+            slice_name=df.columns[0].split('.')[0]
+            data_df=pd.DataFrame()
+            len_df=len(df)
+            for i in range(len_df-1):
+                temp_df=df.iloc[[i]]
+                # NOTE(review): DataFrame.append is removed in pandas >= 2.0;
+                # fine with the pinned pandas 1.1.5.
+                data_df=data_df.append(temp_df)
+                # parse pm data + configured data + predicted dummy data(=configured data- to be changed after pred)
+                if len(data_df)<window_size+1:
+                    continue
+
+                configured={}
+                predicted={}
+                len_data_dfcol=len(data_df.columns)
+                # Each cell occupies window_size+1 consecutive columns.
+                for x in range(0,len_data_dfcol,window_size+1):
+                    test=data_df.iloc[-5:,x:x+5]
+                    cell=test.columns[0].split('_')[1]
+                    inv_yhat = self.Predict_Object.Predict_Model(test) # Predict using model
+                    configured[cell]= test.iat[-2,4]
+                    inv_yhat = float(inv_yhat[:,-1])
+                    predicted[cell]=inv_yhat
+                updated_predicted= self.Predict_Object.Logic(list(configured.values()), list(predicted.values()))
+                count=0
+                for x in range(0,len_data_dfcol, window_size+1):
+                    # Write the capped prediction back into the 'predicted' column.
+                    data_df.iloc[[i],[x+4]]= updated_predicted[count]
+                    count+=1
+                ret = self.Predict_Object.Final_Post_Method(predicted, configured, slice_name, serverName)
+                if(ret == False):
+                    status = False
+                    break
+
+
+            results=data_df
+            logger.info("traceId-%s predicted data: ",str(uuid.uuid4())[:8])
+            # NOTE(review): 'predicted' is unbound here when the loop body never
+            # ran (len_df <= window_size+1) -- confirm against the test fixture.
+            logger.debug(predicted)
+            dateTimeObj = datetime.now()
+            timestampStr = dateTimeObj.strftime("%d-%b-%Y-%H-%M-%S")
+            #print('Current Timestamp : ', timestampStr)
+
+
+            results.to_csv(timestampStr+"predicted.csv", index=0)
+        except Exception as e:
+            logger.critical("traceId-%s Error while prediction :\n%s",str(uuid.uuid4())[:8], e)
+            status = False
+        return status
+
+
+ def PreprocessAndPredict(self,pm_data):
+ """
+ Preprocess the data and start the prediction for each slices
+
+ Args:
+ pm_data: Performance Monitoring Data of all slices and cells collection
+ Returns:
+ status: Preprocssing and Prediction status
+
+ Raises:
+ RuntimeError: Error while Preprocessing data during prediction.
+ """
+ status = True
+ try:
+ len_pm_data=len(pm_data)
+ temp_data=json.loads(pm_data[len_pm_data-1])
+ sub_data1 = temp_data['event']['perf3gppFields']['measDataCollection']['measInfoList']
+
+ len_sub_data=len(sub_data1)
+ for r in range(len_sub_data):
+ data_dic1={}
+ server_name = temp_data['event']['perf3gppFields']['measDataCollection']['measuredEntityDn']
+ sub_data = temp_data['event']['perf3gppFields']['measDataCollection']['measInfoList'][r]
+
+ features=sub_data['measTypes']['sMeasTypesList']
+
+ features.extend(['_maxNumberOfConns.configured', '_maxNumberOfConns.predicted'])
+
+ slice_name=features[0].split('.')[2]
+ data_val= sub_data['measValuesList']
+
+ data_dic1= self.Parser_Object.Data_Parser(data_val,data_dic1,features,slice_name)
+
+ #data_df=pd.DataFrame(self.data_dic)
+ data_df=pd.DataFrame(data_dic1)
+
+ predic_res=self.predictionandresults(data_df)
+ except Exception as e:
+ logger.critical("traceId-%s Error while Preprocessing data during prediction :\n%s",str(uuid.uuid4())[:8], e)
+
+ return status
+
+
+
+ def predictionandresults(self,data_df):
+ """
+ Process the data and start the prediction for each cell in slice
+
+ Args:
+ pm_data: Performance Monitoring Data of a slices and cells collection
+ Returns:
+ status: Prediction status
+
+ Raises:
+ RuntimeError: Error while prediction.
+ """
+ status = True
+ try:
+ data_df1=pd.DataFrame()
+ slice_name=data_df.columns[0].split('.')[0]
+ len_data_df=len(data_df)
+ print('lendatadf:',len_data_df)
+ for i in range(len_data_df):
+ temp_df=data_df.iloc[[i]]
+ data_df1=data_df1.append(temp_df)
+ if len(data_df1)<window_size+1:
+ continue
+
+ configured={}
+ predicted={}
+ len_data_dfcol=len(data_df.columns)
+ for x in range(0,len_data_dfcol,window_size+1):
+ test=data_df1.iloc[-5:,x:x+5]
+ cell=test.columns[0].split('_')[1]
+ inv_yhat = self.Predict_Object.Predict_Model(test) # Predict using model
+ configured[cell]= float(test.iat[-2,4])
+ inv_yhat = float(inv_yhat[:,-1])
+ predicted[cell]=inv_yhat
+ #data_df.iloc[[i],[x+4]]=inv_yhat
+ #self.data_dic[data_df.columns[x+4]][-1] = inv_yhat
+ #logger.info("predicted data: "+ predicted)
+ logger.info("traceId-%s SUCCESS predicted data: ",str(uuid.uuid4())[:8])
+ logger.debug( predicted)
+ updated_predicted= self.Predict_Object.Logic(list(configured.values()), list(predicted.values()))
+ #logger.info('updated',updated_predicted)
+ logger.info('traceId-%s SUCCESS updated:', str(uuid.uuid4())[:8])
+ logger.debug(updated_predicted)
+ count=0
+ for x in range(0,len_data_dfcol, window_size+1):
+
+ data_df1.iloc[[i],[x+4]]= updated_predicted[count]
+ count+=1
+
+ self.Predict_Object.Final_Post_Method(predicted, configured, slice_name, server_name)
+
+
+ if len(data_df)>=window_size+1:
+ results=pd.DataFrame(self.data_dic)
+ #print("Predicted Results:",results)
+ dateTimeObj = datetime.now()
+ timestampStr = dateTimeObj.strftime("%d-%b-%Y-%H-%M-%S")
+ #print('Current Timestamp : ', timestampStr)
+ results.to_csv(timestampStr+"predicted.csv", index=0)
+ except Exception as e:
+ logger.critical("traceId-%s Error while Preprocessing data during prediction :\n%s",str(uuid.uuid4())[:8], e)
+ status = False
+
+ return status
+
+
+ def Execute(self):
+ """
+ Executes workflow of task methods to get data from topics, then performs preprocessng and Prediction.
+
+ Args:
+ none: none
+ Returns:
+ none: none
+
+ Raises:
+ RuntimeError: Error during Prediction start process.
+ """
+ status = True
+ bExecute = True
+ pm_data = []
+ try:
+ conf = {'bootstrap.servers': "kafka:9092",'group.id': "1",'auto.offset.reset': 'smallest'}
+
+ consumer = Consumer(conf)
+ consumer.subscribe(([self.Config_Object.get_DataTopic(), -1]))
+
+ while bExecute:
+ #self.StartDataGeneration()
+ #time.sleep(15)
+
+ #self.StopDataGeneration()
+ #time.sleep(5)
+ while True:
+ pm_data = self.GetData(consumer)
+
+ if pm_data==[]:
+ # Delay for 1 minute (60 seconds)
+ #time.sleep(60)
+ break
+
+ self.PreprocessAndPredict(pm_data)
+
+
+ except Exception as e:
+ logger.critical("traceId-%s Error during Prediction start process f:\n%s",str(uuid.uuid4())[:8], e)
+ status = False
+
+ return status
+
+
+if __name__ == "__main__":
+    try:
+        # Give dependent services (Kafka broker, config DB) time to come up
+        # before subscribing.
+        time.sleep(60)
+        logger.info("traceId-%s : Start Prediction",str(uuid.uuid4())[:8])
+        Controller_Object = Controller()
+        Controller_Object.Execute()
+        #unit test code
+        #Controller_Object.simulatedTestDataToReplaceTopic()
+    except Exception as e:
+        logger.critical("traceId-%s Error onStart Prediction Process:\n%s",str(uuid.uuid4())[:8], e)
+
diff --git a/components/ml-prediction-ms/tests/__init__.py b/components/ml-prediction-ms/tests/__init__.py
new file mode 100755
index 00000000..e778f70b
--- /dev/null
+++ b/components/ml-prediction-ms/tests/__init__.py
@@ -0,0 +1,22 @@
+# ==============LICENSE_START=====================================================
+# ml-prediction-ms
+# ================================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (C) 2023 Wipro Limited
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+# empty __init__.py so that pytest can add correct path to coverage report,
+# -- per pytest best practice guideline
+
diff --git a/components/ml-prediction-ms/tests/unit/__init__.py b/components/ml-prediction-ms/tests/unit/__init__.py
new file mode 100755
index 00000000..db09c74a
--- /dev/null
+++ b/components/ml-prediction-ms/tests/unit/__init__.py
@@ -0,0 +1,18 @@
+# ==============LICENSE_START=====================================================
+# ml-prediction-ms
+# ================================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (C) 2023 Wipro Limited
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
diff --git a/components/ml-prediction-ms/tests/unit/sample.json b/components/ml-prediction-ms/tests/unit/sample.json
new file mode 100644
index 00000000..aa99be7d
--- /dev/null
+++ b/components/ml-prediction-ms/tests/unit/sample.json
@@ -0,0 +1 @@
+["{\"event\":{\"commonEventHeader\":{\"domain\":\"perf3gpp\",\"eventId\":\"f4009916-3c1a-4d72-920d-a76ad5201590\",\"sequence\":0,\"eventName\":\"perf3gpp_RnNode-Slicing_pmMeasResult\",\"sourceName\":\"cucpserver1\",\"reportingEntityName\":\"\",\"priority\":\"Normal\",\"startEpochMicrosec\":1644487518572,\"lastEpochMicrosec\":1644487518572,\"version\":\"4.0\",\"vesEventListenerVersion\":\"7.1\",\"timeZoneOffset\":\"UTC+05:30\"},\"perf3gppFields\":{\"perf3gppFieldsVersion\":\"1.0\",\"measDataCollection\":{\"granularityPeriod\":900,\"measuredEntityUserName\":\"\",\"measuredEntityDn\":\"cucpserver1\",\"measuredEntitySoftwareVersion\":\"r0.1\",\"measInfoList\":[{\"measInfoId\":{\"sMeasInfoId\":\"measInfoIsVal\"},\"measTypes\":{\"sMeasTypesList\":[\"SM.PDUSessionSetupReq.01-B989BD\",\"SM.PDUSessionSetupSucc.01-B989BD\",\"SM.PDUSessionSetupFail.0\"]},\"measValuesList\":[{\"measObjInstId\":\"13999\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"398\"},{\"p\":2,\"sValue\":\"283\"}]},{\"measObjInstId\":\"14000\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"433\"},{\"p\":2,\"sValue\":\"265\"}]},{\"measObjInstId\":\"15155\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"912\"},{\"p\":2,\"sValue\":\"637\"}]},{\"measObjInstId\":\"15174\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"865\"},{\"p\":2,\"sValue\":\"529\"}]},{\"measObjInstId\":\"15175\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"959\"},{\"p\":2,\"sValue\":\"595\"}]},{\"measObjInstId\":\"15176\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"257\"},{\"p\":2,\"sValue\":\"167\"}]},{\"measObjInstId\":\"15289\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"933\"},{\"p\":2,\"sValue\":\"627\"}]},{\"measObjInstId\":\"15290\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"825\"},{\"p\":2,\"sValue\":\"536\"}]},{\"measObjInstId\":\"15296\",\"suspectFlag\":\"false\",\"measResult
s\":[{\"p\":1,\"sValue\":\"856\"},{\"p\":2,\"sValue\":\"571\"}]},{\"measObjInstId\":\"15825\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"422\"},{\"p\":2,\"sValue\":\"291\"}]},{\"measObjInstId\":\"15826\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"412\"},{\"p\":2,\"sValue\":\"303\"}]}]}]}}}}"]
diff --git a/components/ml-prediction-ms/tests/unit/test.xlsx b/components/ml-prediction-ms/tests/unit/test.xlsx
new file mode 100755
index 00000000..ecde30c3
--- /dev/null
+++ b/components/ml-prediction-ms/tests/unit/test.xlsx
Binary files differ
diff --git a/components/ml-prediction-ms/tests/unit/test_predict_unittest.py b/components/ml-prediction-ms/tests/unit/test_predict_unittest.py
new file mode 100755
index 00000000..004893cf
--- /dev/null
+++ b/components/ml-prediction-ms/tests/unit/test_predict_unittest.py
@@ -0,0 +1,232 @@
+# ============LICENSE_START=======================================================
+# ml-prediction-ms
+# ================================================================================
+# Copyright (C) 2023 Wipro Limited
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+import pytest
+
+from src.run import Parser, Prediction, Controller
+
+import unittest
+
+import requests
+import responses
+
+from unittest import TestCase
+from unittest import mock
+from mock import patch # for Python >= 3.3 use unittest.mock
+
+import pandas as pd
+import numpy as np
+from pandas import DataFrame, read_csv, read_excel
+from pandas import concat
+from tensorflow.keras.models import load_model
+from sklearn.preprocessing import MinMaxScaler
+from numpy import concatenate
+from sklearn.metrics import mean_squared_error
+from math import sqrt
+from datetime import datetime
+import json
+import time
+import requests
+from requests.auth import HTTPBasicAuth
+
+from confluent_kafka import Consumer
+from confluent_kafka import Producer
+import socket
+
+import requests_mock
+from mock import patch
+
+# This method will be used by the mock to replace requests.get
def mocked_requests_get(*args, **kwargs):
    """Stand-in for requests.get: always returns a canned 200 response."""
    class _StubResponse:
        def __init__(self, payload, code):
            self.json_data = payload
            self.status_code = code

        def json(self):
            return self.json_data

    return _StubResponse({"key1": "value1"}, 200)
+
+# Our test case class
# Tests for the Controller class (Kafka data consumption and prediction driver).
class ControllerTestCase(unittest.TestCase):

    # Patch 'requests.get' with our stub; the mock object is passed in to the
    # test-case method, so any HTTP GET made while constructing Controller()
    # returns the canned response instead of hitting the network.
    @mock.patch('requests.get', side_effect=mocked_requests_get)
    def test_GetData(self, mock_get):
        status = True
        ctl = Controller()
        conf = {'bootstrap.servers': "kafka:9092",
                'group.id': "1",
                'auto.offset.reset': 'smallest'}

        consumer = Consumer(conf)
        # Consumer.subscribe() expects a list of topic-name strings; the
        # original code passed [topic, 1], which is not a valid topic list.
        consumer.subscribe([ctl.Config_Object.get_DataTopic()])

        # poll() may return None if no message arrives within the 1 s timeout;
        # this smoke test only verifies the subscribe/poll call path completes.
        msg = consumer.poll(1)

        assert status != False

    def test_simulatedTestDataToReplaceTopic(self):
        # Smoke test: the simulated-data fallback must report success.
        self.Controller_Object = Controller()
        status = self.Controller_Object.simulatedTestDataToReplaceTopic()

        assert status != False

    def test_PreprocessAndPredict(self):
        ctl = Controller()

        # Use a context manager so the sample file is closed even on failure
        # (the original opened it without ever closing the handle).
        with open('tests/unit/sample.json') as f:
            # The sample file holds one serialized VES perf3gpp event.
            json_data = json.load(f)

        status = ctl.PreprocessAndPredict(json_data)
        assert status != False
+
+
+# This method will be used by the mock to replace requests.POST
def mocked_requests_post(*args, **kwargs):
    """Stand-in for requests.post: always returns a canned 200 response."""
    class _StubResponse:
        def __init__(self, payload, code):
            self.json_data = payload
            self.status_code = code

        def json(self):
            return self.json_data

    return _StubResponse({"key1": "value1"}, 200)
+
+# Our test case class
# Exercises Prediction's policy-update endpoint check with requests.post mocked.
class PredictionTestCase(unittest.TestCase):

    # Patch 'requests.post' so no real HTTP request is made; the mock object
    # is injected as the second test-method argument.
    @mock.patch('requests.post', side_effect=mocked_requests_post)
    def test_IsPolicyUpdate_url_Exist(self, mock_post):
        prediction_under_test = Prediction()
        status = prediction_under_test.IsPolicyUpdate_url_Exist()
        assert status == True, "Failed"
+
+
+
class TestPredict(unittest.TestCase):
    """End-to-end style tests for the Parser and Prediction helpers."""

    def test_Parser_from_topic_data(self):
        # Renamed from test_Parser: this module previously defined two methods
        # with that same name, so this one was silently shadowed and never ran.
        window_size = 4  # was undefined in the original (NameError at runtime)
        Controller_Object = Controller()

        conf = {'bootstrap.servers': "kafka:9092",
                'group.id': "1",
                'auto.offset.reset': 'smallest'}
        consumer = Consumer(conf)
        # subscribe() expects a list of topic-name strings; 'self.Config_Object'
        # did not exist on this TestCase, so use the Controller's config object.
        consumer.subscribe([Controller_Object.Config_Object.get_DataTopic()])

        pm_data = Controller_Object.GetData(consumer)

        Parser_Object = Parser()
        data_dic = {}
        status = False

        for raw in pm_data:
            temp_data = json.loads(raw)
            meas = temp_data['event']['perf3gppFields']['measDataCollection']
            sub_data = meas['measInfoList'][0]
            server_name = meas['measuredEntityDn']

            features = sub_data['measTypes']['sMeasTypesList']
            features.extend(['_maxNumberOfConns.configured', '_maxNumberOfConns.predicted'])
            # e.g. "SM.PDUSessionSetupReq.01-B989BD" -> slice name "01-B989BD"
            slice_name = features[0].split('.')[2]
            data_val = sub_data['measValuesList']
            data_dic = Parser_Object.Data_Parser(data_val, data_dic, features, slice_name)
            data_df = pd.DataFrame(data_dic)

            # Need window_size + 1 rows before a prediction window is complete.
            if len(data_df) < window_size + 1:
                continue
            status = True

        # NOTE(review): asserting status == False means "a full window was never
        # accumulated"; confirm this is the intended expectation for this fixture.
        assert status == False, "Failed"

    def test_Parser(self):
        # Empty inputs must come back unchanged as an empty dictionary.
        data_dic = {}
        Parser_Object = Parser()
        data_val = {}
        features = {}
        slice_name = ""
        data_dic = Parser_Object.Data_Parser(data_val, data_dic, features, slice_name)
        assert data_dic == {}, "Failed"

    def test_Post_Config_Topic(self):
        window_size = 4
        self.Predict_Object = Prediction()

        df = pd.read_excel('tests/unit/test.xlsx', engine='openpyxl')
        # Prefix every column with the slice id so it matches live topic data.
        df.columns = ['01-B989BD_' + col for col in df.columns]
        slice_name = df.columns[0].split('.')[0]
        data_df = pd.DataFrame()
        status = False  # was unbound if the final assert was reached early
        for i in range(len(df) - 1):
            temp_df = df.iloc[[i]]
            # DataFrame.append was removed in pandas 2.0; use concat instead.
            data_df = pd.concat([data_df, temp_df])
            # parse pm data + configured data + predicted dummy data(=configured data- to be changed after pred)
            if len(data_df) < window_size + 1:
                continue
            configured = {}
            predicted = {}
            len_data_dfcol = len(data_df.columns)
            # Each cell occupies window_size + 1 = 5 consecutive columns.
            for x in range(0, len_data_dfcol, window_size + 1):
                test = data_df.iloc[-5:, x:x + 5]
                cell = test.columns[0].split('_')[1]
                inv_yhat = self.Predict_Object.Predict_Model(test)  # Predict using model
                configured[cell] = test.iat[-2, 4]
                inv_yhat = float(inv_yhat[:, -1])
                predicted[cell] = inv_yhat
            updated_predicted = self.Predict_Object.Logic(list(configured.values()), list(predicted.values()))
            count = 0
            for x in range(0, len_data_dfcol, window_size + 1):
                data_df.iloc[[i], [x + 4]] = updated_predicted[count]
                count += 1
            status = self.Predict_Object.Final_Post_Method(predicted, configured, slice_name, 'cucpserver1')  # hardcoding the server name

            if status == False:
                break

        assert status == True, "Failed"

    '''def test_Execute(self):
        self.Controller_Object = Controller()
        status = self.Controller_Object.Execute()
        assert bool(status) != False'''
+
if __name__ == '__main__':
    # Allow running this test module directly: `python test_predict_unittest.py`.
    unittest.main()
diff --git a/components/ml-prediction-ms/tox.ini b/components/ml-prediction-ms/tox.ini
new file mode 100755
index 00000000..ea7baacd
--- /dev/null
+++ b/components/ml-prediction-ms/tox.ini
@@ -0,0 +1,41 @@
+# ============LICENSE_START=======================================================
+# ml-prediction-ms
+# ================================================================================
+# Copyright (C) 2023 Wipro Limited
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+[tox]
+envlist = py39
+skip_missing_interpreters = false
+tox_pip_extensions_ext_venv_update = true
+
+
+
+[testenv]
+deps =
+ -rrequirements.txt
+ pytest
+ coverage
+ pytest-cov
+ responses
+ requests-mock
+ scikit-learn
+setenv=
+ PYTHONPATH={toxinidir}/src:{toxinidir}/tests
+
+
+commands =
+ pytest ./tests/unit/__init__.py ./tests/unit/test_predict_unittest.py -v -s
+ pytest --cov-report term:skip-covered --cov=./src ./tests/unit/test_predict_unittest.py -v -s
diff --git a/components/ml-prediction-ms/train/ExampleSample_train_data_s1.xlsx b/components/ml-prediction-ms/train/ExampleSample_train_data_s1.xlsx
new file mode 100755
index 00000000..ffac8c80
--- /dev/null
+++ b/components/ml-prediction-ms/train/ExampleSample_train_data_s1.xlsx
Binary files differ
diff --git a/components/ml-prediction-ms/train/ExampleSample_train_data_s1_smaller.xlsx b/components/ml-prediction-ms/train/ExampleSample_train_data_s1_smaller.xlsx
new file mode 100755
index 00000000..2ca30a17
--- /dev/null
+++ b/components/ml-prediction-ms/train/ExampleSample_train_data_s1_smaller.xlsx
Binary files differ
diff --git a/components/ml-prediction-ms/train/ParseTrain.py b/components/ml-prediction-ms/train/ParseTrain.py
new file mode 100755
index 00000000..d3aa321c
--- /dev/null
+++ b/components/ml-prediction-ms/train/ParseTrain.py
@@ -0,0 +1,336 @@
+#!/usr/bin/env python3
+# ============LICENSE_START=======================================================
+# ml-prediction-ms
+# ================================================================================
+# Copyright (C) 2023 Wipro Limited
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+
+
+
+import json
+import time
+import requests
+import logging
+import numpy as np
+import pandas as pd
+import configparser
+
+from requests.auth import HTTPBasicAuth
+from math import sqrt
+from numpy import concatenate
+from matplotlib import pyplot
+from pandas import DataFrame, read_csv, read_excel
+from pandas import concat
+from sklearn.preprocessing import MinMaxScaler
+from sklearn.preprocessing import LabelEncoder
+from sklearn.metrics import mean_squared_error
+from keras.layers import Dense, LSTM
+from keras.models import Sequential
+from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
+
+
# Create and configure logger; filemode='w' truncates the log on every run.
logging.basicConfig(filename="ml-prediction-ms-Training.log",
                    format='%(asctime)s %(message)s',
                    filemode='w')
# Creating an object
logger = logging.getLogger()


# Read the service endpoints from the micro-service configuration file.
config = configparser.ConfigParser()
# readfp() was deprecated since Python 3.2 and removed in 3.12; read_file()
# is the supported replacement, and the context manager closes the handle
# (the original left the file open).
with open(r'ml-prediction-ms.config') as _config_file:
    config.read_file(_config_file)

# Endpoints used to start/stop the PM-data simulation and fetch its output.
pathToStartPMData = config.get('PM DATA TOPICS', 'PathToStartPMData')
pathToStopPMData = config.get('PM DATA TOPICS', 'PathToStopPMData')
pathToGetData = config.get('PM DATA TOPICS', 'PathToGetData')

# Endpoint serving per-cell/per-slice configuration (maxNumberOfConns).
pathToGetConfigData = config.get('CONFIG DATA TOPICS', 'PathToGetConfigData')
+
def Parser(cells_data_list,data_dic,features, slice_name):

    """
    Parse one VES measValuesList entry into the accumulating data dictionary.

    For every cell in ``cells_data_list`` this fetches the currently configured
    ``maxNumberOfConns`` for (cell, slice) over HTTP, appends each reported PM
    counter value under the key ``<cellId>_<counterName>``, and maintains the
    per-cell configured/predicted connection-count columns.

    Args:
        cells_data_list: List of per-cell dicts, each with 'measObjInstId'
            (cell id) and 'measResults' ([{'p': index, 'sValue': value}, ...]).
        data_dic: Dictionary of key -> list of values accumulated so far;
            mutated in place and also returned.
        features: Measurement-type names; indexes 1..len map to 'p' in
            measResults. Assumes features[3]/features[4] are the appended
            '_maxNumberOfConns.configured'/'.predicted' names — TODO confirm
            the caller always extends the list this way.
        slice_name: Slice identifier used in the config-data URL.

    Returns:
        data_dic: The (mutated) accumulator dictionary.

    Raises:
        Nothing — any failure is caught and logged, and the partially updated
        dictionary is returned.
    """
    try:

        for cells_data in cells_data_list:
            cell_id =cells_data['measObjInstId']
            # Fetch the configured maxNumberOfConns for this (cell, slice).
            # NOTE(review): no status-code check; a non-JSON error body would
            # raise and abort parsing of the remaining cells.
            response = requests.get(pathToGetConfigData + cell_id + '/snssai/'+ slice_name)
            config_data=response.json()['maxNumberOfConns']
            print(config_data)
            results= cells_data['measResults']

            # Append each reported counter value under '<cellId>_<counter>'
            # (the '-<sliceId>' suffix is stripped from the feature name).
            for result in results:
                p=int(result['p'])
                value=int(result['sValue'])
                key = cell_id +'_'+features[p-1].split('-')[0]
                if key not in data_dic:
                    data_dic[key]=[value]
                else:
                    data_dic[key].append(value)

            # j == 3: configured column, j == 4: predicted column. On first
            # sight of a key both start from the configured value.
            for j in range(3,5):
                key = cell_id +features[j]
                if key not in data_dic:
                    data_dic[key]=[config_data]
                elif j==3:
                    data_dic[key].append(config_data)
                elif j==4:
                    # Relative change in the setup-failure ratio between the
                    # last two samples; requires >= 2 accumulated values
                    # (guaranteed here because the first sample took the
                    # 'key not in data_dic' branch above).
                    change = (
                        data_dic[cell_id + "_SM.PDUSessionSetupFail.0"][-1]
                        / data_dic[cell_id + "_SM.PDUSessionSetupReq.01"][-1]
                        - data_dic[cell_id + "_SM.PDUSessionSetupFail.0"][-2]
                        / data_dic[cell_id + "_SM.PDUSessionSetupReq.01"][-2]
                    )
                    # Dummy prediction: scale the configured value by the change.
                    data_dic[key].append(change*config_data+config_data)
    except Exception as e:
        logger.error("Error in Parser Slices Cells Data:\n%s" % e)

    return data_dic
+
+
+
+
def GetSlicesCellsData():
    """
    Drive the PM-data simulation for the configured slices and cells.

    Calls the start-simulation endpoint, waits two hours while the data is
    generated, then calls the stop endpoint and pauses briefly before the
    parsing stage takes over.

    Args:
        none: none.

    Returns:
        none: none

    Raises:
        Nothing — endpoint failures are caught and logged.
    """
    try:
        # Kick off data generation via the start topic.
        start_response = requests.post(pathToStartPMData, verify=False)
        print(start_response)
    except Exception as e:
        logger.error("Error while Start topic to generated data:\n%s" % e)

    # Give the simulator two hours to produce data.
    time.sleep(3600 * 2)

    try:
        # Tell the simulator to stop producing data.
        stop_response = requests.post(pathToStopPMData, verify=False)
        print(stop_response)
    except Exception as e:
        logger.error("Error while Stop topic on Data Generation:\n%s" % e)

    time.sleep(10)
+
def ProcessSlicesCellsData():
    """
    Drain the PM-data topic, parse every message and write train/test CSVs.

    Polls ``pathToGetData`` until it returns an empty list, feeding each VES
    message through Parser() to accumulate per-cell counters, then stacks the
    per-cell 5-column groups on top of each other and splits the result 90/10
    into Train_Data.csv and Test_Data.csv.

    Args:
        none: none.

    Returns:
        none: none

    Raises:
        Nothing — failures are caught and logged.
    """
    data_dic = {}  # Final parsed data accumulates here

    try:
        # Keep polling the topic; it returns [] once drained, ending the loop.
        while True:
            response = requests.get(pathToGetData, verify=False)
            data = response.json()
            print(data)

            if data == []:
                break

            for datum in data:
                sub_data = json.loads(datum)
                sub_data = sub_data['event']['perf3gppFields']['measDataCollection']['measInfoList'][0]
                features = sub_data['measTypes']['sMeasTypesList']
                features.extend(['_maxNumberOfConns.configured', '_maxNumberOfConns.predicted'])
                cells_data_list = sub_data['measValuesList']
                slice_name = features[0].split('.')[2]
                data_dic = Parser(cells_data_list, data_dic, features, slice_name)  # calling the parser method

            time.sleep(1)

        data_df = pd.DataFrame(data_dic)

        # Strip the leading '<cellId>_' so every cell shares the same 5 column
        # names; this is what lets the per-cell groups be stacked below.
        data_df.columns = [i.split('_')[1] for i in data_df.columns]
        print(data_df.columns)

        # Append all the cells one after the other for training.
        # DataFrame.append was removed in pandas 2.0, so build with pd.concat;
        # 'result' is also initialized up front so it is always bound even when
        # the DataFrame has no columns.
        result = pd.DataFrame(columns=data_df.columns[:5])
        for x in range(0, len(data_df.columns), 5):
            temp_df = data_df.iloc[:, x:x + 5]
            result = pd.concat([result, temp_df], ignore_index=True)

        # 90/10 chronological train/test split.
        test_data = result.iloc[int(0.9 * len(result)):, :]
        train_data = result.iloc[:int(0.9 * len(result)), :]
        train_data.to_csv('Train_Data.csv', index=0)
        test_data.to_csv('Test_Data.csv', index=0)
    except Exception as e:
        logger.error("Error while Process Slices Cells Data:\n%s" % e)
+
+
+
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """
    Frame a (multivariate) time series as a supervised-learning dataset.

    Builds lagged input columns var<j>(t-n_in) .. var<j>(t-1) and forecast
    columns var<j>(t) .. var<j>(t+n_out-1) by shifting the input frame.

    Args:
        data: 2-D array-like (rows = time steps) or a list of observations.
        n_in: Input window size — number of lag steps carried as inputs.
        n_out: Number of future time steps emitted as forecast targets.
        dropnan: Drop rows containing NaNs introduced by the shifting.

    Returns:
        agg: DataFrame with (n_in + n_out) * n_vars columns named
            'var<j>(t-i)' / 'var<j>(t)' / 'var<j>(t+i)'.

    Raises:
        Whatever pandas raises on malformed input. (The original wrapped the
        body in try/except that logged the error and then returned the
        undefined name 'agg', masking every failure as a NameError.)
    """
    n_vars = 1 if type(data) is list else data.shape[1]
    df = DataFrame(data)
    cols, names = [], []

    # input sequence (t-n_in, ..., t-1)
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
        names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]

    # forecast sequence (t, t+1, ..., t+n_out-1)
    for i in range(0, n_out):
        cols.append(df.shift(-i))
        if i == 0:
            names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
        else:
            names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]

    # put it all together
    agg = concat(cols, axis=1)
    agg.columns = names

    # drop rows with NaN values introduced at the edges by the shifts
    if dropnan:
        agg.dropna(inplace=True)

    return agg
+
def Train():
    """
    Train the LSTM slice-prediction model on the prepared Train_Data.csv.

    Reads the training CSV, scales it to [-1, 1], reframes it as a supervised
    problem with a look-back window of 4 via series_to_supervised(), trains a
    small LSTM checkpointing the best weights (lowest validation loss) to
    best_model.h5, then plots the train/validation loss curves.

    Args:
        none: none.

    Returns:
        none: none

    Raises:
        Nothing — any failure is caught and logged.
    """
    try:
        # Training data will be read here and will be concatenated here to
        # form 5 columns only (depends on how the training data is provided).
        file_name = "Train_Data.csv"
        value = pd.read_csv(file_name)
        print(value)

        scaler = MinMaxScaler(feature_range=(-1, 1))
        scaled = scaler.fit_transform(value)
        window_size = 4
        reframed = series_to_supervised(scaled, window_size, 1)
        value = reframed.values

        # Set the range of train, validation and test data (75/15/10).
        train = value[:int(0.75 * len(value)), :]
        val = value[int(0.75 * len(value)):int(0.90 * len(value)), :]
        test = value[int(0.90 * len(value)):, :]
        columns = scaled.shape[1]

        # Divide the data into input and output for the model to learn:
        # inputs are the full lag window plus the current row's leading
        # columns, the target is the last column.
        train_X, train_y = train[:, :window_size * scaled.shape[1] + scaled.shape[1] - 1], train[:, -1]
        val_X, val_y = val[:, :window_size * scaled.shape[1] + scaled.shape[1] - 1], val[:, -1]
        test_X, test_y = test[:, :window_size * scaled.shape[1] + scaled.shape[1] - 1], test[:, -1]

        # Reshape to (samples, timesteps, features) as the LSTM layer expects.
        train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
        val_X = val_X.reshape((val_X.shape[0], 1, val_X.shape[1]))
        test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))

        print(val_X.shape, val_y.shape, test_y.shape)

        # Defining the model block here
        model = Sequential()
        model.add(LSTM(20, input_shape=(train_X.shape[1], train_X.shape[2])))
        model.add(Dense(1))

        checkpoint = ModelCheckpoint('best_model.h5', verbose=1, monitor='val_loss', save_best_only=True, mode='auto')
        model.compile(optimizer='adam', loss='mean_squared_error')

        history = model.fit(train_X, train_y, epochs=250, batch_size=512,
                            callbacks=[checkpoint],
                            validation_data=(val_X, val_y),
                            verbose=2, shuffle=False)  # epochs>2k

        # The ModelCheckpoint callback above has already written the best
        # weights to best_model.h5. The original `best_model.save(...)` call
        # referenced an undefined name, so the function always aborted here
        # (silently, via the except below) and never reached the plots.

        # plot history
        pyplot.plot(history.history['loss'], label='train')
        pyplot.plot(history.history['val_loss'], label='test')
        pyplot.legend()
        pyplot.show()

    except Exception as e:
        logger.error("Error during ML Training Process:\n%s" % e)
+
if __name__ == "__main__":
    try:
        logger.info(": starting to get the Simulated slices cells data")
        # Offline training pipeline: simulate PM data, parse it into
        # train/test CSVs, then fit the LSTM model.
        GetSlicesCellsData()
        ProcessSlicesCellsData()
        Train()
    except Exception as e:
        logger.error("Error while starting to get data for ML Training Process:\n%s" % e)
+
diff --git a/components/ml-prediction-ms/version.properties b/components/ml-prediction-ms/version.properties
new file mode 100755
index 00000000..8a7e10a2
--- /dev/null
+++ b/components/ml-prediction-ms/version.properties
@@ -0,0 +1,26 @@
+###############################################################################
+# ============LICENSE_START=======================================================
+# ml-prediction-ms
+# ================================================================================
+# Copyright (C) 2023 Wipro Limited
+# ==============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+###############################################################################
+major=1
+minor=0
+patch=0
+base_version=${major}.${minor}.${patch}
+release_version=${base_version}
+snapshot_version=${base_version}-SNAPSHOT