From 93d9a28ba317475083a367d040ab606d6576fb03 Mon Sep 17 00:00:00 2001 From: sendil kumar Jaya kumar Date: Thu, 3 Mar 2022 15:15:05 +0530 Subject: Add ml-prediction-ms with basic build infrastruct. Added offline training code, along with ml slice prediction model, The main ml-prediction-ms component code changes are added to perform prediction on single slice. Issue-ID: DCAEGEN2-3067 Signed-off-by: sendil kumar Jaya kumar Change-Id: Ia2e50dcbd97d03a96884a2bf5417edbf2fa04b4a --- components/ml-prediction-ms/.gitignore | 100 +++ components/ml-prediction-ms/Changelog.md | 9 + components/ml-prediction-ms/Dockerfile | 47 ++ components/ml-prediction-ms/LICENSE.txt | 18 + components/ml-prediction-ms/README.md | 114 ++++ components/ml-prediction-ms/conftest.py | 18 + components/ml-prediction-ms/docker-compose.yaml | 69 +++ components/ml-prediction-ms/log_config.yaml | 45 ++ .../ml-prediction-ms/ml-prediction-ms.config | 37 ++ components/ml-prediction-ms/model/best_model.h5 | Bin 0 -> 72704 bytes components/ml-prediction-ms/pom.xml | 247 ++++++++ components/ml-prediction-ms/pyproject.toml | 6 + components/ml-prediction-ms/requirements.txt | 29 + components/ml-prediction-ms/setup.py | 30 + components/ml-prediction-ms/src/__init__.py | 24 + components/ml-prediction-ms/src/run.py | 680 +++++++++++++++++++++ components/ml-prediction-ms/tests/__init__.py | 22 + components/ml-prediction-ms/tests/unit/__init__.py | 18 + components/ml-prediction-ms/tests/unit/sample.json | 1 + components/ml-prediction-ms/tests/unit/test.xlsx | Bin 0 -> 18133 bytes .../tests/unit/test_predict_unittest.py | 232 +++++++ components/ml-prediction-ms/tox.ini | 41 ++ .../train/ExampleSample_train_data_s1.xlsx | Bin 0 -> 3773850 bytes .../train/ExampleSample_train_data_s1_smaller.xlsx | Bin 0 -> 5921 bytes components/ml-prediction-ms/train/ParseTrain.py | 336 ++++++++++ components/ml-prediction-ms/version.properties | 26 + 26 files changed, 2149 insertions(+) create mode 100644 components/ml-prediction-ms/.gitignore create mode 100755 components/ml-prediction-ms/Changelog.md create mode 100755 components/ml-prediction-ms/Dockerfile create mode 100755 components/ml-prediction-ms/LICENSE.txt create mode 100755 components/ml-prediction-ms/README.md create mode 100755 components/ml-prediction-ms/conftest.py create mode 100755 components/ml-prediction-ms/docker-compose.yaml create mode 100755 components/ml-prediction-ms/log_config.yaml create mode 100755 components/ml-prediction-ms/ml-prediction-ms.config create mode 100755 components/ml-prediction-ms/model/best_model.h5 create mode 100755 components/ml-prediction-ms/pom.xml create mode 100755 components/ml-prediction-ms/pyproject.toml create mode 100755 components/ml-prediction-ms/requirements.txt create mode 100755 components/ml-prediction-ms/setup.py create mode 100755 components/ml-prediction-ms/src/__init__.py create mode 100755 components/ml-prediction-ms/src/run.py create mode 100755 components/ml-prediction-ms/tests/__init__.py create mode 100755 components/ml-prediction-ms/tests/unit/__init__.py create mode 100644 components/ml-prediction-ms/tests/unit/sample.json create mode 100755 components/ml-prediction-ms/tests/unit/test.xlsx create mode 100755 components/ml-prediction-ms/tests/unit/test_predict_unittest.py create mode 100755 components/ml-prediction-ms/tox.ini create mode 100755 components/ml-prediction-ms/train/ExampleSample_train_data_s1.xlsx create mode 100755 components/ml-prediction-ms/train/ExampleSample_train_data_s1_smaller.xlsx create mode 100755 
components/ml-prediction-ms/train/ParseTrain.py create mode 100755 components/ml-prediction-ms/version.properties diff --git a/components/ml-prediction-ms/.gitignore b/components/ml-prediction-ms/.gitignore new file mode 100644 index 00000000..98ba212b --- /dev/null +++ b/components/ml-prediction-ms/.gitignore @@ -0,0 +1,100 @@ +mvn-phase-lib.sh +tox-local.ini +*.wgn +.pytest_cache/ +xunit-results.xml +.DS_Store +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +venv-tox/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*,cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# IPython Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# dotenv +.env + +# virtualenv +venv/ +ENV/ + +# Spyder project settings +.spyderproject + +# Rope project settings +.ropeproject + +# Test report +xunit-reports +coverage-reports diff --git a/components/ml-prediction-ms/Changelog.md b/components/ml-prediction-ms/Changelog.md new file mode 100755 index 00000000..25bb5351 --- /dev/null +++ b/components/ml-prediction-ms/Changelog.md @@ -0,0 +1,9 @@ +# Change Log +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/) +and this project adheres to [Semantic Versioning](http://semver.org/). + +## [1.0.0] - 2022/03/03 + +- [DCAEGEN2-3067](https://jira.onap.org/browse/DCAEGEN2-3067) - Added Training and Prediction code for ml-prediction-ms diff --git a/components/ml-prediction-ms/Dockerfile b/components/ml-prediction-ms/Dockerfile new file mode 100755 index 00000000..f2233a05 --- /dev/null +++ b/components/ml-prediction-ms/Dockerfile @@ -0,0 +1,47 @@ +# LICENSE_START======================================================= +# ml-prediction-ms +# ================================================================================ +# Copyright (C) 2023 Wipro Limited +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============LICENSE_END=========================================================
+
+
+
+FROM nexus3.onap.org:10001/onap/integration-python:10.1.0 as build
+
+USER root
+RUN set -eux; \
+    apk add \
+        build-base \
+        python3-dev
+
+ARG user=onap
+ARG group=onap
+USER $user
+
+FROM python
+
+ADD requirements.txt requirements.txt
+RUN pip3 install -r ./requirements.txt
+
+ADD src src
+ADD model model
+ADD ml-prediction-ms.config ml-prediction-ms.config
+ADD tests tests
+
+EXPOSE 5000
+
+RUN chmod 755 /src/run.py
+
+CMD ["/src/run.py"]
diff --git a/components/ml-prediction-ms/LICENSE.txt b/components/ml-prediction-ms/LICENSE.txt
new file mode 100755
index 00000000..766adc61
--- /dev/null
+++ b/components/ml-prediction-ms/LICENSE.txt
@@ -0,0 +1,18 @@
+# ============LICENSE_START=======================================================
+# ml-prediction-ms
+# ================================================================================
+# Copyright (C) 2023 Wipro Limited
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
diff --git a/components/ml-prediction-ms/README.md b/components/ml-prediction-ms/README.md
new file mode 100755
index 00000000..11a50aab
--- /dev/null
+++ b/components/ml-prediction-ms/README.md
@@ -0,0 +1,114 @@
+# LICENSE_START=======================================================
+# ml-prediction-ms
+# ================================================================================
+# Copyright (C) 2023 Wipro Limited
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+This project trains an ML model to predict the 'maxNumberOfConns' value for each cell. The model is trained against 'Performance Measurement' (PM) data. The trained model is later used by ml-prediction-ms to predict the real-time configuration of cells and slices.
+
+We use a stacked LSTM model to predict the slice and cell configurations.
+
+Assumption
+----------
+The training module is offline code; it is not part of the ml-prediction-ms runtime.
+
+A pretrained model file is shared along with the source code at /ml-prediction-ms/model/best_model.h5; this is a generic trained model file that can be used for live prediction.
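+
+As a quick sanity check that the packaged model loads in your environment, a minimal sketch is shown below. It assumes the TensorFlow/Keras stack from requirements.txt; the input shape (4 past time instances x 5 features plus the 4 current-time features) is only an assumption derived from the window size used in src/run.py, and the random input is purely illustrative.
+
+    # Illustrative only: load the shipped model and run one dummy prediction.
+    import numpy as np
+    from tensorflow.keras.models import load_model
+
+    model = load_model("model/best_model.h5", compile=False)   # same call as src/run.py
+    sample = np.random.rand(1, 1, 24)   # [samples, timesteps, features]; 24 = 4*5 + 4 (assumed)
+    print(model.predict(sample))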
+
+However, if you choose to retrain the model, please refer to Wiki step 1: "ML Offline Training" for the steps to train the machine learning model:
+
+https://wiki.onap.org/display/DW/Smart+Intent+Guarantee+based+on+Closed-loop+-+ML+MS+Enhancements
+
+The offline training code is located at /ml-prediction-ms/train/ParseTrain.py. This training code consumes and prepares the training data from the prerequisite RANSim topics and ConfigDb listed below.
+
+A sample training file, prepared using the RANSim topics and the ConfigDb application below, is shared at
+/services/components/ml-prediction-ms/train/ExampleSample_train_data_s1
+
+Prerequisite topics to run both the prediction module and the training code:
+-------------------------------------------------------------
+1. The training module requires the following RANSim topics. To set up RANSim, refer to
+   https://wiki.onap.org/pages/viewpage.action?pageId=93002871
+
+   -'http://<>:8081/ransim/api/GenerateIntelligentSlicingPmData' - Start data generation.
+   -'http://<>:8081/ransim/api/stopIntelligentSlicingPmData' - Stop data generation.
+   -'http://message-router:3904/events/unauthenticated.PERFORMANCE_MEASUREMENTS/mlms-cg/mlms-cid' - To receive the PM data (3 feature vectors). If there is any error, check and restart the code.
+
+   ConfigDb application reference: https://wiki.onap.org/plugins/servlet/mobile?contentId=1015829#content/view/93002873
+   -'http://<>:8086/api/sdnc-config-db/v4/nrcellcu-configdata/'+cell_id+'/snssai/01-B989BD' - To get the saved config details (for the 4th and 5th features).
+
+   Please refer to this wiki link for the configuration and relevant component deployment:
+   https://wiki.onap.org/display/DW/Smart+Intent+Guarantee+based+on+Closed-loop
+
+
+   Please note:
+   - As ConfigDb is an ONAP service, the <> above should be replaced with the corresponding service name or IP address in the ml-prediction-ms.config file.
+   - As RANSim is an ONAP service, the <> above should be replaced with the corresponding service name or IP address in the ml-prediction-ms.config file.
+   - In standalone mode, the IP address and port number of the message-router service must be set.
+
+
+
+
+
+HOW TO TRAIN:
+-------------
+To complete the training process we need at least 0.1 million samples of PM data, gathered from one slice in the simulation environment. In the simulation environment, data for all cells are generated every 3 seconds, whereas in the actual environment data for each time instance are generated every 15 minutes. We therefore used the RAN PM data simulation environment to generate the data needed for training. The predicted value required for each time instance of a cell is synthetically generated and used for supervised learning.
+
+To run the training module using the RAN PM data simulation,
+move to the 'ml-prediction-ms' folder and then run: python3 train/ParseTrain.py
+
+The module acquires the training data from the RAN PM data simulation. Once the data are acquired via the topics, the module performs training.
+
+However, if the data are acquired from a real-time RAN environment where each sample is generated every 15 minutes, adjust the sleep time at line 146 in ParseTrain.py so that 0.1 million samples (counted across all cells and slices) are generated.
+
+#Wait for 2 hrs for data generation
+time.sleep(3600*2)
+
+To let the ML model acquire a large amount of slice and cell data in a short duration,
+we changed the configuration to generate time-series data for all slices and cells every 300 ms instead of every 10 seconds.
+This is done to shorten the training period.
+
+Further reference: https://wiki.onap.org/display/DW/Smart+Intent+Guarantee+based+on+Closed-loop+-+ML+MS+Enhancements
+contains the design details and requirements for training.
+
+
+
+HOW TO LAUNCH Prediction:
+-------------------------
+To run prediction, move to the 'ml-prediction-ms' folder and then run: python3 src/run.py
+
+HOW TO LAUNCH Training:
+To run training, move to the 'ml-prediction-ms' folder and then run: python3 train/ParseTrain.py on the command line.
+
+
+Training approach followed to build the machine learning model:
+1. The data are acquired in offline mode.
+2. Data acquisition for training takes around 4-5 hours to collect enough samples.
+3. The time-series data generated by the RAN Simulator on the PERFORMANCE_MEASUREMENTS topic have an interval of one second between two time instances.
+4. After data acquisition, the model takes around 15 minutes to run 250 epochs with a batch size of 512.
+5. The training module automatically selects the best hyperparameters for the generated machine learning model.
+
+Functionality of the training module:
+1. Get Slices/Cell Data: The module starts and stops the topics to acquire the data.
+The topics should be configured to generate data for a single slice only; multiple slices will be supported in the future. However, there is no limitation on the number of cells.
+2. Process Slices/Cells Data: The data are acquired from the DMaaP topic, then preprocessed and parsed. The 5th feature, i.e. 'Predicted_maxNumberOfConns', is generated synthetically based on the percentage increase in the 'Failure/Total sessions requested' ratio.
+3. The model is trained on a single slice, so as part of the training strategy all cells are appended one after the other.
+- This gives the data in the form of a single cell.
+- This increases the number of training samples.
+
+
+The Train function performs the following activities:
+1. The data are divided into 2 categories: a) a training set (used for training) and b) test data (used for test/validation). After this step, the data are ready to train a model.
+2. Train: This takes the training set created in the last step, normalizes it using MinMaxScaler, and then the 'series_to_supervised' method converts the time series into a forecast series to be fed to the LSTM.
+3. After some more pre-processing, the data are fed to a Keras sequential model that contains 20 stacked LSTM layers and 1 dense layer at the end (see the illustrative sketch below).
+4. The best model is selected at each checkpoint based on the validation loss and archived to be used for predictions.
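+
+The following is a minimal, illustrative sketch of the training pipeline described above: MinMaxScaler normalisation, 'series_to_supervised' framing, a stacked-LSTM Keras model with a dense output layer, and checkpointing on validation loss. It is not the exact train/ParseTrain.py implementation; the layer width, loss function, window size and placeholder input data are assumptions made only for illustration.
+
+    # Illustrative sketch only -- not the exact ParseTrain.py code.
+    import numpy as np
+    import pandas as pd
+    from sklearn.preprocessing import MinMaxScaler
+    from tensorflow.keras.models import Sequential
+    from tensorflow.keras.layers import LSTM, Dense
+    from tensorflow.keras.callbacks import ModelCheckpoint
+
+    WINDOW = 4       # past time instances fed to the model (assumed)
+    N_FEATURES = 5   # 3 PM counters + configured + predicted maxNumberOfConns
+
+    def series_to_supervised(values, n_in=WINDOW):
+        """Frame a multivariate time series as (t-n .. t-1, t) supervised rows."""
+        df = pd.DataFrame(values)
+        cols = [df.shift(i) for i in range(n_in, 0, -1)] + [df]
+        return pd.concat(cols, axis=1).dropna().values
+
+    # Placeholder for the parsed PM data of one cell: rows = time instances, columns = features.
+    data = np.random.rand(1000, N_FEATURES)
+    scaled = MinMaxScaler(feature_range=(-1, 1)).fit_transform(data)
+    framed = series_to_supervised(scaled)
+
+    X = framed[:, :WINDOW * N_FEATURES + N_FEATURES - 1]   # history plus current-time features
+    y = framed[:, -1]                                      # target: predicted maxNumberOfConns
+    X = X.reshape((X.shape[0], 1, X.shape[1]))             # LSTM expects [samples, timesteps, features]
+
+    model = Sequential()
+    for _ in range(19):                                    # stacked LSTM layers (the README describes 20)
+        model.add(LSTM(50, return_sequences=True))
+    model.add(LSTM(50))                                    # final LSTM returns a vector
+    model.add(Dense(1))                                    # single predicted value
+    model.compile(loss="mae", optimizer="adam")
+
+    # Keep only the best checkpoint, judged on validation loss, as the archived model.
+    ckpt = ModelCheckpoint("best_model.h5", monitor="val_loss", save_best_only=True)
+    model.fit(X, y, epochs=250, batch_size=512, validation_split=0.2, callbacks=[ckpt])
+
+In the real pipeline the placeholder array would be replaced by the parsed PM and ConfigDb features of each cell, appended cell after cell as described above.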
diff --git a/components/ml-prediction-ms/conftest.py b/components/ml-prediction-ms/conftest.py new file mode 100755 index 00000000..9870baf2 --- /dev/null +++ b/components/ml-prediction-ms/conftest.py @@ -0,0 +1,18 @@ +# ===================LICENSE_START======================================================= +# ml-prediction-ms +# ================================================================================ +# Copyright (C) 2023 Wipro Limited +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= + diff --git a/components/ml-prediction-ms/docker-compose.yaml b/components/ml-prediction-ms/docker-compose.yaml new file mode 100755 index 00000000..fcdaa628 --- /dev/null +++ b/components/ml-prediction-ms/docker-compose.yaml @@ -0,0 +1,69 @@ +# ============LICENSE_START======================================================= +# ml-prediction-ms +# ================================================================================ +# Copyright (C) 2020 Wipro Limited. +# ============================================================================== +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============LICENSE_END========================================================= + +version: '2.4' +networks: + ml-prediction-ms-default: + driver: bridge + driver_opts: + com.docker.network.driver.mtu: 1400 +services: + ### if kafka is not required comment out zookeeper and kafka ### + zookeeper: + image: confluentinc/cp-zookeeper:6.2.1 + container_name: zookeeper + ports: + - '2181:2181' + environment: + ZOOKEEPER_CLIENT_PORT: 2181 + networks: + - ml-prediction-ms-default + + kafka: + image: confluentinc/cp-kafka:6.2.1 + container_name: kafka + ports: + - "19092:19092" + depends_on: + - zookeeper + environment: + KAFKA_BROKER_ID: 1 + KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092 + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONNECTIONS_FROM_HOST:PLAINTEXT + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + volumes: + - /var/run/docker.sock:/var/run/docker.sock + depends_on: + - zookeeper + networks: + - ml-prediction-ms-default + + ml-prediction-ms: + container_name: ml-prediction-ms + build: + context: ./ + ports: + - "5000:5000" + environment: + - DEBUG=True + depends_on: + - zookeeper + - kafka + networks: + - ml-prediction-ms-default diff --git a/components/ml-prediction-ms/log_config.yaml b/components/ml-prediction-ms/log_config.yaml new file mode 100755 index 00000000..a129113e --- /dev/null +++ b/components/ml-prediction-ms/log_config.yaml @@ -0,0 +1,45 @@ +# ============LICENSE_START======================================================= +# ml-prediction-ms +# ================================================================================ +# Copyright (C) 2023 Wipro Limited. +# ============================================================================== +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============LICENSE_END========================================================= + + +version: 1 + +disable_existing_loggers: true + +loggers: + onap_logger: + level: INFO + handlers: [onap_log_handler, stdout_handler] + propagate: false +handlers: + onap_log_handler: + class: logging.handlers.RotatingFileHandler + filename: /var/log/ONAP/dcaegen2/services/ml-prediction-ms/application.log + mode: a + maxBytes: 10000000 + backupCount: 10 + formatter: mdcFormatter + stdout_handler: + class: logging.StreamHandler + formatter: mdcFormatter +formatters: + mdcFormatter: + format: '%(asctime)s - %(levelname)s - %(levelno)s - %(process)d - %(name)s | %(message)s' + mdcfmt: '{ServiceName} | {RequestID} | {InvocationID}' + datefmt: '%Y-%m-%dT%H:%M:%S%z' + (): onaplogging.mdcformatter.MDCFormatter diff --git a/components/ml-prediction-ms/ml-prediction-ms.config b/components/ml-prediction-ms/ml-prediction-ms.config new file mode 100755 index 00000000..8673540c --- /dev/null +++ b/components/ml-prediction-ms/ml-prediction-ms.config @@ -0,0 +1,37 @@ +# ===================LICENSE_START======================================================= +# ml-prediction-ms +# ================================================================================ +# Copyright (C) 2023 Wipro Limited +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= + + +[PM DATA TOPICS] +PathToStartPMData = http://<>:8081/ransim/api/GenerateIntelligentSlicingPmData +PathToStopPMData = http://<>:8081/ransim/api/stopIntelligentSlicingPmData +#Verify Topic Name +#http://10.31.4.44:3904/events/unauthenticated.PERFORMANCE_MEASUREMENTS/mlms-cg/mlms-cid +GetDataTopic = PERFORMANCE_MEASUREMENTS + + +[CONFIG DATA TOPICS] +PathToGetConfigData = http://10.31.4.45:8086/execute/11/get-nrcellcu-configdata + +[POLICY UPDATE] +GetPolicyTopic = ML_RESPONSE_TOPIC +#Verify Topic Name +#PathToDmapTopic = http://10.31.4.44:3904/events/unauthenticated.ML_RESPONSE_TOPIC + +[SERVER NAME] +ServerName = cucpserver1 diff --git a/components/ml-prediction-ms/model/best_model.h5 b/components/ml-prediction-ms/model/best_model.h5 new file mode 100755 index 00000000..af26e5d0 Binary files /dev/null and b/components/ml-prediction-ms/model/best_model.h5 differ diff --git a/components/ml-prediction-ms/pom.xml b/components/ml-prediction-ms/pom.xml new file mode 100755 index 00000000..0140e245 --- /dev/null +++ b/components/ml-prediction-ms/pom.xml @@ -0,0 +1,247 @@ + + + + 4.0.0 + + + org.onap.oparent + oparent + 3.0.0 + + + org.onap.dcaegen2.services + ml-prediction-ms + dcaegen2-services-ml-prediction-ms + 1.0.0-SNAPSHOT + + UTF-8 + . 
+ xunit-results.xml + coverage.xml + xunit-results.xml + py + python + **/*.py + target/**,tests/**,setup.py,**/__init__.py + 1.2.1 + + + ${project.artifactId}-${project.version} + + + + + org.sonatype.plugins + nexus-staging-maven-plugin + + true + + + + org.apache.maven.plugins + maven-deploy-plugin + + + true + + + + org.apache.maven.plugins + maven-enforcer-plugin + + true + + + + + + org.apache.maven.plugins + maven-resources-plugin + + true + + + + + org.apache.maven.plugins + maven-compiler-plugin + + true + + + + + org.apache.maven.plugins + maven-jar-plugin + + + default-jar + + + + + + org.apache.maven.plugins + maven-install-plugin + + true + + + + + org.apache.maven.plugins + maven-surefire-plugin + + true + + + + org.codehaus.mojo + exec-maven-plugin + ${exec-mvn-plugin-version} + + + + ${project.groupId} + ${project.artifactId} + ${project.version} + ${onap.nexus.url} + ${onap.nexus.rawrepo.baseurl.upload} + ${onap.nexus.rawrepo.baseurl.download} + ${onap.nexus.rawrepo.serverid} + ${onap.nexus.dockerregistry.daily} + ${onap.nexus.dockerregistry.daily} + ${onap.nexus.dockerregistry.release} + + + + + + + + + org.codehaus.mojo + exec-maven-plugin + ${exec-mvn-plugin-version} + + + clean phase script + clean + + exec + + + + __ + clean + + + + + generate-sources script + generate-sources + + exec + + + + __ + generate-sources + + + + + compile script + compile + + exec + + + + __ + compile + + + + + package script + package + + exec + + + + __ + package + + + + + test script + test + + exec + + + + __ + test + + + + + install script + install + + exec + + + + __ + install + + + + + deploy script + deploy + + exec + + + + __ + deploy + + + + + + + + diff --git a/components/ml-prediction-ms/pyproject.toml b/components/ml-prediction-ms/pyproject.toml new file mode 100755 index 00000000..bf54bd47 --- /dev/null +++ b/components/ml-prediction-ms/pyproject.toml @@ -0,0 +1,6 @@ +[build-system] +requires = [ + "setuptools >= 35.0.2", + "setuptools_scm >= 2.0.0, <3" +] +build-backend = "setuptools.build_meta" diff --git a/components/ml-prediction-ms/requirements.txt b/components/ml-prediction-ms/requirements.txt new file mode 100755 index 00000000..57e60f78 --- /dev/null +++ b/components/ml-prediction-ms/requirements.txt @@ -0,0 +1,29 @@ + # requirements.txt + # + # installs dependencies from ./setup.py, and the package itself, + # in editable mode + # -e . + + # (the -e above is optional). you could also just install the package + # normally with just the line below (after uncommenting) + + flask==1.1.2 + requests==2.24.0 + responses==0.16.0 + pandas==1.1.5 + tensorflow + scikit-learn + scipy + matplotlib + pytest-cov + xlrd + mock==4.0.3 + openpyxl==3.0.10 + configparser + pyyaml + pytest + requests-mock + numpy==1.23.1 + confluent_kafka + + diff --git a/components/ml-prediction-ms/setup.py b/components/ml-prediction-ms/setup.py new file mode 100755 index 00000000..1db0a537 --- /dev/null +++ b/components/ml-prediction-ms/setup.py @@ -0,0 +1,30 @@ +# LICENSE_START======================================================= +# ml-prediction-ms +# ================================================================================ +# Copyright (C) 2023 Wipro Limited +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= + +from setuptools import setup, find_packages + +setup( + name="ml-prediction-ms", + version="1.0.0", + author="sendil.kumar@wipro.com", + author_email="sendil.kumar@wipro.com", + license='Apache 2', + description="Slice Intelligence Machine Learning Prediction", + url="https://gerrit.onap.org/r/gitweb?p=dcaegen2/services.git;a=tree;f=components;hb=HEAD", + packages=find_packages() +) diff --git a/components/ml-prediction-ms/src/__init__.py b/components/ml-prediction-ms/src/__init__.py new file mode 100755 index 00000000..321d9d7a --- /dev/null +++ b/components/ml-prediction-ms/src/__init__.py @@ -0,0 +1,24 @@ +# ==============LICENSE_START===================================================== +# ml-prediction-ms +# ================================================================================ +# Copyright © 2017 AT&T Intellectual Property. All rights reserved. +# Copyright (C) 2023 Wipro Limited +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= + +# empty __init__.py so that pytest can add correct path to coverage report, +# -- per pytest best practice guideline + +import pathlib, sys +sys.path.append(str(pathlib.Path(__file__).parent)) diff --git a/components/ml-prediction-ms/src/run.py b/components/ml-prediction-ms/src/run.py new file mode 100755 index 00000000..752355d8 --- /dev/null +++ b/components/ml-prediction-ms/src/run.py @@ -0,0 +1,680 @@ +#!/usr/bin/env python3 +# ============LICENSE_START======================================================= +# ml-prediction-ms +# ================================================================================ +# Copyright (C) 2023 Wipro Limited +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============LICENSE_END========================================================= + +import os +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'} + +import pandas as pd +from pandas import DataFrame, read_excel,concat +from tensorflow.keras.models import load_model +from sklearn.preprocessing import MinMaxScaler +from numpy import concatenate +from datetime import datetime +import json +import time +import requests +from requests.auth import HTTPBasicAuth +import logging +import configparser +import uuid + +from confluent_kafka import Consumer +from confluent_kafka import Producer +import socket + +global window_size +window_size=4 + +# Create and configure logger +import io, os, sys +try: + # Python 3, open as binary, then wrap in a TextIOWrapper with write-through. + sys.stdout = io.TextIOWrapper(open(sys.stdout.fileno(), 'wb', 0), write_through=True) + # If flushing on newlines is sufficient, as of 3.7 you can instead just call: + sys.stdout.reconfigure(line_buffering=True) +except TypeError: + # older version of Python + pass + + + +logging.basicConfig(filename="IntelligentSliceMl.log", + format='%(asctime)s - %(levelname)s - %(levelno)s - %(process)d - %(name)s - %(message)s', + filemode='w') + +# Creating an object +logger = logging.getLogger('ml_ms_prediction') +logger.setLevel(logging.DEBUG) + +logger.info("traceID-%s : Start Prediction", str(uuid.uuid4())[:8]) + +class Config: + + def __init__(self): + config = configparser.ConfigParser() + config.readfp(open(r'ml-prediction-ms.config')) + + self.pathToStartPMData = config.get('PM DATA TOPICS', 'PathToStartPMData') + self.pathToStopPMData = config.get('PM DATA TOPICS', 'PathToStopPMData') + self.getDataTopic = config.get('PM DATA TOPICS', 'GetDataTopic') + + self.pathToGetConfigData = config.get('CONFIG DATA TOPICS', 'PathToGetConfigData') + self.getPolicyTopic = config.get('POLICY UPDATE', 'GetPolicyTopic') + + self.serverName = config.get('SERVER NAME', 'ServerName') + + def get_pathToStartPMData(self): + return self.pathToStartPMData + + def get_pathToStopPMData(self): + return self.pathToStopPMData + + def get_DataTopic(self): + return self.getDataTopic + + def get_pathToGetConfigData(self): + return self.pathToGetConfigData + + def get_PolicyTopic(self): + return self.getPolicyTopic + + def get_serverName(self): + return self.serverName + +class Parser: + + def __init__(self): + self.Config_Object=Config() + + def Data_Parser(self, data_val,data_dic,features, slice_name): + """ + Perform Data Parser + READ THE ACTUAL PARAMETERS FROM THE topic MESSAGE AND ADDS IT INTO A DICTIONARY + Args: + cells_data_list: Cell data list object + data_dic: The Parsed data on cell data contained in dictionary + features: Data featurs (PM Metrics) + slice_name : Slice name + Returns: + data_dic: none + Raises: + RuntimeError: Error while Process Slices Cells Data. 
+ """ + try: + len_data_val=len(data_val) + for i in range(len_data_val): + cell_id = data_val[i]['measObjInstId'] + response = requests.get(self.Config_Object.get_pathToGetConfigData() + cell_id + '/snssai/'+ slice_name, timeout=3) + config_data=response.json()['maxNumberOfConns'] + + results= data_val[i]['measResults'] + len_results=len(results) + for j in range(len_results): + p=int(results[j]['p']) + value=int(results[j]['sValue']) + key = slice_name+'_'+cell_id +'_'+features[p-1].split('-')[0] + if key not in data_dic: + data_dic[key]=[value] + else: + data_dic[key].append(value) + #We are normalising the prediction, we are have the actual prediction starting from the 5th time instance + #so for the first 4 time instances we are generating synthetic data generation for prediction result + #this is done as a softmax correction, essential for better accuracy + #After the first 4 time instances the predicted values are used and and taken forward. + for j in range(3,5): + key = slice_name+'_'+cell_id +features[j] + if key not in data_dic: + data_dic[key]=[config_data] + elif j==3: + data_dic[key].append(config_data) + elif j==4: + change = ( + data_dic[slice_name + "_" + cell_id + "_SM.PDUSessionSetupFail.0"][-1] + / data_dic[slice_name + "_" + cell_id + "_SM.PDUSessionSetupReq.01"][-1] + - data_dic[slice_name + "_" + cell_id + "_SM.PDUSessionSetupFail.0"][-2] + / data_dic[slice_name + "_" + cell_id + "_SM.PDUSessionSetupReq.01"][-2] + ) + data_dic[key].append(change*config_data+config_data) + except Exception as e: + logger.error("traceID-%s Error in Parser Slices Cells Data:\n%s", str(uuid.uuid4())[:8], e) + except requests.Timeout as error: + logger.critical("traceId-%s Timeout from Get CONFIG DATA topic :\n%s",str(uuid.uuid4())[:8], error) + return data_dic + +class Prediction: + + # Time Series Prediction using the LSTM Model and appplies the Logic to give the final predicted output + modelfile = 'model/best_model.h5' + model= load_model(modelfile, compile=False) + + def __init__(self): + self.Config_Object=Config() + + def IsPolicyUpdate_url_Exist(self): + """ + Get the status of the Policy response topic by checking its execution status for unit test module. + + Args: + none: none + Returns: + status:Bool status of Topic + + """ + status = True + try: + Post_Message ={} + json_object = json.dumps(Post_Message, indent = 4) + conf = {'bootstrap.servers': "kafka:9092",'client.id': socket.gethostname()} + + producer = Producer(conf) + producer.produce(self.Config_Object.get_PolicyTopic(), value=json_object.encode('utf-8')) + producer.poll(1) + except Exception as e: + status = False + except requests.Timeout as error: + status = False + return status + + def series_to_supervised(self, data, n_in=1, n_out=1, dropnan=True): + """ + Convert the timeseries into Forecast series + Args: + data: Input time series data to be processed + n_in: Input window size for time series to carry previous nth time instance value. + n_out: output future n time instance value to be forecasted against the timeseries + dropnan : Flag to drop nan values + + Returns: + agg (list): Aggregated list of past time series inputs as per the input window and the time series of the predicted future time instance. + + Raises: + RuntimeError: Error Pre Processing Slices Cells Data. + """ + try: + n_vars = 1 if type(data) is list else data.shape[1] + df = DataFrame(data) + cols, names = [],[] + # input sequence (t-n, ... 
t-1) + for i in range(n_in, 0, -1): + cols.append(df.shift(i)) + names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)] + # forecast sequence (t, t+1, ... t+n) + for i in range(0, n_out): + cols.append(df.shift(-i)) + if i == 0: + names += [('var%d(t)' % (j+1)) for j in range(n_vars)] + else: + names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)] + # put it all together + agg = concat(cols, axis=1) + agg.columns = names + + # drop rows with NaN values + if dropnan: + agg.dropna(inplace=True) + + except Exception as e: + logger.error("traceID-%s Error Pre Processing Slices Cells Data f:\n%s",str(uuid.uuid4())[:8], e) + return agg + + def Predict_Model(self, test): + """ + Does the Actual Prediction on the Input data + + Args: + test: Input data to model with current and last 4 time instances (window_size) + Returns: + inv_yhat= A 2-D list with predicted results. + Raises: + RuntimeError: Error in Prediction. + """ + + inv_yhat =[] + + try: + scaler = MinMaxScaler(feature_range=(-1, 1)) + scaled = scaler.fit_transform(test) + reframed = self.series_to_supervised(scaled,window_size, 1) + test=reframed.values + test_X= test[:, :window_size*scaled.shape[1] + scaled.shape[1]-1] + test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1])) + yhat = self.model.predict(test_X) + test_X = test_X.reshape((test_X.shape[0], test_X.shape[2])) + inv_yhat = concatenate((test_X[:,-4:],yhat), axis=1) + inv_yhat = scaler.inverse_transform(inv_yhat) + except Exception as e: + logger.critical("traceId-%s Error in Prediction:\n%s",str(uuid.uuid4())[:8], e) + return inv_yhat + + def Logic(self, Prev_Thresh,Current_Thresh): + """ + Post prediction, Applies the post processing Logic i.e + (+-)10% cap on Slice config and (+-)25% on cell config against the predicted values + + Args: + Prev_Thresh: List of Previous Configuration('maxumberOfConns') as read from config DB + Current_Thresh: List of Predicted Configuration('maxumberOfConns') + Returns: + Final_Pred_Val= List of Final Configuration('maxumberOfConns') + Raises: + RuntimeError: Error in Prediction. 
+ """ + try: + Sum_Prev_Thresh=sum(Prev_Thresh) + Sum_Pred_Thresh= sum(Current_Thresh) + Global_change= abs(sum(Current_Thresh)-sum(Prev_Thresh))/sum(Prev_Thresh) + #logger.info('Global_change',Global_change) + Final_Pred_Val=[] + Percent_Change=[] + sum_Pred_thresh_change=0 + # Rule 1 is applied to compute cell based Min /Max (-25%, 25%) + len_Prev_Thresh=len(Prev_Thresh) + for cell_instance, prev_t in enumerate(Prev_Thresh): + if (Current_Thresh[cell_instance]-prev_t)/prev_t > 0.25: + Rule_based_Percent = 0.25 # rule bases total percentage + elif (Current_Thresh[cell_instance]-prev_t)/prev_t <-0.25: + Rule_based_Percent = -0.25 + else: + Rule_based_Percent=(Current_Thresh[cell_instance]-prev_t)/prev_t + + Percent_Change.append(Rule_based_Percent) + # predicted sum of threshold change for all cells + sum_Pred_thresh_change=sum_Pred_thresh_change+Rule_based_Percent + + + if Global_change <= 0.10: + for cell_instance, prev_t in enumerate(Prev_Thresh): + Final_Pred_Val.append(prev_t+prev_t*Percent_Change[cell_instance]) + else: + #Rule 2 - to distribut global threshold to all cells based on only 10% increase in slice + Thresh_Rule_2 = [] + extra = 0.1*Sum_Prev_Thresh + + for i in range(len_Prev_Thresh): + new_val = Prev_Thresh[i]+extra*Percent_Change[i]/abs(sum_Pred_thresh_change) + if abs(extra*Percent_Change[i]/abs(sum_Pred_thresh_change))> abs(Percent_Change[i]*Prev_Thresh[i]): + new_val = Prev_Thresh[i]+Prev_Thresh[i]*Percent_Change[i] + Final_Pred_Val.append(new_val) + except Exception as e: + logger.error("traceId-%s Error in Post_Prediction_Logic:\n%s", str(uuid.uuid4())[:8], e) + return Final_Pred_Val + + def acked(err, msg): + """ + Function to format the error in case of exception being None + + Args: + err: Exception object + msg: Error message + Returns: + None: None + + """ + if err is not None: + logger.error("traceId-%s Failed to deliver message: %s",str(uuid.uuid4())[:8], str(err)) + else: + logger.info('traceId-%s %s',str(uuid.uuid4())[:8], (str(msg))) + + + def Final_Post_Method(self, Predicted_Results, Previous_Results, slices, server_name): + """ + Posts the final predicted output (Final Output of the ML Service) + Args: + Predicted_Results: Contains Predicted results w.r.t the cell ID + Previous_Results: Contains Previous Configured values w.r.t the cell ID + slices: Slice name + Server_name: contains server name + Returns: + status: True on post with content success, False on post failure. 
+ Raises: + RuntimeError: Error Posting the Final Output + """ + status = True + try: + Post_Message ={} + Post_Message["snssai"]= slices + Post_Message['data'] = [] + #for server in server_info.keys(): + temp_dict_1 = {} + temp_dict_1['gNBCUName'] = server_name + temp_dict_1['cellCUList'] = [] + for key, Predicted_Result in Predicted_Results.items(): + temp_dict_2={} + temp_dict_2['cellLocalId'] = key + temp_dict_2['configData'] = {} + temp_dict_2['configData']['maxNumberofConns'] = int(Previous_Results[key]) + temp_dict_2['configData']['predictedMaxNumberofConns'] = int(Predicted_Result) + now = datetime.now() + dt_string = now.strftime("%d/%m/%Y %H:%M:%S") + temp_dict_2['configData']['lastUpdatedTS'] = dt_string + temp_dict_1['cellCUList'].append(temp_dict_2) + Post_Message['data'].append(temp_dict_1) + + json_object = json.dumps(Post_Message, indent = 4) + response = [] + + try: + conf = {'bootstrap.servers': "kafka:9092",'client.id': socket.gethostname()} + producer = Producer(conf) + producer.produce(self.Config_Object.get_PolicyTopic(), value=json_object.encode('utf-8')) + + producer.poll(1) + + except requests.Timeout as error: + status = False + logger.error("traceId-%s Posting the Final Output To Dmap Topic:\n%s",str(uuid.uuid4())[:8], error) + #print(response) + logger.info('traceId-%s %s',str(uuid.uuid4())[:8], Post_Message) + except Exception as e: + status = False + logger.error("traceId-%s Posting the Final Output:\n%s",str(uuid.uuid4())[:8], e) + + return status + +class Controller: + + def __init__(self): + self.data_dic={} + self.Parser_Object=Parser() + self.Predict_Object=Prediction() + self.Config_Object=Config() + + + def GetData(self, consumer): + """ + Get the data generation from ransim topic on PM Data for Intelligenct slicing + + Args: + consumer: Consumer Topic instance to get the PM Data + Returns: + pm_data: Slices and cells PM data + + Raises: + RuntimeError: Error while Get Data from topic. + """ + pm_data = [] + try: + msg = consumer.poll(timeout=-1) + + if msg is None: + # Initial message consumption may take up to + # `session.timeout.ms` for the consumer group to + # rebalance and start consuming + #print("Waiting...") + logger.info("traceId-%s PM Get Data from topic Waiting...: ",str(uuid.uuid4())[:8]) + elif msg.error(): + #print("ERROR: %s".format(msg.error())) + logger.critical("traceId-%s Error while PM Get Data from topic :\n%s",str(uuid.uuid4())[:8], msg.error()) + else: + # Extract the (optional) key and value, and print. + #print('Received message: {}'.format(msg.value().decode('utf-8'))) + + pm_data = msg.value().decode('utf-8') + + + except Exception as e: + logger.critical("traceId-%s Error while Get Data from topic :\n%s",str(uuid.uuid4())[:8], e) + pm_data = [] + except requests.Timeout as error: + logger.critical("traceId-%s Timeout from Get Data topic :\n%s",str(uuid.uuid4())[:8], error) + pm_data = [] + return pm_data + + def simulatedTestDataToReplaceTopic(self): + """ + Simulate the test PM data and simulate required time seriess data and performs Predcition. + This function help to quickly check Tensorflow the environment, + that is used in the prediction and Training process. + + Args: + none: none + Returns: + status: Execution status on predcition task + + Raises: + RuntimeError: Error while executing prediction task. 
+ """ + + #with pd.ExcelFile('test.xlsx', engine="openpyxl") as excel: + # df = pd.read_excel(excel) + status = True + + try: + df = pd.read_excel('tests/unit/test.xlsx', engine='openpyxl') + + new_columns1=[] + serverName = self.Config_Object.get_serverName() + #new_columns2=[] + len_dfcolumns=len(df.columns) + for i in range(len_dfcolumns): + new_columns1.append('01-B989BD_'+df.columns[i]) + df.columns=new_columns1 + slice_name=df.columns[0].split('.')[0] + data_df=pd.DataFrame() + len_df=len(df) + for i in range(len_df-1): + temp_df=df.iloc[[i]] + data_df=data_df.append(temp_df) + # parse pm data + configured data + predicted dummy data(=configured data- to be changed after pred) + if len(data_df)=window_size+1: + results=pd.DataFrame(self.data_dic) + #print("Predicted Results:",results) + dateTimeObj = datetime.now() + timestampStr = dateTimeObj.strftime("%d-%b-%Y-%H-%M-%S") + #print('Current Timestamp : ', timestampStr) + results.to_csv(timestampStr+"predicted.csv", index=0) + except Exception as e: + logger.critical("traceId-%s Error while Preprocessing data during prediction :\n%s",str(uuid.uuid4())[:8], e) + status = False + + return status + + + def Execute(self): + """ + Executes workflow of task methods to get data from topics, then performs preprocessng and Prediction. + + Args: + none: none + Returns: + none: none + + Raises: + RuntimeError: Error during Prediction start process. + """ + status = True + bExecute = True + pm_data = [] + try: + conf = {'bootstrap.servers': "kafka:9092",'group.id': "1",'auto.offset.reset': 'smallest'} + + consumer = Consumer(conf) + consumer.subscribe(([self.Config_Object.get_DataTopic(), -1])) + + while bExecute: + #self.StartDataGeneration() + #time.sleep(15) + + #self.StopDataGeneration() + #time.sleep(5) + while True: + pm_data = self.GetData(consumer) + + if pm_data==[]: + # Delay for 1 minute (60 seconds) + #time.sleep(60) + break + + self.PreprocessAndPredict(pm_data) + + + except Exception as e: + logger.critical("traceId-%s Error during Prediction start process f:\n%s",str(uuid.uuid4())[:8], e) + status = False + + return status + + +if __name__ == "__main__": + try: + + time.sleep(60) + logger.info("traceId-%s : Start Prediction",str(uuid.uuid4())[:8]) + Controller_Object = Controller() + Controller_Object.Execute() + #unit test code + #Controller_Object.simulatedTestDataToReplaceTopic() + except Exception as e: + logger.critical("traceId-%s Error onStart Prediction Process:\n%s",str(uuid.uuid4())[:8], e) + diff --git a/components/ml-prediction-ms/tests/__init__.py b/components/ml-prediction-ms/tests/__init__.py new file mode 100755 index 00000000..e778f70b --- /dev/null +++ b/components/ml-prediction-ms/tests/__init__.py @@ -0,0 +1,22 @@ +# ==============LICENSE_START===================================================== +# ml-prediction-ms +# ================================================================================ +# Copyright © 2017 AT&T Intellectual Property. All rights reserved. +# Copyright (C) 2023 Wipro Limited +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= + +# empty __init__.py so that pytest can add correct path to coverage report, +# -- per pytest best practice guideline + diff --git a/components/ml-prediction-ms/tests/unit/__init__.py b/components/ml-prediction-ms/tests/unit/__init__.py new file mode 100755 index 00000000..db09c74a --- /dev/null +++ b/components/ml-prediction-ms/tests/unit/__init__.py @@ -0,0 +1,18 @@ +# ==============LICENSE_START===================================================== +# ml-prediction-ms +# ================================================================================ +# Copyright © 2017 AT&T Intellectual Property. All rights reserved. +# Copyright (C) 2023 Wipro Limited +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============LICENSE_END========================================================= diff --git a/components/ml-prediction-ms/tests/unit/sample.json b/components/ml-prediction-ms/tests/unit/sample.json new file mode 100644 index 00000000..aa99be7d --- /dev/null +++ b/components/ml-prediction-ms/tests/unit/sample.json @@ -0,0 +1 @@ +["{\"event\":{\"commonEventHeader\":{\"domain\":\"perf3gpp\",\"eventId\":\"f4009916-3c1a-4d72-920d-a76ad5201590\",\"sequence\":0,\"eventName\":\"perf3gpp_RnNode-Slicing_pmMeasResult\",\"sourceName\":\"cucpserver1\",\"reportingEntityName\":\"\",\"priority\":\"Normal\",\"startEpochMicrosec\":1644487518572,\"lastEpochMicrosec\":1644487518572,\"version\":\"4.0\",\"vesEventListenerVersion\":\"7.1\",\"timeZoneOffset\":\"UTC+05:30\"},\"perf3gppFields\":{\"perf3gppFieldsVersion\":\"1.0\",\"measDataCollection\":{\"granularityPeriod\":900,\"measuredEntityUserName\":\"\",\"measuredEntityDn\":\"cucpserver1\",\"measuredEntitySoftwareVersion\":\"r0.1\",\"measInfoList\":[{\"measInfoId\":{\"sMeasInfoId\":\"measInfoIsVal\"},\"measTypes\":{\"sMeasTypesList\":[\"SM.PDUSessionSetupReq.01-B989BD\",\"SM.PDUSessionSetupSucc.01-B989BD\",\"SM.PDUSessionSetupFail.0\"]},\"measValuesList\":[{\"measObjInstId\":\"13999\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"398\"},{\"p\":2,\"sValue\":\"283\"}]},{\"measObjInstId\":\"14000\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"433\"},{\"p\":2,\"sValue\":\"265\"}]},{\"measObjInstId\":\"15155\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"912\"},{\"p\":2,\"sValue\":\"637\"}]},{\"measObjInstId\":\"15174\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"865\"},{\"p\":2,\"sValue\":\"529\"}]},{\"measObjInstId\":\"15175\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"959\"},{\"p\":2,\"sValue\":\"595\"}]},{\"measObjInstId\":\"15176\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"257\"},{\"p\":2,\"sValue\":\"167\"}]},{\"measObjInstId\":\"15289\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"933\"},{\"p\":2,\"sValue\":\"627\"}]},{\"measObjInstId\":\"15290\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"825\"},{\"p\":2,\"sValue\":\"536\"}]},{\"measObjInstId\":\"15296\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"856\"},{\"p\":2,\"sValue\":\"571\"}]},{\"measObjInstId\":\"15825\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"422\"},{\"p\":2,\"sValue\":\"291\"}]},{\"measObjInstId\":\"15826\",\"suspectFlag\":\"false\",\"measResults\":[{\"p\":1,\"sValue\":\"412\"},{\"p\":2,\"sValue\":\"303\"}]}]}]}}}}"] diff --git a/components/ml-prediction-ms/tests/unit/test.xlsx b/components/ml-prediction-ms/tests/unit/test.xlsx new file mode 100755 index 00000000..ecde30c3 Binary files /dev/null and b/components/ml-prediction-ms/tests/unit/test.xlsx differ diff --git a/components/ml-prediction-ms/tests/unit/test_predict_unittest.py b/components/ml-prediction-ms/tests/unit/test_predict_unittest.py new file mode 100755 index 00000000..004893cf --- /dev/null +++ b/components/ml-prediction-ms/tests/unit/test_predict_unittest.py @@ -0,0 +1,232 @@ +# ============LICENSE_START======================================================= +# ml-prediction-ms +# ================================================================================ +# Copyright (C) 2023 Wipro Limited +# ================================================================================ +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= + +import pytest + +from src.run import Parser, Prediction, Controller + +import unittest + +import requests +import responses + +from unittest import TestCase +from unittest import mock +from mock import patch # for Python >= 3.3 use unittest.mock + +import pandas as pd +import numpy as np +from pandas import DataFrame, read_csv, read_excel +from pandas import concat +from tensorflow.keras.models import load_model +from sklearn.preprocessing import MinMaxScaler +from numpy import concatenate +from sklearn.metrics import mean_squared_error +from math import sqrt +from datetime import datetime +import json +import time +import requests +from requests.auth import HTTPBasicAuth + +from confluent_kafka import Consumer +from confluent_kafka import Producer +import socket + +import requests_mock +from mock import patch + +# This method will be used by the mock to replace requests.get +def mocked_requests_get(*args, **kwargs): + class MockResponse: + def __init__(self, json_data, status_code): + self.json_data = json_data + self.status_code = status_code + + def json(self): + return self.json_data + + return MockResponse({"key1": "value1"}, 200) + +# Our test case class +class ControllerTestCase(unittest.TestCase): + + # We patch 'requests.get' with our own method. The mock object is passed in to our test case method. + @mock.patch('requests.get', side_effect=mocked_requests_get) + def test_GetData(self, mock_get): + status = True + # Assert requests.get calls + ctl = Controller() + conf = {'bootstrap.servers': "kafka:9092",'group.id': "1",'auto.offset.reset': 'smallest'} + + consumer = Consumer(conf) + consumer.subscribe(([ctl.Config_Object.get_DataTopic(), 1])) + + # We can even assert that our mocked method was called with the right parameters + msg = consumer.poll(1) + #json_data = msg.value().decode('utf-8') + + #assert len(msg) != 0, "the list is non empty" + assert status != False + + + def test_simulatedTestDataToReplaceTopic(self): + self.Controller_Object = Controller() + status = self.Controller_Object.simulatedTestDataToReplaceTopic() + + assert status != False + + def test_PreprocessAndPredict(self): + ctl = Controller() + + # Opening JSON file + f = open('tests/unit/sample.json',) + + # returns JSON object as + # a dictionary + json_data = json.load(f) + + status = ctl.PreprocessAndPredict(json_data) + assert status != False + + +# This method will be used by the mock to replace requests.POST +def mocked_requests_post(*args, **kwargs): + class MockResponse: + def __init__(self, json_data, status_code): + self.json_data = json_data + self.status_code = status_code + + def json(self): + return self.json_data + + return MockResponse({"key1": "value1"}, 200) + + + #return MockResponse(None, 404) + +# Our test case class +class PredictionTestCase(unittest.TestCase): + + # We patch 'requests.get' with our own method. The mock object is passed in to our test case method. 
+ @mock.patch('requests.post', side_effect=mocked_requests_post) + def test_IsPolicyUpdate_url_Exist(self, mock_post): + # Assert requests.post calls + pred = Prediction() + status = pred.IsPolicyUpdate_url_Exist() + + assert status == True, "Failed" + + + +class TestPredict(unittest.TestCase): + + def test_Parser(self): + Controller_Object = Controller() + + conf = {'bootstrap.servers': "kafka:9092",'group.id': "1",'auto.offset.reset': 'smallest'} + consumer = Consumer(conf) + consumer.subscribe(([self.Config_Object.get_DataTopic(), -1])) + + pm_data = Controller_Object.GetData(consumer) + + Parser_Object = Parser() + data_dic={} + + status = False + + len_pm_data=len(pm_data) + for i in range(len_pm_data): + temp_data=json.loads(pm_data[i]) + sub_data = temp_data['event']['perf3gppFields']['measDataCollection']['measInfoList'][0] + server_name = temp_data['event']['perf3gppFields']['measDataCollection']['measuredEntityDn'] + + features=sub_data['measTypes']['sMeasTypesList'] + features.extend(['_maxNumberOfConns.configured', '_maxNumberOfConns.predicted']) + slice_name=features[0].split('.')[2] + data_val= sub_data['measValuesList'] + data_dic= Parser_Object.Data_Parser(data_val,data_dic,features,slice_name) + data_df=pd.DataFrame(data_dic) + + if len(data_df)2k + # plot history + best_model.save("best_model.h5") + #print(history.history.keys()) + pyplot.plot(history.history['loss'], label='train') + pyplot.plot(history.history['val_loss'], label='test') + pyplot.legend() + pyplot.show() + + + except Exception as e: + logger.error("Error during ML Training Process:\n%s" % e) + +if __name__ == "__main__": + try: + logger.info(": starting to get the Simulated slices cells data") + GetSlicesCellsData() + ProcessSlicesCellsData() + Train() + except Exception as e: + logger.error("Error while starting to get data for ML Training Process:\n%s" % e) + diff --git a/components/ml-prediction-ms/version.properties b/components/ml-prediction-ms/version.properties new file mode 100755 index 00000000..8a7e10a2 --- /dev/null +++ b/components/ml-prediction-ms/version.properties @@ -0,0 +1,26 @@ +############################################################################### +# ============LICENSE_START======================================================= +# ml-prediction-ms +# ================================================================================ +# Copyright (C) 2023 Wipro Limited +# ============================================================================== +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +# +############################################################################### +major=1 +minor=0 +patch=0 +base_version=${major}.${minor}.${patch} +release_version=${base_version} +snapshot_version=${base_version}-SNAPSHOT -- cgit 1.2.3-korg