Diffstat (limited to 'miss_htbt_service/mod')
-rw-r--r-- (was -rwxr-xr-x)  miss_htbt_service/mod/__init__.py             |   0
-rw-r--r-- (was -rwxr-xr-x)  miss_htbt_service/mod/trapd_exit.py           |   0
-rw-r--r-- (was -rwxr-xr-x)  miss_htbt_service/mod/trapd_get_cbs_config.py |   3
-rw-r--r-- (was -rwxr-xr-x)  miss_htbt_service/mod/trapd_http_session.py   |   0
-rw-r--r-- (was -rwxr-xr-x)  miss_htbt_service/mod/trapd_io.py             | 624
-rw-r--r-- (was -rwxr-xr-x)  miss_htbt_service/mod/trapd_runtime_pid.py    |   0
-rw-r--r-- (was -rwxr-xr-x)  miss_htbt_service/mod/trapd_settings.py       |   0
-rw-r--r--                   miss_htbt_service/mod/trapd_vnf_table.py      | 106
8 files changed, 419 insertions, 314 deletions
diff --git a/miss_htbt_service/mod/__init__.py b/miss_htbt_service/mod/__init__.py
index 1875bf6..1875bf6 100755..100644
--- a/miss_htbt_service/mod/__init__.py
+++ b/miss_htbt_service/mod/__init__.py
diff --git a/miss_htbt_service/mod/trapd_exit.py b/miss_htbt_service/mod/trapd_exit.py
index 6247f4b..6247f4b 100755..100644
--- a/miss_htbt_service/mod/trapd_exit.py
+++ b/miss_htbt_service/mod/trapd_exit.py
diff --git a/miss_htbt_service/mod/trapd_get_cbs_config.py b/miss_htbt_service/mod/trapd_get_cbs_config.py
index c108107..d2b615f 100755..100644
--- a/miss_htbt_service/mod/trapd_get_cbs_config.py
+++ b/miss_htbt_service/mod/trapd_get_cbs_config.py
@@ -33,7 +33,6 @@ import string
import time
import traceback
import collections
-
import mod.trapd_settings as tds
from onap_dcae_cbs_docker_client.client import get_config
from mod.trapd_exit import cleanup,cleanup_and_exit
@@ -92,7 +91,7 @@ def get_cbs_config():
msg = "Unable to load CBS_HTBT_JSON " + _cbs_sim_json_file + \
" (invalid json?) - FATAL ERROR, exiting"
stdout_logger(msg)
- cleanup_and_exit(1,None)
+ cleanup_and_exit(0,None)
# recalc timeout, set default if not present
try:
diff --git a/miss_htbt_service/mod/trapd_http_session.py b/miss_htbt_service/mod/trapd_http_session.py
index b34c19d..b34c19d 100755..100644
--- a/miss_htbt_service/mod/trapd_http_session.py
+++ b/miss_htbt_service/mod/trapd_http_session.py
diff --git a/miss_htbt_service/mod/trapd_io.py b/miss_htbt_service/mod/trapd_io.py
index c89eaa3..1c40346 100755..100644
--- a/miss_htbt_service/mod/trapd_io.py
+++ b/miss_htbt_service/mod/trapd_io.py
@@ -36,7 +36,6 @@ import string
import time
import traceback
import unicodedata
-
# dcae_snmptrap
import mod.trapd_settings as tds
from mod.trapd_exit import cleanup_and_exit
@@ -49,327 +48,328 @@ prog_name = os.path.basename(__file__)
# # # # # # # # # # ## # # # # # # #
-def roll_all_logs():
- """
- roll all active logs to timestamped version, open new one
- based on frequency defined in files.roll_frequency
- """
-
- # first roll all the eelf files
- # NOTE: this will go away when onap logging is standardized/available
- try:
- # open various ecomp logs - if any fails, exit
- for fd in [tds.eelf_error_fd, tds.eelf_debug_fd, tds.eelf_audit_fd,
- tds.eelf_metrics_fd, tds.arriving_traps_fd, tds.json_traps_fd]:
- fd.close()
-
- roll_file(tds.eelf_error_file_name)
- roll_file(tds.eelf_debug_file_name)
- roll_file(tds.eelf_audit_file_name)
- roll_file(tds.eelf_metrics_file_name)
-
- except Exception as e:
- msg = "Error closing logs: " + str(e)
- stdout_logger(msg)
- cleanup_and_exit(1, tds.pid_file_name)
-
- reopened_successfully = open_eelf_logs()
- if not reopened_successfully:
- msg = "Error re-opening EELF logs during roll-over to timestamped versions - EXITING"
- stdout_logger(msg)
- cleanup_and_exit(1, tds.pid_file_name)
-
- # json log
- roll_file(tds.json_traps_filename)
-
- try:
- tds.json_traps_fd = open_file(tds.json_traps_filename)
- except Exception as e:
- msg = ("Error opening json_log %s : %s" %
- (json_traps_filename, str(e)))
- stdout_logger(msg)
- cleanup_and_exit(1, tds.pid_file_name)
-
- # arriving trap log
- roll_file(tds.arriving_traps_filename)
-
- try:
- tds.arriving_traps_fd = open_file(tds.arriving_traps_filename)
- except Exception as e:
- msg = ("Error opening arriving traps %s : %s" %
- (arriving_traps_filename, str(e)))
- stdout_logger(msg)
- cleanup_and_exit(1, tds.pid_file_name)
-
-
+#def roll_all_logs():
+# """
+# roll all active logs to timestamped version, open new one
+# based on frequency defined in files.roll_frequency
+# """
+#
+# # first roll all the eelf files
+# # NOTE: this will go away when onap logging is standardized/available
+# try:
+# # open various ecomp logs - if any fails, exit
+# for fd in [tds.eelf_error_fd, tds.eelf_debug_fd, tds.eelf_audit_fd,
+# tds.eelf_metrics_fd, tds.arriving_traps_fd, tds.json_traps_fd]:
+# fd.close()
+#
+# roll_file(tds.eelf_error_file_name)
+# roll_file(tds.eelf_debug_file_name)
+# roll_file(tds.eelf_audit_file_name)
+# roll_file(tds.eelf_metrics_file_name)
+#
+# except Exception as e:
+# msg = "Error closing logs: " + str(e)
+# stdout_logger(msg)
+# cleanup_and_exit(1, tds.pid_file_name)
+#
+# reopened_successfully = open_eelf_logs()
+# if not reopened_successfully:
+# msg = "Error re-opening EELF logs during roll-over to timestamped versions - EXITING"
+# stdout_logger(msg)
+# cleanup_and_exit(1, tds.pid_file_name)
+#
+# # json log
+# roll_file(tds.json_traps_filename)
+
+## try:
+# tds.json_traps_fd = open_file(tds.json_traps_filename)
+# except Exception as e:
+# msg = ("Error opening json_log %s : %s" %
+# (json_traps_filename, str(e)))
+# stdout_logger(msg)
+# cleanup_and_exit(1, tds.pid_file_name)
+#
+# # arriving trap log
+# roll_file(tds.arriving_traps_filename)
+#
+# try:
+# tds.arriving_traps_fd = open_file(tds.arriving_traps_filename)
+# except Exception as e:
+# msg = ("Error opening arriving traps %s : %s" %
+# (arriving_traps_filename, str(e)))
+# stdout_logger(msg)
+# cleanup_and_exit(1, tds.pid_file_name)
+#
+#
# # # # # # # # # # # # # # # # # # #
# fx: setup_ecomp_logs -> log in eelf format until standard
# is released for python via LOG-161
# # # # # # # # # # ## # # # # # # #
-def open_eelf_logs():
- """
- open various (multiple ???) logs
- """
-
- try:
- # open various ecomp logs - if any fails, exit
-
- tds.eelf_error_file_name = (
- tds.c_config['files.eelf_base_dir'] + "/" + tds.c_config['files.eelf_error'])
- tds.eelf_error_fd = open_file(tds.eelf_error_file_name)
-
- except Exception as e:
- msg = "Error opening eelf error log : " + str(e)
- stdout_logger(msg)
- cleanup_and_exit(1, tds.pid_file_name)
-
- try:
- tds.eelf_debug_file_name = (
- tds.c_config['files.eelf_base_dir'] + "/" + tds.c_config['files.eelf_debug'])
- tds.eelf_debug_fd = open_file(tds.eelf_debug_file_name)
-
- except Exception as e:
- msg = "Error opening eelf debug log : " + str(e)
- stdout_logger(msg)
- cleanup_and_exit(1, tds.pid_file_name)
-
- try:
- tds.eelf_audit_file_name = (
- tds.c_config['files.eelf_base_dir'] + "/" + tds.c_config['files.eelf_audit'])
- tds.eelf_audit_fd = open_file(tds.eelf_audit_file_name)
- except Exception as e:
- msg = "Error opening eelf audit log : " + str(e)
- stdout_logger(msg)
- cleanup_and_exit(1, tds.pid_file_name)
-
- try:
- tds.eelf_metrics_file_name = (
- tds.c_config['files.eelf_base_dir'] + "/" + tds.c_config['files.eelf_metrics'])
- tds.eelf_metrics_fd = open_file(tds.eelf_metrics_file_name)
- except Exception as e:
- msg = "Error opening eelf metric log : " + str(e)
- stdout_logger(msg)
- cleanup_and_exit(1, tds.pid_file_name)
-
- return True
-
-# # # # # # # # # # # # # # # # # # #
+#def open_eelf_logs():
+# """
+# open various (multiple ???) logs
+# """
+#
+# try:
+# # open various ecomp logs - if any fails, exit
+#
+# tds.eelf_error_file_name = (
+# tds.c_config['files.eelf_base_dir'] + "/" + tds.c_config['files.eelf_error'])
+# tds.eelf_error_fd = open_file(tds.eelf_error_file_name)
+#
+# except Exception as e:
+# msg = "Error opening eelf error log : " + str(e)
+# stdout_logger(msg)
+# cleanup_and_exit(1, tds.pid_file_name)
+#
+# try:
+# tds.eelf_debug_file_name = (
+# tds.c_config['files.eelf_base_dir'] + "/" + tds.c_config['files.eelf_debug'])
+# tds.eelf_debug_fd = open_file(tds.eelf_debug_file_name)
+#
+# except Exception as e:
+# msg = "Error opening eelf debug log : " + str(e)
+# stdout_logger(msg)
+# cleanup_and_exit(1, tds.pid_file_name)
+#
+# try:
+# tds.eelf_audit_file_name = (
+# tds.c_config['files.eelf_base_dir'] + "/" + tds.c_config['files.eelf_audit'])
+# tds.eelf_audit_fd = open_file(tds.eelf_audit_file_name)
+# except Exception as e:
+# msg = "Error opening eelf audit log : " + str(e)
+# stdout_logger(msg)
+# cleanup_and_exit(1, tds.pid_file_name)
+#
+# try:
+# tds.eelf_metrics_file_name = (
+# tds.c_config['files.eelf_base_dir'] + "/" + tds.c_config['files.eelf_metrics'])
+# tds.eelf_metrics_fd = open_file(tds.eelf_metrics_file_name)
+# except Exception as e:
+# msg = "Error opening eelf metric log : " + str(e)
+# stdout_logger(msg)
+# cleanup_and_exit(1, tds.pid_file_name)
+#
+# return True
+#
+## # # # # # # # # # # # # # # # # # #
# fx: roll_log_file -> move provided filename to timestamped version
# # # # # # # # # # ## # # # # # # #
-def roll_file(_loc_file_name):
- """
- move active file to timestamped archive
- """
-
- _file_name_suffix = "%s" % (datetime.datetime.fromtimestamp(time.time()).
- fromtimestamp(time.time()).
- strftime('%Y-%m-%dT%H:%M:%S'))
-
- _loc_file_name_bak = _loc_file_name + '.' + _file_name_suffix
-
- # roll existing file if present
- if os.path.isfile(_loc_file_name):
- try:
- os.rename(_loc_file_name, _loc_file_name_bak)
- return True
- except Exception as e:
- _msg = ("ERROR: Unable to rename %s to %s"
- % (_loc_file_name,
- _loc_file_name_bak))
- ecomp_logger(tds.LOG_TYPE_ERROR, tds.SEV_CRIT,
- tds.CODE_GENERAL, _msg)
- return False
-
- return False
-
-# # # # # # # # # # # # #
-# fx: open_log_file
-# # # # # # # # # # # # #
-
-
-def open_file(_loc_file_name):
- """
- open _loc_file_name, return file handle
- """
-
- try:
- # open append mode just in case so nothing is lost, but should be
- # non-existent file
- _loc_fd = open(_loc_file_name, 'a')
- return _loc_fd
- except Exception as e:
- msg = "Error opening " + _loc_file_name + " append mode - " + str(e)
- stdout_logger(msg)
- cleanup_and_exit(1, tds.pid_file_name)
-
-
-# # # # # # # # # # # # #
-# fx: close_file
-# # # # # # # # # # # # #
- """
- close _loc_file_name, return True with success, False otherwise
- """
-
-
-def close_file(_loc_fd, _loc_filename):
-
- try:
- _loc_fd.close()
- return True
- except Exception as e:
- msg = "Error closing %s : %s - results indeterminate" % (
- _loc_filename, str(e))
- ecomp_logger(tds.LOG_TYPE_ERROR, tds.SEV_FATAL, tds.CODE_GENERAL, msg)
- return False
-
-# # # # # # # # # # # # # # # # # # #
-# fx: ecomp_logger -> log in eelf format until standard
-# is released for python via LOG-161
-# # # # # # # # # # ## # # # # # # #
-
-def ecomp_logger(_log_type, _sev, _error_code, _msg):
- """
- Log to ecomp-style logfiles. Logs include:
-
- Note: this will be updated when https://jira.onap.org/browse/LOG-161
- is closed/available; until then, we resort to a generic format with
- valuable info in "extra=" field (?)
-
- :Parameters:
- _msg -
- :Exceptions:
- none
- :Keywords:
- eelf logging
- :Log Styles:
-
- :error.log:
-
- if CommonLogger.verbose: print("using CommonLogger.ErrorFile")
- self._logger.log(50, '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' \
- % (requestID, threadID, serviceName, partnerName, targetEntity, targetServiceName,
- errorCategory, errorCode, errorDescription, detailMessage))
-
- error.log example:
-
- 2018-02-20T07:21:34,007+00:00||MainThread|snmp_log_monitor||||FATAL|900||Tue Feb 20 07:21:11 UTC 2018 CRITICAL: [a0cae74e-160e-11e8-8f9f-0242ac110002] ALL publish attempts failed to DMAPP server: dcae-mrtr-zltcrdm5bdce1.1dff83.rdm5b.tci.att.com, topic: DCAE-COLLECTOR-UCSNMP, 339 trap(s) not published in epoch_serno range: 15191112530000 - 15191112620010
-
- :debug.log:
-
- if CommonLogger.verbose: print("using CommonLogger.DebugFile")
- self._logger.log(50, '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' \
- % (requestID, threadID, serverName, serviceName, instanceUUID, upperLogLevel,
- severity, serverIPAddress, server, IPAddress, className, timer, detailMessage))
-
- debug.log example:
-
- none available
-
- :audit.log:
-
- if CommonLogger.verbose: print("using CommonLogger.AuditFile")
- endAuditTime, endAuditMsec = self._getTime()
- if self._begTime is not None:
- d = {'begtime': self._begTime, 'begmsecs': self._begMsec, 'endtime': endAuditTime,
- 'endmsecs': endAuditMsec}
- else:
- d = {'begtime': endAuditTime, 'begmsecs': endAuditMsec, 'endtime': endAuditTime,
- 'endmsecs': endAuditMsec}
-
- self._logger.log(50, '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' \
- % (requestID, serviceInstanceID, threadID, serverName, serviceName, partnerName,
- statusCode, responseCode, responseDescription, instanceUUID, upperLogLevel,
- severity, serverIPAddress, timer, server, IPAddress, className, unused,
- processKey, customField1, customField2, customField3, customField4,
- detailMessage), extra=d)
-
-
- :metrics.log:
-
- self._logger.log(50,'%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' \
- % (requestID, serviceInstanceID, threadID, serverName, serviceName, partnerName,
- targetEntity, targetServiceName, statusCode, responseCode, responseDescription,
- instanceUUID, upperLogLevel, severity, serverIPAddress, timer, server,
- IPAddress,
- className, unused, processKey, targetVirtualEntity, customField1, customField2,
- customField3, customField4, detailMessage), extra=d)
-
- metrics.log example:
-
- none available
-
-
- """
-
- unused = ""
-
- # above were various attempts at setting time string found in other
- # libs; instead, let's keep it real:
- t_out = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S,%f")[:-3]
- calling_fx = inspect.stack()[1][3]
-
- # DLFM: this entire module is a hack to override concept of prog logging
- # written across multiple files (???), making diagnostics IMPOSSIBLE!
- # Hoping to leverage ONAP logging libraries & standards when available
-
- # catch invalid log type
- if _log_type < 1 or _log_type > 5:
- msg = ("INVALID log type: %s " % _log_type)
- _out_rec = ("%s|%s|%s|%s|%s|%s|%s|%s|%s"
- % (calling_fx, "snmptrapd", unused, unused, unused, tds.SEV_TYPES[_sev], _error_code, unused, (msg + _msg)))
- try:
- tds.eelf_error_fd.write('%s|%s\n' % (t_out, str(_out_rec)))
- except Exception as e:
- stdout_logger(str(_out_rec))
-
- return False
-
- if _sev >= tds.minimum_severity_to_log:
- # log to appropriate eelf log (different files ??)
- if _log_type == tds.LOG_TYPE_ERROR:
- _out_rec = ('%s|%s|%s|%s|%s|%s|%s|%s|%s'
- % (calling_fx, "snmptrapd", unused, unused, unused, tds.SEV_TYPES[_sev], _error_code, unused, _msg))
- try:
- tds.eelf_error_fd.write('%s|%s\n' % (t_out, str(_out_rec)))
- except Exception as e:
- stdout_logger(str(_out_rec))
- elif _log_type == tds.LOG_TYPE_AUDIT:
- # log message in AUDIT format
- _out_rec = ('%s|%s|%s|%s|%s|%s|%s|%s|%s'
- % (calling_fx, "snmptrapd", unused, unused, unused, tds.SEV_TYPES[_sev], _error_code, unused, _msg))
- try:
- tds.eelf_audit_fd.write('%s|%s\n' % (t_out, str(_out_rec)))
- except Exception as e:
- stdout_logger(str(_out_rec))
- elif _log_type == tds.LOG_TYPE_METRICS:
- # log message in METRICS format
- _out_rec = ('%s|%s|%s|%s|%s|%s|%s|%s|%s'
- % (calling_fx, "snmptrapd", unused, unused, unused, tds.SEV_TYPES[_sev], _error_code, unused, _msg))
- try:
- tds.eelf_metrics_fd.write('%s|%s\n' % (t_out, str(_out_rec)))
- except Exception as e:
- stdout_logger(str(_out_rec))
-
- # DEBUG *AND* others - there *MUST BE* a single time-sequenced log for diagnostics!
- # DLFM: too much I/O !!!
- # always write to debug; we need ONE logfile that has time-sequence full view !!!
- # log message in DEBUG format
- _out_rec = ("%s|%s|%s|%s|%s|%s|%s|%s|%s"
- % (calling_fx, "snmptrapd", unused, unused, unused, tds.SEV_TYPES[_sev], _error_code, unused, _msg))
- try:
- tds.eelf_debug_fd.write('%s|%s\n' % (t_out, str(_out_rec)))
- except Exception as e:
- stdout_logger(str(_out_rec))
-
- return True
-
-# # # # # # # # # # # # #
-# fx: stdout_logger
-# # # # # # # # # # # # #
+#def roll_file(_loc_file_name):
+# """
+# move active file to timestamped archive
+# """
+#
+# _file_name_suffix = "%s" % (datetime.datetime.fromtimestamp(time.time()).
+# fromtimestamp(time.time()).
+# strftime('%Y-%m-%dT%H:%M:%S'))
+#
+# _loc_file_name_bak = _loc_file_name + '.' + _file_name_suffix
+#
+# # roll existing file if present
+# if os.path.isfile(_loc_file_name):
+# try:
+# os.rename(_loc_file_name, _loc_file_name_bak)
+# return True
+# except Exception as e:
+# _msg = ("ERROR: Unable to rename %s to %s"
+# % (_loc_file_name,
+# _loc_file_name_bak))
+# ecomp_logger(tds.LOG_TYPE_ERROR, tds.SEV_CRIT,
+# tds.CODE_GENERAL, _msg)
+# return False
+#
+# return False
+#
+## # # # # # # # # # # # #
+## fx: open_log_file
+## # # # # # # # # # # # #
+#
+#
+#def open_file(_loc_file_name):
+# """
+# open _loc_file_name, return file handle
+# """
+#
+# try:
+# # open append mode just in case so nothing is lost, but should be
+# # non-existent file
+# _loc_fd = open(_loc_file_name, 'a')
+# return _loc_fd
+# except Exception as e:
+# msg = "Error opening " + _loc_file_name + " append mode - " + str(e)
+# stdout_logger(msg)
+# cleanup_and_exit(1, tds.pid_file_name)
+#
+#
+## # # # # # # # # # # # #
+## fx: close_file
+## # # # # # # # # # # # #
+# """
+# close _loc_file_name, return True with success, False otherwise
+# """
+#
+#
+#def close_file(_loc_fd, _loc_filename):
+#
+# try:
+#
+# _loc_fd.close()
+# return True
+# except Exception as e:
+# msg = "Error closing %s : %s - results indeterminate" % (
+# _loc_filename, str(e))
+# ecomp_logger(tds.LOG_TYPE_ERROR, tds.SEV_FATAL, tds.CODE_GENERAL, msg)
+# return False
+#
+## # # # # # # # # # # # # # # # # # #
+## fx: ecomp_logger -> log in eelf format until standard
+## is released for python via LOG-161
+## # # # # # # # # # ## # # # # # # #
+#
+#def ecomp_logger(_log_type, _sev, _error_code, _msg):
+# """
+# Log to ecomp-style logfiles. Logs include:
+#
+# Note: this will be updated when https://jira.onap.org/browse/LOG-161
+# is closed/available; until then, we resort to a generic format with
+# valuable info in "extra=" field (?)
+#
+# :Parameters:
+# _msg -
+# :Exceptions:
+# none
+# :Keywords:
+# eelf logging
+# :Log Styles:
+#
+# :error.log:
+#
+# if CommonLogger.verbose: print("using CommonLogger.ErrorFile")
+# self._logger.log(50, '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' \
+# % (requestID, threadID, serviceName, partnerName, targetEntity, targetServiceName,
+# errorCategory, errorCode, errorDescription, detailMessage))
+#
+# error.log example:
+#
+# 2018-02-20T07:21:34,007+00:00||MainThread|snmp_log_monitor||||FATAL|900||Tue Feb 20 07:21:11 UTC 2018 CRITICAL: [a0cae74e-160e-11e8-8f9f-0242ac110002] ALL publish attempts failed to DMAPP server: dcae-mrtr-zltcrdm5bdce1.1dff83.rdm5b.tci.att.com, topic: DCAE-COLLECTOR-UCSNMP, 339 trap(s) not published in epoch_serno range: 15191112530000 - 15191112620010
+#
+# :debug.log:
+#
+# if CommonLogger.verbose: print("using CommonLogger.DebugFile")
+# self._logger.log(50, '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' \
+# % (requestID, threadID, serverName, serviceName, instanceUUID, upperLogLevel,
+# severity, serverIPAddress, server, IPAddress, className, timer, detailMessage))
+#
+# debug.log example:
+#
+# none available
+#
+# :audit.log:
+#
+# if CommonLogger.verbose: print("using CommonLogger.AuditFile")
+# endAuditTime, endAuditMsec = self._getTime()
+# if self._begTime is not None:
+# d = {'begtime': self._begTime, 'begmsecs': self._begMsec, 'endtime': endAuditTime,
+# 'endmsecs': endAuditMsec}
+# else:
+# d = {'begtime': endAuditTime, 'begmsecs': endAuditMsec, 'endtime': endAuditTime,
+# 'endmsecs': endAuditMsec}
+#
+# self._logger.log(50, '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' \
+# % (requestID, serviceInstanceID, threadID, serverName, serviceName, partnerName,
+# statusCode, responseCode, responseDescription, instanceUUID, upperLogLevel,
+# severity, serverIPAddress, timer, server, IPAddress, className, unused,
+# processKey, customField1, customField2, customField3, customField4,
+# detailMessage), extra=d)
+#
+#
+# :metrics.log:
+#
+# self._logger.log(50,'%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' \
+# % (requestID, serviceInstanceID, threadID, serverName, serviceName, partnerName,
+# targetEntity, targetServiceName, statusCode, responseCode, responseDescription,
+# instanceUUID, upperLogLevel, severity, serverIPAddress, timer, server,
+# IPAddress,
+# className, unused, processKey, targetVirtualEntity, customField1, customField2,
+# customField3, customField4, detailMessage), extra=d)
+#
+# metrics.log example:
+#
+# none available
+#
+#
+# """
+#
+# unused = ""
+#
+# # above were various attempts at setting time string found in other
+# # libs; instead, let's keep it real:
+# t_out = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S,%f")[:-3]
+# calling_fx = inspect.stack()[1][3]
+#
+# # DLFM: this entire module is a hack to override concept of prog logging
+# # written across multiple files (???), making diagnostics IMPOSSIBLE!
+# # Hoping to leverage ONAP logging libraries & standards when available
+#
+# # catch invalid log type
+# if _log_type < 1 or _log_type > 5:
+# msg = ("INVALID log type: %s " % _log_type)
+# _out_rec = ("%s|%s|%s|%s|%s|%s|%s|%s|%s"
+# % (calling_fx, "snmptrapd", unused, unused, unused, tds.SEV_TYPES[_sev], _error_code, unused, (msg + _msg)))
+# try:
+# tds.eelf_error_fd.write('%s|%s\n' % (t_out, str(_out_rec)))
+# except Exception as e:
+# stdout_logger(str(_out_rec))
+#
+# return False
+#
+# if _sev >= tds.minimum_severity_to_log:
+# # log to appropriate eelf log (different files ??)
+# if _log_type == tds.LOG_TYPE_ERROR:
+# _out_rec = ('%s|%s|%s|%s|%s|%s|%s|%s|%s'
+# % (calling_fx, "snmptrapd", unused, unused, unused, tds.SEV_TYPES[_sev], _error_code, unused, _msg))
+# try:
+# tds.eelf_error_fd.write('%s|%s\n' % (t_out, str(_out_rec)))
+# except Exception as e:
+# stdout_logger(str(_out_rec))
+# elif _log_type == tds.LOG_TYPE_AUDIT:
+# # log message in AUDIT format
+# _out_rec = ('%s|%s|%s|%s|%s|%s|%s|%s|%s'
+# % (calling_fx, "snmptrapd", unused, unused, unused, tds.SEV_TYPES[_sev], _error_code, unused, _msg))
+# try:
+# tds.eelf_audit_fd.write('%s|%s\n' % (t_out, str(_out_rec)))
+# except Exception as e:
+# stdout_logger(str(_out_rec))
+# elif _log_type == tds.LOG_TYPE_METRICS:
+# # log message in METRICS format
+# _out_rec = ('%s|%s|%s|%s|%s|%s|%s|%s|%s'
+# % (calling_fx, "snmptrapd", unused, unused, unused, tds.SEV_TYPES[_sev], _error_code, unused, _msg))
+# try:
+# tds.eelf_metrics_fd.write('%s|%s\n' % (t_out, str(_out_rec)))
+# except Exception as e:
+# stdout_logger(str(_out_rec))
+#
+# # DEBUG *AND* others - there *MUST BE* a single time-sequenced log for diagnostics!
+# # DLFM: too much I/O !!!
+# # always write to debug; we need ONE logfile that has time-sequence full view !!!
+# # log message in DEBUG format
+# _out_rec = ("%s|%s|%s|%s|%s|%s|%s|%s|%s"
+# % (calling_fx, "snmptrapd", unused, unused, unused, tds.SEV_TYPES[_sev], _error_code, unused, _msg))
+# try:
+# tds.eelf_debug_fd.write('%s|%s\n' % (t_out, str(_out_rec)))
+# except Exception as e:
+# stdout_logger(str(_out_rec))
+#
+# return True
+#
+## # # # # # # # # # # # #
+## fx: stdout_logger
+## # # # # # # # # # # # #
def stdout_logger(_msg):
diff --git a/miss_htbt_service/mod/trapd_runtime_pid.py b/miss_htbt_service/mod/trapd_runtime_pid.py
index c6ef76e..c6ef76e 100755..100644
--- a/miss_htbt_service/mod/trapd_runtime_pid.py
+++ b/miss_htbt_service/mod/trapd_runtime_pid.py
diff --git a/miss_htbt_service/mod/trapd_settings.py b/miss_htbt_service/mod/trapd_settings.py
index be87e26..be87e26 100755..100644
--- a/miss_htbt_service/mod/trapd_settings.py
+++ b/miss_htbt_service/mod/trapd_settings.py
diff --git a/miss_htbt_service/mod/trapd_vnf_table.py b/miss_htbt_service/mod/trapd_vnf_table.py
new file mode 100644
index 0000000..a76c886
--- /dev/null
+++ b/miss_htbt_service/mod/trapd_vnf_table.py
@@ -0,0 +1,106 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+## Author Kiran Mandal (km386e)
+"""
+trapd_vnf_table verifies the successful creation of DB Tables.
+"""
+
+
+import psycopg2
+import os
+import sys
+import htbtworker as pm
+import misshtbtd as db
+import config_notif as cf
+import cbs_polling as cbs
+import logging
+import get_logger
+import yaml
+import os.path as path
+
+prog_name = os.path.basename(__file__)
+hb_properties_file = path.abspath(path.join(__file__, "../../config/hbproperties.yaml"))
+
+def hb_properties():
+ # Read the hbproperties.yaml for postgres and CBS related data
+ s=open(hb_properties_file, 'r')
+ a=yaml.load(s)
+ ip_address = a['pg_ipAddress']
+ port_num = a['pg_portNum']
+ user_name = a['pg_userName']
+ password = a['pg_passwd']
+ dbName = a['pg_dbName']
+ db_name = dbName.lower()
+ cbs_polling_required = a['CBS_polling_allowed']
+ cbs_polling_interval = a['CBS_polling_interval']
+ s.close()
+ return ip_address, port_num, user_name, password, db_name, cbs_polling_required, cbs_polling_interval
+
+ip_address, port_num, user_name, password, db_name, cbs_polling_required, cbs_polling_interval = hb_properties()
+
+def verify_DB_creation_1(user_name,password,ip_address,port_num,db_name):
+ connection_db = pm.postgres_db_open(user_name,password,ip_address,port_num,db_name)
+ # cur = connection_db.cursor()
+ try:
+ _db_status=pm.db_table_creation_check(connection_db,"vnf_table_1")
+ except Exception as e:
+ return None
+
+ return _db_status
+
+def verify_DB_creation_2(user_name,password,ip_address,port_num,db_name):
+
+ connection_db = pm.postgres_db_open(user_name,password,ip_address,port_num,db_name)
+ # cur = connection_db.cursor()
+ try:
+ _db_status=pm.db_table_creation_check(connection_db,"vnf_table_2")
+ except Exception as e:
+ return None
+
+ return _db_status
+
+def verify_DB_creation_hb_common(user_name,password,ip_address,port_num,db_name):
+
+ connection_db = pm.postgres_db_open(user_name,password,ip_address,port_num,db_name)
+ #cur = connection_db.cursor()
+ try:
+ _db_status=pm.db_table_creation_check(connection_db,"hb_common")
+ except Exception as e:
+ return None
+
+ return _db_status
+
+
+def verify_cbsPolling_required():
+ try:
+ _cbspolling_status=cf.config_notif_run()
+ except Exception as e:
+ return None
+
+ return _cbspolling_status
+
+def verify_cbspolling():
+ try:
+ _cbspolling=cbs.currentpidMain(10)
+ except Exception as e:
+ return None
+
+ return _cbspolling
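The new trapd_vnf_table.py module reads the postgres and CBS settings from config/hbproperties.yaml and exposes small helpers that return a table-check status, or None when the connection or query raises. A minimal usage sketch, assuming the module is importable as mod.trapd_vnf_table and a reachable PostgreSQL instance matching the YAML settings; the test name and assertions below are illustrative, not part of this change:

import mod.trapd_vnf_table as vnf

def test_heartbeat_tables_exist():
    # Connection parameters come from config/hbproperties.yaml via hb_properties().
    ip_address, port_num, user_name, password, db_name, _, _ = vnf.hb_properties()
    # Each helper opens its own connection and returns None on any exception.
    assert vnf.verify_DB_creation_1(user_name, password, ip_address, port_num, db_name) is not None
    assert vnf.verify_DB_creation_2(user_name, password, ip_address, port_num, db_name) is not None
    assert vnf.verify_DB_creation_hb_common(user_name, password, ip_address, port_num, db_name) is not None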