author    Ladue, David (dl3158) <dl3158@att.com>  2018-03-19 11:19:34 -0400
committer Ladue, David (dl3158) <dl3158@att.com>  2018-03-19 15:39:21 -0400
commit    623442e3e9deab63f9d5c4bf88fd7077bc7f64b5 (patch)
tree      9e80533141b3c03274dafd7d00e29777cd60d6b6 /bin/mod
parent    fb0aff81014b052a938a60b0815d6052404be162 (diff)
codestyle updates and docker
Change-Id: I68dafc17a4e9734bd86f925a720b22a951fe8d5f
Issue-ID: DCAEGEN2-271
Signed-off-by: Ladue, David (dl3158) <dl3158@att.com>
Diffstat (limited to 'bin/mod')
-rw-r--r--  bin/mod/trapd_file_utils.py      41
-rw-r--r--  bin/mod/trapd_get_cbs_config.py  22
-rw-r--r--  bin/mod/trapd_http_session.py     3
-rw-r--r--  bin/mod/trapd_logging.py         44
-rw-r--r--  bin/mod/trapd_settings.py        19
5 files changed, 67 insertions, 62 deletions
diff --git a/bin/mod/trapd_file_utils.py b/bin/mod/trapd_file_utils.py
index 2da099b..e62b528 100644
--- a/bin/mod/trapd_file_utils.py
+++ b/bin/mod/trapd_file_utils.py
@@ -60,8 +60,8 @@ def roll_all_logs():
# NOTE: this will go away when onap logging is standardized/available
try:
# open various ecomp logs - if any fails, exit
- for fd in [tds.eelf_error_fd, tds.eelf_debug_fd, tds.eelf_audit_fd, \
- tds.eelf_metrics_fd, tds.arriving_traps_fd, tds.json_traps_fd]:
+ for fd in [tds.eelf_error_fd, tds.eelf_debug_fd, tds.eelf_audit_fd,
+ tds.eelf_metrics_fd, tds.arriving_traps_fd, tds.json_traps_fd]:
fd.close()
roll_file(tds.eelf_error_file_name)
@@ -86,7 +86,8 @@ def roll_all_logs():
try:
tds.json_traps_fd = open_file(tds.json_traps_filename)
except Exception as e:
- msg = ("Error opening json_log %s : %s" % (json_traps_filename, str(e)))
+ msg = ("Error opening json_log %s : %s" %
+ (json_traps_filename, str(e)))
stdout_logger(msg)
cleanup_and_exit(1, tds.pid_file_name)
@@ -96,11 +97,11 @@ def roll_all_logs():
try:
tds.arriving_traps_fd = open_file(tds.arriving_traps_filename)
except Exception as e:
- msg = ("Error opening arriving traps %s : %s" % (arriving_traps_filename, str(e)))
+ msg = ("Error opening arriving traps %s : %s" %
+ (arriving_traps_filename, str(e)))
stdout_logger(msg)
cleanup_and_exit(1, tds.pid_file_name)
-
# # # # # # # # # # # # # # # # # # #
# fx: setup_ecomp_logs -> log in eelf format until standard
@@ -116,7 +117,8 @@ def open_eelf_logs():
try:
# open various ecomp logs - if any fails, exit
- tds.eelf_error_file_name = (tds.c_config['files.eelf_base_dir'] + "/" + tds.c_config['files.eelf_error'])
+ tds.eelf_error_file_name = (
+ tds.c_config['files.eelf_base_dir'] + "/" + tds.c_config['files.eelf_error'])
tds.eelf_error_fd = open_file(tds.eelf_error_file_name)
except Exception as e:
@@ -124,9 +126,9 @@ def open_eelf_logs():
stdout_logger(msg)
cleanup_and_exit(1, tds.pid_file_name)
-
try:
- tds.eelf_debug_file_name = (tds.c_config['files.eelf_base_dir'] + "/" + tds.c_config['files.eelf_debug'])
+ tds.eelf_debug_file_name = (
+ tds.c_config['files.eelf_base_dir'] + "/" + tds.c_config['files.eelf_debug'])
tds.eelf_debug_fd = open_file(tds.eelf_debug_file_name)
except Exception as e:
@@ -135,7 +137,8 @@ def open_eelf_logs():
cleanup_and_exit(1, tds.pid_file_name)
try:
- tds.eelf_audit_file_name = (tds.c_config['files.eelf_base_dir'] + "/" + tds.c_config['files.eelf_audit'])
+ tds.eelf_audit_file_name = (
+ tds.c_config['files.eelf_base_dir'] + "/" + tds.c_config['files.eelf_audit'])
tds.eelf_audit_fd = open_file(tds.eelf_audit_file_name)
except Exception as e:
msg = "Error opening eelf audit log : " + str(e)
@@ -143,7 +146,8 @@ def open_eelf_logs():
cleanup_and_exit(1, tds.pid_file_name)
try:
- tds.eelf_metrics_file_name = (tds.c_config['files.eelf_base_dir'] + "/" + tds.c_config['files.eelf_metrics'])
+ tds.eelf_metrics_file_name = (
+ tds.c_config['files.eelf_base_dir'] + "/" + tds.c_config['files.eelf_metrics'])
tds.eelf_metrics_fd = open_file(tds.eelf_metrics_file_name)
except Exception as e:
msg = "Error opening eelf metric log : " + str(e)
@@ -163,8 +167,8 @@ def roll_file(_loc_file_name):
"""
_file_name_suffix = "%s" % (datetime.datetime.fromtimestamp(time.time()).
- fromtimestamp(time.time()).
- strftime('%Y-%m-%dT%H:%M:%S'))
+ fromtimestamp(time.time()).
+ strftime('%Y-%m-%dT%H:%M:%S'))
_loc_file_name_bak = _loc_file_name + '.' + _file_name_suffix
@@ -172,11 +176,12 @@ def roll_file(_loc_file_name):
if os.path.isfile(_loc_file_name):
try:
os.rename(_loc_file_name, _loc_file_name_bak)
- except:
+ except Exception as e:
_msg = ("ERROR: Unable to rename %s to %s"
- % (_loc_file_name,
- _loc_file_name_bak))
- ecomp_logger(tds.LOG_TYPE_ERROR, tds.SEV_CRIT, tds.CODE_GENERAL, _msg)
+ % (_loc_file_name,
+ _loc_file_name_bak))
+ ecomp_logger(tds.LOG_TYPE_ERROR, tds.SEV_CRIT,
+ tds.CODE_GENERAL, _msg)
# # # # # # # # # # # # #
@@ -189,7 +194,6 @@ def open_file(_loc_file_name):
open _loc_file_name, return file handle
"""
-
try:
# open append mode just in case so nothing is lost, but should be
# non-existent file
@@ -215,6 +219,7 @@ def close_file(_loc_fd, _loc_filename):
_loc_fd.close()
return True
except Exception as e:
- msg = "Error closing %s : %s - results indeterminate" % (_loc_filename, str(e))
+ msg = "Error closing %s : %s - results indeterminate" % (
+ _loc_filename, str(e))
ecomp_logger(tds.LOG_TYPE_ERROR, tds.SEV_FATAL, tds.CODE_GENERAL, msg)
return False
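Editor's note: the hunks above mostly rewrap long error messages and replace a bare except in roll_file() with "except Exception as e:". A minimal sketch of the roll-and-reopen pattern the module follows, assuming simplified names (the ecomp_logger call and tds globals from the real module are replaced with a plain print):

    import datetime
    import os
    import time


    def roll_file(loc_file_name):
        """Rename an existing log file to <name>.<ISO timestamp>, keeping its content."""
        suffix = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%dT%H:%M:%S')
        backup_name = loc_file_name + '.' + suffix
        if os.path.isfile(loc_file_name):
            try:
                os.rename(loc_file_name, backup_name)
            except Exception as e:
                # the real module routes this through ecomp_logger(...)
                print("ERROR: Unable to rename %s to %s : %s"
                      % (loc_file_name, backup_name, e))


    def open_file(loc_file_name):
        """Open in append mode so nothing is lost if the file already exists."""
        return open(loc_file_name, 'a')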
diff --git a/bin/mod/trapd_get_cbs_config.py b/bin/mod/trapd_get_cbs_config.py
index 775e0b2..e0f5ca8 100644
--- a/bin/mod/trapd_get_cbs_config.py
+++ b/bin/mod/trapd_get_cbs_config.py
@@ -20,7 +20,7 @@
#
"""
Look for CBS broker and return application config; if not present, look for
-env variable that specifies JSON equiv of CBS config (typically used for
+env variable that specifies JSON equiv of CBS config (typically used for
testing purposes)
"""
@@ -63,54 +63,56 @@ def get_cbs_config():
if tds.c_config == {}:
msg = "Unable to fetch CBS config or it is erroneously empty - trying override/simulator config"
stdout_logger(msg)
-
+
# if no CBS present, default to JSON config specified via CBS_SIM_JSON env var
except Exception as e:
msg = "ONAP controller not present, trying json config override via CBS_SIM_JSON env variable"
stdout_logger(msg)
-
+
try:
_cbs_sim_json_file = os.getenv("CBS_SIM_JSON", "None")
except Exception as e:
msg = "CBS_SIM_JSON not defined - FATAL ERROR, exiting"
stdout_logger(msg)
cleanup_and_exit(1, pid_file_name)
-
+
if _cbs_sim_json_file == "None":
msg = "CBS_SIM_JSON not defined - FATAL ERROR, exiting"
stdout_logger(msg)
cleanup_and_exit(1, pid_file_name)
else:
- msg = ("ONAP controller override specified via CBS_SIM_JSON: %s" % _cbs_sim_json_file )
+ msg = ("ONAP controller override specified via CBS_SIM_JSON: %s" %
+ _cbs_sim_json_file)
stdout_logger(msg)
try:
tds.c_config = json.load(open(_cbs_sim_json_file))
except Exception as e:
- msg = "Unable to load CBS_SIM_JSON " + _cbs_sim_json_file + " (invalid json?) - FATAL ERROR, exiting"
+ msg = "Unable to load CBS_SIM_JSON " + _cbs_sim_json_file + \
+ " (invalid json?) - FATAL ERROR, exiting"
stdout_logger(msg)
cleanup_and_exit(1, tds.pid_file_name)
# recalc timeout, set default if not present
try:
tds.timeout_seconds = tds.c_config['publisher.http_timeout_milliseconds'] / 1000.0
- except:
+ except Exception as e:
tds.timeout_seconds = 1.5
# recalc seconds_between_retries, set default if not present
try:
tds.seconds_between_retries = tds.c_config['publisher.http_milliseconds_between_retries'] / 1000.0
- except:
+ except Exception as e:
tds.seconds_between_retries = .750
# recalc min_severity_to_log, set default if not present
try:
tds.minimum_severity_to_log = tds.c_config['files.minimum_severity_to_log']
- except:
+ except Exception as e:
tds.minimum_severity_to_log = 3
try:
tds.publisher_retries = tds.c_config['publisher.http_retries']
- except:
+ except Exception as e:
tds.publisher_retries = 3
return True
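Editor's note: the tail of get_cbs_config() reads each tunable from the fetched config and silently falls back to a default when the key is missing; the diff only narrows the bare excepts. A hedged sketch of that per-key fallback, with a plain dict standing in for tds.c_config (a dict.get() with a default would be tighter, but the sketch keeps the try/except shape shown in the diff):

    # stand-in for tds.c_config after a successful CBS fetch
    c_config = {'publisher.http_timeout_milliseconds': 2000}

    try:
        timeout_seconds = c_config['publisher.http_timeout_milliseconds'] / 1000.0
    except Exception:
        timeout_seconds = 1.5

    try:
        seconds_between_retries = c_config['publisher.http_milliseconds_between_retries'] / 1000.0
    except Exception:
        seconds_between_retries = 0.750

    try:
        minimum_severity_to_log = c_config['files.minimum_severity_to_log']
    except Exception:
        minimum_severity_to_log = 3

    print(timeout_seconds, seconds_between_retries, minimum_severity_to_log)
    # -> 2.0 0.75 3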
diff --git a/bin/mod/trapd_http_session.py b/bin/mod/trapd_http_session.py
index 2e0b77e..b34c19d 100644
--- a/bin/mod/trapd_http_session.py
+++ b/bin/mod/trapd_http_session.py
@@ -29,9 +29,6 @@ import os
import requests
import traceback
-# snmptrapd
-import trapd_settings
-
prog_name = os.path.basename(__file__)
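Editor's note: this hunk only drops an unused "import trapd_settings". The module's purpose (per its name and the settings comment "publish http request session, persistent as much as possible") is to hand back one reusable requests session. A minimal sketch of that idea, assuming a helper along these lines; the function name init_session is illustrative and not taken from this hunk:

    import requests


    def init_session():
        """Create one requests.Session so keep-alive connections are reused
        across publish attempts instead of reconnecting for every request."""
        session = requests.Session()
        return session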
diff --git a/bin/mod/trapd_logging.py b/bin/mod/trapd_logging.py
index 435f03e..ae5a1a0 100644
--- a/bin/mod/trapd_logging.py
+++ b/bin/mod/trapd_logging.py
@@ -43,7 +43,7 @@ prog_name = os.path.basename(__file__)
# # # # # # # # # # # # # # # # # # #
-# fx: ecomp_logger -> log in eelf format until standard
+# fx: ecomp_logger -> log in eelf format until standard
# is released for python via LOG-161
# # # # # # # # # # ## # # # # # # #
@@ -51,16 +51,16 @@ def ecomp_logger(_log_type, _sev, _error_code, _msg):
"""
Log to ecomp-style logfiles. Logs include:
- Note: this will be updated when https://jira.onap.org/browse/LOG-161
+ Note: this will be updated when https://jira.onap.org/browse/LOG-161
is closed/available; until then, we resort to a generic format with
valuable info in "extra=" field (?)
:Parameters:
- _msg -
+ _msg -
:Exceptions:
none
:Keywords:
- eelf logging
+ eelf logging
:Log Styles:
:error.log:
@@ -95,7 +95,7 @@ def ecomp_logger(_log_type, _sev, _error_code, _msg):
else:
d = {'begtime': endAuditTime, 'begmsecs': endAuditMsec, 'endtime': endAuditTime,
'endmsecs': endAuditMsec}
-
+
self._logger.log(50, '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' \
% (requestID, serviceInstanceID, threadID, serverName, serviceName, partnerName,
statusCode, responseCode, responseDescription, instanceUUID, upperLogLevel,
@@ -132,43 +132,43 @@ def ecomp_logger(_log_type, _sev, _error_code, _msg):
t_out = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S,%f")[:-3]
calling_fx = inspect.stack()[1][3]
- # FIXME: this entire module is a hack to override concept of prog logging
- # written across multiple files (???), making diagnostics IMPOSSIBLE!
+ # FIXME: this entire module is a hack to override concept of prog logging
+ # written across multiple files (???), making diagnostics IMPOSSIBLE!
# Hoping to leverage ONAP logging libraries & standards when available
# catch invalid log type
- if _log_type < 1 or _log_type > 5:
- msg = ("INVALID log type: %s " % _log_type )
- _out_rec = ("%s|%s|%s|%s|%s|%s|%s|%s|%s|%s" \
- % ((t_out, calling_fx, "snmptrapd", unused, unused, unused, tds.SEV_TYPES[_sev], _error_code, unused, (msg + _msg))))
+ if _log_type < 1 or _log_type > 5:
+ msg = ("INVALID log type: %s " % _log_type)
+ _out_rec = ("%s|%s|%s|%s|%s|%s|%s|%s|%s|%s"
+ % ((t_out, calling_fx, "snmptrapd", unused, unused, unused, tds.SEV_TYPES[_sev], _error_code, unused, (msg + _msg))))
tds.eelf_error_fd.write('%s\n' % str(_out_rec))
return False
if _sev >= tds.minimum_severity_to_log:
# log to appropriate eelf log (different files ??)
if _log_type == tds.LOG_TYPE_ERROR:
- _out_rec = ('%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' \
- % ((t_out, calling_fx, "snmptrapd", unused, unused, unused, tds.SEV_TYPES[_sev], _error_code, unused, _msg)))
+ _out_rec = ('%s|%s|%s|%s|%s|%s|%s|%s|%s|%s'
+ % ((t_out, calling_fx, "snmptrapd", unused, unused, unused, tds.SEV_TYPES[_sev], _error_code, unused, _msg)))
tds.eelf_error_fd.write('%s\n' % str(_out_rec))
elif _log_type == tds.LOG_TYPE_AUDIT:
# log message in AUDIT format
- _out_rec = ('%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' \
- % ((t_out, calling_fx, "snmptrapd", unused, unused, unused, tds.SEV_TYPES[_sev], _error_code, unused, _msg)))
+ _out_rec = ('%s|%s|%s|%s|%s|%s|%s|%s|%s|%s'
+ % ((t_out, calling_fx, "snmptrapd", unused, unused, unused, tds.SEV_TYPES[_sev], _error_code, unused, _msg)))
tds.eelf_audit_fd.write('%s\n' % str(_out_rec))
elif _log_type == tds.LOG_TYPE_METRICS:
# log message in METRICS format
- _out_rec = ('%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' \
- % ((t_out, calling_fx, "snmptrapd", unused, unused, unused, tds.SEV_TYPES[_sev], _error_code, unused, _msg)))
+ _out_rec = ('%s|%s|%s|%s|%s|%s|%s|%s|%s|%s'
+ % ((t_out, calling_fx, "snmptrapd", unused, unused, unused, tds.SEV_TYPES[_sev], _error_code, unused, _msg)))
tds.eelf_metrics_fd.write('%s\n' % str(_out_rec))
-
+
# DEBUG *AND* others - there *MUST BE* a single time-sequenced log for diagnostics!
# FIXME: too much I/O !!!
# always write to debug; we need ONE logfile that has time-sequence full view !!!
# if (_log_type == tds.LOG_TYPE_DEBUG and _sev >= tds.current_min_sev_log_level) or (_log_type != tds.LOG_TYPE_DEBUG):
-
+
# log message in DEBUG format
- _out_rec = ("%s|%s|%s|%s|%s|%s|%s|%s|%s|%s" \
- % ((t_out, calling_fx, "snmptrapd", unused, unused, unused, tds.SEV_TYPES[_sev], _error_code, unused, _msg)))
+ _out_rec = ("%s|%s|%s|%s|%s|%s|%s|%s|%s|%s"
+ % ((t_out, calling_fx, "snmptrapd", unused, unused, unused, tds.SEV_TYPES[_sev], _error_code, unused, _msg)))
tds.eelf_debug_fd.write('%s\n' % str(_out_rec))
return True
@@ -196,4 +196,4 @@ def stdout_logger(_msg):
t_out = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S,%f")[:-3]
# calling_fx = inspect.stack()[1][3]
- print('%s %s' % ( t_out, _msg))
+ print('%s %s' % (t_out, _msg))
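Editor's note: every branch of ecomp_logger() writes the same pipe-delimited record, differing only in the target file descriptor; the diff just drops the now-unneeded backslash continuations. A hedged sketch of that record format, with field meanings inferred from the hunks above ("unused" stands for columns the interim format leaves blank until LOG-161 lands):

    import datetime
    import inspect


    def format_record(sev_text, error_code, msg, unused=""):
        t_out = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S,%f")[:-3]
        calling_fx = inspect.stack()[1][3]
        return ("%s|%s|%s|%s|%s|%s|%s|%s|%s|%s"
                % (t_out, calling_fx, "snmptrapd", unused, unused, unused,
                   sev_text, error_code, unused, msg))


    print(format_record("WARNING", "100", "sample message"))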
diff --git a/bin/mod/trapd_settings.py b/bin/mod/trapd_settings.py
index 735b68c..be87e26 100644
--- a/bin/mod/trapd_settings.py
+++ b/bin/mod/trapd_settings.py
@@ -23,6 +23,7 @@
__docformat__ = 'restructuredtext'
+
def init():
# <CONSUL config cache>
@@ -36,7 +37,8 @@ def init():
# dns_cache_ip_to_name
# key [ip address] -> fqdn
# dns_cache_ip_expires
- # key [ip address] -> epoch time this entry expires and must be reloaded
+ # key [ip address] -> epoch time this entry expires and must
+ # be reloaded
global dns_cache_ip_to_name
dns_cache_ip_to_name = {}
global dns_cache_ip_expires
@@ -85,9 +87,9 @@ def init():
all_vb_json_str = ""
global trap_uuids_in_buffer
trap_uuids_in_buffer = ""
- # </trap and varbind dictionaries>
+ # </trap and varbind dictionaries>
- # <publish timers and counters>
+ # <publish timers and counters>
global traps_in_minute
traps_in_minute = 0
global last_epoch_second
@@ -104,12 +106,12 @@ def init():
seconds_between_retries = 2
global publisher_retries
publisher_retries = 2
- # </publish timers and counters>
+ # </publish timers and counters>
- # <publish http request session (persistent as much as possible)>
+ # <publish http request session (persistent as much as possible)>
global http_requ_session
http_requ_session = None
- # </publish http request session>
+ # </publish http request session>
# <json log of traps published>
global json_traps_filename
@@ -159,10 +161,9 @@ def init():
SEV_FATAL = 5
global CODE_GENERAL
- CODE_GENERAL="100"
+ CODE_GENERAL = "100"
global minimum_severity_to_log
- minimum_severity_to_log=3
-
+ minimum_severity_to_log = 3
# </logging types and severities>
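Editor's note: trapd_settings.py is a shared-state module; init() declares module-level globals so every other module can "import trapd_settings as tds" and read or mutate one namespace. A minimal sketch of the pattern with a few representative names from the diff:

    def init():
        global c_config
        c_config = {}

        global minimum_severity_to_log
        minimum_severity_to_log = 3

        global CODE_GENERAL
        CODE_GENERAL = "100"


    # illustrative usage elsewhere in the package:
    #   import trapd_settings as tds
    #   tds.init()
    #   current_code = tds.CODE_GENERAL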