diff options
author | Arthur Martella <arthur.martella.1@att.com> | 2019-03-15 12:20:16 -0400 |
---|---|---|
committer | Arthur Martella <arthur.martella.1@att.com> | 2019-03-15 12:20:16 -0400 |
commit | 46dea67fc3adbee89c94ebd1c1c44aa3797d6490 (patch) | |
tree | 40cd0acea37e7188c4bd3db32ceb92649faae766 /engine/src | |
parent | f0a9edb94c527c74f45416b9f20c5c90a11bb5de (diff) |
Initial upload of F-GPS seed code 7/21
Includes:
Engine resource manager beans
Change-Id: Ife9be836dc65faeaae8b90788b8f0b35d6adbe4f
Issue-ID: OPTFRA-440
Signed-off-by: arthur.martella.1@att.com
Diffstat (limited to 'engine/src')
8 files changed, 1423 insertions, 0 deletions
diff --git a/engine/src/valet/engine/resource_manager/resources/__init__.py b/engine/src/valet/engine/resource_manager/resources/__init__.py new file mode 100644 index 0000000..bd50995 --- /dev/null +++ b/engine/src/valet/engine/resource_manager/resources/__init__.py @@ -0,0 +1,18 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2019 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# diff --git a/engine/src/valet/engine/resource_manager/resources/datacenter.py b/engine/src/valet/engine/resource_manager/resources/datacenter.py new file mode 100644 index 0000000..6f03bae --- /dev/null +++ b/engine/src/valet/engine/resource_manager/resources/datacenter.py @@ -0,0 +1,85 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2019 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
class Datacenter(object):
    """Container for the top-level datacenter resource."""

    def __init__(self, _name):
        self.name = _name

        self.status = "enabled"

        # Enabled group objects (e.g., aggregate), keyed by group name
        self.memberships = {}

        # Aggregate capacities over all contained hosts
        self.vCPUs = 0
        self.avail_vCPUs = 0

        self.mem_cap = 0  # MB
        self.avail_mem_cap = 0

        self.local_disk_cap = 0  # GB, ephemeral
        self.avail_local_disk_cap = 0

        # Enabled host_group (rack) or host objects, keyed by name
        self.resources = {}

        # Server infos placed anywhere in this datacenter
        self.server_list = []

        self.updated = False

    def is_available(self):
        """Return True if this datacenter is enabled."""

        return self.status == "enabled"

    def init_resources(self):
        """Reset all aggregate capacity counters to zero."""

        self.vCPUs = 0
        self.avail_vCPUs = 0
        self.mem_cap = 0
        self.avail_mem_cap = 0
        self.local_disk_cap = 0
        self.avail_local_disk_cap = 0

    def get_json_info(self):
        """Return this datacenter as a JSON-serializable dict."""

        return {'status': self.status,
                'name': self.name,
                'membership_list': list(self.memberships),
                'vCPUs': self.vCPUs,
                'avail_vCPUs': self.avail_vCPUs,
                'mem': self.mem_cap,
                'avail_mem': self.avail_mem_cap,
                'local_disk': self.local_disk_cap,
                'avail_local_disk': self.avail_local_disk_cap,
                'children': list(self.resources),
                'server_list': self.server_list}
class Flavor(object):
    """Container for flavor resource."""

    def __init__(self, _name):
        self.name = _name

        self.flavor_id = None

        self.status = "enabled"

        self.vCPUs = 0
        self.mem_cap = 0  # MB
        self.disk_cap = 0  # including ephemeral (GB) and swap (MB)

        self.extra_specs = {}

        self.updated = False

    def set_info(self, _f):
        """Copy detailed flavor information from another flavor object.

        Fix: the original used dict.iteritems(), which does not exist on
        Python 3; dict.items() behaves identically here on both versions.
        """

        self.status = _f.status

        self.vCPUs = _f.vCPUs
        self.mem_cap = _f.mem_cap
        self.disk_cap = _f.disk_cap

        for ek, ev in _f.extra_specs.items():
            self.extra_specs[ek] = ev

    def need_numa_alignment(self):
        """Check if this flavor requires NUMA alignment.

        True when extra_specs pins the server to a single NUMA node
        ("hw:numa_nodes" == 1).
        """

        for key, req in self.extra_specs.items():
            if key == "hw:numa_nodes" and int(req) == 1:
                return True

        return False

    def get_json_info(self):
        """Return this flavor as a JSON-serializable dict."""

        return {'status': self.status,
                'flavor_id': self.flavor_id,
                'vCPUs': self.vCPUs,
                'mem': self.mem_cap,
                'disk': self.disk_cap,
                'extra_specs': self.extra_specs}
class Group(object):
    """Container for a logical group of compute hosts and their servers."""

    # server-info fields merged when the incoming value is not "none"
    _ID_KEYS = ("stack_id", "uuid", "flavor_id", "image_id",
                "state", "status", "numa")
    # server-info fields merged when the incoming value is not -1
    _CAP_KEYS = ("vcpus", "mem", "disk")

    def __init__(self, _name):
        """Define logical group of compute hosts."""

        self.name = _name

        self.uuid = None

        # Group includes
        # - host-aggregate, availability-zone,
        # - server groups: affinity, diversity, soft-affinity, soft-diversity,
        # - affinity, diversity, quorum-diversity, exclusivity
        self.group_type = None

        self.level = None

        # Where the group is originated
        # - 'valet' or 'nova' or 'server-group' or other cloud platform
        self.factory = None

        self.status = "enabled"

        # host_name -> list of server infos placed on that host
        self.member_hosts = {}

        # For Host-Aggregate group
        self.metadata = {}

        # Group rule object for valet groups
        self.rule = None

        # All server infos placed under this group
        self.server_list = []

        self.updated = False

        self.new = False

    @staticmethod
    def _is_matched(s_info, _s_info):
        """Return True if both infos identify the same server.

        Match by uuid, else by (stack_id, name), else by (stack_name, name).
        The sentinel "none" marks an unknown field and never matches.
        This predicate was previously copy-pasted across seven methods.
        """

        if _s_info["uuid"] != "none":
            if s_info["uuid"] != "none" and \
                    s_info["uuid"] == _s_info["uuid"]:
                return True

        if _s_info["stack_id"] != "none":
            if (s_info["stack_id"] != "none" and
                    s_info["stack_id"] == _s_info["stack_id"]) and \
                    s_info["name"] == _s_info["name"]:
                return True

        if _s_info["stack_name"] != "none":
            if (s_info["stack_name"] != "none" and
                    s_info["stack_name"] == _s_info["stack_name"]) and \
                    s_info["name"] == _s_info["name"]:
                return True

        return False

    def has_server(self, _s_info):
        """Check if the server exists in this group."""

        for s_info in self.server_list:
            if self._is_matched(s_info, _s_info):
                return True

        return False

    def has_server_uuid(self, _uuid):
        """Check if the server exists in this group with uuid."""

        for s_info in self.server_list:
            if s_info["uuid"] == _uuid:
                return True

        return False

    def has_server_in_host(self, _host_name, _s_info):
        """Check if the server exists in the host in this group."""

        for s_info in self.member_hosts.get(_host_name, []):
            if self._is_matched(s_info, _s_info):
                return True

        return False

    def get_server_info(self, _s_info):
        """Return the matching server info in this group, or None."""

        for s_info in self.server_list:
            if self._is_matched(s_info, _s_info):
                return s_info

        return None

    def get_server_info_in_host(self, _host_name, _s_info):
        """Return the matching server info in the given host, or None."""

        for s_info in self.member_hosts.get(_host_name, []):
            if self._is_matched(s_info, _s_info):
                return s_info

        return None

    def add_server(self, _s_info, _host_name):
        """Add server to this group.

        Returns False if the server is already known to the group.
        """

        if self.has_server(_s_info):
            return False

        if self.has_server_in_host(_host_name, _s_info):
            return False

        self.server_list.append(_s_info)

        # Per-host membership is only tracked for valet/server groups.
        if self.factory in ("valet", "server-group"):
            if _host_name not in self.member_hosts.keys():
                self.member_hosts[_host_name] = []

            self.member_hosts[_host_name].append(_s_info)

        return True

    def remove_server(self, _s_info):
        """Remove server from this group's server_list."""

        for s_info in self.server_list:
            if self._is_matched(s_info, _s_info):
                self.server_list.remove(s_info)
                return True

        return False

    def remove_server_from_host(self, _host_name, _s_info):
        """Remove server from the host of this group."""

        if _host_name in self.member_hosts.keys():
            for s_info in self.member_hosts[_host_name]:
                if self._is_matched(s_info, _s_info):
                    self.member_hosts[_host_name].remove(s_info)
                    return True

        return False

    def remove_member(self, _host_name):
        """Remove the host from this group's memberships if it is empty.

        To return the host to pool for other placements.
        """

        if self.factory in ("valet", "server-group"):
            if _host_name in self.member_hosts.keys() and \
                    len(self.member_hosts[_host_name]) == 0:
                del self.member_hosts[_host_name]

                return True

        return False

    def clean_server(self, _uuid, _host_name):
        """Clean the server that does not have enriched info."""

        if _uuid == "none":
            return

        for s_info in self.server_list:
            if s_info["uuid"] == _uuid and s_info["name"] == "none":
                self.server_list.remove(s_info)
                break

        if _host_name in self.member_hosts.keys():
            for s_info in self.member_hosts[_host_name]:
                if s_info["uuid"] == _uuid and s_info["name"] == "none":
                    self.member_hosts[_host_name].remove(s_info)
                    break

        if _host_name in self.member_hosts.keys() and \
                len(self.member_hosts[_host_name]) == 0:
            del self.member_hosts[_host_name]

    def _merge_server_info(self, s_info, _s_info):
        """Copy known fields of _s_info into s_info; True if anything changed.

        Fields whose incoming value is the sentinel ("none" / -1) are skipped.
        """

        updated = False

        for key in self._ID_KEYS:
            if _s_info[key] != "none" and _s_info[key] != s_info[key]:
                s_info[key] = _s_info[key]
                updated = True

        for key in self._CAP_KEYS:
            if _s_info[key] != -1 and _s_info[key] != s_info[key]:
                s_info[key] = _s_info[key]
                updated = True

        return updated

    def update_server(self, _s_info):
        """Update server with info from given info.

        The info comes from platform or request (e.g., Heat stack).
        Returns True if any field was updated.
        """

        s_info = self.get_server_info(_s_info)

        if s_info is None:
            return False

        return self._merge_server_info(s_info, _s_info)

    def update_server_in_host(self, _host_name, _s_info):
        """Update server in the host of this group."""

        if _host_name in self.member_hosts.keys():
            s_info = self.get_server_info_in_host(_host_name, _s_info)

            if s_info is not None:
                self._merge_server_info(s_info, _s_info)

    def get_json_info(self):
        """Get group info as JSON format."""

        rule_id = "none"
        if self.rule is not None:
            rule_id = self.rule.rule_id

        return {'status': self.status,
                'uuid': self.uuid,
                'group_type': self.group_type,
                'level': self.level,
                'factory': self.factory,
                'rule_id': rule_id,
                'metadata': self.metadata,
                'server_list': self.server_list,
                'member_hosts': self.member_hosts}
class GroupRule(object):
    """Container for a valet group rule."""

    def __init__(self, _id):
        self.rule_id = _id

        self.status = "enabled"

        # Default scope/type/level for a newly created rule
        self.app_scope = "lcp"
        self.rule_type = "affinity"
        self.level = "host"

        # Tenant ids allowed to use this rule
        self.members = []

        self.desc = None

        # self.groups = []  # group ids generated under this rule

        self.updated = False

    def get_json_info(self):
        """Return this rule as a JSON-serializable dict."""

        return dict(status=self.status,
                    app_scope=self.app_scope,
                    rule_type=self.rule_type,
                    level=self.level,
                    members=self.members,
                    desc=self.desc)
        # 'groups': self.groups intentionally left out (see __init__)
class Host(object):
    """Container for compute host."""

    # server-info fields merged when the incoming value is not "none"
    _ID_KEYS = ("stack_id", "uuid", "flavor_id", "image_id",
                "state", "status", "numa")
    # server-info fields merged when the incoming value is not -1
    _CAP_KEYS = ("vcpus", "mem", "disk")

    def __init__(self, _name):
        """Define compute host."""

        # Imported lazily so this container module can be loaded in
        # isolation (e.g., for unit tests) without the whole engine package.
        from valet.engine.resource_manager.resources.numa import NUMA

        self.name = _name

        self.uuid = None

        self.status = "enabled"
        self.state = "up"

        # Enabled group objects (e.g., aggregate) this hosting server is in
        self.memberships = {}

        self.vCPUs = 0
        self.original_vCPUs = 0
        self.avail_vCPUs = 0

        self.mem_cap = 0  # MB
        self.original_mem_cap = 0
        self.avail_mem_cap = 0

        self.local_disk_cap = 0  # GB, ephemeral
        self.original_local_disk_cap = 0
        self.avail_local_disk_cap = 0

        # Raw hypervisor stats as reported by the platform
        self.vCPUs_used = 0
        self.free_mem_mb = 0
        self.free_disk_gb = 0
        self.disk_available_least = 0

        # To track available cores and memory per NUMA cell
        self.NUMA = NUMA()

        self.host_group = None  # host_group object (e.g., rack)

        # Keep a list of placed servers' information.
        # Here, server_info including {uuid, orch_id, name,
        #                              stack_id, stack_name,
        #                              flavor_id, image_id, tenent_id,
        #                              vcpus, mem, disk, numa,
        #                              state, status}
        self.server_list = []

        # Only populated if this host is not defined yet (unknown host).
        self.candidate_host_types = {}

        self.updated = False

    @staticmethod
    def _is_matched(s_info, _s_info):
        """Return True if both infos identify the same server.

        Match by uuid, else by (stack_id, name), else by (stack_name, name);
        the sentinel "none" marks an unknown field and never matches.
        """

        if _s_info["uuid"] != "none":
            if s_info["uuid"] != "none" and \
                    s_info["uuid"] == _s_info["uuid"]:
                return True

        if _s_info["stack_id"] != "none":
            if (s_info["stack_id"] != "none" and
                    s_info["stack_id"] == _s_info["stack_id"]) and \
                    s_info["name"] == _s_info["name"]:
                return True

        if _s_info["stack_name"] != "none":
            if (s_info["stack_name"] != "none" and
                    s_info["stack_name"] == _s_info["stack_name"]) and \
                    s_info["name"] == _s_info["name"]:
                return True

        return False

    def is_available(self):
        """Check if host is available (enabled and up)."""

        return self.status == "enabled" and self.state == "up"

    def has_server(self, _s_info):
        """Check if server is located in this host."""

        for s_info in self.server_list:
            if self._is_matched(s_info, _s_info):
                return True

        return False

    def get_server_info(self, _s_info):
        """Return the matching server info on this host, or None."""

        for s_info in self.server_list:
            if self._is_matched(s_info, _s_info):
                return s_info

        return None

    def add_server(self, _s_info):
        """Add new server to this host."""

        self.server_list.append(_s_info)

    def remove_server(self, _s_info):
        """Remove server from this host."""

        for s_info in self.server_list:
            if self._is_matched(s_info, _s_info):
                self.server_list.remove(s_info)
                return True

        return False

    def update_server(self, _s_info):
        """Update server with info from given info.

        The info comes from platform or request (e.g., Heat stack).
        Returns the updated server info, or None if nothing changed.
        """

        updated = None

        s_info = self.get_server_info(_s_info)

        if s_info is not None:
            for key in self._ID_KEYS:
                if _s_info[key] != "none" and _s_info[key] != s_info[key]:
                    s_info[key] = _s_info[key]
                    updated = s_info

            for key in self._CAP_KEYS:
                if _s_info[key] != -1 and _s_info[key] != s_info[key]:
                    s_info[key] = _s_info[key]
                    updated = s_info

        if updated is not None:
            # Re-place the server in its NUMA cell with the fresh info.
            cell = self.NUMA.pop_cell_of_server(updated)

            if updated["numa"] == "none":
                if cell != "none":
                    updated["numa"] = cell

            self.NUMA.add_server(updated)

        return updated

    def remove_membership(self, _g):
        """Remove a membership.

        To return to the resource pool for other placements.
        """

        if _g.factory in ("valet", "server-group"):
            if self.name not in _g.member_hosts.keys():
                del self.memberships[_g.name]

                return True

        return False

    def compute_cpus(self, _overcommit_ratio):
        """Compute and init oversubscribed CPUs."""

        if self.vCPUs == 0:
            # New host case

            self.vCPUs = self.original_vCPUs * _overcommit_ratio
            self.avail_vCPUs = self.vCPUs
            self.NUMA.init_cpus(self.vCPUs)
        else:
            vcpus = self.original_vCPUs * _overcommit_ratio

            if vcpus != self.vCPUs:
                # Change of overcommit_ratio

                self.NUMA.adjust_cpus(self.vCPUs, vcpus)

                used = self.vCPUs - self.avail_vCPUs

                self.vCPUs = vcpus
                self.avail_vCPUs = self.vCPUs - used

    def compute_avail_cpus(self):
        """Compute available CPUs after placements."""

        avail_vcpus = self.vCPUs - self.vCPUs_used

        if avail_vcpus != self.avail_vCPUs:
            # Incurred due to unknown server placement.

            diff = self.avail_vCPUs - avail_vcpus
            self.NUMA.apply_unknown_cpus(diff)

            self.avail_vCPUs = avail_vcpus

            return "avail cpus changed (" + str(diff) + ") in " + self.name

        return "ok"

    def compute_mem(self, _overcommit_ratio):
        """Compute and init oversubscribed mem capacity."""

        if self.mem_cap == 0:
            # New host case

            self.mem_cap = self.original_mem_cap * _overcommit_ratio
            self.avail_mem_cap = self.mem_cap
            self.NUMA.init_mem(self.mem_cap)
        else:
            mem_cap = self.original_mem_cap * _overcommit_ratio

            if mem_cap != self.mem_cap:
                # Change of overcommit_ratio

                self.NUMA.adjust_mem(self.mem_cap, mem_cap)

                used = self.mem_cap - self.avail_mem_cap

                self.mem_cap = mem_cap
                self.avail_mem_cap = self.mem_cap - used

    def compute_avail_mem(self):
        """Compute available mem capacity after placements."""

        used_mem_mb = self.original_mem_cap - self.free_mem_mb

        avail_mem_cap = self.mem_cap - used_mem_mb

        if avail_mem_cap != self.avail_mem_cap:
            # Incurred due to unknown server placement.

            diff = self.avail_mem_cap - avail_mem_cap
            self.NUMA.apply_unknown_mem(diff)

            self.avail_mem_cap = avail_mem_cap

            return "avail mem changed(" + str(diff) + ") in " + self.name

        return "ok"

    def compute_disk(self, _overcommit_ratio):
        """Compute and init oversubscribed disk capacity."""

        if self.local_disk_cap == 0:
            # New host case

            self.local_disk_cap = self.original_local_disk_cap * _overcommit_ratio
            self.avail_local_disk_cap = self.local_disk_cap
        else:
            local_disk_cap = self.original_local_disk_cap * _overcommit_ratio

            if local_disk_cap != self.local_disk_cap:
                # Change of overcommit_ratio

                used = self.local_disk_cap - self.avail_local_disk_cap

                self.local_disk_cap = local_disk_cap
                self.avail_local_disk_cap = self.local_disk_cap - used

    def compute_avail_disk(self):
        """Compute available disk capacity after placements."""

        free_disk_cap = self.free_disk_gb
        if self.disk_available_least > 0:
            free_disk_cap = min(self.free_disk_gb, self.disk_available_least)

        used_disk_cap = self.original_local_disk_cap - free_disk_cap

        avail_local_disk_cap = self.local_disk_cap - used_disk_cap

        if avail_local_disk_cap != self.avail_local_disk_cap:
            diff = self.avail_local_disk_cap - avail_local_disk_cap

            self.avail_local_disk_cap = avail_local_disk_cap

            return "avail disk changed(" + str(diff) + ") in " + self.name

        return "ok"

    def deduct_avail_resources(self, _s_info):
        """Deduct available amount of resources of this host."""

        if _s_info.get("vcpus") != -1:
            self.avail_vCPUs -= _s_info.get("vcpus")
            self.avail_mem_cap -= _s_info.get("mem")
            self.avail_local_disk_cap -= _s_info.get("disk")

    def rollback_avail_resources(self, _s_info):
        """Rollback available amount of resources of this host."""

        if _s_info.get("vcpus") != -1:
            self.avail_vCPUs += _s_info.get("vcpus")
            self.avail_mem_cap += _s_info.get("mem")
            self.avail_local_disk_cap += _s_info.get("disk")

    def get_availability_zone(self):
        """Get the availability-zone of this host.

        Fix: dict.iteritems() does not exist on Python 3; items() is
        equivalent here on both versions.
        """

        for gk, g in self.memberships.items():
            if g.group_type == "az":
                return g

        return None

    def get_aggregates(self):
        """Get the list of Host-Aggregates of this host."""

        aggregates = []

        for gk, g in self.memberships.items():
            if g.group_type == "aggr":
                aggregates.append(g)

        return aggregates

    def get_json_info(self):
        """Get compute host info as JSON format."""

        membership_list = list(self.memberships)

        return {'status': self.status, 'state': self.state,
                'uuid': self.uuid,
                'membership_list': membership_list,
                'vCPUs': self.vCPUs,
                'original_vCPUs': self.original_vCPUs,
                'avail_vCPUs': self.avail_vCPUs,
                'mem': self.mem_cap,
                'original_mem': self.original_mem_cap,
                'avail_mem': self.avail_mem_cap,
                'local_disk': self.local_disk_cap,
                'original_local_disk': self.original_local_disk_cap,
                'avail_local_disk': self.avail_local_disk_cap,
                'vCPUs_used': self.vCPUs_used,
                'free_mem_mb': self.free_mem_mb,
                'free_disk_gb': self.free_disk_gb,
                'disk_available_least': self.disk_available_least,
                'NUMA': self.NUMA.get_json_info(),
                'parent': self.host_group.name,
                'server_list': self.server_list,
                'candidate_host_types': self.candidate_host_types}
class HostGroup(object):
    """Container for host group (rack)."""

    def __init__(self, _id):
        self.name = _id

        self.status = "enabled"
        self.host_group = None

        # 'rack' or 'cluster' (e.g., power domain, zone)
        self.host_type = "rack"

        self.parent_resource = None  # e.g., datacenter object
        self.child_resources = {}    # e.g., hosting server objects

        # Enabled group objects (e.g., aggregate) in this group
        self.memberships = {}

        self.vCPUs = 0
        self.avail_vCPUs = 0

        self.mem_cap = 0  # MB
        self.avail_mem_cap = 0

        self.local_disk_cap = 0  # GB, ephemeral
        self.avail_local_disk_cap = 0

        # A list of placed servers' info
        self.server_list = []

        self.updated = False

    def is_available(self):
        """Check if this host group is enabled."""

        return self.status == "enabled"

    def init_resources(self):
        """Reset all aggregate capacity counters to zero."""

        self.vCPUs = 0
        self.avail_vCPUs = 0
        self.mem_cap = 0  # MB
        self.avail_mem_cap = 0
        self.local_disk_cap = 0  # GB, ephemeral
        self.avail_local_disk_cap = 0

    def init_memberships(self):
        """Drop memberships that do not apply at this grouping level.

        Fix: iterate over a snapshot of the keys -- deleting from a dict
        while iterating its live keys() view raises RuntimeError on
        Python 3.
        """

        for gk in list(self.memberships.keys()):
            g = self.memberships[gk]

            if g.factory == "valet":
                # Imported here (not at module level) to avoid a hard
                # app_manager dependency when only the container is needed.
                from valet.engine.app_manager.group import LEVEL

                if LEVEL.index(g.level) < LEVEL.index(self.host_type):
                    del self.memberships[gk]
            else:
                del self.memberships[gk]

    def remove_membership(self, _g):
        """Remove a membership.

        Returns True when the group no longer places servers on this
        host group and the membership was dropped.
        """

        if _g.factory == "valet":
            if self.name not in _g.member_hosts.keys():
                del self.memberships[_g.name]
                return True

        return False

    def get_json_info(self):
        """Get host group info as JSON format."""

        membership_list = list(self.memberships)
        child_list = list(self.child_resources)

        return {'status': self.status,
                'host_type': self.host_type,
                'membership_list': membership_list,
                'vCPUs': self.vCPUs,
                'avail_vCPUs': self.avail_vCPUs,
                'mem': self.mem_cap,
                'avail_mem': self.avail_mem_cap,
                'local_disk': self.local_disk_cap,
                'avail_local_disk': self.avail_local_disk_cap,
                'parent': self.parent_resource.name,
                'children': child_list,
                'server_list': self.server_list}
def init_cpus(self, _cpus):
    """Apply CPU capacity fairly across the 2 NUMA cells.

    Caused by new compute host.
    """
    # Truncating split: cell_1 absorbs any remainder.
    div = int(float(_cpus) / 2.0)

    self.cell_0["cpus"] = div
    self.cell_1["cpus"] = _cpus - div


def init_mem(self, _mem):
    """Apply mem capacity fairly across the 2 NUMA cells.

    Caused by new compute host.
    """
    div = int(float(_mem) / 2.0)

    self.cell_0["mem"] = div
    self.cell_1["mem"] = _mem - div


def adjust_cpus(self, _old_cpus, _new_cpus):
    """Adjust CPU capacity across NUMA cells after a host capacity change.

    Re-derives each cell's consumption from the old fair split, then
    re-applies it against the new fair split.
    """
    old_div = int(float(_old_cpus) / 2.0)

    # Consumption per cell under the old capacity.
    used_0 = old_div - self.cell_0["cpus"]
    used_1 = (_old_cpus - old_div) - self.cell_1["cpus"]

    new_div = int(float(_new_cpus) / 2.0)

    self.cell_0["cpus"] = new_div - used_0
    self.cell_1["cpus"] = _new_cpus - new_div - used_1


def adjust_mem(self, _old_mem, _new_mem):
    """Adjust mem capacity across NUMA cells after a host capacity change.

    Same scheme as adjust_cpus.
    """
    old_div = int(float(_old_mem) / 2.0)

    used_0 = old_div - self.cell_0["mem"]
    used_1 = (_old_mem - old_div) - self.cell_1["mem"]

    new_div = int(float(_new_mem) / 2.0)

    self.cell_0["mem"] = new_div - used_0
    self.cell_1["mem"] = _new_mem - new_div - used_1


def has_enough_resources(self, _vcpus, _mem):
    """Return True if either cell can fit the requested vcpus AND mem."""
    if _vcpus <= self.cell_0["cpus"] and _mem <= self.cell_0["mem"]:
        return True

    if _vcpus <= self.cell_1["cpus"] and _mem <= self.cell_1["mem"]:
        return True

    return False


def _same_server(s_info, _s_info):
    """Return True if the two server-info dicts identify the same server.

    Match priority: uuid, then (stack_id, name), then (stack_name, name).
    The string "none" marks an unset field and never matches.
    """
    if _s_info["uuid"] != "none" and s_info["uuid"] != "none" \
            and s_info["uuid"] == _s_info["uuid"]:
        return True

    if _s_info["stack_id"] != "none" and s_info["stack_id"] != "none" \
            and s_info["stack_id"] == _s_info["stack_id"] \
            and s_info["name"] == _s_info["name"]:
        return True

    if _s_info["stack_name"] != "none" and s_info["stack_name"] != "none" \
            and s_info["stack_name"] == _s_info["stack_name"] \
            and s_info["name"] == _s_info["name"]:
        return True

    return False


def pop_cell_of_server(self, _s_info):
    """Find and remove the server from whichever cell holds it.

    Searches cell_0 first, then cell_1 (same order as the original).
    Returns "cell_0", "cell_1", or "none" if the server is not placed.
    """
    for cell_name in ("cell_0", "cell_1"):
        cell = getattr(self, cell_name)
        for s_info in cell["server_list"]:
            if _same_server(s_info, _s_info):
                cell["server_list"].remove(s_info)
                return cell_name

    return "none"


def deduct_server_resources(self, _s_info):
    """Place a server into the CPU-richer cell and deduct its resources.

    Returns the name of the chosen cell.
    """
    # Re-placement: drop any stale record of this server first.
    self.pop_cell_of_server(_s_info)

    # Pick the cell with more available CPUs (ties go to cell_1).
    if self.cell_0["cpus"] > self.cell_1["cpus"]:
        cell, cell_name = self.cell_0, "cell_0"
    else:
        cell, cell_name = self.cell_1, "cell_1"

    cell["cpus"] -= _s_info.get("vcpus")
    cell["mem"] -= _s_info.get("mem")
    cell["server_list"].append(_s_info)

    return cell_name


def rollback_server_resources(self, _s_info):
    """Undo a server placement, returning its resources to its cell."""
    cell_name = self.pop_cell_of_server(_s_info)

    if cell_name in ("cell_0", "cell_1"):
        cell = getattr(self, cell_name)
        cell["cpus"] += _s_info.get("vcpus")
        cell["mem"] += _s_info.get("mem")

    # NOTE(review): original left a TODO about handling non-NUMA servers
    # (fair re-apply across cells); behavior intentionally unchanged.


def add_server(self, _s_info):
    """Record the server info in the cell named by _s_info["numa"]."""
    if _s_info["numa"] == "cell_0":
        self.cell_0["server_list"].append(_s_info)
    elif _s_info["numa"] == "cell_1":
        self.cell_1["server_list"].append(_s_info)


def apply_unknown_cpus(self, _diff):
    """Apply unknown cpus fairly across cells.

    Positive _diff deducts capacity; negative _diff rolls it back.
    """
    if _diff > 0:
        # Deduct
        div = int(float(_diff) / 2.0)
        self.cell_0["cpus"] -= div
        self.cell_1["cpus"] -= (_diff - div)
    elif _diff < 0:
        # Rollback
        _diff *= -1
        div = int(float(_diff) / 2.0)
        self.cell_0["cpus"] += div
        self.cell_1["cpus"] += (_diff - div)


def apply_unknown_mem(self, _diff):
    """Apply unknown mem capacity fairly across cells.

    Positive _diff deducts capacity; negative _diff rolls it back.
    """
    if _diff > 0:
        # Deduct
        div = int(float(_diff) / 2.0)
        self.cell_0["mem"] -= div
        self.cell_1["mem"] -= (_diff - div)
    elif _diff < 0:
        # Rollback
        _diff *= -1
        div = int(float(_diff) / 2.0)
        self.cell_0["mem"] += div
        self.cell_1["mem"] += (_diff - div)


def get_json_info(self):
    """Return both NUMA cells as a JSON-ready dict."""
    return {'cell_0': self.cell_0,
            'cell_1': self.cell_1}