"""
Monitoring of OS resource utilization, and auto-tuning of containers.

"""
import argparse
import json
import logging
import os
import re
import shutil
import subprocess
import sys
from pathlib import Path

import time


class Vm:
    """Per-container statistics record.

    Combines raw counters from /proc/vz/vestat, configuration/usage data
    from `vzlist`, CPU affinity read from procfs, and derived values
    (deltas, ratios and moving averages) computed between two snapshots.
    Unknown keyword arguments are ignored so old state files stay loadable.
    """

    def __init__(
            self,
            ctid=None,
            name=None,
            cpu_idle=None,
            cpu_uptime=None,
            cpu_used=None,
            cpu_max_latency=None,
            cpu_total_latency=None,
            cpu_num_scheduled=None,
            cpu_avg_1min=None,
            cpu_avg_5min=None,
            cpu_avg_30min=None,
            cpu_avg_1h=None,
            cpu_avg_2h=None,
            cpu_avg_12h=None,
            cpu_avg_1d=None,
            cpu_avg_7d=None,
            cpu_avg_1mon=None,
            **_kwargs):
        self.ctid = ctid
        self.name = name
        # vestat fields, jiffy-based columns
        self.cpu_user_j = None
        self.cpu_nice_j = None
        self.cpu_system_j = None
        self.cpu_uptime_j = None
        # vestat fields, cycle-based columns
        self.cpu_idle = cpu_idle
        self.cpu_uptime = cpu_uptime
        self.cpu_used = cpu_used
        self.cpu_max_latency = cpu_max_latency
        self.cpu_total_latency = cpu_total_latency
        self.cpu_num_scheduled = cpu_num_scheduled
        # vzlist data
        self.raw_vz_list = None
        self.cpus = None
        self.uptime = None
        self.mem_limit = None
        self.mem_used = None
        self.swap_limit = None
        self.swap_used = None
        self.disk_limit = None
        self.disk_used = None
        # procfs data
        self.cpus_mask = None
        self.cpus_list = None
        self.cpus_allowed = None  # len(cpus_list)
        self.cpu_profile_idx = None  # Profile index, if the affinity matches one.
        # calculated between two snapshots
        self.cpu_uptime_delta = None
        self.cpu_used_delta = None
        self.cpu_idle_delta = None
        self.cpu_used_ratio = None
        self.cpu_idle_ratio = None
        # avg - moving average over the given period
        self.cpu_avg_1min = cpu_avg_1min
        self.cpu_avg_5min = cpu_avg_5min
        self.cpu_avg_30min = cpu_avg_30min
        self.cpu_avg_1h = cpu_avg_1h
        self.cpu_avg_2h = cpu_avg_2h
        self.cpu_avg_12h = cpu_avg_12h
        self.cpu_avg_1d = cpu_avg_1d
        self.cpu_avg_7d = cpu_avg_7d
        self.cpu_avg_1mon = cpu_avg_1mon

    @staticmethod
    def _calc_avg(avg_curr, avg_period, val_curr, val_period):
        """Update an exponential moving average.

        :param avg_curr: previous average; None/0 means "no history yet".
        :param avg_period: averaging window, seconds.
        :param val_curr: current measured value.
        :param val_period: time span covered by the current sample, seconds.
        :return: the updated average, clamped to be non-negative.
        """
        if not avg_curr:
            avg_curr = 0
        # If the sample spans more than the whole averaging window, the EMA
        # weight would exceed 1 and the result would overshoot past
        # val_curr.  Clamp the weight to 1, i.e. just take the new value.
        if val_period > avg_period:
            val_period = avg_period
        avg = avg_curr + (val_curr - avg_curr) * val_period / avg_period
        # Normalize: an average load must never go negative.
        if avg < 0:
            avg = 0
        return avg

    def recalc_cpu_stats(self, prev_vm, cps):
        """Compute deltas, ratios and moving averages against a previous snapshot.

        :param prev_vm: the same container's Vm from the previous snapshot.
        :param cps: node cycles-per-second (to convert cycle deltas to seconds).
        """
        # TODO compute statistics in all possible cases: shutdown, reboot, migration...
        # Counters go backwards on restart/migration; in that case (or with
        # missing data) leave the derived fields as None.
        if not prev_vm \
                or not self.cpu_uptime \
                or not prev_vm.cpu_uptime \
                or self.cpu_uptime < prev_vm.cpu_uptime \
                or self.cpu_idle < prev_vm.cpu_idle \
                or self.cpu_used < prev_vm.cpu_used:
            return
        self.cpu_uptime_delta = self.cpu_uptime-prev_vm.cpu_uptime
        self.cpu_used_delta = self.cpu_used-prev_vm.cpu_used
        self.cpu_idle_delta = self.cpu_idle-prev_vm.cpu_idle
        self.cpu_used_ratio = self.cpu_used_delta / self.cpu_uptime_delta
        self.cpu_idle_ratio = self.cpu_idle_delta / self.cpu_uptime_delta
        # moving averages over the standard set of windows
        self.cpu_avg_1min = self._calc_avg(prev_vm.cpu_avg_1min, 60, self.cpu_used_ratio, self.cpu_uptime_delta/cps)
        self.cpu_avg_5min = self._calc_avg(prev_vm.cpu_avg_5min, 5*60, self.cpu_used_ratio, self.cpu_uptime_delta/cps)
        self.cpu_avg_30min = self._calc_avg(prev_vm.cpu_avg_30min, 30*60, self.cpu_used_ratio, self.cpu_uptime_delta/cps)
        self.cpu_avg_1h = self._calc_avg(prev_vm.cpu_avg_1h, 3600, self.cpu_used_ratio, self.cpu_uptime_delta/cps)
        self.cpu_avg_2h = self._calc_avg(prev_vm.cpu_avg_2h, 2*3600, self.cpu_used_ratio, self.cpu_uptime_delta/cps)
        self.cpu_avg_12h = self._calc_avg(prev_vm.cpu_avg_12h, 12*3600, self.cpu_used_ratio, self.cpu_uptime_delta/cps)
        self.cpu_avg_1d = self._calc_avg(prev_vm.cpu_avg_1d, 24*3600, self.cpu_used_ratio, self.cpu_uptime_delta/cps)
        self.cpu_avg_7d = self._calc_avg(prev_vm.cpu_avg_7d, 7*24*3600, self.cpu_used_ratio, self.cpu_uptime_delta/cps)
        self.cpu_avg_1mon = self._calc_avg(prev_vm.cpu_avg_1mon, 30*24*3600, self.cpu_used_ratio, self.cpu_uptime_delta/cps)
        # latency either does not work, or works not as expected: always 0
        # lat_delta = curr_vm.cpu_total_latency-prev_vm.cpu_total_latency

    def as_dict(self):
        """Return the fields worth persisting between runs (see VmCollection.save)."""
        return {
            'ctid': self.ctid,
            'name': self.name,
            'cpu_idle': self.cpu_idle,
            'cpu_uptime': self.cpu_uptime,
            'cpu_used': self.cpu_used,
            'cpu_max_latency': self.cpu_max_latency,
            'cpu_total_latency': self.cpu_total_latency,
            'cpu_num_scheduled': self.cpu_num_scheduled,
            'cpu_avg_1min': self.cpu_avg_1min,
            'cpu_avg_5min': self.cpu_avg_5min,
            'cpu_avg_30min': self.cpu_avg_30min,
            'cpu_avg_1h': self.cpu_avg_1h,
            'cpu_avg_2h': self.cpu_avg_2h,
            'cpu_avg_12h': self.cpu_avg_12h,
            'cpu_avg_1d': self.cpu_avg_1d,
            'cpu_avg_7d': self.cpu_avg_7d,
            'cpu_avg_1mon': self.cpu_avg_1mon,
        }

    def __repr__(self):
        return f'Vm({self.name}:{self.ctid})'


class VmCollection:
    """Snapshot of the node and all of its containers at one point in time.

    Gathers data from /proc/vz/*, /proc/meminfo, `vzlist` and `vcmmdctl`,
    and can persist/restore per-VM counters as line-delimited JSON.
    """

    # One data row of /proc/vz/vestat (see load_vestat for column meaning).
    VESTAT_RE = re.compile(
        r'^\s*(?P<VEID>\S+)'
        r'\s*(?P<user>\d+)\s*(?P<nice>\d+)\s*(?P<system>\d+)\s*(?P<uptime_j>\d+)'
        r'\s*(?P<idle>\d+)\s*(?P<strv>\d+)\s*(?P<uptime>\d+)\s*(?P<used>\d+)'
        r'\s*(?P<maxlat>\d+)\s*(?P<totlat>\d+)\s*(?P<numsched>\d+)\s*$')
    # "name: value" line of /proc/vz/stats.
    VZ_STATS_RE = re.compile(r'^(?P<var>[^:]+):\s*(?P<val>.*)\s*$')
    # "Name:   12345 kB" line of /proc/meminfo.
    MEM_INFO_RE = re.compile(r'^(?P<var>[^:]+):\s*(?P<val>\d+)(?: (?P<mul>kB))?\s*$')
    # Numeric data row of `vcmmdctl free -b` output.
    VCMMD_FREE_RE = re.compile(
        r'^(?P<available>\d+)'
        r'\s*(?P<qemu>\d+)'
        r'\s*(?P<host>\d+)'
        r'\s*(?P<swap>\d+)'
        r'\s*(?P<total>\d+)'
        r'\s*(?P<guarantee>\d+)\s*$')

    DEFAULT_STATE_PATH = Path('/var/lib/loadctl/vm_stat.json')

    def __init__(self):
        self.vm_list = []
        self.vm_names = {}  # name -> Vm
        self.vm_ids = {}  # ctid -> Vm
        # node-wide parameters
        # vz_stat
        self.vz_stat_timestamp = None
        self.cycles_per_jiffy = None
        self.jiffies_per_second = None
        self.cycles_per_second = None
        self.node_cpus = None
        self.node_cpu_list = None
        self.node_category = None
        self.cpu_profiles = None
        self.mem_total = None
        self.mem_available = None
        self.mem_free = None
        self.mem_buffers = None
        self.mem_cached = None
        self.mem_slab = None
        self.swap_total = None
        self.swap_free = None
        self.vcmmd_available = None
        self.vcmmd_qemu = None
        self.vcmmd_host = None
        self.vcmmd_swap = None
        self.vcmmd_total = None
        self.vcmmd_guarantee = None

    @classmethod
    def load(cls, path=None):
        """Restore a collection from a line-delimited JSON state file.

        A missing or unreadable file yields an empty collection (the
        error is logged, not raised) so the caller can always proceed.
        """
        vms = cls()
        if not path:
            path = cls.DEFAULT_STATE_PATH
        if path.exists():
            try:
                with path.open() as state_file:
                    for line in state_file:
                        vm_data = json.loads(line)
                        vm = Vm(**vm_data)
                        vms.vm_list.append(vm)
                        if vm.ctid:
                            vms.vm_ids[vm.ctid] = vm
                        if vm.name:
                            vms.vm_names[vm.name] = vm
            except (OSError, ValueError):
                logging.exception('State file %s read error', path)
        return vms

    def save(self, path=None):
        """Persist all VMs as line-delimited JSON, atomically.

        Written to a pid-suffixed temp file first, then renamed into
        place, so readers never observe a half-written state file.
        """
        if not path:
            path = self.DEFAULT_STATE_PATH
        tmp_path = path.with_name(f'{path.name}.{os.getpid()}')
        tmp_path.parent.mkdir(parents=True, exist_ok=True)
        with tmp_path.open('w') as tmp_file:
            for vm in self.vm_list:
                json.dump(vm.as_dict(), tmp_file)
                tmp_file.write('\n')
        tmp_path.rename(path)

    @staticmethod
    def node_scan_cpu_list():
        """Return the list of logical CPU ids parsed from /proc/cpuinfo."""
        cpu_list = []
        with Path('/proc/cpuinfo').open() as status_file:
            for st_line in status_file:
                var, _sep, val = st_line.partition(':')
                if var.strip() == 'processor':
                    cpu_list.append(int(val.strip()))
        return cpu_list

    def node_scan(self):
        """Collect the full node snapshot: CPUs, memory, vcmmd and per-VM data."""
        self.node_cpu_list = self.node_scan_cpu_list()
        self.node_cpus = len(self.node_cpu_list)
        # Two profiles for now:
        #   0 - unrestricted (all node cores)
        #   1 - containers with high load (pinned to the last cores)
        self.cpu_profiles = [list(self.node_cpu_list)]
        if self.node_cpus <= 4:
            self.cpu_profiles.append(self.node_cpu_list[-1:])  # the last core
        elif self.node_cpus <= 8:
            self.cpu_profiles.append(self.node_cpu_list[-2:])  # 2 cores
        else:
            self.cpu_profiles.append(self.node_cpu_list[-4:])  # 4 cores
        # OS Mem
        self.load_mem_stat()
        # Guarantee Mem (second line of `vcmmdctl free -b` is the data row)
        proc_ret = subprocess.run(
            ['/usr/sbin/vcmmdctl', 'free', '-b'],
            stdout=subprocess.PIPE, check=True, timeout=20, universal_newlines=True)
        vcmmd_match = self.VCMMD_FREE_RE.match(proc_ret.stdout.split('\n')[1])
        self.vcmmd_available = int(vcmmd_match.group('available'))
        self.vcmmd_qemu = int(vcmmd_match.group('qemu'))
        self.vcmmd_host = int(vcmmd_match.group('host'))
        self.vcmmd_swap = int(vcmmd_match.group('swap'))
        self.vcmmd_total = int(vcmmd_match.group('total'))
        self.vcmmd_guarantee = int(vcmmd_match.group('guarantee'))
        # per-VM info
        self.load_vestat()
        self.load_vzlist()
        self.load_fox_conf()

    def load_mem_stat(self):
        """Parse node memory counters from /proc/meminfo (values in bytes).

        Sample input::

            MemTotal:       65705284 kB
            MemAvailable:   42754260 kB
            MemFree:         8043208 kB
            Buffers:         1217484 kB
            Cached:         11590560 kB
            Slab:           28200492 kB
            SwapTotal:      15769264 kB
            SwapFree:       14650300 kB
        """
        params = {}
        with Path('/proc/meminfo').open() as status_file:
            for st_line in status_file:
                match = self.MEM_INFO_RE.match(st_line)
                if match:
                    params[match.group('var')] = int(match.group('val'))
                    if match.group('mul') == 'kB':
                        params[match.group('var')] *= 1024
        self.mem_total = params['MemTotal']
        self.mem_available = params['MemAvailable']
        self.mem_free = params['MemFree']
        self.mem_buffers = params['Buffers']
        self.mem_cached = params['Cached']
        self.mem_slab = params['Slab']
        self.swap_total = params['SwapTotal']
        self.swap_free = params['SwapFree']

    def load_vzlist(self):
        """Merge `vzlist -j` JSON output into the collection.

        Relevant fields of each record (abridged sample)::

            {
              "ctid": "e4c7fc48-4a38-40dd-884a-506aa8ecb425",
              "name": "vps363741",
              "status": "running",
              "uptime": 37695.211,
              "cpus": 2,
              "physpages": {"held": 11879, "limit": 65536, ...},
              "swappages": {"held": 0, "limit": 65536, ...},
              "diskspace": {"usage": 698196, "hardlimit": 5242880, ...},
              ...
            }

        physpages/swappages are counted in 4 KiB pages, diskspace in KiB.
        """
        proc_ret = subprocess.run(['/usr/sbin/vzlist', '-j'], stdout=subprocess.PIPE, check=True, timeout=20)
        data = json.loads(proc_ret.stdout)
        for rec in data:
            ctid = rec['ctid']
            name = rec.get('name')
            if ctid not in self.vm_ids:
                new_vm = Vm(ctid=ctid)
                self.vm_ids[ctid] = new_vm
                self.vm_list.append(new_vm)
            vm = self.vm_ids[ctid]
            if name:
                self.vm_names[name] = vm
            vm.raw_vz_list = rec  # share ref
            vm.name = name
            vm.cpus = rec.get('cpus')
            vm.uptime = rec.get('uptime')
            physpages = rec.get('physpages')
            if physpages:
                vm.mem_limit = physpages['limit'] * 4096
                vm.mem_used = physpages['held'] * 4096
            swappages = rec.get('swappages')
            if swappages:
                vm.swap_limit = swappages['limit'] * 4096
                vm.swap_used = swappages['held'] * 4096
            diskspace = rec.get('diskspace')
            if diskspace:
                vm.disk_limit = diskspace['hardlimit'] * 1024
                vm.disk_used = diskspace['usage'] * 1024
            # The cpu mask does not seem to be persisted anywhere, so we
            # compute it ourselves from the container's processes.
            vm.cpus_mask = get_ct_cpu_mask(ctid)
            vm.cpus_list = mask_to_list(vm.cpus_mask)
            vm.cpus_allowed = len(vm.cpus_list)
            for idx, profile in enumerate(self.cpu_profiles):
                if vm.cpus_list == profile:
                    vm.cpu_profile_idx = idx
                    break

    def load_vestat(self):
        """Parse per-container CPU counters from /proc/vz/vestat and /proc/vz/stats.

        # cat /proc/vz/vestat
        Version: 2.2
                        VEID                 user                 nice               system               uptime                 idle                 strv               uptime                 used               maxlat               totlat             numsched
                         100                 1634                    0                 1712            343011435     1277332114762816                    0      638679179771038          16971458021                 4921           5633592426               145722

        - column 1 "VEID": VE id
        - column 2 "user", 3 "nice", 4 "system", 5 "uptime" - corresponding std user/nice/system/uptime values in jiffies to standalone linux /proc/stat. Note, there is no "idle" time here, since it can't be calculated this way.

        the next group comes in cycles units:

        - column 6 "idle" - idle time, 7 "strv" - not used, 8 "uptime" - uptime in cycles, 9 "used" - used time by VE across all CPUs in cycles

        the next group is scheduling latency statistics in cycles:
        - column 10 "maxlat" - max latency in cycles meaning how long VE process has to wait before it actually got CPU time.
        - column 11/12 "totlat/numsched", i.e. divide 11 on 12 to get average scheduling latency.

        # cat /proc/vz/stats
        Version: 2.6
        cycles_per_jiffy: 1860831
        jiffies_per_second: 1000
        """
        with Path('/proc/vz/vestat').open() as vestat_file:
            for line in vestat_file:
                match = self.VESTAT_RE.match(line)
                if match:
                    vm_id = match.group('VEID')
                    if vm_id not in self.vm_ids:
                        new_vm = Vm(ctid=vm_id)
                        self.vm_ids[vm_id] = new_vm
                        self.vm_list.append(new_vm)
                    vm = self.vm_ids[vm_id]
                    vm.cpu_user_j = int(match.group('user'))
                    vm.cpu_nice_j = int(match.group('nice'))
                    vm.cpu_system_j = int(match.group('system'))
                    vm.cpu_uptime_j = int(match.group('uptime_j'))
                    vm.cpu_idle = int(match.group('idle'))
                    vm.cpu_uptime = int(match.group('uptime'))
                    vm.cpu_used = int(match.group('used'))
                    vm.cpu_max_latency = int(match.group('maxlat'))
                    vm.cpu_total_latency = int(match.group('totlat'))
                    vm.cpu_num_scheduled = int(match.group('numsched'))
        with Path('/proc/vz/stats').open() as vzstat_file:
            for line in vzstat_file:
                match = self.VZ_STATS_RE.match(line)
                if match:
                    if match.group('var') == 'cycles_per_jiffy':
                        self.cycles_per_jiffy = int(match.group('val').strip())
                    elif match.group('var') == 'jiffies_per_second':
                        self.jiffies_per_second = int(match.group('val').strip())
        self.cycles_per_second = self.cycles_per_jiffy * self.jiffies_per_second
        self.vz_stat_timestamp = int(time.time())

    def load_fox_conf(self):
        """Placeholder: load Fox-specific configuration (not implemented yet)."""
        pass


def get_ct_cpu_mask(ctid):
    """Return the CPU affinity bitmask of container *ctid*, or None.

    CPU affinity is set for the whole container: every process in the
    container carries the same affinity and it cannot be changed from
    inside the container.  So we walk the container's task list and read
    the affinity of the first process that yields one.
    """
    def get_pid_cpus_mask(pid):
        # Cpus_allowed is a hex bitmask in /proc/<pid>/status.
        with Path(f'/proc/{pid}/status').open() as status_file:
            for st_line in status_file:
                if st_line.startswith('Cpus_allowed:'):
                    return int(st_line[len('Cpus_allowed:'):].strip(), 16)
        return None

    with Path(f'/sys/fs/cgroup/pids/machine.slice/{ctid}/tasks').open() as tasks_file:
        for line in tasks_file:
            try:
                pid = int(line.strip())
            except ValueError:
                continue
            try:
                cpus_mask = get_pid_cpus_mask(pid)
            except OSError:
                # The process may have exited between listing and reading.
                continue
            if cpus_mask:
                return cpus_mask
    return None


def mask_to_list(mask):
    """Convert a CPU bitmask into the sorted list of set bit positions."""
    bits = []
    pos = 0
    while mask:
        if mask & 1:
            bits.append(pos)
        mask >>= 1
        pos += 1
    return bits


def cpu_stat_recalc(curr, prev):
    """Recalculate CPU deltas/averages of every VM in `curr` against `prev`."""
    # TODO compute statistics even if the container is not running
    cps = curr.cycles_per_second
    for vm in curr.vm_list:
        vm.recalc_cpu_stats(prev.vm_ids.get(vm.ctid), cps)


def print_cpu_stats(coll, vm=None, detail=True):
    def print_vm():
        if detail:
            print(f'{vm} .. analyze')
            print(f'vm period {vm.cpu_uptime_delta/cps:.1f} sec')
            print(f'vzlist cpus {vm.cpus}, mask {vm.cpus_mask}, list {vm.cpus_list}')
            print(f'vzlist uptime {vm.uptime}')
            print(f'vestat uptime {vm.cpu_uptime//cps} sec')
            print(f'vestat used {vm.cpu_used_delta/cps:.2f} sec, {vm.cpu_used_delta/vm.cpu_uptime_delta*100:.2f}%.')
            print(f'vestat idle {vm.cpu_idle_delta/cps:.2f} sec, {vm.cpu_idle_delta/vm.cpu_uptime_delta*100:.2f}%.')
            # print(f'vestat latency {lat_delta/cps:.2f} sec, {lat_delta/uptime_delta*100:.2f}%, {lat_delta} cycles.')
        else:
            print(
                f'{vm.name or vm.ctid:30}:'
                f' cpus {vm.cpus},'
                f' used {vm.cpu_used_delta/vm.cpu_uptime_delta*100:.2f}%'
                f' (1m/5m/1h = {vm.cpu_avg_1min:.2f}/{vm.cpu_avg_5min:.2f}/{vm.cpu_avg_1h:.2f}),'
                f' idle {vm.cpu_idle_delta/vm.cpu_uptime_delta*100:.2f}%,'
                # f' lat {lat_delta/uptime_delta*100:.2f}%,'
                f' mask {vm.cpus_mask}, list: {vm.cpus_list}'
            )

    cps = coll.cycles_per_second  # shortcut
    if vm:
        print_vm()
    else:
        for vm in coll.vm_list:
            print_vm()


def curses_monitor_wrapper(*args, **kwargs):
    """Run the curses monitor via curses.wrapper (restores the terminal on exit)."""
    import curses
    return curses.wrapper(curses_monitor, *args, **kwargs)


def human_duration(secs):
    """Format a duration in seconds as 'Dd:Hh:MMm:SSs'."""
    total = int(secs)
    minutes, seconds = divmod(total, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    return f'{days}d:{hours}h:{minutes:02}m:{seconds:02}s'


def curses_monitor(stdscr, period=1, **_kwargs):
    """Interactive full-screen node/container monitor (vztop-like).

    Layout: a 3-line summary of node-wide CPU / memory / swap / vcmmd
    guarantee / disk bars at the top, a table header, then one row per
    container in a scrollable pad.  Rescans the node and redraws every
    `period` seconds; runs until interrupted (e.g. Ctrl-C).
    """
    def draw_summary():
        # Render the node-wide usage bars into the summary window.
        bar_len = 20
        cpu_used = 0
        for vm in curr_vms.vm_list:
            if vm.cpu_used_ratio:
                cpu_used += vm.cpu_used_ratio
        cpu_used_len = round(bar_len*cpu_used/curr_vms.node_cpus)
        cpu_bar = '#'*cpu_used_len + '.'*(bar_len - cpu_used_len)
        if cpu_used/curr_vms.node_cpus < 0.4:
            cpu_attr = green_pair
        elif cpu_used/curr_vms.node_cpus < 0.7:
            cpu_attr = yellow_pair
        else:
            cpu_attr = red_pair

        mem_used = curr_vms.mem_total-curr_vms.mem_available
        mem_used_len = round(bar_len*mem_used/curr_vms.mem_total)
        mem_cache_len = round(bar_len*(curr_vms.mem_buffers+curr_vms.mem_cached+curr_vms.mem_slab)/curr_vms.mem_total)
        mem_bar = '*'*mem_used_len + '+'*mem_cache_len + '.'*(bar_len-mem_used_len-mem_cache_len)
        if mem_used/curr_vms.mem_total < 0.5:
            mem_attr = green_pair
        elif mem_used/curr_vms.mem_total < 0.8:
            mem_attr = yellow_pair
        else:
            mem_attr = red_pair

        swap_used = curr_vms.swap_total-curr_vms.swap_free
        swap_used_len = round(bar_len*swap_used/curr_vms.swap_total)
        swap_bar = '*'*swap_used_len + '.'*(bar_len-swap_used_len)
        # Ideally we should look at swap *activity*, not occupancy:
        # a node hosts many VMs, not all active, so a lot of memory may
        # legitimately be swapped out.
        if swap_used/curr_vms.swap_total < 0.1:
            swap_attr = green_pair
        elif swap_used/curr_vms.swap_total < 0.3:
            swap_attr = yellow_pair
        else:
            swap_attr = red_pair

        vcmmd_used = curr_vms.vcmmd_total-curr_vms.vcmmd_available
        gua_sys_len = round(bar_len*(vcmmd_used-curr_vms.vcmmd_guarantee)/curr_vms.vcmmd_total)
        gua_used_len = round(bar_len*vcmmd_used/curr_vms.vcmmd_total)
        gua_bar = '='*gua_sys_len + '*'*(gua_used_len-gua_sys_len) + '.'*(bar_len-gua_used_len)
        if vcmmd_used/curr_vms.vcmmd_total < 0.5:
            gua_attr = green_pair
        elif vcmmd_used/curr_vms.vcmmd_total < 0.8:
            gua_attr = yellow_pair
        else:
            gua_attr = red_pair

        disk_stat = shutil.disk_usage('/vz/.')
        disk_used_len = round(bar_len*disk_stat.used/disk_stat.total)
        disk_bar = '#'*disk_used_len + '.'*(bar_len-disk_used_len)
        if disk_stat.used/disk_stat.total < 0.6:
            disk_attr = green_pair
        elif disk_stat.used/disk_stat.total < 0.75:
            disk_attr = yellow_pair
        else:
            disk_attr = red_pair

        summary.addnstr(
            0, 0,
            f'{"Ct CPU:":8}[{cpu_bar}] {cpu_used:.02f}/{curr_vms.node_cpus:.02f}',
            cols, cpu_attr)
        summary.addnstr(
            1, 0,
            f'{"Ct Mem:":8}[{mem_bar}] {mem_used//2**20}/{curr_vms.mem_total//2**20}',
            50, mem_attr)
        summary.addnstr(
            1, 50,
            f'{"Ct Gua:":8}[{gua_bar}] {vcmmd_used//2**20}/{curr_vms.vcmmd_total//2**20}',
            cols-50, gua_attr)
        summary.addnstr(
            2, 0,
            f'{"Swap:":8}[{swap_bar}] {swap_used//2**20}/{curr_vms.swap_total//2**20}',
            50, swap_attr)
        summary.addnstr(
            2, 50,
            f'{"Ct Disk:":8}[{disk_bar}] {disk_stat.used//2**30}/{disk_stat.total//2**30}',
            cols-50, disk_attr)
        summary.noutrefresh()

    def draw_header():
        # Standout-attribute column titles for the per-container table.
        header.addnstr(
            0, 0,
            f'{"ctid":37}|{"name":17}|{"uptime":16}|{"cpus":7}|{"used CPU":11}|{"cpu time":16}|{"affinity":9}|'
            f'{"mem":9}|{"used mem":9}|{"swap":9}|{"used swap":9}|{"disk":9}|{"used disk":9}|',
            cols, curses.A_STANDOUT)
        header.noutrefresh()

    def draw_table():
        # One row per container; the pad is resized to fit the VM count.
        table.clear()
        table.resize(max(1, len(curr_vms.vm_list)), cols)
        for idx, vm in enumerate(curr_vms.vm_list):
            if vm.cpu_used_delta is not None:
                cpu_usage_str = f'{vm.cpu_used_delta/vm.cpu_uptime_delta*100:.01f}%'
            else:
                cpu_usage_str = '-'
            affinity = f'{vm.cpus_allowed}/{vm.cpus_mask:x}'
            table.addnstr(
                idx, 0,
                f'{vm.ctid:38}{vm.name[-17:] if vm.name else "-":18}{human_duration(vm.uptime):17}'
                f'{vm.cpus:<8}{cpu_usage_str:12}{human_duration(vm.cpu_used/curr_vms.cycles_per_second):17}'
                f'{affinity:10}'
                f'{vm.mem_limit//2**20:<10}{vm.mem_used//2**20:<10}'
                f'{vm.swap_limit//2**20:<10}{vm.swap_used//2**20:<10}'
                f'{vm.disk_limit/2**30:<10.01f}{vm.disk_used/2**30:<10.01f}',
                cols)
        table.noutrefresh(table_pos, 0, 4, 0, lines - 1, cols)

    def next_data():
        # Take a fresh node snapshot and derive deltas against the old one.
        nonlocal prev_vms, curr_vms
        prev_vms = curr_vms
        curr_vms = VmCollection()
        curr_vms.node_scan()
        if prev_vms:
            cpu_stat_recalc(curr_vms, prev_vms)

    import curses
    prev_vms = None  # type: VmCollection
    curr_vms = None  # type: VmCollection
    next_data()

    curses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK)
    curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)
    curses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_BLACK)
    green_pair = curses.color_pair(2)
    yellow_pair = curses.color_pair(3)
    red_pair = curses.color_pair(1)
    curses.curs_set(0)
    stdscr.clear()
    lines, cols = stdscr.getmaxyx()
    summary = stdscr.derwin(3, cols, 0, 0)
    # summary.addnstr(1, 0, f'lines {lines}, cols {cols}', cols)
    header = stdscr.derwin(1, cols, 3, 0)
    table = curses.newpad(1, cols)
    table_pos = 0  # scroll_pos
    draw_summary()
    draw_header()
    draw_table()
    curses.doupdate()
    upd_ts = time.monotonic()

    while True:
        # read and process keys
        now = time.monotonic()
        if now - upd_ts >= period:
            upd_ts = now
            next_data()
            draw_summary()
            draw_table()
        curses.doupdate()
        curses.napms(100)


def do_monitor(period=1, **kwargs):
    """CLI action: run the interactive curses monitor until interrupted."""
    try:
        curses_monitor_wrapper(period=period, **kwargs)
    except KeyboardInterrupt:
        print('Interrupt')


def cpu_balancer(vms, dry_run=False):
    """Move containers between CPU-affinity profiles based on load averages.

    Containers with sustained high load are pinned to the dedicated
    "high load" core set (profile 1); once the load drops below the
    (lower, hysteresis) thresholds they return to the unrestricted
    profile 0.  Profile changes are applied with `vzctl set --cpumask`
    unless `dry_run` is set.
    """
    for vm in vms.vm_list:
        if vm.uptime:
            uptime = int(vm.uptime)
            uptime_str = f'{uptime//(3600*24)}d:{(uptime//3600)%24}h:{(uptime//60)%60}m'
        else:
            uptime_str = '-'
        # A VM known only from vestat may have name/cpus unset; None does
        # not accept width format specs, so substitute placeholders.
        name_str = vm.name or '-'
        cpus_str = f'{vm.cpus:2}' if vm.cpus is not None else ' ?'
        vm_repr = f'{vm.ctid} {name_str:12} cpus {cpus_str} uptime {uptime_str:12}'

        if vm.cpu_used_ratio is None:
            logging.info(f'{vm_repr} empty cpu stat')
            continue
        target_profile = vm.cpu_profile_idx
        if vm.cpu_profile_idx == 0:
            # Escalate to the restricted profile on sustained high load.
            if (
                    vm.cpu_avg_30min > 4
                    or vm.cpu_avg_2h > 2
                    or (vm.cpus <= 2 and vm.cpu_avg_1d > 1.5)
            ):
                target_profile = 1
        elif vm.cpu_profile_idx == 1:
            # De-escalate with hysteresis: thresholds are lower than the
            # escalation ones to avoid flapping between profiles.
            if vm.cpus > 2 and vm.cpu_avg_2h < 1.5:
                target_profile = 0
            elif vm.cpus <= 2 and vm.cpu_avg_1d < 1.3:
                target_profile = 0

        if target_profile is None:
            logging.info(f'{vm_repr} unknown profile: {vm.cpus_list} ... skip.')
        elif vm.cpu_profile_idx != target_profile:
            logging.info(f'{vm_repr} set profile {vms.cpu_profiles[target_profile]}'
                         f' (30m={vm.cpu_avg_30min:.2f} 2h={vm.cpu_avg_2h:.2f} 1d={vm.cpu_avg_1d:.2f})')
            if not dry_run:
                vzctl_ret = subprocess.run(
                    [
                        '/usr/sbin/vzctl', 'set', vm.ctid,
                        f'--cpumask={",".join(map(str,vms.cpu_profiles[target_profile]))}'],
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                if vzctl_ret.returncode:
                    logging.error(
                        'set profile error, cmd %s, ret=%s, stdout:\n%s\n\nstd err:\n%s',
                        vzctl_ret.args, vzctl_ret.returncode, vzctl_ret.stdout, vzctl_ret.stderr)
        else:
            # Note: the original message here lacked the opening '(' -
            # keep both log lines in the same balanced format.
            logging.info(f'{vm_repr} cur profile {vms.cpu_profiles[target_profile]}'
                         f' (30m={vm.cpu_avg_30min:.2f} 2h={vm.cpu_avg_2h:.2f} 1d={vm.cpu_avg_1d:.2f})')


def do_balancer(dry_run=False, **_kwargs):
    """CLI action: one balancing pass.

    Loads the previous snapshot from the state file, scans the node,
    recalculates per-VM CPU stats, applies any profile changes (skipped
    when dry_run) and persists the new snapshot for the next run.
    """
    prev_vms = VmCollection.load()
    curr_vms = VmCollection()
    curr_vms.node_scan()
    # print(curr_vms.vm_list)
    cpu_stat_recalc(curr_vms, prev_vms)
    cpu_balancer(curr_vms, dry_run=dry_run)
    curr_vms.save()


def main(args=None):
    """CLI entry point: parse arguments and dispatch to the chosen action.

    :param args: full argv including the program name; defaults to sys.argv.
    :return: 0 on success.
    """
    if args is None:
        args = sys.argv
    logging.info('Start %s', args)
    arg_parser = argparse.ArgumentParser(prog=args and args[0])
    # With no sub-command the monitor is the default action.
    arg_parser.set_defaults(action=do_monitor)
    arg_parser.add_argument('--dry-run', '-n', action='store_true', help='Ничего не менять')
    sub_parsers = arg_parser.add_subparsers(dest='command')

    monitor_parser = sub_parsers.add_parser('monitor', help='он-лайн мониторинг ноды и контейнеров, как vztop')
    monitor_parser.add_argument('--csv', action='store_true', help='Вывод в CSV')
    monitor_parser.add_argument('--json', action='store_true', help='Вывод в JSON', dest='json_')
    # default=1 is required: without it an omitted -p yields period=None,
    # and the monitor loop would crash on "now - upd_ts >= None".
    monitor_parser.add_argument('--period', '-p', type=float, default=1, help='Период обновления в секундах')
    monitor_parser.set_defaults(action=do_monitor)

    balancer_parser = sub_parsers.add_parser('balancer', help='Изменение профилей контейнеров в зависимости от нагрузки')
    balancer_parser.set_defaults(action=do_balancer)

    params = arg_parser.parse_args(args[1:])
    # The action signatures absorb the extra namespace keys via **kwargs.
    params.action(**vars(params))
    logging.info('Finish %s', args)
    return 0


if __name__ == '__main__':
    # Script entry point: configure root logging and run the CLI.
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(process)d %(name)s %(levelname)s %(message)s')
    main()