提交 23a19983fcdc154690de32ac956f40c08bf2d0b3

作者 qianyingz
1 个父辈 8694e401

监控数据汇总

  1 +from datetime import datetime, timedelta
  2 +
  3 +from sqlalchemy.sql.functions import func
  4 +from .models import MonitorLog, db
  5 +from sqlalchemy import and_
  6 +from app.util.component.ApiTemplate import ApiTemplate
  7 +
  8 +
class Api(ApiTemplate):
    """监控数据汇总 — aggregate MonitorLog rows into a fixed-granularity series.

    The window [start, to] is bucketed by the `grain` parameter
    (day / hour / minu_five / minu_ten / default: one minute).  Every bucket
    in the window is emitted; buckets without data keep None metric values.
    """

    api_name = "统计指标"

    # Metric columns averaged by every grain query, in output order.
    METRIC_FIELDS = ("cpu_usage", "total_mem", "available_mem", "used_mem",
                     "disk", "disk_usage", "net_recv", "net_send")

    def process(self):
        """Parse the query parameters and dispatch on `grain`.

        Returns a dict with "data" (list of per-bucket metric dicts) and,
        on success, "result": True.  Raises on malformed start/to values.
        """
        res = {"data": {}}
        try:
            server = self.para.get("server")
            grain = self.para.get("grain")
            # NOTE(review): the old code also required a "count" parameter but
            # never used it; it is no longer read, so omitting it is now safe.
            start_datetime = datetime.strptime(
                self.para.get("start"), "%Y-%m-%d %H:%M:%S")
            end_datetime = datetime.strptime(
                self.para.get("to"), "%Y-%m-%d %H:%M:%S")

            if grain == "day":
                res["data"] = self._aggregate_by_day(
                    server, start_datetime, end_datetime)
            elif grain == "minu_five":
                res["data"] = self._aggregate_by_minutes(
                    server, start_datetime, end_datetime, 5,
                    MonitorLog.f_minu_stamp)
            elif grain == "minu_ten":
                res["data"] = self._aggregate_by_minutes(
                    server, start_datetime, end_datetime, 10,
                    MonitorLog.d_minu_stamp)
            elif grain == "hour":
                res["data"] = self._aggregate_by_hour(
                    server, start_datetime, end_datetime)
            else:
                res["data"] = self._aggregate_by_minute(
                    server, start_datetime, end_datetime)
            res["result"] = True
        except Exception:
            raise
        return res

    def _avg_columns(self):
        # The averaged metric columns shared by every grain query.
        return [func.avg(getattr(MonitorLog, name)).label(name)
                for name in self.METRIC_FIELDS]

    def _row_to_dict(self, row, key):
        # Flatten one aggregated row into the plain-dict shape used by
        # get_monitor_log / get_result_from_list.
        data = {"key": key}
        for name in self.METRIC_FIELDS:
            data[name] = getattr(row, name)
        return data

    def _aggregate_by_day(self, server, start_datetime, end_datetime):
        # 粒度是一天: one averaged row per date_stamp.
        day_fmt = "%Y-%m-%d"
        logs = db.session.query(
            MonitorLog.date_stamp.label("key"), *self._avg_columns()).filter(and_(
                MonitorLog.date_stamp <= end_datetime.strftime(day_fmt),
                # NOTE(review): lower bound is exclusive (>) while the upper is
                # inclusive (<=) — preserved as-is; confirm against callers.
                MonitorLog.date_stamp > start_datetime.strftime(day_fmt),
                MonitorLog.server == server)).group_by(
            MonitorLog.date_stamp).order_by(MonitorLog.date_stamp.asc()).all()

        keys, keys_map = [], {}
        while start_datetime <= end_datetime:
            day_key = start_datetime.strftime(day_fmt)
            keys.append(self.get_monitor_log(day_key))
            keys_map[day_key] = len(keys) - 1
            start_datetime += timedelta(days=1)
        return self.get_result(logs, keys, keys_map)

    def _aggregate_by_minutes(self, server, start_datetime, end_datetime,
                              step, stamp_column):
        # 粒度是N分钟 (N = step, 5 or 10): stamp_column holds the bucket index
        # within the hour; the end time's remainder re-aligns bucket labels.
        offset = int(end_datetime.strftime("%M")) % step
        logs = db.session.query(
            MonitorLog.hour_stamp.label("hour_stamp"),
            stamp_column.label("minu_stamp"),
            *self._avg_columns()).filter(and_(
                MonitorLog.time_stamp <= "{}:00".format(
                    end_datetime.strftime("%Y-%m-%d %H:%M")),
                MonitorLog.time_stamp >= "{}:00".format(
                    start_datetime.strftime("%Y-%m-%d %H:%M")),
                MonitorLog.server == server)).group_by(
            MonitorLog.date_stamp, MonitorLog.hour_stamp, stamp_column).order_by(
            MonitorLog.hour_stamp.asc(), stamp_column.asc()).all()

        keys, keys_map = [], {}
        while start_datetime <= end_datetime:
            key = start_datetime.strftime('%H:%M')
            keys.append(self.get_monitor_log(key))
            keys_map[key] = len(keys) - 1
            start_datetime += timedelta(minutes=step)

        rows = [self._row_to_dict(log, "%02d:%02d" % (
            log.hour_stamp, log.minu_stamp * step + offset)) for log in logs]
        return self.get_result_from_list(rows, keys, keys_map)

    def _aggregate_by_hour(self, server, start_datetime, end_datetime):
        # 粒度是一小时: one averaged row per hour_stamp.
        logs = db.session.query(
            MonitorLog.hour_stamp.label("key"), *self._avg_columns()).filter(and_(
                MonitorLog.time_stamp <= "{}:59:59".format(
                    end_datetime.strftime("%Y-%m-%d %H")),
                # NOTE(review): the lower bound also uses ":59:59", which skips
                # most of the first hour — looks suspicious but is preserved
                # unchanged; confirm the intended window with the author.
                MonitorLog.time_stamp >= "{}:59:59".format(
                    start_datetime.strftime("%Y-%m-%d %H")),
                MonitorLog.server == server)).group_by(
            MonitorLog.hour_stamp).order_by(MonitorLog.hour_stamp.asc()).all()

        keys, keys_map = [], {}
        while start_datetime <= end_datetime:
            hour = int(start_datetime.strftime('%H'))
            keys.append(self.get_monitor_log("%02d时" % hour))
            keys_map[str(hour)] = len(keys) - 1
            start_datetime += timedelta(hours=1)
        return self.get_result(logs, keys, keys_map)

    def _aggregate_by_minute(self, server, start_datetime, end_datetime):
        # 按分钟统计 (default grain): one averaged row per (hour, minute).
        logs = db.session.query(
            MonitorLog.hour_stamp.label("hour_stamp"),
            MonitorLog.minu_stamp.label("minu_stamp"),
            *self._avg_columns()).filter(and_(
                MonitorLog.time_stamp <= end_datetime.strftime("%Y-%m-%d %H:%M:%S"),
                MonitorLog.time_stamp >= start_datetime.strftime("%Y-%m-%d %H:%M:%S"),
                MonitorLog.server == server)).group_by(
            MonitorLog.hour_stamp, MonitorLog.minu_stamp).order_by(
            MonitorLog.hour_stamp.asc(), MonitorLog.minu_stamp.asc()).all()

        keys, keys_map = [], {}
        while start_datetime <= end_datetime:
            key = start_datetime.strftime('%H:%M')
            keys.append(self.get_monitor_log(key))
            keys_map[key] = len(keys) - 1
            start_datetime += timedelta(minutes=1)

        rows = [self._row_to_dict(log, "%02d:%02d" % (
            log.hour_stamp, log.minu_stamp)) for log in logs]
        return self.get_result_from_list(rows, keys, keys_map)

    api_doc = {
        "tags": ["监控接口"],
        "parameters": [
            {"name": "server",
             "in": "query",
             "type": "string",
             "description": "服务器地址"},
            {"name": "grain",
             "in": "query",
             "type": "string",
             "description": "指标粒度:minu:分钟,minu_five:5分钟,minu_ten:10分钟,hour:1小时,day:每天"},
            {"name": "count",
             "in": "query",
             "type": "string",
             "description": "个数"},
            {"name": "to",
             "in": "query",
             "type": "string",
             "description": "查询终止时间"},
            {"name": "start",
             "in": "query",
             "type": "string",
             "description": "查询起始时间"}
        ],
        "responses": {
            200: {
                "schema": {
                    "properties": {
                    }
                }
            }
        }
    }

    def get_monitor_log(self, key):
        """Return an empty (all-None) metric slot labelled with `key`."""
        slot = {"key": key}
        for name in self.METRIC_FIELDS:
            slot[name] = None
        return slot

    def get_result(self, logs, keys, keys_map):
        """Merge aggregated rows into the pre-built bucket slots.

        `keys_map` maps str(bucket key) -> index into `keys`; rows whose key
        has no bucket are dropped.  Returns the filled `keys` list.
        """
        for log in logs:
            slot_index = keys_map.get(str(log.key))
            if slot_index is None:
                continue
            slot = keys[slot_index]
            if slot is not None:
                for name in self.METRIC_FIELDS:
                    slot[name] = getattr(log, name)
        return keys

    def get_result_from_list(self, logs, keys, keys_map):
        """Same as get_result but for rows already flattened into dicts."""
        for log in logs:
            slot_index = keys_map.get(str(log["key"]))
            if slot_index is None:
                continue
            slot = keys[slot_index]
            if slot is not None:
                for name in self.METRIC_FIELDS:
                    slot[name] = log[name]
        return keys
... ...
1 1 from datetime import datetime, timedelta
2   -
  2 +from operator import or_
  3 +from sqlalchemy.sql.expression import intersect_all
  4 +import re
3 5 from sqlalchemy.sql.functions import func
4   -from .models import MonitorLog, db
  6 +from .models import db, MonitorInfo
5 7 from sqlalchemy import and_
6 8 from app.util.component.ApiTemplate import ApiTemplate
  9 +import math
  10 +from functools import reduce
7 11
8 12
9 13 class Api(ApiTemplate):
... ... @@ -13,205 +17,62 @@ class Api(ApiTemplate):
13 17
14 18 # 返回结果
15 19 res = {}
16   - res["data"] = {}
  20 + res["data"] = []
17 21 logs = []
18 22 try:
19 23 server = self.para.get("server") # server
20   - # metrics_type = self.para.get("metrics_type")
21   - grain = self.para.get("grain")
22   - count = int(self.para.get("count"))
  24 + metrics_type = self.para.get("metrics_type")
  25 + interval = self.para.get("interval")
23 26 start_time = self.para.get("start")
24 27 to_time = self.para.get("to")
25   -
26   - cur_now = datetime.now()
27 28 start_datetime = datetime.strptime(start_time, "%Y-%m-%d %H:%M:%S")
28 29 end_datetime = datetime.strptime(to_time, "%Y-%m-%d %H:%M:%S")
29   -
30   - if grain == "day":
31   - # 粒度是一天
32   - format = "%Y-%m-%d"
33   - logs = db.session.query(MonitorLog.date_stamp.label("key"), func.avg(MonitorLog.cpu_usage).label("cpu_usage"),
34   - func.avg(MonitorLog.total_mem).label(
35   - "total_mem"),
36   - func.avg(MonitorLog.available_mem).label(
37   - "available_mem"),
38   - func.avg(MonitorLog.used_mem).label(
39   - "used_mem"),
40   - func.avg(MonitorLog.disk).label(
41   - "disk"),
42   - func.avg(MonitorLog.disk_usage).label(
43   - "disk_usage"),
44   - func.avg(MonitorLog.net_recv).label(
45   - "net_recv"),
46   - func.avg(MonitorLog.net_send).label("net_send")).filter(and_(
47   - MonitorLog.date_stamp <= end_datetime.strftime(
48   - format),
49   - MonitorLog.date_stamp > start_datetime.strftime(
50   - format),
51   - MonitorLog.server == server)).group_by(MonitorLog.date_stamp).order_by(MonitorLog.date_stamp.asc()).all()
52   -
53   - keys = []
54   - keys_map = {}
55   - while start_datetime <= end_datetime:
56   - keys.append(self.get_monitor_log(
57   - start_datetime.strftime('%Y-%m-%d')))
58   - keys_map[start_datetime.strftime('%Y-%m-%d')] = len(keys)-1
59   - start_datetime += timedelta(days=1)
60   -
61   - res["data"] = self.get_result(logs, keys, keys_map)
62   - elif grain == "minu_five":
63   - # 粒度是5分钟
64   - cur_minu = int(end_datetime.strftime("%M")) % 5
65   - logs = db.session.query(MonitorLog.hour_stamp.label("hour_stamp"),
66   - MonitorLog.f_minu_stamp.label(
67   - "minu_stamp"),
68   - func.avg(MonitorLog.cpu_usage).label(
69   - "cpu_usage"),
70   - func.avg(MonitorLog.total_mem).label(
71   - "total_mem"),
72   - func.avg(MonitorLog.available_mem).label(
73   - "available_mem"),
74   - func.avg(MonitorLog.used_mem).label(
75   - "used_mem"),
76   - func.avg(MonitorLog.disk).label(
77   - "disk"),
78   - func.avg(MonitorLog.disk_usage).label(
79   - "disk_usage"),
80   - func.avg(MonitorLog.net_recv).label(
81   - "net_recv"),
82   - func.avg(MonitorLog.net_send).label("net_send")).filter(and_(
83   - MonitorLog.time_stamp <= "{}:00".format(
84   - end_datetime.strftime("%Y-%m-%d %H:%M")),
85   - MonitorLog.time_stamp >= "{}:00".format(
86   - start_datetime.strftime("%Y-%m-%d %H:%M")),
87   - MonitorLog.server == server)).group_by(MonitorLog.date_stamp, MonitorLog.hour_stamp, MonitorLog.f_minu_stamp).order_by(
88   - MonitorLog.hour_stamp.asc(), MonitorLog.f_minu_stamp.asc()).all()
89   -
90   - keys = []
91   - keys_map = {}
92   - while start_datetime <= end_datetime:
93   - key = start_datetime.strftime('%H:%M')
94   - keys.append(self.get_monitor_log(key))
95   - keys_map[key] = len(keys)-1
96   - start_datetime += timedelta(minutes=5)
97   -
98   - logs = list(map(lambda log:
99   - {"key": "%02d:%02d" % (log.hour_stamp, log.minu_stamp*5+cur_minu),
100   - "cpu_usage": log.cpu_usage, "total_mem": log.total_mem,
101   - "available_mem": log.available_mem, "used_mem": log.used_mem, "disk": log.disk, "disk_usage": log.disk_usage,
102   - "net_recv": log.net_recv, "net_send": log.net_send},
103   - logs))
104   -
105   - res["data"] = self.get_result_from_list(logs, keys, keys_map)
106   - elif grain == "minu_ten":
107   - # 粒度是10分钟
108   - cur_minu = int(end_datetime.strftime("%M")) % 10
109   - logs = db.session.query(MonitorLog.hour_stamp.label("hour_stamp"),
110   - MonitorLog.d_minu_stamp.label(
111   - "minu_stamp"),
112   - func.avg(MonitorLog.cpu_usage).label(
113   - "cpu_usage"),
114   - func.avg(MonitorLog.total_mem).label(
115   - "total_mem"),
116   - func.avg(MonitorLog.available_mem).label(
117   - "available_mem"),
118   - func.avg(MonitorLog.used_mem).label(
119   - "used_mem"),
120   - func.avg(MonitorLog.disk).label(
121   - "disk"),
122   - func.avg(MonitorLog.disk_usage).label(
123   - "disk_usage"),
124   - func.avg(MonitorLog.net_recv).label(
125   - "net_recv"),
126   - func.avg(MonitorLog.net_send).label("net_send")).filter(and_(
127   - MonitorLog.time_stamp <= "{}:00".format(
128   - end_datetime.strftime("%Y-%m-%d %H:%M")),
129   - MonitorLog.time_stamp >= "{}:00".format(
130   - start_datetime.strftime("%Y-%m-%d %H:%M")),
131   - MonitorLog.server == server)).group_by(MonitorLog.date_stamp,
132   - MonitorLog.hour_stamp,
133   - MonitorLog.d_minu_stamp).order_by(MonitorLog.hour_stamp.asc(), MonitorLog.d_minu_stamp.asc()).all()
134   - keys = []
135   - keys_map = {}
136   - while start_datetime <= end_datetime:
137   - key = start_datetime.strftime('%H:%M')
138   - keys.append(self.get_monitor_log(key))
139   - keys_map[key] = len(keys)-1
140   - start_datetime += timedelta(minutes=10)
141   -
142   - logs = list(map(lambda log:
143   - {"key": "%02d:%02d" % (log.hour_stamp, log.minu_stamp*10+cur_minu),
144   - "cpu_usage": log.cpu_usage, "total_mem": log.total_mem,
145   - "available_mem": log.available_mem, "used_mem": log.used_mem, "disk": log.disk, "disk_usage": log.disk_usage,
146   - "net_recv": log.net_recv, "net_send": log.net_send},
147   - logs))
148   -
149   - res["data"] = self.get_result_from_list(logs, keys, keys_map)
150   - elif grain == "hour":
151   - # 粒度是一小时
152   - logs = db.session.query(MonitorLog.hour_stamp.label("key"), func.avg(MonitorLog.cpu_usage).label("cpu_usage"),
153   - func.avg(MonitorLog.total_mem).label(
154   - "total_mem"),
155   - func.avg(MonitorLog.available_mem).label(
156   - "available_mem"),
157   - func.avg(MonitorLog.used_mem).label(
158   - "used_mem"),
159   - func.avg(MonitorLog.disk).label(
160   - "disk"),
161   - func.avg(MonitorLog.disk_usage).label(
162   - "disk_usage"),
163   - func.avg(MonitorLog.net_recv).label(
164   - "net_recv"),
165   - func.avg(MonitorLog.net_send).label("net_send")).filter(and_(
166   - MonitorLog.time_stamp <= "{}:59:59".format(
167   - end_datetime.strftime("%Y-%m-%d %H")),
168   - MonitorLog.time_stamp >= "{}:59:59".format(
169   - start_datetime.strftime("%Y-%m-%d %H")),
170   - MonitorLog.server == server)).group_by(MonitorLog.hour_stamp).order_by(MonitorLog.hour_stamp.asc()).all()
171   - keys = []
172   - keys_map = {}
173   - while start_datetime <= end_datetime:
174   - key = int(start_datetime.strftime('%H'))
175   - keys.append(self.get_monitor_log("%02d时" % key))
176   - keys_map[str(key)] = len(keys)-1
177   - start_datetime += timedelta(hours=1)
178   -
179   - res["data"] = self.get_result(logs, keys, keys_map)
  30 + format = "%Y-%m-%d %H:%M:%S"
  31 +
  32 + interval_size = int(re.findall("\d+", interval)[0])
  33 + interval_unit = re.findall("\D+", interval)[0]
  34 +
  35 + if interval_size == 1 and interval_unit == 'm':
  36 + logs = db.session.query(MonitorInfo.time_stamp.label('key'), MonitorInfo.value).filter(
  37 + and_(MonitorInfo.time_stamp <= end_datetime.strftime(format),
  38 + MonitorInfo.time_stamp > start_datetime.strftime(
  39 + format),
  40 + MonitorInfo.server == server,
  41 + MonitorInfo.metrics == metrics_type)).order_by(MonitorInfo.time_stamp).group_by(
  42 + MonitorInfo.time_stamp, MonitorInfo.value)
  43 + datas = list(map(lambda log:
  44 + {"key": datetime.strftime(log.key, format),
  45 + "value": log.value},
  46 + logs))
  47 + datas = reduce(lambda y, x: y if (x['key'] in [i['key'] for i in y]) else (
  48 + lambda z, u: (z.append(u), z))(y, x)[1], datas, [])
  49 + for data in datas:
  50 + res['data'].append([data['key'], data['value']])
180 51 else:
181   - # 按分钟统计
182   - logs = db.session.query(MonitorLog.hour_stamp.label("hour_stamp"), MonitorLog.minu_stamp.label("minu_stamp"), func.avg(MonitorLog.cpu_usage).label("cpu_usage"),
183   - func.avg(MonitorLog.total_mem).label(
184   - "total_mem"),
185   - func.avg(MonitorLog.available_mem).label(
186   - "available_mem"),
187   - func.avg(MonitorLog.used_mem).label("used_mem"),
188   - func.avg(MonitorLog.disk).label(
189   - "disk"),
190   - func.avg(MonitorLog.disk_usage).label(
191   - "disk_usage"),
192   - func.avg(MonitorLog.net_recv).label(
193   - "net_recv"),
194   - func.avg(MonitorLog.net_send).label("net_send")).filter(and_(
195   - MonitorLog.time_stamp <= end_datetime.strftime("%Y-%m-%d %H:%M:%S"),
196   - MonitorLog.time_stamp >= start_datetime.strftime("%Y-%m-%d %H:%M:%S"),
197   - MonitorLog.server == server)).group_by(MonitorLog.hour_stamp, MonitorLog.minu_stamp).order_by(
198   - MonitorLog.hour_stamp.asc(), MonitorLog.minu_stamp.asc()).all()
199   - keys = []
200   - keys_map = {}
201   - while start_datetime <= end_datetime:
202   - key = start_datetime.strftime('%H:%M')
203   - keys.append(self.get_monitor_log(key))
204   - keys_map[key] = len(keys)-1
205   - start_datetime += timedelta(minutes=1)
206   -
207   - logs = list(map(lambda log:
208   - {"key": "%02d:%02d" % (log.hour_stamp, log.minu_stamp),
209   - "cpu_usage": log.cpu_usage, "total_mem": log.total_mem,
210   - "available_mem": log.available_mem, "used_mem": log.used_mem, "disk": log.disk, "disk_usage": log.disk_usage,
211   - "net_recv": log.net_recv, "net_send": log.net_send},
212   - logs))
213   -
214   - res["data"] = self.get_result_from_list(logs, keys, keys_map)
  52 + interval_start_datetime = start_datetime
  53 + interval_end_datatime = self.get_end_interval(
  54 + interval_start_datetime, interval_unit, interval_size)
  55 + res_format = "%Y-%m-%d %H:%M:%S"
  56 +
  57 + while interval_end_datatime <= end_datetime:
  58 + logs = db.session.query(MonitorInfo.time_stamp.label('key'), MonitorInfo.value).filter(
  59 + and_(MonitorInfo.time_stamp <= interval_end_datatime.strftime(format),
  60 + MonitorInfo.time_stamp > interval_start_datetime.strftime(
  61 + format), MonitorInfo.server == server, MonitorInfo.metrics == metrics_type)).order_by(MonitorInfo.time_stamp).group_by(
  62 + MonitorInfo.time_stamp, MonitorInfo.value).all()
  63 + tmp_data = list(map(lambda log:
  64 + {"key": datetime.strftime(log.key, res_format),
  65 + "value": log.value},
  66 + logs))
  67 + datas = self.get_sample_data(tmp_data)
  68 + datas = reduce(lambda y, x: y if (x['key'] in [i['key'] for i in y]) else (
  69 + lambda z, u: (z.append(u), z))(y, x)[1], datas, [])
  70 + for data in datas:
  71 + res['data'].append([data['key'], data['value']])
  72 +
  73 + interval_start_datetime = interval_end_datatime
  74 + interval_end_datatime = self.get_end_interval(
  75 + interval_start_datetime, interval_unit, interval_size)
215 76 res["result"] = True
216 77 except Exception as e:
217 78 raise e
... ... @@ -224,14 +85,10 @@ class Api(ApiTemplate):
224 85 "in": "query",
225 86 "type": "string",
226 87 "description": "服务器地址"},
227   - {"name": "grain",
228   - "in": "query",
229   - "type": "string",
230   - "description": "指标粒度:minu:分钟,minu_five:5分钟,minu_ten:10分钟,hour:1小时,day:每天"},
231   - {"name": "count",
  88 + {"name": "interval",
232 89 "in": "query",
233 90 "type": "string",
234   - "description": "个数"},
  91 + "description": "间隔"},
235 92 {"name": "to",
236 93 "in": "query",
237 94 "type": "string",
... ... @@ -239,7 +96,11 @@ class Api(ApiTemplate):
239 96 {"name": "start",
240 97 "in": "query",
241 98 "type": "string",
242   - "description": "查询起始时间"}
  99 + "description": "查询起始时间"},
  100 + {"name": "metrics_type",
  101 + "in": "query",
  102 + "type": "string",
  103 + "description": "查询指标"}
243 104 ],
244 105 "responses": {
245 106 200: {
... ... @@ -251,40 +112,50 @@ class Api(ApiTemplate):
251 112 }
252 113 }
253 114
254   - def get_monitor_log(self, key):
255   - return {"key": key,
256   - "cpu_usage": None, "total_mem": None,
257   - "available_mem": None, "used_mem": None, "disk": None, "disk_usage": None,
258   - "net_recv": None, "net_send": None}
259   -
260   - def get_result(self, logs, keys, keys_map):
261   - keys_map_key=keys_map.keys()
262   - for log in logs:
263   - if str(log.key) in keys_map_key:
264   - tmp = keys[keys_map[str(log.key)]]
265   - if tmp != None:
266   - tmp['cpu_usage'] = log.cpu_usage
267   - tmp['total_mem'] = log.total_mem
268   - tmp['available_mem'] = log.available_mem
269   - tmp['used_mem'] = log.used_mem
270   - tmp['disk'] = log.disk
271   - tmp['disk_usage'] = log.disk_usage
272   - tmp['net_recv'] = log.net_recv
273   - tmp['net_send'] = log.net_send
274   - return keys
  115 + def get_end_interval(self, start, unit, size):
  116 + if unit == 'm':
  117 + return start+timedelta(minutes=size)
  118 + elif unit == 'h':
  119 + return start+timedelta(hours=size)
  120 + elif unit == 'd':
  121 + return start+timedelta(days=size)
  122 + else:
  123 + return None
  124 +
  125 + def get_sample_data(self, orginal):
  126 + res = []
  127 + size = len(orginal)
  128 + orginal_stamp = {'head': 1, 'tail': size}
  129 + if size > 1:
  130 + stamp = {'P0': 1,
  131 + 'P50': math.floor(0.5*size),
  132 + # 'P90': math.floor(0.9*size),
  133 + # 'P95': math.floor(0.95*size),
  134 + # 'P99': math.floor(0.99*size),
  135 + 'P100': size}
  136 + elif size == 1:
  137 + stamp = {'P0': 1,
  138 + 'P50': size,
  139 + # 'P90': size,
  140 + # 'P95': size,
  141 + # 'P99': size,
  142 + 'P100': size}
  143 + else:
  144 + return res
  145 +
  146 + for key in dict.keys(orginal_stamp):
  147 + cur_data = orginal[orginal_stamp[key]-1]
  148 + info = {'key': cur_data['key'], 'value': cur_data['value']}
  149 + res.append(info)
  150 +
  151 + data = sorted(orginal, key=lambda x: x['value'])
  152 + for key in dict.keys(stamp):
  153 + cur_data = data[stamp[key]-1]
  154 + info = {'key': cur_data['key'], 'value': cur_data['value']}
  155 + res.append(info)
  156 +
  157 + res.sort(key=self.takeKey)
  158 + return res
275 159
276   - def get_result_from_list(self, logs, keys, keys_map):
277   - keys_map_key=keys_map.keys()
278   - for log in logs:
279   - if str(log["key"]) in keys_map_key:
280   - tmp = keys[keys_map[str(log["key"])]]
281   - if tmp != None:
282   - tmp['cpu_usage'] = log["cpu_usage"]
283   - tmp['total_mem'] = log["total_mem"]
284   - tmp['available_mem'] = log["available_mem"]
285   - tmp['used_mem'] = log["used_mem"]
286   - tmp['disk'] = log["disk"]
287   - tmp['disk_usage'] = log["disk_usage"]
288   - tmp['net_recv'] = log["net_recv"]
289   - tmp['net_send'] = log["net_send"]
290   - return keys
    def takeKey(self, elem):
        # Sort-key extractor used by get_sample_data: orders points by 'key'.
        return elem['key']
... ...
... ... @@ -6,7 +6,7 @@
6 6
7 7 from datetime import datetime
8 8 from time import time
9   -from sqlalchemy import Column, Integer, String, ForeignKey, Text, DateTime, Time, Float, Binary
  9 +from sqlalchemy import Column, Integer, String, ForeignKey, Text, DateTime, Time, Float, Binary,TIMESTAMP
10 10 from sqlalchemy.orm import relationship
11 11 from sqlalchemy.sql.expression import column
12 12 from app.models import db
... ... @@ -44,4 +44,28 @@ class MonitorHost(db.Model):
44 44 user = Column(String(256))
45 45 password = Column(String(256))
46 46 type = Column(String(256))
47   - host_name=Column(String(256))
  47 + host_name = Column(String(256))
  48 +
  49 +
class MonitorCpu(db.Model):
    '''
    CPU monitoring sample row (table dmap_monitor_cpu).

    `stamp` labels a sample's position/percentile within its window
    (head/tail, P0..P100).
    '''
    __tablename__ = "dmap_monitor_cpu"
    guid = Column(String(256), primary_key=True)
    server = Column(String(256))
    date_stamp = Column(Text)
    time_stamp = Column(TIMESTAMP)  # creation timestamp
    stamp = Column(Text)  # P0,P50,P90,P95,P99,P100
    value = Column(Float)
  60 +
class MonitorInfo(db.Model):
    '''
    Generic metric sample row (table dmap_monitor_info).

    `metrics` names the sampled metric (e.g. "cpu_per", "memory_per");
    `stamp` labels a sample's position/percentile within its window.
    '''
    __tablename__ = "dmap_monitor_info"
    guid = Column(String(256), primary_key=True)
    metrics=Column(String(256))
    server = Column(String(256))
    date_stamp = Column(Text)
    time_stamp = Column(TIMESTAMP)  # creation timestamp
    stamp = Column(Text)  # P0,P50,P90,P95,P99,P100
    value = Column(Float)
\ No newline at end of file
... ...
1 1 # import schedule
2 2 from flask import json
3   -import paramiko
4   -from sqlalchemy.sql.sqltypes import JSON
5   -from .models import MonitorHost, MonitorLog
  3 +from .models import MonitorHost, MonitorInfo
6 4 import datetime
7 5 import math
8 6 import time
... ... @@ -13,6 +11,10 @@ from app.util.component.PGUtil import PGUtil
13 11 import configure
14 12 from app.util.component.StructuredPrint import StructurePrint
15 13 import traceback
  14 +import requests
  15 +
  16 +min_size = 60
  17 +size = 60
16 18
17 19
18 20 def pull_metric():
... ... @@ -22,25 +24,50 @@ def pull_metric():
22 24 configure.SQLALCHEMY_DATABASE_URI)
23 25 sys_ds = PGUtil.open_pg_data_source(
24 26 0, configure.SQLALCHEMY_DATABASE_URI)
25   -
26   - #拉取服务器信息
  27 +
  28 + # 拉取服务器信息
27 29
28 30 hosts = sys_session.query(
29 31 MonitorHost.host)
30 32 for host in hosts:
31   - request_uri="http://{}".format(host.host)
32   -
33   -
34   -
  33 + try:
  34 + host_name = host.host
  35 + request_uri = "http://{}".format(host_name)
  36 + response = requests.request("get", request_uri)
  37 + if response.status_code == 200:
  38 + text = response.text
  39 + data = json.loads(text)
  40 + len_metric = len(data)
  41 + if len_metric > min_size:
  42 + metric_data = data[len_metric-min_size:len_metric]
  43 + else:
  44 + continue
  45 +
  46 + type_list = ["cpu_per", "memory_per", "disk_per",
  47 + "disk_read", "disk_write","disk_read_count",
  48 + "disk_write_count","network_sent","network_recv"]
  49 + for type in type_list:
  50 + sample_data = get_sample_data(
  51 + metric_data, type, host_name)
  52 + sys_session.add_all(sample_data)
  53 + sys_session.commit()
  54 +
  55 + except Exception as e:
  56 + StructurePrint().print(e.__str__()+":" + traceback.format_exc(), "error")
35 57 # 获取数据并汇聚为1min的数据入库
36 58 # 结束
37   - pass
38 59 except Exception as e:
39 60 StructurePrint().print(e.__str__()+":" + traceback.format_exc(), "error")
40   - pass
  61 + finally:
  62 + sys_session.rollback()
  63 + if sys_session:
  64 + sys_session.close()
  65 + if sys_ds:
  66 + sys_ds.Destroy()
  67 +
41 68
42 69 def start_schedule():
43   - # # 2分钟巡检一次
  70 + # # 1分钟巡检一次
44 71 try:
45 72 StructurePrint().print("start_schedule")
46 73 schedule.every(1).minutes.do(pull_metric)
... ... @@ -52,3 +79,54 @@ def start_schedule():
52 79 # # Stop the background thread
53 80 # time.sleep(10)
54 81 # stop_run_continuously.set()
  82 +
  83 +
def get_sample_data(orginal, name, host):
    """Down-sample a window of raw metric dicts into MonitorInfo rows.

    Picks the first/last sample (head/tail) plus the min/median/max by the
    metric `name` (P0/P50/P100) and converts each pick via get_data().
    Duplicate picks are kept.  Returns [] for empty input.
    """
    count = len(orginal)
    if count == 0:
        return []
    # 1-based indices; max(1, ...) folds the old count == 1 special case
    # into the general formula (floor(0.5 * 1) == 0 would under-run).
    picks_by_position = {'head': 1, 'tail': count}
    picks_by_value = {'P0': 1,
                      'P50': max(1, math.floor(0.5 * count)),
                      'P100': count}

    res = [get_data(label, host, name, orginal[i - 1])
           for label, i in picks_by_position.items()]
    ranked = sorted(orginal, key=lambda sample: sample[name])
    res += [get_data(label, host, name, ranked[i - 1])
            for label, i in picks_by_value.items()]
    return res
  117 +
  118 +
def get_data(stamp, host, metrics_name, cur_data):
    """Build one MonitorInfo row from a raw sample dict.

    `stamp` labels the sample's position (head/tail/P0/P50/P100);
    `cur_data` must carry a "%Y-%m-%d %H:%M:%S" string under 'timestamp'
    and the metric value under `metrics_name`.
    """
    sample_time = datetime.datetime.strptime(
        cur_data['timestamp'], "%Y-%m-%d %H:%M:%S")
    return MonitorInfo(guid=uuid.uuid1().__str__(),
                       server=host,
                       date_stamp=sample_time.strftime("%Y-%m-%d"),
                       time_stamp=sample_time,
                       stamp=stamp,
                       value=cur_data[metrics_name],
                       metrics=metrics_name)
... ...
注册登录 后发表评论