Commit 80e88bfde60d3b99211049197694143b66d62573

Author: nheweijun
2 parents: e0de742a 7b211188

Merge branch 'master' of http://gitlab.ctune.cn/weijunh/DMapManager

merge task
1 1 from datetime import datetime, timedelta
2   -
  2 +from operator import or_
  3 +from sqlalchemy.sql.expression import intersect_all
  4 +import re
3 5 from sqlalchemy.sql.functions import func
4   -from .models import MonitorLog, db
  6 +from .models import db, MonitorInfo
5 7 from sqlalchemy import and_
6 8 from app.util.component.ApiTemplate import ApiTemplate
  9 +import math
  10 +from functools import reduce
7 11
8 12
9 13 class Api(ApiTemplate):
... ... @@ -13,205 +17,62 @@ class Api(ApiTemplate):
13 17
14 18         # result to return
15 19 res = {}
16   - res["data"] = {}
  20 + res["data"] = []
17 21 logs = []
18 22 try:
19 23 server = self.para.get("server") # server
20   - # metrics_type = self.para.get("metrics_type")
21   - grain = self.para.get("grain")
22   - count = int(self.para.get("count"))
  24 + metrics_type = self.para.get("metrics_type")
  25 + interval = self.para.get("interval")
23 26 start_time = self.para.get("start")
24 27 to_time = self.para.get("to")
25   -
26   - cur_now = datetime.now()
27 28 start_datetime = datetime.strptime(start_time, "%Y-%m-%d %H:%M:%S")
28 29 end_datetime = datetime.strptime(to_time, "%Y-%m-%d %H:%M:%S")
29   -
30   - if grain == "day":
31   -                # granularity: one day
32   - format = "%Y-%m-%d"
33   - logs = db.session.query(MonitorLog.date_stamp.label("key"), func.avg(MonitorLog.cpu_usage).label("cpu_usage"),
34   - func.avg(MonitorLog.total_mem).label(
35   - "total_mem"),
36   - func.avg(MonitorLog.available_mem).label(
37   - "available_mem"),
38   - func.avg(MonitorLog.used_mem).label(
39   - "used_mem"),
40   - func.avg(MonitorLog.disk).label(
41   - "disk"),
42   - func.avg(MonitorLog.disk_usage).label(
43   - "disk_usage"),
44   - func.avg(MonitorLog.net_recv).label(
45   - "net_recv"),
46   - func.avg(MonitorLog.net_send).label("net_send")).filter(and_(
47   - MonitorLog.date_stamp <= end_datetime.strftime(
48   - format),
49   - MonitorLog.date_stamp > start_datetime.strftime(
50   - format),
51   - MonitorLog.server == server)).group_by(MonitorLog.date_stamp).order_by(MonitorLog.date_stamp.asc()).all()
52   -
53   - keys = []
54   - keys_map = {}
55   - while start_datetime <= end_datetime:
56   - keys.append(self.get_monitor_log(
57   - start_datetime.strftime('%Y-%m-%d')))
58   - keys_map[start_datetime.strftime('%Y-%m-%d')] = len(keys)-1
59   - start_datetime += timedelta(days=1)
60   -
61   - res["data"] = self.get_result(logs, keys, keys_map)
62   - elif grain == "minu_five":
63   -                # granularity: 5 minutes
64   - cur_minu = int(end_datetime.strftime("%M")) % 5
65   - logs = db.session.query(MonitorLog.hour_stamp.label("hour_stamp"),
66   - MonitorLog.f_minu_stamp.label(
67   - "minu_stamp"),
68   - func.avg(MonitorLog.cpu_usage).label(
69   - "cpu_usage"),
70   - func.avg(MonitorLog.total_mem).label(
71   - "total_mem"),
72   - func.avg(MonitorLog.available_mem).label(
73   - "available_mem"),
74   - func.avg(MonitorLog.used_mem).label(
75   - "used_mem"),
76   - func.avg(MonitorLog.disk).label(
77   - "disk"),
78   - func.avg(MonitorLog.disk_usage).label(
79   - "disk_usage"),
80   - func.avg(MonitorLog.net_recv).label(
81   - "net_recv"),
82   - func.avg(MonitorLog.net_send).label("net_send")).filter(and_(
83   - MonitorLog.time_stamp <= "{}:00".format(
84   - end_datetime.strftime("%Y-%m-%d %H:%M")),
85   - MonitorLog.time_stamp >= "{}:00".format(
86   - start_datetime.strftime("%Y-%m-%d %H:%M")),
87   - MonitorLog.server == server)).group_by(MonitorLog.date_stamp, MonitorLog.hour_stamp, MonitorLog.f_minu_stamp).order_by(
88   - MonitorLog.hour_stamp.asc(), MonitorLog.f_minu_stamp.asc()).all()
89   -
90   - keys = []
91   - keys_map = {}
92   - while start_datetime <= end_datetime:
93   - key = start_datetime.strftime('%H:%M')
94   - keys.append(self.get_monitor_log(key))
95   - keys_map[key] = len(keys)-1
96   - start_datetime += timedelta(minutes=5)
97   -
98   - logs = list(map(lambda log:
99   - {"key": "%02d:%02d" % (log.hour_stamp, log.minu_stamp*5+cur_minu),
100   - "cpu_usage": log.cpu_usage, "total_mem": log.total_mem,
101   - "available_mem": log.available_mem, "used_mem": log.used_mem, "disk": log.disk, "disk_usage": log.disk_usage,
102   - "net_recv": log.net_recv, "net_send": log.net_send},
103   - logs))
104   -
105   - res["data"] = self.get_result_from_list(logs, keys, keys_map)
106   - elif grain == "minu_ten":
107   -                # granularity: 10 minutes
108   - cur_minu = int(end_datetime.strftime("%M")) % 10
109   - logs = db.session.query(MonitorLog.hour_stamp.label("hour_stamp"),
110   - MonitorLog.d_minu_stamp.label(
111   - "minu_stamp"),
112   - func.avg(MonitorLog.cpu_usage).label(
113   - "cpu_usage"),
114   - func.avg(MonitorLog.total_mem).label(
115   - "total_mem"),
116   - func.avg(MonitorLog.available_mem).label(
117   - "available_mem"),
118   - func.avg(MonitorLog.used_mem).label(
119   - "used_mem"),
120   - func.avg(MonitorLog.disk).label(
121   - "disk"),
122   - func.avg(MonitorLog.disk_usage).label(
123   - "disk_usage"),
124   - func.avg(MonitorLog.net_recv).label(
125   - "net_recv"),
126   - func.avg(MonitorLog.net_send).label("net_send")).filter(and_(
127   - MonitorLog.time_stamp <= "{}:00".format(
128   - end_datetime.strftime("%Y-%m-%d %H:%M")),
129   - MonitorLog.time_stamp >= "{}:00".format(
130   - start_datetime.strftime("%Y-%m-%d %H:%M")),
131   - MonitorLog.server == server)).group_by(MonitorLog.date_stamp,
132   - MonitorLog.hour_stamp,
133   - MonitorLog.d_minu_stamp).order_by(MonitorLog.hour_stamp.asc(), MonitorLog.d_minu_stamp.asc()).all()
134   - keys = []
135   - keys_map = {}
136   - while start_datetime <= end_datetime:
137   - key = start_datetime.strftime('%H:%M')
138   - keys.append(self.get_monitor_log(key))
139   - keys_map[key] = len(keys)-1
140   - start_datetime += timedelta(minutes=10)
141   -
142   - logs = list(map(lambda log:
143   - {"key": "%02d:%02d" % (log.hour_stamp, log.minu_stamp*10+cur_minu),
144   - "cpu_usage": log.cpu_usage, "total_mem": log.total_mem,
145   - "available_mem": log.available_mem, "used_mem": log.used_mem, "disk": log.disk, "disk_usage": log.disk_usage,
146   - "net_recv": log.net_recv, "net_send": log.net_send},
147   - logs))
148   -
149   - res["data"] = self.get_result_from_list(logs, keys, keys_map)
150   - elif grain == "hour":
151   -                # granularity: one hour
152   - logs = db.session.query(MonitorLog.hour_stamp.label("key"), func.avg(MonitorLog.cpu_usage).label("cpu_usage"),
153   - func.avg(MonitorLog.total_mem).label(
154   - "total_mem"),
155   - func.avg(MonitorLog.available_mem).label(
156   - "available_mem"),
157   - func.avg(MonitorLog.used_mem).label(
158   - "used_mem"),
159   - func.avg(MonitorLog.disk).label(
160   - "disk"),
161   - func.avg(MonitorLog.disk_usage).label(
162   - "disk_usage"),
163   - func.avg(MonitorLog.net_recv).label(
164   - "net_recv"),
165   - func.avg(MonitorLog.net_send).label("net_send")).filter(and_(
166   - MonitorLog.time_stamp <= "{}:59:59".format(
167   - end_datetime.strftime("%Y-%m-%d %H")),
168   - MonitorLog.time_stamp >= "{}:59:59".format(
169   - start_datetime.strftime("%Y-%m-%d %H")),
170   - MonitorLog.server == server)).group_by(MonitorLog.hour_stamp).order_by(MonitorLog.hour_stamp.asc()).all()
171   - keys = []
172   - keys_map = {}
173   - while start_datetime <= end_datetime:
174   - key = int(start_datetime.strftime('%H'))
175   - keys.append(self.get_monitor_log("%02d时" % key))
176   - keys_map[str(key)] = len(keys)-1
177   - start_datetime += timedelta(hours=1)
178   -
179   - res["data"] = self.get_result(logs, keys, keys_map)
  30 + format = "%Y-%m-%d %H:%M:%S"
  31 +
  32 + interval_size = int(re.findall("\d+", interval)[0])
  33 + interval_unit = re.findall("\D+", interval)[0]
  34 +
  35 + if interval_size == 1 and interval_unit == 'm':
  36 + logs = db.session.query(MonitorInfo.time_stamp.label('key'), MonitorInfo.value).filter(
  37 + and_(MonitorInfo.time_stamp <= end_datetime.strftime(format),
  38 + MonitorInfo.time_stamp > start_datetime.strftime(
  39 + format),
  40 + MonitorInfo.server == server,
  41 + MonitorInfo.metrics == metrics_type)).order_by(MonitorInfo.time_stamp).group_by(
  42 + MonitorInfo.time_stamp, MonitorInfo.value)
  43 + datas = list(map(lambda log:
  44 + {"key": datetime.strftime(log.key, format),
  45 + "value": log.value},
  46 + logs))
  47 + datas = reduce(lambda y, x: y if (x['key'] in [i['key'] for i in y]) else (
  48 + lambda z, u: (z.append(u), z))(y, x)[1], datas, [])
  49 + for data in datas:
  50 + res['data'].append([data['key'], data['value']])
180 51 else:
181   -                # per-minute aggregation
182   - logs = db.session.query(MonitorLog.hour_stamp.label("hour_stamp"), MonitorLog.minu_stamp.label("minu_stamp"), func.avg(MonitorLog.cpu_usage).label("cpu_usage"),
183   - func.avg(MonitorLog.total_mem).label(
184   - "total_mem"),
185   - func.avg(MonitorLog.available_mem).label(
186   - "available_mem"),
187   - func.avg(MonitorLog.used_mem).label("used_mem"),
188   - func.avg(MonitorLog.disk).label(
189   - "disk"),
190   - func.avg(MonitorLog.disk_usage).label(
191   - "disk_usage"),
192   - func.avg(MonitorLog.net_recv).label(
193   - "net_recv"),
194   - func.avg(MonitorLog.net_send).label("net_send")).filter(and_(
195   - MonitorLog.time_stamp <= end_datetime.strftime("%Y-%m-%d %H:%M:%S"),
196   - MonitorLog.time_stamp >= start_datetime.strftime("%Y-%m-%d %H:%M:%S"),
197   - MonitorLog.server == server)).group_by(MonitorLog.hour_stamp, MonitorLog.minu_stamp).order_by(
198   - MonitorLog.hour_stamp.asc(), MonitorLog.minu_stamp.asc()).all()
199   - keys = []
200   - keys_map = {}
201   - while start_datetime <= end_datetime:
202   - key = start_datetime.strftime('%H:%M')
203   - keys.append(self.get_monitor_log(key))
204   - keys_map[key] = len(keys)-1
205   - start_datetime += timedelta(minutes=1)
206   -
207   - logs = list(map(lambda log:
208   - {"key": "%02d:%02d" % (log.hour_stamp, log.minu_stamp),
209   - "cpu_usage": log.cpu_usage, "total_mem": log.total_mem,
210   - "available_mem": log.available_mem, "used_mem": log.used_mem, "disk": log.disk, "disk_usage": log.disk_usage,
211   - "net_recv": log.net_recv, "net_send": log.net_send},
212   - logs))
213   -
214   - res["data"] = self.get_result_from_list(logs, keys, keys_map)
  52 + interval_start_datetime = start_datetime
  53 + interval_end_datatime = self.get_end_interval(
  54 + interval_start_datetime, interval_unit, interval_size)
  55 + res_format = "%Y-%m-%d %H:%M:%S"
  56 +
  57 + while interval_end_datatime <= end_datetime:
  58 + logs = db.session.query(MonitorInfo.time_stamp.label('key'), MonitorInfo.value).filter(
  59 + and_(MonitorInfo.time_stamp <= interval_end_datatime.strftime(format),
  60 + MonitorInfo.time_stamp > interval_start_datetime.strftime(
  61 + format), MonitorInfo.server == server, MonitorInfo.metrics == metrics_type)).order_by(MonitorInfo.time_stamp).group_by(
  62 + MonitorInfo.time_stamp, MonitorInfo.value).all()
  63 + tmp_data = list(map(lambda log:
  64 + {"key": datetime.strftime(log.key, res_format),
  65 + "value": log.value},
  66 + logs))
  67 + datas = self.get_sample_data(tmp_data)
  68 + datas = reduce(lambda y, x: y if (x['key'] in [i['key'] for i in y]) else (
  69 + lambda z, u: (z.append(u), z))(y, x)[1], datas, [])
  70 + for data in datas:
  71 + res['data'].append([data['key'], data['value']])
  72 +
  73 + interval_start_datetime = interval_end_datatime
  74 + interval_end_datatime = self.get_end_interval(
  75 + interval_start_datetime, interval_unit, interval_size)
215 76 res["result"] = True
216 77 except Exception as e:
217 78 raise e
... ... @@ -224,14 +85,10 @@ class Api(ApiTemplate):
224 85 "in": "query",
225 86 "type": "string",
226 87                        "description": "Server address"},
227   - {"name": "grain",
228   - "in": "query",
229   - "type": "string",
230   -                       "description": "Metric granularity: minu = per minute, minu_five = 5 minutes, minu_ten = 10 minutes, hour = 1 hour, day = daily"},
231   - {"name": "count",
  88 + {"name": "interval",
232 89 "in": "query",
233 90 "type": "string",
234   -                       "description": "Count"},
  91 +                       "description": "Interval"},
235 92 {"name": "to",
236 93 "in": "query",
237 94 "type": "string",
... ... @@ -239,7 +96,11 @@ class Api(ApiTemplate):
239 96 {"name": "start",
240 97 "in": "query",
241 98 "type": "string",
242   -                       "description": "Query start time"}
  99 +                       "description": "Query start time"},
  100 + {"name": "metrics_type",
  101 + "in": "query",
  102 + "type": "string",
 103 +                       "description": "Metric to query"}
243 104 ],
244 105 "responses": {
245 106 200: {
... ... @@ -251,40 +112,50 @@ class Api(ApiTemplate):
251 112 }
252 113 }
253 114
254   - def get_monitor_log(self, key):
255   - return {"key": key,
256   - "cpu_usage": None, "total_mem": None,
257   - "available_mem": None, "used_mem": None, "disk": None, "disk_usage": None,
258   - "net_recv": None, "net_send": None}
259   -
260   - def get_result(self, logs, keys, keys_map):
261   - keys_map_key=keys_map.keys()
262   - for log in logs:
263   - if str(log.key) in keys_map_key:
264   - tmp = keys[keys_map[str(log.key)]]
265   - if tmp != None:
266   - tmp['cpu_usage'] = log.cpu_usage
267   - tmp['total_mem'] = log.total_mem
268   - tmp['available_mem'] = log.available_mem
269   - tmp['used_mem'] = log.used_mem
270   - tmp['disk'] = log.disk
271   - tmp['disk_usage'] = log.disk_usage
272   - tmp['net_recv'] = log.net_recv
273   - tmp['net_send'] = log.net_send
274   - return keys
  115 + def get_end_interval(self, start, unit, size):
  116 + if unit == 'm':
  117 + return start+timedelta(minutes=size)
  118 + elif unit == 'h':
  119 + return start+timedelta(hours=size)
  120 + elif unit == 'd':
  121 + return start+timedelta(days=size)
  122 + else:
  123 + return None
  124 +
  125 + def get_sample_data(self, orginal):
  126 + res = []
  127 + size = len(orginal)
  128 + orginal_stamp = {'head': 1, 'tail': size}
  129 + if size > 1:
  130 + stamp = {'P0': 1,
  131 + 'P50': math.floor(0.5*size),
  132 + # 'P90': math.floor(0.9*size),
  133 + # 'P95': math.floor(0.95*size),
  134 + # 'P99': math.floor(0.99*size),
  135 + 'P100': size}
  136 + elif size == 1:
  137 + stamp = {'P0': 1,
  138 + 'P50': size,
  139 + # 'P90': size,
  140 + # 'P95': size,
  141 + # 'P99': size,
  142 + 'P100': size}
  143 + else:
  144 + return res
  145 +
  146 + for key in dict.keys(orginal_stamp):
  147 + cur_data = orginal[orginal_stamp[key]-1]
  148 + info = {'key': cur_data['key'], 'value': cur_data['value']}
  149 + res.append(info)
  150 +
  151 + data = sorted(orginal, key=lambda x: x['value'])
  152 + for key in dict.keys(stamp):
  153 + cur_data = data[stamp[key]-1]
  154 + info = {'key': cur_data['key'], 'value': cur_data['value']}
  155 + res.append(info)
  156 +
  157 + res.sort(key=self.takeKey)
  158 + return res
275 159
276   - def get_result_from_list(self, logs, keys, keys_map):
277   - keys_map_key=keys_map.keys()
278   - for log in logs:
279   - if str(log["key"]) in keys_map_key:
280   - tmp = keys[keys_map[str(log["key"])]]
281   - if tmp != None:
282   - tmp['cpu_usage'] = log["cpu_usage"]
283   - tmp['total_mem'] = log["total_mem"]
284   - tmp['available_mem'] = log["available_mem"]
285   - tmp['used_mem'] = log["used_mem"]
286   - tmp['disk'] = log["disk"]
287   - tmp['disk_usage'] = log["disk_usage"]
288   - tmp['net_recv'] = log["net_recv"]
289   - tmp['net_send'] = log["net_send"]
290   - return keys
  160 + def takeKey(self, elem):
  161 + return elem['key']
... ...
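Note: the nested reduce/lambda used in both branches above deduplicates the query rows by their "key" field, keeping the first occurrence in input order. A minimal, equivalent sketch for readers (illustrative only; the helper name dedupe_by_key is not part of the commit):

def dedupe_by_key(rows):
    # Keep only the first row seen for each 'key', preserving input order.
    seen = set()
    unique_rows = []
    for row in rows:
        if row["key"] not in seen:
            seen.add(row["key"])
            unique_rows.append(row)
    return unique_rows

# dedupe_by_key([{"key": "a", "value": 1}, {"key": "a", "value": 2}])
# -> [{"key": "a", "value": 1}]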
1 1 # coding=utf-8
2   -# author: 4N
3   -#createtime: 2021/6/11
4   -#email: nheweijun@sina.com
5   -
6   -
  2 +# author: resioR
  3 +#createtime: 2021/12/1
  4 +#email: qianyingz@chinadci.com
7 5 from datetime import datetime
8 6 from time import time
9   -from sqlalchemy import Column, Integer, String, ForeignKey, Text, DateTime, Time, Float, Binary
  7 +from sqlalchemy import Column, Integer, String, ForeignKey, Text, DateTime, Time, Float, Binary,TIMESTAMP
10 8 from sqlalchemy.orm import relationship
11 9 from sqlalchemy.sql.expression import column
12 10 from app.models import db
13 11
14   -
15   -class MonitorLog(db.Model):
16   - '''
17   - '''
18   - __tablename__ = "dmap_monitor_log"
19   - guid = Column(String(256), primary_key=True)
20   - server = Column(String(256))
21   - date_stamp = Column(Text)
22   - hour_stamp = Column(Integer)
23   - minu_stamp = Column(Integer)
24   -    d_minu_stamp = Column(Integer)  # 10-minute granularity, 1-6
25   -    f_minu_stamp = Column(Integer)  # 5-minute granularity, 1-12
26   -    time_stamp = Column(DateTime)  # creation timestamp
27   - cpu_usage = Column(Float)
28   - total_mem = Column(Float)
29   - available_mem = Column(Float)
30   - used_mem = Column(Float)
31   - disk = Column(Float)
32   - disk_usage = Column(Float)
33   - net_recv = Column(Float)
34   - net_send = Column(Float)
35   -
36   -
37 12 class MonitorHost(db.Model):
38 13 '''
39 14     Monitored server configuration
... ... @@ -44,4 +19,16 @@ class MonitorHost(db.Model):
44 19 user = Column(String(256))
45 20 password = Column(String(256))
46 21 type = Column(String(256))
47   - host_name=Column(String(256))
  22 + host_name = Column(String(256))
  23 +
  24 +class MonitorInfo(db.Model):
  25 + '''
  26 + '''
  27 + __tablename__ = "dmap_monitor_info"
  28 + guid = Column(String(256), primary_key=True)
  29 + metrics=Column(String(256))
  30 + server = Column(String(256))
  31 + date_stamp = Column(Text)
  32 +    time_stamp = Column(TIMESTAMP)  # creation timestamp
  33 + stamp = Column(Text) # P0,P50,P90,P95,P99,P100
  34 + value = Column(Float)
\ No newline at end of file
... ...
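For reference, a query against the new dmap_monitor_info table could look like the sketch below. It assumes a Flask application context so that db.session from app.models is usable; the import path, server address, and time range shown are made up for illustration:

from datetime import datetime
from app.models import db
from app.monitor.models import MonitorInfo  # hypothetical import path

def fetch_metric(server, metrics, start, end):
    # Return (time_stamp, value) rows for one metric on one server,
    # ordered by time, within the half-open window (start, end].
    return (db.session.query(MonitorInfo.time_stamp, MonitorInfo.value)
            .filter(MonitorInfo.server == server,
                    MonitorInfo.metrics == metrics,
                    MonitorInfo.time_stamp > start,
                    MonitorInfo.time_stamp <= end)
            .order_by(MonitorInfo.time_stamp)
            .all())

# fetch_metric("192.168.0.10:8000", "cpu_per",
#              datetime(2021, 12, 1, 0, 0, 0), datetime(2021, 12, 1, 1, 0, 0))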
  1 +# coding=utf-8
  2 +# author: resioR
  3 +#createtime: 2021/12/1
  4 +#email: qianyingz@chinadci.com
  5 +
1 6 # import schedule
2 7 from flask import json
3   -import paramiko
4   -from sqlalchemy.sql.sqltypes import JSON
5   -from .models import MonitorHost, MonitorLog
  8 +from .models import MonitorHost, MonitorInfo
6 9 import datetime
7 10 import math
8 11 import time
... ... @@ -13,6 +16,10 @@ from app.util.component.PGUtil import PGUtil
13 16 import configure
14 17 from app.util.component.StructuredPrint import StructurePrint
15 18 import traceback
  19 +import requests
  20 +
  21 +min_size = 60
  22 +size = 60
16 23
17 24
18 25 def pull_metric():
... ... @@ -22,25 +29,50 @@ def pull_metric():
22 29 configure.SQLALCHEMY_DATABASE_URI)
23 30 sys_ds = PGUtil.open_pg_data_source(
24 31 0, configure.SQLALCHEMY_DATABASE_URI)
25   -
26   -    # pull server info
  32 +
  33 +    # pull server info
27 34
28 35 hosts = sys_session.query(
29 36 MonitorHost.host)
30 37 for host in hosts:
31   - request_uri="http://{}".format(host.host)
32   -
33   -
34   -
  38 + try:
  39 + host_name = host.host
  40 + request_uri = "http://{}".format(host_name)
  41 + response = requests.request("get", request_uri)
  42 + if response.status_code == 200:
  43 + text = response.text
  44 + data = json.loads(text)
  45 + len_metric = len(data)
  46 + if len_metric > min_size:
  47 + metric_data = data[len_metric-min_size:len_metric]
  48 + else:
  49 + continue
  50 +
  51 + type_list = ["cpu_per", "memory_per", "disk_per",
  52 + "disk_read", "disk_write","disk_read_count",
  53 + "disk_write_count","network_sent","network_recv"]
  54 + for type in type_list:
  55 + sample_data = get_sample_data(
  56 + metric_data, type, host_name)
  57 + sys_session.add_all(sample_data)
  58 + sys_session.commit()
  59 +
  60 + except Exception as e:
  61 + StructurePrint().print(e.__str__()+":" + traceback.format_exc(), "error")
35 62         # fetch the data, aggregate it to 1-minute granularity, and store it
36 63         # end
37   - pass
38 64 except Exception as e:
39 65 StructurePrint().print(e.__str__()+":" + traceback.format_exc(), "error")
40   - pass
  66 + finally:
  67 + sys_session.rollback()
  68 + if sys_session:
  69 + sys_session.close()
  70 + if sys_ds:
  71 + sys_ds.Destroy()
  72 +
41 73
42 74 def start_schedule():
43   -    # # poll every 2 minutes
  75 +    # # poll every 1 minute
44 76 try:
45 77 StructurePrint().print("start_schedule")
46 78 schedule.every(1).minutes.do(pull_metric)
... ... @@ -52,3 +84,54 @@ def start_schedule():
52 84 # # Stop the background thread
53 85 # time.sleep(10)
54 86 # stop_run_continuously.set()
  87 +
  88 +
  89 +def get_sample_data(orginal, name, host):
  90 + res = []
  91 + size = len(orginal)
  92 + orginal_stamp = {'head': 1, 'tail': size}
  93 + if size > 1:
  94 + stamp = {'P0': 1,
  95 + 'P50': math.floor(0.5*size),
  96 + # 'P90': math.floor(0.9*size),
  97 + # 'P95': math.floor(0.95*size),
  98 + # 'P99': math.floor(0.99*size),
  99 + 'P100': size}
  100 + elif size == 1:
  101 + stamp = {'P0': 1,
  102 + 'P50': size,
  103 + # 'P90': size,
  104 + # 'P95': size,
  105 + # 'P99': size,
  106 + 'P100': size}
  107 + else:
  108 + return res
  109 +
  110 + for key in dict.keys(orginal_stamp):
  111 + cur_data = orginal[orginal_stamp[key]-1]
  112 + info = get_data(key, host, name, cur_data)
  113 + res.append(info)
  114 +
  115 + data = sorted(orginal, key=lambda x: x[name])
  116 + for key in dict.keys(stamp):
  117 + cur_data = data[stamp[key]-1]
  118 + info = get_data(key, host, name, cur_data)
  119 + res.append(info)
  120 +
  121 + return res
  122 +
  123 +
  124 +def get_data(stamp, host, metrics_name, cur_data):
  125 + time_stamp = datetime.datetime.strptime(
  126 + cur_data['timestamp'], "%Y-%m-%d %H:%M:%S")
  127 + date_stamp = time_stamp.strftime("%Y-%m-%d")
  128 + guid = uuid.uuid1().__str__()
  129 + value = cur_data[metrics_name]
  130 + info = MonitorInfo(guid=guid,
  131 + server=host,
  132 + date_stamp=date_stamp,
  133 + time_stamp=time_stamp,
  134 + stamp=stamp,
  135 + value=value,
  136 + metrics=metrics_name)
  137 + return info
... ...
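The get_sample_data function above downsamples each pulled batch to a handful of representative points: the head and tail in time order, plus the P0/P50/P100 points ranked by value (the P90/P95/P99 picks are commented out). A standalone sketch of that selection, with hypothetical names:

import math

def sample_points(rows, value_key):
    # Pick head/tail by time order and min/median/max by value.
    size = len(rows)
    if size == 0:
        return []
    picked = [rows[0], rows[-1]]            # head and tail (time order)
    by_value = sorted(rows, key=lambda r: r[value_key])
    p50_index = math.floor(0.5 * size) if size > 1 else 1
    picked.append(by_value[0])              # P0: minimum value
    picked.append(by_value[p50_index - 1])  # P50: middle value
    picked.append(by_value[-1])             # P100: maximum value
    return picked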