Commit 3f50b0cdd11023fda67958484e32effcfd6ce13d

Author: qianyingz
1 parent 7b211188

Monitor deployment

@@ -97,3 +97,39 @@ def create_app():
     # start_schedule()
     return app

+def create_schedule():
+    monitor = Flask(__name__)
+    monitor.config['SQLALCHEMY_DATABASE_URI'] = configure.SQLALCHEMY_DATABASE_URI
+    monitor.config['echo'] = True
+    monitor.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
+    monitor.config['JSON_AS_ASCII'] = False
+    monitor.config['SECRET_KEY'] = configure.SECRET_KEY
+
+    # allow cookies and credentials to be submitted across domains
+    monitor.config['CORS_SUPPORTS_CREDENTIALS'] = True
+    monitor.config['CORS_ORIGINS'] = "*"
+
+    # Swagger configuration
+    swagger_config = Swagger.DEFAULT_CONFIG
+    swagger_config.update(configure.swagger_configure)
+    Swagger(monitor, config=swagger_config)
+
+    # create the database tables
+    db.init_app(monitor)
+    db.create_all(app=monitor)
+
+    # cross-origin settings
+    CORS(monitor)
+
+    # logging
+    logging.basicConfig(level=configure.log_level)
+    log_file = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "logs", "monitor_log.txt")
+    handler = logging.FileHandler(log_file, encoding='UTF-8')  # log file encoding and location
+    logging_format = logging.Formatter('[%(levelname)s] %(asctime)s %(message)s')
+    handler.setFormatter(logging_format)
+    monitor.logger.addHandler(handler)
+
+    # do not enforce HTTPS for OAuth
+    os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
+    start_schedule()
+    return monitor
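create_schedule() ends by calling start_schedule(), which is not part of this commit. As a rough orientation only, a scheduler of this kind is typically wired with the third-party schedule package (imported by one of the files changed below) and a background loop; the 1-minute interval and the job body here are assumptions, not code from this repository:

# Sketch only: the real start_schedule() is defined elsewhere in this project and is
# not shown in the commit. This is one common way to run a schedule-based job in a
# daemon thread; the interval and the job body are assumptions.
import threading
import time

import schedule  # third-party "schedule" package, imported by the changed files below


def background_job():
    # placeholder for the metric-collection work
    print("collect monitor metrics here")


def start_schedule(poll_seconds: int = 1) -> threading.Event:
    """Run pending jobs in a background thread until the returned event is set."""
    schedule.every(1).minutes.do(background_job)
    stop = threading.Event()

    def loop():
        while not stop.is_set():
            schedule.run_pending()
            time.sleep(poll_seconds)

    threading.Thread(target=loop, daemon=True).start()
    return stop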
-import paramiko
-from .models import MonitorHost, MonitorLog
-import datetime
-import math
-import time
-import uuid
-from app.util.component.PGUtil import PGUtil
-import configure
-from app.util.component.StructuredPrint import StructurePrint
-import traceback
-
-def background_job():
-    try:
-        # servers = [{'sid': 'src1', 'hostname': '172.26.99.160',
-        #             'username': 'monitor', 'password': '123456'},
-        #            {'sid': 'src2', 'hostname': '172.26.60.100',
-        #             'username': 'root', 'password': 'DMap@123'}]
-
-        cur_time = datetime.datetime.now()
-        time_stamp = cur_time.strftime("%Y-%m-%d %H:%M:%S")
-        struct_time = time.strptime(time_stamp, "%Y-%m-%d %H:%M:%S")
-        d_minu_stamp = math.floor(struct_time.tm_min/10)
-        f_minu_stamp = math.floor(struct_time.tm_min/5)
-
-        sys_session = PGUtil.get_db_session(configure.SQLALCHEMY_DATABASE_URI)
-        sys_ds = PGUtil.open_pg_data_source(0, configure.SQLALCHEMY_DATABASE_URI)
-
-        hosts = sys_session.query(
-            MonitorHost.host, MonitorHost.user, MonitorHost.password, MonitorHost.type, MonitorHost.srcid)
-        servers = list(map(lambda host:
-                           {'hostname': host.host, 'username': host.user,
-                            'password': host.password, 'type': host.type,
-                            'sid': host.srcid},
-                           hosts))
-        logs = []
-        for info in servers:
-            try:
-                StructurePrint().print("schedule,host:{},user:{}".format(
-                    info['hostname'], info['username']))
-                # business logic
-                client = paramiko.SSHClient()
-                client.set_missing_host_key_policy(paramiko.AutoAddPolicy)
-                client.connect(hostname=info['hostname'],
-                               username=info['username'], password=info['password'])
-
-                # cpu
-                order = "top -b -n1 | sed -n '3p' | awk '{print $2}'"
-                stdin, stdout, stderr = client.exec_command(order)
-                cpu_usage = stdout.read().decode().split("\n")[0]  # CPU usage
-
-                # memory
-                order = "free -m | sed -n '2p' | awk '{print $2}'"
-                stdin, stdout, stderr = client.exec_command(order)
-                totalMem = stdout.read().decode().split("\n")[0]  # total memory
-
-                order = "free -m | sed -n '2p' | awk '{print $7}'"
-                stdin, stdout, stderr = client.exec_command(order)
-                availableMem = stdout.read().decode().split("\n")[0]  # available memory
-
-                order = "free -m | sed -n '2p' | awk '{print $3}'"
-                stdin, stdout, stderr = client.exec_command(order)
-                usedMem = stdout.read().decode().split("\n")[0]  # used memory
-
-                # disk
-                order = "df -m | grep -v 'overlay\|Filesystem' | awk '{print $1,$2,$3}' | grep /dev | awk '{print $2}' | awk -v total=0 '{total+=$1}END{print total}'"
-                stdin, stdout, stderr = client.exec_command(order)
-                totalDisk = int(stdout.read().decode().split("\n")[0])  # total disk space, in MB
-
-                order = "df -m | grep -v 'overlay\|Filesystem' | awk '{print $1,$2,$3}' | grep /dev | awk '{print $3}' | awk -v total=0 '{total+=$1}END{print total}'"
-                stdin, stdout, stderr = client.exec_command(order)
-                usedDisk = int(stdout.read().decode().split("\n")[0])  # used disk space, in MB
-
-                # network
-                rx_time = []
-                rx_bytes = []
-                tx_time = []
-                tx_bytes = []
-
-                # bytes received
-                order = "ifconfig | grep RX | grep -v 'errors'| awk -v total=0 '{total+=$5}END{print total}'"
-                i = 0
-                while i < 2:
-                    i = i+1
-                    stdin, stdout, stderr = client.exec_command(order)
-                    rx_time.append(time.time())
-                    rx_bytes.append(int(stdout.read().decode().split("\n")[0]))
-
-                # bytes sent
-                order = "ifconfig | grep TX | grep -v 'errors'| awk -v total=0 '{total+=$5}END{print total}'"
-                i = 0
-                while i < 2:
-                    i = i+1
-                    stdin, stdout, stderr = client.exec_command(order)
-                    tx_time.append(time.time())
-                    tx_bytes.append(int(stdout.read().decode().split("\n")[0]))
-
-                log_guid = uuid.uuid1().__str__()
-                monitor_log = MonitorLog(guid=log_guid,
-                                         server=info["hostname"],
-                                         time_stamp=cur_time,
-                                         cpu_usage=float("%.2f" % float(cpu_usage)),
-                                         total_mem=totalMem,
-                                         available_mem=availableMem,
-                                         used_mem=usedMem,
-                                         disk=totalDisk,
-                                         disk_usage=usedDisk,
-                                         net_recv=float("%.2f" % float(
-                                             (rx_bytes[1] - rx_bytes[0])/(rx_time[1]-rx_time[0]))),
-                                         net_send=float("%.2f" % float(
-                                             (tx_bytes[1] - tx_bytes[0])/(tx_time[1]-tx_time[0]))),
-                                         date_stamp=cur_time.strftime("%Y-%m-%d"),
-                                         hour_stamp=struct_time.tm_hour,
-                                         minu_stamp=struct_time.tm_min,
-                                         d_minu_stamp=1 if d_minu_stamp == 0 else d_minu_stamp,
-                                         f_minu_stamp=1 if f_minu_stamp == 0 else f_minu_stamp)
-
-                logs.append(monitor_log)
-
-            except Exception as e:
-                StructurePrint().print(e.__str__()+":" + traceback.format_exc(), "error")
-                sys_session.rollback()
-        sys_session.add_all(logs)
-        sys_session.commit()
-    except Exception as e2:
-        StructurePrint().print(e2.__str__()+":" + traceback.format_exc(), "error")
-    finally:
-        sys_session.rollback()
-        client.close()
-        if sys_session:
-            sys_session.close()
-        if sys_ds:
-            sys_ds.Destroy()
-
-
-
-def format_value(value):
-    # 1024*1024*1024
-    if value > 1_073_741_824:
-        value = "{}GB".format(format(value/1_073_741_824, '.1f'))
-    elif value > 1_048_576:
-        # 1024*1024
-        value = "{}MB".format(format(value / 1_048_576, '.1f'))
-    elif value > 1024:
-        value = "{}KB".format(format(value / 1024.0, '.1f'))
-    else:
-        value = "{}B".format(format(value, '.1f'))
-    return value
-
-
-def Mb_format_value(value):
-    if value > 1024:
-        value = "{}GB".format(format(value/1024, '.1f'))
-    else:
-        value = "{}MB".format(format(value, '.1f'))
-    return value
@@ -57,7 +57,7 @@ class Api(ApiTemplate):
         while interval_end_datatime <= end_datetime:
             logs = db.session.query(MonitorInfo.time_stamp.label('key'), MonitorInfo.value).filter(
                 and_(MonitorInfo.time_stamp <= interval_end_datatime.strftime(format),
-                     MonitorInfo.time_stamp > interval_start_datetime.strftime(
+                     MonitorInfo.time_stamp > interval_start_datetime.strftime(
                     format), MonitorInfo.server == server, MonitorInfo.metrics == metrics_type)).order_by(MonitorInfo.time_stamp).group_by(
                 MonitorInfo.time_stamp, MonitorInfo.value).all()
             tmp_data = list(map(lambda log:
@@ -129,15 +129,15 @@ class Api(ApiTemplate):
         if size > 1:
             stamp = {'P0': 1,
                      'P50': math.floor(0.5*size),
-                     # 'P90': math.floor(0.9*size),
-                     # 'P95': math.floor(0.95*size),
+                     'P90': math.floor(0.9*size),
+                     'P95': math.floor(0.95*size),
                      # 'P99': math.floor(0.99*size),
                      'P100': size}
         elif size == 1:
             stamp = {'P0': 1,
                      'P50': size,
-                     # 'P90': size,
-                     # 'P95': size,
+                     'P90': size,
+                     'P95': size,
                      # 'P99': size,
                      'P100': size}
         else:
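The two hunks above (and the matching hunk in get_sample_data below) re-enable the P90 and P95 sample points. As a standalone illustration (not code from the repository), the 1-based row indices kept for an ordered result of a given size would be:

import math

def percentile_stamp(size: int) -> dict:
    # 1-based index of the rows kept for each percentile, mirroring the diff above
    if size > 1:
        return {'P0': 1,
                'P50': math.floor(0.5 * size),
                'P90': math.floor(0.9 * size),
                'P95': math.floor(0.95 * size),
                'P100': size}
    if size == 1:
        return {'P0': 1, 'P50': 1, 'P90': 1, 'P95': 1, 'P100': 1}
    return {}

# Example: with 200 ordered monitor rows, rows 1, 100, 180, 190 and 200 are kept.
print(percentile_stamp(200))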
-from .models import MonitorHost, MonitorLog, db
+from .models import MonitorHost, db
 from sqlalchemy import and_
 from app.util.component.ApiTemplate import ApiTemplate
 import uuid
@@ -8,7 +8,6 @@ from flask import json
 from .models import MonitorHost, MonitorInfo
 import datetime
 import math
-import time
 import uuid
 import schedule
 from app.util.component.RunContinuous import run_continuously
@@ -93,15 +92,15 @@ def get_sample_data(orginal, name, host):
     if size > 1:
         stamp = {'P0': 1,
                  'P50': math.floor(0.5*size),
-                 # 'P90': math.floor(0.9*size),
-                 # 'P95': math.floor(0.95*size),
+                 'P90': math.floor(0.9*size),
+                 'P95': math.floor(0.95*size),
                  # 'P99': math.floor(0.99*size),
                  'P100': size}
     elif size == 1:
         stamp = {'P0': 1,
                  'P50': size,
-                 # 'P90': size,
-                 # 'P95': size,
+                 'P90': size,
+                 'P95': size,
                  # 'P99': size,
                  'P100': size}
     else:
@@ -40,6 +40,7 @@ ServerRoot "/etc/httpd"
 #
 #Listen 12.34.56.78:80
 Listen 80
+Listen 81

 #
 # Dynamic Shared Object (DSO) Support
@@ -355,13 +356,27 @@ IncludeOptional conf.d/*.conf
 LoadModule wsgi_module "/usr/lib64/httpd/modules/mod_wsgi-py37.cpython-37m-x86_64-linux-gnu.so"
 "/var/gdal"

-WSGIDaemonProcess dmapmanager processes=4 threads=16 display-name=%{GROUP}
-WSGIProcessGroup dmapmanager
-# forward the Authorization request header
-WSGIPassAuthorization On
-WSGIApplicationGroup %{GLOBAL}
+<VirtualHost *:80>
+    WSGIDaemonProcess dmapmanager processes=4 threads=16 display-name=%{GROUP}
+    WSGIProcessGroup dmapmanager
+    WSGIPassAuthorization On
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIScriptAlias / /usr/src/app/run.wsgi
+    <Directory /usr/>
+        Require all granted
+    </Directory>
+</VirtualHost>
+
+<VirtualHost *:81>
+    WSGIDaemonProcess monitormanager processes=1 threads=8 display-name=%{GROUP}
+    WSGIProcessGroup monitormanager
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIScriptAlias / /usr/src/app/monitor.wsgi
+    <Directory /usr/>
+        Require all granted
+    </Directory>
+</VirtualHost>

-WSGIScriptAlias / /usr/src/app/run.wsgi
 <Directory /usr/>
     Require all granted
 </Directory>
+import sys
+import os
+file_path = os.path.dirname(os.path.realpath(__file__))
+sys.path.insert(0, file_path)
+from run_monitor import monitor as application
@@ -21,10 +21,21 @@ port=$1
 echo "Port set to $1 ..."
 fi

+port2=""
+if [ ! -n "$2" ] ;then
+port2="8841"
+echo "Port 2 not set, using the default port 8841..."
+else
+port2=$2
+echo "Port 2 set to $2 ..."
+fi
+
+
 # start the container and apache
 echo "Starting the container..."
 set="--privileged=true -e TZ="Asia/Shanghai" --restart=always -e ALLOW_IP_RANGE=0.0.0.0/0"
-docker run -d --name $dn $set -p $port:80 -v $curPath/logs/apache.error:/var/log/httpd/error_log -v $curPath:/usr/src/app -v $curPath/httpd.conf:/etc/httpd/conf/httpd.conf dci/dmapmanager:4.1 /usr/sbin/init
+docker run -d --name $dn $set -p $port:80 -p $port2:81 -v $curPath/logs/apache.error:/var/log/httpd/error_log -v $curPath:/usr/src/app -v $curPath/httpd.conf:/etc/httpd/conf/httpd.conf dci/dmapmanager:4.1 /usr/sbin/init
 docker exec -d $dn systemctl start httpd
 sleep 5
 curl localhost:$port/release
+curl localhost:$port2
+# coding=utf-8
+from flask import Flask
+from app import create_schedule
+import os
+os.environ['AUTHLIB_INSECURE_TRANSPORT'] = '1'
+monitor: Flask = create_schedule()
+if __name__ == '__main__':
+    monitor.run(host="0.0.0.0", port="8840", threaded=True, debug=True)
+    # app.run(host="0.0.0.0", port="8840", threaded=True)
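With these entry points in place, the monitor app can be started directly with python run_monitor.py for debugging, while Apache serves the same app through monitor.wsgi on container port 81 (published as $port2 by the start script). A minimal smoke test, assuming the default host and ports from run_monitor.py and the script above, could be:

# Minimal smoke test (sketch). The host and ports are assumptions taken from the
# defaults above: 8840 for the Flask dev server, 8841 for the Apache/mod_wsgi vhost
# published by the start script as $port2.
import urllib.error
import urllib.request


def is_up(url: str) -> bool:
    """Return True if the server answers at all, even with an HTTP error status."""
    try:
        urllib.request.urlopen(url, timeout=5)
        return True
    except urllib.error.HTTPError:
        return True  # the WSGI app responded (e.g. 404), so the service is up
    except (urllib.error.URLError, OSError):
        return False  # connection refused or timed out


if __name__ == '__main__':
    print("dev server :", is_up("http://localhost:8840/"))
    print("apache :81 :", is_up("http://localhost:8841/"))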