Mirror of https://github.com/reddit-archive/reddit.git (synced 2026-04-27 03:00:12 -04:00)
added cache lifetime support to service monitor. Also updated db to show max connections.
This commit is contained in:
@@ -19,16 +19,19 @@
|
||||
# All portions of the code written by CondeNet are Copyright (c) 2006-2008
|
||||
# CondeNet, Inc. All Rights Reserved.
|
||||
################################################################################
|
||||
import os, re, sys, socket, time
|
||||
import os, re, sys, socket, time, random, time
|
||||
from itertools import chain
|
||||
|
||||
from wrapped import Wrapped
|
||||
from datetime import datetime, timedelta
|
||||
from pylons import g
|
||||
from r2.lib.utils import tup
|
||||
from itertools import chain
|
||||
from r2.lib.cache import Memcache
|
||||
|
||||
class AppServiceMonitor(Wrapped):
|
||||
cache_key = "machine_datalogger_data_"
|
||||
cache_key_small = "machine_datalogger_db_summary_"
|
||||
cache_lifetime = "memcached_lifetime"
|
||||
|
||||
"""
|
||||
Master controller class for service monitoring.
|
||||
@@ -65,6 +68,15 @@ class AppServiceMonitor(Wrapped):
|
||||
self.hostlogs = []
|
||||
Wrapped.__init__(self)
|
||||
|
||||
    @classmethod
    def set_cache_lifetime(cls, data):
        """Publish the measured memcached-lifetime log to the render cache.

        `data` is the DataLogger accumulated by monitor_cache_lifetime();
        it is stored under the shared class-level key (cls.cache_lifetime,
        "memcached_lifetime") so any app instance can read the latest
        measurement via get_cache_lifetime().
        """
        g.rendercache.set(cls.cache_lifetime, data)
|
||||
|
||||
    @classmethod
    def get_cache_lifetime(cls, average = None):
        """Fetch the memcached-lifetime log and return a (possibly averaged) reading.

        `average` is forwarded to the stored DataLogger when it is called;
        NOTE(review): assumes DataLogger instances are callable with an
        averaging window and that a fresh DataLogger() yields a sensible
        "no data yet" value — confirm against DataLogger's definition
        (not visible in this chunk).
        """
        # A new DataLogger() is the fallback when nothing has been published
        # yet (it is constructed eagerly even on a cache hit — cheap enough).
        d = g.rendercache.get(cls.cache_lifetime, DataLogger())
        return d(average)
|
||||
|
||||
@classmethod
|
||||
def from_cache(cls, host):
|
||||
key = cls.cache_key + str(host)
|
||||
@@ -401,3 +413,54 @@ def check_database(db_names, proc = "postgres", check_vacuum = True, user='ri'):
|
||||
continue
|
||||
|
||||
return res
|
||||
|
||||
def monitor_cache_lifetime(minutes, retest = 10, ntest = -1,
                           cache_key = "cache_life_", verbose = False):
    """Empirically measure how long memcached actually keeps keys alive.

    Seeds memcached with `minutes` test keys, each tagged with its age in
    minutes, then wakes up once a minute and pops the next-oldest key.  When
    a key has vanished (or the batch is exhausted), its age * 60s is logged
    as an observed cache lifetime and published via
    AppServiceMonitor.set_cache_lifetime().

    minutes   -- horizon of the test: number of keys (one per minute) per batch
    retest    -- reseed a fresh batch every `retest` minutes (0/None disables)
    ntest     -- number of one-minute cycles to run; -1 means run forever
    cache_key -- prefix for the generated test keys
    verbose   -- print each observed expiration to stdout

    Intended to run as a long-lived monitoring loop; does not return a value.
    """

    # list of lists of active memcache test keys (one inner list per batch)
    keys = []
    period = 60 # 1 minute cycle time
    data = DataLogger()


    # we'll create an independent connection to memcached for this test
    mc = Memcache(g.memcaches)

    counter = 0
    while ntest:

        # seed a batch on the first pass, and again every `retest` minutes
        if counter == 0 or (retest and counter % retest == 0):
            # random tag keeps concurrent/successive batches from colliding
            randstr = random.random()
            # key x carries age x+1 (in minutes) as its payload
            newkeys = [("%s_%s_%d" % (cache_key, randstr, x), x+1)
                       for x in xrange(minutes)]

            # set N keys, and tell them not to live for longer than this test:
            # hard expiry one period past the test horizon so keys can't leak
            mc.set_multi(dict(newkeys),
                         time = (minutes+1) * period)

            # add the list in reverse order since we'll be popping the
            # oldest (lowest-age) key first each cycle.
            newkeys.reverse()
            keys.append(newkeys)

        # wait for the next key to (potentially) expire
        counter += 1
        time.sleep(period)

        # one pop per batch per minute: the popped key's age matches the
        # elapsed time, so a missing key means it was evicted early (or
        # exactly on time if the batch just ran out)
        for k in keys:
            key, age = k.pop()
            if mc.get(key) is None or k == []:
                if verbose:
                    print "cache expiration: %d seconds" % (period * age)
                # record the observed lifetime and publish it immediately
                data.add(period * age)
                AppServiceMonitor.set_cache_lifetime(data)
                # wipe out the list for removal by the subsequent filter
                while k: k.pop()

        # clear out any empty key lists
        if [] in keys:
            keys = filter(None, keys)
        # ntest == -1 decrements forever without hitting 0, i.e. run unbounded
        ntest -= 1
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -1025,9 +1025,14 @@ textarea.gray { color: gray; }
|
||||
}
|
||||
|
||||
.server-status .membar {
|
||||
height:10px;
|
||||
height:11px;
|
||||
border:1px solid white;
|
||||
background-color:#336699;
|
||||
background-color:#6699FF;
|
||||
position: relative;
|
||||
}
|
||||
.server-status .membar span {
|
||||
position: absolute;
|
||||
font-size: smaller;
|
||||
}
|
||||
.server-status .cpu50 {
|
||||
height: 5px;
|
||||
|
||||
@@ -21,21 +21,28 @@
|
||||
################################################################################
|
||||
|
||||
<%
|
||||
cpu_col = 75.
|
||||
mem_col = 40.
|
||||
hide_data = "style='display:none'" if len(thing.hostlogs) > 4 else ''
|
||||
import datetime
|
||||
cpu_col = 75.
|
||||
mem_col = 40.
|
||||
hide_data = "style='display:none'" if len(thing.hostlogs) > 4 else ''
|
||||
%>
|
||||
|
||||
<div class="server-status">
|
||||
<h3>Rendered by PID ${g.reddit_pid} on ${g.reddit_host}</h3>
|
||||
<%
|
||||
cache_lifetime = thing.get_cache_lifetime()
|
||||
if cache_lifetime > 0:
|
||||
cache_lifetime = '%s' % datetime.timedelta(0, max(cache_lifetime, 0))
|
||||
else:
|
||||
cache_lifetime = "--:--.--"
|
||||
%>
|
||||
<h4>Cache lifetime: ${'%s' % cache_lifetime}</h4>
|
||||
%if any(h.database for h in thing.hostlogs):
|
||||
<table class="monitor-database">
|
||||
<tr class="title-region">
|
||||
<th>database</th>
|
||||
<th>connections</th>
|
||||
<th>
|
||||
<span style="color:green">1 min</span> /
|
||||
<span style="color:red">5 min</span>
|
||||
</th>
|
||||
<th></th>
|
||||
</tr>
|
||||
@@ -45,6 +52,14 @@
|
||||
host_id = host.host.replace('.', '-')
|
||||
load = host.load()
|
||||
load_level = min(max(int(load+0.4), 0),4)
|
||||
if host.database.max_connections > 0:
|
||||
max_conn = host.database.max_connections
|
||||
conn = float(host.database.connections()) / max_conn
|
||||
conn_wid = int(cpu_col*min(1, conn))
|
||||
else:
|
||||
max_conn = conn = 0
|
||||
conn_wid = 0
|
||||
|
||||
%>
|
||||
<tr class="load${load_level} title-region" id="${host_id}">
|
||||
<th>
|
||||
@@ -53,12 +68,18 @@
|
||||
<th>
|
||||
%if host.database.vacuuming:
|
||||
<blink style="color:red">VACUUMING!</blink>
|
||||
%else:
|
||||
<div class="membar" style="width:${conn_wid}px;">
|
||||
<span>
|
||||
${"%3.0f%%" % (100* conn) }
|
||||
</span>
|
||||
</div>
|
||||
%endif
|
||||
</th>
|
||||
<td>
|
||||
<span style="color:green">${host.database.connections()}</span>
|
||||
<span style="color:green">${int(conn * max_conn)}</span>
|
||||
/
|
||||
<span style="color:red">${host.database.connections(300)}</span>
|
||||
<span>${max_conn}</span>
|
||||
</td>
|
||||
</tr>
|
||||
<%
|
||||
|
||||
Reference in New Issue
Block a user