Commit 848b6798 authored by ale

apparently the context manager syntax for Pipeline objects is not supported in the Redis Python library of Debian stable
parent 6e184a7b
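
For reference, the two idioms differ only in who performs the pipeline cleanup. A minimal sketch of the pattern the commit moves away from and the one it adopts (connection setup is illustrative, not from this repository; key names follow the code below):

    import redis

    r = redis.Redis()

    # Newer redis-py releases let a Pipeline be used as a context manager;
    # reset() is called automatically when the block exits:
    with r.pipeline() as pipe:
        pipe.sadd('in_cns', 'client-a')
        pipe.rpush('aggr_in:client-a', '{}')
        pipe.execute()

    # Older releases (such as the one shipped in Debian stable at the time)
    # have no __enter__/__exit__ on Pipeline, so cleanup must be explicit:
    pipe = r.pipeline()
    try:
        pipe.sadd('in_cns', 'client-a')
        pipe.rpush('aggr_in:client-a', '{}')
        pipe.execute()
    finally:
        pipe.reset()
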
@@ -12,72 +12,79 @@ class Accounting(object):
     def add_connection(self, cn, conn_info):
         data = json.dumps(conn_info)
-        with self._local_db.pipeline() as local_pipe:
-            local_pipe.sadd('in_cns', cn)
-            local_pipe.rpush('aggr_in:%s' % cn, data)
+        local_pipe = self._local_db.pipeline()
+        local_pipe.sadd('in_cns', cn)
+        local_pipe.rpush('aggr_in:%s' % cn, data)
+        local_pipe.execute()

     def get_connections(self, cn, n=0):
         return self._aggr_db.lrange('connections:%s' % cn, 0, (n - 1))

     def aggregate(self, cn):
         conns = []
-        with self._local_db.pipeline() as local_pipe:
-            while True:
-                try:
-                    key = 'aggr_in:%s' % cn
-                    local_pipe.watch(key)
-                    for data in local_pipe.lrange(key, 0, -1):
-                        conns.append(data)
-                    local_pipe.delete(key)
-                    break
-                except redis.WatchError:
-                    del conns[:]
+        local_pipe = self._local_db.pipeline()
+        while True:
+            try:
+                key = 'aggr_in:%s' % cn
+                local_pipe.watch(key)
+                for data in local_pipe.lrange(key, 0, -1):
+                    conns.append(data)
+                local_pipe.delete(key)
+                break
+            except redis.WatchError:
+                del conns[:]
+            finally:
+                local_pipe.reset()

         # Compute daily aggregates, and copy the connection data to the master.
         aggr = {}
-        with self._aggr_db.pipeline() as pipe:
-            pipe.sadd('all_cns', cn)
-            for data in conns:
-                pipe.lpush('connections:%s' % cn, data)
-                conn_info = json.loads(data)
-                day = time.strftime('%Y%m%d', time.gmtime(conn_info['end_time']))
-                aggr_day = aggr.setdefault(day, {'conn_time': 0,
-                                                 'bytes_sent': 0,
-                                                 'bytes_recv': 0})
-                for attr in ('conn_time', 'bytes_recv', 'bytes_sent'):
-                    aggr_day[attr] += conn_info[attr]
+        pipe = self._aggr_db.pipeline()
+        pipe.sadd('all_cns', cn)
+        for data in conns:
+            pipe.lpush('connections:%s' % cn, data)
+            conn_info = json.loads(data)
+            day = time.strftime('%Y%m%d', time.gmtime(conn_info['end_time']))
+            aggr_day = aggr.setdefault(day, {'conn_time': 0,
+                                             'bytes_sent': 0,
+                                             'bytes_recv': 0})
+            for attr in ('conn_time', 'bytes_recv', 'bytes_sent'):
+                aggr_day[attr] += conn_info[attr]
+        pipe.execute()

         # Aggregate values on the master server.
         days = aggr.keys()
         aggr_key = 'aggr:%s' % cn
-        with self._aggr_db.pipeline() as pipe:
-            while True:
-                try:
-                    pipe.watch(aggr_key)
-                    pipe.multi()
-                    old_values = [json.loads(x) for x in
-                                  pipe.hmget(aggr_key, days).execute()]
-                    old_aggr = dict(zip(days, old_values))
-                    pipe.multi()
-                    for day, aggr_data in aggr.iteritems():
-                        old_aggr_data = old_aggr.get(day, {})
-                        for attr in aggr_data:
-                            aggr_data[attr] += old_aggr_data.get(attr, 0)
-                        pipe.hset(aggr_key, day, json.dumps(aggr_data))
-                    break
-                except redis.WatchError:
-                    continue
+        pipe = self._aggr_db.pipeline()
+        while True:
+            try:
+                pipe.watch(aggr_key)
+                old_values = [json.loads(x) for x in pipe.hmget(aggr_key, days)]
+                old_aggr = dict(zip(days, old_values))
+                pipe.multi()
+                for day, aggr_data in aggr.iteritems():
+                    old_aggr_data = old_aggr.get(day, {})
+                    for attr in aggr_data:
+                        aggr_data[attr] += old_aggr_data.get(attr, 0)
+                    pipe.hset(aggr_key, day, json.dumps(aggr_data))
+                pipe.execute()
+                break
+            except redis.WatchError:
+                continue
+            finally:
+                pipe.reset()

     def aggregate_all(self):
-        with self._local_db.pipeline() as local_pipe:
-            while True:
-                try:
-                    local_pipe.watch('in_cns')
-                    input_cns = local_pipe.get('in_cns')
-                    local_pipe.delete('in_cns')
-                    break
-                except redis.WatchError:
-                    continue
+        local_pipe = self._local_db.pipeline()
+        while True:
+            try:
+                local_pipe.watch('in_cns')
+                input_cns = local_pipe.get('in_cns')
+                local_pipe.delete('in_cns')
+                break
+            except redis.WatchError:
+                continue
+            finally:
+                local_pipe.reset()
         for cn in input_cns:
             self.aggregate(cn)
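
The explicit reset() in each finally clause reproduces what the context manager's __exit__ would have done: discard any buffered commands, clear the WATCH, and return the connection to the pool. A standalone sketch of the same optimistic-locking retry idiom used above (function and key names are illustrative):

    import redis

    def increment(db, key):
        # WATCH/MULTI/EXEC with retry: commit only if `key` was not
        # modified by another client between the read and the execute().
        pipe = db.pipeline()
        try:
            while True:
                try:
                    pipe.watch(key)             # reads now execute immediately
                    current = int(pipe.get(key) or 0)
                    pipe.multi()                # start buffering the transaction
                    pipe.set(key, current + 1)
                    pipe.execute()              # raises WatchError on a conflict
                    return current + 1
                except redis.WatchError:
                    continue                    # lost the race, try again
        finally:
            pipe.reset()                        # cleanup the with-block would do
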