Commit ddfc287b authored by ale

Add singleflight logic to the metrics endpoint cache

Prevents the scenario where requests for /metrics start taking longer
than the Prometheus scraping interval, at which point the whole process
would soon OOM due to the concurrent cache refreshes.
parent fe51e74b
@@ -6,6 +6,7 @@ import ssl
 import subprocess
 import sys
 import time
+import threading
 from functools import wraps
 from flask import Flask, request, abort, make_response, jsonify, g
@@ -132,14 +133,24 @@ def regenerate_archives():
 ### Instrumentation.
 
 def cache(timeout):
-    _store = {'deadline': 0, 'value': None}
+    """Decorator for caching expensive results.
+
+    Allows a single thread to recompute the cached value even in
+    presence of concurrent / overlapping requests (will serve stale
+    data in the meantime).
+    """
+
+    _store = {'deadline': 0, 'value': '', 'lock': threading.Lock()}
     def _cache_wrapper(fn):
         @wraps(fn)
         def _cache(*args, **kwargs):
             now = time.time()
-            if now > _store['deadline']:
-                _store['value'] = fn(*args, **kwargs)
-                _store['deadline'] = now + timeout
+            if now > _store['deadline'] and _store['lock'].acquire(False):
+                try:
+                    _store['value'] = fn(*args, **kwargs)
+                    _store['deadline'] = now + timeout
+                finally:
+                    _store['lock'].release()
             return _store['value']
         return _cache
     return _cache_wrapper
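
For context, a sketch of the decorator as it reads after this change, applied to a hypothetical /metrics handler. The Flask app object, the route, the 60-second timeout and the handler body are illustrative assumptions, not taken from this repository; only the cache() decorator itself comes from the diff above.

import threading
import time
from functools import wraps
from flask import Flask

app = Flask(__name__)

def cache(timeout):
    """Decorator for caching expensive results.

    Allows a single thread to recompute the cached value even in
    presence of concurrent / overlapping requests (will serve stale
    data in the meantime).
    """
    _store = {'deadline': 0, 'value': '', 'lock': threading.Lock()}
    def _cache_wrapper(fn):
        @wraps(fn)
        def _cache(*args, **kwargs):
            now = time.time()
            # acquire(False) never blocks: only the first request past the
            # deadline recomputes the value, concurrent requests are served
            # the previously cached (possibly stale) value right away.
            if now > _store['deadline'] and _store['lock'].acquire(False):
                try:
                    _store['value'] = fn(*args, **kwargs)
                    _store['deadline'] = now + timeout
                finally:
                    _store['lock'].release()
            return _store['value']
        return _cache
    return _cache_wrapper

# Hypothetical endpoint, for illustration only.
@app.route('/metrics')
@cache(60)
def metrics():
    time.sleep(2)    # stands in for an expensive metrics collection pass
    return 'some_metric 1\n'

The non-blocking acquire(False) is what provides the singleflight behaviour: when a refresh is slower than the scraping interval, overlapping scrapes no longer pile up concurrent recomputations (the cause of the OOM described above) and instead return the last cached value, at the cost of serving slightly stale output (and an empty body on the very first overlapping requests, since 'value' starts out empty).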