Merge pull request #1184 from matrix-org/paul/metrics

Bugfix for process-wide metric export on split processes
This commit is contained in:
Paul Evans 2016-10-27 18:27:36 +01:00 committed by GitHub
commit f9d5b60a24
3 changed files with 9 additions and 9 deletions

View file

@@ -52,7 +52,6 @@ from synapse.config.homeserver import HomeServerConfig
 from synapse.crypto import context_factory
 from synapse.util.logcontext import LoggingContext
 from synapse.metrics import register_memory_metrics, get_metrics_for
-from synapse.metrics.process_collector import register_process_collector
 from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
 from synapse.replication.resource import ReplicationResource, REPLICATION_PREFIX
 from synapse.federation.transport.server import TransportLayerServer
@@ -338,7 +337,6 @@ def setup(config_options):
         hs.get_replication_layer().start_get_pdu_cache()
         register_memory_metrics(hs)
-        register_process_collector()
     reactor.callWhenRunning(start)

View file

@@ -24,6 +24,7 @@ from .metric import (
     CounterMetric, CallbackMetric, DistributionMetric, CacheMetric,
     MemoryUsageMetric,
 )
+from .process_collector import register_process_collector

 logger = logging.getLogger(__name__)
@@ -41,6 +42,9 @@ class Metrics(object):
     def __init__(self, name):
         self.name_prefix = name

+    def make_subspace(self, name):
+        return Metrics("%s_%s" % (self.name_prefix, name))
+
     def register_collector(self, func):
         all_collectors.append(func)
@@ -118,6 +122,8 @@ reactor_metrics.register_callback(
     "gc_counts", lambda: {(i,): v for i, v in enumerate(gc.get_count())}, labels=["gen"]
 )

+register_process_collector(get_metrics_for("process"))

 def runUntilCurrentTimer(func):

View file

@@ -20,8 +20,6 @@ import os
 import stat
 from resource import getrusage, RUSAGE_SELF

-from synapse.metrics import get_metrics_for

 TICKS_PER_SEC = 100
 BYTES_PER_PAGE = 4096
@@ -111,10 +109,10 @@ def _process_fds():
     return counts

-def register_process_collector():
+def register_process_collector(process_metrics):
     # Legacy synapse-invented metric names
-    resource_metrics = get_metrics_for("process.resource")
+    resource_metrics = process_metrics.make_subspace("resource")
     resource_metrics.register_collector(update_resource_metrics)
@@ -125,12 +123,10 @@ def register_process_collector():
     # kilobytes
     resource_metrics.register_callback("maxrss", lambda: rusage.ru_maxrss * 1024)

-    get_metrics_for("process").register_callback("fds", _process_fds, labels=["type"])
+    process_metrics.register_callback("fds", _process_fds, labels=["type"])

     # New prometheus-standard metric names
-    process_metrics = get_metrics_for("process")
-
     if HAVE_PROC_SELF_STAT:
         process_metrics.register_callback(
             "cpu_user_seconds_total",