The following components are included in this product:
-goautoneg
-http://bitbucket.org/ww/goautoneg
-Copyright 2011, Open Knowledge Foundation Ltd.
-See README.txt for license details.
-
perks - a fork of https://github.com/bmizerany/perks
https://github.com/beorn7/perks
Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
-# Overview
-This is the [Prometheus](http://www.prometheus.io) telemetric
-instrumentation client [Go](http://golang.org) client library. It
-enable authors to define process-space metrics for their servers and
-expose them through a web service interface for extraction,
-aggregation, and a whole slew of other post processing techniques.
-
-# Installing
- $ go get github.com/prometheus/client_golang/prometheus
-
-# Example
-```go
-package main
-
-import (
- "net/http"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-var (
- indexed = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "my_company",
- Subsystem: "indexer",
- Name: "documents_indexed",
- Help: "The number of documents indexed.",
- })
- size = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "my_company",
- Subsystem: "storage",
- Name: "documents_total_size_bytes",
- Help: "The total size of all documents in the storage.",
- })
-)
-
-func main() {
- http.Handle("/metrics", prometheus.Handler())
-
- indexed.Inc()
- size.Set(5)
-
- http.ListenAndServe(":8080", nil)
-}
-
-func init() {
- prometheus.MustRegister(indexed)
- prometheus.MustRegister(size)
-}
-```
-
-# Documentation
-
-[![GoDoc](https://godoc.org/github.com/prometheus/client_golang?status.png)](https://godoc.org/github.com/prometheus/client_golang)
+See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
// Collector is the interface implemented by anything that can be used by
// Prometheus to collect metrics. A Collector has to be registered for
-// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet.
+// collection. See Registerer.Register.
//
-// The stock metrics provided by this package (like Gauge, Counter, Summary) are
-// also Collectors (which only ever collect one metric, namely itself). An
-// implementer of Collector may, however, collect multiple metrics in a
-// coordinated fashion and/or create metrics on the fly. Examples for collectors
-// already implemented in this library are the metric vectors (i.e. collection
-// of multiple instances of the same Metric but with different label values)
-// like GaugeVec or SummaryVec, and the ExpvarCollector.
+// The stock metrics provided by this package (Gauge, Counter, Summary,
+// Histogram, Untyped) are also Collectors (which only ever collect one metric,
+// namely itself). An implementer of Collector may, however, collect multiple
+// metrics in a coordinated fashion and/or create metrics on the fly. Examples
+// for collectors already implemented in this library are the metric vectors
+// (i.e. collection of multiple instances of the same Metric but with different
+// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
type Collector interface {
// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector to the provided channel and returns once
// executing this method, it must send an invalid descriptor (created
// with NewInvalidDesc) to signal the error to the registry.
Describe(chan<- *Desc)
- // Collect is called by Prometheus when collecting metrics. The
- // implementation sends each collected metric via the provided channel
- // and returns once the last metric has been sent. The descriptor of
- // each sent metric is one of those returned by Describe. Returned
- // metrics that share the same descriptor must differ in their variable
- // label values. This method may be called concurrently and must
- // therefore be implemented in a concurrency safe way. Blocking occurs
- // at the expense of total performance of rendering all registered
- // metrics. Ideally, Collector implementations support concurrent
- // readers.
+ // Collect is called by the Prometheus registry when collecting
+ // metrics. The implementation sends each collected metric via the
+ // provided channel and returns once the last metric has been sent. The
+ // descriptor of each sent metric is one of those returned by
+ // Describe. Returned metrics that share the same descriptor must differ
+ // in their variable label values. This method may be called
+ // concurrently and must therefore be implemented in a concurrency safe
+ // way. Blocking occurs at the expense of total performance of rendering
+ // all registered metrics. Ideally, Collector implementations support
+ // concurrent readers.
Collect(chan<- Metric)
}
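
As a hedged illustration of the Describe/Collect contract defined above, here is a minimal sketch of a custom Collector that mirrors an external value into a gauge at collect time. All names (`queueCollector`, `myapp_queue_length`, `lenFn`) are hypothetical and not part of this change; only `Desc`, `NewDesc`, `MustNewConstMetric`, and `GaugeValue` are taken from the package.

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector reports the length of an externally managed queue as a
// gauge at collect time. All names here are hypothetical.
type queueCollector struct {
	queueLength *prometheus.Desc
	lenFn       func() int // supplies the current queue length
}

func newQueueCollector(lenFn func() int) *queueCollector {
	return &queueCollector{
		queueLength: prometheus.NewDesc(
			"myapp_queue_length",
			"Current number of items in the queue.",
			nil, nil,
		),
		lenFn: lenFn,
	}
}

// Describe sends the one descriptor this collector can ever produce.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.queueLength
}

// Collect creates a throw-away metric from the current queue length.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.queueLength, prometheus.GaugeValue, float64(c.lenFn()))
}

func main() {
	queue := []string{"a", "b"}
	prometheus.MustRegister(newQueueCollector(func() int { return len(queue) }))
}
```

The collector is registered like any other; the registry calls Describe once at registration and Collect on every scrape.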
-// SelfCollector implements Collector for a single Metric so that that the
-// Metric collects itself. Add it as an anonymous field to a struct that
-// implements Metric, and call Init with the Metric itself as an argument.
-type SelfCollector struct {
+// selfCollector implements Collector for a single Metric so that the Metric
+// collects itself. Add it as an anonymous field to a struct that implements
+// Metric, and call init with the Metric itself as an argument.
+type selfCollector struct {
self Metric
}
-// Init provides the SelfCollector with a reference to the metric it is supposed
+// init provides the selfCollector with a reference to the metric it is supposed
// to collect. It is usually called within the factory function to create a
// metric. See example.
-func (c *SelfCollector) Init(self Metric) {
+func (c *selfCollector) init(self Metric) {
c.self = self
}
// Describe implements Collector.
-func (c *SelfCollector) Describe(ch chan<- *Desc) {
+func (c *selfCollector) Describe(ch chan<- *Desc) {
ch <- c.self.Desc()
}
// Collect implements Collector.
-func (c *SelfCollector) Collect(ch chan<- Metric) {
+func (c *selfCollector) Collect(ch chan<- Metric) {
ch <- c.self
}
Metric
Collector
- // Set is used to set the Counter to an arbitrary value. It is only used
- // if you have to transfer a value from an external counter into this
- // Prometheus metric. Do not use it for regular handling of a
- // Prometheus counter (as it can be used to break the contract of
- // monotonically increasing values).
- Set(float64)
- // Inc increments the counter by 1.
+ // Inc increments the counter by 1. Use Add to increment it by arbitrary
+ // non-negative values.
Inc()
// Add adds the given value to the counter. It panics if the value is <
// 0.
opts.ConstLabels,
)
result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
- result.Init(result) // Init self-collection.
+ result.init(result) // Init self-collection.
return result
}
// CounterVec embeds MetricVec. See there for a full list of methods with
// detailed documentation.
type CounterVec struct {
- MetricVec
+ *MetricVec
}
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
opts.ConstLabels,
)
return &CounterVec{
- MetricVec: MetricVec{
- children: map[uint64]Metric{},
- desc: desc,
- newMetric: func(lvs ...string) Metric {
- result := &counter{value: value{
- desc: desc,
- valType: CounterValue,
- labelPairs: makeLabelPairs(desc, lvs),
- }}
- result.Init(result) // Init self-collection.
- return result
- },
- },
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ result := &counter{value: value{
+ desc: desc,
+ valType: CounterValue,
+ labelPairs: makeLabelPairs(desc, lvs),
+ }}
+ result.init(result) // Init self-collection.
+ return result
+ }),
}
}
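
To show how the `*MetricVec`-backed CounterVec above is used from client code, here is a short sketch. The metric and label names are hypothetical; the calls (`NewCounterVec`, `MustRegister`, `WithLabelValues`, `With`) are the package's documented API.

```go
package main

import "github.com/prometheus/client_golang/prometheus"

var httpReqs = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "http_requests_total", // hypothetical metric name
		Help: "Total number of HTTP requests, partitioned by status code and method.",
	},
	[]string{"code", "method"},
)

func main() {
	prometheus.MustRegister(httpReqs)

	// Each distinct label-value combination gets its own child counter.
	httpReqs.WithLabelValues("200", "GET").Inc()
	httpReqs.With(prometheus.Labels{"code": "404", "method": "POST"}).Add(3)
}
```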
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package prometheus
import (
"errors"
"fmt"
- "regexp"
"sort"
"strings"
"github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
)
-var (
- metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
- labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
-)
-
// reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
const reservedLabelPrefix = "__"
// Help string. Each Desc with the same fqName must have the same
// dimHash.
dimHash uint64
- // err is an error that occured during construction. It is reported on
+ // err is an error that occurred during construction. It is reported on
// registration time.
err error
}
d.err = errors.New("empty help string")
return d
}
- if !metricNameRE.MatchString(fqName) {
+ if !model.IsValidMetricName(model.LabelValue(fqName)) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
return d
}
}
func checkLabelName(l string) bool {
- return labelNameRE.MatchString(l) &&
+ return model.LabelName(l).IsValid() &&
!strings.HasPrefix(l, reservedLabelPrefix)
}
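
As a small illustrative sketch of the naming rules enforced here (metric names validated via the common model, label names without the reserved `__` prefix), the following builds a Desc with hypothetical names; `BuildFQName` and `NewDesc` are existing package functions.

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// BuildFQName joins namespace, subsystem, and name with underscores,
	// yielding "myapp_api_requests_total" here (names are hypothetical).
	fqName := prometheus.BuildFQName("myapp", "api", "requests_total")

	// Label names must be valid per the model package and must not use the
	// reserved "__" prefix checked above.
	desc := prometheus.NewDesc(
		fqName,
		"Total number of API requests.",
		[]string{"handler"},                    // variable label
		prometheus.Labels{"region": "eu-west"}, // constant label
	)
	_ = desc
}
```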
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package prometheus provides embeddable metric primitives for servers and
-// standardized exposition of telemetry through a web services interface.
+// Package prometheus provides metrics primitives to instrument code for
+// monitoring. It also offers a registry for metrics. Sub-packages allow to
+// expose the registered metrics via HTTP (package promhttp) or push them to a
+// Pushgateway (package push).
//
// All exported functions and methods are safe to be used concurrently unless
// specified otherwise.
//
-// To expose metrics registered with the Prometheus registry, an HTTP server
-// needs to know about the Prometheus handler. The usual endpoint is "/metrics".
+// A Basic Example
//
-// http.Handle("/metrics", prometheus.Handler())
-//
-// As a starting point a very basic usage example:
+// As a starting point, a very basic usage example:
//
// package main
//
// "net/http"
//
// "github.com/prometheus/client_golang/prometheus"
+// "github.com/prometheus/client_golang/prometheus/promhttp"
// )
//
// var (
// Name: "cpu_temperature_celsius",
// Help: "Current temperature of the CPU.",
// })
-// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{
-// Name: "hd_errors_total",
-// Help: "Number of hard-disk errors.",
-// })
+// hdFailures = prometheus.NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// },
+// []string{"device"},
+// )
// )
//
// func init() {
+// // Metrics have to be registered to be exposed:
// prometheus.MustRegister(cpuTemp)
// prometheus.MustRegister(hdFailures)
// }
//
// func main() {
// cpuTemp.Set(65.3)
-// hdFailures.Inc()
+// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
//
-// http.Handle("/metrics", prometheus.Handler())
-// http.ListenAndServe(":8080", nil)
+// // The Handler function provides a default handler to expose metrics
+// // via an HTTP server. "/metrics" is the usual endpoint for that.
+// http.Handle("/metrics", promhttp.Handler())
+// log.Fatal(http.ListenAndServe(":8080", nil))
// }
//
//
-// This is a complete program that exports two metrics, a Gauge and a Counter.
-// It also exports some stats about the HTTP usage of the /metrics
-// endpoint. (See the Handler function for more detail.)
+// This is a complete program that exports two metrics, a Gauge and a Counter,
+// the latter with a label attached to turn it into a (one-dimensional) vector.
+//
+// Metrics
+//
+// The number of exported identifiers in this package might appear a bit
+// overwhelming. However, in addition to the basic plumbing shown in the example
+// above, you only need to understand the different metric types and their
+// vector versions for basic usage.
+//
+// Above, you have already touched the Counter and the Gauge. There are two more
+// advanced metric types: the Summary and Histogram. A more thorough description
+// of those four metric types can be found in the Prometheus docs:
+// https://prometheus.io/docs/concepts/metric_types/
//
-// Two more advanced metric types are the Summary and Histogram.
+// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
+// Prometheus server not to assume anything about its type.
//
-// In addition to the fundamental metric types Gauge, Counter, Summary, and
-// Histogram, a very important part of the Prometheus data model is the
-// partitioning of samples along dimensions called labels, which results in
+// In addition to the fundamental metric types Gauge, Counter, Summary,
+// Histogram, and Untyped, a very important part of the Prometheus data model is
+// the partitioning of samples along dimensions called labels, which results in
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
-// and HistogramVec.
-//
-// Those are all the parts needed for basic usage. Detailed documentation and
-// examples are provided below.
-//
-// Everything else this package offers is essentially for "power users" only. A
-// few pointers to "power user features":
-//
-// All the various ...Opts structs have a ConstLabels field for labels that
-// never change their value (which is only useful under special circumstances,
-// see documentation of the Opts type).
-//
-// The Untyped metric behaves like a Gauge, but signals the Prometheus server
-// not to assume anything about its type.
-//
-// Functions to fine-tune how the metric registry works: EnableCollectChecks,
-// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook.
-//
-// For custom metric collection, there are two entry points: Custom Metric
-// implementations and custom Collector implementations. A Metric is the
-// fundamental unit in the Prometheus data model: a sample at a point in time
-// together with its meta-data (like its fully-qualified name and any number of
-// pairs of label name and label value) that knows how to marshal itself into a
-// data transfer object (aka DTO, implemented as a protocol buffer). A Collector
-// gets registered with the Prometheus registry and manages the collection of
-// one or more Metrics. Many parts of this package are building blocks for
-// Metrics and Collectors. Desc is the metric descriptor, actually used by all
-// metrics under the hood, and by Collectors to describe the Metrics to be
-// collected, but only to be dealt with by users if they implement their own
-// Metrics or Collectors. To create a Desc, the BuildFQName function will come
-// in handy. Other useful components for Metric and Collector implementation
-// include: LabelPairSorter to sort the DTO version of label pairs,
-// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at
-// collection time, MetricVec to bundle custom Metrics into a metric vector
-// Collector, SelfCollector to make a custom Metric collect itself.
-//
-// A good example for a custom Collector is the ExpVarCollector included in this
-// package, which exports variables exported via the "expvar" package as
-// Prometheus metrics.
+// HistogramVec, and UntypedVec.
+//
+// While only the fundamental metric types implement the Metric interface, both
+// the metrics and their vector versions implement the Collector interface. A
+// Collector manages the collection of a number of Metrics, but for convenience,
+// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
+// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
+// SummaryVec, HistogramVec, and UntypedVec are not.
+//
+// To create instances of Metrics and their vector versions, you need a suitable
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or
+// UntypedOpts.
+//
+// Custom Collectors and constant Metrics
+//
+// While you could create your own implementations of Metric, most likely you
+// will only ever implement the Collector interface on your own. At a first
+// glance, a custom Collector seems handy to bundle Metrics for common
+// registration (with the prime example of the different metric vectors above,
+// which bundle all the metrics of the same name but with different labels).
+//
+// There is a more involved use case, too: If you already have metrics
+// available, created outside of the Prometheus context, you don't need the
+// interface of the various Metric types. You essentially want to mirror the
+// existing numbers into Prometheus Metrics during collection. A custom
+// implementation of the Collector interface is perfect for that. You can create
+// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
+// NewConstSummary (and their respective Must… versions). That will happen in
+// the Collect method. The Describe method has to return separate Desc
+// instances, representative of the “throw-away” metrics to be created later.
+// NewDesc comes in handy to create those Desc instances.
+//
+// The Collector example illustrates the use case. You can also look at the
+// source code of the processCollector (mirroring process metrics), the
+// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
+// metrics) as examples that are used in this package itself.
+//
+// If you just need to call a function to get a single float value to collect as
+// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
+// shortcuts.
+//
+// Advanced Uses of the Registry
+//
+// While MustRegister is the by far most common way of registering a Collector,
+// sometimes you might want to handle the errors the registration might cause.
+// As suggested by the name, MustRegister panics if an error occurs. With the
+// Register function, the error is returned and can be handled.
+//
+// An error is returned if the registered Collector is incompatible or
+// inconsistent with already registered metrics. The registry aims for
+// consistency of the collected metrics according to the Prometheus data model.
+// Inconsistencies are ideally detected at registration time, not at collect
+// time. The former will usually be detected at start-up time of a program,
+// while the latter will only happen at scrape time, possibly not even on the
+// first scrape if the inconsistency only becomes relevant later. That is the
+// main reason why a Collector and a Metric have to describe themselves to the
+// registry.
+//
+// So far, everything we did operated on the so-called default registry, as it
+// can be found in the global DefaultRegistry variable. With NewRegistry, you
+// can create a custom registry, or you can even implement the Registerer or
+// Gatherer interfaces yourself. The methods Register and Unregister work in the
+// same way on a custom registry as the global functions Register and Unregister
+// on the default registry.
+//
+// There are a number of uses for custom registries: You can use registries with
+// special properties, see NewPedanticRegistry. You can avoid global state, as
+// it is imposed by the DefaultRegistry. You can use multiple registries at the
+// same time to expose different metrics in different ways. You can use separate
+// registries for testing purposes.
+//
+// Also note that the DefaultRegistry comes registered with a Collector for Go
+// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
+// NewProcessCollector). With a custom registry, you are in control and decide
+// yourself about the Collectors to register.
+//
+// HTTP Exposition
+//
+// The Registry implements the Gatherer interface. The caller of the Gather
+// method can then expose the gathered metrics in some way. Usually, the metrics
+// are served via HTTP on the /metrics endpoint. That's happening in the example
+// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
+// (The top-level functions in the prometheus package are deprecated.)
+//
+// Pushing to the Pushgateway
+//
+// Functions for pushing to the Pushgateway can be found in the push sub-package.
+//
+// Graphite Bridge
+//
+// Functions and examples to push metrics from a Gatherer to Graphite can be
+// found in the graphite sub-package.
+//
+// Other Means of Exposition
+//
+// More ways of exposing metrics can easily be added by following the approaches
+// of the existing implementations.
package prometheus
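
The registry-related parts of the doc comment above (NewRegistry, registering collectors, exposing via promhttp) can be tied together in a short, hedged sketch. The Go and process collectors and HandlerFor are referenced in this change; the metric name and the choice of collectors are illustrative assumptions.

```go
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A custom registry avoids the global state of the default registry and
	// starts empty: the Go and process collectors are added explicitly here.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())
	reg.MustRegister(prometheus.NewProcessCollector(os.Getpid(), ""))

	queueDepth := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "myapp_queue_depth", // hypothetical metric name
		Help: "Current depth of the work queue.",
	})
	reg.MustRegister(queueDepth)

	// Expose only what this registry gathers.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```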
"expvar"
)
-// ExpvarCollector collects metrics from the expvar interface. It provides a
-// quick way to expose numeric values that are already exported via expvar as
-// Prometheus metrics. Note that the data models of expvar and Prometheus are
-// fundamentally different, and that the ExpvarCollector is inherently
-// slow. Thus, the ExpvarCollector is probably great for experiments and
-// prototying, but you should seriously consider a more direct implementation of
-// Prometheus metrics for monitoring production systems.
-//
-// Use NewExpvarCollector to create new instances.
-type ExpvarCollector struct {
+type expvarCollector struct {
exports map[string]*Desc
}
-// NewExpvarCollector returns a newly allocated ExpvarCollector that still has
-// to be registered with the Prometheus registry.
+// NewExpvarCollector returns a newly allocated expvar Collector that still has
+// to be registered with a Prometheus registry.
+//
+// An expvar Collector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the expvar Collector is inherently slower
+// than native Prometheus metrics. Thus, the expvar Collector is probably great
+// for experiments and prototyping, but you should seriously consider a more
+// direct implementation of Prometheus metrics for monitoring production
+// systems.
//
// The exports map has the following meaning:
//
// sample values.
//
// Anything that does not fit into the scheme above is silently ignored.
-func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector {
- return &ExpvarCollector{
+func NewExpvarCollector(exports map[string]*Desc) Collector {
+ return &expvarCollector{
exports: exports,
}
}
// Describe implements Collector.
-func (e *ExpvarCollector) Describe(ch chan<- *Desc) {
+func (e *expvarCollector) Describe(ch chan<- *Desc) {
for _, desc := range e.exports {
ch <- desc
}
}
// Collect implements Collector.
-func (e *ExpvarCollector) Collect(ch chan<- Metric) {
+func (e *expvarCollector) Collect(ch chan<- Metric) {
for name, desc := range e.exports {
var m Metric
expVar := expvar.Get(name)
// Set sets the Gauge to an arbitrary value.
Set(float64)
- // Inc increments the Gauge by 1.
+ // Inc increments the Gauge by 1. Use Add to increment it by arbitrary
+ // values.
Inc()
- // Dec decrements the Gauge by 1.
+ // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
+ // values.
Dec()
- // Add adds the given value to the Gauge. (The value can be
- // negative, resulting in a decrease of the Gauge.)
+ // Add adds the given value to the Gauge. (The value can be negative,
+ // resulting in a decrease of the Gauge.)
Add(float64)
// Sub subtracts the given value from the Gauge. (The value can be
// negative, resulting in an increase of the Gauge.)
Sub(float64)
+
+ // SetToCurrentTime sets the Gauge to the current Unix time in seconds.
+ SetToCurrentTime()
}
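
A brief sketch of typical Gauge usage, including the new SetToCurrentTime method added above. Metric and function names are hypothetical.

```go
package main

import "github.com/prometheus/client_golang/prometheus"

var (
	inFlight = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "myapp_requests_in_flight", // hypothetical name
		Help: "Number of requests currently being served.",
	})
	lastBackup = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "myapp_last_backup_completion_timestamp_seconds", // hypothetical name
		Help: "Unix timestamp of the last completed backup.",
	})
)

func handleRequest() {
	inFlight.Inc()
	defer inFlight.Dec()
	// ... serve the request ...
}

func runBackup() {
	// ... do the actual backup work ...
	lastBackup.SetToCurrentTime() // record when the run finished
}

func main() {
	prometheus.MustRegister(inFlight, lastBackup)
	handleRequest()
	runBackup()
}
```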
// GaugeOpts is an alias for Opts. See there for doc comments.
// (e.g. number of operations queued, partitioned by user and operation
// type). Create instances with NewGaugeVec.
type GaugeVec struct {
- MetricVec
+ *MetricVec
}
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
opts.ConstLabels,
)
return &GaugeVec{
- MetricVec: MetricVec{
- children: map[uint64]Metric{},
- desc: desc,
- newMetric: func(lvs ...string) Metric {
- return newValue(desc, GaugeValue, 0, lvs...)
- },
- },
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newValue(desc, GaugeValue, 0, lvs...)
+ }),
}
}
)
type goCollector struct {
- goroutines Gauge
- gcDesc *Desc
+ goroutinesDesc *Desc
+ threadsDesc *Desc
+ gcDesc *Desc
// metrics to describe and collect
metrics memStatsMetrics
// NewGoCollector returns a collector which exports metrics about the current
// go process.
-func NewGoCollector() *goCollector {
+func NewGoCollector() Collector {
return &goCollector{
- goroutines: NewGauge(GaugeOpts{
- Namespace: "go",
- Name: "goroutines",
- Help: "Number of goroutines that currently exist.",
- }),
+ goroutinesDesc: NewDesc(
+ "go_goroutines",
+ "Number of goroutines that currently exist.",
+ nil, nil),
+ threadsDesc: NewDesc(
+ "go_threads",
+			"Number of OS threads created.",
+ nil, nil),
gcDesc: NewDesc(
"go_gc_duration_seconds",
"A summary of the GC invocation durations.",
}, {
desc: NewDesc(
memstatNamespace("sys_bytes"),
- "Number of bytes obtained by system. Sum of all system allocations.",
+ "Number of bytes obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
valType: GaugeValue,
}, {
desc: NewDesc(
- memstatNamespace("heap_released_bytes_total"),
- "Total number of heap bytes released to OS.",
+ memstatNamespace("heap_released_bytes"),
+ "Number of heap bytes released to OS.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
- valType: CounterValue,
+ valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_objects"),
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_cpu_fraction"),
+ "The fraction of this program's available CPU time used by the GC since the program started.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
+ valType: GaugeValue,
},
},
}
// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
- ch <- c.goroutines.Desc()
+ ch <- c.goroutinesDesc
+ ch <- c.threadsDesc
ch <- c.gcDesc
-
for _, i := range c.metrics {
ch <- i.desc
}
// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
- c.goroutines.Set(float64(runtime.NumGoroutine()))
- ch <- c.goroutines
+ ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
+ n, _ := runtime.ThreadCreateProfile(nil)
+ ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
var stats debug.GCStats
stats.PauseQuantiles = make([]time.Duration, 5)
// bucket of a histogram ("le" -> "less or equal").
const bucketLabel = "le"
+// DefBuckets are the default Histogram buckets. The default buckets are
+// tailored to broadly measure the response time (in seconds) of a network
+// service. Most likely, however, you will be required to define buckets
+// customized to your use case.
var (
- // DefBuckets are the default Histogram buckets. The default buckets are
- // tailored to broadly measure the response time (in seconds) of a
- // network service. Most likely, however, you will be required to define
- // buckets customized to your use case.
DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
errBucketLabelNotAllowed = fmt.Errorf(
// Finally we know the final length of h.upperBounds and can make counts.
h.counts = make([]uint64, len(h.upperBounds))
- h.Init(h) // Init self-collection.
+ h.init(h) // Init self-collection.
return h
}
sumBits uint64
count uint64
- SelfCollector
+ selfCollector
// Note that there is no mutex required.
desc *Desc
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec.
type HistogramVec struct {
- MetricVec
+ *MetricVec
}
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
opts.ConstLabels,
)
return &HistogramVec{
- MetricVec: MetricVec{
- children: map[uint64]Metric{},
- desc: desc,
- newMetric: func(lvs ...string) Metric {
- return newHistogram(desc, opts, lvs...)
- },
- },
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newHistogram(desc, opts, lvs...)
+ }),
}
}
// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Histogram and not a
-// Metric so that no type conversion is required.
-func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
+// MetricVec. The difference is that this method returns an Observer and not a
+// Metric so that no type conversion to an Observer is required.
+func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
- return metric.(Histogram), err
+ return metric.(Observer), err
}
return nil, err
}
// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Histogram and not a Metric so that no
-// type conversion is required.
-func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
+// difference is that this method returns an Observer and not a Metric so that no
+// type conversion to an Observer is required.
+func (m *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
metric, err := m.MetricVec.GetMetricWith(labels)
if metric != nil {
- return metric.(Histogram), err
+ return metric.(Observer), err
}
return nil, err
}
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
// myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
- return m.MetricVec.WithLabelValues(lvs...).(Histogram)
+func (m *HistogramVec) WithLabelValues(lvs ...string) Observer {
+ return m.MetricVec.WithLabelValues(lvs...).(Observer)
}
// With works as GetMetricWith, but panics where GetMetricWith would have
// returned an error. By not returning an error, With allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (m *HistogramVec) With(labels Labels) Histogram {
- return m.MetricVec.With(labels).(Histogram)
+func (m *HistogramVec) With(labels Labels) Observer {
+ return m.MetricVec.With(labels).(Observer)
}
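
With the change above, HistogramVec hands out Observer values, so observations look like the following hedged sketch (metric and label names are hypothetical; DefBuckets is defined in this package).

```go
package main

import "github.com/prometheus/client_golang/prometheus"

var requestDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds", // hypothetical name
		Help:    "Request duration in seconds, partitioned by handler.",
		Buckets: prometheus.DefBuckets, // override with custom bucket boundaries as needed
	},
	[]string{"handler"},
)

func main() {
	prometheus.MustRegister(requestDuration)

	// WithLabelValues now returns an Observer, so the same call pattern works
	// for HistogramVec and SummaryVec alike.
	requestDuration.WithLabelValues("index").Observe(0.042)
}
```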
type constHistogram struct {
import (
"bufio"
+ "bytes"
+ "compress/gzip"
+ "fmt"
"io"
"net"
"net/http"
"strconv"
"strings"
+ "sync"
"time"
+
+ "github.com/prometheus/common/expfmt"
+)
+
+// TODO(beorn7): Remove this whole file. It is a partial mirror of
+// promhttp/http.go (to avoid circular import chains) where everything HTTP
+// related should live. The functions here are just for avoiding
+// breakage. Everything is deprecated.
+
+const (
+ contentTypeHeader = "Content-Type"
+ contentLengthHeader = "Content-Length"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
)
+var bufPool sync.Pool
+
+func getBuf() *bytes.Buffer {
+ buf := bufPool.Get()
+ if buf == nil {
+ return &bytes.Buffer{}
+ }
+ return buf.(*bytes.Buffer)
+}
+
+func giveBuf(buf *bytes.Buffer) {
+ buf.Reset()
+ bufPool.Put(buf)
+}
+
+// Handler returns an HTTP handler for the DefaultGatherer. It is
+// already instrumented with InstrumentHandler (using "prometheus" as handler
+// name).
+//
+// Deprecated: Please note the issues described in the doc comment of
+// InstrumentHandler. You might want to consider using promhttp.Handler instead
+// (which is not instrumented).
+func Handler() http.Handler {
+ return InstrumentHandler("prometheus", UninstrumentedHandler())
+}
+
+// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
+//
+// Deprecated: Use promhttp.Handler instead. See there for further documentation.
+func UninstrumentedHandler() http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ mfs, err := DefaultGatherer.Gather()
+ if err != nil {
+ http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ contentType := expfmt.Negotiate(req.Header)
+ buf := getBuf()
+ defer giveBuf(buf)
+ writer, encoding := decorateWriter(req, buf)
+ enc := expfmt.NewEncoder(writer, contentType)
+ var lastErr error
+ for _, mf := range mfs {
+ if err := enc.Encode(mf); err != nil {
+ lastErr = err
+ http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+ if closer, ok := writer.(io.Closer); ok {
+ closer.Close()
+ }
+ if lastErr != nil && buf.Len() == 0 {
+			http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
+ return
+ }
+ header := w.Header()
+ header.Set(contentTypeHeader, string(contentType))
+ header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+ if encoding != "" {
+ header.Set(contentEncodingHeader, encoding)
+ }
+ w.Write(buf.Bytes())
+ })
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
+ header := request.Header.Get(acceptEncodingHeader)
+ parts := strings.Split(header, ",")
+ for _, part := range parts {
+ part := strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return gzip.NewWriter(writer), "gzip"
+ }
+ }
+ return writer, ""
+}
+
var instLabels = []string{"method", "code"}
type nower interface {
// has a constant label named "handler" with the provided handlerName as
// value. http_requests_total is a metric vector partitioned by HTTP method
// (label name "method") and HTTP status code (label name "code").
+//
+// Deprecated: InstrumentHandler has several issues:
+//
+// - It uses Summaries rather than Histograms. Summaries are not useful if
+// aggregation across multiple instances is required.
+//
+// - It uses microseconds as unit, which is deprecated and should be replaced by
+// seconds.
+//
+// - The size of the request is calculated in a separate goroutine. Since this
+// calculator requires access to the request header, it creates a race with
+// any writes to the header performed during request handling.
+// httputil.ReverseProxy is a prominent example for a handler
+// performing such writes.
+//
+// - It has additional issues with HTTP/2, cf.
+// https://github.com/prometheus/client_golang/issues/272.
+//
+// Upcoming versions of this package will provide ways of instrumenting HTTP
+// handlers that are more flexible and have fewer issues. Please prefer direct
+// instrumentation in the meantime.
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
}
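
Since the deprecation note above recommends direct instrumentation for now, here is a hedged sketch of what that can look like: a plain CounterVec maintained by the handler itself, exposed via promhttp.Handler. Handler and metric names are hypothetical.

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var handledTotal = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "myapp_index_requests_total", // hypothetical name
		Help: "Requests handled by the index handler, by status code.",
	},
	[]string{"code"},
)

func init() { prometheus.MustRegister(handledTotal) }

func index(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte("hello"))
	handledTotal.WithLabelValues("200").Inc()
}

func main() {
	http.HandleFunc("/", index)
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```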
// InstrumentHandlerFunc wraps the given function for instrumentation. It
-// otherwise works in the same way as InstrumentHandler.
+// otherwise works in the same way as InstrumentHandler (and shares the same
+// issues).
+//
+// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
+// InstrumentHandler is.
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts(
SummaryOpts{
Subsystem: "http",
ConstLabels: Labels{"handler": handlerName},
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
handlerFunc,
)
}
-// InstrumentHandlerWithOpts works like InstrumentHandler but provides more
-// flexibility (at the cost of a more complex call syntax). As
-// InstrumentHandler, this function registers four metric collectors, but it
-// uses the provided SummaryOpts to create them. However, the fields "Name" and
-// "Help" in the SummaryOpts are ignored. "Name" is replaced by
-// "requests_total", "request_duration_microseconds", "request_size_bytes", and
-// "response_size_bytes", respectively. "Help" is replaced by an appropriate
+// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
+// issues) but provides more flexibility (at the cost of a more complex call
+// syntax). As InstrumentHandler, this function registers four metric
+// collectors, but it uses the provided SummaryOpts to create them. However, the
+// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
+// by "requests_total", "request_duration_microseconds", "request_size_bytes",
+// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
// help string. The names of the variable labels of the http_requests_total
// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
//
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
// and all its fields are set to the equally named fields in the provided
// SummaryOpts.
+//
+// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
+// InstrumentHandler is.
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
}
-// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides
-// more flexibility (at the cost of a more complex call syntax). See
-// InstrumentHandlerWithOpts for details how the provided SummaryOpts are used.
+// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
+// the same issues) but provides more flexibility (at the cost of a more complex
+// call syntax). See InstrumentHandlerWithOpts for details on how the provided
+// SummaryOpts are used.
+//
+// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
+// as InstrumentHandler is.
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
reqCnt := NewCounterVec(
CounterOpts{
},
instLabels,
)
+ if err := Register(reqCnt); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqCnt = are.ExistingCollector.(*CounterVec)
+ } else {
+ panic(err)
+ }
+ }
opts.Name = "request_duration_microseconds"
opts.Help = "The HTTP request latencies in microseconds."
reqDur := NewSummary(opts)
+ if err := Register(reqDur); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqDur = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
opts.Name = "request_size_bytes"
opts.Help = "The HTTP request sizes in bytes."
reqSz := NewSummary(opts)
+ if err := Register(reqSz); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqSz = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
opts.Name = "response_size_bytes"
opts.Help = "The HTTP response sizes in bytes."
resSz := NewSummary(opts)
-
- regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec)
- regReqDur := MustRegisterOrGet(reqDur).(Summary)
- regReqSz := MustRegisterOrGet(reqSz).(Summary)
- regResSz := MustRegisterOrGet(resSz).(Summary)
+ if err := Register(resSz); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ resSz = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
delegate := &responseWriterDelegator{ResponseWriter: w}
- out := make(chan int)
- urlLen := 0
- if r.URL != nil {
- urlLen = len(r.URL.String())
- }
- go computeApproximateRequestSize(r, out, urlLen)
+ out := computeApproximateRequestSize(r)
_, cn := w.(http.CloseNotifier)
_, fl := w.(http.Flusher)
method := sanitizeMethod(r.Method)
code := sanitizeCode(delegate.status)
- regReqCnt.WithLabelValues(method, code).Inc()
- regReqDur.Observe(elapsed)
- regResSz.Observe(float64(delegate.written))
- regReqSz.Observe(float64(<-out))
+ reqCnt.WithLabelValues(method, code).Inc()
+ reqDur.Observe(elapsed)
+ resSz.Observe(float64(delegate.written))
+ reqSz.Observe(float64(<-out))
})
}
-func computeApproximateRequestSize(r *http.Request, out chan int, s int) {
- s += len(r.Method)
- s += len(r.Proto)
- for name, values := range r.Header {
- s += len(name)
- for _, value := range values {
- s += len(value)
- }
+func computeApproximateRequestSize(r *http.Request) <-chan int {
+	// Get the URL length in the current goroutine to avoid a race condition:
+	// a HandlerFunc running in parallel may modify the URL.
+ s := 0
+ if r.URL != nil {
+ s += len(r.URL.String())
}
- s += len(r.Host)
- // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+ out := make(chan int, 1)
- if r.ContentLength != -1 {
- s += int(r.ContentLength)
- }
- out <- s
+ go func() {
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ out <- s
+ close(out)
+ }()
+
+ return out
}
type responseWriterDelegator struct {
const separatorByte byte = 255
// A Metric models a single sample value with its meta data being exported to
-// Prometheus. Implementers of Metric in this package inclued Gauge, Counter,
-// Untyped, and Summary. Users can implement their own Metric types, but that
-// should be rarely needed. See the example for SelfCollector, which is also an
-// example for a user-implemented Metric.
+// Prometheus. Implementations of Metric in this package are Gauge, Counter,
+// Histogram, Summary, and Untyped.
type Metric interface {
// Desc returns the descriptor for the Metric. This method idempotently
// returns the same descriptor throughout the lifetime of the
// Write encodes the Metric into a "Metric" Protocol Buffer data
// transmission object.
//
- // Implementers of custom Metric types must observe concurrency safety
- // as reads of this metric may occur at any time, and any blocking
- // occurs at the expense of total performance of rendering all
- // registered metrics. Ideally Metric implementations should support
- // concurrent readers.
+ // Metric implementations must observe concurrency safety as reads of
+ // this metric may occur at any time, and any blocking occurs at the
+ // expense of total performance of rendering all registered
+ // metrics. Ideally, Metric implementations should support concurrent
+ // readers.
//
- // The Prometheus client library attempts to minimize memory allocations
- // and will provide a pre-existing reset dto.Metric pointer. Prometheus
- // may recycle the dto.Metric proto message, so Metric implementations
- // should just populate the provided dto.Metric and then should not keep
- // any reference to it.
- //
- // While populating dto.Metric, labels must be sorted lexicographically.
- // (Implementers may find LabelPairSorter useful for that.)
+ // While populating dto.Metric, it is the responsibility of the
+ // implementation to ensure validity of the Metric protobuf (like valid
+ // UTF-8 strings or syntactically valid metric and label names). It is
+ // recommended to sort labels lexicographically. (Implementers may find
+ // LabelPairSorter useful for that.) Callers of Write should still make
+ // sure of sorting if they depend on it.
Write(*dto.Metric) error
+ // TODO(beorn7): The original rationale of passing in a pre-allocated
+ // dto.Metric protobuf to save allocations has disappeared. The
+ // signature of this method should be changed to "Write() (*dto.Metric,
+ // error)".
}
// Opts bundles the options for creating most Metric types. Each metric
--- /dev/null
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Observer is the interface that wraps the Observe method, which is used by
+// Histogram and Summary to add observations.
+type Observer interface {
+ Observe(float64)
+}
+
+// The ObserverFunc type is an adapter to allow the use of ordinary
+// functions as Observers. If f is a function with the appropriate
+// signature, ObserverFunc(f) is an Observer that calls f.
+//
+// This adapter is usually used in connection with the Timer type, and there are
+// two general use cases:
+//
+// The most common one is to use a Gauge as the Observer for a Timer.
+// See the "Gauge" Timer example.
+//
+// The more advanced use case is to create a function that dynamically decides
+// which Observer to use for observing the duration. See the "Complex" Timer
+// example.
+type ObserverFunc func(float64)
+
+// Observe calls f(value). It implements Observer.
+func (f ObserverFunc) Observe(value float64) {
+ f(value)
+}
+
+// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
+type ObserverVec interface {
+ GetMetricWith(Labels) (Observer, error)
+ GetMetricWithLabelValues(lvs ...string) (Observer, error)
+ With(Labels) Observer
+ WithLabelValues(...string) Observer
+
+ Collector
+}
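
A brief sketch of ObserverFunc in use: adapting a Gauge's Set method into an Observer so that only the most recent observation is kept. The metric name is hypothetical; ObserverFunc and Gauge.Set are the APIs shown in this change.

```go
package main

import "github.com/prometheus/client_golang/prometheus"

var lastRunDuration = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "myapp_last_run_duration_seconds", // hypothetical name
	Help: "Duration of the last run in seconds.",
})

func main() {
	prometheus.MustRegister(lastRunDuration)

	// ObserverFunc adapts any func(float64) into an Observer. Here a Gauge's
	// Set method records only the most recent observation, which can be
	// enough when a full Histogram or Summary would be overkill.
	var obs prometheus.Observer = prometheus.ObserverFunc(lastRunDuration.Set)
	obs.Observe(1.7)
}
```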
pid int
collectFn func(chan<- Metric)
pidFn func() (int, error)
- cpuTotal Counter
- openFDs, maxFDs Gauge
- vsize, rss Gauge
- startTime Gauge
+ cpuTotal *Desc
+ openFDs, maxFDs *Desc
+ vsize, rss *Desc
+ startTime *Desc
}
// NewProcessCollector returns a collector which exports the current state of
// process metrics including cpu, memory and file descriptor usage as well as
// the process start time for the given process id under the given namespace.
-func NewProcessCollector(pid int, namespace string) *processCollector {
+func NewProcessCollector(pid int, namespace string) Collector {
return NewProcessCollectorPIDFn(
func() (int, error) { return pid, nil },
namespace,
func NewProcessCollectorPIDFn(
pidFn func() (int, error),
namespace string,
-) *processCollector {
+) Collector {
+ ns := ""
+ if len(namespace) > 0 {
+ ns = namespace + "_"
+ }
+
c := processCollector{
pidFn: pidFn,
collectFn: func(chan<- Metric) {},
- cpuTotal: NewCounter(CounterOpts{
- Namespace: namespace,
- Name: "process_cpu_seconds_total",
- Help: "Total user and system CPU time spent in seconds.",
- }),
- openFDs: NewGauge(GaugeOpts{
- Namespace: namespace,
- Name: "process_open_fds",
- Help: "Number of open file descriptors.",
- }),
- maxFDs: NewGauge(GaugeOpts{
- Namespace: namespace,
- Name: "process_max_fds",
- Help: "Maximum number of open file descriptors.",
- }),
- vsize: NewGauge(GaugeOpts{
- Namespace: namespace,
- Name: "process_virtual_memory_bytes",
- Help: "Virtual memory size in bytes.",
- }),
- rss: NewGauge(GaugeOpts{
- Namespace: namespace,
- Name: "process_resident_memory_bytes",
- Help: "Resident memory size in bytes.",
- }),
- startTime: NewGauge(GaugeOpts{
- Namespace: namespace,
- Name: "process_start_time_seconds",
- Help: "Start time of the process since unix epoch in seconds.",
- }),
+ cpuTotal: NewDesc(
+ ns+"process_cpu_seconds_total",
+ "Total user and system CPU time spent in seconds.",
+ nil, nil,
+ ),
+ openFDs: NewDesc(
+ ns+"process_open_fds",
+ "Number of open file descriptors.",
+ nil, nil,
+ ),
+ maxFDs: NewDesc(
+ ns+"process_max_fds",
+ "Maximum number of open file descriptors.",
+ nil, nil,
+ ),
+ vsize: NewDesc(
+ ns+"process_virtual_memory_bytes",
+ "Virtual memory size in bytes.",
+ nil, nil,
+ ),
+ rss: NewDesc(
+ ns+"process_resident_memory_bytes",
+ "Resident memory size in bytes.",
+ nil, nil,
+ ),
+ startTime: NewDesc(
+ ns+"process_start_time_seconds",
+ "Start time of the process since unix epoch in seconds.",
+ nil, nil,
+ ),
}
// Set up process metric collection if supported by the runtime.
// Describe returns all descriptions of the collector.
func (c *processCollector) Describe(ch chan<- *Desc) {
- ch <- c.cpuTotal.Desc()
- ch <- c.openFDs.Desc()
- ch <- c.maxFDs.Desc()
- ch <- c.vsize.Desc()
- ch <- c.rss.Desc()
- ch <- c.startTime.Desc()
+ ch <- c.cpuTotal
+ ch <- c.openFDs
+ ch <- c.maxFDs
+ ch <- c.vsize
+ ch <- c.rss
+ ch <- c.startTime
}
// Collect returns the current state of all metrics of the collector.
}
if stat, err := p.NewStat(); err == nil {
- c.cpuTotal.Set(stat.CPUTime())
- ch <- c.cpuTotal
- c.vsize.Set(float64(stat.VirtualMemory()))
- ch <- c.vsize
- c.rss.Set(float64(stat.ResidentMemory()))
- ch <- c.rss
-
+ ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
+ ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
+ ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
if startTime, err := stat.StartTime(); err == nil {
- c.startTime.Set(startTime)
- ch <- c.startTime
+ ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
}
}
if fds, err := p.FileDescriptorsLen(); err == nil {
- c.openFDs.Set(float64(fds))
- ch <- c.openFDs
+ ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
}
if limits, err := p.NewLimits(); err == nil {
- c.maxFDs.Set(float64(limits.OpenFiles))
- ch <- c.maxFDs
+ ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
}
}
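
Since NewProcessCollector now returns a plain Collector, using it on a custom registry looks like the following hedged sketch; the registry calls are the ones referenced in the package documentation, and the printed output is only indicative.

```go
package main

import (
	"fmt"
	"os"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A custom registry does not pre-register the process collector, so it is
	// added explicitly. An empty namespace keeps the canonical process_* names.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewProcessCollector(os.Getpid(), ""))

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName()) // e.g. process_cpu_seconds_total
	}
}
```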
--- /dev/null
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.8
+
+package promhttp
+
+import (
+ "io"
+ "net/http"
+)
+
+func newDelegator(w http.ResponseWriter) delegator {
+ d := &responseWriterDelegator{ResponseWriter: w}
+
+ _, cn := w.(http.CloseNotifier)
+ _, fl := w.(http.Flusher)
+ _, hj := w.(http.Hijacker)
+ _, rf := w.(io.ReaderFrom)
+ if cn && fl && hj && rf {
+ return &fancyDelegator{d}
+ }
+
+ return d
+}
--- /dev/null
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+ "io"
+ "net/http"
+)
+
+// newDelegator handles the four different methods of upgrading a
+// http.ResponseWriter to delegator.
+func newDelegator(w http.ResponseWriter) delegator {
+ d := &responseWriterDelegator{ResponseWriter: w}
+
+ _, cn := w.(http.CloseNotifier)
+ _, fl := w.(http.Flusher)
+ _, hj := w.(http.Hijacker)
+ _, ps := w.(http.Pusher)
+ _, rf := w.(io.ReaderFrom)
+
+	// Check for the four most common combinations of interfaces a
+ // http.ResponseWriter might implement.
+ switch {
+ case cn && fl && hj && rf && ps:
+ // All interfaces.
+ return &fancyPushDelegator{
+ fancyDelegator: &fancyDelegator{d},
+ p: &pushDelegator{d},
+ }
+ case cn && fl && hj && rf:
+ // All interfaces, except http.Pusher.
+ return &fancyDelegator{d}
+ case ps:
+ // Just http.Pusher.
+ return &pushDelegator{d}
+ }
+
+ return d
+}
+
+type fancyPushDelegator struct {
+ p *pushDelegator
+
+ *fancyDelegator
+}
+
+func (f *fancyPushDelegator) Push(target string, opts *http.PushOptions) error {
+ return f.p.Push(target, opts)
+}
+
+type pushDelegator struct {
+ *responseWriterDelegator
+}
+
+func (f *pushDelegator) Push(target string, opts *http.PushOptions) error {
+ return f.ResponseWriter.(http.Pusher).Push(target, opts)
+}
--- /dev/null
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package promhttp provides tooling around HTTP servers and clients.
+//
+// First, the package allows the creation of http.Handler instances to expose
+// Prometheus metrics via HTTP. promhttp.Handler acts on the
+// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
+// custom registry or anything that implements the Gatherer interface. It also
+// allows the creation of handlers that act differently on errors or allow to
+// log errors.
+//
+// Second, the package provides tooling to instrument instances of http.Handler
+// via middleware. Middleware wrappers follow the naming scheme
+// InstrumentHandlerX, where X describes the intended use of the middleware.
+// See each function's doc comment for specific details.
+package promhttp
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "sync"
+
+ "github.com/prometheus/common/expfmt"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+ contentTypeHeader = "Content-Type"
+ contentLengthHeader = "Content-Length"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
+)
+
+var bufPool sync.Pool
+
+func getBuf() *bytes.Buffer {
+ buf := bufPool.Get()
+ if buf == nil {
+ return &bytes.Buffer{}
+ }
+ return buf.(*bytes.Buffer)
+}
+
+func giveBuf(buf *bytes.Buffer) {
+ buf.Reset()
+ bufPool.Put(buf)
+}
+
+// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The
+// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP
+// error, no error logging, and compression if requested by the client.
+//
+// If you want to create a Handler for the DefaultGatherer with different
+// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and
+// your desired HandlerOpts.
+func Handler() http.Handler {
+ return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})
+}
+
+// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
+// of the Handler is defined by the provided HandlerOpts.
+func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ mfs, err := reg.Gather()
+ if err != nil {
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error gathering metrics:", err)
+ }
+ switch opts.ErrorHandling {
+ case PanicOnError:
+ panic(err)
+ case ContinueOnError:
+ if len(mfs) == 0 {
+ http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ case HTTPErrorOnError:
+ http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+
+ contentType := expfmt.Negotiate(req.Header)
+ buf := getBuf()
+ defer giveBuf(buf)
+ writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
+ enc := expfmt.NewEncoder(writer, contentType)
+ var lastErr error
+ for _, mf := range mfs {
+ if err := enc.Encode(mf); err != nil {
+ lastErr = err
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error encoding metric family:", err)
+ }
+ switch opts.ErrorHandling {
+ case PanicOnError:
+ panic(err)
+ case ContinueOnError:
+ // Handled later.
+ case HTTPErrorOnError:
+ http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+ }
+ if closer, ok := writer.(io.Closer); ok {
+ closer.Close()
+ }
+ if lastErr != nil && buf.Len() == 0 {
+			http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
+ return
+ }
+ header := w.Header()
+ header.Set(contentTypeHeader, string(contentType))
+ header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+ if encoding != "" {
+ header.Set(contentEncodingHeader, encoding)
+ }
+ w.Write(buf.Bytes())
+ // TODO(beorn7): Consider streaming serving of metrics.
+ })
+}
+
+// HandlerErrorHandling defines how a Handler serving metrics will handle
+// errors.
+type HandlerErrorHandling int
+
+// These constants cause handlers serving metrics to behave as described if
+// errors are encountered.
+const (
+ // Serve an HTTP status code 500 upon the first error
+ // encountered. Report the error message in the body.
+ HTTPErrorOnError HandlerErrorHandling = iota
+ // Ignore errors and try to serve as many metrics as possible. However,
+ // if no metrics can be served, serve an HTTP status code 500 and the
+ // last error message in the body. Only use this in deliberate "best
+ // effort" metrics collection scenarios. It is recommended to at least
+ // log errors (by providing an ErrorLog in HandlerOpts) to not mask
+ // errors completely.
+ ContinueOnError
+ // Panic upon the first error encountered (useful for "crash only" apps).
+ PanicOnError
+)
+
+// Logger is the minimal interface HandlerOpts needs for logging. Note that
+// log.Logger from the standard library implements this interface, and it is
+// easy to implement by custom loggers, if they don't do so already anyway.
+type Logger interface {
+ Println(v ...interface{})
+}
+
+// HandlerOpts specifies options how to serve metrics via an http.Handler. The
+// zero value of HandlerOpts is a reasonable default.
+type HandlerOpts struct {
+ // ErrorLog specifies an optional logger for errors collecting and
+ // serving metrics. If nil, errors are not logged at all.
+ ErrorLog Logger
+ // ErrorHandling defines how errors are handled. Note that errors are
+ // logged regardless of the configured ErrorHandling provided ErrorLog
+ // is not nil.
+ ErrorHandling HandlerErrorHandling
+ // If DisableCompression is true, the handler will never compress the
+ // response, even if requested by the client.
+ DisableCompression bool
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) {
+ if compressionDisabled {
+ return writer, ""
+ }
+ header := request.Header.Get(acceptEncodingHeader)
+ parts := strings.Split(header, ",")
+ for _, part := range parts {
+ part := strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return gzip.NewWriter(writer), "gzip"
+ }
+ }
+ return writer, ""
+}
--- /dev/null
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// InstrumentHandlerInFlight is a middleware that wraps the provided
+// http.Handler. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.Handler.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ g.Inc()
+ defer g.Dec()
+ next.ServeHTTP(w, r)
+ })
+}
+
+// InstrumentHandlerDuration is a middleware that wraps the provided
+// http.Handler to observe the request duration with the provided ObserverVec.
+// The ObserverVec must have zero, one, or two labels. The only allowed label
+// names are "code" and "method". The function panics if any other instance
+// labels are provided. The Observe method of the Observer in the ObserverVec
+// is called with the request duration in seconds. Partitioning happens by HTTP
+// status code and/or HTTP method if the respective instance label names are
+// present in the ObserverVec. For unpartitioned observations, use an
+// ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ d := newDelegator(w)
+ next.ServeHTTP(d, r)
+
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ next.ServeHTTP(w, r)
+ obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
+ })
+}
+
+// InstrumentHandlerCounter is a middleware that wraps the provided
+// http.Handler to observe the request result with the provided CounterVec.
+// The CounterVec must have zero, one, or two labels. The only allowed label
+// names are "code" and "method". The function panics if any other instance
+// labels are provided. Partitioning of the CounterVec happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present
+// in the CounterVec. For unpartitioned observations, use a CounterVec with
+// zero labels.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, the Counter is not incremented.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(counter)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w)
+ next.ServeHTTP(d, r)
+ counter.With(labels(code, method, r.Method, d.Status())).Inc()
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ next.ServeHTTP(w, r)
+ counter.With(labels(code, method, r.Method, 0)).Inc()
+ })
+}
+
+// InstrumentHandlerRequestSize is a middleware that wraps the provided
+// http.Handler to observe the request size with the provided ObserverVec.
+// The ObserverVec must have zero, one, or two labels. The only allowed label
+// names are "code" and "method". The function panics if any other instance
+// labels are provided. The Observe method of the Observer in the ObserverVec
+// is called with the request size in bytes. Partitioning happens by HTTP
+// status code and/or HTTP method if the respective instance label names are
+// present in the ObserverVec. For unpartitioned observations, use an
+// ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w)
+ next.ServeHTTP(d, r)
+ size := computeApproximateRequestSize(r)
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ next.ServeHTTP(w, r)
+ size := computeApproximateRequestSize(r)
+ obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
+ })
+}
+
+// InstrumentHandlerResponseSize is a middleware that wraps the provided
+// http.Handler to observe the response size with the provided ObserverVec.
+// The ObserverVec must have zero, one, or two labels. The only allowed label
+// names are "code" and "method". The function panics if any other instance
+// labels are provided. The Observe method of the Observer in the ObserverVec
+// is called with the response size in bytes. Partitioning happens by HTTP
+// status code and/or HTTP method if the respective instance label names are
+// present in the ObserverVec. For unpartitioned observations, use an
+// ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
+ code, method := checkLabels(obs)
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w)
+ next.ServeHTTP(d, r)
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
+ })
+}
+
+func checkLabels(c prometheus.Collector) (code bool, method bool) {
+ // TODO(beorn7): Remove this hacky way to check for instance labels
+ // once Descriptors can have their dimensionality queried.
+ var (
+ desc *prometheus.Desc
+ pm dto.Metric
+ )
+
+ descc := make(chan *prometheus.Desc, 1)
+ c.Describe(descc)
+
+ select {
+ case desc = <-descc:
+ default:
+ panic("no description provided by collector")
+ }
+ select {
+ case <-descc:
+ panic("more than one description provided by collector")
+ default:
+ }
+
+ close(descc)
+
+ if _, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0); err == nil {
+ return
+ } else if m, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, ""); err == nil {
+ if err := m.Write(&pm); err != nil {
+ panic("error checking metric for labels")
+ }
+
+ name := *pm.Label[0].Name
+ if name == "code" {
+ code = true
+ } else if name == "method" {
+ method = true
+ } else {
+ panic("metric partitioned with non-supported labels")
+ }
+ return
+ } else if m, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, "", ""); err == nil {
+ if err := m.Write(&pm); err != nil {
+ panic("error checking metric for labels")
+ }
+
+ for _, label := range pm.Label {
+ if *label.Name == "code" || *label.Name == "method" {
+ continue
+ }
+ panic("metric partitioned with non-supported labels")
+ }
+
+ code = true
+ method = true
+ return
+ }
+
+ panic("metric partitioned with non-supported labels")
+}
+
+// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
+// unnecessary allocations on each request.
+var emptyLabels = prometheus.Labels{}
+
+func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
+ if !(code || method) {
+ return emptyLabels
+ }
+ labels := prometheus.Labels{}
+
+ if code {
+ labels["code"] = sanitizeCode(status)
+ }
+ if method {
+ labels["method"] = sanitizeMethod(reqMethod)
+ }
+
+ return labels
+}
+
+func computeApproximateRequestSize(r *http.Request) int {
+ s := 0
+ if r.URL != nil {
+ s += len(r.URL.String())
+ }
+
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ return s
+}
+
+func sanitizeMethod(m string) string {
+ switch m {
+ case "GET", "get":
+ return "get"
+ case "PUT", "put":
+ return "put"
+ case "HEAD", "head":
+ return "head"
+ case "POST", "post":
+ return "post"
+ case "DELETE", "delete":
+ return "delete"
+ case "CONNECT", "connect":
+ return "connect"
+ case "OPTIONS", "options":
+ return "options"
+ case "NOTIFY", "notify":
+ return "notify"
+ default:
+ return strings.ToLower(m)
+ }
+}
+
+// If the wrapped http.Handler has not set a status code, i.e. the value is
+// currently 0, santizeCode will return 200, for consistency with behavior in
+// the stdlib.
+func sanitizeCode(s int) string {
+ switch s {
+ case 100:
+ return "100"
+ case 101:
+ return "101"
+
+ case 200, 0:
+ return "200"
+ case 201:
+ return "201"
+ case 202:
+ return "202"
+ case 203:
+ return "203"
+ case 204:
+ return "204"
+ case 205:
+ return "205"
+ case 206:
+ return "206"
+
+ case 300:
+ return "300"
+ case 301:
+ return "301"
+ case 302:
+ return "302"
+ case 304:
+ return "304"
+ case 305:
+ return "305"
+ case 307:
+ return "307"
+
+ case 400:
+ return "400"
+ case 401:
+ return "401"
+ case 402:
+ return "402"
+ case 403:
+ return "403"
+ case 404:
+ return "404"
+ case 405:
+ return "405"
+ case 406:
+ return "406"
+ case 407:
+ return "407"
+ case 408:
+ return "408"
+ case 409:
+ return "409"
+ case 410:
+ return "410"
+ case 411:
+ return "411"
+ case 412:
+ return "412"
+ case 413:
+ return "413"
+ case 414:
+ return "414"
+ case 415:
+ return "415"
+ case 416:
+ return "416"
+ case 417:
+ return "417"
+ case 418:
+ return "418"
+
+ case 500:
+ return "500"
+ case 501:
+ return "501"
+ case 502:
+ return "502"
+ case 503:
+ return "503"
+ case 504:
+ return "504"
+ case 505:
+ return "505"
+
+ case 428:
+ return "428"
+ case 429:
+ return "429"
+ case 431:
+ return "431"
+ case 511:
+ return "511"
+
+ default:
+ return strconv.Itoa(s)
+ }
+}
+
+type delegator interface {
+ Status() int
+ Written() int64
+
+ http.ResponseWriter
+}
+
+type responseWriterDelegator struct {
+ http.ResponseWriter
+
+ handler, method string
+ status int
+ written int64
+ wroteHeader bool
+}
+
+func (r *responseWriterDelegator) Status() int {
+ return r.status
+}
+
+func (r *responseWriterDelegator) Written() int64 {
+ return r.written
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+ r.status = code
+ r.wroteHeader = true
+ r.ResponseWriter.WriteHeader(code)
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.Write(b)
+ r.written += int64(n)
+ return n, err
+}
+
+type fancyDelegator struct {
+ *responseWriterDelegator
+}
+
+func (r *fancyDelegator) CloseNotify() <-chan bool {
+ return r.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+func (r *fancyDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return r.ResponseWriter.(http.Hijacker).Hijack()
+}
+
+func (r *fancyDelegator) Flush() {
+ r.ResponseWriter.(http.Flusher).Flush()
+}
+
+func (r *fancyDelegator) ReadFrom(re io.Reader) (int64, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
+ r.written += n
+ return n, err
+}
+++ /dev/null
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Copyright (c) 2013, The Prometheus Authors
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be found
-// in the LICENSE file.
-
-package prometheus
-
-// Push triggers a metric collection by the default registry and pushes all
-// collected metrics to the Pushgateway specified by url. See the Pushgateway
-// documentation for detailed implications of the job and instance
-// parameter. instance can be left empty. You can use just host:port or ip:port
-// as url, in which case 'http://' is added automatically. You can also include
-// the schema in the URL. However, do not include the '/metrics/jobs/...' part.
-//
-// Note that all previously pushed metrics with the same job and instance will
-// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
-// to push to the Pushgateway.)
-func Push(job, instance, url string) error {
- return defRegistry.Push(job, instance, url, "PUT")
-}
-
-// PushAdd works like Push, but only previously pushed metrics with the same
-// name (and the same job and instance) will be replaced. (It uses HTTP method
-// 'POST' to push to the Pushgateway.)
-func PushAdd(job, instance, url string) error {
- return defRegistry.Push(job, instance, url, "POST")
-}
-
-// PushCollectors works like Push, but it does not collect from the default
-// registry. Instead, it collects from the provided collectors. It is a
-// convenient way to push only a few metrics.
-func PushCollectors(job, instance, url string, collectors ...Collector) error {
- return pushCollectors(job, instance, url, "PUT", collectors...)
-}
-
-// PushAddCollectors works like PushAdd, but it does not collect from the
-// default registry. Instead, it collects from the provided collectors. It is a
-// convenient way to push only a few metrics.
-func PushAddCollectors(job, instance, url string, collectors ...Collector) error {
- return pushCollectors(job, instance, url, "POST", collectors...)
-}
-
-func pushCollectors(job, instance, url, method string, collectors ...Collector) error {
- r := newRegistry()
- for _, collector := range collectors {
- if _, err := r.Register(collector); err != nil {
- return err
- }
- }
- return r.Push(job, instance, url, method)
-}
// See the License for the specific language governing permissions and
// limitations under the License.
-// Copyright (c) 2013, The Prometheus Authors
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be found
-// in the LICENSE file.
-
package prometheus
import (
"bytes"
- "compress/gzip"
"errors"
"fmt"
- "io"
- "net/http"
- "net/url"
"os"
"sort"
- "strings"
"sync"
"github.com/golang/protobuf/proto"
- "github.com/prometheus/common/expfmt"
dto "github.com/prometheus/client_model/go"
)
-var (
- defRegistry = newDefaultRegistry()
- errAlreadyReg = errors.New("duplicate metrics collector registration attempted")
-)
-
-// Constants relevant to the HTTP interface.
const (
- // APIVersion is the version of the format of the exported data. This
- // will match this library's version, which subscribes to the Semantic
- // Versioning scheme.
- APIVersion = "0.0.4"
-
- // DelimitedTelemetryContentType is the content type set on telemetry
- // data responses in delimited protobuf format.
- DelimitedTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`
- // TextTelemetryContentType is the content type set on telemetry data
- // responses in text format.
- TextTelemetryContentType = `text/plain; version=` + APIVersion
- // ProtoTextTelemetryContentType is the content type set on telemetry
- // data responses in protobuf text format. (Only used for debugging.)
- ProtoTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`
- // ProtoCompactTextTelemetryContentType is the content type set on
- // telemetry data responses in protobuf compact text format. (Only used
- // for debugging.)
- ProtoCompactTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`
-
- // Constants for object pools.
- numBufs = 4
- numMetricFamilies = 1000
- numMetrics = 10000
-
// Capacity for the channel to collect metrics and descriptors.
capMetricChan = 1000
capDescChan = 10
+)
- contentTypeHeader = "Content-Type"
- contentLengthHeader = "Content-Length"
- contentEncodingHeader = "Content-Encoding"
-
- acceptEncodingHeader = "Accept-Encoding"
- acceptHeader = "Accept"
+// DefaultRegisterer and DefaultGatherer are the implementations of the
+// Registerer and Gatherer interface a number of convenience functions in this
+// package act on. Initially, both variables point to the same Registry, which
+// has a process collector (see NewProcessCollector) and a Go collector (see
+// NewGoCollector) already registered. This approach to keep default instances
+// as global state mirrors the approach of other packages in the Go standard
+// library. Note that there are caveats. Change the variables with caution and
+// only if you understand the consequences. Users who want to avoid global state
+// altogether should not use the convenience function and act on custom
+// instances instead.
+var (
+ defaultRegistry = NewRegistry()
+ DefaultRegisterer Registerer = defaultRegistry
+ DefaultGatherer Gatherer = defaultRegistry
)
-// Handler returns the HTTP handler for the global Prometheus registry. It is
-// already instrumented with InstrumentHandler (using "prometheus" as handler
-// name). Usually the handler is used to handle the "/metrics" endpoint.
-func Handler() http.Handler {
- return InstrumentHandler("prometheus", defRegistry)
+func init() {
+ MustRegister(NewProcessCollector(os.Getpid(), ""))
+ MustRegister(NewGoCollector())
}
-// UninstrumentedHandler works in the same way as Handler, but the returned HTTP
-// handler is not instrumented. This is useful if no instrumentation is desired
-// (for whatever reason) or if the instrumentation has to happen with a
-// different handler name (or with a different instrumentation approach
-// altogether). See the InstrumentHandler example.
-func UninstrumentedHandler() http.Handler {
- return defRegistry
+// NewRegistry creates a new vanilla Registry without any Collectors
+// pre-registered.
+func NewRegistry() *Registry {
+ return &Registry{
+ collectorsByID: map[uint64]Collector{},
+ descIDs: map[uint64]struct{}{},
+ dimHashesByName: map[string]uint64{},
+ }
}
-// Register registers a new Collector to be included in metrics collection. It
-// returns an error if the descriptors provided by the Collector are invalid or
-// if they - in combination with descriptors of already registered Collectors -
-// do not fulfill the consistency and uniqueness criteria described in the Desc
-// documentation.
+// NewPedanticRegistry returns a registry that checks during collection if each
+// collected Metric is consistent with its reported Desc, and if the Desc has
+// actually been registered with the registry.
//
-// Do not register the same Collector multiple times concurrently. (Registering
-// the same Collector twice would result in an error anyway, but on top of that,
-// it is not safe to do so concurrently.)
-func Register(m Collector) error {
- _, err := defRegistry.Register(m)
- return err
+// Usually, a Registry will be happy as long as the union of all collected
+// Metrics is consistent and valid even if some metrics are not consistent with
+// their own Desc or a Desc provided by their registered Collector. Well-behaved
+// Collectors and Metrics will only provide consistent Descs. This Registry is
+// useful to test the implementation of Collectors and Metrics.
+func NewPedanticRegistry() *Registry {
+ r := NewRegistry()
+ r.pedanticChecksEnabled = true
+ return r
}
-// MustRegister works like Register but panics where Register would have
-// returned an error.
-func MustRegister(m Collector) {
- err := Register(m)
- if err != nil {
- panic(err)
- }
+// Registerer is the interface for the part of a registry in charge of
+// registering and unregistering. Users of custom registries should use
+// Registerer as type for registration purposes (rather then the Registry type
+// directly). In that way, they are free to use custom Registerer implementation
+// (e.g. for testing purposes).
+type Registerer interface {
+ // Register registers a new Collector to be included in metrics
+ // collection. It returns an error if the descriptors provided by the
+ // Collector are invalid or if they — in combination with descriptors of
+ // already registered Collectors — do not fulfill the consistency and
+ // uniqueness criteria described in the documentation of metric.Desc.
+ //
+ // If the provided Collector is equal to a Collector already registered
+ // (which includes the case of re-registering the same Collector), the
+ // returned error is an instance of AlreadyRegisteredError, which
+ // contains the previously registered Collector.
+ //
+ // It is in general not safe to register the same Collector multiple
+ // times concurrently.
+ Register(Collector) error
+ // MustRegister works like Register but registers any number of
+ // Collectors and panics upon the first registration that causes an
+ // error.
+ MustRegister(...Collector)
+ // Unregister unregisters the Collector that equals the Collector passed
+ // in as an argument. (Two Collectors are considered equal if their
+ // Describe method yields the same set of descriptors.) The function
+ // returns whether a Collector was unregistered.
+ //
+ // Note that even after unregistering, it will not be possible to
+ // register a new Collector that is inconsistent with the unregistered
+ // Collector, e.g. a Collector collecting metrics with the same name but
+ // a different help string. The rationale here is that the same registry
+ // instance must only collect consistent metrics throughout its
+ // lifetime.
+ Unregister(Collector) bool
}
-// RegisterOrGet works like Register but does not return an error if a Collector
-// is registered that equals a previously registered Collector. (Two Collectors
-// are considered equal if their Describe method yields the same set of
-// descriptors.) Instead, the previously registered Collector is returned (which
-// is helpful if the new and previously registered Collectors are equal but not
-// identical, i.e. not pointers to the same object).
+// Gatherer is the interface for the part of a registry in charge of gathering
+// the collected metrics into a number of MetricFamilies. The Gatherer interface
+// comes with the same general implication as described for the Registerer
+// interface.
+type Gatherer interface {
+ // Gather calls the Collect method of the registered Collectors and then
+ // gathers the collected metrics into a lexicographically sorted slice
+ // of MetricFamily protobufs. Even if an error occurs, Gather attempts
+ // to gather as many metrics as possible. Hence, if a non-nil error is
+ // returned, the returned MetricFamily slice could be nil (in case of a
+ // fatal error that prevented any meaningful metric collection) or
+ // contain a number of MetricFamily protobufs, some of which might be
+ // incomplete, and some might be missing altogether. The returned error
+ // (which might be a MultiError) explains the details. In scenarios
+ // where complete collection is critical, the returned MetricFamily
+ // protobufs should be disregarded if the returned error is non-nil.
+ Gather() ([]*dto.MetricFamily, error)
+}
+
+// Register registers the provided Collector with the DefaultRegisterer.
//
-// As for Register, it is still not safe to call RegisterOrGet with the same
-// Collector multiple times concurrently.
-func RegisterOrGet(m Collector) (Collector, error) {
- return defRegistry.RegisterOrGet(m)
+// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
+// details.
+func Register(c Collector) error {
+ return DefaultRegisterer.Register(c)
}
-// MustRegisterOrGet works like Register but panics where RegisterOrGet would
-// have returned an error.
-func MustRegisterOrGet(m Collector) Collector {
- existing, err := RegisterOrGet(m)
- if err != nil {
- panic(err)
- }
- return existing
+// MustRegister registers the provided Collectors with the DefaultRegisterer and
+// panics if any error occurs.
+//
+// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
+// there for more details.
+func MustRegister(cs ...Collector) {
+ DefaultRegisterer.MustRegister(cs...)
}
-// Unregister unregisters the Collector that equals the Collector passed in as
-// an argument. (Two Collectors are considered equal if their Describe method
-// yields the same set of descriptors.) The function returns whether a Collector
-// was unregistered.
+// Unregister removes the registration of the provided Collector from the
+// DefaultRegisterer.
+//
+// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
+// more details.
func Unregister(c Collector) bool {
- return defRegistry.Unregister(c)
+ return DefaultRegisterer.Unregister(c)
}
-// SetMetricFamilyInjectionHook sets a function that is called whenever metrics
-// are collected. The hook function must be set before metrics collection begins
-// (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The
-// MetricFamily protobufs returned by the hook function are merged with the
-// metrics collected in the usual way.
-//
-// This is a way to directly inject MetricFamily protobufs managed and owned by
-// the caller. The caller has full responsibility. As no registration of the
-// injected metrics has happened, there is no descriptor to check against, and
-// there are no registration-time checks. If collect-time checks are disabled
-// (see function EnableCollectChecks), no sanity checks are performed on the
-// returned protobufs at all. If collect-checks are enabled, type and uniqueness
-// checks are performed, but no further consistency checks (which would require
-// knowledge of a metric descriptor).
-//
-// Sorting concerns: The caller is responsible for sorting the label pairs in
-// each metric. However, the order of metrics will be sorted by the registry as
-// it is required anyway after merging with the metric families collected
-// conventionally.
-//
-// The function must be callable at any time and concurrently.
-func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
- defRegistry.metricFamilyInjectionHook = hook
+// GathererFunc turns a function into a Gatherer.
+type GathererFunc func() ([]*dto.MetricFamily, error)
+
+// Gather implements Gatherer.
+func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
+ return gf()
+}
+
+// AlreadyRegisteredError is returned by the Register method if the Collector to
+// be registered has already been registered before, or a different Collector
+// that collects the same metrics has been registered before. Registration fails
+// in that case, but you can detect from the kind of error what has
+// happened. The error contains fields for the existing Collector and the
+// (rejected) new Collector that equals the existing one. This can be used to
+// find out if an equal Collector has been registered before and switch over to
+// using the old one, as demonstrated in the example.
+type AlreadyRegisteredError struct {
+ ExistingCollector, NewCollector Collector
}
-// PanicOnCollectError sets the behavior whether a panic is caused upon an error
-// while metrics are collected and served to the HTTP endpoint. By default, an
-// internal server error (status code 500) is served with an error message.
-func PanicOnCollectError(b bool) {
- defRegistry.panicOnCollectError = b
+func (err AlreadyRegisteredError) Error() string {
+ return "duplicate metrics collector registration attempted"
}
-// EnableCollectChecks enables (or disables) additional consistency checks
-// during metrics collection. These additional checks are not enabled by default
-// because they inflict a performance penalty and the errors they check for can
-// only happen if the used Metric and Collector types have internal programming
-// errors. It can be helpful to enable these checks while working with custom
-// Collectors or Metrics whose correctness is not well established yet.
-func EnableCollectChecks(b bool) {
- defRegistry.collectChecksEnabled = b
+// MultiError is a slice of errors implementing the error interface. It is used
+// by a Gatherer to report multiple errors during MetricFamily gathering.
+type MultiError []error
+
+func (errs MultiError) Error() string {
+ if len(errs) == 0 {
+ return ""
+ }
+ buf := &bytes.Buffer{}
+ fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
+ for _, err := range errs {
+ fmt.Fprintf(buf, "\n* %s", err)
+ }
+ return buf.String()
+}
+
+// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
+// contained error as error if len(errs is 1). In all other cases, it returns
+// the MultiError directly. This is helpful for returning a MultiError in a way
+// that only uses the MultiError if needed.
+func (errs MultiError) MaybeUnwrap() error {
+ switch len(errs) {
+ case 0:
+ return nil
+ case 1:
+ return errs[0]
+ default:
+ return errs
+ }
}
-// encoder is a function that writes a dto.MetricFamily to an io.Writer in a
-// certain encoding. It returns the number of bytes written and any error
-// encountered. Note that pbutil.WriteDelimited and pbutil.MetricFamilyToText
-// are encoders.
-type encoder func(io.Writer, *dto.MetricFamily) (int, error)
-
-type registry struct {
- mtx sync.RWMutex
- collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
- descIDs map[uint64]struct{}
- dimHashesByName map[string]uint64
- bufPool chan *bytes.Buffer
- metricFamilyPool chan *dto.MetricFamily
- metricPool chan *dto.Metric
- metricFamilyInjectionHook func() []*dto.MetricFamily
-
- panicOnCollectError, collectChecksEnabled bool
+// Registry registers Prometheus collectors, collects their metrics, and gathers
+// them into MetricFamilies for exposition. It implements both Registerer and
+// Gatherer. The zero value is not usable. Create instances with NewRegistry or
+// NewPedanticRegistry.
+type Registry struct {
+ mtx sync.RWMutex
+ collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
+ descIDs map[uint64]struct{}
+ dimHashesByName map[string]uint64
+ pedanticChecksEnabled bool
}
-func (r *registry) Register(c Collector) (Collector, error) {
- descChan := make(chan *Desc, capDescChan)
+// Register implements Registerer.
+func (r *Registry) Register(c Collector) error {
+ var (
+ descChan = make(chan *Desc, capDescChan)
+ newDescIDs = map[uint64]struct{}{}
+ newDimHashesByName = map[string]uint64{}
+ collectorID uint64 // Just a sum of all desc IDs.
+ duplicateDescErr error
+ )
go func() {
c.Describe(descChan)
close(descChan)
}()
-
- newDescIDs := map[uint64]struct{}{}
- newDimHashesByName := map[string]uint64{}
- var collectorID uint64 // Just a sum of all desc IDs.
- var duplicateDescErr error
-
r.mtx.Lock()
defer r.mtx.Unlock()
- // Coduct various tests...
+ // Conduct various tests...
for desc := range descChan {
// Is the descriptor valid at all?
if desc.err != nil {
- return c, fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+ return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
}
// Is the descID unique?
// First check existing descriptors...
if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
if dimHash != desc.dimHash {
- return nil, fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+ return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
}
} else {
// ...then check the new descriptors already seen.
if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
if dimHash != desc.dimHash {
- return nil, fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+ return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
}
} else {
newDimHashesByName[desc.fqName] = desc.dimHash
}
// Did anything happen at all?
if len(newDescIDs) == 0 {
- return nil, errors.New("collector has no descriptors")
+ return errors.New("collector has no descriptors")
}
if existing, exists := r.collectorsByID[collectorID]; exists {
- return existing, errAlreadyReg
+ return AlreadyRegisteredError{
+ ExistingCollector: existing,
+ NewCollector: c,
+ }
}
// If the collectorID is new, but at least one of the descs existed
// before, we are in trouble.
if duplicateDescErr != nil {
- return nil, duplicateDescErr
+ return duplicateDescErr
}
// Only after all tests have passed, actually register.
for name, dimHash := range newDimHashesByName {
r.dimHashesByName[name] = dimHash
}
- return c, nil
-}
-
-func (r *registry) RegisterOrGet(m Collector) (Collector, error) {
- existing, err := r.Register(m)
- if err != nil && err != errAlreadyReg {
- return nil, err
- }
- return existing, nil
+ return nil
}
-func (r *registry) Unregister(c Collector) bool {
- descChan := make(chan *Desc, capDescChan)
+// Unregister implements Registerer.
+func (r *Registry) Unregister(c Collector) bool {
+ var (
+ descChan = make(chan *Desc, capDescChan)
+ descIDs = map[uint64]struct{}{}
+ collectorID uint64 // Just a sum of the desc IDs.
+ )
go func() {
c.Describe(descChan)
close(descChan)
}()
-
- descIDs := map[uint64]struct{}{}
- var collectorID uint64 // Just a sum of the desc IDs.
for desc := range descChan {
if _, exists := descIDs[desc.id]; !exists {
collectorID += desc.id
return true
}
-func (r *registry) Push(job, instance, pushURL, method string) error {
- if !strings.Contains(pushURL, "://") {
- pushURL = "http://" + pushURL
- }
- if strings.HasSuffix(pushURL, "/") {
- pushURL = pushURL[:len(pushURL)-1]
- }
- pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job))
- if instance != "" {
- pushURL += "/instances/" + url.QueryEscape(instance)
- }
- buf := r.getBuf()
- defer r.giveBuf(buf)
- if err := r.writePB(expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)); err != nil {
- if r.panicOnCollectError {
+// MustRegister implements Registerer.
+func (r *Registry) MustRegister(cs ...Collector) {
+ for _, c := range cs {
+ if err := r.Register(c); err != nil {
panic(err)
}
- return err
- }
- req, err := http.NewRequest(method, pushURL, buf)
- if err != nil {
- return err
- }
- req.Header.Set(contentTypeHeader, DelimitedTelemetryContentType)
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
- if resp.StatusCode != 202 {
- return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, pushURL)
}
- return nil
-}
-
-func (r *registry) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- contentType := expfmt.Negotiate(req.Header)
- buf := r.getBuf()
- defer r.giveBuf(buf)
- writer, encoding := decorateWriter(req, buf)
- if err := r.writePB(expfmt.NewEncoder(writer, contentType)); err != nil {
- if r.panicOnCollectError {
- panic(err)
- }
- http.Error(w, "An error has occurred:\n\n"+err.Error(), http.StatusInternalServerError)
- return
- }
- if closer, ok := writer.(io.Closer); ok {
- closer.Close()
- }
- header := w.Header()
- header.Set(contentTypeHeader, string(contentType))
- header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
- if encoding != "" {
- header.Set(contentEncodingHeader, encoding)
- }
- w.Write(buf.Bytes())
}
-func (r *registry) writePB(encoder expfmt.Encoder) error {
- var metricHashes map[uint64]struct{}
- if r.collectChecksEnabled {
- metricHashes = make(map[uint64]struct{})
- }
- metricChan := make(chan Metric, capMetricChan)
- wg := sync.WaitGroup{}
+// Gather implements Gatherer.
+func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
+ var (
+ metricChan = make(chan Metric, capMetricChan)
+ metricHashes = map[uint64]struct{}{}
+ dimHashes = map[string]uint64{}
+ wg sync.WaitGroup
+ errs MultiError // The collected errors to return in the end.
+ registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
+ )
r.mtx.RLock()
metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
collector.Collect(metricChan)
}(collector)
}
+
+ // In case pedantic checks are enabled, we have to copy the map before
+ // giving up the RLock.
+ if r.pedanticChecksEnabled {
+ registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
+ for id := range r.descIDs {
+ registeredDescIDs[id] = struct{}{}
+ }
+ }
+
r.mtx.RUnlock()
// Drain metricChan in case of premature return.
defer func() {
- for _ = range metricChan {
+ for range metricChan {
}
}()
// of metricFamiliesByName (and of metricHashes if checks are
// enabled). Most likely not worth it.
desc := metric.Desc()
+ dtoMetric := &dto.Metric{}
+ if err := metric.Write(dtoMetric); err != nil {
+ errs = append(errs, fmt.Errorf(
+ "error collecting metric %v: %s", desc, err,
+ ))
+ continue
+ }
metricFamily, ok := metricFamiliesByName[desc.fqName]
- if !ok {
- metricFamily = r.getMetricFamily()
- defer r.giveMetricFamily(metricFamily)
+ if ok {
+ if metricFamily.GetHelp() != desc.help {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
+ ))
+ continue
+ }
+ // TODO(beorn7): Simplify switch once Desc has type.
+ switch metricFamily.GetType() {
+ case dto.MetricType_COUNTER:
+ if dtoMetric.Counter == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Counter",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_GAUGE:
+ if dtoMetric.Gauge == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Gauge",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_SUMMARY:
+ if dtoMetric.Summary == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Summary",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_UNTYPED:
+ if dtoMetric.Untyped == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be Untyped",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_HISTOGRAM:
+ if dtoMetric.Histogram == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Histogram",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ default:
+ panic("encountered MetricFamily with invalid type")
+ }
+ } else {
+ metricFamily = &dto.MetricFamily{}
metricFamily.Name = proto.String(desc.fqName)
metricFamily.Help = proto.String(desc.help)
+ // TODO(beorn7): Simplify switch once Desc has type.
+ switch {
+ case dtoMetric.Gauge != nil:
+ metricFamily.Type = dto.MetricType_GAUGE.Enum()
+ case dtoMetric.Counter != nil:
+ metricFamily.Type = dto.MetricType_COUNTER.Enum()
+ case dtoMetric.Summary != nil:
+ metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+ case dtoMetric.Untyped != nil:
+ metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+ case dtoMetric.Histogram != nil:
+ metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+ default:
+ errs = append(errs, fmt.Errorf(
+ "empty metric collected: %s", dtoMetric,
+ ))
+ continue
+ }
metricFamiliesByName[desc.fqName] = metricFamily
}
- dtoMetric := r.getMetric()
- defer r.giveMetric(dtoMetric)
- if err := metric.Write(dtoMetric); err != nil {
- // TODO: Consider different means of error reporting so
- // that a single erroneous metric could be skipped
- // instead of blowing up the whole collection.
- return fmt.Errorf("error collecting metric %v: %s", desc, err)
- }
- switch {
- case metricFamily.Type != nil:
- // Type already set. We are good.
- case dtoMetric.Gauge != nil:
- metricFamily.Type = dto.MetricType_GAUGE.Enum()
- case dtoMetric.Counter != nil:
- metricFamily.Type = dto.MetricType_COUNTER.Enum()
- case dtoMetric.Summary != nil:
- metricFamily.Type = dto.MetricType_SUMMARY.Enum()
- case dtoMetric.Untyped != nil:
- metricFamily.Type = dto.MetricType_UNTYPED.Enum()
- case dtoMetric.Histogram != nil:
- metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
- default:
- return fmt.Errorf("empty metric collected: %s", dtoMetric)
+ if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {
+ errs = append(errs, err)
+ continue
}
- if r.collectChecksEnabled {
- if err := r.checkConsistency(metricFamily, dtoMetric, desc, metricHashes); err != nil {
- return err
+ if r.pedanticChecksEnabled {
+ // Is the desc registered at all?
+ if _, exist := registeredDescIDs[desc.id]; !exist {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s with unregistered descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ ))
+ continue
+ }
+ if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
+ errs = append(errs, err)
+ continue
}
}
metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
}
+ return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
- if r.metricFamilyInjectionHook != nil {
- for _, mf := range r.metricFamilyInjectionHook() {
+// Gatherers is a slice of Gatherer instances that implements the Gatherer
+// interface itself. Its Gather method calls Gather on all Gatherers in the
+// slice in order and returns the merged results. Errors returned from the
+// Gather calles are all returned in a flattened MultiError. Duplicate and
+// inconsistent Metrics are skipped (first occurrence in slice order wins) and
+// reported in the returned error.
+//
+// Gatherers can be used to merge the Gather results from multiple
+// Registries. It also provides a way to directly inject existing MetricFamily
+// protobufs into the gathering by creating a custom Gatherer with a Gather
+// method that simply returns the existing MetricFamily protobufs. Note that no
+// registration is involved (in contrast to Collector registration), so
+// obviously registration-time checks cannot happen. Any inconsistencies between
+// the gathered MetricFamilies are reported as errors by the Gather method, and
+// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
+// (e.g. syntactically invalid metric or label names) will go undetected.
+type Gatherers []Gatherer
+
+// Gather implements Gatherer.
+func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
+ var (
+ metricFamiliesByName = map[string]*dto.MetricFamily{}
+ metricHashes = map[uint64]struct{}{}
+ dimHashes = map[string]uint64{}
+ errs MultiError // The collected errors to return in the end.
+ )
+
+ for i, g := range gs {
+ mfs, err := g.Gather()
+ if err != nil {
+ if multiErr, ok := err.(MultiError); ok {
+ for _, err := range multiErr {
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ }
+ } else {
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ }
+ }
+ for _, mf := range mfs {
existingMF, exists := metricFamiliesByName[mf.GetName()]
- if !exists {
- metricFamiliesByName[mf.GetName()] = mf
- if r.collectChecksEnabled {
- for _, m := range mf.Metric {
- if err := r.checkConsistency(mf, m, nil, metricHashes); err != nil {
- return err
- }
- }
+ if exists {
+ if existingMF.GetHelp() != mf.GetHelp() {
+ errs = append(errs, fmt.Errorf(
+ "gathered metric family %s has help %q but should have %q",
+ mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
+ ))
+ continue
}
- continue
+ if existingMF.GetType() != mf.GetType() {
+ errs = append(errs, fmt.Errorf(
+ "gathered metric family %s has type %s but should have %s",
+ mf.GetName(), mf.GetType(), existingMF.GetType(),
+ ))
+ continue
+ }
+ } else {
+ existingMF = &dto.MetricFamily{}
+ existingMF.Name = mf.Name
+ existingMF.Help = mf.Help
+ existingMF.Type = mf.Type
+ metricFamiliesByName[mf.GetName()] = existingMF
}
for _, m := range mf.Metric {
- if r.collectChecksEnabled {
- if err := r.checkConsistency(existingMF, m, nil, metricHashes); err != nil {
- return err
- }
+ if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
+ errs = append(errs, err)
+ continue
}
existingMF.Metric = append(existingMF.Metric, m)
}
}
}
+ return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// metricSorter is a sortable slice of *dto.Metric.
+type metricSorter []*dto.Metric
- // Now that MetricFamilies are all set, sort their Metrics
- // lexicographically by their label values.
+func (s metricSorter) Len() int {
+ return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+ if len(s[i].Label) != len(s[j].Label) {
+ // This should not happen. The metrics are
+ // inconsistent. However, we have to deal with the fact, as
+ // people might use custom collectors or metric family injection
+ // to create inconsistent metrics. So let's simply compare the
+ // number of labels in this case. That will still yield
+ // reproducible sorting.
+ return len(s[i].Label) < len(s[j].Label)
+ }
+ for n, lp := range s[i].Label {
+ vi := lp.GetValue()
+ vj := s[j].Label[n].GetValue()
+ if vi != vj {
+ return vi < vj
+ }
+ }
+
+ // We should never arrive here. Multiple metrics with the same
+ // label set in the same scrape will lead to undefined ingestion
+ // behavior. However, as above, we have to provide stable sorting
+ // here, even for inconsistent metrics. So sort equal metrics
+ // by their timestamp, with missing timestamps (implying "now")
+ // coming last.
+ if s[i].TimestampMs == nil {
+ return false
+ }
+ if s[j].TimestampMs == nil {
+ return true
+ }
+ return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// normalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
for _, mf := range metricFamiliesByName {
sort.Sort(metricSorter(mf.Metric))
}
-
- // Write out MetricFamilies sorted by their name.
names := make([]string, 0, len(metricFamiliesByName))
- for name := range metricFamiliesByName {
- names = append(names, name)
+ for name, mf := range metricFamiliesByName {
+ if len(mf.Metric) > 0 {
+ names = append(names, name)
+ }
}
sort.Strings(names)
-
+ result := make([]*dto.MetricFamily, 0, len(names))
for _, name := range names {
- if err := encoder.Encode(metricFamiliesByName[name]); err != nil {
- return err
- }
+ result = append(result, metricFamiliesByName[name])
}
- return nil
+ return result
}
-func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, desc *Desc, metricHashes map[uint64]struct{}) error {
-
+// checkMetricConsistency checks if the provided Metric is consistent with the
+// provided MetricFamily. It also hashed the Metric labels and the MetricFamily
+// name. If the resulting hash is alread in the provided metricHashes, an error
+// is returned. If not, it is added to metricHashes. The provided dimHashes maps
+// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
+// doesn't yet contain a hash for the provided MetricFamily, it is
+// added. Otherwise, an error is returned if the existing dimHashes in not equal
+// the calculated dimHash.
+func checkMetricConsistency(
+ metricFamily *dto.MetricFamily,
+ dtoMetric *dto.Metric,
+ metricHashes map[uint64]struct{},
+ dimHashes map[string]uint64,
+) error {
// Type consistency with metric family.
if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
h := hashNew()
h = hashAdd(h, metricFamily.GetName())
h = hashAddByte(h, separatorByte)
+ dh := hashNew()
// Make sure label pairs are sorted. We depend on it for the consistency
- // check. Label pairs must be sorted by contract. But the point of this
- // method is to check for contract violations. So we better do the sort
- // now.
+ // check.
sort.Sort(LabelPairSorter(dtoMetric.Label))
for _, lp := range dtoMetric.Label {
h = hashAdd(h, lp.GetValue())
h = hashAddByte(h, separatorByte)
+ dh = hashAdd(dh, lp.GetName())
+ dh = hashAddByte(dh, separatorByte)
}
if _, exists := metricHashes[h]; exists {
return fmt.Errorf(
metricFamily.GetName(), dtoMetric,
)
}
- metricHashes[h] = struct{}{}
-
- if desc == nil {
- return nil // Nothing left to check if we have no desc.
+ if dimHash, ok := dimHashes[metricFamily.GetName()]; ok {
+ if dimHash != dh {
+ return fmt.Errorf(
+ "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family",
+ metricFamily.GetName(), dtoMetric,
+ )
+ }
+ } else {
+ dimHashes[metricFamily.GetName()] = dh
}
+ metricHashes[h] = struct{}{}
+ return nil
+}
- // Desc consistency with metric family.
- if metricFamily.GetName() != desc.fqName {
- return fmt.Errorf(
- "collected metric %s %s has name %q but should have %q",
- metricFamily.GetName(), dtoMetric, metricFamily.GetName(), desc.fqName,
- )
- }
+func checkDescConsistency(
+ metricFamily *dto.MetricFamily,
+ dtoMetric *dto.Metric,
+ desc *Desc,
+) error {
+ // Desc help consistency with metric family help.
if metricFamily.GetHelp() != desc.help {
return fmt.Errorf(
"collected metric %s %s has help %q but should have %q",
)
}
}
-
- r.mtx.RLock() // Remaining checks need the read lock.
- defer r.mtx.RUnlock()
-
- // Is the desc registered?
- if _, exist := r.descIDs[desc.id]; !exist {
- return fmt.Errorf(
- "collected metric %s %s with unregistered descriptor %s",
- metricFamily.GetName(), dtoMetric, desc,
- )
- }
-
return nil
}
-
-func (r *registry) getBuf() *bytes.Buffer {
- select {
- case buf := <-r.bufPool:
- return buf
- default:
- return &bytes.Buffer{}
- }
-}
-
-func (r *registry) giveBuf(buf *bytes.Buffer) {
- buf.Reset()
- select {
- case r.bufPool <- buf:
- default:
- }
-}
-
-func (r *registry) getMetricFamily() *dto.MetricFamily {
- select {
- case mf := <-r.metricFamilyPool:
- return mf
- default:
- return &dto.MetricFamily{}
- }
-}
-
-func (r *registry) giveMetricFamily(mf *dto.MetricFamily) {
- mf.Reset()
- select {
- case r.metricFamilyPool <- mf:
- default:
- }
-}
-
-func (r *registry) getMetric() *dto.Metric {
- select {
- case m := <-r.metricPool:
- return m
- default:
- return &dto.Metric{}
- }
-}
-
-func (r *registry) giveMetric(m *dto.Metric) {
- m.Reset()
- select {
- case r.metricPool <- m:
- default:
- }
-}
-
-func newRegistry() *registry {
- return ®istry{
- collectorsByID: map[uint64]Collector{},
- descIDs: map[uint64]struct{}{},
- dimHashesByName: map[string]uint64{},
- bufPool: make(chan *bytes.Buffer, numBufs),
- metricFamilyPool: make(chan *dto.MetricFamily, numMetricFamilies),
- metricPool: make(chan *dto.Metric, numMetrics),
- }
-}
-
-func newDefaultRegistry() *registry {
- r := newRegistry()
- r.Register(NewProcessCollector(os.Getpid(), ""))
- r.Register(NewGoCollector())
- return r
-}
-
-// decorateWriter wraps a writer to handle gzip compression if requested. It
-// returns the decorated writer and the appropriate "Content-Encoding" header
-// (which is empty if no compression is enabled).
-func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
- header := request.Header.Get(acceptEncodingHeader)
- parts := strings.Split(header, ",")
- for _, part := range parts {
- part := strings.TrimSpace(part)
- if part == "gzip" || strings.HasPrefix(part, "gzip;") {
- return gzip.NewWriter(writer), "gzip"
- }
- }
- return writer, ""
-}
-
-type metricSorter []*dto.Metric
-
-func (s metricSorter) Len() int {
- return len(s)
-}
-
-func (s metricSorter) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s metricSorter) Less(i, j int) bool {
- if len(s[i].Label) != len(s[j].Label) {
- // This should not happen. The metrics are
- // inconsistent. However, we have to deal with the fact, as
- // people might use custom collectors or metric family injection
- // to create inconsistent metrics. So let's simply compare the
- // number of labels in this case. That will still yield
- // reproducible sorting.
- return len(s[i].Label) < len(s[j].Label)
- }
- for n, lp := range s[i].Label {
- vi := lp.GetValue()
- vj := s[j].Label[n].GetValue()
- if vi != vj {
- return vi < vj
- }
- }
-
- // We should never arrive here. Multiple metrics with the same
- // label set in the same scrape will lead to undefined ingestion
- // behavior. However, as above, we have to provide stable sorting
- // here, even for inconsistent metrics. So sort equal metrics
- // by their timestamp, with missing timestamps (implying "now")
- // coming last.
- if s[i].TimestampMs == nil {
- return false
- }
- if s[j].TimestampMs == nil {
- return true
- }
- return s[i].GetTimestampMs() < s[j].GetTimestampMs()
-}
Observe(float64)
}
+// DefObjectives are the default Summary quantile values.
+//
+// Deprecated: DefObjectives will not be used as the default objectives in
+// v0.10 of the library. The default Summary will have no quantiles then.
var (
- // DefObjectives are the default Summary quantile values.
DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
errQuantileLabelNotAllowed = fmt.Errorf(
ConstLabels Labels
// Objectives defines the quantile rank estimates with their respective
- // absolute error. If Objectives[q] = e, then the value reported
- // for q will be the φ-quantile value for some φ between q-e and q+e.
- // The default value is DefObjectives.
+ // absolute error. If Objectives[q] = e, then the value reported for q
+ // will be the φ-quantile value for some φ between q-e and q+e. The
+ // default value is DefObjectives. It is used if Objectives is left at
+ // its zero value (i.e. nil). To create a Summary without Objectives,
+ // set it to an empty map (i.e. map[float64]float64{}).
+ //
+ // Deprecated: Note that the current value of DefObjectives is
+ // deprecated. It will be replaced by an empty map in v0.10 of the
+ // library. Please explicitly set Objectives to the desired value.
Objectives map[float64]float64
// MaxAge defines the duration for which an observation stays relevant
BufCap uint32
}
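
Given the deprecation note above, a minimal sketch of setting Objectives explicitly rather than relying on DefObjectives; the metric names and objective values are only illustrative.

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Explicit quantile objectives: Objectives[q] = e means the reported
	// q-quantile has an absolute error of at most e.
	requestDuration := prometheus.NewSummary(prometheus.SummaryOpts{
		Name:       "example_request_duration_seconds",
		Help:       "Request duration in seconds.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	})

	// A Summary with no quantiles at all (only _count and _sum): set
	// Objectives to an empty, non-nil map.
	payloadSize := prometheus.NewSummary(prometheus.SummaryOpts{
		Name:       "example_payload_size_bytes",
		Help:       "Payload sizes in bytes.",
		Objectives: map[float64]float64{},
	})

	prometheus.MustRegister(requestDuration, payloadSize)
	requestDuration.Observe(0.42)
	payloadSize.Observe(1024)
}
```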
-// TODO: Great fuck-up with the sliding-window decay algorithm... The Merge
-// method of perk/quantile is actually not working as advertised - and it might
-// be unfixable, as the underlying algorithm is apparently not capable of
-// merging summaries in the first place. To avoid using Merge, we are currently
-// adding observations to _each_ age bucket, i.e. the effort to add a sample is
+// Great fuck-up with the sliding-window decay algorithm... The Merge method of
+// perk/quantile is actually not working as advertised - and it might be
+// unfixable, as the underlying algorithm is apparently not capable of merging
+// summaries in the first place. To avoid using Merge, we are currently adding
+// observations to _each_ age bucket, i.e. the effort to add a sample is
// essentially multiplied by the number of age buckets. When rotating age
// buckets, we empty the previous head stream. On scrape time, we simply take
// the quantiles from the head stream (no merging required). Result: More effort
}
}
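
The workaround described above is easier to see in a toy sketch. This is not the library's implementation (which uses perks/quantile streams); plain slices stand in for the streams to show the "observe into every age bucket, read from the head, empty the rotated bucket" idea.

```go
package main

import (
	"fmt"
	"sort"
)

// slidingWindow is a toy stand-in for the Summary's age buckets.
type slidingWindow struct {
	buckets [][]float64 // one slice per age bucket
	head    int         // bucket that currently covers the longest history
}

func newSlidingWindow(ageBuckets int) *slidingWindow {
	return &slidingWindow{buckets: make([][]float64, ageBuckets)}
}

// Observe adds the sample to every bucket, so the cost per observation grows
// with the number of age buckets (the trade-off described above).
func (w *slidingWindow) Observe(v float64) {
	for i := range w.buckets {
		w.buckets[i] = append(w.buckets[i], v)
	}
}

// Rotate empties the previous head bucket and advances the head. No merging
// of buckets is ever needed.
func (w *slidingWindow) Rotate() {
	w.buckets[w.head] = w.buckets[w.head][:0]
	w.head = (w.head + 1) % len(w.buckets)
}

// Quantile reads directly from the head bucket, as a scrape would.
func (w *slidingWindow) Quantile(q float64) float64 {
	head := append([]float64(nil), w.buckets[w.head]...)
	if len(head) == 0 {
		return 0
	}
	sort.Float64s(head)
	return head[int(q*float64(len(head)-1))]
}

func main() {
	w := newSlidingWindow(3)
	for i := 1; i <= 100; i++ {
		w.Observe(float64(i))
	}
	fmt.Println(w.Quantile(0.5)) // roughly the median of the observed values
}
```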
- if len(opts.Objectives) == 0 {
+ if opts.Objectives == nil {
opts.Objectives = DefObjectives
}
}
sort.Float64s(s.sortedObjectives)
- s.Init(s) // Init self-collection.
+ s.init(s) // Init self-collection.
return s
}
type summary struct {
- SelfCollector
+ selfCollector
bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
mtx sync.Mutex // Protects every other moving part.
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec.
type SummaryVec struct {
- MetricVec
+ *MetricVec
}
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
opts.ConstLabels,
)
return &SummaryVec{
- MetricVec: MetricVec{
- children: map[uint64]Metric{},
- desc: desc,
- newMetric: func(lvs ...string) Metric {
- return newSummary(desc, opts, lvs...)
- },
- },
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newSummary(desc, opts, lvs...)
+ }),
}
}
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Summary and not a
-// Metric so that no type conversion is required.
-func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
+// GetMetricWithLabelValues replaces the method of the same name in MetricVec.
+// The difference is that this method returns an Observer and not a Metric so
+// that no type conversion to an Observer is required.
+func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
- return metric.(Summary), err
+ return metric.(Observer), err
}
return nil, err
}
// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Summary and not a Metric so that no
-// type conversion is required.
-func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
+// difference is that this method returns an Observer and not a Metric so that
+// no type conversion to an Observer is required.
+func (m *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
metric, err := m.MetricVec.GetMetricWith(labels)
if metric != nil {
- return metric.(Summary), err
+ return metric.(Observer), err
}
return nil, err
}
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
// myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
- return m.MetricVec.WithLabelValues(lvs...).(Summary)
+func (m *SummaryVec) WithLabelValues(lvs ...string) Observer {
+ return m.MetricVec.WithLabelValues(lvs...).(Observer)
}
// With works as GetMetricWith, but panics where GetMetricWith would have
// returned an error. By not returning an error, With allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (m *SummaryVec) With(labels Labels) Summary {
- return m.MetricVec.With(labels).(Summary)
+func (m *SummaryVec) With(labels Labels) Observer {
+ return m.MetricVec.With(labels).(Observer)
}
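
A short usage sketch (names are hypothetical) of the Observer-returning vector methods above: code written against Observer works with a SummaryVec today and could switch to a HistogramVec without changes.

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// observeHandled only needs something that can Observe, so it works with a
// SummaryVec or a HistogramVec alike.
func observeHandled(o prometheus.Observer, d time.Duration) {
	o.Observe(d.Seconds())
}

func main() {
	latencies := prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name:       "example_request_duration_seconds",
			Help:       "Request duration in seconds.",
			Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},
		},
		[]string{"method"},
	)
	prometheus.MustRegister(latencies)

	// WithLabelValues now returns an Observer.
	observeHandled(latencies.WithLabelValues("GET"), 123*time.Millisecond)
}
```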
type constSummary struct {
--- /dev/null
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "time"
+
+// Timer is a helper type to time functions. Use NewTimer to create new
+// instances.
+type Timer struct {
+ begin time.Time
+ observer Observer
+}
+
+// NewTimer creates a new Timer. The provided Observer is used to observe a
+// duration in seconds. Timer is usually used to time a function call in the
+// following way:
+// func TimeMe() {
+// timer := NewTimer(myHistogram)
+// defer timer.ObserveDuration()
+// // Do actual work.
+// }
+func NewTimer(o Observer) *Timer {
+ return &Timer{
+ begin: time.Now(),
+ observer: o,
+ }
+}
+
+// ObserveDuration records the duration passed since the Timer was created with
+// NewTimer. It calls the Observe method of the Observer provided during
+// construction with the duration in seconds as an argument. ObserveDuration is
+// usually called with a defer statement.
+func (t *Timer) ObserveDuration() {
+ if t.observer != nil {
+ t.observer.Observe(time.Since(t.begin).Seconds())
+ }
+}
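
Beyond the doc comment above, a small hypothetical sketch (metric and function names are made up) combining Timer with another Observer, here a Histogram.

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var jobDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name:    "example_job_duration_seconds",
	Help:    "Duration of the example job in seconds.",
	Buckets: prometheus.DefBuckets,
})

func runJob() {
	// ObserveDuration runs when runJob returns, recording the elapsed time
	// in seconds into the histogram.
	timer := prometheus.NewTimer(jobDuration)
	defer timer.ObserveDuration()

	time.Sleep(50 * time.Millisecond) // stand-in for real work
}

func main() {
	prometheus.MustRegister(jobDuration)
	runJob()
}
```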
// no type information is implied.
//
// To create Untyped instances, use NewUntyped.
+//
+// Deprecated: The Untyped type is deprecated because it doesn't make sense in
+// direct instrumentation. If you need to mirror an external metric of unknown
+// type (usually while writing exporters), use MustNewConstMetric to create an
+// untyped metric instance on the fly.
type Untyped interface {
Metric
Collector
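
As the deprecation note suggests, exporters can mirror a metric of unknown type by creating an untyped const metric on the fly. A minimal sketch with made-up names:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// mirrorCollector mirrors a single value of unknown type from some external
// system as an untyped metric, created on the fly in Collect.
type mirrorCollector struct {
	desc *prometheus.Desc
}

func newMirrorCollector() *mirrorCollector {
	return &mirrorCollector{
		desc: prometheus.NewDesc(
			"example_mirrored_value",
			"A value mirrored from an external system (type unknown).",
			nil, nil,
		),
	}
}

func (c *mirrorCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

func (c *mirrorCollector) Collect(ch chan<- prometheus.Metric) {
	// In a real exporter the value would be read from the external system.
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.UntypedValue, 42)
}

func main() {
	prometheus.MustRegister(newMirrorCollector())
}
```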
// labels. This is used if you want to count the same thing partitioned by
// various dimensions. Create instances with NewUntypedVec.
type UntypedVec struct {
- MetricVec
+ *MetricVec
}
// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
opts.ConstLabels,
)
return &UntypedVec{
- MetricVec: MetricVec{
- children: map[uint64]Metric{},
- desc: desc,
- newMetric: func(lvs ...string) Metric {
- return newValue(desc, UntypedValue, 0, lvs...)
- },
- },
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newValue(desc, UntypedValue, 0, lvs...)
+ }),
}
}
"math"
"sort"
"sync/atomic"
+ "time"
dto "github.com/prometheus/client_model/go"
// ValueType. This is a low-level building block used by the library to back the
// implementations of Counter, Gauge, and Untyped.
type value struct {
- // valBits containst the bits of the represented float64 value. It has
+ // valBits contains the bits of the represented float64 value. It has
// to go first in the struct to guarantee alignment for atomic
// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
valBits uint64
- SelfCollector
+ selfCollector
desc *Desc
valType ValueType
valBits: math.Float64bits(val),
labelPairs: makeLabelPairs(desc, labelValues),
}
- result.Init(result)
+ result.init(result)
return result
}
atomic.StoreUint64(&v.valBits, math.Float64bits(val))
}
+func (v *value) SetToCurrentTime() {
+ v.Set(float64(time.Now().UnixNano()) / 1e9)
+}
+
func (v *value) Inc() {
v.Add(1)
}
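
SetToCurrentTime above is the low-level building block behind the corresponding method on the public Gauge type (assuming the Gauge interface exposes SetToCurrentTime, as it does in this version of the library). A tiny usage sketch with a hypothetical metric name, e.g. a "last successful run" timestamp:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

var lastSuccess = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "example_last_success_timestamp_seconds",
	Help: "Unix timestamp of the last successful run.",
})

func run() error {
	// ... do the actual work ...
	return nil
}

func main() {
	prometheus.MustRegister(lastSuccess)
	if err := run(); err == nil {
		// Records the current Unix time, in seconds, as a float64.
		lastSuccess.SetToCurrentTime()
	}
}
```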
// library to back the implementations of CounterFunc, GaugeFunc, and
// UntypedFunc.
type valueFunc struct {
- SelfCollector
+ selfCollector
desc *Desc
valType ValueType
function: function,
labelPairs: makeLabelPairs(desc, nil),
}
- result.Init(result)
+ result.init(result)
return result
}
import (
"fmt"
"sync"
+
+ "github.com/prometheus/common/model"
)
// MetricVec is a Collector to bundle metrics of the same name that
// provided in this package.
type MetricVec struct {
mtx sync.RWMutex // Protects the children.
- children map[uint64]Metric
+ children map[uint64][]metricWithLabelValues
desc *Desc
- newMetric func(labelValues ...string) Metric
+ newMetric func(labelValues ...string) Metric
+ hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling
+ hashAddByte func(h uint64, b byte) uint64
+}
+
+// newMetricVec returns an initialized MetricVec. The concrete value is
+// returned for embedding into another struct.
+func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
+ return &MetricVec{
+ children: map[uint64][]metricWithLabelValues{},
+ desc: desc,
+ newMetric: newMetric,
+ hashAdd: hashAdd,
+ hashAddByte: hashAddByte,
+ }
+}
+
+// metricWithLabelValues provides the metric and its label values for
+// disambiguation on hash collision.
+type metricWithLabelValues struct {
+ values []string
+ metric Metric
}
// Describe implements Collector. The length of the returned slice
m.mtx.RLock()
defer m.mtx.RUnlock()
- for _, metric := range m.children {
- ch <- metric
+ for _, metrics := range m.children {
+ for _, metric := range metrics {
+ ch <- metric.metric
+ }
}
}
return nil, err
}
- m.mtx.RLock()
- metric, ok := m.children[h]
- m.mtx.RUnlock()
- if ok {
- return metric, nil
- }
-
- m.mtx.Lock()
- defer m.mtx.Unlock()
- return m.getOrCreateMetric(h, lvs...), nil
+ return m.getOrCreateMetricWithLabelValues(h, lvs), nil
}
// GetMetricWith returns the Metric for the given Labels map (the label names
return nil, err
}
- m.mtx.RLock()
- metric, ok := m.children[h]
- m.mtx.RUnlock()
- if ok {
- return metric, nil
- }
-
- lvs := make([]string, len(labels))
- for i, label := range m.desc.variableLabels {
- lvs[i] = labels[label]
- }
- m.mtx.Lock()
- defer m.mtx.Unlock()
- return m.getOrCreateMetric(h, lvs...), nil
+ return m.getOrCreateMetricWithLabels(h, labels), nil
}
// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
if err != nil {
return false
}
- if _, ok := m.children[h]; !ok {
- return false
- }
- delete(m.children, h)
- return true
+ return m.deleteByHashWithLabelValues(h, lvs)
}
// Delete deletes the metric where the variable labels are the same as those
if err != nil {
return false
}
- if _, ok := m.children[h]; !ok {
+
+ return m.deleteByHashWithLabels(h, labels)
+}
+
+// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
+// there are multiple matches in the bucket, use lvs to select a metric and
+// remove only that metric.
+func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
+ metrics, ok := m.children[h]
+ if !ok {
return false
}
- delete(m.children, h)
+
+ i := m.findMetricWithLabelValues(metrics, lvs)
+ if i >= len(metrics) {
+ return false
+ }
+
+ if len(metrics) > 1 {
+ m.children[h] = append(metrics[:i], metrics[i+1:]...)
+ } else {
+ delete(m.children, h)
+ }
+ return true
+}
+
+// deleteByHashWithLabels removes the metric from the hash bucket h. If there
+// are multiple matches in the bucket, use labels to select a metric and remove
+// only that metric.
+func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
+ metrics, ok := m.children[h]
+ if !ok {
+ return false
+ }
+ i := m.findMetricWithLabels(metrics, labels)
+ if i >= len(metrics) {
+ return false
+ }
+
+ if len(metrics) > 1 {
+ m.children[h] = append(metrics[:i], metrics[i+1:]...)
+ } else {
+ delete(m.children, h)
+ }
return true
}
}
h := hashNew()
for _, val := range vals {
- h = hashAdd(h, val)
+ h = m.hashAdd(h, val)
+ h = m.hashAddByte(h, model.SeparatorByte)
}
return h, nil
}
if !ok {
return 0, fmt.Errorf("label name %q missing in label map", label)
}
- h = hashAdd(h, val)
+ h = m.hashAdd(h, val)
+ h = m.hashAddByte(h, model.SeparatorByte)
}
return h, nil
}
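
The SeparatorByte mixed in above prevents ambiguous concatenations: without it, the label value lists ("ab", "c") and ("a", "bc") would hash identically. A self-contained illustration using the standard library's FNV hash rather than the package's internal hashAdd helpers:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// hashLabelValues hashes a list of label values, optionally inserting a
// separator byte (0xff, mirroring model.SeparatorByte) after each value.
func hashLabelValues(values []string, withSeparator bool) uint64 {
	h := fnv.New64a()
	for _, v := range values {
		h.Write([]byte(v))
		if withSeparator {
			h.Write([]byte{0xff})
		}
	}
	return h.Sum64()
}

func main() {
	a := []string{"ab", "c"}
	b := []string{"a", "bc"}

	// Without a separator the two different label value lists collide.
	fmt.Println(hashLabelValues(a, false) == hashLabelValues(b, false)) // true
	// With a separator the byte sequences differ, so the hashes differ.
	fmt.Println(hashLabelValues(a, true) == hashLabelValues(b, true)) // false
}
```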
-func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric {
- metric, ok := m.children[hash]
+// getOrCreateMetricWithLabelValues retrieves the metric by hash and label
+// values or creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
+ m.mtx.RLock()
+ metric, ok := m.getMetricWithLabelValues(hash, lvs)
+ m.mtx.RUnlock()
+ if ok {
+ return metric
+ }
+
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ metric, ok = m.getMetricWithLabelValues(hash, lvs)
+ if !ok {
+		// Copy to avoid allocation in case we don't go down this code path.
+ copiedLVs := make([]string, len(lvs))
+ copy(copiedLVs, lvs)
+ metric = m.newMetric(copiedLVs...)
+ m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
+ }
+ return metric
+}
+
+// getOrCreateMetricWithLabels retrieves the metric by hash and labels or
+// creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
+ m.mtx.RLock()
+ metric, ok := m.getMetricWithLabels(hash, labels)
+ m.mtx.RUnlock()
+ if ok {
+ return metric
+ }
+
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ metric, ok = m.getMetricWithLabels(hash, labels)
if !ok {
- // Copy labelValues. Otherwise, they would be allocated even if we don't go
- // down this code path.
- copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...)
- metric = m.newMetric(copiedLabelValues...)
- m.children[hash] = metric
+ lvs := m.extractLabelValues(labels)
+ metric = m.newMetric(lvs...)
+ m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
}
return metric
}
+
+// getMetricWithLabelValues gets a metric while handling possible collisions in
+// the hash space. Must be called while holding read mutex.
+func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
+ metrics, ok := m.children[h]
+ if ok {
+ if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
+ return metrics[i].metric, true
+ }
+ }
+ return nil, false
+}
+
+// getMetricWithLabels gets a metric while handling possible collisions in
+// the hash space. Must be called while holding read mutex.
+func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
+ metrics, ok := m.children[h]
+ if ok {
+ if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
+ return metrics[i].metric, true
+ }
+ }
+ return nil, false
+}
+
+// findMetricWithLabelValues returns the index of the matching metric or
+// len(metrics) if not found.
+func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
+ for i, metric := range metrics {
+ if m.matchLabelValues(metric.values, lvs) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+// findMetricWithLabels returns the index of the matching metric or len(metrics)
+// if not found.
+func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
+ for i, metric := range metrics {
+ if m.matchLabels(metric.values, labels) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
+ if len(values) != len(lvs) {
+ return false
+ }
+ for i, v := range values {
+ if v != lvs[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
+ if len(labels) != len(values) {
+ return false
+ }
+ for i, k := range m.desc.variableLabels {
+ if values[i] != labels[k] {
+ return false
+ }
+ }
+ return true
+}
+
+func (m *MetricVec) extractLabelValues(labels Labels) []string {
+ labelValues := make([]string, len(labels))
+ for i, k := range m.desc.variableLabels {
+ labelValues[i] = labels[k]
+ }
+ return labelValues
+}
"ignore": "test",
"package": [
{
+ "path": "all",
+ "revision": ""
+ },
+ {
+ "checksumSHA1": "DWPL08pD/SQ2GzLfoR7ZXnjj7Sw=",
"path": "github.com/Sirupsen/logrus",
"revision": "4b6ea7319e214d98c938f12692336f7ca9348d6b",
"revisionTime": "2016-03-17T14:11:10Z"
},
{
+ "checksumSHA1": "4QnLdmB1kG3N+KlDd1N+G9TWAGQ=",
"path": "github.com/beorn7/perks/quantile",
"revision": "3ac7bf7a47d159a033b107610db8a1b6575507a4",
"revisionTime": "2016-02-29T22:34:45+01:00"
},
{
+ "checksumSHA1": "UQtJP2n/HEBhHaq6a9R2hCHgy+8=",
"path": "github.com/golang/protobuf/proto",
"revision": "8d92cf5fc15a4382f8964b08e1f42a75c0591aa3",
"revisionTime": "2016-03-19T05:57:00+11:00"
},
{
+ "checksumSHA1": "Q2vw4HZBbnU8BLFt8VrzStwqSJg=",
"path": "github.com/matttproud/golang_protobuf_extensions/pbutil",
"revision": "d0c3fe89de86839aecf2e0579c40ba3bb336a453",
"revisionTime": "2015-10-11T12:25:29+02:00"
"revisionTime": "2016-06-14T16:21:01Z"
},
{
+ "checksumSHA1": "X5E2qzbDix710dxoN9Qu0qeHGqU=",
"path": "github.com/prometheus/client_golang/prometheus",
- "revision": "90c15b5efa0dc32a7d259234e02ac9a99e6d3b82",
- "revisionTime": "2016-03-17T14:26:13+01:00"
+ "revision": "7d9484283ebefa862b5b7727d4344cfdf9a0d138",
+ "revisionTime": "2017-04-25T21:35:58Z"
+ },
+ {
+ "checksumSHA1": "tqpiUpHWFso3wcHDAPXoEhT2CoM=",
+ "path": "github.com/prometheus/client_golang/prometheus/promhttp",
+ "revision": "7d9484283ebefa862b5b7727d4344cfdf9a0d138",
+ "revisionTime": "2017-04-25T21:35:58Z"
},
{
+ "checksumSHA1": "DvwvOlPNAgRntBzt3b3OSRMS2N4=",
"path": "github.com/prometheus/client_model/go",
"revision": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6",
"revisionTime": "2015-02-12T10:17:44Z"
},
{
+ "checksumSHA1": "lxe3NRr1SukdE8Mf+xNGcTUR0NI=",
"path": "github.com/prometheus/common/config",
"revision": "167b27da48d058a9b46d84b834d67f68f0243f67",
"revisionTime": "2016-03-18T12:23:18Z"
},
{
+ "checksumSHA1": "Zsc9IQzQDkOzqiAITqxEm0JXdws=",
"path": "github.com/prometheus/common/expfmt",
"revision": "167b27da48d058a9b46d84b834d67f68f0243f67",
"revisionTime": "2016-03-18T12:23:18Z"
},
{
+ "checksumSHA1": "GWlM3d2vPYyNATtTFgftS10/A9w=",
"path": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
"revision": "167b27da48d058a9b46d84b834d67f68f0243f67",
"revisionTime": "2016-03-18T12:23:18Z"
"revisionTime": "2016-05-03T22:05:32Z"
},
{
+ "checksumSHA1": "IwxsYOL9A/K9rvHvGxfyY37dusk=",
"path": "github.com/prometheus/common/model",
"revision": "167b27da48d058a9b46d84b834d67f68f0243f67",
"revisionTime": "2016-03-18T12:23:18Z"
"revisionTime": "2016-05-03T22:05:32Z"
},
{
+ "checksumSHA1": "kO2MAzPuortudABaRd7fY+7EaoM=",
"path": "github.com/prometheus/procfs",
"revision": "406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8",
"revisionTime": "2015-10-29T15:50:50-04:00"
},
{
+ "checksumSHA1": "11zFB6NFMfY7R7pwOhDCurgIjJY=",
"path": "golang.org/x/net/icmp",
"revision": "31df19d69da8728e9220def59b80ee577c3e48bf",
"revisionTime": "2016-03-29T10:16:23+11:00"
},
{
+ "checksumSHA1": "yRuyntx9a59ugMi5NlN4ST0XRcI=",
"path": "golang.org/x/net/internal/iana",
"revision": "31df19d69da8728e9220def59b80ee577c3e48bf",
"revisionTime": "2016-03-29T10:16:23+11:00"
},
{
+ "checksumSHA1": "5Um0EwFCpweaWXfaslYqSfoWhV8=",
"path": "golang.org/x/net/ipv4",
"revision": "31df19d69da8728e9220def59b80ee577c3e48bf",
"revisionTime": "2016-03-29T10:16:23+11:00"
},
{
+ "checksumSHA1": "8iCMTIfuxZ2KU2dyb6hJmzm4CeA=",
"path": "golang.org/x/net/ipv6",
"revision": "31df19d69da8728e9220def59b80ee577c3e48bf",
"revisionTime": "2016-03-29T10:16:23+11:00"
},
{
+ "checksumSHA1": "85cxg2Rd2bSb8OLyjHqCSu2xbVY=",
"path": "golang.org/x/sys/unix",
"revision": "7a56174f0086b32866ebd746a794417edbc678a1",
"revisionTime": "2016-03-02T21:58:15+02:00"
},
{
+ "checksumSHA1": "+OgOXBoiQ+X+C2dsAeiOHwBIEH0=",
"path": "gopkg.in/yaml.v2",
"revision": "a83829b6f1293c91addabc89d0571c246397bbf4",
"revisionTime": "2016-03-01T17:40:22-03:00"