remove local telegraf plugins.inputs (#563)
parent 3df2536bb6
commit 3754e0cbe3
@@ -1,64 +0,0 @@
# GitHub Input Plugin

Gather repository information from [GitHub][] hosted repositories.

**Note:** Telegraf also contains the [webhook][] input which can be used as an
alternative method for collecting repository information.

### Configuration

```toml
[[inputs.github]]
  ## List of repositories to monitor
  repositories = [
    "influxdata/telegraf",
    "influxdata/influxdb"
  ]

  ## Github API access token. Unauthenticated requests are limited to 60 per hour.
  # access_token = ""

  ## Github API enterprise url. Github Enterprise accounts must specify their base url.
  # enterprise_base_url = ""

  ## Timeout for HTTP requests.
  # http_timeout = "5s"
```

### Metrics

- github_repository
  - tags:
    - name - The repository name
    - owner - The owner of the repository
    - language - The primary language of the repository
    - license - The license set for the repository
  - fields:
    - forks (int)
    - open_issues (int)
    - networks (int)
    - size (int)
    - subscribers (int)
    - stars (int)
    - watchers (int)

When the [internal][] input is enabled:

+ internal_github
  - tags:
    - access_token - An obfuscated reference to the configured access token or "Unauthenticated"
  - fields:
    - limit - How many requests you are limited to (per hour)
    - remaining - How many requests you have remaining (per hour)
    - blocks - How many requests have been blocked due to rate limit

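A minimal sketch (not part of the original plugin docs) of enabling the [internal][] plugin alongside this input so the `internal_github` rate-limit metrics above are emitted; the commented `collect_memstats` option is assumed to keep its Telegraf default and is optional:

```toml
# Emit Telegraf self-metrics, which include the internal_github measurement above
[[inputs.internal]]
  ## Assumed default; also gathers Go runtime memstats when true
  # collect_memstats = true
```
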
### Example Output

```
github_repository,language=Go,license=MIT\ License,name=telegraf,owner=influxdata forks=2679i,networks=2679i,open_issues=794i,size=23263i,stars=7091i,subscribers=316i,watchers=7091i 1563901372000000000
internal_github,access_token=Unauthenticated rate_limit_remaining=59i,rate_limit_limit=60i,rate_limit_blocks=0i 1552653551000000000
```

[GitHub]: https://www.github.com
[internal]: /plugins/inputs/internal
[webhook]: /plugins/inputs/webhooks/github
@@ -1,200 +0,0 @@
package github

import (
    "context"
    "fmt"
    "net/http"
    "strings"
    "sync"
    "time"

    "github.com/google/go-github/v32/github"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/selfstat"
    "golang.org/x/oauth2"
)

// GitHub - plugin main structure
type GitHub struct {
    Repositories      []string      `toml:"repositories"`
    AccessToken       string        `toml:"access_token"`
    EnterpriseBaseURL string        `toml:"enterprise_base_url"`
    HTTPTimeout       time.Duration `toml:"http_timeout"`
    githubClient      *github.Client

    obfuscatedToken string

    RateLimit       selfstat.Stat
    RateLimitErrors selfstat.Stat
    RateRemaining   selfstat.Stat
}

const sampleConfig = `
  ## List of repositories to monitor.
  repositories = [
    "influxdata/telegraf",
    "influxdata/influxdb"
  ]

  ## Github API access token. Unauthenticated requests are limited to 60 per hour.
  # access_token = ""

  ## Github API enterprise url. Github Enterprise accounts must specify their base url.
  # enterprise_base_url = ""

  ## Timeout for HTTP requests.
  # http_timeout = "5s"
`

// SampleConfig returns sample configuration for this plugin.
func (g *GitHub) SampleConfig() string {
    return sampleConfig
}

// Description returns the plugin description.
func (g *GitHub) Description() string {
    return "Gather repository information from GitHub hosted repositories."
}

// Create GitHub Client
func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error) {
    httpClient := &http.Client{
        Transport: &http.Transport{
            Proxy: http.ProxyFromEnvironment,
        },
        Timeout: g.HTTPTimeout,
    }

    g.obfuscatedToken = "Unauthenticated"

    if g.AccessToken != "" {
        tokenSource := oauth2.StaticTokenSource(
            &oauth2.Token{AccessToken: g.AccessToken},
        )
        oauthClient := oauth2.NewClient(ctx, tokenSource)
        _ = context.WithValue(ctx, oauth2.HTTPClient, oauthClient)

        g.obfuscatedToken = g.AccessToken[0:4] + "..." + g.AccessToken[len(g.AccessToken)-3:]

        return g.newGithubClient(oauthClient)
    }

    return g.newGithubClient(httpClient)
}

func (g *GitHub) newGithubClient(httpClient *http.Client) (*github.Client, error) {
    if g.EnterpriseBaseURL != "" {
        return github.NewEnterpriseClient(g.EnterpriseBaseURL, "", httpClient)
    }
    return github.NewClient(httpClient), nil
}

// Gather GitHub Metrics
func (g *GitHub) Gather(acc telegraf.Accumulator) error {
    ctx := context.Background()

    if g.githubClient == nil {
        githubClient, err := g.createGitHubClient(ctx)

        if err != nil {
            return err
        }

        g.githubClient = githubClient

        tokenTags := map[string]string{
            "access_token": g.obfuscatedToken,
        }

        g.RateLimitErrors = selfstat.Register("github", "rate_limit_blocks", tokenTags)
        g.RateLimit = selfstat.Register("github", "rate_limit_limit", tokenTags)
        g.RateRemaining = selfstat.Register("github", "rate_limit_remaining", tokenTags)
    }

    var wg sync.WaitGroup
    wg.Add(len(g.Repositories))

    for _, repository := range g.Repositories {
        go func(repositoryName string, acc telegraf.Accumulator) {
            defer wg.Done()

            owner, repository, err := splitRepositoryName(repositoryName)
            if err != nil {
                acc.AddError(err)
                return
            }

            repositoryInfo, response, err := g.githubClient.Repositories.Get(ctx, owner, repository)

            if _, ok := err.(*github.RateLimitError); ok {
                g.RateLimitErrors.Incr(1)
            }

            if err != nil {
                acc.AddError(err)
                return
            }

            g.RateLimit.Set(int64(response.Rate.Limit))
            g.RateRemaining.Set(int64(response.Rate.Remaining))

            now := time.Now()
            tags := getTags(repositoryInfo)
            fields := getFields(repositoryInfo)

            acc.AddFields("github_repository", fields, tags, now)
        }(repository, acc)
    }

    wg.Wait()
    return nil
}

func splitRepositoryName(repositoryName string) (string, string, error) {
    splits := strings.SplitN(repositoryName, "/", 2)

    if len(splits) != 2 {
        return "", "", fmt.Errorf("%v is not of format 'owner/repository'", repositoryName)
    }

    return splits[0], splits[1], nil
}

func getLicense(rI *github.Repository) string {
    if licenseName := rI.GetLicense().GetName(); licenseName != "" {
        return licenseName
    }

    return "None"
}

func getTags(repositoryInfo *github.Repository) map[string]string {
    return map[string]string{
        "owner":    repositoryInfo.GetOwner().GetLogin(),
        "name":     repositoryInfo.GetName(),
        "language": repositoryInfo.GetLanguage(),
        "license":  getLicense(repositoryInfo),
    }
}

func getFields(repositoryInfo *github.Repository) map[string]interface{} {
    return map[string]interface{}{
        "stars":       repositoryInfo.GetStargazersCount(),
        "subscribers": repositoryInfo.GetSubscribersCount(),
        "watchers":    repositoryInfo.GetWatchersCount(),
        "networks":    repositoryInfo.GetNetworkCount(),
        "forks":       repositoryInfo.GetForksCount(),
        "open_issues": repositoryInfo.GetOpenIssuesCount(),
        "size":        repositoryInfo.GetSize(),
    }
}

/*
func init() {
    inputs.Add("github", func() telegraf.Input {
        return &GitHub{
            HTTPTimeout: internal.Duration{Duration: time.Second * 5},
        }
    })
}
*/
@@ -1,140 +0,0 @@
package github

import (
    "net/http"
    "reflect"
    "testing"

    gh "github.com/google/go-github/v32/github"
    "github.com/stretchr/testify/require"
)

func TestNewGithubClient(t *testing.T) {
    httpClient := &http.Client{}
    g := &GitHub{}
    client, err := g.newGithubClient(httpClient)
    require.NoError(t, err)
    require.Contains(t, client.BaseURL.String(), "api.github.com")
    g.EnterpriseBaseURL = "api.example.com/"
    enterpriseClient, err := g.newGithubClient(httpClient)
    require.NoError(t, err)
    require.Contains(t, enterpriseClient.BaseURL.String(), "api.example.com")
}

func TestSplitRepositoryNameWithWorkingExample(t *testing.T) {
    var validRepositoryNames = []struct {
        fullName   string
        owner      string
        repository string
    }{
        {"influxdata/telegraf", "influxdata", "telegraf"},
        {"influxdata/influxdb", "influxdata", "influxdb"},
        {"rawkode/saltstack-dotfiles", "rawkode", "saltstack-dotfiles"},
    }

    for _, tt := range validRepositoryNames {
        t.Run(tt.fullName, func(t *testing.T) {
            owner, repository, _ := splitRepositoryName(tt.fullName)

            require.Equal(t, tt.owner, owner)
            require.Equal(t, tt.repository, repository)
        })
    }
}

func TestSplitRepositoryNameWithNoSlash(t *testing.T) {
    var invalidRepositoryNames = []string{
        "influxdata-influxdb",
    }

    for _, tt := range invalidRepositoryNames {
        t.Run(tt, func(t *testing.T) {
            _, _, err := splitRepositoryName(tt)

            require.Error(t, err)
        })
    }
}

func TestGetLicenseWhenExists(t *testing.T) {
    licenseName := "MIT"
    license := gh.License{Name: &licenseName}
    repository := gh.Repository{License: &license}

    getLicenseReturn := getLicense(&repository)

    require.Equal(t, "MIT", getLicenseReturn)
}

func TestGetLicenseWhenMissing(t *testing.T) {
    repository := gh.Repository{}

    getLicenseReturn := getLicense(&repository)

    require.Equal(t, "None", getLicenseReturn)
}

func TestGetTags(t *testing.T) {
    licenseName := "MIT"
    license := gh.License{Name: &licenseName}

    ownerName := "influxdata"
    owner := gh.User{Login: &ownerName}

    fullName := "influxdata/influxdb"
    repositoryName := "influxdb"

    language := "Go"

    repository := gh.Repository{
        FullName: &fullName,
        Name:     &repositoryName,
        License:  &license,
        Owner:    &owner,
        Language: &language,
    }

    getTagsReturn := getTags(&repository)

    correctTagsReturn := map[string]string{
        "owner":    ownerName,
        "name":     repositoryName,
        "language": language,
        "license":  licenseName,
    }

    require.Equal(t, true, reflect.DeepEqual(getTagsReturn, correctTagsReturn))
}

func TestGetFields(t *testing.T) {
    stars := 1
    forks := 2
    openIssues := 3
    size := 4
    subscribers := 5
    watchers := 6

    repository := gh.Repository{
        StargazersCount:  &stars,
        ForksCount:       &forks,
        OpenIssuesCount:  &openIssues,
        Size:             &size,
        NetworkCount:     &forks,
        SubscribersCount: &subscribers,
        WatchersCount:    &watchers,
    }

    getFieldsReturn := getFields(&repository)

    correctFieldReturn := make(map[string]interface{})

    correctFieldReturn["stars"] = 1
    correctFieldReturn["forks"] = 2
    correctFieldReturn["networks"] = 2
    correctFieldReturn["open_issues"] = 3
    correctFieldReturn["size"] = 4
    correctFieldReturn["subscribers"] = 5
    correctFieldReturn["watchers"] = 6

    require.Equal(t, true, reflect.DeepEqual(getFieldsReturn, correctFieldReturn))
}
File diff suppressed because one or more lines are too long
@@ -1,16 +0,0 @@
version: '3'
services:
  mongodb:
    image: mongo

  telegraf:
    image: glinton/scratch
    volumes:
      - ./telegraf.conf:/telegraf.conf
      - ../../../../telegraf:/telegraf
    depends_on:
      - mongodb
    entrypoint:
      - /telegraf
      - --config
      - /telegraf.conf
@@ -1,9 +0,0 @@
[agent]
  interval="1s"
  flush_interval="3s"

[[inputs.mongodb]]
  servers = ["mongodb://mongodb:27017"]

[[outputs.file]]
  files = ["stdout"]
@@ -1,200 +0,0 @@
package mongodb

import (
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "net"
    "net/url"
    "strings"
    "sync"
    "time"

    "github.com/influxdata/telegraf"
    tlsint "github.com/influxdata/telegraf/plugins/common/tls"
    "gopkg.in/mgo.v2"
)

type MongoDB struct {
    Servers             []string
    Ssl                 Ssl
    Mongos              map[string]*Server
    GatherClusterStatus bool
    GatherPerdbStats    bool
    GatherColStats      bool
    ColStatsDbs         []string
    tlsint.ClientConfig

    Log telegraf.Logger
}

type Ssl struct {
    Enabled bool
    CaCerts []string `toml:"cacerts"`
}

var sampleConfig = `
  ## An array of URLs of the form:
  ##   "mongodb://" [user ":" pass "@"] host [ ":" port]
  ## For example:
  ##   mongodb://user:auth_key@10.10.3.30:27017,
  ##   mongodb://10.10.3.33:18832,
  servers = ["mongodb://127.0.0.1:27017"]

  ## When true, collect cluster status
  ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which
  ## may have an impact on performance.
  # gather_cluster_status = true

  ## When true, collect per database stats
  # gather_perdb_stats = false

  ## When true, collect per collection stats
  # gather_col_stats = false

  ## List of db where collections stats are collected
  ## If empty, all db are concerned
  # col_stats_dbs = ["local"]

  ## Optional TLS Config
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false
`

func (m *MongoDB) SampleConfig() string {
    return sampleConfig
}

func (*MongoDB) Description() string {
    return "Read metrics from one or many MongoDB servers"
}

var localhost = &url.URL{Host: "mongodb://127.0.0.1:27017"}

// Reads stats from all configured servers accumulates stats.
// Returns one of the errors encountered while gather stats (if any).
func (m *MongoDB) Gather(acc telegraf.Accumulator) error {
    if len(m.Servers) == 0 {
        m.gatherServer(m.getMongoServer(localhost), acc)
        return nil
    }

    var wg sync.WaitGroup
    for i, serv := range m.Servers {
        if !strings.HasPrefix(serv, "mongodb://") {
            // Preserve backwards compatibility for hostnames without a
            // scheme, broken in go 1.8. Remove in Telegraf 2.0
            serv = "mongodb://" + serv
            m.Log.Warnf("Using %q as connection URL; please update your configuration to use an URL", serv)
            m.Servers[i] = serv
        }

        u, err := url.Parse(serv)
        if err != nil {
            m.Log.Errorf("Unable to parse address %q: %s", serv, err.Error())
            continue
        }
        if u.Host == "" {
            m.Log.Errorf("Unable to parse address %q", serv)
            continue
        }

        wg.Add(1)
        go func(srv *Server) {
            defer wg.Done()
            err := m.gatherServer(srv, acc)
            if err != nil {
                m.Log.Errorf("Error in plugin: %v", err)
            }
        }(m.getMongoServer(u))
    }

    wg.Wait()
    return nil
}

func (m *MongoDB) getMongoServer(url *url.URL) *Server {
    if _, ok := m.Mongos[url.Host]; !ok {
        m.Mongos[url.Host] = &Server{
            Log: m.Log,
            Url: url,
        }
    }
    return m.Mongos[url.Host]
}

func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error {
    if server.Session == nil {
        var dialAddrs []string
        if server.Url.User != nil {
            dialAddrs = []string{server.Url.String()}
        } else {
            dialAddrs = []string{server.Url.Host}
        }
        dialInfo, err := mgo.ParseURL(dialAddrs[0])
        if err != nil {
            return fmt.Errorf("unable to parse URL %q: %s", dialAddrs[0], err.Error())
        }
        dialInfo.Direct = true
        dialInfo.Timeout = 5 * time.Second

        var tlsConfig *tls.Config

        if m.Ssl.Enabled {
            // Deprecated TLS config
            tlsConfig = &tls.Config{}
            if len(m.Ssl.CaCerts) > 0 {
                roots := x509.NewCertPool()
                for _, caCert := range m.Ssl.CaCerts {
                    ok := roots.AppendCertsFromPEM([]byte(caCert))
                    if !ok {
                        return fmt.Errorf("failed to parse root certificate")
                    }
                }
                tlsConfig.RootCAs = roots
            } else {
                tlsConfig.InsecureSkipVerify = true
            }
        } else {
            tlsConfig, err = m.ClientConfig.TLSConfig()
            if err != nil {
                return err
            }
        }

        // If configured to use TLS, add a dial function
        if tlsConfig != nil {
            dialInfo.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) {
                conn, err := tls.Dial("tcp", addr.String(), tlsConfig)
                if err != nil {
                    fmt.Printf("error in Dial, %s\n", err.Error())
                }
                return conn, err
            }
        }

        sess, err := mgo.DialWithInfo(dialInfo)
        if err != nil {
            return fmt.Errorf("unable to connect to MongoDB: %s", err.Error())
        }
        server.Session = sess
    }
    return server.gatherData(acc, m.GatherClusterStatus, m.GatherPerdbStats, m.GatherColStats, m.ColStatsDbs)
}

/*
func init() {
    inputs.Add("mongodb", func() telegraf.Input {
        return &MongoDB{
            Mongos:              make(map[string]*Server),
            GatherClusterStatus: true,
            GatherPerdbStats:    false,
            GatherColStats:      false,
            ColStatsDbs:         []string{"local"},
        }
    })
}
*/
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,41 +0,0 @@
// +build integration

package mongodb

import (
    "testing"

    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestGetDefaultTags(t *testing.T) {
    var tagTests = []struct {
        in  string
        out string
    }{
        {"hostname", server.Url.Host},
    }
    defaultTags := server.getDefaultTags()
    for _, tt := range tagTests {
        if defaultTags[tt.in] != tt.out {
            t.Errorf("expected %q, got %q", tt.out, defaultTags[tt.in])
        }
    }
}

func TestAddDefaultStats(t *testing.T) {
    var acc testutil.Accumulator

    err := server.gatherData(&acc, false)
    require.NoError(t, err)

    // need to call this twice so it can perform the diff
    err = server.gatherData(&acc, false)
    require.NoError(t, err)

    for key := range DefaultStats {
        assert.True(t, acc.HasInt64Field("mongodb", key))
    }
}
@@ -1,71 +0,0 @@
// +build integration

package mongodb

import (
    "log"
    "math/rand"
    "net/url"
    "os"
    "testing"
    "time"

    "gopkg.in/mgo.v2"
)

var connect_url string
var server *Server

func init() {
    connect_url = os.Getenv("MONGODB_URL")
    if connect_url == "" {
        connect_url = "127.0.0.1:27017"
        server = &Server{Url: &url.URL{Host: connect_url}}
    } else {
        full_url, err := url.Parse(connect_url)
        if err != nil {
            log.Fatalf("Unable to parse URL (%s), %s\n", full_url, err.Error())
        }
        server = &Server{Url: full_url}
    }
}

func testSetup(m *testing.M) {
    var err error
    var dialAddrs []string
    if server.Url.User != nil {
        dialAddrs = []string{server.Url.String()}
    } else {
        dialAddrs = []string{server.Url.Host}
    }
    dialInfo, err := mgo.ParseURL(dialAddrs[0])
    if err != nil {
        log.Fatalf("Unable to parse URL (%s), %s\n", dialAddrs[0], err.Error())
    }
    dialInfo.Direct = true
    dialInfo.Timeout = 5 * time.Second
    sess, err := mgo.DialWithInfo(dialInfo)
    if err != nil {
        log.Fatalf("Unable to connect to MongoDB, %s\n", err.Error())
    }
    server.Session = sess
    server.Session, _ = mgo.Dial(server.Url.Host)
    if err != nil {
        log.Fatalln(err.Error())
    }
}

func testTeardown(m *testing.M) {
    server.Session.Close()
}

func TestMain(m *testing.M) {
    // seed randomness for use with tests
    rand.Seed(time.Now().UTC().UnixNano())

    testSetup(m)
    res := m.Run()
    testTeardown(m)

    os.Exit(res)
}
File diff suppressed because it is too large
@@ -1,205 +0,0 @@
package mongodb

import (
    "testing"
    //"time"

    //"github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
)

func TestLatencyStats(t *testing.T) {

    sl := NewStatLine(
        MongoStatus{
            ServerStatus: &ServerStatus{
                Connections: &ConnectionStats{},
                Mem: &MemStats{
                    Bits:              0,
                    Resident:          0,
                    Virtual:           0,
                    Supported:         false,
                    Mapped:            0,
                    MappedWithJournal: 0,
                },
            },
        },
        MongoStatus{
            ServerStatus: &ServerStatus{
                Connections: &ConnectionStats{},
                Mem: &MemStats{
                    Bits:              0,
                    Resident:          0,
                    Virtual:           0,
                    Supported:         false,
                    Mapped:            0,
                    MappedWithJournal: 0,
                },
                OpLatencies: &OpLatenciesStats{
                    Reads: &LatencyStats{
                        Ops:     0,
                        Latency: 0,
                    },
                    Writes: &LatencyStats{
                        Ops:     0,
                        Latency: 0,
                    },
                    Commands: &LatencyStats{
                        Ops:     0,
                        Latency: 0,
                    },
                },
            },
        },
        "foo",
        true,
        60,
    )

    assert.Equal(t, sl.CommandLatency, int64(0))
    assert.Equal(t, sl.ReadLatency, int64(0))
    assert.Equal(t, sl.WriteLatency, int64(0))
    assert.Equal(t, sl.CommandOpsCnt, int64(0))
    assert.Equal(t, sl.ReadOpsCnt, int64(0))
    assert.Equal(t, sl.WriteOpsCnt, int64(0))
}

func TestLatencyStatsDiffZero(t *testing.T) {

    sl := NewStatLine(
        MongoStatus{
            ServerStatus: &ServerStatus{
                Connections: &ConnectionStats{},
                Mem: &MemStats{
                    Bits:              0,
                    Resident:          0,
                    Virtual:           0,
                    Supported:         false,
                    Mapped:            0,
                    MappedWithJournal: 0,
                },
                OpLatencies: &OpLatenciesStats{
                    Reads: &LatencyStats{
                        Ops:     0,
                        Latency: 0,
                    },
                    Writes: &LatencyStats{
                        Ops:     0,
                        Latency: 0,
                    },
                    Commands: &LatencyStats{
                        Ops:     0,
                        Latency: 0,
                    },
                },
            },
        },
        MongoStatus{
            ServerStatus: &ServerStatus{
                Connections: &ConnectionStats{},
                Mem: &MemStats{
                    Bits:              0,
                    Resident:          0,
                    Virtual:           0,
                    Supported:         false,
                    Mapped:            0,
                    MappedWithJournal: 0,
                },
                OpLatencies: &OpLatenciesStats{
                    Reads: &LatencyStats{
                        Ops:     0,
                        Latency: 0,
                    },
                    Writes: &LatencyStats{
                        Ops:     0,
                        Latency: 0,
                    },
                    Commands: &LatencyStats{
                        Ops:     0,
                        Latency: 0,
                    },
                },
            },
        },
        "foo",
        true,
        60,
    )

    assert.Equal(t, sl.CommandLatency, int64(0))
    assert.Equal(t, sl.ReadLatency, int64(0))
    assert.Equal(t, sl.WriteLatency, int64(0))
    assert.Equal(t, sl.CommandOpsCnt, int64(0))
    assert.Equal(t, sl.ReadOpsCnt, int64(0))
    assert.Equal(t, sl.WriteOpsCnt, int64(0))
}

func TestLatencyStatsDiff(t *testing.T) {

    sl := NewStatLine(
        MongoStatus{
            ServerStatus: &ServerStatus{
                Connections: &ConnectionStats{},
                Mem: &MemStats{
                    Bits:              0,
                    Resident:          0,
                    Virtual:           0,
                    Supported:         false,
                    Mapped:            0,
                    MappedWithJournal: 0,
                },
                OpLatencies: &OpLatenciesStats{
                    Reads: &LatencyStats{
                        Ops:     4189041956,
                        Latency: 2255922322753,
                    },
                    Writes: &LatencyStats{
                        Ops:     1691019457,
                        Latency: 494478256915,
                    },
                    Commands: &LatencyStats{
                        Ops:     1019150402,
                        Latency: 59177710371,
                    },
                },
            },
        },
        MongoStatus{
            ServerStatus: &ServerStatus{
                Connections: &ConnectionStats{},
                Mem: &MemStats{
                    Bits:              0,
                    Resident:          0,
                    Virtual:           0,
                    Supported:         false,
                    Mapped:            0,
                    MappedWithJournal: 0,
                },
                OpLatencies: &OpLatenciesStats{
                    Reads: &LatencyStats{
                        Ops:     4189049884,
                        Latency: 2255946760057,
                    },
                    Writes: &LatencyStats{
                        Ops:     1691021287,
                        Latency: 494479456987,
                    },
                    Commands: &LatencyStats{
                        Ops:     1019152861,
                        Latency: 59177981552,
                    },
                },
            },
        },
        "foo",
        true,
        60,
    )

    assert.Equal(t, sl.CommandLatency, int64(59177981552))
    assert.Equal(t, sl.ReadLatency, int64(2255946760057))
    assert.Equal(t, sl.WriteLatency, int64(494479456987))
    assert.Equal(t, sl.CommandOpsCnt, int64(1019152861))
    assert.Equal(t, sl.ReadOpsCnt, int64(4189049884))
    assert.Equal(t, sl.WriteOpsCnt, int64(1691021287))
}
@@ -0,0 +1,20 @@
package mongodb

import (
    "testing"
    "time"

    "github.com/didi/nightingale/src/modules/monapi/plugins"
)

func TestCollect(t *testing.T) {
    input := plugins.PluginTest(t, &MongodbRule{
        Servers:             []string{"mongodb://root:root@127.0.0.1:27017"},
        GatherClusterStatus: true,
        GatherPerdbStats:    true,
        GatherColStats:      true,
    })

    time.Sleep(time.Second)
    plugins.PluginInputTest(t, input)
}
@@ -1,171 +0,0 @@
# Prometheus Input Plugin

The prometheus input plugin gathers metrics from HTTP servers exposing metrics
in Prometheus format.

### Configuration:

```toml
# Read metrics from one or many prometheus clients
[[inputs.prometheus]]
  ## An array of urls to scrape metrics from.
  urls = ["http://localhost:9100/metrics"]

  ## Metric version controls the mapping from Prometheus metrics into
  ## Telegraf metrics. When using the prometheus_client output, use the same
  ## value in both plugins to ensure metrics are round-tripped without
  ## modification.
  ##
  ## example: metric_version = 1; deprecated in 1.13
  ##          metric_version = 2; recommended version
  # metric_version = 1

  ## An array of Kubernetes services to scrape metrics from.
  # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]

  ## Kubernetes config file to create client from.
  # kube_config = "/path/to/kubernetes.config"

  ## Scrape Kubernetes pods for the following prometheus annotations:
  ## - prometheus.io/scrape: Enable scraping for this pod
  ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
  ##     set this to `https` & most likely set the tls config.
  ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
  ## - prometheus.io/port: If port is not 9102 use this annotation
  # monitor_kubernetes_pods = true
  ## Restricts Kubernetes monitoring to a single namespace
  ##   ex: monitor_kubernetes_pods_namespace = "default"
  # monitor_kubernetes_pods_namespace = ""
  # label selector to target pods which have the label
  # kubernetes_label_selector = "env=dev,app=nginx"
  # field selector to target pods
  # eg. To scrape pods on a specific node
  # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"

  ## Use bearer token for authorization. ('bearer_token' takes priority)
  # bearer_token = "/path/to/bearer/token"
  ## OR
  # bearer_token_string = "abc_123"

  ## HTTP Basic Authentication username and password. ('bearer_token' and
  ## 'bearer_token_string' take priority)
  # username = ""
  # password = ""

  ## Specify timeout duration for slower prometheus clients (default is 3s)
  # response_timeout = "3s"

  ## Optional TLS Config
  # tls_ca = /path/to/cafile
  # tls_cert = /path/to/certfile
  # tls_key = /path/to/keyfile
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false
```

`urls` can contain a unix socket as well. If a different path is required (default is `/metrics` for both http[s] and unix) for a unix socket, add `path` as a query parameter as follows: `unix:///var/run/prometheus.sock?path=/custom/metrics`

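For illustration only, the same option shown in context; the socket path below is a placeholder taken from the sentence above:

```toml
[[inputs.prometheus]]
  ## Hypothetical socket path; the ?path= query selects a non-default metrics path
  urls = ["unix:///var/run/prometheus.sock?path=/custom/metrics"]
```
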
#### Kubernetes Service Discovery

URLs listed in the `kubernetes_services` parameter will be expanded
by looking up all A records assigned to the hostname as described in
[Kubernetes DNS service discovery](https://kubernetes.io/docs/concepts/services-networking/service/#dns).

This method can be used to locate all
[Kubernetes headless services](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services).

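A minimal sketch of this option, reusing the placeholder service hostname from the configuration section above; each A record behind the headless-service name becomes its own scrape target:

```toml
[[inputs.prometheus]]
  ## Placeholder headless-service hostname; every resolved A record is scraped
  kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
```
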
#### Kubernetes scraping

Enabling this option allows the plugin to scrape Kubernetes pods for Prometheus annotations.
The plugin can run inside your Kubernetes cluster, or it can use a kubeconfig
file to determine which cluster to monitor.
Currently the following annotations are supported:

* `prometheus.io/scrape` Enable scraping for this pod.
* `prometheus.io/scheme` If the metrics endpoint is secured then you will need to set this to `https` & most likely set the tls config. (default 'http')
* `prometheus.io/path` Override the path for the metrics endpoint on the service. (default '/metrics')
* `prometheus.io/port` Used to override the port. (default 9102)

Using the `monitor_kubernetes_pods_namespace` option allows you to limit which pods you are scraping.

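A minimal sketch combining the pod-scraping options from the configuration section above; the namespace value and label selector are placeholders:

```toml
[[inputs.prometheus]]
  ## Watch annotated pods instead of (or in addition to) static urls
  monitor_kubernetes_pods = true
  ## Placeholder namespace; leave empty to watch all namespaces
  monitor_kubernetes_pods_namespace = "default"
  ## Optional placeholder label selector to narrow the watched pods
  # kubernetes_label_selector = "env=dev,app=nginx"
```
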
#### Bearer Token

If set, the file specified by the `bearer_token` parameter will be read on
each interval and its contents will be appended to the Bearer string in the
Authorization header.

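A sketch of the two ways to supply the token, using the placeholder values from the configuration section above; `bearer_token` takes priority if both are set:

```toml
[[inputs.prometheus]]
  urls = ["http://localhost:9100/metrics"]
  ## Read the token from a file on every interval...
  bearer_token = "/path/to/bearer/token"
  ## ...or embed the token string directly
  # bearer_token_string = "abc_123"
```
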
### Usage for Caddy HTTP server

If you want to monitor Caddy, you need to use Caddy with its Prometheus plugin:

* Download Caddy+Prometheus plugin [here](https://caddyserver.com/download/linux/amd64?plugins=http.prometheus)
* Add the `prometheus` directive in your `CaddyFile`
* Restart Caddy
* Configure Telegraf to fetch metrics on it:

```toml
[[inputs.prometheus]]
  ## An array of urls to scrape metrics from.
  urls = ["http://localhost:9180/metrics"]
```

> This is the default URL where Caddy Prometheus plugin will send data.
> For more details, please read the [Caddy Prometheus documentation](https://github.com/miekg/caddy-prometheus/blob/master/README.md).

### Metrics:

Measurement names are based on the Metric Family and tags are created for each
label. The value is added to a field named based on the metric type.

All metrics receive the `url` tag indicating the related URL specified in the
Telegraf configuration. If using Kubernetes service discovery the `address`
tag is also added indicating the discovered ip address.

### Example Output:

**Source**
```
# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 7.4545e-05
go_gc_duration_seconds{quantile="0.25"} 7.6999e-05
go_gc_duration_seconds{quantile="0.5"} 0.000277935
go_gc_duration_seconds{quantile="0.75"} 0.000706591
go_gc_duration_seconds{quantile="1"} 0.000706591
go_gc_duration_seconds_sum 0.00113607
go_gc_duration_seconds_count 4
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 15
# HELP cpu_usage_user Telegraf collected metric
# TYPE cpu_usage_user gauge
cpu_usage_user{cpu="cpu0"} 1.4112903225816156
cpu_usage_user{cpu="cpu1"} 0.702106318955865
cpu_usage_user{cpu="cpu2"} 2.0161290322588776
cpu_usage_user{cpu="cpu3"} 1.5045135406226022
```

**Output**
```
go_gc_duration_seconds,url=http://example.org:9273/metrics 1=0.001336611,count=14,sum=0.004527551,0=0.000057965,0.25=0.000083812,0.5=0.000286537,0.75=0.000365303 1505776733000000000
go_goroutines,url=http://example.org:9273/metrics gauge=21 1505776695000000000
cpu_usage_user,cpu=cpu0,url=http://example.org:9273/metrics gauge=1.513622603430151 1505776751000000000
cpu_usage_user,cpu=cpu1,url=http://example.org:9273/metrics gauge=5.829145728641773 1505776751000000000
cpu_usage_user,cpu=cpu2,url=http://example.org:9273/metrics gauge=2.119071644805144 1505776751000000000
cpu_usage_user,cpu=cpu3,url=http://example.org:9273/metrics gauge=1.5228426395944945 1505776751000000000
```

**Output (when metric_version = 2)**
```
prometheus,quantile=1,url=http://example.org:9273/metrics go_gc_duration_seconds=0.005574303 1556075100000000000
prometheus,quantile=0.75,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0001046 1556075100000000000
prometheus,quantile=0.5,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0000719 1556075100000000000
prometheus,quantile=0.25,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0000579 1556075100000000000
prometheus,quantile=0,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0000349 1556075100000000000
prometheus,url=http://example.org:9273/metrics go_gc_duration_seconds_count=324,go_gc_duration_seconds_sum=0.091340353 1556075100000000000
prometheus,url=http://example.org:9273/metrics go_goroutines=15 1556075100000000000
prometheus,cpu=cpu0,url=http://example.org:9273/metrics cpu_usage_user=1.513622603430151 1505776751000000000
prometheus,cpu=cpu1,url=http://example.org:9273/metrics cpu_usage_user=5.829145728641773 1505776751000000000
prometheus,cpu=cpu2,url=http://example.org:9273/metrics cpu_usage_user=2.119071644805144 1505776751000000000
prometheus,cpu=cpu3,url=http://example.org:9273/metrics cpu_usage_user=1.5228426395944945 1505776751000000000
```
@@ -1,237 +0,0 @@
package prometheus

import (
    "context"
    "log"
    "net"
    "net/url"
    "sync"
    "time"

    "github.com/ericchiang/k8s"
    corev1 "github.com/ericchiang/k8s/apis/core/v1"
    "github.com/ghodss/yaml"
)

type payload struct {
    eventype string
    pod      *corev1.Pod
}

// loadClient parses a kubeconfig from a file and returns a Kubernetes
// client. It does not support extensions or client auth providers.
func loadClient(kubeconfig string) (*k8s.Client, error) {
    // data, err := ioutil.ReadFile(kubeconfigPath)
    // if err != nil {
    //     return nil, fmt.Errorf("failed reading '%s': %v", kubeconfigPath, err)
    // }

    // Unmarshal YAML into a Kubernetes config object.
    var config k8s.Config
    if err := yaml.Unmarshal([]byte(kubeconfig), &config); err != nil {
        return nil, err
    }
    return k8s.NewClient(&config)
}

func (p *Prometheus) start(ctx context.Context) error {
    client, err := k8s.NewInClusterClient()
    if err != nil {
        // u, err := user.Current()
        // if err != nil {
        //     return fmt.Errorf("Failed to get current user - %v", err)
        // }

        // configLocation := filepath.Join(u.HomeDir, ".kube/config")
        // if p.KubeConfig != "" {
        //     configLocation = p.KubeConfig
        // }
        client, err = loadClient(p.KubeConfigContent)
        if err != nil {
            return err
        }
    }

    p.wg = sync.WaitGroup{}

    p.wg.Add(1)
    go func() {
        defer p.wg.Done()
        for {
            select {
            case <-ctx.Done():
                return
            case <-time.After(time.Second):
                err := p.watch(ctx, client)
                if err != nil {
                    p.Log.Errorf("Unable to watch resources: %s", err.Error())
                }
            }
        }
    }()

    return nil
}

// An edge case exists if a pod goes offline at the same time a new pod is created
// (without the scrape annotations). K8s may re-assign the old pod ip to the non-scrape
// pod, causing errors in the logs. This is only true if the pod going offline is not
// directed to do so by K8s.
func (p *Prometheus) watch(ctx context.Context, client *k8s.Client) error {

    selectors := podSelector(p)

    pod := &corev1.Pod{}
    watcher, err := client.Watch(ctx, p.PodNamespace, &corev1.Pod{}, selectors...)
    if err != nil {
        return err
    }
    defer watcher.Close()

    for {
        select {
        case <-ctx.Done():
            return nil
        default:
            pod = &corev1.Pod{}
            // An error here means we need to reconnect the watcher.
            eventType, err := watcher.Next(pod)
            if err != nil {
                return err
            }

            // If the pod is not "ready", there will be no ip associated with it.
            if pod.GetMetadata().GetAnnotations()["prometheus.io/scrape"] != "true" ||
                !podReady(pod.Status.GetContainerStatuses()) {
                continue
            }

            switch eventType {
            case k8s.EventAdded:
                registerPod(pod, p)
            case k8s.EventModified:
                // To avoid multiple actions for each event, unregister on the first event
                // in the delete sequence, when the containers are still "ready".
                if pod.Metadata.GetDeletionTimestamp() != nil {
                    unregisterPod(pod, p)
                } else {
                    registerPod(pod, p)
                }
            }
        }
    }
}

func podReady(statuss []*corev1.ContainerStatus) bool {
    if len(statuss) == 0 {
        return false
    }
    for _, cs := range statuss {
        if !cs.GetReady() {
            return false
        }
    }
    return true
}

func podSelector(p *Prometheus) []k8s.Option {
    options := []k8s.Option{}

    if len(p.KubernetesLabelSelector) > 0 {
        options = append(options, k8s.QueryParam("labelSelector", p.KubernetesLabelSelector))
    }

    if len(p.KubernetesFieldSelector) > 0 {
        options = append(options, k8s.QueryParam("fieldSelector", p.KubernetesFieldSelector))
    }

    return options

}

func registerPod(pod *corev1.Pod, p *Prometheus) {
    if p.kubernetesPods == nil {
        p.kubernetesPods = map[string]URLAndAddress{}
    }
    targetURL := getScrapeURL(pod)
    if targetURL == nil {
        return
    }

    log.Printf("D! [inputs.prometheus] will scrape metrics from %q", *targetURL)
    // add annotation as metrics tags
    tags := pod.GetMetadata().GetAnnotations()
    if tags == nil {
        tags = map[string]string{}
    }
    tags["pod_name"] = pod.GetMetadata().GetName()
    tags["namespace"] = pod.GetMetadata().GetNamespace()
    // add labels as metrics tags
    for k, v := range pod.GetMetadata().GetLabels() {
        tags[k] = v
    }
    URL, err := url.Parse(*targetURL)
    if err != nil {
        log.Printf("E! [inputs.prometheus] could not parse URL %q: %s", *targetURL, err.Error())
        return
    }
    podURL := p.AddressToURL(URL, URL.Hostname())
    p.lock.Lock()
    p.kubernetesPods[podURL.String()] = URLAndAddress{
        URL:         podURL,
        Address:     URL.Hostname(),
        OriginalURL: URL,
        Tags:        tags,
    }
    p.lock.Unlock()
}

func getScrapeURL(pod *corev1.Pod) *string {
    ip := pod.Status.GetPodIP()
    if ip == "" {
        // return as if scrape was disabled, we will be notified again once the pod
        // has an IP
        return nil
    }

    scheme := pod.GetMetadata().GetAnnotations()["prometheus.io/scheme"]
    path := pod.GetMetadata().GetAnnotations()["prometheus.io/path"]
    port := pod.GetMetadata().GetAnnotations()["prometheus.io/port"]

    if scheme == "" {
        scheme = "http"
    }
    if port == "" {
        port = "9102"
    }
    if path == "" {
        path = "/metrics"
    }

    u := &url.URL{
        Scheme: scheme,
        Host:   net.JoinHostPort(ip, port),
        Path:   path,
    }

    x := u.String()

    return &x
}

func unregisterPod(pod *corev1.Pod, p *Prometheus) {
    url := getScrapeURL(pod)
    if url == nil {
        return
    }

    log.Printf("D! [inputs.prometheus] registered a delete request for %q in namespace %q",
        pod.GetMetadata().GetName(), pod.GetMetadata().GetNamespace())

    p.lock.Lock()
    defer p.lock.Unlock()
    if _, ok := p.kubernetesPods[*url]; ok {
        delete(p.kubernetesPods, *url)
        log.Printf("D! [inputs.prometheus] will stop scraping for %q", *url)
    }
}
@@ -1,155 +0,0 @@
package prometheus

import (
    "github.com/ericchiang/k8s"
    "testing"

    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"

    v1 "github.com/ericchiang/k8s/apis/core/v1"
    metav1 "github.com/ericchiang/k8s/apis/meta/v1"
)

func TestScrapeURLNoAnnotations(t *testing.T) {
    p := &v1.Pod{Metadata: &metav1.ObjectMeta{}}
    p.GetMetadata().Annotations = map[string]string{}
    url := getScrapeURL(p)
    assert.Nil(t, url)
}

func TestScrapeURLAnnotationsNoScrape(t *testing.T) {
    p := &v1.Pod{Metadata: &metav1.ObjectMeta{}}
    p.Metadata.Name = str("myPod")
    p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "false"}
    url := getScrapeURL(p)
    assert.Nil(t, url)
}

func TestScrapeURLAnnotations(t *testing.T) {
    p := pod()
    p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"}
    url := getScrapeURL(p)
    assert.Equal(t, "http://127.0.0.1:9102/metrics", *url)
}

func TestScrapeURLAnnotationsCustomPort(t *testing.T) {
    p := pod()
    p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/port": "9000"}
    url := getScrapeURL(p)
    assert.Equal(t, "http://127.0.0.1:9000/metrics", *url)
}

func TestScrapeURLAnnotationsCustomPath(t *testing.T) {
    p := pod()
    p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "mymetrics"}
    url := getScrapeURL(p)
    assert.Equal(t, "http://127.0.0.1:9102/mymetrics", *url)
}

func TestScrapeURLAnnotationsCustomPathWithSep(t *testing.T) {
    p := pod()
    p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/mymetrics"}
    url := getScrapeURL(p)
    assert.Equal(t, "http://127.0.0.1:9102/mymetrics", *url)
}

func TestAddPod(t *testing.T) {
    prom := &Prometheus{Log: testutil.Logger{}}

    p := pod()
    p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"}
    registerPod(p, prom)
    assert.Equal(t, 1, len(prom.kubernetesPods))
}

func TestAddMultipleDuplicatePods(t *testing.T) {
    prom := &Prometheus{Log: testutil.Logger{}}

    p := pod()
    p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"}
    registerPod(p, prom)
    p.Metadata.Name = str("Pod2")
    registerPod(p, prom)
    assert.Equal(t, 1, len(prom.kubernetesPods))
}

func TestAddMultiplePods(t *testing.T) {
    prom := &Prometheus{Log: testutil.Logger{}}

    p := pod()
    p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"}
    registerPod(p, prom)
    p.Metadata.Name = str("Pod2")
    p.Status.PodIP = str("127.0.0.2")
    registerPod(p, prom)
    assert.Equal(t, 2, len(prom.kubernetesPods))
}

func TestDeletePods(t *testing.T) {
    prom := &Prometheus{Log: testutil.Logger{}}

    p := pod()
    p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"}
    registerPod(p, prom)
    unregisterPod(p, prom)
    assert.Equal(t, 0, len(prom.kubernetesPods))
}

func TestPodSelector(t *testing.T) {

    cases := []struct {
        expected      []k8s.Option
        labelselector string
        fieldselector string
    }{
        {
            expected: []k8s.Option{
                k8s.QueryParam("labelSelector", "key1=val1,key2=val2,key3"),
                k8s.QueryParam("fieldSelector", "spec.nodeName=ip-1-2-3-4.acme.com"),
            },
            labelselector: "key1=val1,key2=val2,key3",
            fieldselector: "spec.nodeName=ip-1-2-3-4.acme.com",
        },
        {
            expected: []k8s.Option{
                k8s.QueryParam("labelSelector", "key1"),
                k8s.QueryParam("fieldSelector", "spec.nodeName=ip-1-2-3-4.acme.com"),
            },
            labelselector: "key1",
            fieldselector: "spec.nodeName=ip-1-2-3-4.acme.com",
        },
        {
            expected: []k8s.Option{
                k8s.QueryParam("labelSelector", "key1"),
                k8s.QueryParam("fieldSelector", "somefield"),
            },
            labelselector: "key1",
            fieldselector: "somefield",
        },
    }

    for _, c := range cases {
        prom := &Prometheus{
            Log:                     testutil.Logger{},
            KubernetesLabelSelector: c.labelselector,
            KubernetesFieldSelector: c.fieldselector,
        }

        output := podSelector(prom)

        assert.Equal(t, len(output), len(c.expected))
    }
}

func pod() *v1.Pod {
    p := &v1.Pod{Metadata: &metav1.ObjectMeta{}, Status: &v1.PodStatus{}, Spec: &v1.PodSpec{}}
    p.Status.PodIP = str("127.0.0.1")
    p.Metadata.Name = str("myPod")
    p.Metadata.Namespace = str("default")
    return p
}

func str(x string) *string {
    return &x
}
File diff suppressed because it is too large
@@ -1,167 +0,0 @@
package prometheus

import (
    "net/http"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
)

var exptime = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)

const validUniqueGauge = `# HELP cadvisor_version_info A metric with a constant '1' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision.
# TYPE cadvisor_version_info gauge
cadvisor_version_info{cadvisorRevision="",cadvisorVersion="",dockerVersion="1.8.2",kernelVersion="3.10.0-229.20.1.el7.x86_64",osVersion="CentOS Linux 7 (Core)"} 1
`

const validUniqueCounter = `# HELP get_token_fail_count Counter of failed Token() requests to the alternate token source
# TYPE get_token_fail_count counter
get_token_fail_count 0
`

const validUniqueLine = `# HELP get_token_fail_count Counter of failed Token() requests to the alternate token source
`

const validUniqueSummary = `# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
# TYPE http_request_duration_microseconds summary
http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 552048.506
http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 5.876804288e+06
http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 5.876804288e+06
http_request_duration_microseconds_sum{handler="prometheus"} 1.8909097205e+07
http_request_duration_microseconds_count{handler="prometheus"} 9
`

const validUniqueHistogram = `# HELP apiserver_request_latencies Response latency distribution in microseconds for each verb, resource and client.
# TYPE apiserver_request_latencies histogram
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="125000"} 1994
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="250000"} 1997
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="500000"} 2000
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="1e+06"} 2005
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="2e+06"} 2012
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="4e+06"} 2017
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="8e+06"} 2024
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="+Inf"} 2025
apiserver_request_latencies_sum{resource="bindings",verb="POST"} 1.02726334e+08
apiserver_request_latencies_count{resource="bindings",verb="POST"} 2025
`

const validData = `# HELP cadvisor_version_info A metric with a constant '1' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision.
# TYPE cadvisor_version_info gauge
cadvisor_version_info{cadvisorRevision="",cadvisorVersion="",dockerVersion="1.8.2",kernelVersion="3.10.0-229.20.1.el7.x86_64",osVersion="CentOS Linux 7 (Core)"} 1
# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 0.013534896000000001
go_gc_duration_seconds{quantile="0.25"} 0.02469263
go_gc_duration_seconds{quantile="0.5"} 0.033727822000000005
go_gc_duration_seconds{quantile="0.75"} 0.03840335
go_gc_duration_seconds{quantile="1"} 0.049956604
go_gc_duration_seconds_sum 1970.341293002
go_gc_duration_seconds_count 65952
# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
# TYPE http_request_duration_microseconds summary
http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 552048.506
http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 5.876804288e+06
http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 5.876804288e+06
http_request_duration_microseconds_sum{handler="prometheus"} 1.8909097205e+07
http_request_duration_microseconds_count{handler="prometheus"} 9
# HELP get_token_fail_count Counter of failed Token() requests to the alternate token source
# TYPE get_token_fail_count counter
get_token_fail_count 0
# HELP apiserver_request_latencies Response latency distribution in microseconds for each verb, resource and client.
# TYPE apiserver_request_latencies histogram
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="125000"} 1994
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="250000"} 1997
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="500000"} 2000
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="1e+06"} 2005
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="2e+06"} 2012
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="4e+06"} 2017
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="8e+06"} 2024
apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="+Inf"} 2025
apiserver_request_latencies_sum{resource="bindings",verb="POST"} 1.02726334e+08
apiserver_request_latencies_count{resource="bindings",verb="POST"} 2025
`

const prometheusMulti = `
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`

const prometheusMultiSomeInvalid = `
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu3, host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu4 , usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`

func TestParseValidPrometheus(t *testing.T) {
    // Gauge value
    metrics, err := Parse([]byte(validUniqueGauge), http.Header{})
    assert.NoError(t, err)
    assert.Len(t, metrics, 1)
    assert.Equal(t, "cadvisor_version_info", metrics[0].Name())
    assert.Equal(t, map[string]interface{}{
        "gauge": float64(1),
    }, metrics[0].Fields())
    assert.Equal(t, map[string]string{
        "osVersion":        "CentOS Linux 7 (Core)",
        "cadvisorRevision": "",
        "cadvisorVersion":  "",
        "dockerVersion":    "1.8.2",
        "kernelVersion":    "3.10.0-229.20.1.el7.x86_64",
    }, metrics[0].Tags())

    // Counter value
    metrics, err = Parse([]byte(validUniqueCounter), http.Header{})
    assert.NoError(t, err)
    assert.Len(t, metrics, 1)
    assert.Equal(t, "get_token_fail_count", metrics[0].Name())
    assert.Equal(t, map[string]interface{}{
        "counter": float64(0),
    }, metrics[0].Fields())
    assert.Equal(t, map[string]string{}, metrics[0].Tags())

    // Summary data
    //SetDefaultTags(map[string]string{})
    metrics, err = Parse([]byte(validUniqueSummary), http.Header{})
    assert.NoError(t, err)
    assert.Len(t, metrics, 1)
    assert.Equal(t, "http_request_duration_microseconds", metrics[0].Name())
    assert.Equal(t, map[string]interface{}{
        "0.5":   552048.506,
        "0.9":   5.876804288e+06,
        "0.99":  5.876804288e+06,
        "count": 9.0,
        "sum":   1.8909097205e+07,
    }, metrics[0].Fields())
    assert.Equal(t, map[string]string{"handler": "prometheus"}, metrics[0].Tags())

    // histogram data
    metrics, err = Parse([]byte(validUniqueHistogram), http.Header{})
    assert.NoError(t, err)
    assert.Len(t, metrics, 1)
    assert.Equal(t, "apiserver_request_latencies", metrics[0].Name())
    assert.Equal(t, map[string]interface{}{
        "500000": 2000.0,
        "count":  2025.0,
        "sum":    1.02726334e+08,
        "250000": 1997.0,
        "2e+06":  2012.0,
        "4e+06":  2017.0,
        "8e+06":  2024.0,
        "+Inf":   2025.0,
        "125000": 1994.0,
        "1e+06":  2005.0,
    }, metrics[0].Fields())
    assert.Equal(t,
        map[string]string{"verb": "POST", "resource": "bindings"},
        metrics[0].Tags())

}
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff.