From 59366e4d3a73c27ca7c79170f2e5aca269598c5c Mon Sep 17 00:00:00 2001 From: qinyening <710leo@gmail.com> Date: Tue, 13 Apr 2021 11:38:40 +0800 Subject: [PATCH] =?UTF-8?q?=E5=8F=91=E5=B8=83v4=E7=89=88=E6=9C=AC=20(#651)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * init --- .gitignore | 10 - changelog | 16 +- control | 49 +- etc/address.yml | 47 +- etc/{agent.yml => agentd.yml} | 5 +- etc/ams.yml | 12 - etc/dict.json | 119 +- etc/identity.yml | 4 +- etc/index.yml | 4 - etc/job.yml | 15 - etc/json/stra.json | 410 --- etc/judge.yml | 18 - etc/monapi.yml | 47 - etc/nginx.conf | 12 +- etc/password-changed-email.tpl | 2 +- etc/rdb.yml | 110 - etc/screen/n9e_mudules | 88 - etc/server.yml | 172 ++ etc/service/{agent.service => agentd.service} | 2 +- etc/service/job.service | 21 - etc/service/judge.service | 21 - etc/service/monapi.service | 21 - etc/service/{index.service => prober.service} | 4 +- etc/service/rdb.service | 21 - etc/service/{ams.service => server.service} | 4 +- etc/service/transfer.service | 21 - etc/service/tsdb.service | 21 - etc/transfer.yml | 52 - etc/tsdb.yml | 8 - go.mod | 14 +- go.sum | 24 +- sql/n9e_mon.sql | 61 +- src/common/address/address.go | 2 + src/{modules/agent => common}/client/cli.go | 31 +- src/common/compress/compress.go | 48 + src/common/dataobj/judge.go | 2 +- src/common/dataobj/snmp.go | 5 + src/common/dataobj/tsdb.go | 4 +- src/{toolkits => common}/exit/exit.go | 0 src/{toolkits => common}/go-tdigest/LICENSE | 0 src/{toolkits => common}/go-tdigest/README.md | 0 .../go-tdigest/serialization.go | 0 .../go-tdigest/serialization_test.go | 0 .../go-tdigest/summary.go | 0 .../go-tdigest/summary_test.go | 0 .../go-tdigest/tdigest.go | 0 .../go-tdigest/tdigest_test.go | 0 src/{toolkits => common}/i18n/i18n.go | 0 src/common/loggeri/logger.go | 28 - src/common/loggeri/loggeri.go | 38 + src/common/ping/ping.go | 76 + src/{toolkits => common}/pools/opentsdb.go | 0 src/{toolkits => 
common}/pools/pools.go | 0 src/common/report/report.go | 104 +- src/{toolkits => common}/slice/slice.go | 51 + src/{toolkits => common}/stack/stack.go | 0 src/{toolkits => common}/stats/counter.go | 0 src/{toolkits => common}/stats/init.go | 8 +- src/{toolkits => common}/str/checksum.go | 0 src/{toolkits => common}/str/format.go | 5 +- src/{toolkits => common}/str/parser.go | 0 src/{toolkits => common}/str/slice.go | 0 src/models/host_register.go | 215 ++ src/models/init.go | 2 +- src/models/{mon_hbs.go => instance.go} | 43 +- src/models/ldap.go | 33 +- src/models/mon_aggr.go | 4 +- src/models/mon_collect.go | 109 + src/models/mon_collect_rule.go | 8 +- src/models/mon_event.go | 9 +- src/models/nems_mib.go | 134 + src/models/nems_nethw.go | 298 ++ src/models/node.go | 4 +- ...ield_value.go => node_cate_field_value.go} | 0 src/models/node_resource.go | 2 +- src/models/node_role.go | 1 - src/models/{captcha.go => rdb_captcha.go} | 0 src/models/{configs.go => rdb_configs.go} | 0 src/models/{session.go => rdb_session.go} | 0 .../{role_operation.go => role_opration.go} | 0 src/models/user.go | 15 +- src/models/{stats.go => user_stats.go} | 0 src/modules/agent/stra/cron.go | 81 - .../{agent/agent.go => agentd/agentd.go} | 36 +- src/modules/{agent => agentd}/cache/cache.go | 2 +- .../{agent => agentd}/config/config.go | 12 +- src/modules/{agent => agentd}/core/clients.go | 0 src/modules/{agent => agentd}/core/common.go | 2 +- .../{agent/client => agentd/core}/meta.go | 11 +- src/modules/{agent => agentd}/core/push.go | 14 +- .../{agent => agentd}/http/http_server.go | 8 +- src/modules/{agent => agentd}/http/router.go | 0 .../http/router_collector.go | 12 +- .../{agent => agentd}/http/router_endpoint.go | 3 +- .../{agent => agentd}/http/router_executor.go | 6 +- .../{agent => agentd}/http/router_funcs.go | 0 .../{agent => agentd}/http/router_health.go | 0 .../{agent => agentd}/log/reader/reader.go | 0 .../log/reader/reader_test.go | 0 .../log/reader/reader_util.go | 0 
.../log/reader/reader_util_test.go | 0 .../log/strategy/strategy.go | 5 +- .../{agent => agentd}/log/worker/cached.go | 4 +- .../{agent => agentd}/log/worker/control.go | 8 +- .../log/worker/control_test.go | 0 .../{agent => agentd}/log/worker/counter.go | 6 +- .../{agent => agentd}/log/worker/push.go | 10 +- .../{agent => agentd}/log/worker/worker.go | 10 +- .../log/worker/worker_test.go | 0 .../{agent => agentd}/report/report.go | 53 +- .../{agent => agentd}/statsd/aggr_config.go | 0 .../{agent => agentd}/statsd/aggr_counter.go | 0 .../statsd/aggr_counter_e.go | 0 .../{agent => agentd}/statsd/aggr_gauge.go | 0 .../statsd/aggr_histogram.go | 2 +- .../statsd/aggr_interface.go | 0 .../{agent => agentd}/statsd/aggr_ratio.go | 0 .../{agent => agentd}/statsd/aggr_rpc.go | 0 .../{agent => agentd}/statsd/aggr_rpc_e.go | 0 src/modules/{agent => agentd}/statsd/clock.go | 0 .../{agent => agentd}/statsd/statsd.go | 0 .../statsd/statsd_receiver.go | 2 +- .../statsd/statsd_reporter.go | 12 +- .../{agent => agentd}/statsd/statsd_state.go | 2 +- src/modules/{agent => agentd}/statsd/utils.go | 0 src/modules/agentd/stra/cron.go | 46 + src/modules/{agent => agentd}/stra/init.go | 2 +- src/modules/{agent => agentd}/stra/log.go | 13 +- .../{agent => agentd}/stra/log_test.go | 0 src/modules/{agent => agentd}/stra/port.go | 8 +- src/modules/{agent => agentd}/stra/proc.go | 8 +- src/modules/{agent => agentd}/sys/config.go | 0 .../{agent => agentd}/sys/funcs/collector.go | 4 +- .../{agent => agentd}/sys/funcs/cpustat.go | 6 +- .../{agent => agentd}/sys/funcs/cron.go | 8 +- .../{agent => agentd}/sys/funcs/dfstat.go | 8 +- .../{agent => agentd}/sys/funcs/diskstat.go | 6 +- .../{agent => agentd}/sys/funcs/fsstat.go | 8 +- .../{agent => agentd}/sys/funcs/funcs.go | 4 +- .../{agent => agentd}/sys/funcs/ifstat.go | 6 +- .../{agent => agentd}/sys/funcs/loadavg.go | 6 +- .../{agent => agentd}/sys/funcs/meminfo.go | 6 +- .../{agent => agentd}/sys/funcs/netfilter.go | 6 +- .../{agent => 
agentd}/sys/funcs/ntp.go | 6 +- .../{agent => agentd}/sys/funcs/snmp.go | 6 +- .../{agent => agentd}/sys/funcs/sockstas.go | 6 +- .../{agent => agentd}/sys/funcs/sys.go | 6 +- .../{agent => agentd}/sys/plugins/cron.go | 0 .../{agent => agentd}/sys/plugins/plugin.go | 0 .../{agent => agentd}/sys/plugins/reader.go | 6 +- .../sys/plugins/scheduler.go | 6 +- .../{agent => agentd}/sys/ports/cron.go | 2 +- .../{agent => agentd}/sys/ports/port.go | 2 +- .../{agent => agentd}/sys/ports/scheduler.go | 10 +- .../{agent => agentd}/sys/procs/cron.go | 2 +- .../{agent => agentd}/sys/procs/proc.go | 2 +- .../{agent => agentd}/sys/procs/scheduler.go | 8 +- .../{agent => agentd}/sys/procs/sys.go | 0 .../{agent => agentd}/timer/heartbeat.go | 10 +- src/modules/{agent => agentd}/timer/killer.go | 4 +- src/modules/{agent => agentd}/timer/task.go | 8 +- src/modules/{agent => agentd}/timer/tasks.go | 4 +- src/modules/{agent => agentd}/udp/handler.go | 4 +- src/modules/{agent => agentd}/udp/udp.go | 2 +- src/modules/ams/ams.go | 86 - src/modules/ams/config/yaml.go | 66 - src/modules/ams/http/http_middleware.go | 97 - src/modules/ams/http/http_server.go | 70 - src/modules/ams/http/router.go | 36 - src/modules/ams/http/router_funcs.go | 226 -- src/modules/ams/http/router_health.go | 7 - src/modules/index/cache/counter_map.go | 54 - src/modules/index/cache/endpoint_map.go | 150 - src/modules/index/cache/indexdb.go | 346 --- src/modules/index/cache/metric_map.go | 128 - src/modules/index/cache/tag.go | 153 - src/modules/index/cache/tag_map.go | 99 - src/modules/index/config/config.go | 85 - .../index/http/routes/health_router.go | 51 - src/modules/index/http/routes/index_router.go | 624 ---- src/modules/index/http/routes/routes.go | 29 - src/modules/index/index.go | 119 - src/modules/index/rpc/push.go | 47 - src/modules/index/rpc/rpc.go | 52 - src/modules/job/config/config.go | 74 - src/modules/job/http/http_middleware.go | 97 - src/modules/job/http/http_server.go | 70 - 
src/modules/job/http/router.go | 43 - src/modules/job/http/router_health.go | 7 - src/modules/job/job.go | 112 - src/modules/job/rpc/rpc.go | 45 - src/modules/judge/backend/redi/funcs.go | 46 - src/modules/judge/backend/redi/redis.go | 96 - src/modules/judge/config/config.go | 94 - src/modules/judge/http/http.go | 65 - src/modules/judge/http/routes/health.go | 47 - src/modules/judge/http/routes/routes.go | 43 - src/modules/judge/judge.go | 137 - src/modules/judge/readme.md | 2 - src/modules/judge/rpc/push.go | 45 - src/modules/judge/rpc/rpc.go | 68 - src/modules/judge/stra/stra.go | 101 - src/modules/monapi/acache/init.go | 6 - src/modules/monapi/acache/stra.go | 35 - src/modules/monapi/config/const.go | 16 - src/modules/monapi/config/funcs.go | 76 - src/modules/monapi/config/yaml.go | 195 -- src/modules/monapi/http/http_middleware.go | 96 - src/modules/monapi/http/http_server.go | 76 - src/modules/monapi/http/router.go | 193 -- src/modules/monapi/http/router_funcs.go | 235 -- src/modules/monapi/http/router_proxy.go | 31 - src/modules/monapi/monapi.go | 152 - src/modules/monapi/plugins/all/all.go | 28 - src/modules/monapi/plugins/demo/lib/Makefile | 4 - src/modules/monapi/plugins/demo/lib/lib.go | 5 - .../monapi/plugins/dns_query/dns_query.go | 96 - .../plugins/dns_query/dns_query_test.go | 13 - src/modules/monapi/plugins/haproxy/haproxy.go | 71 - src/modules/monapi/plugins/mysql/sample.out | 417 --- src/modules/monapi/plugins/redis/sample.out | 129 - src/modules/monapi/redisc/funcs.go | 84 - src/modules/monapi/redisc/redis.go | 62 - src/modules/monapi/scache/collect_cache.go | 41 - src/modules/monapi/scache/init.go | 53 - src/modules/monapi/tools/user.go | 39 - src/modules/prober/cache/cache.go | 2 +- src/modules/prober/cache/collectrule.go | 54 +- src/modules/prober/cache/history.go | 2 +- src/modules/prober/config/config.go | 8 +- src/modules/prober/config/plugin.go | 7 +- src/modules/prober/core/common.go | 2 +- src/modules/prober/core/push.go | 12 +- 
src/modules/prober/expr/expr.go | 3 +- src/modules/prober/http/http_server.go | 8 +- src/modules/prober/http/router.go | 3 +- src/modules/prober/http/router_funcs.go | 6 +- .../prober/manager/accumulator/accumulator.go | 5 +- src/modules/prober/manager/collectrule.go | 11 +- src/modules/prober/manager/manager.go | 11 +- src/modules/prober/manager/manager_test.go | 7 +- src/modules/prober/prober.go | 28 +- src/modules/rdb/config/const.go | 8 - src/modules/rdb/config/yaml.go | 187 -- src/modules/rdb/cron/sender_init.go | 21 - src/modules/rdb/http/router.go | 235 -- src/modules/rdb/http/router_funcs.go | 233 -- src/modules/rdb/http/router_health.go | 7 - src/modules/rdb/http/router_home.go | 10 - src/modules/rdb/http/router_stats.go | 26 - src/modules/rdb/rdb.go | 122 - src/modules/rdb/redisc/reader.go | 42 - src/modules/rdb/redisc/writer.go | 53 - src/modules/{transfer => server}/aggr/aggr.go | 8 +- .../{transfer => server}/aggr/kafka.go | 0 .../{monapi => server}/alarm/callback.go | 16 +- .../{monapi => server}/alarm/event_cleaner.go | 6 +- .../alarm/event_consumer.go | 22 +- .../{monapi => server}/alarm/event_merge.go | 21 +- .../{monapi => server}/alarm/event_reader.go | 16 +- src/modules/{monapi => server}/alarm/mask.go | 12 +- src/modules/{monapi => server}/alarm/stra.go | 6 +- src/modules/{rdb => server}/auth/auth.go | 6 +- .../{rdb => server}/auth/authenticator.go | 11 +- .../backend/datasource.go | 2 +- .../backend/influxdb/influxdb.go | 4 +- .../backend/influxdb/model.go | 2 +- .../backend/influxdb/query.go | 2 +- .../{transfer => server}/backend/init.go | 10 +- .../{transfer => server}/backend/kafka.go | 4 +- .../backend/m3db/.gitignore | 0 .../backend/m3db/benchmark/benchmark.yml | 0 .../backend/m3db/benchmark/main.go | 5 +- .../backend/m3db/convert.go | 9 +- .../{transfer => server}/backend/m3db/m3db.go | 5 +- .../backend/m3db/query.go | 3 +- .../{transfer => server}/backend/opentsdb.go | 6 +- .../backend/tsdb/index.go | 6 +- .../backend/tsdb/query.go | 8 
+- .../{transfer => server}/backend/tsdb/ring.go | 0 .../{transfer => server}/backend/tsdb/tsdb.go | 6 +- .../mask.go => server/cache/alarm_mask.go} | 2 +- src/modules/server/cache/alarm_stra.go | 35 + src/modules/{rdb => server}/cache/cache.go | 2 +- src/modules/{rdb => server}/cache/config.go | 2 +- src/modules/server/cache/init.go | 117 + .../event.go => server/cache/judge_event.go} | 2 +- .../cache/judge_history.go} | 2 +- .../index.go => server/cache/judge_index.go} | 0 .../cache/judge_linkedlist.go} | 2 +- .../stra.go => server/cache/judge_stra.go} | 4 +- .../cache/monapi_aggr.go} | 4 +- src/modules/server/cache/monapi_collect.go | 144 + .../server/cache/monapi_snmp_metric.go | 80 + .../cache/monapi_stra.go} | 4 +- .../cache/prober_collect.go} | 22 +- .../{monapi/scache => server/cache}/ring.go | 2 +- src/modules/{rdb => server}/cache/session.go | 0 .../aggr.go => server/cache/transfer_aggr.go} | 2 +- .../cache/transfer_queue.go} | 0 .../stra.go => server/cache/transfer_stra.go} | 2 +- src/modules/{transfer => server}/calc/aggr.go | 2 +- .../collector/basecollector.go | 7 +- .../{monapi => server}/collector/collector.go | 5 +- .../{monapi => server}/collector/template.go | 0 src/modules/server/config/config.go | 441 +++ src/modules/{index => server}/config/const.go | 0 src/modules/{rdb => server}/config/i18n.go | 2 +- src/modules/{rdb => server}/config/ops.go | 0 src/modules/{rdb => server}/cron/cleaner.go | 2 +- src/modules/server/cron/init.go | 21 + .../judge.go => server/cron/judge_ring.go} | 26 +- src/modules/server/cron/judge_stra.go | 80 + .../aggr.go => server/cron/monapi_aggr.go} | 9 +- src/modules/server/cron/monapi_api_collect.go | 50 + .../server/cron/monapi_api_detector.go | 75 + .../cron/monapi_collect.go} | 9 +- .../server/cron/monapi_snmp_collect.go | 149 + .../server/cron/monapi_snmp_detector.go | 77 + src/modules/server/cron/monapi_snmp_hw.go | 52 + .../stra.go => server/cron/monapi_stra.go} | 12 +- src/modules/server/cron/report.go | 39 + 
src/modules/{rdb => server}/cron/sender_im.go | 31 +- src/modules/server/cron/sender_init.go | 31 + .../{rdb => server}/cron/sender_mail.go | 22 +- .../{rdb => server}/cron/sender_sms.go | 19 +- .../{rdb => server}/cron/sender_voice.go | 19 +- .../aggr.go => server/cron/transfer_aggr.go} | 65 +- .../pool.go => server/cron/transfer_pool.go} | 6 +- src/modules/server/cron/transfer_queue.go | 35 + src/modules/server/cron/transfer_stra.go | 76 + .../{rdb => server}/dingtalk/dingtalk.go | 6 +- .../http/http_funcs.go} | 181 +- .../{rdb => server}/http/http_middleware.go | 10 +- .../{rdb => server}/http/http_server.go | 14 +- src/modules/server/http/router.go | 551 ++++ .../{monapi => server}/http/router_aggr.go | 8 +- .../{rdb => server}/http/router_auth.go | 49 +- .../{monapi => server}/http/router_chart.go | 2 +- .../{monapi => server}/http/router_collect.go | 47 +- .../{rdb => server}/http/router_configs.go | 4 +- .../{rdb => server}/http/router_container.go | 0 .../{monapi => server}/http/router_event.go | 6 +- .../{rdb => server}/http/router_hbs.go | 2 +- .../http/router_health.go} | 13 +- src/modules/server/http/router_home.go | 11 + .../{ams => server}/http/router_host.go | 217 +- .../{ams => server}/http/router_host_field.go | 4 +- .../{monapi => server}/http/router_index.go | 13 +- src/modules/server/http/router_judge.go | 31 + .../{rdb => server}/http/router_log.go | 3 +- .../http/router_maskconf.go | 2 +- src/modules/server/http/router_mib.go | 184 ++ src/modules/server/http/router_nethw.go | 402 +++ .../{rdb => server}/http/router_node.go | 8 +- .../{rdb => server}/http/router_node_cate.go | 4 +- .../http/router_node_cate_field.go | 3 +- .../http/router_node_field_value.go | 3 +- .../{rdb => server}/http/router_node_role.go | 3 +- .../{rdb => server}/http/router_node_trash.go | 3 +- .../{rdb => server}/http/router_ops.go | 3 +- .../{rdb => server}/http/router_perm.go | 6 +- .../http/router_push.go} | 15 +- .../http/router_query.go} | 43 +- .../{rdb => 
server}/http/router_resource.go | 4 +- .../{rdb => server}/http/router_role.go | 3 +- .../{monapi => server}/http/router_screen.go | 2 +- .../{rdb => server}/http/router_self.go | 9 +- .../{rdb => server}/http/router_sender.go | 16 +- .../{rdb => server}/http/router_sso.go | 3 +- .../http/router_stats.go} | 53 +- .../{monapi => server}/http/router_stra.go | 10 +- .../{job => server}/http/router_task.go | 22 +- .../{job => server}/http/router_task_tpl.go | 2 +- .../{rdb => server}/http/router_team.go | 4 +- .../http/router_tmpchar.go} | 4 +- .../{monapi => server}/http/router_tpl.go | 16 +- .../{rdb => server}/http/router_tree.go | 4 +- .../{rdb => server}/http/router_user.go | 7 +- .../{rdb => server/http}/session/session.go | 8 +- .../http}/session/session_db.go | 5 +- .../http}/session/session_mem.go | 6 +- .../http}/session/session_options.go | 0 .../http}/session/session_test.go | 0 .../judge.go => server/judge/backend.go} | 52 +- src/modules/{judge => server}/judge/func.go | 2 +- src/modules/{judge => server}/judge/judge.go | 33 +- src/modules/{judge => server}/judge/nodata.go | 9 +- .../backend => server/judge}/query/index.go | 14 +- .../backend => server/judge}/query/init.go | 10 +- .../backend => server/judge}/query/query.go | 12 +- .../{monapi => server}/notify/notify.go | 100 +- src/modules/server/plugins/all/all.go | 28 + .../{monapi => server}/plugins/all/dlopen.go | 0 .../{monapi => server}/plugins/api/api.go | 6 +- .../{monapi => server}/plugins/demo/demo.go | 12 +- .../plugins/demo/demo/demo.go | 0 .../plugins/demo/demo_test.go | 2 +- src/modules/server/plugins/demo/lib/lib.go | 5 + .../server/plugins/dns_query/dns_query.go | 96 + .../plugins/dns_query/dns_query_test.go | 14 + .../plugins/elasticsearch/elasticsearch.go | 11 +- .../elasticsearch/elasticsearch_test.go | 3 +- .../plugins/github/github.go | 10 +- .../plugins/github/github_test.go | 2 +- src/modules/server/plugins/haproxy/haproxy.go | 71 + .../plugins/haproxy/haproxy/haproxy.go | 0 
.../plugins/http_response/http_response.go | 38 +- .../http_response/http_response_test.go | 2 +- .../{monapi => server}/plugins/log/log.go | 4 +- .../plugins/mongodb/mongodb.go | 6 +- .../plugins/mongodb/mongodb_test.go | 2 +- .../{monapi => server}/plugins/mysql/mysql.go | 37 +- .../plugins/mysql/mysql_test.go | 2 +- .../plugins/net_response/net_response.go | 11 +- .../plugins/net_response/net_response_test.go | 2 +- .../{monapi => server}/plugins/nginx/nginx.go | 18 +- .../plugins/nginx/nginx_test.go | 3 +- .../{monapi => server}/plugins/ping/ping.go | 9 +- .../plugins/ping/ping/exec.go | 0 .../plugins/ping/ping/exec_unix.go | 0 .../plugins/ping/ping/exec_windows.go | 0 .../plugins/ping/ping/ping.go | 0 .../plugins/ping/ping/ping_notwindows.go | 0 .../plugins/ping/ping/ping_test.go | 0 .../plugins/ping/ping/ping_windows.go | 0 .../plugins/ping/ping/ping_windows_test.go | 0 .../plugins/ping/ping_test.go | 3 +- .../plugins/plugin/plugin.go | 5 +- .../{monapi => server}/plugins/port/port.go | 5 +- .../{monapi => server}/plugins/proc/proc.go | 5 +- .../plugins/prometheus/prometheus.go | 9 +- .../plugins/prometheus/prometheus_test.go | 2 +- .../plugins/rabbitmq/rabbitmq.go | 54 +- .../{monapi => server}/plugins/redis/redis.go | 7 +- .../plugins/redis/redist_test.go | 2 +- .../plugins/tengine/tengine.go | 14 +- .../plugins/tengine/tengine_test.go | 3 +- .../{monapi => server}/plugins/types.go | 3 +- .../{monapi => server}/plugins/util.go | 5 +- .../plugins/zookeeper/zookeeper.go | 10 +- .../plugins/zookeeper/zookeeper_test.go | 3 +- src/modules/{rdb => server}/rabbitmq/conn.go | 24 +- .../{rdb => server}/rabbitmq/queue_consume.go | 0 .../rabbitmq/request_handler.go | 2 +- src/modules/server/redisc/funcs.go | 187 ++ src/modules/{rdb => server}/redisc/redis.go | 42 +- src/modules/server/rpc/ams_host.go | 17 + src/modules/server/rpc/hbs_heartbeat.go | 26 + .../rpc/meta.go => server/rpc/job_meta.go} | 8 +- .../report.go => server/rpc/job_report.go} | 8 +- 
src/modules/server/rpc/judge_send.go | 13 + src/modules/server/rpc/mon_collect.go | 30 + src/modules/server/rpc/mon_snmp.go | 51 + src/modules/{job => server}/rpc/ping.go | 2 +- src/modules/{transfer => server}/rpc/rpc.go | 13 +- .../rpc/transfer_consumer.go} | 8 +- .../push.go => server/rpc/transfer_push.go} | 20 +- .../query.go => server/rpc/transfer_query.go} | 6 +- src/modules/server/server.go | 186 ++ .../service/job_scheduler.go} | 2 +- .../service/job_timeout.go} | 2 +- src/modules/{rdb => server}/ssoc/sso.go | 9 +- .../timer/job_heartbeat.go} | 4 +- .../timer/job_host_doing.go} | 2 +- .../timer/job_scheduler.go} | 6 +- .../timer/job_task_cleaner.go} | 2 +- src/modules/{rdb => server}/wechat/wechat.go | 12 + src/modules/transfer/config/config.go | 187 -- src/modules/transfer/config/const.go | 5 - src/modules/transfer/cron/init.go | 8 - src/modules/transfer/cron/queue.go | 35 - src/modules/transfer/cron/stra.go | 116 - src/modules/transfer/http/http_server.go | 67 - src/modules/transfer/http/router_funcs.go | 130 - src/modules/transfer/http/routes.go | 34 - src/modules/transfer/transfer.go | 118 - src/modules/tsdb/backend/rpc/init.go | 27 - src/modules/tsdb/backend/rpc/push.go | 76 - src/modules/tsdb/cache/cache.go | 234 -- src/modules/tsdb/cache/chunk.go | 135 - src/modules/tsdb/cache/chunks.go | 223 -- src/modules/tsdb/cache/iter.go | 13 - src/modules/tsdb/cache/point.go | 7 - src/modules/tsdb/config/config.go | 134 - src/modules/tsdb/config/const.go | 5 - src/modules/tsdb/http/http.go | 84 - src/modules/tsdb/http/middleware/logger.go | 29 - src/modules/tsdb/http/middleware/recovery.go | 54 - src/modules/tsdb/http/render/render.go | 40 - src/modules/tsdb/http/routes/health_router.go | 19 - src/modules/tsdb/http/routes/op_router.go | 70 - src/modules/tsdb/http/routes/rotuer.go | 71 - src/modules/tsdb/index/cache.go | 84 - src/modules/tsdb/index/index.go | 62 - src/modules/tsdb/index/init.go | 89 - src/modules/tsdb/index/update_all.go | 83 - 
src/modules/tsdb/index/update_incr.go | 69 - src/modules/tsdb/migrate/init.go | 117 - src/modules/tsdb/migrate/push.go | 48 - src/modules/tsdb/migrate/query.go | 146 - src/modules/tsdb/migrate/ring.go | 51 - src/modules/tsdb/migrate/worker.go | 206 -- src/modules/tsdb/rpc/push.go | 146 - src/modules/tsdb/rpc/query.go | 447 --- src/modules/tsdb/rpc/rpc.go | 74 - src/modules/tsdb/rrdtool/rrdtool.go | 142 - src/modules/tsdb/rrdtool/sync_disk.go | 330 -- src/modules/tsdb/tsdb.go | 145 - src/modules/tsdb/utils/utils.go | 61 - src/toolkits/compress/tar.go | 127 - src/toolkits/http/http.go | 61 - src/toolkits/http/middleware/logger.go | 295 -- src/toolkits/http/middleware/recovery.go | 160 - src/toolkits/http/render/funcs.go | 26 - vendor/github.com/alouca/gologger/.gitignore | 22 + vendor/github.com/alouca/gologger/README.md | 4 + vendor/github.com/alouca/gologger/logger.go | 88 + .../github.com/codegangsta/negroni/.gitignore | 1 - .../codegangsta/negroni/.travis.yml | 27 - .../codegangsta/negroni/CHANGELOG.md | 69 - .../github.com/codegangsta/negroni/README.md | 549 ---- vendor/github.com/codegangsta/negroni/doc.go | 25 - .../github.com/codegangsta/negroni/logger.go | 78 - .../github.com/codegangsta/negroni/negroni.go | 169 - .../codegangsta/negroni/recovery.go | 194 -- .../codegangsta/negroni/response_writer.go | 113 - .../negroni/response_writer_pusher.go | 16 - .../github.com/codegangsta/negroni/static.go | 88 - vendor/github.com/dgryski/go-tsz/.gitignore | 3 - vendor/github.com/dgryski/go-tsz/.travis.yml | 38 - vendor/github.com/dgryski/go-tsz/LICENSE | 23 - vendor/github.com/dgryski/go-tsz/Makefile | 203 -- vendor/github.com/dgryski/go-tsz/README.md | 32 - vendor/github.com/dgryski/go-tsz/VERSION | 1 - vendor/github.com/dgryski/go-tsz/bstream.go | 205 -- vendor/github.com/dgryski/go-tsz/fuzz.go | 69 - .../dgryski/go-tsz/testdata/data.go | 34 - vendor/github.com/dgryski/go-tsz/tsz.go | 408 --- .../go-fastping}/.gitignore | 6 +- .../go-fastping}/LICENSE | 2 +- 
.../freedomkk-qfeng/go-fastping/README.md | 54 + .../freedomkk-qfeng/go-fastping/fastping.go | 685 ++++ vendor/github.com/gaochao1/gosnmp/.gitignore | 28 + .../{gosnmp => gaochao1}/gosnmp/LICENSE | 13 +- vendor/github.com/gaochao1/gosnmp/README.md | 77 + vendor/github.com/gaochao1/gosnmp/decode.go | 177 ++ vendor/github.com/gaochao1/gosnmp/gosnmp.go | 284 ++ vendor/github.com/gaochao1/gosnmp/helper.go | 208 ++ vendor/github.com/gaochao1/gosnmp/packet.go | 378 +++ vendor/github.com/gaochao1/sw/README.md | 1 + vendor/github.com/gaochao1/sw/conn.go | 42 + vendor/github.com/gaochao1/sw/cpustat.go | 208 ++ vendor/github.com/gaochao1/sw/descrstat.go | 119 + vendor/github.com/gaochao1/sw/fastping.go | 32 + vendor/github.com/gaochao1/sw/goping.go | 189 ++ vendor/github.com/gaochao1/sw/ifstat.go | 468 +++ .../github.com/gaochao1/sw/ifstat_snmpwalk.go | 334 ++ vendor/github.com/gaochao1/sw/memstat.go | 220 ++ vendor/github.com/gaochao1/sw/modelstat.go | 46 + vendor/github.com/gaochao1/sw/namestat.go | 24 + vendor/github.com/gaochao1/sw/parseip.go | 139 + vendor/github.com/gaochao1/sw/ping.go | 20 + vendor/github.com/gaochao1/sw/pingstat.go | 59 + vendor/github.com/gaochao1/sw/runsnmp.go | 88 + vendor/github.com/gaochao1/sw/uptimestat.go | 50 + vendor/github.com/go-ping/ping/.editorconfig | 16 + vendor/github.com/go-ping/ping/.gitignore | 2 + vendor/github.com/go-ping/ping/.golangci.yml | 6 + .../github.com/go-ping/ping/.goreleaser.yml | 46 + .../{influxdata/wlog => go-ping/ping}/LICENSE | 2 +- vendor/github.com/go-ping/ping/README.md | 130 + vendor/github.com/go-ping/ping/go.mod | 5 + vendor/github.com/go-ping/ping/go.sum | 10 + vendor/github.com/go-ping/ping/ping.go | 669 ++++ vendor/github.com/gorilla/mux/AUTHORS | 8 - vendor/github.com/gorilla/mux/README.md | 718 ----- vendor/github.com/gorilla/mux/context.go | 18 - vendor/github.com/gorilla/mux/doc.go | 306 -- vendor/github.com/gorilla/mux/go.mod | 1 - vendor/github.com/gorilla/mux/middleware.go | 79 - 
vendor/github.com/gorilla/mux/mux.go | 607 ---- vendor/github.com/gorilla/mux/regexp.go | 345 --- vendor/github.com/gorilla/mux/route.go | 710 ----- vendor/github.com/gorilla/mux/test_helpers.go | 19 - vendor/github.com/gosnmp/gosnmp/.gitignore | 75 - vendor/github.com/gosnmp/gosnmp/.golangci.yml | 57 - vendor/github.com/gosnmp/gosnmp/AUTHORS.md | 88 - vendor/github.com/gosnmp/gosnmp/CHANGELOG.md | 92 - vendor/github.com/gosnmp/gosnmp/Dockerfile | 39 - vendor/github.com/gosnmp/gosnmp/README.md | 272 -- .../gosnmp/gosnmp/asn1ber_string.go | 37 - .../github.com/gosnmp/gosnmp/build_tests.sh | 10 - vendor/github.com/gosnmp/gosnmp/go.mod | 8 - vendor/github.com/gosnmp/gosnmp/go.sum | 20 - vendor/github.com/gosnmp/gosnmp/goimports2 | 21 - .../github.com/gosnmp/gosnmp/goimports2_all | 6 - vendor/github.com/gosnmp/gosnmp/gosnmp.go | 621 ---- vendor/github.com/gosnmp/gosnmp/helper.go | 810 ----- vendor/github.com/gosnmp/gosnmp/interface.go | 340 -- .../github.com/gosnmp/gosnmp/local_tests.sh | 7 - vendor/github.com/gosnmp/gosnmp/marshal.go | 1164 ------- vendor/github.com/gosnmp/gosnmp/snmp_users.sh | 50 - .../gosnmp/gosnmp/snmperror_string.go | 16 - .../gosnmp/snmpv3authprotocol_string.go | 30 - .../gosnmp/snmpv3privprotocol_string.go | 30 - vendor/github.com/gosnmp/gosnmp/trap.go | 400 --- vendor/github.com/gosnmp/gosnmp/trap.md | 100 - vendor/github.com/gosnmp/gosnmp/v3.go | 486 --- .../gosnmp/gosnmp/v3_testing_credentials.go | 162 - vendor/github.com/gosnmp/gosnmp/v3_usm.go | 916 ------ vendor/github.com/gosnmp/gosnmp/walk.go | 115 - .../telegraf/internal/snmp/config.go | 34 - .../telegraf/internal/snmp/wrapper.go | 188 -- .../plugins/inputs/dns_query/README.md | 73 + .../plugins/inputs/dns_query/dns_query.go | 215 ++ .../plugins/inputs/http_response/README.md | 120 + .../inputs/http_response/http_response.go | 479 +++ .../plugins/inputs/net_response/README.md | 56 + .../inputs/net_response/net_response.go | 271 ++ .../telegraf/plugins/inputs/snmp/README.md | 238 -- 
.../telegraf/plugins/inputs/snmp/snmp.go | 951 ------ .../inputs/snmp/snmp_mocks_generate.go | 102 - vendor/github.com/influxdata/wlog/README.md | 81 - vendor/github.com/influxdata/wlog/writer.go | 166 - vendor/github.com/miekg/dns/.codecov.yml | 8 + vendor/github.com/miekg/dns/.gitignore | 4 + vendor/github.com/miekg/dns/.travis.yml | 17 + vendor/github.com/miekg/dns/AUTHORS | 1 + vendor/github.com/miekg/dns/CODEOWNERS | 1 + vendor/github.com/miekg/dns/CONTRIBUTORS | 10 + vendor/github.com/miekg/dns/COPYRIGHT | 9 + .../{gorilla/mux => miekg/dns}/LICENSE | 11 +- vendor/github.com/miekg/dns/Makefile.fuzz | 33 + vendor/github.com/miekg/dns/Makefile.release | 52 + vendor/github.com/miekg/dns/README.md | 175 ++ vendor/github.com/miekg/dns/acceptfunc.go | 61 + vendor/github.com/miekg/dns/client.go | 415 +++ vendor/github.com/miekg/dns/clientconfig.go | 135 + vendor/github.com/miekg/dns/dane.go | 43 + vendor/github.com/miekg/dns/defaults.go | 378 +++ vendor/github.com/miekg/dns/dns.go | 134 + vendor/github.com/miekg/dns/dnssec.go | 794 +++++ vendor/github.com/miekg/dns/dnssec_keygen.go | 140 + vendor/github.com/miekg/dns/dnssec_keyscan.go | 322 ++ vendor/github.com/miekg/dns/dnssec_privkey.go | 94 + vendor/github.com/miekg/dns/doc.go | 268 ++ vendor/github.com/miekg/dns/duplicate.go | 38 + vendor/github.com/miekg/dns/edns.go | 675 ++++ vendor/github.com/miekg/dns/format.go | 93 + vendor/github.com/miekg/dns/fuzz.go | 32 + vendor/github.com/miekg/dns/generate.go | 247 ++ vendor/github.com/miekg/dns/go.mod | 11 + vendor/github.com/miekg/dns/go.sum | 39 + vendor/github.com/miekg/dns/labels.go | 212 ++ vendor/github.com/miekg/dns/listen_go111.go | 44 + .../github.com/miekg/dns/listen_go_not111.go | 23 + vendor/github.com/miekg/dns/msg.go | 1196 +++++++ vendor/github.com/miekg/dns/msg_helpers.go | 810 +++++ vendor/github.com/miekg/dns/msg_truncate.go | 111 + vendor/github.com/miekg/dns/nsecx.go | 95 + vendor/github.com/miekg/dns/privaterr.go | 114 + 
vendor/github.com/miekg/dns/reverse.go | 52 + vendor/github.com/miekg/dns/sanitize.go | 86 + vendor/github.com/miekg/dns/scan.go | 1408 +++++++++ vendor/github.com/miekg/dns/scan_rr.go | 1764 +++++++++++ vendor/github.com/miekg/dns/serve_mux.go | 123 + vendor/github.com/miekg/dns/server.go | 764 +++++ vendor/github.com/miekg/dns/sig0.go | 209 ++ vendor/github.com/miekg/dns/singleinflight.go | 61 + vendor/github.com/miekg/dns/smimea.go | 44 + vendor/github.com/miekg/dns/tlsa.go | 44 + vendor/github.com/miekg/dns/tsig.go | 389 +++ vendor/github.com/miekg/dns/types.go | 1527 +++++++++ vendor/github.com/miekg/dns/udp.go | 102 + vendor/github.com/miekg/dns/udp_windows.go | 35 + vendor/github.com/miekg/dns/update.go | 110 + vendor/github.com/miekg/dns/version.go | 15 + vendor/github.com/miekg/dns/xfr.go | 266 ++ vendor/github.com/miekg/dns/zduplicate.go | 1157 +++++++ vendor/github.com/miekg/dns/zmsg.go | 2741 +++++++++++++++++ vendor/github.com/miekg/dns/ztypes.go | 898 ++++++ .../github.com/open-falcon/rrdlite/.gitignore | 3 - vendor/github.com/open-falcon/rrdlite/LICENSE | 30 - .../github.com/open-falcon/rrdlite/README.md | 11 - vendor/github.com/open-falcon/rrdlite/fnv.h | 114 - .../github.com/open-falcon/rrdlite/hash_32.c | 154 - vendor/github.com/open-falcon/rrdlite/rrd.go | 156 - vendor/github.com/open-falcon/rrdlite/rrd.h | 281 -- .../github.com/open-falcon/rrdlite/rrd_c.go | 306 -- .../open-falcon/rrdlite/rrd_config.h | 400 --- .../open-falcon/rrdlite/rrd_config_bottom.h | 241 -- .../open-falcon/rrdlite/rrd_create.c | 731 ----- .../github.com/open-falcon/rrdlite/rrd_diff.c | 123 - .../open-falcon/rrdlite/rrd_error.c | 185 -- .../open-falcon/rrdlite/rrd_error.h | 126 - .../open-falcon/rrdlite/rrd_error.sh | 7 - .../open-falcon/rrdlite/rrd_fetch.c | 367 --- .../open-falcon/rrdlite/rrd_format.c | 103 - .../open-falcon/rrdlite/rrd_format.h | 428 --- .../github.com/open-falcon/rrdlite/rrd_hw.c | 471 --- .../github.com/open-falcon/rrdlite/rrd_hw.h | 61 - 
.../open-falcon/rrdlite/rrd_hw_math.c | 143 - .../open-falcon/rrdlite/rrd_hw_math.h | 132 - .../open-falcon/rrdlite/rrd_hw_update.c | 475 --- .../open-falcon/rrdlite/rrd_hw_update.h | 44 - .../github.com/open-falcon/rrdlite/rrd_info.c | 380 --- .../open-falcon/rrdlite/rrd_is_thread_safe.h | 28 - .../open-falcon/rrdlite/rrd_nan_inf.c | 40 - .../github.com/open-falcon/rrdlite/rrd_open.c | 769 ----- .../open-falcon/rrdlite/rrd_parsetime.c | 1043 ------- .../open-falcon/rrdlite/rrd_parsetime.h | 8 - .../open-falcon/rrdlite/rrd_rpncalc.c | 964 ------ .../open-falcon/rrdlite/rrd_rpncalc.h | 88 - .../github.com/open-falcon/rrdlite/rrd_tool.h | 133 - .../open-falcon/rrdlite/rrd_update.c | 1734 ----------- .../open-falcon/rrdlite/rrd_utils.c | 183 -- .../open-falcon/rrdlite/rrd_version.c | 21 - .../github.com/open-falcon/rrdlite/rrdfunc.c | 50 - .../github.com/open-falcon/rrdlite/rrdfunc.h | 5 - .../github.com/open-falcon/rrdlite/unused.h | 14 - vendor/github.com/sparrc/go-ping/.gitignore | 1 + .../negroni => sparrc/go-ping}/LICENSE | 2 +- vendor/github.com/sparrc/go-ping/README.md | 102 + vendor/github.com/sparrc/go-ping/ping.go | 602 ++++ vendor/github.com/toolkits/file/downloader.go | 26 + vendor/github.com/toolkits/file/file.go | 223 ++ vendor/github.com/toolkits/file/reader.go | 66 + vendor/github.com/toolkits/file/writer.go | 20 + vendor/github.com/toolkits/sys/cmd.go | 63 + vendor/github.com/unrolled/render/.travis.yml | 15 - vendor/github.com/unrolled/render/README.md | 508 --- vendor/github.com/unrolled/render/buffer.go | 46 - vendor/github.com/unrolled/render/doc.go | 55 - vendor/github.com/unrolled/render/engine.go | 217 -- vendor/github.com/unrolled/render/fs.go | 21 - vendor/github.com/unrolled/render/go.mod | 5 - vendor/github.com/unrolled/render/go.sum | 2 - vendor/github.com/unrolled/render/helpers.go | 21 - .../unrolled/render/helpers_pre16.go | 26 - vendor/github.com/unrolled/render/render.go | 480 --- vendor/golang.org/x/net/icmp/dstunreach.go | 59 + 
vendor/golang.org/x/net/icmp/echo.go | 173 ++ vendor/golang.org/x/net/icmp/endpoint.go | 113 + vendor/golang.org/x/net/icmp/extension.go | 170 + vendor/golang.org/x/net/icmp/helper_posix.go | 75 + vendor/golang.org/x/net/icmp/interface.go | 322 ++ vendor/golang.org/x/net/icmp/ipv4.go | 69 + vendor/golang.org/x/net/icmp/ipv6.go | 23 + vendor/golang.org/x/net/icmp/listen_posix.go | 103 + vendor/golang.org/x/net/icmp/listen_stub.go | 33 + vendor/golang.org/x/net/icmp/message.go | 162 + vendor/golang.org/x/net/icmp/messagebody.go | 52 + vendor/golang.org/x/net/icmp/mpls.go | 77 + vendor/golang.org/x/net/icmp/multipart.go | 129 + vendor/golang.org/x/net/icmp/packettoobig.go | 43 + vendor/golang.org/x/net/icmp/paramprob.go | 72 + vendor/golang.org/x/net/icmp/sys_freebsd.go | 11 + vendor/golang.org/x/net/icmp/timeexceeded.go | 57 + vendor/modules.txt | 39 +- 760 files changed, 33789 insertions(+), 39319 deletions(-) rename etc/{agent.yml => agentd.yml} (97%) delete mode 100644 etc/ams.yml delete mode 100644 etc/index.yml delete mode 100644 etc/job.yml delete mode 100644 etc/json/stra.json delete mode 100644 etc/judge.yml delete mode 100644 etc/monapi.yml delete mode 100644 etc/rdb.yml delete mode 100644 etc/screen/n9e_mudules create mode 100644 etc/server.yml rename etc/service/{agent.service => agentd.service} (83%) delete mode 100644 etc/service/job.service delete mode 100644 etc/service/judge.service delete mode 100644 etc/service/monapi.service rename etc/service/{index.service => prober.service} (76%) delete mode 100644 etc/service/rdb.service rename etc/service/{ams.service => server.service} (76%) delete mode 100644 etc/service/transfer.service delete mode 100644 etc/service/tsdb.service delete mode 100644 etc/transfer.yml delete mode 100644 etc/tsdb.yml rename src/{modules/agent => common}/client/cli.go (69%) create mode 100644 src/common/compress/compress.go rename src/{toolkits => common}/exit/exit.go (100%) rename src/{toolkits => common}/go-tdigest/LICENSE 
(100%) rename src/{toolkits => common}/go-tdigest/README.md (100%) rename src/{toolkits => common}/go-tdigest/serialization.go (100%) rename src/{toolkits => common}/go-tdigest/serialization_test.go (100%) rename src/{toolkits => common}/go-tdigest/summary.go (100%) rename src/{toolkits => common}/go-tdigest/summary_test.go (100%) rename src/{toolkits => common}/go-tdigest/tdigest.go (100%) rename src/{toolkits => common}/go-tdigest/tdigest_test.go (100%) rename src/{toolkits => common}/i18n/i18n.go (100%) delete mode 100644 src/common/loggeri/logger.go create mode 100644 src/common/loggeri/loggeri.go create mode 100644 src/common/ping/ping.go rename src/{toolkits => common}/pools/opentsdb.go (100%) rename src/{toolkits => common}/pools/pools.go (100%) rename src/{toolkits => common}/slice/slice.go (65%) rename src/{toolkits => common}/stack/stack.go (100%) rename src/{toolkits => common}/stats/counter.go (100%) rename src/{toolkits => common}/stats/init.go (88%) rename src/{toolkits => common}/str/checksum.go (100%) rename src/{toolkits => common}/str/format.go (98%) rename src/{toolkits => common}/str/parser.go (100%) rename src/{toolkits => common}/str/slice.go (100%) create mode 100644 src/models/host_register.go rename src/models/{mon_hbs.go => instance.go} (65%) create mode 100644 src/models/nems_mib.go create mode 100644 src/models/nems_nethw.go rename src/models/{node_field_value.go => node_cate_field_value.go} (100%) rename src/models/{captcha.go => rdb_captcha.go} (100%) rename src/models/{configs.go => rdb_configs.go} (100%) rename src/models/{session.go => rdb_session.go} (100%) rename src/models/{role_operation.go => role_opration.go} (100%) rename src/models/{stats.go => user_stats.go} (100%) delete mode 100644 src/modules/agent/stra/cron.go rename src/modules/{agent/agent.go => agentd/agentd.go} (65%) rename src/modules/{agent => agentd}/cache/cache.go (94%) rename src/modules/{agent => agentd}/config/config.go (94%) rename src/modules/{agent => 
agentd}/core/clients.go (100%) rename src/modules/{agent => agentd}/core/common.go (92%) rename src/modules/{agent/client => agentd/core}/meta.go (63%) rename src/modules/{agent => agentd}/core/push.go (93%) rename src/modules/{agent => agentd}/http/http_server.go (87%) rename src/modules/{agent => agentd}/http/router.go (100%) rename src/modules/{agent => agentd}/http/router_collector.go (72%) rename src/modules/{agent => agentd}/http/router_endpoint.go (66%) rename src/modules/{agent => agentd}/http/router_executor.go (91%) rename src/modules/{agent => agentd}/http/router_funcs.go (100%) rename src/modules/{agent => agentd}/http/router_health.go (100%) rename src/modules/{agent => agentd}/log/reader/reader.go (100%) rename src/modules/{agent => agentd}/log/reader/reader_test.go (100%) rename src/modules/{agent => agentd}/log/reader/reader_util.go (100%) rename src/modules/{agent => agentd}/log/reader/reader_util_test.go (100%) rename src/modules/{agent => agentd}/log/strategy/strategy.go (97%) rename src/modules/{agent => agentd}/log/worker/cached.go (98%) rename src/modules/{agent => agentd}/log/worker/control.go (94%) rename src/modules/{agent => agentd}/log/worker/control_test.go (100%) rename src/modules/{agent => agentd}/log/worker/counter.go (97%) rename src/modules/{agent => agentd}/log/worker/push.go (95%) rename src/modules/{agent => agentd}/log/worker/worker.go (97%) rename src/modules/{agent => agentd}/log/worker/worker_test.go (100%) rename src/modules/{agent => agentd}/report/report.go (67%) rename src/modules/{agent => agentd}/statsd/aggr_config.go (100%) rename src/modules/{agent => agentd}/statsd/aggr_counter.go (100%) rename src/modules/{agent => agentd}/statsd/aggr_counter_e.go (100%) rename src/modules/{agent => agentd}/statsd/aggr_gauge.go (100%) rename src/modules/{agent => agentd}/statsd/aggr_histogram.go (98%) rename src/modules/{agent => agentd}/statsd/aggr_interface.go (100%) rename src/modules/{agent => agentd}/statsd/aggr_ratio.go 
(100%) rename src/modules/{agent => agentd}/statsd/aggr_rpc.go (100%) rename src/modules/{agent => agentd}/statsd/aggr_rpc_e.go (100%) rename src/modules/{agent => agentd}/statsd/clock.go (100%) rename src/modules/{agent => agentd}/statsd/statsd.go (100%) rename src/modules/{agent => agentd}/statsd/statsd_receiver.go (95%) rename src/modules/{agent => agentd}/statsd/statsd_reporter.go (95%) rename src/modules/{agent => agentd}/statsd/statsd_state.go (99%) rename src/modules/{agent => agentd}/statsd/utils.go (100%) create mode 100644 src/modules/agentd/stra/cron.go rename src/modules/{agent => agentd}/stra/init.go (59%) rename src/modules/{agent => agentd}/stra/log.go (98%) rename src/modules/{agent => agentd}/stra/log_test.go (100%) rename src/modules/{agent => agentd}/stra/port.go (92%) rename src/modules/{agent => agentd}/stra/proc.go (92%) rename src/modules/{agent => agentd}/sys/config.go (100%) rename src/modules/{agent => agentd}/sys/funcs/collector.go (58%) rename src/modules/{agent => agentd}/sys/funcs/cpustat.go (98%) rename src/modules/{agent => agentd}/sys/funcs/cron.go (85%) rename src/modules/{agent => agentd}/sys/funcs/dfstat.go (95%) rename src/modules/{agent => agentd}/sys/funcs/diskstat.go (97%) rename src/modules/{agent => agentd}/sys/funcs/fsstat.go (95%) rename src/modules/{agent => agentd}/sys/funcs/funcs.go (94%) rename src/modules/{agent => agentd}/sys/funcs/ifstat.go (96%) rename src/modules/{agent => agentd}/sys/funcs/loadavg.go (90%) rename src/modules/{agent => agentd}/sys/funcs/meminfo.go (93%) rename src/modules/{agent => agentd}/sys/funcs/netfilter.go (90%) rename src/modules/{agent => agentd}/sys/funcs/ntp.go (89%) rename src/modules/{agent => agentd}/sys/funcs/snmp.go (92%) rename src/modules/{agent => agentd}/sys/funcs/sockstas.go (90%) rename src/modules/{agent => agentd}/sys/funcs/sys.go (95%) rename src/modules/{agent => agentd}/sys/plugins/cron.go (100%) rename src/modules/{agent => agentd}/sys/plugins/plugin.go (100%) rename 
src/modules/{agent => agentd}/sys/plugins/reader.go (95%) rename src/modules/{agent => agentd}/sys/plugins/scheduler.go (96%) rename src/modules/{agent => agentd}/sys/ports/cron.go (81%) rename src/modules/{agent => agentd}/sys/ports/port.go (94%) rename src/modules/{agent => agentd}/sys/ports/scheduler.go (86%) rename src/modules/{agent => agentd}/sys/procs/cron.go (81%) rename src/modules/{agent => agentd}/sys/procs/proc.go (95%) rename src/modules/{agent => agentd}/sys/procs/scheduler.go (93%) rename src/modules/{agent => agentd}/sys/procs/sys.go (100%) rename src/modules/{agent => agentd}/timer/heartbeat.go (81%) rename src/modules/{agent => agentd}/timer/killer.go (83%) rename src/modules/{agent => agentd}/timer/task.go (97%) rename src/modules/{agent => agentd}/timer/tasks.go (97%) rename src/modules/{agent => agentd}/udp/handler.go (88%) rename src/modules/{agent => agentd}/udp/udp.go (92%) delete mode 100644 src/modules/ams/ams.go delete mode 100644 src/modules/ams/config/yaml.go delete mode 100644 src/modules/ams/http/http_middleware.go delete mode 100644 src/modules/ams/http/http_server.go delete mode 100644 src/modules/ams/http/router.go delete mode 100644 src/modules/ams/http/router_funcs.go delete mode 100644 src/modules/ams/http/router_health.go delete mode 100644 src/modules/index/cache/counter_map.go delete mode 100644 src/modules/index/cache/endpoint_map.go delete mode 100644 src/modules/index/cache/indexdb.go delete mode 100644 src/modules/index/cache/metric_map.go delete mode 100644 src/modules/index/cache/tag.go delete mode 100644 src/modules/index/cache/tag_map.go delete mode 100644 src/modules/index/config/config.go delete mode 100644 src/modules/index/http/routes/health_router.go delete mode 100644 src/modules/index/http/routes/index_router.go delete mode 100644 src/modules/index/http/routes/routes.go delete mode 100644 src/modules/index/index.go delete mode 100644 src/modules/index/rpc/push.go delete mode 100644 src/modules/index/rpc/rpc.go 
delete mode 100644 src/modules/job/config/config.go delete mode 100644 src/modules/job/http/http_middleware.go delete mode 100644 src/modules/job/http/http_server.go delete mode 100644 src/modules/job/http/router.go delete mode 100644 src/modules/job/http/router_health.go delete mode 100644 src/modules/job/job.go delete mode 100644 src/modules/job/rpc/rpc.go delete mode 100644 src/modules/judge/backend/redi/funcs.go delete mode 100644 src/modules/judge/backend/redi/redis.go delete mode 100644 src/modules/judge/config/config.go delete mode 100644 src/modules/judge/http/http.go delete mode 100644 src/modules/judge/http/routes/health.go delete mode 100644 src/modules/judge/http/routes/routes.go delete mode 100644 src/modules/judge/judge.go delete mode 100644 src/modules/judge/readme.md delete mode 100644 src/modules/judge/rpc/push.go delete mode 100644 src/modules/judge/rpc/rpc.go delete mode 100644 src/modules/judge/stra/stra.go delete mode 100644 src/modules/monapi/acache/init.go delete mode 100644 src/modules/monapi/acache/stra.go delete mode 100644 src/modules/monapi/config/const.go delete mode 100644 src/modules/monapi/config/funcs.go delete mode 100644 src/modules/monapi/config/yaml.go delete mode 100644 src/modules/monapi/http/http_middleware.go delete mode 100644 src/modules/monapi/http/http_server.go delete mode 100644 src/modules/monapi/http/router.go delete mode 100644 src/modules/monapi/http/router_funcs.go delete mode 100644 src/modules/monapi/http/router_proxy.go delete mode 100644 src/modules/monapi/monapi.go delete mode 100644 src/modules/monapi/plugins/all/all.go delete mode 100644 src/modules/monapi/plugins/demo/lib/Makefile delete mode 100644 src/modules/monapi/plugins/demo/lib/lib.go delete mode 100644 src/modules/monapi/plugins/dns_query/dns_query.go delete mode 100644 src/modules/monapi/plugins/dns_query/dns_query_test.go delete mode 100644 src/modules/monapi/plugins/haproxy/haproxy.go delete mode 100644 
src/modules/monapi/plugins/mysql/sample.out delete mode 100644 src/modules/monapi/plugins/redis/sample.out delete mode 100644 src/modules/monapi/redisc/funcs.go delete mode 100644 src/modules/monapi/redisc/redis.go delete mode 100644 src/modules/monapi/scache/collect_cache.go delete mode 100644 src/modules/monapi/scache/init.go delete mode 100644 src/modules/monapi/tools/user.go delete mode 100644 src/modules/rdb/config/const.go delete mode 100644 src/modules/rdb/config/yaml.go delete mode 100644 src/modules/rdb/cron/sender_init.go delete mode 100644 src/modules/rdb/http/router.go delete mode 100644 src/modules/rdb/http/router_funcs.go delete mode 100644 src/modules/rdb/http/router_health.go delete mode 100644 src/modules/rdb/http/router_home.go delete mode 100644 src/modules/rdb/http/router_stats.go delete mode 100644 src/modules/rdb/rdb.go delete mode 100644 src/modules/rdb/redisc/reader.go delete mode 100644 src/modules/rdb/redisc/writer.go rename src/modules/{transfer => server}/aggr/aggr.go (95%) rename src/modules/{transfer => server}/aggr/kafka.go (100%) rename src/modules/{monapi => server}/alarm/callback.go (91%) rename src/modules/{monapi => server}/alarm/event_cleaner.go (78%) rename src/modules/{monapi => server}/alarm/event_consumer.go (94%) rename src/modules/{monapi => server}/alarm/event_merge.go (85%) rename src/modules/{monapi => server}/alarm/event_reader.go (92%) rename src/modules/{monapi => server}/alarm/mask.go (86%) rename src/modules/{monapi => server}/alarm/stra.go (88%) rename src/modules/{rdb => server}/auth/auth.go (85%) rename src/modules/{rdb => server}/auth/authenticator.go (97%) rename src/modules/{transfer => server}/backend/datasource.go (97%) rename src/modules/{transfer => server}/backend/influxdb/influxdb.go (97%) rename src/modules/{transfer => server}/backend/influxdb/model.go (98%) rename src/modules/{transfer => server}/backend/influxdb/query.go (99%) rename src/modules/{transfer => server}/backend/init.go (88%) rename 
src/modules/{transfer => server}/backend/kafka.go (97%) rename src/modules/{transfer => server}/backend/m3db/.gitignore (100%) rename src/modules/{transfer => server}/backend/m3db/benchmark/benchmark.yml (100%) rename src/modules/{transfer => server}/backend/m3db/benchmark/main.go (95%) rename src/modules/{transfer => server}/backend/m3db/convert.go (96%) rename src/modules/{transfer => server}/backend/m3db/m3db.go (99%) rename src/modules/{transfer => server}/backend/m3db/query.go (99%) rename src/modules/{transfer => server}/backend/opentsdb.go (95%) rename src/modules/{transfer => server}/backend/tsdb/index.go (84%) rename src/modules/{transfer => server}/backend/tsdb/query.go (98%) rename src/modules/{transfer => server}/backend/tsdb/ring.go (100%) rename src/modules/{transfer => server}/backend/tsdb/tsdb.go (97%) rename src/modules/{monapi/acache/mask.go => server/cache/alarm_mask.go} (97%) create mode 100644 src/modules/server/cache/alarm_stra.go rename src/modules/{rdb => server}/cache/cache.go (94%) rename src/modules/{rdb => server}/cache/config.go (95%) create mode 100644 src/modules/server/cache/init.go rename src/modules/{judge/cache/event.go => server/cache/judge_event.go} (89%) rename src/modules/{judge/cache/history.go => server/cache/judge_history.go} (97%) rename src/modules/{judge/cache/index.go => server/cache/judge_index.go} (100%) rename src/modules/{judge/cache/linkedlist.go => server/cache/judge_linkedlist.go} (98%) rename src/modules/{judge/cache/stra.go => server/cache/judge_stra.go} (91%) rename src/modules/{monapi/scache/aggr_cache.go => server/cache/monapi_aggr.go} (88%) create mode 100644 src/modules/server/cache/monapi_collect.go create mode 100644 src/modules/server/cache/monapi_snmp_metric.go rename src/modules/{monapi/scache/stra_cache.go => server/cache/monapi_stra.go} (94%) rename src/modules/{monapi/scache/collectrule.go => server/cache/prober_collect.go} (89%) rename src/modules/{monapi/scache => server/cache}/ring.go (99%) 
rename src/modules/{rdb => server}/cache/session.go (100%) rename src/modules/{transfer/cache/aggr.go => server/cache/transfer_aggr.go} (95%) rename src/modules/{transfer/cache/queue.go => server/cache/transfer_queue.go} (100%) rename src/modules/{transfer/cache/stra.go => server/cache/transfer_stra.go} (94%) rename src/modules/{transfer => server}/calc/aggr.go (98%) rename src/modules/{monapi => server}/collector/basecollector.go (96%) rename src/modules/{monapi => server}/collector/collector.go (96%) rename src/modules/{monapi => server}/collector/template.go (100%) create mode 100644 src/modules/server/config/config.go rename src/modules/{index => server}/config/const.go (100%) rename src/modules/{rdb => server}/config/i18n.go (99%) rename src/modules/{rdb => server}/config/ops.go (100%) rename src/modules/{rdb => server}/cron/cleaner.go (82%) create mode 100644 src/modules/server/cron/init.go rename src/modules/{monapi/scache/judge.go => server/cron/judge_ring.go} (58%) create mode 100644 src/modules/server/cron/judge_stra.go rename src/modules/{monapi/scache/aggr.go => server/cron/monapi_aggr.go} (92%) create mode 100644 src/modules/server/cron/monapi_api_collect.go create mode 100644 src/modules/server/cron/monapi_api_detector.go rename src/modules/{monapi/scache/collect.go => server/cron/monapi_collect.go} (95%) create mode 100644 src/modules/server/cron/monapi_snmp_collect.go create mode 100644 src/modules/server/cron/monapi_snmp_detector.go create mode 100644 src/modules/server/cron/monapi_snmp_hw.go rename src/modules/{monapi/scache/stra.go => server/cron/monapi_stra.go} (89%) create mode 100644 src/modules/server/cron/report.go rename src/modules/{rdb => server}/cron/sender_im.go (84%) create mode 100644 src/modules/server/cron/sender_init.go rename src/modules/{rdb => server}/cron/sender_mail.go (85%) rename src/modules/{rdb => server}/cron/sender_sms.go (74%) rename src/modules/{rdb => server}/cron/sender_voice.go (74%) rename 
src/modules/{transfer/cron/aggr.go => server/cron/transfer_aggr.go} (58%) rename src/modules/{transfer/cron/pool.go => server/cron/transfer_pool.go} (68%) create mode 100644 src/modules/server/cron/transfer_queue.go create mode 100644 src/modules/server/cron/transfer_stra.go rename src/modules/{rdb => server}/dingtalk/dingtalk.go (93%) rename src/modules/{job/http/router_funcs.go => server/http/http_funcs.go} (65%) rename src/modules/{rdb => server}/http/http_middleware.go (91%) rename src/modules/{rdb => server}/http/http_server.go (82%) create mode 100644 src/modules/server/http/router.go rename src/modules/{monapi => server}/http/router_aggr.go (93%) rename src/modules/{rdb => server}/http/router_auth.go (94%) rename src/modules/{monapi => server}/http/router_chart.go (98%) rename src/modules/{monapi => server}/http/router_collect.go (89%) rename src/modules/{rdb => server}/http/router_configs.go (96%) rename src/modules/{rdb => server}/http/router_container.go (100%) rename src/modules/{monapi => server}/http/router_event.go (99%) rename src/modules/{rdb => server}/http/router_hbs.go (96%) rename src/modules/{monapi/http/router_sys.go => server/http/router_health.go} (56%) create mode 100644 src/modules/server/http/router_home.go rename src/modules/{ams => server}/http/router_host.go (57%) rename src/modules/{ams => server}/http/router_host_field.go (97%) rename src/modules/{monapi => server}/http/router_index.go (90%) create mode 100644 src/modules/server/http/router_judge.go rename src/modules/{rdb => server}/http/router_log.go (97%) rename src/modules/{monapi => server}/http/router_maskconf.go (98%) create mode 100644 src/modules/server/http/router_mib.go create mode 100644 src/modules/server/http/router_nethw.go rename src/modules/{rdb => server}/http/router_node.go (97%) rename src/modules/{rdb => server}/http/router_node_cate.go (97%) rename src/modules/{rdb => server}/http/router_node_cate_field.go (96%) rename src/modules/{rdb => 
server}/http/router_node_field_value.go (90%) rename src/modules/{rdb => server}/http/router_node_role.go (98%) rename src/modules/{rdb => server}/http/router_node_trash.go (91%) rename src/modules/{rdb => server}/http/router_ops.go (77%) rename src/modules/{rdb => server}/http/router_perm.go (95%) rename src/modules/{transfer/http/push_router.go => server/http/router_push.go} (50%) rename src/modules/{transfer/http/query_router.go => server/http/router_query.go} (79%) rename src/modules/{rdb => server}/http/router_resource.go (99%) rename src/modules/{rdb => server}/http/router_role.go (98%) rename src/modules/{monapi => server}/http/router_screen.go (99%) rename src/modules/{rdb => server}/http/router_self.go (93%) rename src/modules/{rdb => server}/http/router_sender.go (75%) rename src/modules/{rdb => server}/http/router_sso.go (91%) rename src/modules/{transfer/http/health_router.go => server/http/router_stats.go} (57%) rename src/modules/{monapi => server}/http/router_stra.go (92%) rename src/modules/{job => server}/http/router_task.go (96%) rename src/modules/{job => server}/http/router_task_tpl.go (99%) rename src/modules/{rdb => server}/http/router_team.go (99%) rename src/modules/{monapi/http/router_tmpchart.go => server/http/router_tmpchar.go} (91%) rename src/modules/{monapi => server}/http/router_tpl.go (57%) rename src/modules/{rdb => server}/http/router_tree.go (99%) rename src/modules/{rdb => server}/http/router_user.go (98%) rename src/modules/{rdb => server/http}/session/session.go (96%) rename src/modules/{rdb => server/http}/session/session_db.go (93%) rename src/modules/{rdb => server/http}/session/session_mem.go (90%) rename src/modules/{rdb => server/http}/session/session_options.go (100%) rename src/modules/{rdb => server/http}/session/session_test.go (100%) rename src/modules/{transfer/backend/judge.go => server/judge/backend.go} (77%) rename src/modules/{judge => server}/judge/func.go (99%) rename src/modules/{judge => 
server}/judge/judge.go (93%) rename src/modules/{judge => server}/judge/nodata.go (92%) rename src/modules/{judge/backend => server/judge}/query/index.go (74%) rename src/modules/{judge/backend => server/judge}/query/init.go (75%) rename src/modules/{judge/backend => server/judge}/query/query.go (95%) rename src/modules/{monapi => server}/notify/notify.go (83%) create mode 100644 src/modules/server/plugins/all/all.go rename src/modules/{monapi => server}/plugins/all/dlopen.go (100%) rename src/modules/{monapi => server}/plugins/api/api.go (95%) rename src/modules/{monapi => server}/plugins/demo/demo.go (77%) rename src/modules/{monapi => server}/plugins/demo/demo/demo.go (100%) rename src/modules/{monapi => server}/plugins/demo/demo_test.go (69%) create mode 100644 src/modules/server/plugins/demo/lib/lib.go create mode 100644 src/modules/server/plugins/dns_query/dns_query.go create mode 100644 src/modules/server/plugins/dns_query/dns_query_test.go rename src/modules/{monapi => server}/plugins/elasticsearch/elasticsearch.go (95%) rename src/modules/{monapi => server}/plugins/elasticsearch/elasticsearch_test.go (79%) rename src/modules/{monapi => server}/plugins/github/github.go (86%) rename src/modules/{monapi => server}/plugins/github/github_test.go (72%) create mode 100644 src/modules/server/plugins/haproxy/haproxy.go rename src/modules/{monapi => server}/plugins/haproxy/haproxy/haproxy.go (100%) rename src/modules/{monapi => server}/plugins/http_response/http_response.go (76%) rename src/modules/{monapi => server}/plugins/http_response/http_response_test.go (79%) rename src/modules/{monapi => server}/plugins/log/log.go (97%) rename src/modules/{monapi => server}/plugins/mongodb/mongodb.go (94%) rename src/modules/{monapi => server}/plugins/mongodb/mongodb_test.go (85%) rename src/modules/{monapi => server}/plugins/mysql/mysql.go (82%) rename src/modules/{monapi => server}/plugins/mysql/mysql_test.go (82%) rename src/modules/{monapi => 
server}/plugins/net_response/net_response.go (87%) rename src/modules/{monapi => server}/plugins/net_response/net_response_test.go (70%) rename src/modules/{monapi => server}/plugins/nginx/nginx.go (64%) rename src/modules/{monapi => server}/plugins/nginx/nginx_test.go (79%) rename src/modules/{monapi => server}/plugins/ping/ping.go (94%) rename src/modules/{monapi => server}/plugins/ping/ping/exec.go (100%) rename src/modules/{monapi => server}/plugins/ping/ping/exec_unix.go (100%) rename src/modules/{monapi => server}/plugins/ping/ping/exec_windows.go (100%) rename src/modules/{monapi => server}/plugins/ping/ping/ping.go (100%) rename src/modules/{monapi => server}/plugins/ping/ping/ping_notwindows.go (100%) rename src/modules/{monapi => server}/plugins/ping/ping/ping_test.go (100%) rename src/modules/{monapi => server}/plugins/ping/ping/ping_windows.go (100%) rename src/modules/{monapi => server}/plugins/ping/ping/ping_windows_test.go (100%) rename src/modules/{monapi => server}/plugins/ping/ping_test.go (71%) rename src/modules/{monapi => server}/plugins/plugin/plugin.go (97%) rename src/modules/{monapi => server}/plugins/port/port.go (97%) rename src/modules/{monapi => server}/plugins/proc/proc.go (97%) rename src/modules/{monapi => server}/plugins/prometheus/prometheus.go (94%) rename src/modules/{monapi => server}/plugins/prometheus/prometheus_test.go (97%) rename src/modules/{monapi => server}/plugins/rabbitmq/rabbitmq.go (50%) rename src/modules/{monapi => server}/plugins/redis/redis.go (94%) rename src/modules/{monapi => server}/plugins/redis/redist_test.go (83%) rename src/modules/{monapi => server}/plugins/tengine/tengine.go (80%) rename src/modules/{monapi => server}/plugins/tengine/tengine_test.go (78%) rename src/modules/{monapi => server}/plugins/types.go (96%) rename src/modules/{monapi => server}/plugins/util.go (94%) rename src/modules/{monapi => server}/plugins/zookeeper/zookeeper.go (91%) rename src/modules/{monapi => 
server}/plugins/zookeeper/zookeeper_test.go (78%) rename src/modules/{rdb => server}/rabbitmq/conn.go (74%) rename src/modules/{rdb => server}/rabbitmq/queue_consume.go (100%) rename src/modules/{rdb => server}/rabbitmq/request_handler.go (98%) create mode 100644 src/modules/server/redisc/funcs.go rename src/modules/{rdb => server}/redisc/redis.go (57%) create mode 100644 src/modules/server/rpc/ams_host.go create mode 100644 src/modules/server/rpc/hbs_heartbeat.go rename src/modules/{job/rpc/meta.go => server/rpc/job_meta.go} (56%) rename src/modules/{job/rpc/report.go => server/rpc/job_report.go} (82%) create mode 100644 src/modules/server/rpc/judge_send.go create mode 100644 src/modules/server/rpc/mon_collect.go create mode 100644 src/modules/server/rpc/mon_snmp.go rename src/modules/{job => server}/rpc/ping.go (59%) rename src/modules/{transfer => server}/rpc/rpc.go (75%) rename src/modules/{transfer/rpc/consumer.go => server/rpc/transfer_consumer.go} (90%) rename src/modules/{transfer/rpc/push.go => server/rpc/transfer_push.go} (84%) rename src/modules/{transfer/rpc/query.go => server/rpc/transfer_query.go} (54%) create mode 100644 src/modules/server/server.go rename src/modules/{job/service/scheduler.go => server/service/job_scheduler.go} (98%) rename src/modules/{job/service/timeout.go => server/service/job_timeout.go} (95%) rename src/modules/{rdb => server}/ssoc/sso.go (97%) rename src/modules/{job/timer/heartbeat.go => server/timer/job_heartbeat.go} (93%) rename src/modules/{job/timer/host_doing.go => server/timer/job_host_doing.go} (94%) rename src/modules/{job/timer/scheduler.go => server/timer/job_scheduler.go} (89%) rename src/modules/{job/timer/cleaner.go => server/timer/job_task_cleaner.go} (92%) rename src/modules/{rdb => server}/wechat/wechat.go (96%) delete mode 100644 src/modules/transfer/config/config.go delete mode 100644 src/modules/transfer/config/const.go delete mode 100644 src/modules/transfer/cron/init.go delete mode 100644 
src/modules/transfer/cron/queue.go delete mode 100644 src/modules/transfer/cron/stra.go delete mode 100644 src/modules/transfer/http/http_server.go delete mode 100644 src/modules/transfer/http/router_funcs.go delete mode 100644 src/modules/transfer/http/routes.go delete mode 100644 src/modules/transfer/transfer.go delete mode 100644 src/modules/tsdb/backend/rpc/init.go delete mode 100644 src/modules/tsdb/backend/rpc/push.go delete mode 100644 src/modules/tsdb/cache/cache.go delete mode 100644 src/modules/tsdb/cache/chunk.go delete mode 100644 src/modules/tsdb/cache/chunks.go delete mode 100644 src/modules/tsdb/cache/iter.go delete mode 100644 src/modules/tsdb/cache/point.go delete mode 100644 src/modules/tsdb/config/config.go delete mode 100644 src/modules/tsdb/config/const.go delete mode 100644 src/modules/tsdb/http/http.go delete mode 100644 src/modules/tsdb/http/middleware/logger.go delete mode 100644 src/modules/tsdb/http/middleware/recovery.go delete mode 100644 src/modules/tsdb/http/render/render.go delete mode 100644 src/modules/tsdb/http/routes/health_router.go delete mode 100644 src/modules/tsdb/http/routes/op_router.go delete mode 100644 src/modules/tsdb/http/routes/rotuer.go delete mode 100644 src/modules/tsdb/index/cache.go delete mode 100644 src/modules/tsdb/index/index.go delete mode 100644 src/modules/tsdb/index/init.go delete mode 100644 src/modules/tsdb/index/update_all.go delete mode 100644 src/modules/tsdb/index/update_incr.go delete mode 100644 src/modules/tsdb/migrate/init.go delete mode 100644 src/modules/tsdb/migrate/push.go delete mode 100644 src/modules/tsdb/migrate/query.go delete mode 100644 src/modules/tsdb/migrate/ring.go delete mode 100644 src/modules/tsdb/migrate/worker.go delete mode 100644 src/modules/tsdb/rpc/push.go delete mode 100644 src/modules/tsdb/rpc/query.go delete mode 100644 src/modules/tsdb/rpc/rpc.go delete mode 100644 src/modules/tsdb/rrdtool/rrdtool.go delete mode 100644 src/modules/tsdb/rrdtool/sync_disk.go delete 
mode 100644 src/modules/tsdb/tsdb.go delete mode 100644 src/modules/tsdb/utils/utils.go delete mode 100644 src/toolkits/compress/tar.go delete mode 100644 src/toolkits/http/http.go delete mode 100644 src/toolkits/http/middleware/logger.go delete mode 100644 src/toolkits/http/middleware/recovery.go delete mode 100644 src/toolkits/http/render/funcs.go create mode 100644 vendor/github.com/alouca/gologger/.gitignore create mode 100644 vendor/github.com/alouca/gologger/README.md create mode 100644 vendor/github.com/alouca/gologger/logger.go delete mode 100644 vendor/github.com/codegangsta/negroni/.gitignore delete mode 100644 vendor/github.com/codegangsta/negroni/.travis.yml delete mode 100644 vendor/github.com/codegangsta/negroni/CHANGELOG.md delete mode 100644 vendor/github.com/codegangsta/negroni/README.md delete mode 100644 vendor/github.com/codegangsta/negroni/doc.go delete mode 100644 vendor/github.com/codegangsta/negroni/logger.go delete mode 100644 vendor/github.com/codegangsta/negroni/negroni.go delete mode 100644 vendor/github.com/codegangsta/negroni/recovery.go delete mode 100644 vendor/github.com/codegangsta/negroni/response_writer.go delete mode 100644 vendor/github.com/codegangsta/negroni/response_writer_pusher.go delete mode 100644 vendor/github.com/codegangsta/negroni/static.go delete mode 100644 vendor/github.com/dgryski/go-tsz/.gitignore delete mode 100644 vendor/github.com/dgryski/go-tsz/.travis.yml delete mode 100644 vendor/github.com/dgryski/go-tsz/LICENSE delete mode 100644 vendor/github.com/dgryski/go-tsz/Makefile delete mode 100644 vendor/github.com/dgryski/go-tsz/README.md delete mode 100644 vendor/github.com/dgryski/go-tsz/VERSION delete mode 100644 vendor/github.com/dgryski/go-tsz/bstream.go delete mode 100644 vendor/github.com/dgryski/go-tsz/fuzz.go delete mode 100644 vendor/github.com/dgryski/go-tsz/testdata/data.go delete mode 100644 vendor/github.com/dgryski/go-tsz/tsz.go rename vendor/github.com/{unrolled/render => 
freedomkk-qfeng/go-fastping}/.gitignore (91%) rename vendor/github.com/{unrolled/render => freedomkk-qfeng/go-fastping}/LICENSE (96%) create mode 100644 vendor/github.com/freedomkk-qfeng/go-fastping/README.md create mode 100644 vendor/github.com/freedomkk-qfeng/go-fastping/fastping.go create mode 100644 vendor/github.com/gaochao1/gosnmp/.gitignore rename vendor/github.com/{gosnmp => gaochao1}/gosnmp/LICENSE (89%) create mode 100644 vendor/github.com/gaochao1/gosnmp/README.md create mode 100644 vendor/github.com/gaochao1/gosnmp/decode.go create mode 100644 vendor/github.com/gaochao1/gosnmp/gosnmp.go create mode 100644 vendor/github.com/gaochao1/gosnmp/helper.go create mode 100644 vendor/github.com/gaochao1/gosnmp/packet.go create mode 100644 vendor/github.com/gaochao1/sw/README.md create mode 100644 vendor/github.com/gaochao1/sw/conn.go create mode 100644 vendor/github.com/gaochao1/sw/cpustat.go create mode 100644 vendor/github.com/gaochao1/sw/descrstat.go create mode 100644 vendor/github.com/gaochao1/sw/fastping.go create mode 100644 vendor/github.com/gaochao1/sw/goping.go create mode 100644 vendor/github.com/gaochao1/sw/ifstat.go create mode 100644 vendor/github.com/gaochao1/sw/ifstat_snmpwalk.go create mode 100644 vendor/github.com/gaochao1/sw/memstat.go create mode 100644 vendor/github.com/gaochao1/sw/modelstat.go create mode 100644 vendor/github.com/gaochao1/sw/namestat.go create mode 100644 vendor/github.com/gaochao1/sw/parseip.go create mode 100644 vendor/github.com/gaochao1/sw/ping.go create mode 100644 vendor/github.com/gaochao1/sw/pingstat.go create mode 100644 vendor/github.com/gaochao1/sw/runsnmp.go create mode 100644 vendor/github.com/gaochao1/sw/uptimestat.go create mode 100644 vendor/github.com/go-ping/ping/.editorconfig create mode 100644 vendor/github.com/go-ping/ping/.gitignore create mode 100644 vendor/github.com/go-ping/ping/.golangci.yml create mode 100644 vendor/github.com/go-ping/ping/.goreleaser.yml rename vendor/github.com/{influxdata/wlog 
=> go-ping/ping}/LICENSE (95%) create mode 100644 vendor/github.com/go-ping/ping/README.md create mode 100644 vendor/github.com/go-ping/ping/go.mod create mode 100644 vendor/github.com/go-ping/ping/go.sum create mode 100644 vendor/github.com/go-ping/ping/ping.go delete mode 100644 vendor/github.com/gorilla/mux/AUTHORS delete mode 100644 vendor/github.com/gorilla/mux/README.md delete mode 100644 vendor/github.com/gorilla/mux/context.go delete mode 100644 vendor/github.com/gorilla/mux/doc.go delete mode 100644 vendor/github.com/gorilla/mux/go.mod delete mode 100644 vendor/github.com/gorilla/mux/middleware.go delete mode 100644 vendor/github.com/gorilla/mux/mux.go delete mode 100644 vendor/github.com/gorilla/mux/regexp.go delete mode 100644 vendor/github.com/gorilla/mux/route.go delete mode 100644 vendor/github.com/gorilla/mux/test_helpers.go delete mode 100644 vendor/github.com/gosnmp/gosnmp/.gitignore delete mode 100644 vendor/github.com/gosnmp/gosnmp/.golangci.yml delete mode 100644 vendor/github.com/gosnmp/gosnmp/AUTHORS.md delete mode 100644 vendor/github.com/gosnmp/gosnmp/CHANGELOG.md delete mode 100644 vendor/github.com/gosnmp/gosnmp/Dockerfile delete mode 100644 vendor/github.com/gosnmp/gosnmp/README.md delete mode 100644 vendor/github.com/gosnmp/gosnmp/asn1ber_string.go delete mode 100644 vendor/github.com/gosnmp/gosnmp/build_tests.sh delete mode 100644 vendor/github.com/gosnmp/gosnmp/go.mod delete mode 100644 vendor/github.com/gosnmp/gosnmp/go.sum delete mode 100644 vendor/github.com/gosnmp/gosnmp/goimports2 delete mode 100644 vendor/github.com/gosnmp/gosnmp/goimports2_all delete mode 100644 vendor/github.com/gosnmp/gosnmp/gosnmp.go delete mode 100644 vendor/github.com/gosnmp/gosnmp/helper.go delete mode 100644 vendor/github.com/gosnmp/gosnmp/interface.go delete mode 100644 vendor/github.com/gosnmp/gosnmp/local_tests.sh delete mode 100644 vendor/github.com/gosnmp/gosnmp/marshal.go delete mode 100644 vendor/github.com/gosnmp/gosnmp/snmp_users.sh delete mode 
100644 vendor/github.com/gosnmp/gosnmp/snmperror_string.go delete mode 100644 vendor/github.com/gosnmp/gosnmp/snmpv3authprotocol_string.go delete mode 100644 vendor/github.com/gosnmp/gosnmp/snmpv3privprotocol_string.go delete mode 100644 vendor/github.com/gosnmp/gosnmp/trap.go delete mode 100644 vendor/github.com/gosnmp/gosnmp/trap.md delete mode 100644 vendor/github.com/gosnmp/gosnmp/v3.go delete mode 100644 vendor/github.com/gosnmp/gosnmp/v3_testing_credentials.go delete mode 100644 vendor/github.com/gosnmp/gosnmp/v3_usm.go delete mode 100644 vendor/github.com/gosnmp/gosnmp/walk.go delete mode 100644 vendor/github.com/influxdata/telegraf/internal/snmp/config.go delete mode 100644 vendor/github.com/influxdata/telegraf/internal/snmp/wrapper.go create mode 100644 vendor/github.com/influxdata/telegraf/plugins/inputs/dns_query/README.md create mode 100644 vendor/github.com/influxdata/telegraf/plugins/inputs/dns_query/dns_query.go create mode 100644 vendor/github.com/influxdata/telegraf/plugins/inputs/http_response/README.md create mode 100644 vendor/github.com/influxdata/telegraf/plugins/inputs/http_response/http_response.go create mode 100644 vendor/github.com/influxdata/telegraf/plugins/inputs/net_response/README.md create mode 100644 vendor/github.com/influxdata/telegraf/plugins/inputs/net_response/net_response.go delete mode 100644 vendor/github.com/influxdata/telegraf/plugins/inputs/snmp/README.md delete mode 100644 vendor/github.com/influxdata/telegraf/plugins/inputs/snmp/snmp.go delete mode 100644 vendor/github.com/influxdata/telegraf/plugins/inputs/snmp/snmp_mocks_generate.go delete mode 100644 vendor/github.com/influxdata/wlog/README.md delete mode 100644 vendor/github.com/influxdata/wlog/writer.go create mode 100644 vendor/github.com/miekg/dns/.codecov.yml create mode 100644 vendor/github.com/miekg/dns/.gitignore create mode 100644 vendor/github.com/miekg/dns/.travis.yml create mode 100644 vendor/github.com/miekg/dns/AUTHORS create mode 100644 
vendor/github.com/miekg/dns/CODEOWNERS create mode 100644 vendor/github.com/miekg/dns/CONTRIBUTORS create mode 100644 vendor/github.com/miekg/dns/COPYRIGHT rename vendor/github.com/{gorilla/mux => miekg/dns}/LICENSE (76%) create mode 100644 vendor/github.com/miekg/dns/Makefile.fuzz create mode 100644 vendor/github.com/miekg/dns/Makefile.release create mode 100644 vendor/github.com/miekg/dns/README.md create mode 100644 vendor/github.com/miekg/dns/acceptfunc.go create mode 100644 vendor/github.com/miekg/dns/client.go create mode 100644 vendor/github.com/miekg/dns/clientconfig.go create mode 100644 vendor/github.com/miekg/dns/dane.go create mode 100644 vendor/github.com/miekg/dns/defaults.go create mode 100644 vendor/github.com/miekg/dns/dns.go create mode 100644 vendor/github.com/miekg/dns/dnssec.go create mode 100644 vendor/github.com/miekg/dns/dnssec_keygen.go create mode 100644 vendor/github.com/miekg/dns/dnssec_keyscan.go create mode 100644 vendor/github.com/miekg/dns/dnssec_privkey.go create mode 100644 vendor/github.com/miekg/dns/doc.go create mode 100644 vendor/github.com/miekg/dns/duplicate.go create mode 100644 vendor/github.com/miekg/dns/edns.go create mode 100644 vendor/github.com/miekg/dns/format.go create mode 100644 vendor/github.com/miekg/dns/fuzz.go create mode 100644 vendor/github.com/miekg/dns/generate.go create mode 100644 vendor/github.com/miekg/dns/go.mod create mode 100644 vendor/github.com/miekg/dns/go.sum create mode 100644 vendor/github.com/miekg/dns/labels.go create mode 100644 vendor/github.com/miekg/dns/listen_go111.go create mode 100644 vendor/github.com/miekg/dns/listen_go_not111.go create mode 100644 vendor/github.com/miekg/dns/msg.go create mode 100644 vendor/github.com/miekg/dns/msg_helpers.go create mode 100644 vendor/github.com/miekg/dns/msg_truncate.go create mode 100644 vendor/github.com/miekg/dns/nsecx.go create mode 100644 vendor/github.com/miekg/dns/privaterr.go create mode 100644 vendor/github.com/miekg/dns/reverse.go create 
mode 100644 vendor/github.com/miekg/dns/sanitize.go create mode 100644 vendor/github.com/miekg/dns/scan.go create mode 100644 vendor/github.com/miekg/dns/scan_rr.go create mode 100644 vendor/github.com/miekg/dns/serve_mux.go create mode 100644 vendor/github.com/miekg/dns/server.go create mode 100644 vendor/github.com/miekg/dns/sig0.go create mode 100644 vendor/github.com/miekg/dns/singleinflight.go create mode 100644 vendor/github.com/miekg/dns/smimea.go create mode 100644 vendor/github.com/miekg/dns/tlsa.go create mode 100644 vendor/github.com/miekg/dns/tsig.go create mode 100644 vendor/github.com/miekg/dns/types.go create mode 100644 vendor/github.com/miekg/dns/udp.go create mode 100644 vendor/github.com/miekg/dns/udp_windows.go create mode 100644 vendor/github.com/miekg/dns/update.go create mode 100644 vendor/github.com/miekg/dns/version.go create mode 100644 vendor/github.com/miekg/dns/xfr.go create mode 100644 vendor/github.com/miekg/dns/zduplicate.go create mode 100644 vendor/github.com/miekg/dns/zmsg.go create mode 100644 vendor/github.com/miekg/dns/ztypes.go delete mode 100644 vendor/github.com/open-falcon/rrdlite/.gitignore delete mode 100644 vendor/github.com/open-falcon/rrdlite/LICENSE delete mode 100644 vendor/github.com/open-falcon/rrdlite/README.md delete mode 100644 vendor/github.com/open-falcon/rrdlite/fnv.h delete mode 100644 vendor/github.com/open-falcon/rrdlite/hash_32.c delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd.go delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd.h delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_c.go delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_config.h delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_config_bottom.h delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_create.c delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_diff.c delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_error.c delete mode 100644 
vendor/github.com/open-falcon/rrdlite/rrd_error.h delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_error.sh delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_fetch.c delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_format.c delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_format.h delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_hw.c delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_hw.h delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_hw_math.c delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_hw_math.h delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_hw_update.c delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_hw_update.h delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_info.c delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_is_thread_safe.h delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_nan_inf.c delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_open.c delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_parsetime.c delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_parsetime.h delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_rpncalc.c delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_rpncalc.h delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_tool.h delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_update.c delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_utils.c delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrd_version.c delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrdfunc.c delete mode 100644 vendor/github.com/open-falcon/rrdlite/rrdfunc.h delete mode 100644 vendor/github.com/open-falcon/rrdlite/unused.h create mode 100644 vendor/github.com/sparrc/go-ping/.gitignore rename vendor/github.com/{codegangsta/negroni => sparrc/go-ping}/LICENSE (96%) create mode 100644 
vendor/github.com/sparrc/go-ping/README.md create mode 100644 vendor/github.com/sparrc/go-ping/ping.go create mode 100644 vendor/github.com/toolkits/file/downloader.go create mode 100644 vendor/github.com/toolkits/file/file.go create mode 100644 vendor/github.com/toolkits/file/reader.go create mode 100644 vendor/github.com/toolkits/file/writer.go create mode 100644 vendor/github.com/toolkits/sys/cmd.go delete mode 100644 vendor/github.com/unrolled/render/.travis.yml delete mode 100644 vendor/github.com/unrolled/render/README.md delete mode 100644 vendor/github.com/unrolled/render/buffer.go delete mode 100644 vendor/github.com/unrolled/render/doc.go delete mode 100644 vendor/github.com/unrolled/render/engine.go delete mode 100644 vendor/github.com/unrolled/render/fs.go delete mode 100644 vendor/github.com/unrolled/render/go.mod delete mode 100644 vendor/github.com/unrolled/render/go.sum delete mode 100644 vendor/github.com/unrolled/render/helpers.go delete mode 100644 vendor/github.com/unrolled/render/helpers_pre16.go delete mode 100644 vendor/github.com/unrolled/render/render.go create mode 100644 vendor/golang.org/x/net/icmp/dstunreach.go create mode 100644 vendor/golang.org/x/net/icmp/echo.go create mode 100644 vendor/golang.org/x/net/icmp/endpoint.go create mode 100644 vendor/golang.org/x/net/icmp/extension.go create mode 100644 vendor/golang.org/x/net/icmp/helper_posix.go create mode 100644 vendor/golang.org/x/net/icmp/interface.go create mode 100644 vendor/golang.org/x/net/icmp/ipv4.go create mode 100644 vendor/golang.org/x/net/icmp/ipv6.go create mode 100644 vendor/golang.org/x/net/icmp/listen_posix.go create mode 100644 vendor/golang.org/x/net/icmp/listen_stub.go create mode 100644 vendor/golang.org/x/net/icmp/message.go create mode 100644 vendor/golang.org/x/net/icmp/messagebody.go create mode 100644 vendor/golang.org/x/net/icmp/mpls.go create mode 100644 vendor/golang.org/x/net/icmp/multipart.go create mode 100644 
vendor/golang.org/x/net/icmp/packettoobig.go create mode 100644 vendor/golang.org/x/net/icmp/paramprob.go create mode 100644 vendor/golang.org/x/net/icmp/sys_freebsd.go create mode 100644 vendor/golang.org/x/net/icmp/timeexceeded.go diff --git a/.gitignore b/.gitignore index f8da1179..6cf01bb1 100644 --- a/.gitignore +++ b/.gitignore @@ -45,16 +45,6 @@ _test /n9e-* -/src/modules/agent/agent -/src/modules/ams/ams -/src/modules/index/index -/src/modules/job/job -/src/modules/judge/judge -/src/modules/monapi/monapi -/src/modules/rdb/rdb -/src/modules/transfer/transfer -/src/modules/tsdb/tsdb - tmp/ main Makefile diff --git a/changelog b/changelog index e96622b3..a8e1b6c4 100644 --- a/changelog +++ b/changelog @@ -125,4 +125,18 @@ - prober去掉组件采集默认的白名单设置 升级方法: - 替换n9e-rdb n9e-prober n9e-monapi n9e-transfer二进制 - - 将etc/password-changed-email.tpl放到生产环境的etc目录下 \ No newline at end of file + - 将etc/password-changed-email.tpl放到生产环境的etc目录下 + +4.0.0 +升级内容: + - 服务端模块合并为一个模块 + - agentd和server的调用全部走rpc + +重新安装:见 https://n9e.didiyun.com/docs/install/binary/ + +升级方法: + - 使用新的etc替换掉原来的etc + - 使用etc/nginx.conf替换原来的nginx.conf + - n9e-prober替换旧的n9e-prober + - n9e-agentd替换n9e-agent + - n9e-server替换n9e-rdb、n9e-ams、n9e-job、n9e-monapi、n9e-transfer、n9e-judge \ No newline at end of file diff --git a/control b/control index 6cd9e4ac..4ec012fe 100755 --- a/control +++ b/control @@ -1,7 +1,7 @@ #!/bin/bash # release version -version=3.8.0 +version=4.0.0 CWD=$(cd $(dirname $0)/; pwd) cd $CWD @@ -14,15 +14,8 @@ usage() start_all() { - test -x n9e-rdb && start rdb - test -x n9e-job && start job - test -x n9e-ams && start ams - test -x n9e-monapi && start monapi - test -x n9e-tsdb && start tsdb - test -x n9e-index && start index - test -x n9e-transfer && start transfer - test -x n9e-judge && start judge - test -x n9e-agent && start agent + test -x n9e-server && start server + test -x n9e-agentd && start agentd test -x n9e-prober && start prober } @@ -68,16 +61,9 @@ start() stop_all() { - test -x 
n9e-agent && stop agent - test -x n9e-judge && stop judge - test -x n9e-transfer && stop transfer - test -x n9e-index && stop index - test -x n9e-tsdb && stop tsdb - test -x n9e-monapi && stop monapi - test -x n9e-ams && stop ams - test -x n9e-job && stop job - test -x n9e-rdb && stop rdb + test -x n9e-server && stop server test -x n9e-prober && stop prober + test -x n9e-agentd && stop agentd } stop() @@ -160,15 +146,8 @@ build() mod=$1 if [ "x${mod}" = "x" ]; then - build_one monapi - build_one transfer - build_one index - build_one judge - build_one agent - build_one tsdb - build_one rdb - build_one ams - build_one job + build_one server + build_one agentd build_one prober return fi @@ -182,15 +161,8 @@ build_local() mod=$1 if [ "x${mod}" = "x" ]; then - build_local_one monapi - build_local_one transfer - build_local_one index - build_local_one judge - build_local_one agent - build_local_one tsdb - build_local_one rdb - build_local_one ams - build_local_one job + build_local_one server + build_local_one agentd build_local_one prober return fi @@ -242,8 +214,9 @@ exec() fi done else - echo $1 + echo "todo $1 at "$(date "+%Y-%m-%d %H:%M:%S") $1 + echo "done $1 at "$(date "+%Y-%m-%d %H:%M:%S") fi } diff --git a/etc/address.yml b/etc/address.yml index 0a7e06d7..ed5a68ed 100644 --- a/etc/address.yml +++ b/etc/address.yml @@ -1,51 +1,12 @@ --- -rdb: +server: http: 0.0.0.0:8000 - addresses: - - 127.0.0.1 - -ams: - http: 0.0.0.0:8002 - addresses: - - 127.0.0.1 - -job: - http: 0.0.0.0:8004 - rpc: 0.0.0.0:8005 - addresses: - - 127.0.0.1 - -monapi: - http: 0.0.0.0:8006 - addresses: - - 127.0.0.1 - -transfer: - http: 0.0.0.0:8008 - rpc: 0.0.0.0:8009 - addresses: - - 127.0.0.1 - -tsdb: - http: 0.0.0.0:8010 - rpc: 0.0.0.0:8011 - -index: - http: 0.0.0.0:8012 - rpc: 0.0.0.0:8013 - addresses: - - 127.0.0.1 - -judge: - http: 0.0.0.0:8014 - rpc: 0.0.0.0:8015 + rpc: 0.0.0.0:8001 addresses: - 127.0.0.1 prober: http: 0.0.0.0:8023 - addresses: - - 127.0.0.1 -agent: - http: 0.0.0.0:2080 
+agentd: + http: 0.0.0.0:2080 \ No newline at end of file diff --git a/etc/agent.yml b/etc/agentd.yml similarity index 97% rename from etc/agent.yml rename to etc/agentd.yml index b6c8cdf0..22440362 100644 --- a/etc/agent.yml +++ b/etc/agentd.yml @@ -1,5 +1,5 @@ logger: - dir: logs/agent + dir: logs/agentd level: INFO keepHours: 24 @@ -75,5 +75,6 @@ sys: - cpu.core.softirq - cpu.core.iowait - cpu.core.steal + #ntpServers: - # - ntp1.aliyun.com + # - ntp1.aliyun.com \ No newline at end of file diff --git a/etc/ams.yml b/etc/ams.yml deleted file mode 100644 index 704b50fe..00000000 --- a/etc/ams.yml +++ /dev/null @@ -1,12 +0,0 @@ -logger: - dir: logs/ams - level: INFO - keepHours: 24 - -http: - mode: release - cookieDomain: "" - cookieName: ecmc-sid - -tokens: - - ams-builtin-token diff --git a/etc/dict.json b/etc/dict.json index 2b057680..9e26dfee 100644 --- a/etc/dict.json +++ b/etc/dict.json @@ -1,118 +1 @@ -{ - "zh": { - "stra not found": "聚合策略为找到", - "same stra name %s in node": "同节点下策略名称 %s 已存在", - "collect type not support": "采集类型不合法", - "[%s] is blank": "参数[%s]值不能为空", - "cannot convert %s to int64": "%s 无法转为 int64 类型", - "cannot convert %s to int": "%s 无法转为 int 类型", - "arg[%s] not found": "参数[%s]没找到", - "cannot retrieve node[%d]: %v": "获取不到节点[%d],原因:%v", - "no such node[%d]": "节点[%d]不存在", - "no such task[id:%d]": "任务[%d]不存在", - "no such task tpl[id:%d]": "任务模板[%d]不存在", - "cannot retrieve screen[%d]: %v": "获取不到大盘[%d],原因:%v", - "no such screen[%d]": "大盘[%d]不存在", - "cannot retrieve subclass[%d]: %v": "获取不到大盘分组[%d],原因:%v", - "no such subclass[%d]": "大盘分组[%d]不存在", - "cannot retrieve chart[%d]: %v": "获取不到大盘图表[%d],原因:%v", - "no such chart[%d]": "大盘图表[%d]不存在", - "cannot retrieve eventCur[%d]: %v": "获取不到未恢复告警事件[%d],原因:%v", - "no such eventCur[%d]": "未恢复告警事件[%d]不存在", - "cannot retrieve event[%d]: %v": "获取不到告警事件[%d],原因:%v", - "no such event[%d]": "告警事件[%d]不存在", - "cannot retrieve user[%d]: %v": "获取不到用户[%d],原因:%v", - "no such user[%d]": "用户[%d]不存在", - "no such user: %s": 
"用户[%s]不存在", - "cannot retrieve team[%d]: %v": "获取不到团队[%d],原因:%v", - "no such team[%d]": "团队[%d]不存在", - "cannot retrieve role[%d]: %v": "获取不到角色[%d],原因:%v", - "no such role[%d]": "角色[%d]不存在", - "no such NodeCate[id:%d]": "节点类型[%d]没找到", - "no such field": "扩展字段为找到", - "field_type cannot modify": "字段类型不能被修改", - "arg[endpoints] empty": "参数不能[endpoints]为空", - "arg[cur_nid_paths] empty": "参数不能[cur_nid_paths]为空", - "arg[tags] empty": "参数不能[tags]为空", - "arg[hosts] empty": "参数不能[hosts]为空", - "arg[btime,etime] empty": "参数[btime,etime]不合规范", - "arg[name] empty": "参数[name]不合规范", - "arg[name] is blank": "参数[名称]不能为空", - "arg[ids] is empty": "参数[ids]不能为空", - "%s invalid": "%s 不符合规范", - "%s too long > 64": "%s 超过64长度限制", - "arg[%s] too long > %d": "参数 %s 长度不能超过 %d", - "cate is blank": "节点分类不能为空", - "uuid is blank": "uuid不能为空", - "ident is blank": "唯一标识不能为空", - "tenant is blank": "租户不能为空", - "ids is blank": "ids不能为空", - "items empty": "提交内容不能为空", - "url param[%s] is blank": "url参数[%s]不能为空", - "query param[%s] is necessary": "query参数[%s]不能为空", - "ident legal characters: [a-z0-9_-]": "唯一标识英文只能字母开头,包括数字、中划线、下划线", - "ident length should be less than 32": "唯一标识长度需小于32", - "cannot modify tenant's node-category": "租户分类不允许修改", - "cannot modify node-category to tenant": "节点分类不允许修改为租户", - "node is managed by other system": "租户正在被系统系统使用", - "resources not found by %s": "通过 %s 没有找到资源", - "cannot delete root user": "root用户不能删除", - "user not found": "用户未找到", - - "Unable to get captcha": "无法获得验证码", - "Invalid captcha answer": "错误的验证码", - "Username %s is invalid": "用户名 %s 不符合规范", - "Username %s too long > 64": "用户名 %s 太长(64)", - "Unable to get login arguments": "无法获得登陆参数", - "Deny Access from %s with whitelist control": "来自 %s 的访问被白名单规则拒绝", - "Invalid login type %s": "不支持的登陆类型 %s", - "Unable to get type, sms-code | email-code": "无法获得验证码类型", - "Unable to get code arg": "无法获得验证码类型", - "sms/email sender is disabled": "无法发送 短信/邮件 验证码", - "Invalid code type %s": "不支持的验证码类型 %s", - "Cannot find the user 
by %s": "无法用 %s 找到相关用户", - "Unable to get password": "无法获取密码", - "Invalid code": "不符合规范的验证码", - "The code is incorrect": "无效的验证码", - "The code has expired": "失效的验证码", - "Invalid arguments %s": "不合法的参数 %s", - "Login fail, check your username and password": "登陆失败,请检查用户名/密码", - "User dose not exist": "用户不存在", - "Username %s already exists": "用户名 %s 已存在", - - "Upper char": "大写字母", - "Lower char": "小写字母", - "Number": "数字", - "Special char": "特殊字符", - "Must include %s": "必须包含 %s", - "Invalid Password, %s": "密码不符合规范, %s", - "character: %s not supported": "不支持的字符 %s", - "Incorrect login/password %s times, you still have %s chances": "登陆失败%d次,你还有%d次机会", - "The limited sessions %d": "会话数量限制,最多%d个会话", - "Password has been expired": "密码已过期,请重置密码", - "User is inactive": "用户已禁用", - "User is locked": "用户已锁定", - "User is frozen": "用户已休眠", - "User is writen off": "用户已注销", - "Minimum password length %d": "密码最小长度 %d", - "Password too short (min:%d) %s": "密码太短 (最小 %d) %s", - "%s format error":"%s 所填内容不符合规范", - "%s %s format error":"%s %s 所填内容不符合规范", - "username too long (max:%d)": "用户名太长 (最长:%d)", - "dispname too long (max:%d)": "昵称太长 (最长:%d)", - "email %s or phone %s is exists": "邮箱 %s 或者 手机号 %s 已存在", - "Password is not set": "密码未设置", - "Incorrect old password": "密码错误", - "The password is the same as the old password": "密码与历史密码重复", - "phone": "手机号", - "email": "邮箱", - "username": "用户名", - "dispname": "昵称", - "Temporary user has expired": "临时账户,已过有效期", - "Invalid user status %d": "异常的用户状态 %d", - "Password expired, please change the password in time": "密码过期,请及时修改密码", - "First Login, please change the password in time": "初始登陆,请及时修改密码", - - "EOF": "" - } -} +{} \ No newline at end of file diff --git a/etc/identity.yml b/etc/identity.yml index c584b9ce..475faee3 100644 --- a/etc/identity.yml +++ b/etc/identity.yml @@ -1,9 +1,9 @@ -# 用来做心跳,给服务端上报本机ip +# for heartbeat, connected by other modules ip: specify: "" shell: ifconfig `route|grep '^default'|awk '{print $NF}'`|grep inet|awk '{print 
$2}'|head -n 1|awk -F':' '{print $NF}' -# MON、JOB, judge, prober 的客户端拿来做本机标识 +# as identity. equals to endpoint. used by agentd, prober, server ident: specify: "" shell: ifconfig `route|grep '^default'|awk '{print $NF}'`|grep inet|awk '{print $2}'|head -n 1|awk -F':' '{print $NF}' diff --git a/etc/index.yml b/etc/index.yml deleted file mode 100644 index 1700fdd6..00000000 --- a/etc/index.yml +++ /dev/null @@ -1,4 +0,0 @@ -logger: - dir: logs/index - level: INFO - keepHours: 24 \ No newline at end of file diff --git a/etc/job.yml b/etc/job.yml deleted file mode 100644 index 3477ce29..00000000 --- a/etc/job.yml +++ /dev/null @@ -1,15 +0,0 @@ -logger: - dir: logs/job - level: INFO - keepHours: 24 - -http: - mode: release - cookieDomain: "" - cookieName: ecmc-sid - -output: - # database | remote - comeFrom: database - remotePort: 2080 - diff --git a/etc/json/stra.json b/etc/json/stra.json deleted file mode 100644 index 034f1188..00000000 --- a/etc/json/stra.json +++ /dev/null @@ -1,410 +0,0 @@ -[ - { - "name": "内存利用率大于75%", - "category": 1, - "alert_dur": 60, - "recovery_dur": 0, - "recovery_notify": 1, - "enable_stime": "00:00", - "enable_etime": "23:59", - "priority": 2, - "exprs": [ - { - "eopt": ">", - "func": "all", - "metric": "mem.bytes.used.percent", - "params": [], - "threshold": 75 - } - ], - "tags": [], - "enable_days_of_week": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6 - ], - "converge": [ - 36000, - 1 - ], - "endpoints": null - }, - { - "name": "机器loadavg大于16", - "category": 1, - "alert_dur": 60, - "recovery_dur": 0, - "recovery_notify": 1, - "enable_stime": "00:00", - "enable_etime": "23:59", - "priority": 2, - "exprs": [ - { - "eopt": ">", - "func": "all", - "metric": "cpu.loadavg.1", - "params": [], - "threshold": 16 - } - ], - "tags": [], - "enable_days_of_week": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6 - ], - "converge": [ - 36000, - 1 - ], - "endpoints": null - }, - { - "name": "某磁盘无法正常读写", - "category": 1, - "alert_dur": 60, - "recovery_dur": 0, - 
"recovery_notify": 1, - "enable_stime": "00:00", - "enable_etime": "23:59", - "priority": 1, - "exprs": [ - { - "eopt": ">", - "func": "all", - "metric": "disk.rw.error", - "params": [], - "threshold": 0 - } - ], - "tags": [], - "enable_days_of_week": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6 - ], - "converge": [ - 36000, - 1 - ], - "endpoints": null - }, - { - "name": "监控agent失联", - "category": 1, - "alert_dur": 60, - "recovery_dur": 0, - "recovery_notify": 1, - "enable_stime": "00:00", - "enable_etime": "23:59", - "priority": 1, - "exprs": [ - { - "eopt": "=", - "func": "nodata", - "metric": "proc.agent.alive", - "params": [], - "threshold": 0 - } - ], - "tags": [], - "enable_days_of_week": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6 - ], - "converge": [ - 36000, - 1 - ], - "endpoints": null - }, - { - "name": "磁盘利用率达到85%", - "category": 1, - "alert_dur": 60, - "recovery_dur": 0, - "recovery_notify": 1, - "enable_stime": "00:00", - "enable_etime": "23:59", - "priority": 3, - "exprs": [ - { - "eopt": ">", - "func": "all", - "metric": "disk.bytes.used.percent", - "params": [], - "threshold": 85 - } - ], - "tags": [], - "enable_days_of_week": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6 - ], - "converge": [ - 36000, - 1 - ], - "endpoints": null - }, - { - "name": "磁盘利用率达到88%", - "category": 1, - "alert_dur": 60, - "recovery_dur": 0, - "recovery_notify": 1, - "enable_stime": "00:00", - "enable_etime": "23:59", - "priority": 2, - "exprs": [ - { - "eopt": ">", - "func": "all", - "metric": "disk.bytes.used.percent", - "params": [], - "threshold": 88 - } - ], - "tags": [], - "enable_days_of_week": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6 - ], - "converge": [ - 36000, - 1 - ], - "endpoints": null - }, - { - "name": "磁盘利用率达到92%", - "category": 1, - "alert_dur": 60, - "recovery_dur": 0, - "recovery_notify": 1, - "enable_stime": "00:00", - "enable_etime": "23:59", - "priority": 1, - "exprs": [ - { - "eopt": ">", - "func": "all", - "metric": "disk.bytes.used.percent", - "params": [], - "threshold": 92 - 
} - ], - "tags": [], - "enable_days_of_week": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6 - ], - "converge": [ - 36000, - 1 - ], - "endpoints": null - }, - { - "name": "端口挂了", - "category": 1, - "alert_dur": 60, - "recovery_dur": 0, - "recovery_notify": 1, - "enable_stime": "00:00", - "enable_etime": "23:59", - "priority": 2, - "exprs": [ - { - "eopt": "!=", - "func": "all", - "metric": "proc.port.listen", - "params": [], - "threshold": 1 - } - ], - "tags": [], - "enable_days_of_week": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6 - ], - "converge": [ - 36000, - 1 - ], - "endpoints": null - }, - { - "name": "网卡入方向丢包", - "category": 1, - "alert_dur": 60, - "recovery_dur": 0, - "recovery_notify": 1, - "enable_stime": "00:00", - "enable_etime": "23:59", - "priority": 2, - "exprs": [ - { - "eopt": ">", - "func": "all", - "metric": "net.in.dropped", - "params": [], - "threshold": 3 - } - ], - "tags": [], - "enable_days_of_week": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6 - ], - "converge": [ - 36000, - 1 - ], - "endpoints": null - }, - { - "name": "网卡出方向丢包", - "category": 1, - "alert_dur": 60, - "recovery_dur": 0, - "recovery_notify": 1, - "enable_stime": "00:00", - "enable_etime": "23:59", - "priority": 2, - "exprs": [ - { - "eopt": ">", - "func": "all", - "metric": "net.out.dropped", - "params": [], - "threshold": 3 - } - ], - "tags": [], - "enable_days_of_week": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6 - ], - "converge": [ - 36000, - 1 - ], - "endpoints": null - }, - { - "name": "进程总数超过3000", - "category": 1, - "alert_dur": 60, - "recovery_dur": 0, - "recovery_notify": 1, - "enable_stime": "00:00", - "enable_etime": "23:59", - "priority": 1, - "exprs": [ - { - "eopt": ">", - "func": "all", - "metric": "sys.ps.process.total", - "params": [], - "threshold": 3000 - } - ], - "tags": [], - "enable_days_of_week": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6 - ], - "converge": [ - 36000, - 1 - ], - "endpoints": null - }, - { - "name": "进程挂了", - "category": 1, - "alert_dur": 60, - "recovery_dur": 0, - 
"recovery_notify": 1, - "enable_stime": "00:00", - "enable_etime": "23:59", - "priority": 2, - "exprs": [ - { - "eopt": "<", - "func": "all", - "metric": "proc.num", - "params": [], - "threshold": 1 - } - ], - "tags": [], - "enable_days_of_week": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6 - ], - "converge": [ - 36000, - 1 - ], - "endpoints": null - } -] \ No newline at end of file diff --git a/etc/judge.yml b/etc/judge.yml deleted file mode 100644 index cd6f3f3f..00000000 --- a/etc/judge.yml +++ /dev/null @@ -1,18 +0,0 @@ -query: - connTimeout: 1000 - callTimeout: 2000 - indexCallTimeout: 2000 - -redis: - addrs: - - 127.0.0.1:6379 - pass: "" - # timeout: - # conn: 500 - # read: 3000 - # write: 3000 - -logger: - dir: logs/judge - level: INFO - keepHours: 24 \ No newline at end of file diff --git a/etc/monapi.yml b/etc/monapi.yml deleted file mode 100644 index 26792d79..00000000 --- a/etc/monapi.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -tokens: - - monapi-internal-third-module-pass-fjsdi - -logger: - dir: logs/monapi - level: INFO - keepHours: 24 - -alarmEnabled: true - -region: - - default - -# clean history event -cleaner: - # retention days - days: 100 - # number of events deleted per time - batch: 100 - -# read alert from redis -redis: - addr: 127.0.0.1:6379 - pass: "" - # timeout: - # conn: 500 - # read: 3000 - # write: 3000 -i18n: - lang: zh - -notify: - p1: ["voice", "sms", "mail", "im"] - p2: ["sms", "mail", "im"] - p3: ["mail", "im"] - -# addresses accessible using browser -link: - stra: http://n9e.com/mon/strategy/%v - event: http://n9e.com/mon/history/his/%v - claim: http://n9e.com/mon/history/cur/%v - -http: - mode: release - cookieDomain: "" - cookieName: ecmc-sid diff --git a/etc/nginx.conf b/etc/nginx.conf index bb15e3f1..97f59b47 100644 --- a/etc/nginx.conf +++ b/etc/nginx.conf @@ -67,33 +67,33 @@ http { } upstream n9e.ams { - server 127.0.0.1:8002; + server 127.0.0.1:8000; keepalive 60; } upstream n9e.job { - server 127.0.0.1:8004; + server 127.0.0.1:8000; 
keepalive 60; } upstream n9e.monapi { - server 127.0.0.1:8006; + server 127.0.0.1:8000; keepalive 60; } upstream n9e.transfer { - server 127.0.0.1:8008; + server 127.0.0.1:8000; keepalive 60; } upstream n9e.index { - server 127.0.0.1:8012; + server 127.0.0.1:8000; keepalive 60; } server { listen 80 default_server; - server_name n9e.example.com; + server_name localhost; root /home/n9e/pub; # Load configuration files for the default server block. diff --git a/etc/password-changed-email.tpl b/etc/password-changed-email.tpl index 761ea162..4c8a252c 100644 --- a/etc/password-changed-email.tpl +++ b/etc/password-changed-email.tpl @@ -1 +1 @@ -您好,您的密码已被重置 +您好,您的密码已被重置 \ No newline at end of file diff --git a/etc/rdb.yml b/etc/rdb.yml deleted file mode 100644 index 7eb37098..00000000 --- a/etc/rdb.yml +++ /dev/null @@ -1,110 +0,0 @@ -logger: - dir: logs/rdb - level: INFO - keepHours: 24 - -http: - mode: release - session: - cookieName: ecmc-sid - domain: "" - httpOnly: true - gcInterval: 60 - cookieLifetime: 86400 # 单位秒,0: 与浏览器相同 - -i18n: - lang: zh - -sso: - enable: false - ssoAddr: "http://{sso-host}" - redirectURL: "http://{rdb-host}/auth-callback" - clientId: "" - clientSecret: "" - apiKey: "" - attributes: - dispname: "display_name" - email: "email" - phone: "phone" - im: "" - coverAttributes: false - stateExpiresIn: 300 - -auth: - captcha: false - extraMode: - enable: false # enable whiteList, login retry lock, userControl, ... 
- whiteList: false - frozenDays: 90 # frozen time (day) - writenOffDays: 365 # writenOff time (day) - -tokens: - - rdb-builtin-token - -# for ldap authorization -ldap: - host: "ldap.example.org" - port: 389 - baseDn: "dc=example,dc=org" - # AD: manange@example.org - bindUser: "cn=manager,dc=example,dc=org" - bindPass: "*******" - # openldap: (&(uid=%s)) - # AD: (&(sAMAccountName=%s)) - authFilter: "(&(uid=%s))" - attributes: - dispname: "cn" - email: "mail" - phone: "mobile" - im: "" - coverAttributes: false - autoRegist: true - tls: false - startTLS: false - -# as queue for sender -redis: - enable: true - addr: 127.0.0.1:6379 - pass: "" - idle: 5 - timeout: - conn: 500 - read: 3000 - write: 3000 - -rabbitmq: - enable: false - addr: amqp://root:1234@127.0.0.1:5672/ - queue: test - -sender: - mail: - # three choice: smtp|shell|api - way: smtp - worker: 10 - api: http://127.0.0.1:2008/mail - sms: - # two choice: shell|api - way: api - worker: 10 - api: http://127.0.0.1:2008/sms - voice: - # two choice: shell|api - way: shell - worker: 10 - api: http://127.0.0.1:2008/voice - im: - # five choice: shell|api|wechat|wechat_robot|dingtalk_robot - way: shell - worker: 10 - api: http://127.0.0.1:2008/im - -wechat: - corp_id: "xxxxxxxxxxxxx" - agent_id: 1000000 - secret: "xxxxxxxxxxxxxxxxx" - -#webhook: -# - addr: http://localhost:2008/api/event -# token: "1234" diff --git a/etc/screen/n9e_mudules b/etc/screen/n9e_mudules deleted file mode 100644 index db3929fa..00000000 --- a/etc/screen/n9e_mudules +++ /dev/null @@ -1,88 +0,0 @@ -[ - { - "name": "N9E模块监控", - "node_path": "", - "tags": [ - { - "name": "transfer", - "weight": 0, - "charts": [ - { - "configs": "{\"title\":\"transfer接收点数\",\"type\":\"chart\",\"now\":\"1614909881267\",\"start\":\"1614906281267\",\"end\":\"1614909881267\",\"comparisonOptions\":[{\"label\":\"1小时\",\"labelEn\":\"1 hour\",\"value\":\"3600000\"},{\"label\":\"2小时\",\"labelEn\":\"2 hours\",\"value\":\"7200000\"},{\"label\":\"1天\",\"labelEn\":\"1 
day\",\"value\":\"86400000\"},{\"label\":\"2天\",\"labelEn\":\"2 days\",\"value\":\"172800000\"},{\"label\":\"7天\",\"labelEn\":\"7 days\",\"value\":\"604800000\"}],\"legend\":false,\"shared\":true,\"linkVisible\":true,\"metrics\":[{\"selectedNid\":7,\"endpointsKey\":\"endpoints\",\"selectedEndpoint\":[\"=all\"],\"selectedMetric\":\"n9e.transfer.points.in\",\"selectedTagkv\":[{\"tagk\":\"endpoint\",\"tagv\":[\"=all\"]}],\"counterListCount\":1,\"aggrFunc\":\"max\"}],\"chartTypeOptions\":{\"chartType\":\"singleValue\",\"targetValue\":\"max\",\"subType\":\"normal\",\"valueMap\":\"range\",\"mapConf\":[{\"color\":\"#06c658\",\"from\":0,\"to\":50000},{\"from\":50000,\"to\":800000,\"color\":\"#da8e58\"},{\"from\":80000,\"to\":100000,\"color\":\"#f48b71\"},{\"from\":10000,\"color\":\"#fa0505\"}]},\"id\":25}", - "weight": 0 - }, - { - "configs": "{\"title\":\"transfer发送tsdb点数\",\"type\":\"chart\",\"now\":\"1614909881267\",\"start\":\"1614906281267\",\"end\":\"1614909881267\",\"comparisonOptions\":[{\"label\":\"1小时\",\"labelEn\":\"1 hour\",\"value\":\"3600000\"},{\"label\":\"2小时\",\"labelEn\":\"2 hours\",\"value\":\"7200000\"},{\"label\":\"1天\",\"labelEn\":\"1 day\",\"value\":\"86400000\"},{\"label\":\"2天\",\"labelEn\":\"2 days\",\"value\":\"172800000\"},{\"label\":\"7天\",\"labelEn\":\"7 days\",\"value\":\"604800000\"}],\"legend\":false,\"shared\":true,\"linkVisible\":true,\"metrics\":[{\"selectedNid\":7,\"endpointsKey\":\"endpoints\",\"selectedEndpoint\":[\"=all\"],\"selectedMetric\":\"n9e.transfer.points.out.tsdb\",\"selectedTagkv\":[{\"tagk\":\"endpoint\",\"tagv\":[\"=all\"]}],\"counterListCount\":1,\"aggrFunc\":\"max\"}],\"chartTypeOptions\":{\"chartType\":\"singleValue\",\"targetValue\":\"max\",\"subType\":\"normal\",\"valueMap\":\"range\",\"mapConf\":[{\"color\":\"#06c658\",\"from\":0,\"to\":50000},{\"from\":50000,\"to\":800000,\"color\":\"#da8e58\"},{\"from\":80000,\"to\":100000,\"color\":\"#f48b71\"},{\"from\":10000,\"color\":\"#fa0505\"}]},\"id\":26}", - "weight": 1 - 
}, - { - "configs": "{\"title\":\"transfer向judge发送的点数\",\"type\":\"chart\",\"now\":\"1614909881267\",\"start\":\"1614906281267\",\"end\":\"1614909881267\",\"comparisonOptions\":[{\"label\":\"1小时\",\"labelEn\":\"1 hour\",\"value\":\"3600000\"},{\"label\":\"2小时\",\"labelEn\":\"2 hours\",\"value\":\"7200000\"},{\"label\":\"1天\",\"labelEn\":\"1 day\",\"value\":\"86400000\"},{\"label\":\"2天\",\"labelEn\":\"2 days\",\"value\":\"172800000\"},{\"label\":\"7天\",\"labelEn\":\"7 days\",\"value\":\"604800000\"}],\"legend\":false,\"shared\":true,\"linkVisible\":true,\"metrics\":[{\"selectedNid\":7,\"endpointsKey\":\"endpoints\",\"selectedEndpoint\":[\"=all\"],\"selectedMetric\":\"n9e.transfer.points.out.judge\",\"selectedTagkv\":[{\"tagk\":\"endpoint\",\"tagv\":[\"=all\"]}],\"counterListCount\":1,\"aggrFunc\":\"max\"}],\"chartTypeOptions\":{\"chartType\":\"singleValue\",\"targetValue\":\"max\",\"subType\":\"normal\",\"valueMap\":\"range\",\"mapConf\":[{\"color\":\"#06c658\",\"from\":0,\"to\":50000},{\"from\":50000,\"to\":800000,\"color\":\"#da8e58\"},{\"from\":80000,\"to\":100000,\"color\":\"#f48b71\"},{\"from\":10000,\"color\":\"#fa0505\"}]},\"id\":28}", - "weight": 2 - }, - { - "configs": "{\"title\":\"\",\"type\":\"chart\",\"now\":\"1614667049309\",\"start\":\"1614663449309\",\"end\":\"1614667049309\",\"comparisonOptions\":[{\"label\":\"1小时\",\"labelEn\":\"1 hour\",\"value\":\"3600000\"},{\"label\":\"2小时\",\"labelEn\":\"2 hours\",\"value\":\"7200000\"},{\"label\":\"1天\",\"labelEn\":\"1 day\",\"value\":\"86400000\"},{\"label\":\"2天\",\"labelEn\":\"2 days\",\"value\":\"172800000\"},{\"label\":\"7天\",\"labelEn\":\"7 days\",\"value\":\"604800000\"}],\"legend\":false,\"shared\":true,\"linkVisible\":true,\"metrics\":[{\"selectedNid\":7,\"endpointsKey\":\"endpoints\",\"selectedEndpoint\":[\"=all\"],\"selectedMetric\":\"n9e.transfer.get.index.err\",\"selectedTagkv\":[{\"tagk\":\"endpoint\",\"tagv\":[\"=all\"]}],\"counterListCount\":1}]}", - "weight": 3 - }, - { - "configs": 
"{\"title\":\"\",\"type\":\"chart\",\"now\":\"1614667049309\",\"start\":\"1614663449309\",\"end\":\"1614667049309\",\"comparisonOptions\":[{\"label\":\"1小时\",\"labelEn\":\"1 hour\",\"value\":\"3600000\"},{\"label\":\"2小时\",\"labelEn\":\"2 hours\",\"value\":\"7200000\"},{\"label\":\"1天\",\"labelEn\":\"1 day\",\"value\":\"86400000\"},{\"label\":\"2天\",\"labelEn\":\"2 days\",\"value\":\"172800000\"},{\"label\":\"7天\",\"labelEn\":\"7 days\",\"value\":\"604800000\"}],\"legend\":false,\"shared\":true,\"linkVisible\":true,\"metrics\":[{\"selectedNid\":7,\"endpointsKey\":\"endpoints\",\"selectedEndpoint\":[\"=all\"],\"selectedMetric\":\"n9e.transfer.judge.get.err\",\"selectedTagkv\":[{\"tagk\":\"endpoint\",\"tagv\":[\"=all\"]}],\"counterListCount\":1}]}", - "weight": 4 - }, - { - "configs": "{\"title\":\"transfer向judge发送的点数\",\"type\":\"chart\",\"now\":\"1614909881267\",\"start\":\"1614906281267\",\"end\":\"1614909881267\",\"comparisonOptions\":[{\"label\":\"1小时\",\"labelEn\":\"1 hour\",\"value\":\"3600000\"},{\"label\":\"2小时\",\"labelEn\":\"2 hours\",\"value\":\"7200000\"},{\"label\":\"1天\",\"labelEn\":\"1 day\",\"value\":\"86400000\"},{\"label\":\"2天\",\"labelEn\":\"2 days\",\"value\":\"172800000\"},{\"label\":\"7天\",\"labelEn\":\"7 days\",\"value\":\"604800000\"}],\"legend\":false,\"shared\":true,\"linkVisible\":true,\"metrics\":[{\"selectedNid\":7,\"endpointsKey\":\"endpoints\",\"selectedEndpoint\":[\"=all\"],\"selectedMetric\":\"n9e.transfer.stra.count\",\"selectedTagkv\":[{\"tagk\":\"endpoint\",\"tagv\":[\"=all\"]}],\"counterListCount\":1,\"aggrFunc\":\"max\"}],\"chartTypeOptions\":{\"chartType\":\"singleValue\",\"targetValue\":\"max\",\"subType\":\"normal\",\"valueMap\":\"range\",\"mapConf\":[{\"color\":\"#06c658\",\"from\":0,\"to\":5000},{\"from\":5000,\"to\":80000,\"color\":\"#da8e58\"},{\"from\":8000,\"to\":10000,\"color\":\"#f48b71\"},{\"from\":10000,\"color\":\"#fa0505\"}]},\"id\":54}", - "weight": 5 - } - ] - }, - { - "name": "tsdb", - "weight": 1, - "charts": 
[ - { - "configs": "{\"title\":\"\",\"type\":\"chart\",\"now\":\"1614909881267\",\"start\":\"1614906281267\",\"end\":\"1614909881267\",\"comparisonOptions\":[{\"label\":\"1小时\",\"labelEn\":\"1 hour\",\"value\":\"3600000\"},{\"label\":\"2小时\",\"labelEn\":\"2 hours\",\"value\":\"7200000\"},{\"label\":\"1天\",\"labelEn\":\"1 day\",\"value\":\"86400000\"},{\"label\":\"2天\",\"labelEn\":\"2 days\",\"value\":\"172800000\"},{\"label\":\"7天\",\"labelEn\":\"7 days\",\"value\":\"604800000\"}],\"legend\":false,\"shared\":true,\"linkVisible\":true,\"metrics\":[{\"selectedNid\":7,\"endpointsKey\":\"endpoints\",\"selectedEndpoint\":[\"=all\"],\"selectedMetric\":\"n9e.tsdb.points.in\",\"selectedTagkv\":[{\"tagk\":\"endpoint\",\"tagv\":[\"=all\"]}],\"counterListCount\":1,\"aggrFunc\":\"max\"}],\"chartTypeOptions\":{\"chartType\":\"singleValue\",\"targetValue\":\"max\",\"subType\":\"normal\",\"valueMap\":\"range\",\"mapConf\":[{\"from\":0,\"to\":50000,\"color\":\"#06c663\"},{\"from\":50000,\"to\":80000,\"color\":\"#da8e0b\"},{\"from\":80000,\"to\":100000,\"color\":\"#f48b71\"},{\"from\":100000,\"color\":\"#fa0505\"}]},\"id\":33}", - "weight": 0 - }, - { - "configs": "{\"title\":\"\",\"type\":\"chart\",\"now\":\"1614909881267\",\"start\":\"1614906281267\",\"end\":\"1614909881267\",\"comparisonOptions\":[{\"label\":\"1小时\",\"labelEn\":\"1 hour\",\"value\":\"3600000\"},{\"label\":\"2小时\",\"labelEn\":\"2 hours\",\"value\":\"7200000\"},{\"label\":\"1天\",\"labelEn\":\"1 day\",\"value\":\"86400000\"},{\"label\":\"2天\",\"labelEn\":\"2 days\",\"value\":\"172800000\"},{\"label\":\"7天\",\"labelEn\":\"7 
days\",\"value\":\"604800000\"}],\"legend\":false,\"shared\":true,\"linkVisible\":true,\"metrics\":[{\"selectedNid\":7,\"endpointsKey\":\"endpoints\",\"selectedEndpoint\":[\"=all\"],\"selectedMetric\":\"n9e.tsdb.index.out\",\"selectedTagkv\":[{\"tagk\":\"endpoint\",\"tagv\":[\"=all\"]}],\"counterListCount\":1,\"aggrFunc\":\"avg\"}],\"chartTypeOptions\":{\"chartType\":\"singleValue\",\"targetValue\":\"avg\",\"subType\":\"normal\",\"valueMap\":\"range\",\"mapConf\":[{\"from\":0,\"to\":50000,\"color\":\"#06c663\"},{\"from\":50000,\"to\":80000,\"color\":\"#da8e0b\"},{\"from\":80000,\"to\":100000,\"color\":\"#f48b71\"},{\"from\":100000,\"color\":\"#fa0505\"}]},\"id\":34}", - "weight": 1 - }, - { - "configs": "{\"title\":\"\",\"type\":\"chart\",\"now\":\"1614909881267\",\"start\":\"1614906281267\",\"end\":\"1614909881267\",\"comparisonOptions\":[{\"label\":\"1小时\",\"labelEn\":\"1 hour\",\"value\":\"3600000\"},{\"label\":\"2小时\",\"labelEn\":\"2 hours\",\"value\":\"7200000\"},{\"label\":\"1天\",\"labelEn\":\"1 day\",\"value\":\"86400000\"},{\"label\":\"2天\",\"labelEn\":\"2 days\",\"value\":\"172800000\"},{\"label\":\"7天\",\"labelEn\":\"7 days\",\"value\":\"604800000\"}],\"legend\":false,\"shared\":true,\"linkVisible\":true,\"metrics\":[{\"selectedNid\":7,\"endpointsKey\":\"endpoints\",\"selectedEndpoint\":[\"=all\"],\"selectedMetric\":\"n9e.tsdb.query.hit.cache\",\"selectedTagkv\":[{\"tagk\":\"endpoint\",\"tagv\":[\"=all\"]}],\"counterListCount\":1,\"aggrFunc\":\"avg\"}],\"chartTypeOptions\":{\"chartType\":\"singleValue\",\"targetValue\":\"avg\",\"subType\":\"normal\",\"valueMap\":\"range\",\"mapConf\":[{\"from\":0,\"to\":50000,\"color\":\"#06c663\"},{\"from\":50000,\"to\":80000,\"color\":\"#da8e0b\"},{\"from\":80000,\"to\":100000,\"color\":\"#f48b71\"},{\"from\":100000,\"color\":\"#fa0505\"}]},\"id\":35}", - "weight": 2 - } - ] - }, - { - "name": "Judge", - "weight": 2, - "charts": [ - { - "configs": 
"{\"title\":\"judge接收点数\",\"type\":\"chart\",\"now\":\"1614909881045\",\"start\":\"1614906281045\",\"end\":\"1614909881045\",\"comparisonOptions\":[{\"label\":\"1小时\",\"labelEn\":\"1 hour\",\"value\":\"3600000\"},{\"label\":\"2小时\",\"labelEn\":\"2 hours\",\"value\":\"7200000\"},{\"label\":\"1天\",\"labelEn\":\"1 day\",\"value\":\"86400000\"},{\"label\":\"2天\",\"labelEn\":\"2 days\",\"value\":\"172800000\"},{\"label\":\"7天\",\"labelEn\":\"7 days\",\"value\":\"604800000\"}],\"legend\":false,\"shared\":true,\"linkVisible\":true,\"metrics\":[{\"selectedNid\":5,\"endpointsKey\":\"endpoints\",\"selectedEndpoint\":[\"=all\"],\"selectedMetric\":\"n9e.judge.push.in\",\"selectedTagkv\":[{\"tagk\":\"endpoint\",\"tagv\":[\"=all\"]}],\"counterListCount\":1}]}", - "weight": 0 - }, - { - "configs": "{\"title\":\"正在执行的judge任务数\",\"type\":\"chart\",\"now\":\"1614909881045\",\"start\":\"1614906281045\",\"end\":\"1614909881045\",\"comparisonOptions\":[{\"label\":\"1小时\",\"labelEn\":\"1 hour\",\"value\":\"3600000\"},{\"label\":\"2小时\",\"labelEn\":\"2 hours\",\"value\":\"7200000\"},{\"label\":\"1天\",\"labelEn\":\"1 day\",\"value\":\"86400000\"},{\"label\":\"2天\",\"labelEn\":\"2 days\",\"value\":\"172800000\"},{\"label\":\"7天\",\"labelEn\":\"7 days\",\"value\":\"604800000\"}],\"legend\":false,\"shared\":true,\"linkVisible\":true,\"metrics\":[{\"selectedNid\":5,\"endpointsKey\":\"endpoints\",\"selectedEndpoint\":[\"=all\"],\"selectedMetric\":\"n9e.judge.running\",\"selectedTagkv\":[{\"tagk\":\"endpoint\",\"tagv\":[\"=all\"]}],\"counterListCount\":1}]}", - "weight": 1 - }, - { - "configs": "{\"title\":\"获取的策略数\",\"type\":\"chart\",\"now\":\"1614909881045\",\"start\":\"1614906281045\",\"end\":\"1614909881045\",\"comparisonOptions\":[{\"label\":\"1小时\",\"labelEn\":\"1 hour\",\"value\":\"3600000\"},{\"label\":\"2小时\",\"labelEn\":\"2 hours\",\"value\":\"7200000\"},{\"label\":\"1天\",\"labelEn\":\"1 day\",\"value\":\"86400000\"},{\"label\":\"2天\",\"labelEn\":\"2 
days\",\"value\":\"172800000\"},{\"label\":\"7天\",\"labelEn\":\"7 days\",\"value\":\"604800000\"}],\"legend\":false,\"shared\":true,\"linkVisible\":true,\"metrics\":[{\"selectedNid\":5,\"endpointsKey\":\"endpoints\",\"selectedEndpoint\":[\"=all\"],\"selectedMetric\":\"n9e.judge.stra.count\",\"selectedTagkv\":[{\"tagk\":\"endpoint\",\"tagv\":[\"=all\"]}],\"counterListCount\":1,\"aggrFunc\":\"max\"}],\"chartTypeOptions\":{\"chartType\":\"singleValue\",\"targetValue\":\"max\",\"subType\":\"normal\",\"valueMap\":\"range\",\"mapConf\":[{\"from\":0,\"to\":5000,\"color\":\"#06c663\"},{\"from\":5000,\"to\":8000,\"color\":\"#f88e0b\"},{\"from\":8000,\"to\":10000,\"color\":\"#f48b71\"},{\"from\":10000,\"color\":\"#fa0505\"}]},\"id\":60}", - "weight": 2 - } - ] - }, - { - "name": "index", - "weight": 3, - "charts": [ - { - "configs": "{\"title\":\"fullmatch接口查索引未命中次数\",\"type\":\"chart\",\"now\":\"1614909881045\",\"start\":\"1614906281045\",\"end\":\"1614909881045\",\"comparisonOptions\":[{\"label\":\"1小时\",\"labelEn\":\"1 hour\",\"value\":\"3600000\"},{\"label\":\"2小时\",\"labelEn\":\"2 hours\",\"value\":\"7200000\"},{\"label\":\"1天\",\"labelEn\":\"1 day\",\"value\":\"86400000\"},{\"label\":\"2天\",\"labelEn\":\"2 days\",\"value\":\"172800000\"},{\"label\":\"7天\",\"labelEn\":\"7 days\",\"value\":\"604800000\"}],\"legend\":false,\"shared\":true,\"linkVisible\":true,\"metrics\":[{\"selectedNid\":5,\"endpointsKey\":\"endpoints\",\"selectedEndpoint\":[\"=all\"],\"selectedMetric\":\"n9e.index.query.counter.miss\",\"selectedTagkv\":[{\"tagk\":\"endpoint\",\"tagv\":[\"=all\"]}],\"counterListCount\":1}]}", - "weight": 0 - }, - { - "configs": "{\"title\":\"xclude接口查索引命中次数\",\"type\":\"chart\",\"now\":\"1614909881045\",\"start\":\"1614906281045\",\"end\":\"1614909881045\",\"comparisonOptions\":[{\"label\":\"1小时\",\"labelEn\":\"1 hour\",\"value\":\"3600000\"},{\"label\":\"2小时\",\"labelEn\":\"2 hours\",\"value\":\"7200000\"},{\"label\":\"1天\",\"labelEn\":\"1 
day\",\"value\":\"86400000\"},{\"label\":\"2天\",\"labelEn\":\"2 days\",\"value\":\"172800000\"},{\"label\":\"7天\",\"labelEn\":\"7 days\",\"value\":\"604800000\"}],\"legend\":false,\"shared\":true,\"linkVisible\":true,\"metrics\":[{\"selectedNid\":5,\"endpointsKey\":\"endpoints\",\"selectedEndpoint\":[\"=all\"],\"selectedMetric\":\"n9e.index.xclude.qp10s\",\"selectedTagkv\":[{\"tagk\":\"endpoint\",\"tagv\":[\"=all\"]}],\"counterListCount\":1}]}", - "weight": 1 - } - ] - } - ] - } -] diff --git a/etc/server.yml b/etc/server.yml new file mode 100644 index 00000000..7fbf23f6 --- /dev/null +++ b/etc/server.yml @@ -0,0 +1,172 @@ +logger: + dir: logs/server + level: INFO + # rotate by time + keepHours: 24 + ## rotate by size + #rotatenum: 3 + #rotatesize: 256 # unit: MB + +http: + mode: release + showLog: true + session: + cookieName: ecmc-sid + cookieDomain: "" + httpOnly: true + gcInterval: 60 + cookieLifetime: 86400 # unit: second, 0: cookie life same to browser + +tokens: + - rdb-builtin-token + +redis: + # as queue + local: + enable: true + addr: 127.0.0.1:6379 + pass: "" + idle: 5 + timeout: + conn: 500 + read: 3000 + write: 3000 + + +rdb: + # for ldap authorization + ldap: + host: "ldap.example.org" + port: 389 + baseDn: "dc=example,dc=org" + # AD: manange@example.org + bindUser: "cn=manager,dc=example,dc=org" + bindPass: "*******" + # openldap: (&(uid=%s)) + # AD: (&(sAMAccountName=%s)) + authFilter: "(&(uid=%s))" + attributes: + dispname: "cn" + email: "mail" + phone: "mobile" + im: "" + coverAttributes: false + autoRegist: true + tls: false + startTLS: false + ## for enterprise edition + #sso: + # enable: false + # ssoAddr: "http://{sso-host}" + # redirectURL: "http://{rdb-host}/auth-callback" + # clientId: "" + # clientSecret: "" + # apiKey: "" + # attributes: + # dispname: "display_name" + # email: "email" + # phone: "phone" + # im: "" + # coverAttributes: false + # stateExpiresIn: 300 + #auth: + # captcha: false + # extraMode: + # enable: false # enable 
whiteList, login retry lock, userControl, ... + # whiteList: false + # frozenDays: 90 # frozen time (day) + # writenOffDays: 365 # writenOff time (day) + + +#i18n: +# lang: zh + +job: + enable: true + ## database | remote + #outputComeFrom: database + #remoteAgtdPort: 2080 + +transfer: + enable: true + backend: + datasource: "m3db" + m3db: + enabled: true + maxSeriesPoints: 720 # default 720 + name: "m3db" + namespace: "default" + seriesLimit: 0 + docsLimit: 0 + daysLimit: 7 # max query time + # https://m3db.github.io/m3/m3db/architecture/consistencylevels/ + writeConsistencyLevel: "majority" # one|majority|all + readConsistencyLevel: "unstrict_majority" # one|unstrict_majority|majority|all + config: + service: + # KV environment, zone, and service from which to write/read KV data (placement + # and configuration). Leave these as the default values unless you know what + # you're doing. + env: default_env + zone: embedded + service: m3db + etcdClusters: + - zone: embedded + endpoints: + - 127.0.0.1:2379 + #tls: + # caCrtPath: /etc/etcd/certs/ca.pem + # crtPath: /etc/etcd/certs/etcd-client.pem + # keyPath: /etc/etcd/certs/etcd-client-key.pem + #tsdb: + # enabled: false + # name: "tsdb" + # cluster: + # tsdb01: 127.0.0.1:8011 + #influxdb: + # enabled: false + # username: "influx" + # password: "admin123" + # precision: "s" + # database: "n9e" + # address: "http://127.0.0.1:8086" + #opentsdb: + # enabled: false + # address: "127.0.0.1:4242" + #kafka: + # enabled: false + # brokersPeers: "192.168.1.1:9092,192.168.1.2:9092" + # topic: "n9e" +monapi: + indexMod: server + alarmEnabled: true + region: + - default + + # clean history event + cleaner: + # retention days + days: 100 + # number of events deleted per time + batch: 100 + notify: + p1: ["voice", "sms", "mail", "im"] + p2: ["sms", "mail", "im"] + p3: ["mail", "im"] + + # addresses accessible using browser + link: + stra: http://n9e.com/mon/strategy/%v + event: http://n9e.com/mon/history/his/%v + claim: 
http://n9e.com/mon/history/cur/%v + +judge: + query: + connTimeout: 1000 + callTimeout: 2000 + maxConn: 2000 + maxIdle: 100 + connTimeout: 1000 + callTimeout: 2000 + indexCallTimeout: 2000 + indexMod: server \ No newline at end of file diff --git a/etc/service/agent.service b/etc/service/agentd.service similarity index 83% rename from etc/service/agent.service rename to etc/service/agentd.service index c0f0b25c..3c73bcd3 100644 --- a/etc/service/agent.service +++ b/etc/service/agentd.service @@ -10,7 +10,7 @@ Group=root Type=simple Environment="GIN_MODE=release" -ExecStart=/home/n9e/n9e-agent +ExecStart=/home/n9e/n9e-agentd WorkingDirectory=/home/n9e Restart=always diff --git a/etc/service/job.service b/etc/service/job.service deleted file mode 100644 index 45baa1e8..00000000 --- a/etc/service/job.service +++ /dev/null @@ -1,21 +0,0 @@ -[Unit] -Description=n9e job -After=network-online.target -Wants=network-online.target - -[Service] -# modify when deploy in prod env -User=root -Group=root - -Type=simple -Environment="GIN_MODE=release" -ExecStart=/home/n9e/n9e-job -WorkingDirectory=/home/n9e - -Restart=always -RestartSec=1 -StartLimitInterval=0 - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/etc/service/judge.service b/etc/service/judge.service deleted file mode 100644 index a62eef0e..00000000 --- a/etc/service/judge.service +++ /dev/null @@ -1,21 +0,0 @@ -[Unit] -Description=n9e judge -After=network-online.target -Wants=network-online.target - -[Service] -# modify when deploy in prod env -User=root -Group=root - -Type=simple -Environment="GIN_MODE=release" -ExecStart=/home/n9e/n9e-judge -WorkingDirectory=/home/n9e - -Restart=always -RestartSec=1 -StartLimitInterval=0 - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/etc/service/monapi.service b/etc/service/monapi.service deleted file mode 100644 index fdb619a1..00000000 --- a/etc/service/monapi.service +++ /dev/null @@ -1,21 +0,0 @@ -[Unit] 
-Description=n9e monapi -After=network-online.target -Wants=network-online.target - -[Service] -# modify when deploy in prod env -User=root -Group=root - -Type=simple -Environment="GIN_MODE=release" -ExecStart=/home/n9e/n9e-monapi -WorkingDirectory=/home/n9e - -Restart=always -RestartSec=1 -StartLimitInterval=0 - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/etc/service/index.service b/etc/service/prober.service similarity index 76% rename from etc/service/index.service rename to etc/service/prober.service index b575768b..752b9323 100644 --- a/etc/service/index.service +++ b/etc/service/prober.service @@ -1,5 +1,5 @@ [Unit] -Description=n9e index +Description=n9e prober After=network-online.target Wants=network-online.target @@ -10,7 +10,7 @@ Group=root Type=simple Environment="GIN_MODE=release" -ExecStart=/home/n9e/n9e-index +ExecStart=/home/n9e/n9e-prober WorkingDirectory=/home/n9e Restart=always diff --git a/etc/service/rdb.service b/etc/service/rdb.service deleted file mode 100644 index 275028f3..00000000 --- a/etc/service/rdb.service +++ /dev/null @@ -1,21 +0,0 @@ -[Unit] -Description=n9e rdb -After=network-online.target -Wants=network-online.target - -[Service] -# modify when deploy in prod env -User=root -Group=root - -Type=simple -Environment="GIN_MODE=release" -ExecStart=/home/n9e/n9e-rdb -WorkingDirectory=/home/n9e - -Restart=always -RestartSec=1 -StartLimitInterval=0 - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/etc/service/ams.service b/etc/service/server.service similarity index 76% rename from etc/service/ams.service rename to etc/service/server.service index 643ea6e1..57b52846 100644 --- a/etc/service/ams.service +++ b/etc/service/server.service @@ -1,5 +1,5 @@ [Unit] -Description=n9e ams +Description=n9e server After=network-online.target Wants=network-online.target @@ -10,7 +10,7 @@ Group=root Type=simple Environment="GIN_MODE=release" -ExecStart=/home/n9e/n9e-ams 
+ExecStart=/home/n9e/n9e-server WorkingDirectory=/home/n9e Restart=always diff --git a/etc/service/transfer.service b/etc/service/transfer.service deleted file mode 100644 index 0f0d7157..00000000 --- a/etc/service/transfer.service +++ /dev/null @@ -1,21 +0,0 @@ -[Unit] -Description=n9e transfer -After=network-online.target -Wants=network-online.target - -[Service] -# modify when deploy in prod env -User=root -Group=root - -Type=simple -Environment="GIN_MODE=release" -ExecStart=/home/n9e/n9e-transfer -WorkingDirectory=/home/n9e - -Restart=always -RestartSec=1 -StartLimitInterval=0 - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/etc/service/tsdb.service b/etc/service/tsdb.service deleted file mode 100644 index 1c1a0a3b..00000000 --- a/etc/service/tsdb.service +++ /dev/null @@ -1,21 +0,0 @@ -[Unit] -Description=n9e tsdb -After=network-online.target -Wants=network-online.target - -[Service] -# modify when deploy in prod env -User=root -Group=root - -Type=simple -Environment="GIN_MODE=release" -ExecStart=/home/n9e/n9e-tsdb -WorkingDirectory=/home/n9e - -Restart=always -RestartSec=1 -StartLimitInterval=0 - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/etc/transfer.yml b/etc/transfer.yml deleted file mode 100644 index b048ba90..00000000 --- a/etc/transfer.yml +++ /dev/null @@ -1,52 +0,0 @@ -backend: - datasource: "tsdb" - m3db: - enabled: false - maxSeriesPoints: 720 # default 720 - name: "m3db" - namespace: "default" - seriesLimit: 0 - docsLimit: 0 - daysLimit: 7 # max query time - # https://m3db.github.io/m3/m3db/architecture/consistencylevels/ - writeConsistencyLevel: "majority" # one|majority|all - readConsistencyLevel: "unstrict_majority" # one|unstrict_majority|majority|all - config: - service: - # KV environment, zone, and service from which to write/read KV data (placement - # and configuration). Leave these as the default values unless you know what - # you're doing. 
- env: default_env - zone: embedded - service: m3db - etcdClusters: - - zone: embedded - endpoints: - - 127.0.0.1:2379 - tls: - caCrtPath: /etc/etcd/certs/ca.pem - crtPath: /etc/etcd/certs/etcd-client.pem - keyPath: /etc/etcd/certs/etcd-client-key.pem - tsdb: - enabled: true - name: "tsdb" - cluster: - tsdb01: 127.0.0.1:8011 - influxdb: - enabled: false - username: "influx" - password: "admin123" - precision: "s" - database: "n9e" - address: "http://127.0.0.1:8086" - opentsdb: - enabled: false - address: "127.0.0.1:4242" - kafka: - enabled: false - brokersPeers: "192.168.1.1:9092,192.168.1.2:9092" - topic: "n9e" -logger: - dir: logs/transfer - level: INFO - keepHours: 24 diff --git a/etc/tsdb.yml b/etc/tsdb.yml deleted file mode 100644 index 00b05cf0..00000000 --- a/etc/tsdb.yml +++ /dev/null @@ -1,8 +0,0 @@ -rrd: - storage: data/8011 -cache: - keepMinutes: 120 -logger: - dir: logs/tsdb - level: WARNING - keepHours: 2 \ No newline at end of file diff --git a/go.mod b/go.mod index 270bcbd5..86a0f36b 100644 --- a/go.mod +++ b/go.mod @@ -1,20 +1,21 @@ -module github.com/didi/nightingale +module github.com/didi/nightingale/v4 go 1.12 require ( github.com/Shopify/sarama v1.27.2 + github.com/alouca/gologger v0.0.0-20120904114645-7d4b7291de9c // indirect github.com/cespare/xxhash v1.1.0 - github.com/codegangsta/negroni v1.0.0 github.com/coreos/go-oidc v2.2.1+incompatible - github.com/dgryski/go-tsz v0.0.0-20180227144327-03b7d791f4fe + github.com/freedomkk-qfeng/go-fastping v0.0.0-20160109021039-d7bb493dee3e // indirect + github.com/gaochao1/gosnmp v0.0.0-20150630013918-783a67a067fd // indirect + github.com/gaochao1/sw v4.0.0+incompatible github.com/garyburd/redigo v1.6.2 github.com/gin-contrib/pprof v1.3.0 github.com/gin-gonic/gin v1.6.3 github.com/go-ping/ping v0.0.0-20201115131931-3300c582a663 github.com/go-sql-driver/mysql v1.5.0 github.com/google/uuid v1.1.2 - github.com/gorilla/mux v1.7.3 github.com/hashicorp/golang-lru v0.5.4 github.com/hpcloud/tail v1.0.0 
github.com/influxdata/influxdb v1.8.0 @@ -23,17 +24,18 @@ require ( github.com/mattn/go-isatty v0.0.12 github.com/mattn/go-sqlite3 v1.14.0 // indirect github.com/mojocn/base64Captcha v1.3.1 - github.com/open-falcon/rrdlite v0.0.0-20200214140804-bf5829f786ad github.com/pquerna/cachecontrol v0.0.0-20200819021114-67c6ae64274f // indirect github.com/robfig/go-cache v0.0.0-20130306151617-9fc39e0dbf62 // indirect github.com/shirou/gopsutil v3.20.11+incompatible // indirect github.com/spaolacci/murmur3 v1.1.0 + github.com/sparrc/go-ping v0.0.0-20190613174326-4e5b6552494c github.com/spf13/viper v1.7.1 github.com/streadway/amqp v1.0.0 github.com/stretchr/testify v1.6.1 + github.com/toolkits/file v0.0.0-20160325033739-a5b3c5147e07 // indirect github.com/toolkits/pkg v1.1.3 + github.com/toolkits/sys v0.0.0-20170615103026-1f33b217ffaf // indirect github.com/ugorji/go/codec v1.1.7 - github.com/unrolled/render v1.0.3 go.uber.org/automaxprocs v1.3.0 // indirect golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/text v0.3.3 diff --git a/go.sum b/go.sum index 5919c8bc..29580d98 100644 --- a/go.sum +++ b/go.sum @@ -109,6 +109,8 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2c github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexbrainman/sspi v0.0.0-20180613141037-e580b900e9f5 h1:P5U+E4x5OkVEKQDklVPmzs71WM56RTTRqV4OrDC//Y4= github.com/alexbrainman/sspi v0.0.0-20180613141037-e580b900e9f5/go.mod h1:976q2ETgjT2snVCf2ZaBnyBbVoPERGjUz+0sofzEfro= +github.com/alouca/gologger v0.0.0-20120904114645-7d4b7291de9c h1:k/7/05/5kPRX7HaKyVYlsGVX6XkFTyYLqkqHzceUVlU= +github.com/alouca/gologger v0.0.0-20120904114645-7d4b7291de9c/go.mod h1:SI1d/2/wpSTDjHgdS9ZLy6hqvsdhzVYAc8RLztweMpA= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod 
h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= @@ -184,8 +186,6 @@ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/codegangsta/negroni v1.0.0 h1:+aYywywx4bnKXWvoWtRfJ91vC59NbEhEY03sZjQhbVY= -github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -224,8 +224,6 @@ github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/ github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dgryski/go-tsz v0.0.0-20180227144327-03b7d791f4fe h1:VOrqop9SqFzqwZpROEOZpIufuLEUoJ3reNhdOdC9Zzw= -github.com/dgryski/go-tsz v0.0.0-20180227144327-03b7d791f4fe/go.mod h1:ft6P746mYUFQBCsH3OkFBG8FtjLx1XclLMo+9Jh1Yts= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible/go.mod 
h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= @@ -247,8 +245,6 @@ github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFP github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o= -github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= @@ -269,8 +265,14 @@ github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVB github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.10.2 h1:19ARM85nVi4xH7xPXuc5eM/udya5ieh7b/Sv+d844Tk= github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= +github.com/freedomkk-qfeng/go-fastping v0.0.0-20160109021039-d7bb493dee3e h1:g8x+P3+xjxt7c53bucQW0ymvj+whjKfCLZH+99UMLS0= +github.com/freedomkk-qfeng/go-fastping v0.0.0-20160109021039-d7bb493dee3e/go.mod h1:UcrAEbxjAhuq5beDj0conKRHGUhBPLkFt8aUmN/jrHY= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/gaochao1/gosnmp v0.0.0-20150630013918-783a67a067fd 
h1:GO2tQE1AIt+P5dwEAVhjH9SlyUcyiN6PJZv8x33QsgU= +github.com/gaochao1/gosnmp v0.0.0-20150630013918-783a67a067fd/go.mod h1:2hQbnwhmIsamIldRzbGxojAjH14ikUBgB4kjk/f6/eI= +github.com/gaochao1/sw v4.0.0+incompatible h1:+f5OVzQHjK8WqJRiSD5y6M3ZANy62VR8o7LFHkQ41iU= +github.com/gaochao1/sw v4.0.0+incompatible/go.mod h1:p/CZIynLtXJo0fzukfdEEYvmwOBi3v8OylDKWJ4rwGo= github.com/garethr/kubeval v0.0.0-20180821130434-c44f5193dc94/go.mod h1:L8VwozDBY4bGI25r29I6FURZus8xlVo/B7lNOSfre2g= github.com/garyburd/redigo v1.6.2 h1:yE/pwKCrbLpLpQICzYTeZ7JsTA/C53wFTJHaEtRqniM= github.com/garyburd/redigo v1.6.2/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= @@ -777,8 +779,6 @@ github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/open-falcon/rrdlite v0.0.0-20200214140804-bf5829f786ad h1:GXUy5t8CYdaaEj1lRnE22CbHVY1M5h6Rv4kk0PJQc54= -github.com/open-falcon/rrdlite v0.0.0-20200214140804-bf5829f786ad/go.mod h1:pXROoG0iWVnqq4u2Ii97S0Vt9iCTVypshsl9HXsV6cs= github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= @@ -933,6 +933,8 @@ github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 
+github.com/sparrc/go-ping v0.0.0-20190613174326-4e5b6552494c h1:gqEdF4VwBu3lTKGHS9rXE9x1/pEaSwCXRLOZRF6qtlw= +github.com/sparrc/go-ping v0.0.0-20190613174326-4e5b6552494c/go.mod h1:eMyUVp6f/5jnzM+3zahzl7q6UXLbgSc3MKg/+ow9QW0= github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= @@ -996,8 +998,12 @@ github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKw github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/toolkits/file v0.0.0-20160325033739-a5b3c5147e07 h1:d/VUIMNTk65Xz69htmRPNfjypq2uNRqVsymcXQu6kKk= +github.com/toolkits/file v0.0.0-20160325033739-a5b3c5147e07/go.mod h1:FbXpUxsx5in7z/OrWFDdhYetOy3/VGIJsVHN9G7RUPA= github.com/toolkits/pkg v1.1.3 h1:cjZMz9hmuTv4v7ivYERA9mWJCLKyr8JMd4S+CL/YzMM= github.com/toolkits/pkg v1.1.3/go.mod h1:ge83E8FQqUnFk+2wtVtZ8kvbmoSjE1l8FP3f+qmR0fY= +github.com/toolkits/sys v0.0.0-20170615103026-1f33b217ffaf h1:1/LnhAvvotcSWDl1ntwUePzLXcyHjAzZ0Ih0F+kKGZU= +github.com/toolkits/sys v0.0.0-20170615103026-1f33b217ffaf/go.mod h1:GejnAYmB2Pr/2fWKp7OGdd6MzuXvRwClmdQAnvnr++I= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/twmb/murmur3 v1.1.4/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= github.com/twotwotwo/sorts v0.0.0-20160814051341-bf5c1f2b8553 h1:DRC1ubdb3ZmyyIeCSTxjZIQAnpLPfKVgYrLETQuOPjo= @@ -1017,8 +1023,6 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT github.com/ugorji/go/codec 
v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= -github.com/unrolled/render v1.0.3 h1:baO+NG1bZSF2WR4zwh+0bMWauWky7DVrTOfvE2w+aFo= -github.com/unrolled/render v1.0.3/go.mod h1:gN9T0NhL4Bfbwu8ann7Ry/TGHYfosul+J0obPf6NBdM= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= diff --git a/sql/n9e_mon.sql b/sql/n9e_mon.sql index 9e718009..2e144e80 100644 --- a/sql/n9e_mon.sql +++ b/sql/n9e_mon.sql @@ -241,7 +241,7 @@ CREATE TABLE `log_collect` ( `created` datetime NOT NULL COMMENT 'created', `last_updator` varchar(64) NOT NULL DEFAULT '' COMMENT 'last_updator', `last_updated` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, - `whether_attach_one_log_line` tinyint(1) not null default 0, + `whether_attach_one_log_line` tinyint(1) not null default 0, PRIMARY KEY (`id`), KEY `idx_nid` (`nid`), KEY `idx_collect_type` (`collect_type`) @@ -340,65 +340,6 @@ CREATE TABLE `collect_rule` ( KEY `idx_collect_type` (`collect_type`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT 'collect rule'; -CREATE TABLE `aggr_calc` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `nid` bigint(20) unsigned NOT NULL DEFAULT '0' COMMENT 'nid', - `category` int(1) NOT NULL COMMENT '1 机器 2业务', - `new_metric` varchar(255) NOT NULL DEFAULT '' COMMENT 'new_metric', - `new_step` int(11) NOT NULL DEFAULT '0' COMMENT 'new_step', - `groupby` varchar(255) NOT NULL DEFAULT '' COMMENT 'groupby', - `raw_metrics` text comment 'raw_metrics', - `global_operator` varchar(32) NOT NULL DEFAULT '' COMMENT 'global_operator', - `expression` varchar(255) NOT NULL 
DEFAULT '' COMMENT 'expression', - `rpn` varchar(255) NOT NULL DEFAULT '' COMMENT 'rpn', - `status` int(1) NOT NULL COMMENT '', - `quota` int(10) NOT NULL COMMENT '', - `comment` varchar(255) NOT NULL DEFAULT '' COMMENT 'comment', - `creator` varchar(64) NOT NULL COMMENT '创建者', - `created` timestamp NOT NULL DEFAULT '1971-01-01 00:00:00' COMMENT 'created', - `last_updator` varchar(64) NOT NULL DEFAULT '', - `last_updated` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, - PRIMARY KEY (`id`), - KEY `idx_nid` (`nid`), - KEY `idx_new_metric` (`new_metric`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT 'aggr_calc'; - -CREATE TABLE `nginx_log_stra` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `nid` bigint(20) unsigned NOT NULL DEFAULT '0' COMMENT 'nid', - `service` varchar(255) NOT NULL DEFAULT '' COMMENT 'service', - `interval` int(11) NOT NULL DEFAULT '0' COMMENT 'interval', - `domain` varchar(2048) NOT NULL DEFAULT '' COMMENT 'domain', - `url_path_prefix` varchar(2048) NOT NULL DEFAULT '' COMMENT 'url_path_prefix', - `append_tags` varchar(2048) NOT NULL DEFAULT '' COMMENT 'append_tags', - `creator` varchar(64) NOT NULL COMMENT '创建者', - `created` timestamp NOT NULL DEFAULT '1971-01-01 00:00:00' COMMENT 'created', - `last_updator` varchar(64) NOT NULL DEFAULT '', - `last_updated` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, - PRIMARY KEY (`id`), - KEY `idx_nid` (`nid`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT 'nginx_log_stra'; - -CREATE TABLE `binlog_stra` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `nid` bigint(20) unsigned NOT NULL DEFAULT '0' COMMENT 'nid', - `metric` varchar(255) NOT NULL DEFAULT '' COMMENT 'metric', - `interval` int(11) NOT NULL DEFAULT '0' COMMENT 'interval', - `db` varchar(2048) NOT NULL DEFAULT '' COMMENT 'db', - `column_change` varchar(2048) NOT NULL DEFAULT '' COMMENT 'column_change', - `tags_column` varchar(2048) NOT NULL DEFAULT '' 
COMMENT 'tags_column', - `append_tags` varchar(2048) NOT NULL DEFAULT '' COMMENT 'append_tags', - `func` varchar(255) NOT NULL DEFAULT '' COMMENT 'func', - `sql_type` varchar(255) NOT NULL DEFAULT '' COMMENT 'sql_type', - `value_column` varchar(255) NOT NULL DEFAULT '' COMMENT 'value_column', - `creator` varchar(64) NOT NULL COMMENT '创建者', - `created` timestamp NOT NULL DEFAULT '1971-01-01 00:00:00' COMMENT 'created', - `last_updator` varchar(64) NOT NULL DEFAULT '', - `last_updated` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, - PRIMARY KEY (`id`), - KEY `idx_nid` (`nid`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT 'binlog_stra'; - CREATE TABLE `collect_hist` ( `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', `cid` bigint(20) unsigned NOT NULL DEFAULT '0' COMMENT 'collect id', diff --git a/src/common/address/address.go b/src/common/address/address.go index 73606c73..ae2359c4 100644 --- a/src/common/address/address.go +++ b/src/common/address/address.go @@ -2,6 +2,7 @@ package address import ( "fmt" + "log" "os" "path" "strconv" @@ -118,6 +119,7 @@ func parseConf() { os.Exit(1) } + log.Println(c) mods = c } diff --git a/src/modules/agent/client/cli.go b/src/common/client/cli.go similarity index 69% rename from src/modules/agent/client/cli.go rename to src/common/client/cli.go index 94c244da..ee69bee5 100644 --- a/src/modules/agent/client/cli.go +++ b/src/common/client/cli.go @@ -1,24 +1,28 @@ package client import ( + "bufio" + "io" "net" "net/rpc" + "reflect" "time" + "github.com/didi/nightingale/v4/src/common/address" + "github.com/ugorji/go/codec" + "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/net/gobrpc" - - "github.com/didi/nightingale/src/common/address" ) var cli *gobrpc.RPCClient -func getCli() *gobrpc.RPCClient { +func getCli(mod string) *gobrpc.RPCClient { if cli != nil { return cli } - servers := address.GetRPCAddresses("job") + servers := address.GetRPCAddresses(mod) // detect the fastest server 
var ( @@ -40,11 +44,22 @@ func getCli() *gobrpc.RPCClient { continue } - c := rpc.NewClient(conn) + var bufConn = struct { + io.Closer + *bufio.Reader + *bufio.Writer + }{conn, bufio.NewReader(conn), bufio.NewWriter(conn)} + + var mh codec.MsgpackHandle + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + rpcCodec := codec.MsgpackSpecRpc.ClientCodec(bufConn, &mh) + c := rpc.NewClientWithCodec(rpcCodec) + acm[addr] = c var out string - err = c.Call("Scheduler.Ping", "", &out) + err = c.Call("Server.Ping", "", &out) if err != nil { logger.Warningf("ping %s fail: %s", addr, err) continue @@ -77,9 +92,9 @@ func getCli() *gobrpc.RPCClient { } // GetCli 探测所有server端的延迟,自动选择最快的 -func GetCli() *gobrpc.RPCClient { +func GetCli(mod string) *gobrpc.RPCClient { for { - c := getCli() + c := getCli(mod) if c != nil { return c } diff --git a/src/common/compress/compress.go b/src/common/compress/compress.go new file mode 100644 index 00000000..cdfeac70 --- /dev/null +++ b/src/common/compress/compress.go @@ -0,0 +1,48 @@ +package compress + +import ( + "archive/tar" + "compress/gzip" + "io" + "os" + "strings" +) + +func UnCompress(tarFile, dest string) error { + srcFile, err := os.Open(tarFile) + if err != nil { + return err + } + defer srcFile.Close() + gr, err := gzip.NewReader(srcFile) + if err != nil { + return err + } + defer gr.Close() + tr := tar.NewReader(gr) + for { + hdr, err := tr.Next() + if err != nil { + if err == io.EOF { + break + } else { + return err + } + } + filename := dest + "/" + hdr.Name + file, err := createFile(filename) + if err != nil { + return err + } + io.Copy(file, tr) + } + return nil +} + +func createFile(name string) (*os.File, error) { + err := os.MkdirAll(string([]rune(name)[0:strings.LastIndex(name, "/")]), 0755) + if err != nil { + return nil, err + } + return os.Create(name) +} diff --git a/src/common/dataobj/judge.go b/src/common/dataobj/judge.go index c4cf4896..2c2fc37b 100644 --- a/src/common/dataobj/judge.go +++ 
b/src/common/dataobj/judge.go @@ -3,7 +3,7 @@ package dataobj import ( "strconv" - "github.com/didi/nightingale/src/toolkits/str" + "github.com/didi/nightingale/v4/src/common/str" gstr "github.com/toolkits/pkg/str" ) diff --git a/src/common/dataobj/snmp.go b/src/common/dataobj/snmp.go index 3232433e..ae0722ef 100644 --- a/src/common/dataobj/snmp.go +++ b/src/common/dataobj/snmp.go @@ -9,6 +9,11 @@ const ( COMMON_MODULE = "common" ) +type IPAndSnmpRpcResp struct { + Data []*IPAndSnmp + Msg string +} + type IPAndSnmp struct { IP string `json:"ip"` Module string `json:"module"` diff --git a/src/common/dataobj/tsdb.go b/src/common/dataobj/tsdb.go index 64e07cc1..524e3a07 100644 --- a/src/common/dataobj/tsdb.go +++ b/src/common/dataobj/tsdb.go @@ -5,7 +5,7 @@ import ( "math" "time" - "github.com/didi/nightingale/src/toolkits/str" + "github.com/didi/nightingale/v4/src/common/str" ) type JsonFloat float64 @@ -95,7 +95,7 @@ func (t *TsdbItem) PrimaryKey() string { } func (t *TsdbItem) MD5() string { - return str.MD5(t.Endpoint, t.Metric, str.SortedTags(t.TagsMap)) + return str.ToMD5(t.Endpoint, t.Metric, str.SortedTags(t.TagsMap)) } func (t *TsdbItem) UUID() string { diff --git a/src/toolkits/exit/exit.go b/src/common/exit/exit.go similarity index 100% rename from src/toolkits/exit/exit.go rename to src/common/exit/exit.go diff --git a/src/toolkits/go-tdigest/LICENSE b/src/common/go-tdigest/LICENSE similarity index 100% rename from src/toolkits/go-tdigest/LICENSE rename to src/common/go-tdigest/LICENSE diff --git a/src/toolkits/go-tdigest/README.md b/src/common/go-tdigest/README.md similarity index 100% rename from src/toolkits/go-tdigest/README.md rename to src/common/go-tdigest/README.md diff --git a/src/toolkits/go-tdigest/serialization.go b/src/common/go-tdigest/serialization.go similarity index 100% rename from src/toolkits/go-tdigest/serialization.go rename to src/common/go-tdigest/serialization.go diff --git a/src/toolkits/go-tdigest/serialization_test.go 
b/src/common/go-tdigest/serialization_test.go similarity index 100% rename from src/toolkits/go-tdigest/serialization_test.go rename to src/common/go-tdigest/serialization_test.go diff --git a/src/toolkits/go-tdigest/summary.go b/src/common/go-tdigest/summary.go similarity index 100% rename from src/toolkits/go-tdigest/summary.go rename to src/common/go-tdigest/summary.go diff --git a/src/toolkits/go-tdigest/summary_test.go b/src/common/go-tdigest/summary_test.go similarity index 100% rename from src/toolkits/go-tdigest/summary_test.go rename to src/common/go-tdigest/summary_test.go diff --git a/src/toolkits/go-tdigest/tdigest.go b/src/common/go-tdigest/tdigest.go similarity index 100% rename from src/toolkits/go-tdigest/tdigest.go rename to src/common/go-tdigest/tdigest.go diff --git a/src/toolkits/go-tdigest/tdigest_test.go b/src/common/go-tdigest/tdigest_test.go similarity index 100% rename from src/toolkits/go-tdigest/tdigest_test.go rename to src/common/go-tdigest/tdigest_test.go diff --git a/src/toolkits/i18n/i18n.go b/src/common/i18n/i18n.go similarity index 100% rename from src/toolkits/i18n/i18n.go rename to src/common/i18n/i18n.go diff --git a/src/common/loggeri/logger.go b/src/common/loggeri/logger.go deleted file mode 100644 index 88a9446f..00000000 --- a/src/common/loggeri/logger.go +++ /dev/null @@ -1,28 +0,0 @@ -package loggeri - -import ( - "fmt" - "os" - - "github.com/toolkits/pkg/logger" -) - -type Config struct { - Dir string `yaml:"dir"` - Level string `yaml:"level"` - KeepHours uint `yaml:"keepHours"` -} - -// InitLogger init logger toolkit -func Init(c Config) { - lb, err := logger.NewFileBackend(c.Dir) - if err != nil { - fmt.Println("cannot init logger:", err) - os.Exit(1) - } - - lb.SetRotateByHour(true) - lb.SetKeepHours(c.KeepHours) - - logger.SetLogging(c.Level, lb) -} diff --git a/src/common/loggeri/loggeri.go b/src/common/loggeri/loggeri.go new file mode 100644 index 00000000..6d86a565 --- /dev/null +++ b/src/common/loggeri/loggeri.go 
@@ -0,0 +1,38 @@ +package loggeri + +import ( + "fmt" + "os" + + "github.com/toolkits/pkg/logger" +) + +type Config struct { + Dir string `yaml:"dir"` + Level string `yaml:"level"` + KeepHours uint `yaml:"keepHours"` + Rotatenum int `yaml:"rotatenum"` + Rotatesize uint64 `yaml:"rotatesize"` +} + +// InitLogger init logger toolkit +func Init(c Config) { + lb, err := logger.NewFileBackend(c.Dir) + if err != nil { + fmt.Println("cannot init logger:", err) + os.Exit(1) + } + + //设置了以小时切换文件,优先使用小时切割文件 + if c.KeepHours != 0 { + lb.SetRotateByHour(true) + lb.SetKeepHours(c.KeepHours) + } else if c.Rotatenum != 0 { + lb.Rotate(c.Rotatenum, c.Rotatesize*1024*1024) + } else { + fmt.Println("cannot init logger: KeepHours and Rotatenum is 0") + os.Exit(2) + } + + logger.SetLogging(c.Level, lb) +} diff --git a/src/common/ping/ping.go b/src/common/ping/ping.go new file mode 100644 index 00000000..72ac043d --- /dev/null +++ b/src/common/ping/ping.go @@ -0,0 +1,76 @@ +package ping + +import ( + "time" + + ping "github.com/sparrc/go-ping" + "github.com/toolkits/pkg/logger" +) + +type ipRes struct { + IP string + Good bool +} + +func FilterIP(ips []string) []string { + workerNum := 100 + worker := make(chan struct{}, workerNum) // 控制 goroutine 并发数 + dataChan := make(chan *ipRes, 20000) + done := make(chan struct{}, 1) + goodIps := []string{} + + go func() { + defer func() { done <- struct{}{} }() + for d := range dataChan { + if d.Good { + goodIps = append(goodIps, d.IP) + } + } + }() + + for _, ip := range ips { + worker <- struct{}{} + go fastPingRtt(ip, 300, worker, dataChan) + } + + // 等待所有 goroutine 执行完成 + for i := 0; i < workerNum; i++ { + worker <- struct{}{} + } + + close(dataChan) + <-done + + return goodIps +} + +func fastPingRtt(ip string, timeout int, worker chan struct{}, dataChan chan *ipRes) { + defer func() { + <-worker + }() + res := &ipRes{ + IP: ip, + Good: goping(ip, timeout), + } + dataChan <- res +} + +func goping(ip string, timeout int) bool { + pinger, err := 
ping.NewPinger(ip) + if err != nil { + panic(err) + } + + pinger.SetPrivileged(true) + pinger.Count = 2 + pinger.Timeout = time.Duration(timeout) * time.Millisecond + pinger.Interval = time.Duration(timeout) * time.Millisecond + pinger.Run() // blocks until finished + stats := pinger.Statistics() // get send/receive/rtt stats + if stats.PacketsRecv > 0 { + return true + } + + logger.Debugf("%+v\n", stats) + return false +} diff --git a/src/toolkits/pools/opentsdb.go b/src/common/pools/opentsdb.go similarity index 100% rename from src/toolkits/pools/opentsdb.go rename to src/common/pools/opentsdb.go diff --git a/src/toolkits/pools/pools.go b/src/common/pools/pools.go similarity index 100% rename from src/toolkits/pools/pools.go rename to src/common/pools/pools.go diff --git a/src/common/report/report.go b/src/common/report/report.go index 81bbd843..246627a2 100644 --- a/src/common/report/report.go +++ b/src/common/report/report.go @@ -2,15 +2,13 @@ package report import ( "fmt" - "math/rand" "time" - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/common/identity" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/common/client" + "github.com/didi/nightingale/v4/src/common/identity" + "github.com/didi/nightingale/v4/src/models" "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/net/httplib" ) type ReportSection struct { @@ -26,85 +24,49 @@ type ReportSection struct { var Config ReportSection -func Init(cfg ReportSection, mod string) { +func Init(cfg ReportSection) { Config = cfg - - addrs := address.GetHTTPAddresses(mod) - - t1 := time.NewTicker(time.Duration(Config.Interval) * time.Millisecond) - report(addrs) for { - <-t1.C - report(addrs) + report() + time.Sleep(time.Duration(Config.Interval) * time.Millisecond) } } -type reportRes struct { - Err string `json:"err"` - Dat string `json:"dat"` -} - -func report(addrs []string) { - perm := rand.Perm(len(addrs)) - for i := range perm { - url 
:= fmt.Sprintf("http://%s/api/hbs/heartbeat", addrs[perm[i]]) - - ident, _ := identity.GetIdent() - m := map[string]string{ - "module": Config.Mod, - "identity": ident, - "rpc_port": Config.RPCPort, - "http_port": Config.HTTPPort, - "remark": Config.Remark, - "region": Config.Region, - } - - var body reportRes - err := httplib.Post(url).JSONBodyQuiet(m).SetTimeout(3 * time.Second).ToJSON(&body) - if err != nil { - logger.Errorf("curl %s fail: %v", url, err) - continue - } - - if body.Err != "" { - logger.Error(body.Err) - continue - } +func report() { + ident, _ := identity.GetIdent() + instance := models.Instance{ + Module: Config.Mod, + Identity: ident, + RPCPort: Config.RPCPort, + HTTPPort: Config.HTTPPort, + Remark: Config.Remark, + Region: Config.Region, + } + var resp string + err := client.GetCli("server").Call("Server.HeartBeat", instance, &resp) + if err != nil { + client.CloseCli() return } -} -type instanceRes struct { - Err string `json:"err"` - Dat []*models.Instance `json:"dat"` + if resp != "" { + logger.Errorf("report instance:%+v err:%s", instance, resp) + } } -func GetAlive(wantedMod, serverMod string) ([]*models.Instance, error) { - addrs := address.GetHTTPAddresses(serverMod) - perm := rand.Perm(len(addrs)) +func GetAlive(wantedMod string) ([]*models.Instance, error) { - timeout := 3000 - if Config.Timeout != 0 { - timeout = Config.Timeout + var resp *models.InstancesResp + err := client.GetCli("server").Call("Server.InstanceGets", wantedMod, &resp) + if err != nil { + client.CloseCli() + return []*models.Instance{}, fmt.Errorf("get %s instances err:%v", wantedMod, err) } - var body instanceRes - var err error - for i := range perm { - url := fmt.Sprintf("http://%s/api/hbs/instances?mod=%s&alive=1", addrs[perm[i]], wantedMod) - err = httplib.Get(url).SetTimeout(time.Duration(timeout) * time.Millisecond).ToJSON(&body) - - if err != nil { - logger.Warningf("curl %s fail: %v", url, err) - continue - } - - if body.Err != "" { - err = 
fmt.Errorf("curl %s fail: %v", url, body.Err) - logger.Warning(err) - continue - } + if resp.Msg != "" { + return []*models.Instance{}, fmt.Errorf("get %s instances err:%s", wantedMod, resp.Msg) } - return body.Dat, err + + return resp.Data, err } diff --git a/src/toolkits/slice/slice.go b/src/common/slice/slice.go similarity index 65% rename from src/toolkits/slice/slice.go rename to src/common/slice/slice.go index 87d5c029..7cce9884 100644 --- a/src/toolkits/slice/slice.go +++ b/src/common/slice/slice.go @@ -92,3 +92,54 @@ func Int64In(val int64, slice []int64) bool { return false } + +// slice set +func Set(s []string) []string { + m := make(map[string]interface{}) + for i := 0; i < len(s); i++ { + if strings.TrimSpace(s[i]) == "" { + continue + } + + m[s[i]] = 1 + } + + s2 := []string{} + for k := range m { + s2 = append(s2, k) + } + + return s2 +} + +func InSlice(val string, slice []string) bool { + for i := 0; i < len(slice); i++ { + if slice[i] == val { + return true + } + } + + return false +} + +func SplitN(m, n int) [][]int { + var res [][]int + + if n <= 0 { + return [][]int{[]int{0, m}} + } + + for i := 0; i < m; i = i + n { + var start, end int + start = i + end = i + n + + if end >= m { + end = m + } + + res = append(res, []int{start, end}) + + } + return res +} diff --git a/src/toolkits/stack/stack.go b/src/common/stack/stack.go similarity index 100% rename from src/toolkits/stack/stack.go rename to src/common/stack/stack.go diff --git a/src/toolkits/stats/counter.go b/src/common/stats/counter.go similarity index 100% rename from src/toolkits/stats/counter.go rename to src/common/stats/counter.go diff --git a/src/toolkits/stats/init.go b/src/common/stats/init.go similarity index 88% rename from src/toolkits/stats/init.go rename to src/common/stats/init.go index 178f5220..774e153a 100644 --- a/src/toolkits/stats/init.go +++ b/src/common/stats/init.go @@ -8,8 +8,8 @@ import ( "path" "time" - "github.com/didi/nightingale/src/common/address" - 
"github.com/didi/nightingale/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/address" + "github.com/didi/nightingale/v4/src/common/dataobj" "github.com/toolkits/pkg/file" "github.com/toolkits/pkg/logger" @@ -27,9 +27,9 @@ func Init(prefix string, addr ...string) { } else if file.IsExist(path.Join(runner.Cwd, "etc", "address.yml")) { //address.yml 存在,则使用配置文件的地址 - newAddr := address.GetHTTPAddresses("agent") + newAddr := address.GetHTTPAddresses("agentd") if len(newAddr) == 0 { - port := address.GetHTTPPort("agent") + port := address.GetHTTPPort("agentd") PushUrl = fmt.Sprintf("http://127.0.0.1:%d/v1/push", port) } else { PushUrl = fmt.Sprintf("http://%s/v1/push", newAddr[0]) diff --git a/src/toolkits/str/checksum.go b/src/common/str/checksum.go similarity index 100% rename from src/toolkits/str/checksum.go rename to src/common/str/checksum.go diff --git a/src/toolkits/str/format.go b/src/common/str/format.go similarity index 98% rename from src/toolkits/str/format.go rename to src/common/str/format.go index 3ae3a585..da537b74 100644 --- a/src/toolkits/str/format.go +++ b/src/common/str/format.go @@ -9,9 +9,8 @@ import ( "sync" "time" - "github.com/toolkits/pkg/str" - "github.com/cespare/xxhash" + "github.com/toolkits/pkg/str" ) const SEPERATOR = "/" @@ -98,7 +97,7 @@ func XXhash(strs ...string) uint64 { return xxhash.Sum64(ret.Bytes()) } -func MD5(endpoint string, metric string, tags string) string { +func ToMD5(endpoint string, metric string, tags string) string { return str.MD5(PK(endpoint, metric, tags)) } diff --git a/src/toolkits/str/parser.go b/src/common/str/parser.go similarity index 100% rename from src/toolkits/str/parser.go rename to src/common/str/parser.go diff --git a/src/toolkits/str/slice.go b/src/common/str/slice.go similarity index 100% rename from src/toolkits/str/slice.go rename to src/common/str/slice.go diff --git a/src/models/host_register.go b/src/models/host_register.go new file mode 100644 index 00000000..ef1bef3d --- 
/dev/null +++ b/src/models/host_register.go @@ -0,0 +1,215 @@ +package models + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/toolkits/pkg/cache" +) + +type HostRegisterForm struct { + SN string `json:"sn"` + IP string `json:"ip"` + Ident string `json:"ident"` + Name string `json:"name"` + Cate string `json:"cate"` + UniqKey string `json:"uniqkey"` + Fields map[string]interface{} `json:"fields"` + Digest string `json:"digest"` +} + +func (f HostRegisterForm) Validate() error { + if f.IP == "" { + return fmt.Errorf("ip is blank") + } + + if f.UniqKey == "" { + return fmt.Errorf("uniqkey is blank") + } + + if f.Digest == "" { + return fmt.Errorf("digest is blank") + } + return nil +} + +// mapKeyClear map key clear +func MapKeyClear(src map[string]interface{}, save map[string]struct{}) { + var dels []string + for k := range src { + if _, ok := save[k]; !ok { + dels = append(dels, k) + } + } + + for i := 0; i < len(dels); i++ { + delete(src, dels[i]) + } +} + +func HostRegister(f HostRegisterForm) error { + oldFields := make(map[string]interface{}, len(f.Fields)) + for k, v := range f.Fields { + oldFields[k] = v + } + + uniqValue := "" + + if f.UniqKey == "sn" { + uniqValue = f.SN + } + + if f.UniqKey == "ip" { + uniqValue = f.IP + } + + if f.UniqKey == "ident" { + uniqValue = f.Ident + } + + if f.UniqKey == "name" { + uniqValue = f.Name + } + + if uniqValue == "" { + return fmt.Errorf("%s is blank", f.UniqKey) + } + + cacheKey := "/host/info/" + f.UniqKey + "/" + uniqValue + + var val string + if err := cache.Get(cacheKey, &val); err == nil { + if f.Digest == val { + // 说明客户端采集到的各个字段信息并无变化,无需更新DB + + return nil + } + } else { + if err.Error() != cache.ErrCacheMiss.Error() { + return fmt.Errorf("get cache:%+v err:%v", f, err) + } + } + + host, err := HostGet(f.UniqKey+" = ?", uniqValue) + if err != nil { + return fmt.Errorf("get host:%+v err:%v", f, err) + } + + hFixed := map[string]struct{}{ + "cpu": struct{}{}, + "mem": struct{}{}, + "disk": 
struct{}{}, + } + + MapKeyClear(f.Fields, hFixed) + + if host == nil { + msg := "create host failed" + host, err = HostNew(f.SN, f.IP, f.Ident, f.Name, f.Cate, f.Fields) + if err != nil { + return fmt.Errorf("new host:%+v err:%v", f, err) + } + + if host == nil { + return fmt.Errorf("%s, report info:%v", msg, f) + } + } else { + f.Fields["sn"] = f.SN + f.Fields["ip"] = f.IP + f.Fields["ident"] = f.Ident + f.Fields["name"] = f.Name + f.Fields["cate"] = f.Cate + f.Fields["clock"] = time.Now().Unix() + + err = host.Update(f.Fields) + if err != nil { + return fmt.Errorf("update host:%+v err:%v", f, err) + } + } + + if v, ok := oldFields["tenant"]; ok { + vStr := v.(string) + if vStr != "" { + err = HostUpdateTenant([]int64{host.Id}, vStr) + if err != nil { + return fmt.Errorf("update host:%+v tenant err:%v", f, err) + } + + err = ResourceRegister([]Host{*host}, vStr) + if err != nil { + return fmt.Errorf("resource %+v register err:%v", host, err) + } + } + } + + if host.Tenant != "" { + // 已经分配给某个租户了,那肯定对应某个resource,需要更新resource的信息 + res, err := ResourceGet("uuid=?", fmt.Sprintf("host-%d", host.Id)) + if err != nil { + return fmt.Errorf("get resource %v err:%v", host.Id, res) + } + + if res == nil { + // 数据不干净,ams里有这个host,而且是已分配状态,但是resource表里没有,重新注册一下 + err := ResourceRegister([]Host{*host}, host.Tenant) + if err != nil { + return fmt.Errorf("resource %+v register err:%v", host, err) + } + + // 注册完了,重新查询一下试试 + res, err = ResourceGet("uuid=?", fmt.Sprintf("host-%d", host.Id)) + if err != nil { + return fmt.Errorf("get resource %v err:%v", host.Id, res) + } + + if res == nil { + return fmt.Errorf("resource %+v register fail, unknown error", host) + } + } + + res.Ident = f.Ident + res.Name = f.Name + res.Cate = f.Cate + + MapKeyClear(f.Fields, hFixed) + + js, err := json.Marshal(f.Fields) + if err != nil { + return fmt.Errorf("json marshal fields:%v err:%v", f.Fields, err) + } + + res.Extend = string(js) + + err = res.Update("ident", "name", "cate", "extend") + if err != 
nil { + return fmt.Errorf("update err:%v", err) + } + } + + var objs []HostFieldValue + for k, v := range oldFields { + if k == "tenant" { + continue + } + + if _, ok := hFixed[k]; !ok { + tmp := HostFieldValue{HostId: host.Id, FieldIdent: k, FieldValue: v.(string)} + objs = append(objs, tmp) + } + } + + if len(objs) > 0 { + err = HostFieldValuePuts(host.Id, objs) + if err != nil { + return fmt.Errorf("host:%+v FieldValue %+v Puts err:%v", host, objs, err) + } + } + + err = cache.Set(cacheKey, f.Digest, cache.DEFAULT) + if err != nil { + return fmt.Errorf("set host:%v cache:%s %v err:%v", f, cacheKey, cache.DEFAULT, err) + } + + return nil +} diff --git a/src/models/init.go b/src/models/init.go index be7c041f..07dbcd1b 100644 --- a/src/models/init.go +++ b/src/models/init.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/didi/nightingale/src/toolkits/i18n" + "github.com/didi/nightingale/v4/src/common/i18n" "github.com/toolkits/pkg/cache" ) diff --git a/src/models/mon_hbs.go b/src/models/instance.go similarity index 65% rename from src/models/mon_hbs.go rename to src/models/instance.go index 098f2dad..32f54710 100644 --- a/src/models/mon_hbs.go +++ b/src/models/instance.go @@ -1,6 +1,15 @@ package models -import "time" +import ( + "fmt" + "time" +) + +//rpc +type InstancesResp struct { + Data []*Instance + Msg string +} type Instance struct { Id int64 `json:"id"` @@ -64,3 +73,35 @@ func DelById(id int64) error { _, err := DB["hbs"].Where("id=?", id).Delete(new(Instance)) return err } + +func ReportHeartBeat(rev Instance) error { + instance, err := GetInstanceBy(rev.Module, rev.Identity, rev.RPCPort, rev.HTTPPort) + if err != nil { + return fmt.Errorf("get instance:%+v err:%v", rev, err) + } + + now := time.Now().Unix() + if instance == nil { + instance = &Instance{ + Identity: rev.Identity, + Module: rev.Module, + RPCPort: rev.RPCPort, + HTTPPort: rev.HTTPPort, + Region: rev.Region, + TS: now, + } + err := instance.Add() + if err != nil { + return 
fmt.Errorf("instance:%+v add err:%v", rev, err) + } + } else { + instance.TS = now + instance.HTTPPort = rev.HTTPPort + instance.Region = rev.Region + err := instance.Update() + if err != nil { + return fmt.Errorf("instance:%+v update err:%v", rev, err) + } + } + return nil +} diff --git a/src/models/ldap.go b/src/models/ldap.go index a9bfce2f..14b0f464 100644 --- a/src/models/ldap.go +++ b/src/models/ldap.go @@ -7,13 +7,38 @@ import ( "gopkg.in/ldap.v3" "github.com/toolkits/pkg/logger" - - "github.com/didi/nightingale/src/modules/rdb/config" ) +type LDAPSection struct { + DefaultUse bool `yaml:"defaultUse"` + Host string `yaml:"host"` + Port int `yaml:"port"` + BaseDn string `yaml:"baseDn"` + BindUser string `yaml:"bindUser"` + BindPass string `yaml:"bindPass"` + AuthFilter string `yaml:"authFilter"` + Attributes ldapAttributes `yaml:"attributes"` + CoverAttributes bool `yaml:"coverAttributes"` + TLS bool `yaml:"tls"` + StartTLS bool `yaml:"startTLS"` +} + +type ldapAttributes struct { + Dispname string `yaml:"dispname"` + Phone string `yaml:"phone"` + Email string `yaml:"email"` + Im string `yaml:"im"` +} + +var LDAPConfig LDAPSection + +func InitLDAP(conf LDAPSection) { + LDAPConfig = conf +} + func genLdapAttributeSearchList() []string { var ldapAttributes []string - attrs := config.Config.LDAP.Attributes + attrs := LDAPConfig.Attributes if attrs.Dispname != "" { ldapAttributes = append(ldapAttributes, attrs.Dispname) } @@ -32,7 +57,7 @@ func genLdapAttributeSearchList() []string { func ldapReq(user, pass string) (*ldap.SearchResult, error) { var conn *ldap.Conn var err error - lc := config.Config.LDAP + lc := LDAPConfig addr := fmt.Sprintf("%s:%d", lc.Host, lc.Port) if lc.TLS { diff --git a/src/models/mon_aggr.go b/src/models/mon_aggr.go index 268b71de..efc49aa7 100644 --- a/src/models/mon_aggr.go +++ b/src/models/mon_aggr.go @@ -8,8 +8,8 @@ import ( "time" "unicode" - "github.com/didi/nightingale/src/common/dataobj" - 
"github.com/didi/nightingale/src/toolkits/stack" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/stack" ) type AggrCalc struct { diff --git a/src/models/mon_collect.go b/src/models/mon_collect.go index ef015ad6..c2e5a879 100644 --- a/src/models/mon_collect.go +++ b/src/models/mon_collect.go @@ -8,6 +8,7 @@ import ( "sync" "time" + "github.com/didi/nightingale/v4/src/common/dataobj" "xorm.io/xorm" ) @@ -191,6 +192,11 @@ type LogCollect struct { WhetherAttachOneLogLine int `json:"whether_attach_one_log_line" xorm:"'whether_attach_one_log_line'"` } +type ApiCollectRpcResp struct { + Data []*ApiCollect + Msg string +} + type ApiCollect struct { Id int64 `json:"id"` Nid int64 `json:"nid"` @@ -531,6 +537,109 @@ func (a *ApiCollect) Update() error { return err } +func GetSnmpCollects(nid int64) ([]*SnmpCollect, error) { + collects := []*SnmpCollect{} + if nid == 0 { + err := DB["mon"].Find(&collects) + return collects, err + } + + err := DB["mon"].Where("nid = ?", nid).Find(&collects) + return collects, err +} + +func (s *SnmpCollect) Encode() error { + if s.OidType < 1 || s.OidType > 3 { + return fmt.Errorf("oid type not support") + } + + if s.OidType == 1 { + //补全oid + s.Module = dataobj.COMMON_MODULE + mib, err := MibGet("module=? 
and metric=?", s.Module, s.Metric) + if err != nil { + return fmt.Errorf("get mib err:%v", err) + } + + s.Oid = mib.Oid + } else { + if s.Module == "" { + return fmt.Errorf("module is null") + } + + if s.Oid == "" { + return fmt.Errorf("oid is null") + } + + if s.MetricType == "" { + return fmt.Errorf("type is null") + } + } + + if s.Metric == "" { + return fmt.Errorf("metric is null") + } + + indexes, err := json.Marshal(s.Indexes) + if err != nil { + return fmt.Errorf("encode indexes err:%v", err) + } + + s.IndexesStr = string(indexes) + return nil +} + +func (s *SnmpCollect) Decode() error { + err := json.Unmarshal([]byte(s.IndexesStr), &s.Indexes) + if err != nil { + return fmt.Errorf("decode indexes err:%v", err) + } + + return err +} + +func (s *SnmpCollect) Update() error { + session := DB["mon"].NewSession() + defer session.Close() + + err := session.Begin() + if err != nil { + return err + } + + if _, err = session.Id(s.Id).AllCols().Update(s); err != nil { + session.Rollback() + return err + } + + b, err := json.Marshal(s) + if err != nil { + session.Rollback() + return err + } + + if err := saveHistory(s.Id, "snmp", "update", s.Creator, string(b), session); err != nil { + session.Rollback() + return err + } + + if err = session.Commit(); err != nil { + return err + } + + return err +} + +func (s *SnmpCollect) GetByOidAndModule() (*SnmpCollect, error) { + collect := new(SnmpCollect) + has, err := DB["mon"].Where("oid = ? and nid = ? and module = ? 
and metric = ?", s.Oid, s.Nid, s.Module, s.Metric).Get(collect) + if !has { + return nil, err + } + collect.Decode() + return collect, err +} + func CreateCollect(collectType, creator string, collect interface{}, dryRun bool) (err error) { session := DB["mon"].NewSession() if err = session.Begin(); err != nil { diff --git a/src/models/mon_collect_rule.go b/src/models/mon_collect_rule.go index a0468523..31364b2e 100644 --- a/src/models/mon_collect_rule.go +++ b/src/models/mon_collect_rule.go @@ -4,7 +4,8 @@ import ( "encoding/json" "fmt" - "github.com/didi/nightingale/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/dataobj" + "xorm.io/xorm" ) @@ -12,6 +13,11 @@ const ( defaultStep = 10 ) +type CollectRuleRpcResp struct { + Data []*CollectRule + Msg string +} + type CollectRule struct { Id int64 `json:"id"` Nid int64 `json:"nid"` diff --git a/src/models/mon_event.go b/src/models/mon_event.go index 35aab353..d42c49d7 100644 --- a/src/models/mon_event.go +++ b/src/models/mon_event.go @@ -4,8 +4,13 @@ import ( "encoding/json" "strings" "time" +) + +const ALERT = "alert" +const RECOVERY = "recovery" - "github.com/didi/nightingale/src/modules/monapi/config" +var ( + EventTypeMap = map[string]string{RECOVERY: "恢复", ALERT: "报警"} ) type Event struct { @@ -220,7 +225,7 @@ func EventAlertUpgradeUnMarshal(str string) (EventAlertUpgrade, error) { } func EventCnt(hashid uint64, stime, etime int64, isUpgrade bool) (int64, error) { - session := DB["mon"].Where("hashid = ? and event_type = ? and etime between ? and ?", hashid, config.ALERT, stime, etime) + session := DB["mon"].Where("hashid = ? and event_type = ? and etime between ? 
and ?", hashid, ALERT, stime, etime) if isUpgrade { return session.In("status", GetFlagsByStatus([]string{STATUS_UPGRADE, STATUS_SEND})).Count(new(Event)) diff --git a/src/models/nems_mib.go b/src/models/nems_mib.go new file mode 100644 index 00000000..c4ab1354 --- /dev/null +++ b/src/models/nems_mib.go @@ -0,0 +1,134 @@ +package models + +import ( + "encoding/json" + "regexp" + "strings" + "time" + + "xorm.io/xorm" +) + +type Module struct { + // A list of OIDs. + Walk []string `yaml:"walk,omitempty"` + Get []string `yaml:"get,omitempty"` + Metrics []*Metric `yaml:"metrics"` + WalkParams WalkParams `yaml:",inline"` +} + +type WalkParams struct { + Version int `yaml:"version,omitempty"` + MaxRepetitions uint8 `yaml:"max_repetitions,omitempty"` + Retries int `yaml:"retries,omitempty"` + Timeout time.Duration `yaml:"timeout,omitempty"` + Auth Auth `yaml:"auth,omitempty"` +} + +type Metric struct { + Name string `yaml:"name"` + Oid string `yaml:"oid"` + Type string `yaml:"type"` + Help string `yaml:"help"` + Indexes []*Index `yaml:"indexes,omitempty"` + Lookups []*Lookup `yaml:"lookups,omitempty"` + RegexpExtracts map[string][]RegexpExtract `yaml:"regex_extracts,omitempty"` + EnumValues map[int]string `yaml:"enum_values,omitempty"` +} + +type RegexpExtract struct { + Value string `yaml:"value"` + Regex Regexp `yaml:"regex"` +} + +// Regexp encapsulates a regexp.Regexp and makes it YAML marshalable. 
+type Regexp struct { + *regexp.Regexp +} + +type Mib struct { + Id int64 `json:"id"` + Module string `json:"module"` + Metric string `json:"metric"` + Oid string `json:"oid"` + Mtype string `json:"mtype"` //gauge,counter + EnumValues string `json:"enum_values"` + Indexes string `json:"indexes"` + Note string `json:"note"` +} + +func NewMib(module string, m *Metric) *Mib { + enumValues, _ := json.Marshal(m.EnumValues) + indexes, _ := json.Marshal(m.Indexes) + + mib := &Mib{ + Module: module, + Metric: m.Name, + Oid: m.Oid, + Mtype: m.Type, + EnumValues: string(enumValues), + Indexes: string(indexes), + Note: m.Help, + } + return mib +} + +func (m *Mib) Save() error { + _, err := DB["nems"].InsertOne(m) + return err +} + +func MibDel(id int64) error { + _, err := DB["nems"].Where("id=?", id).Delete(new(Mib)) + return err +} + +func MibTotal(query string) (int64, error) { + return buildMibWhere(query).Count() +} + +func MibGet(where string, args ...interface{}) (*Mib, error) { + var obj Mib + has, err := DB["nems"].Where(where, args...).Get(&obj) + if !has { + return nil, err + } + + return &obj, err +} + +func MibGets(where string, args ...interface{}) ([]Mib, error) { + var objs []Mib + err := DB["nems"].Where(where, args...).Find(&objs) + return objs, err +} + +func MibGetsGroupBy(group string, where string, args ...interface{}) ([]Mib, error) { + var objs []Mib + var err error + if where == "" { + err = DB["nems"].GroupBy(group).Find(&objs) + } else { + err = DB["nems"].Where(where, args...).GroupBy(group).Find(&objs) + } + return objs, err +} + +func MibGetsByQuery(query string, limit, offset int) ([]Mib, error) { + session := buildMibWhere(query) + var objs []Mib + err := session.Limit(limit, offset).Find(&objs) + return objs, err +} + +func buildMibWhere(query string) *xorm.Session { + session := DB["nems"].Table(new(Mib)) + if query != "" { + arr := strings.Fields(query) + for i := 0; i < len(arr); i++ { + q := "%" + arr[i] + "%" + session = 
session.Where("module like ? or oid like ? or metric like ?", q, q, q) + } + } + return session +} diff --git a/src/models/nems_nethw.go b/src/models/nems_nethw.go new file mode 100644 index 00000000..32ac667d --- /dev/null +++ b/src/models/nems_nethw.go @@ -0,0 +1,298 @@ +package models + +import ( + "encoding/json" + "strings" + + "github.com/toolkits/pkg/logger" + "github.com/toolkits/pkg/str" + "xorm.io/xorm" +) + +type NetworkHardwareRpcResp struct { + Data []*NetworkHardware + Msg string +} + +type NetworkHardware struct { + Id int64 `json:"id"` + SN string `json:"sn" xorm:"sn"` + IP string `json:"ip" xorm:"ip"` + Name string `json:"name"` + Note string `json:"note"` + Cate string `json:"cate"` + SnmpVersion string `json:"snmp_version"` + Auth string `json:"auth"` + Region string `json:"region"` + Info string `json:"info"` + Tenant string `json:"tenant"` + Uptime int64 `json:"uptime"` +} + +func MakeNetworkHardware(ip, cate, version, auth, region, note string) *NetworkHardware { + obj := &NetworkHardware{ + IP: ip, + SnmpVersion: version, + Auth: auth, + Region: region, + Note: note, + Cate: cate, + } + return obj +} + +func NetworkHardwareNew(objPtr *NetworkHardware) error { + session := DB["nems"].NewSession() + defer session.Close() + + if err := session.Begin(); err != nil { + return err + } + + old, err := NetworkHardwareGet("ip=?", objPtr.IP) + if err != nil { + session.Rollback() + return err + } + if old != nil { + session.Rollback() + return nil + } + + _, err = session.Insert(objPtr) + if err != nil { + session.Rollback() + return err + } + + return session.Commit() +} + +func NetworkHardwareGet(where string, args ...interface{}) (*NetworkHardware, error) { + var obj NetworkHardware + has, err := DB["nems"].Where(where, args...).Get(&obj) + if err != nil { + return nil, err + } + + if !has { + return nil, nil + } + + return &obj, nil +} + +func (n *NetworkHardware) Update(cols ...string) error { + session := DB["nems"].NewSession() + defer 
session.Close() + + if err := session.Begin(); err != nil { + return err + } + + _, err := session.Where("id=?", n.Id).Cols(cols...).Update(n) + if err != nil { + session.Rollback() + return err + } + + return session.Commit() +} + +// func (h *Host) Del() error { +// _, err := DB["ams"].Where("id=?", h.Id).Delete(new(Host)) +// return err +// } + +func (n *NetworkHardware) Del() error { + _, err := DB["nems"].Where("id=?", n.Id).Delete(new(NetworkHardware)) + return err +} + +func NetworkHardwareCount(where string, args ...interface{}) (int64, error) { + if where != "" { + return DB["nems"].Where(where, args...).Count(new(NetworkHardware)) + } + + return DB["nems"].Count(new(NetworkHardware)) +} + +func NetworkHardwareTotal(query string) (int64, error) { + return buildHWWhere(query).Count() +} + +func NetworkHardwareList(query string, limit, offset int) ([]NetworkHardware, error) { + session := buildHWWhere(query) + var objs []NetworkHardware + err := session.Limit(limit, offset).OrderBy("id desc").Find(&objs) + return objs, err +} + +func buildHWWhere(query string) *xorm.Session { + session := DB["nems"].Table(new(NetworkHardware)) + if query != "" { + arr := strings.Fields(query) + for i := 0; i < len(arr); i++ { + q := "%" + arr[i] + "%" + session = session.Where("cate = ? or ip like ? or name like ? 
or note like ?", arr[i], q, q, q) + } + } + return session +} + +func NetworkHardwareDel(id int64) error { + session := DB["nems"].NewSession() + defer session.Close() + + if err := session.Begin(); err != nil { + return err + } + + var obj NetworkHardware + has, err := session.Where("id=?", id).Get(&obj) + if err != nil { + session.Rollback() + return err + } + + if !has { + return err + } + + _, err = session.Where("id=?", id).Delete(new(NetworkHardware)) + if err != nil { + session.Rollback() + return err + } + + return session.Commit() +} + +// ResourceRegister 资源分配给某个租户的时候调用 +func NetworkHardwareResourceRegister(hws []*NetworkHardware, tenant string) error { + count := len(hws) + for i := 0; i < count; i++ { + uuid := hws[i].SN + res, err := ResourceGet("uuid=?", uuid) + if err != nil { + return err + } + + if res == nil { + res = &Resource{ + UUID: uuid, + Ident: hws[i].IP, + Name: hws[i].Name, + Cate: hws[i].Cate, + Tenant: tenant, + } + + // 如果host加个字段,并且要放到extend里,这里要改 + fields := map[string]interface{}{ + "region": hws[i].Region, + } + + js, err := json.Marshal(fields) + if err != nil { + return err + } + + res.Extend = string(js) + err = res.Save() + if err != nil { + return err + } + } else { + if res.Tenant != tenant { + // 之前有归属,如果归属发生变化,解除之前的挂载关系 + err = NodeResourceUnbindByRids([]int64{res.Id}) + if err != nil { + return err + } + } + + res.Ident = hws[i].IP + res.Name = hws[i].Name + res.Cate = hws[i].Cate + res.Tenant = tenant + + fields := map[string]interface{}{ + "region": hws[i].Region, + } + + js, err := json.Marshal(fields) + if err != nil { + return err + } + + res.Extend = string(js) + err = res.Update("ident", "name", "cate", "extend", "tenant") + if err != nil { + return err + } + } + } + return nil +} + +// NwSearch 普通用户查询 +func NwSearch(batch, field string) ([]NetworkHardware, error) { + arr := str.ParseLines(strings.Replace(batch, ",", "\n", -1)) + if len(arr) == 0 { + return []NetworkHardware{}, nil + } + + var objs []NetworkHardware 
+ err := DB["nems"].Table("network_hardware").In(field, arr).Find(&objs) + return objs, err +} + +func NwTotalForAdmin(tenant, query, batch, field string) (int64, error) { + return buildNwWhere(tenant, query, batch, field).Count() +} + +func NwGetsForAdmin(tenant, query, batch, field string, limit, offset int) ([]NetworkHardware, error) { + var objs []NetworkHardware + err := buildNwWhere(tenant, query, batch, field).Limit(limit, offset).Find(&objs) + return objs, err +} + +func buildNwWhere(tenant, query, batch, field string) *xorm.Session { + session := DB["nems"].Table("network_hardware").OrderBy("id") + + if tenant == "0" { + session = session.Where("tenant=?", "") + } else if tenant != "" { + session = session.Where("tenant=?", tenant) + } + + if batch == "" && query != "" { + arr := strings.Fields(query) + for i := 0; i < len(arr); i++ { + q := "%" + arr[i] + "%" + session = session.Where("cate=? or sn=? or ip like ? or name like ? or note like ?", arr[i], arr[i], q, q, q) + } + } + + if batch != "" { + arr := str.ParseLines(strings.Replace(batch, ",", "\n", -1)) + if len(arr) > 0 { + session = session.In(field, arr) + } + } + + return session +} + +func GetHardwareInfoBy(ips []string) []*NetworkHardware { + var hws []*NetworkHardware + for _, ip := range ips { + hw, err := NetworkHardwareGet("ip=?", ip) + if err != nil { + logger.Error(err) + continue + } + hws = append(hws, hw) + } + return hws +} diff --git a/src/models/node.go b/src/models/node.go index 2089e14b..a0148094 100644 --- a/src/models/node.go +++ b/src/models/node.go @@ -6,7 +6,6 @@ import ( "time" "github.com/toolkits/pkg/slice" - "github.com/toolkits/pkg/str" ) @@ -591,7 +590,8 @@ func GetLeafNidsForMon(nid int64, exclNid []int64) ([]int64, error) { if node == nil { // 节点已经被删了,相关的告警策略也删除 - StraDelByNid(nid) + // todo 逻辑需要优化 + //StraDelByNid(nid) return []int64{}, nil } diff --git a/src/models/node_field_value.go b/src/models/node_cate_field_value.go similarity index 100% rename from 
src/models/node_field_value.go rename to src/models/node_cate_field_value.go diff --git a/src/models/node_resource.go b/src/models/node_resource.go index b920e4ef..9442b727 100644 --- a/src/models/node_resource.go +++ b/src/models/node_resource.go @@ -3,7 +3,7 @@ package models import ( "fmt" - "github.com/didi/nightingale/src/toolkits/slice" + "github.com/didi/nightingale/v4/src/common/slice" ) type NodeResource struct { diff --git a/src/models/node_role.go b/src/models/node_role.go index 0d77800a..3b44030a 100644 --- a/src/models/node_role.go +++ b/src/models/node_role.go @@ -34,7 +34,6 @@ func NodeRoleDel(nodeId, roleId int64, username string) error { return err } -// RoleIdsBindingUsername func RoleIdsBindingUsername(username string, nids []int64) ([]int64, error) { var ids []int64 err := DB["rdb"].Table("node_role").Where("username=?", username).In("node_id", nids).Select("role_id").Find(&ids) diff --git a/src/models/captcha.go b/src/models/rdb_captcha.go similarity index 100% rename from src/models/captcha.go rename to src/models/rdb_captcha.go diff --git a/src/models/configs.go b/src/models/rdb_configs.go similarity index 100% rename from src/models/configs.go rename to src/models/rdb_configs.go diff --git a/src/models/session.go b/src/models/rdb_session.go similarity index 100% rename from src/models/session.go rename to src/models/rdb_session.go diff --git a/src/models/role_operation.go b/src/models/role_opration.go similarity index 100% rename from src/models/role_operation.go rename to src/models/role_opration.go diff --git a/src/models/user.go b/src/models/user.go index 208214ef..3ff080b4 100644 --- a/src/models/user.go +++ b/src/models/user.go @@ -7,15 +7,12 @@ import ( "strings" "time" - "github.com/toolkits/pkg/slice" - "github.com/toolkits/pkg/cache" "github.com/toolkits/pkg/errors" "github.com/toolkits/pkg/logger" + "github.com/toolkits/pkg/slice" "github.com/toolkits/pkg/str" "gopkg.in/ldap.v3" - - 
"github.com/didi/nightingale/src/modules/rdb/config" ) const ( @@ -111,7 +108,7 @@ func (u *User) Validate() error { } func (u *User) CopyLdapAttr(sr *ldap.SearchResult) { - attrs := config.Config.LDAP.Attributes + attrs := LDAPConfig.Attributes if attrs.Dispname != "" { u.Dispname = sr.Entries[0].GetAttributeValue(attrs.Dispname) } @@ -173,7 +170,7 @@ func LdapLogin(username, pass string) (*User, error) { user.CopyLdapAttr(sr) if has { - if config.Config.LDAP.CoverAttributes { + if LDAPConfig.CoverAttributes { _, err := DB["rdb"].Where("id=?", user.Id).Update(user) return &user, err } else { @@ -710,10 +707,10 @@ func UsersGet(where string, args ...interface{}) ([]User, error) { return objs, nil } -func (u *User) PermByNode(node *Node) ([]string, error) { +func (u *User) PermByNode(node *Node, localOpsList []string) ([]string, error) { // 我是超管,自然有权限 if u.IsRoot == 1 { - return config.LocalOpsList, nil + return localOpsList, nil } // 我是path上游的某个admin,自然有权限 @@ -729,7 +726,7 @@ func (u *User) PermByNode(node *Node) ([]string, error) { if yes, err := NodesAdminExists(nodeIds, u.Id); err != nil { return nil, err } else if yes { - return config.LocalOpsList, nil + return localOpsList, nil } if roleIds, err := RoleIdsBindingUsername(u.Username, nodeIds); err != nil { diff --git a/src/models/stats.go b/src/models/user_stats.go similarity index 100% rename from src/models/stats.go rename to src/models/user_stats.go diff --git a/src/modules/agent/stra/cron.go b/src/modules/agent/stra/cron.go deleted file mode 100644 index e1c3603e..00000000 --- a/src/modules/agent/stra/cron.go +++ /dev/null @@ -1,81 +0,0 @@ -package stra - -import ( - "fmt" - "math/rand" - "time" - - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/net/httplib" - - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/agent/config" -) - -func GetCollects() { - if !config.Config.Stra.Enable { - return - } - - detect() 
- go loopDetect() -} - -func loopDetect() { - t1 := time.NewTicker(time.Duration(config.Config.Stra.Interval) * time.Second) - for { - <-t1.C - detect() - } -} - -func detect() { - c, err := GetCollectsRetry() - if err != nil { - logger.Errorf("get collect err:%v", err) - return - } - - Collect.Update(&c) -} - -type CollectResp struct { - Dat models.Collect `json:"dat"` - Err string `json:"err"` -} - -func GetCollectsRetry() (models.Collect, error) { - count := len(address.GetHTTPAddresses("monapi")) - var resp CollectResp - var err error - for i := 0; i < count; i++ { - resp, err = getCollects() - if err == nil { - if resp.Err != "" { - err = fmt.Errorf(resp.Err) - continue - } - return resp.Dat, err - } - } - - return resp.Dat, err -} - -func getCollects() (CollectResp, error) { - addrs := address.GetHTTPAddresses("monapi") - i := rand.Intn(len(addrs)) - addr := addrs[i] - - var res CollectResp - var err error - - url := fmt.Sprintf("http://%s%s%s", addr, config.Config.Stra.Api, config.Endpoint) - err = httplib.Get(url).SetTimeout(time.Duration(config.Config.Stra.Timeout) * time.Millisecond).ToJSON(&res) - if err != nil { - err = fmt.Errorf("get collects from remote:%s failed, error:%v", url, err) - } - - return res, err -} diff --git a/src/modules/agent/agent.go b/src/modules/agentd/agentd.go similarity index 65% rename from src/modules/agent/agent.go rename to src/modules/agentd/agentd.go index c2aae2c8..df349684 100644 --- a/src/modules/agent/agent.go +++ b/src/modules/agentd/agentd.go @@ -7,22 +7,22 @@ import ( "os/signal" "syscall" - "github.com/didi/nightingale/src/common/loggeri" - "github.com/didi/nightingale/src/modules/agent/cache" - "github.com/didi/nightingale/src/modules/agent/config" - "github.com/didi/nightingale/src/modules/agent/http" - "github.com/didi/nightingale/src/modules/agent/log/worker" - "github.com/didi/nightingale/src/modules/agent/report" - "github.com/didi/nightingale/src/modules/agent/statsd" - 
"github.com/didi/nightingale/src/modules/agent/stra" - "github.com/didi/nightingale/src/modules/agent/sys" - "github.com/didi/nightingale/src/modules/agent/sys/funcs" - "github.com/didi/nightingale/src/modules/agent/sys/plugins" - "github.com/didi/nightingale/src/modules/agent/sys/ports" - "github.com/didi/nightingale/src/modules/agent/sys/procs" - "github.com/didi/nightingale/src/modules/agent/timer" - "github.com/didi/nightingale/src/modules/agent/udp" - "github.com/didi/nightingale/src/toolkits/stats" + "github.com/didi/nightingale/v4/src/common/loggeri" + "github.com/didi/nightingale/v4/src/common/stats" + "github.com/didi/nightingale/v4/src/modules/agentd/cache" + "github.com/didi/nightingale/v4/src/modules/agentd/config" + "github.com/didi/nightingale/v4/src/modules/agentd/http" + "github.com/didi/nightingale/v4/src/modules/agentd/log/worker" + "github.com/didi/nightingale/v4/src/modules/agentd/report" + "github.com/didi/nightingale/v4/src/modules/agentd/statsd" + "github.com/didi/nightingale/v4/src/modules/agentd/stra" + "github.com/didi/nightingale/v4/src/modules/agentd/sys" + "github.com/didi/nightingale/v4/src/modules/agentd/sys/funcs" + "github.com/didi/nightingale/v4/src/modules/agentd/sys/plugins" + "github.com/didi/nightingale/v4/src/modules/agentd/sys/ports" + "github.com/didi/nightingale/v4/src/modules/agentd/sys/procs" + "github.com/didi/nightingale/v4/src/modules/agentd/timer" + "github.com/didi/nightingale/v4/src/modules/agentd/udp" "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/runner" @@ -61,7 +61,7 @@ func main() { parseConf() loggeri.Init(config.Config.Logger) - stats.Init("agent") + stats.Init("agentd") if err := report.GatherBase(); err != nil { fmt.Println("gatherBase fail: ", err) @@ -143,5 +143,5 @@ func endingProc() { logger.Close() http.Shutdown() - fmt.Println("portal stopped successfully") + fmt.Println("agentd stopped successfully") } diff --git a/src/modules/agent/cache/cache.go b/src/modules/agentd/cache/cache.go 
similarity index 94% rename from src/modules/agent/cache/cache.go rename to src/modules/agentd/cache/cache.go index fa74be6b..07c84907 100644 --- a/src/modules/agent/cache/cache.go +++ b/src/modules/agentd/cache/cache.go @@ -4,7 +4,7 @@ import ( "sync" "time" - "github.com/didi/nightingale/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/dataobj" ) var MetricHistory *History diff --git a/src/modules/agent/config/config.go b/src/modules/agentd/config/config.go similarity index 94% rename from src/modules/agent/config/config.go rename to src/modules/agentd/config/config.go index 46ec5d2f..23ae87cd 100644 --- a/src/modules/agent/config/config.go +++ b/src/modules/agentd/config/config.go @@ -6,12 +6,12 @@ import ( "strings" "time" + "github.com/didi/nightingale/v4/src/common/identity" + "github.com/didi/nightingale/v4/src/common/loggeri" + "github.com/didi/nightingale/v4/src/modules/agentd/sys" + "github.com/spf13/viper" "github.com/toolkits/pkg/file" - - "github.com/didi/nightingale/src/common/identity" - "github.com/didi/nightingale/src/common/loggeri" - "github.com/didi/nightingale/src/modules/agent/sys" ) type ConfigT struct { @@ -166,12 +166,12 @@ func Parse() error { } func getYmlFile() string { - yml := "etc/agent.local.yml" + yml := "etc/agentd.local.yml" if file.IsExist(yml) { return yml } - yml = "etc/agent.yml" + yml = "etc/agentd.yml" if file.IsExist(yml) { return yml } diff --git a/src/modules/agent/core/clients.go b/src/modules/agentd/core/clients.go similarity index 100% rename from src/modules/agent/core/clients.go rename to src/modules/agentd/core/clients.go diff --git a/src/modules/agent/core/common.go b/src/modules/agentd/core/common.go similarity index 92% rename from src/modules/agent/core/common.go rename to src/modules/agentd/core/common.go index b49fdec5..7cf2660a 100644 --- a/src/modules/agent/core/common.go +++ b/src/modules/agentd/core/common.go @@ -3,7 +3,7 @@ package core import ( "strings" - 
"github.com/didi/nightingale/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/dataobj" ) func NewMetricValue(metric string, val interface{}, dataType string, tags ...string) *dataobj.MetricValue { diff --git a/src/modules/agent/client/meta.go b/src/modules/agentd/core/meta.go similarity index 63% rename from src/modules/agent/client/meta.go rename to src/modules/agentd/core/meta.go index 434fa4b2..b66acb8b 100644 --- a/src/modules/agent/client/meta.go +++ b/src/modules/agentd/core/meta.go @@ -1,18 +1,21 @@ -package client +package core import ( "fmt" - "github.com/toolkits/pkg/logger" + "github.com/didi/nightingale/v4/src/common/client" + "github.com/didi/nightingale/v4/src/common/dataobj" - "github.com/didi/nightingale/src/common/dataobj" + "github.com/toolkits/pkg/logger" ) // Meta 从Server端获取任务元信息 func Meta(id int64) (script string, args string, account string, err error) { var resp dataobj.TaskMetaResponse - err = GetCli().Call("Scheduler.GetTaskMeta", id, &resp) + err = client.GetCli("server").Call("Server.GetTaskMeta", id, &resp) if err != nil { + logger.Error("rpc call Server.GetTaskMeta get error: ", err) + client.CloseCli() return } diff --git a/src/modules/agent/core/push.go b/src/modules/agentd/core/push.go similarity index 93% rename from src/modules/agent/core/push.go rename to src/modules/agentd/core/push.go index b0f19f1f..74677424 100644 --- a/src/modules/agent/core/push.go +++ b/src/modules/agentd/core/push.go @@ -10,13 +10,13 @@ import ( "reflect" "time" + "github.com/didi/nightingale/v4/src/common/address" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/cache" + "github.com/didi/nightingale/v4/src/modules/agentd/config" + "github.com/toolkits/pkg/logger" "github.com/ugorji/go/codec" - - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/agent/cache" - 
"github.com/didi/nightingale/src/modules/agent/config" ) func Push(metricItems []*dataobj.MetricValue) error { @@ -52,7 +52,7 @@ func Push(metricItems []*dataobj.MetricValue) error { items = append(items, item) } - addrs := address.GetRPCAddresses("transfer") + addrs := address.GetRPCAddresses("server") count := len(addrs) retry := 0 for { @@ -106,7 +106,7 @@ func rpcCall(addr string, items []*dataobj.MetricValue) (dataobj.TransferResp, e done := make(chan error, 1) go func() { - err := client.Call("Transfer.Push", items, &reply) + err := client.Call("Server.Push", items, &reply) done <- err }() diff --git a/src/modules/agent/http/http_server.go b/src/modules/agentd/http/http_server.go similarity index 87% rename from src/modules/agent/http/http_server.go rename to src/modules/agentd/http/http_server.go index 48ce7a11..74a9ba48 100644 --- a/src/modules/agent/http/http_server.go +++ b/src/modules/agentd/http/http_server.go @@ -7,10 +7,10 @@ import ( "os" "time" - "github.com/gin-gonic/gin" + "github.com/didi/nightingale/v4/src/common/address" + "github.com/didi/nightingale/v4/src/common/middleware" - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/common/middleware" + "github.com/gin-gonic/gin" ) var srv = &http.Server{ @@ -30,7 +30,7 @@ func Start() { Config(r) - srv.Addr = address.GetHTTPListen("agent") + srv.Addr = address.GetHTTPListen("agentd") srv.Handler = r go func() { diff --git a/src/modules/agent/http/router.go b/src/modules/agentd/http/router.go similarity index 100% rename from src/modules/agent/http/router.go rename to src/modules/agentd/http/router.go diff --git a/src/modules/agent/http/router_collector.go b/src/modules/agentd/http/router_collector.go similarity index 72% rename from src/modules/agent/http/router_collector.go rename to src/modules/agentd/http/router_collector.go index ad41bb8c..93dac5d5 100644 --- a/src/modules/agent/http/router_collector.go +++ b/src/modules/agentd/http/router_collector.go @@ -1,14 
+1,14 @@ package http import ( + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/core" + "github.com/didi/nightingale/v4/src/modules/agentd/log/strategy" + "github.com/didi/nightingale/v4/src/modules/agentd/log/worker" + "github.com/didi/nightingale/v4/src/modules/agentd/stra" + "github.com/gin-gonic/gin" "github.com/toolkits/pkg/errors" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/agent/core" - "github.com/didi/nightingale/src/modules/agent/log/strategy" - "github.com/didi/nightingale/src/modules/agent/log/worker" - "github.com/didi/nightingale/src/modules/agent/stra" ) func pushData(c *gin.Context) { diff --git a/src/modules/agent/http/router_endpoint.go b/src/modules/agentd/http/router_endpoint.go similarity index 66% rename from src/modules/agent/http/router_endpoint.go rename to src/modules/agentd/http/router_endpoint.go index 8c8dac7f..a5adcf0c 100644 --- a/src/modules/agent/http/router_endpoint.go +++ b/src/modules/agentd/http/router_endpoint.go @@ -1,7 +1,8 @@ package http import ( - "github.com/didi/nightingale/src/modules/agent/config" + "github.com/didi/nightingale/v4/src/modules/agentd/config" + "github.com/gin-gonic/gin" ) diff --git a/src/modules/agent/http/router_executor.go b/src/modules/agentd/http/router_executor.go similarity index 91% rename from src/modules/agent/http/router_executor.go rename to src/modules/agentd/http/router_executor.go index 51a7c7bf..cb002f21 100644 --- a/src/modules/agent/http/router_executor.go +++ b/src/modules/agentd/http/router_executor.go @@ -6,11 +6,11 @@ import ( "path" "strconv" + "github.com/didi/nightingale/v4/src/modules/agentd/config" + "github.com/didi/nightingale/v4/src/modules/agentd/timer" + "github.com/gin-gonic/gin" "github.com/toolkits/pkg/file" - - "github.com/didi/nightingale/src/modules/agent/config" - "github.com/didi/nightingale/src/modules/agent/timer" ) func output(idstr string, typ string) 
(string, error) { diff --git a/src/modules/agent/http/router_funcs.go b/src/modules/agentd/http/router_funcs.go similarity index 100% rename from src/modules/agent/http/router_funcs.go rename to src/modules/agentd/http/router_funcs.go diff --git a/src/modules/agent/http/router_health.go b/src/modules/agentd/http/router_health.go similarity index 100% rename from src/modules/agent/http/router_health.go rename to src/modules/agentd/http/router_health.go diff --git a/src/modules/agent/log/reader/reader.go b/src/modules/agentd/log/reader/reader.go similarity index 100% rename from src/modules/agent/log/reader/reader.go rename to src/modules/agentd/log/reader/reader.go diff --git a/src/modules/agent/log/reader/reader_test.go b/src/modules/agentd/log/reader/reader_test.go similarity index 100% rename from src/modules/agent/log/reader/reader_test.go rename to src/modules/agentd/log/reader/reader_test.go diff --git a/src/modules/agent/log/reader/reader_util.go b/src/modules/agentd/log/reader/reader_util.go similarity index 100% rename from src/modules/agent/log/reader/reader_util.go rename to src/modules/agentd/log/reader/reader_util.go diff --git a/src/modules/agent/log/reader/reader_util_test.go b/src/modules/agentd/log/reader/reader_util_test.go similarity index 100% rename from src/modules/agent/log/reader/reader_util_test.go rename to src/modules/agentd/log/reader/reader_util_test.go diff --git a/src/modules/agent/log/strategy/strategy.go b/src/modules/agentd/log/strategy/strategy.go similarity index 97% rename from src/modules/agent/log/strategy/strategy.go rename to src/modules/agentd/log/strategy/strategy.go index 1ab6917e..583fc6ea 100644 --- a/src/modules/agent/log/strategy/strategy.go +++ b/src/modules/agentd/log/strategy/strategy.go @@ -3,9 +3,9 @@ package strategy import ( "fmt" - "github.com/toolkits/pkg/logger" + "github.com/didi/nightingale/v4/src/modules/agentd/stra" - "github.com/didi/nightingale/src/modules/agent/stra" + "github.com/toolkits/pkg/logger" 
) // 后续开发者切记 : 没有锁,不能修改globalStrategy,更新的时候直接替换,否则会panic @@ -90,5 +90,6 @@ func DeepCopyStrategy(p *stra.Strategy) *stra.Strategy { s.SrvUpdated = p.SrvUpdated s.LocalUpdated = p.LocalUpdated s.WhetherAttachOneLogLine = p.WhetherAttachOneLogLine + return &s } diff --git a/src/modules/agent/log/worker/cached.go b/src/modules/agentd/log/worker/cached.go similarity index 98% rename from src/modules/agent/log/worker/cached.go rename to src/modules/agentd/log/worker/cached.go index 900069e2..a7333808 100644 --- a/src/modules/agent/log/worker/cached.go +++ b/src/modules/agentd/log/worker/cached.go @@ -7,9 +7,9 @@ import ( "sync" "time" - "github.com/toolkits/pkg/logger" + "github.com/didi/nightingale/v4/src/common/dataobj" - "github.com/didi/nightingale/src/common/dataobj" + "github.com/toolkits/pkg/logger" ) // cached时间周期 diff --git a/src/modules/agent/log/worker/control.go b/src/modules/agentd/log/worker/control.go similarity index 94% rename from src/modules/agent/log/worker/control.go rename to src/modules/agentd/log/worker/control.go index 670f13ed..2b63766c 100644 --- a/src/modules/agent/log/worker/control.go +++ b/src/modules/agentd/log/worker/control.go @@ -4,11 +4,11 @@ import ( "sync" "time" - "github.com/toolkits/pkg/logger" + "github.com/didi/nightingale/v4/src/modules/agentd/config" + "github.com/didi/nightingale/v4/src/modules/agentd/log/reader" + "github.com/didi/nightingale/v4/src/modules/agentd/log/strategy" - "github.com/didi/nightingale/src/modules/agent/config" - "github.com/didi/nightingale/src/modules/agent/log/reader" - "github.com/didi/nightingale/src/modules/agent/log/strategy" + "github.com/toolkits/pkg/logger" ) type ConfigInfo struct { diff --git a/src/modules/agent/log/worker/control_test.go b/src/modules/agentd/log/worker/control_test.go similarity index 100% rename from src/modules/agent/log/worker/control_test.go rename to src/modules/agentd/log/worker/control_test.go diff --git a/src/modules/agent/log/worker/counter.go 
b/src/modules/agentd/log/worker/counter.go similarity index 97% rename from src/modules/agent/log/worker/counter.go rename to src/modules/agentd/log/worker/counter.go index 248c3bee..e7a3157b 100644 --- a/src/modules/agent/log/worker/counter.go +++ b/src/modules/agentd/log/worker/counter.go @@ -10,9 +10,9 @@ import ( "github.com/toolkits/pkg/logger" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/agent/log/strategy" - "github.com/didi/nightingale/src/modules/agent/stra" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/log/strategy" + "github.com/didi/nightingale/v4/src/modules/agentd/stra" ) //从worker往计算部分推的Point diff --git a/src/modules/agent/log/worker/push.go b/src/modules/agentd/log/worker/push.go similarity index 95% rename from src/modules/agent/log/worker/push.go rename to src/modules/agentd/log/worker/push.go index 21ddc42e..dd63c2e0 100644 --- a/src/modules/agent/log/worker/push.go +++ b/src/modules/agentd/log/worker/push.go @@ -6,12 +6,12 @@ import ( "sort" "time" - "github.com/toolkits/pkg/logger" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/config" + "github.com/didi/nightingale/v4/src/modules/agentd/core" + "github.com/didi/nightingale/v4/src/modules/agentd/stra" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/agent/config" - "github.com/didi/nightingale/src/modules/agent/core" - "github.com/didi/nightingale/src/modules/agent/stra" + "github.com/toolkits/pkg/logger" ) var pushQueue chan *dataobj.MetricValue diff --git a/src/modules/agent/log/worker/worker.go b/src/modules/agentd/log/worker/worker.go similarity index 97% rename from src/modules/agent/log/worker/worker.go rename to src/modules/agentd/log/worker/worker.go index 0f36ef0c..f8f503b2 100644 --- a/src/modules/agent/log/worker/worker.go +++ b/src/modules/agentd/log/worker/worker.go @@ 
-9,11 +9,11 @@ import ( "sync/atomic" "time" - "github.com/toolkits/pkg/logger" + "github.com/didi/nightingale/v4/src/modules/agentd/config" + "github.com/didi/nightingale/v4/src/modules/agentd/log/strategy" + "github.com/didi/nightingale/v4/src/modules/agentd/stra" - "github.com/didi/nightingale/src/modules/agent/config" - "github.com/didi/nightingale/src/modules/agent/log/strategy" - "github.com/didi/nightingale/src/modules/agent/stra" + "github.com/toolkits/pkg/logger" ) type callbackHandler func(int64, int64) @@ -314,6 +314,8 @@ func (w *Worker) producer(line string, strategy *stra.Strategy) (*AnalysPoint, e Tms: tms.Unix(), Tags: tag, } + + // ==1代表要开启带上一条日志 if strategy.WhetherAttachOneLogLine == 1 { logger.Debugf("[strategy:%+v][WhetherAttacheOneLogLine:%+v]", strategy, strategy.WhetherAttachOneLogLine) ret.OneLogLine = line diff --git a/src/modules/agent/log/worker/worker_test.go b/src/modules/agentd/log/worker/worker_test.go similarity index 100% rename from src/modules/agent/log/worker/worker_test.go rename to src/modules/agentd/log/worker/worker_test.go diff --git a/src/modules/agent/report/report.go b/src/modules/agentd/report/report.go similarity index 67% rename from src/modules/agent/report/report.go rename to src/modules/agentd/report/report.go index 47874fe6..bf1e15ca 100644 --- a/src/modules/agent/report/report.go +++ b/src/modules/agentd/report/report.go @@ -1,21 +1,19 @@ package report import ( - "encoding/json" "fmt" - "math/rand" "os" "sort" "time" + "github.com/didi/nightingale/v4/src/common/client" + "github.com/didi/nightingale/v4/src/common/identity" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/agentd/config" + "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/net/httplib" "github.com/toolkits/pkg/str" "github.com/toolkits/pkg/sys" - - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/common/identity" - 
"github.com/didi/nightingale/src/modules/agent/config" ) var ( @@ -58,8 +56,8 @@ func GatherBase() error { return nil } -func gatherFields(m map[string]string) (map[string]string, error) { - ret := make(map[string]string) +func gatherFields(m map[string]string) (map[string]interface{}, error) { + ret := make(map[string]interface{}) for k, v := range m { output, err := exec(v) if err != nil { @@ -97,7 +95,7 @@ func report() error { return err } - form := hostRegisterForm{ + form := models.HostRegisterForm{ SN: SN, IP: IP, Ident: Ident, @@ -110,38 +108,31 @@ func report() error { content := form.SN + form.IP + form.Ident + form.Name + form.Cate + form.UniqKey var keys []string for key := range fields { - keys = append(keys, key, fields[key]) + keys = append(keys, key, fields[key].(string)) } sort.Strings(keys) for _, key := range keys { - content += fields[key] + if fields[key] == nil { + continue + } + content += fields[key].(string) } form.Digest = str.MD5(content) - servers := address.GetHTTPAddresses("ams") - for _, i := range rand.Perm(len(servers)) { - url := fmt.Sprintf("http://%s/v1/ams-ce/hosts/register", servers[i]) - - logger.Debugf("report: %+v", form) - - var body errRes - err := httplib.Post(url).JSONBodyQuiet(form).Header("X-Srv-Token", config.Config.Report.Token).SetTimeout(time.Second * 5).ToJSON(&body) - if err != nil { - js, _ := json.Marshal(form) - logger.Errorf("report payload: %s, token: %s", string(js), config.Config.Report.Token) - return fmt.Errorf("curl %s fail: %v", url, err) - } - - if body.Err != "" { - return fmt.Errorf(body.Err) - } + var msg string + err = client.GetCli("server").Call("Server.HostRegister", form, &msg) + if err != nil { + client.CloseCli() + return fmt.Errorf("Server.HostRegister err:%v", err) + } - return nil + if msg != "" { + return fmt.Errorf(msg) } - return fmt.Errorf("all server instance is dead") + return nil } func exec(shell string) (string, error) { diff --git a/src/modules/agent/statsd/aggr_config.go 
b/src/modules/agentd/statsd/aggr_config.go similarity index 100% rename from src/modules/agent/statsd/aggr_config.go rename to src/modules/agentd/statsd/aggr_config.go diff --git a/src/modules/agent/statsd/aggr_counter.go b/src/modules/agentd/statsd/aggr_counter.go similarity index 100% rename from src/modules/agent/statsd/aggr_counter.go rename to src/modules/agentd/statsd/aggr_counter.go diff --git a/src/modules/agent/statsd/aggr_counter_e.go b/src/modules/agentd/statsd/aggr_counter_e.go similarity index 100% rename from src/modules/agent/statsd/aggr_counter_e.go rename to src/modules/agentd/statsd/aggr_counter_e.go diff --git a/src/modules/agent/statsd/aggr_gauge.go b/src/modules/agentd/statsd/aggr_gauge.go similarity index 100% rename from src/modules/agent/statsd/aggr_gauge.go rename to src/modules/agentd/statsd/aggr_gauge.go diff --git a/src/modules/agent/statsd/aggr_histogram.go b/src/modules/agentd/statsd/aggr_histogram.go similarity index 98% rename from src/modules/agent/statsd/aggr_histogram.go rename to src/modules/agentd/statsd/aggr_histogram.go index fe713603..513ef175 100644 --- a/src/modules/agent/statsd/aggr_histogram.go +++ b/src/modules/agentd/statsd/aggr_histogram.go @@ -6,7 +6,7 @@ import ( "fmt" "strconv" - tdigest "github.com/didi/nightingale/src/toolkits/go-tdigest" + tdigest "github.com/didi/nightingale/v4/src/common/go-tdigest" ) type histogramAggregator struct { diff --git a/src/modules/agent/statsd/aggr_interface.go b/src/modules/agentd/statsd/aggr_interface.go similarity index 100% rename from src/modules/agent/statsd/aggr_interface.go rename to src/modules/agentd/statsd/aggr_interface.go diff --git a/src/modules/agent/statsd/aggr_ratio.go b/src/modules/agentd/statsd/aggr_ratio.go similarity index 100% rename from src/modules/agent/statsd/aggr_ratio.go rename to src/modules/agentd/statsd/aggr_ratio.go diff --git a/src/modules/agent/statsd/aggr_rpc.go b/src/modules/agentd/statsd/aggr_rpc.go similarity index 100% rename from 
src/modules/agent/statsd/aggr_rpc.go rename to src/modules/agentd/statsd/aggr_rpc.go diff --git a/src/modules/agent/statsd/aggr_rpc_e.go b/src/modules/agentd/statsd/aggr_rpc_e.go similarity index 100% rename from src/modules/agent/statsd/aggr_rpc_e.go rename to src/modules/agentd/statsd/aggr_rpc_e.go diff --git a/src/modules/agent/statsd/clock.go b/src/modules/agentd/statsd/clock.go similarity index 100% rename from src/modules/agent/statsd/clock.go rename to src/modules/agentd/statsd/clock.go diff --git a/src/modules/agent/statsd/statsd.go b/src/modules/agentd/statsd/statsd.go similarity index 100% rename from src/modules/agent/statsd/statsd.go rename to src/modules/agentd/statsd/statsd.go diff --git a/src/modules/agent/statsd/statsd_receiver.go b/src/modules/agentd/statsd/statsd_receiver.go similarity index 95% rename from src/modules/agent/statsd/statsd_receiver.go rename to src/modules/agentd/statsd/statsd_receiver.go index e6fbd191..e98cbee2 100644 --- a/src/modules/agent/statsd/statsd_receiver.go +++ b/src/modules/agentd/statsd/statsd_receiver.go @@ -3,7 +3,7 @@ package statsd import ( "strings" - "github.com/didi/nightingale/src/toolkits/stats" + "github.com/didi/nightingale/v4/src/common/stats" "github.com/toolkits/pkg/logger" ) diff --git a/src/modules/agent/statsd/statsd_reporter.go b/src/modules/agentd/statsd/statsd_reporter.go similarity index 95% rename from src/modules/agent/statsd/statsd_reporter.go rename to src/modules/agentd/statsd/statsd_reporter.go index f0285d0c..4655edf8 100644 --- a/src/modules/agent/statsd/statsd_reporter.go +++ b/src/modules/agentd/statsd/statsd_reporter.go @@ -6,11 +6,11 @@ import ( "sync" "time" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/agent/config" - "github.com/didi/nightingale/src/modules/agent/core" - "github.com/didi/nightingale/src/toolkits/exit" - "github.com/didi/nightingale/src/toolkits/stats" + "github.com/didi/nightingale/v4/src/common/dataobj" + 
"github.com/didi/nightingale/v4/src/common/exit" + "github.com/didi/nightingale/v4/src/common/stats" + "github.com/didi/nightingale/v4/src/modules/agentd/config" + "github.com/didi/nightingale/v4/src/modules/agentd/core" "github.com/toolkits/pkg/logger" ) @@ -125,7 +125,6 @@ func (self StatsdReporter) nextTenSeconds(t time.Time) time.Time { func (self StatsdReporter) translateAndSend(state *state, reportTime time.Time, frequency int, prefix string) (cnt int) { - cnt = 0 // 业务上报的点 oldPoints := self.translateToPoints(state, reportTime) @@ -138,6 +137,7 @@ func (self StatsdReporter) translateAndSend(state *state, reportTime time.Time, } self.setLastPoints(oldPoints) + cnt = len(oldPoints) if len(oldPoints) == 0 { return } diff --git a/src/modules/agent/statsd/statsd_state.go b/src/modules/agentd/statsd/statsd_state.go similarity index 99% rename from src/modules/agent/statsd/statsd_state.go rename to src/modules/agentd/statsd/statsd_state.go index 26e35ecd..d38e8933 100644 --- a/src/modules/agent/statsd/statsd_state.go +++ b/src/modules/agentd/statsd/statsd_state.go @@ -5,7 +5,7 @@ import ( "sync" "time" - "github.com/didi/nightingale/src/toolkits/stats" + "github.com/didi/nightingale/v4/src/common/stats" "github.com/toolkits/pkg/logger" ) diff --git a/src/modules/agent/statsd/utils.go b/src/modules/agentd/statsd/utils.go similarity index 100% rename from src/modules/agent/statsd/utils.go rename to src/modules/agentd/statsd/utils.go diff --git a/src/modules/agentd/stra/cron.go b/src/modules/agentd/stra/cron.go new file mode 100644 index 00000000..45e5c99e --- /dev/null +++ b/src/modules/agentd/stra/cron.go @@ -0,0 +1,46 @@ +package stra + +import ( + "encoding/json" + "time" + + "github.com/didi/nightingale/v4/src/common/client" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/agentd/config" + + "github.com/toolkits/pkg/logger" +) + +func GetCollects() { + if !config.Config.Stra.Enable { + return + } + + go loopDetect() +} + 
+func loopDetect() { + for { + detect() + time.Sleep(time.Duration(config.Config.Stra.Interval) * time.Second) + } +} + +func detect() { + var resp string + var c models.Collect + err := client.GetCli("server").Call("Server.GetCollectBy", config.Endpoint, &resp) + if err != nil { + logger.Error("get collects err:", err) + return + } + + err = json.Unmarshal([]byte(resp), &c) + if err != nil { + logger.Error("get collects %s unmarshal err:", resp, err) + return + } + + logger.Debugf("get collect:%+v", c) + Collect.Update(&c) +} diff --git a/src/modules/agent/stra/init.go b/src/modules/agentd/stra/init.go similarity index 59% rename from src/modules/agent/stra/init.go rename to src/modules/agentd/stra/init.go index 5ffd31ee..7f408872 100644 --- a/src/modules/agent/stra/init.go +++ b/src/modules/agentd/stra/init.go @@ -1,6 +1,6 @@ package stra -import "github.com/didi/nightingale/src/models" +import "github.com/didi/nightingale/v4/src/models" var Collect models.Collect diff --git a/src/modules/agent/stra/log.go b/src/modules/agentd/stra/log.go similarity index 98% rename from src/modules/agent/stra/log.go rename to src/modules/agentd/stra/log.go index 138d1793..4816670f 100644 --- a/src/modules/agent/stra/log.go +++ b/src/modules/agentd/stra/log.go @@ -6,11 +6,11 @@ import ( "regexp" "strings" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/agentd/config" + "github.com/toolkits/pkg/file" "github.com/toolkits/pkg/logger" - - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/agent/config" ) type Strategy struct { @@ -135,6 +135,7 @@ func ToStrategy(p *models.LogCollect) *Strategy { s.SrvUpdated = p.LastUpdated.String() s.LocalUpdated = p.LocalUpdated s.WhetherAttachOneLogLine = p.WhetherAttachOneLogLine + return &s } @@ -258,9 +259,6 @@ func GetPatAndTimeFormat(tf string) (string, string) { case "yyyy/mm/dd HH:MM:SS": pat = 
`(2[0-9]{3})/(0[1-9]|1[012])/([012][0-9]|3[01])\s([01][0-9]|2[0-4])(:[012345][0-9]){2}` timeFormat = "2006/01/02 15:04:05" - case "yyyy/mm/dd - HH:MM:SS": - pat = `(2[0-9]{3})/(0[1-9]|1[012])/([012][0-9]|3[01])\s-\s([01][0-9]|2[0-4])(:[012345][0-9]){2}` - timeFormat = "2006/01/02 - 15:04:05" case "yyyymmdd HH:MM:SS": pat = `(2[0-9]{3})(0[1-9]|1[012])([012][0-9]|3[01])\s([01][0-9]|2[0-4])(:[012345][0-9]){2}` timeFormat = "20060102 15:04:05" @@ -276,6 +274,9 @@ func GetPatAndTimeFormat(tf string) (string, string) { case "mm-dd HH:MM:SS": pat = `(0[1-9]|1[012])-([012][0-9]|3[01])\s([01][0-9]|2[0-4])(:[012345][0-9]){2}` timeFormat = "01-02 15:04:05" + case "yyyy/mm/dd - HH:MM:SS": + pat = `(2[0-9]{3})/(0[1-9]|1[012])/([012][0-9]|3[01])\s-\s([01][0-9]|2[0-4])(:[012345][0-9]){2}` + timeFormat = "2006/01/02 - 15:04:05" default: logger.Errorf("match time pac failed : [timeFormat:%s]", tf) return "", "" diff --git a/src/modules/agent/stra/log_test.go b/src/modules/agentd/stra/log_test.go similarity index 100% rename from src/modules/agent/stra/log_test.go rename to src/modules/agentd/stra/log_test.go diff --git a/src/modules/agent/stra/port.go b/src/modules/agentd/stra/port.go similarity index 92% rename from src/modules/agent/stra/port.go rename to src/modules/agentd/stra/port.go index 160ddcdb..cb82db4d 100644 --- a/src/modules/agent/stra/port.go +++ b/src/modules/agentd/stra/port.go @@ -8,12 +8,12 @@ import ( "strings" "time" + "github.com/didi/nightingale/v4/src/common/str" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/agentd/config" + "github.com/toolkits/pkg/file" "github.com/toolkits/pkg/logger" - - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/agent/config" - "github.com/didi/nightingale/src/toolkits/str" ) func NewPortCollect(port, step int, tags string, modTime time.Time) *models.PortCollect { diff --git a/src/modules/agent/stra/proc.go b/src/modules/agentd/stra/proc.go similarity 
index 92% rename from src/modules/agent/stra/proc.go rename to src/modules/agentd/stra/proc.go index 4566de8b..cc5b2e69 100644 --- a/src/modules/agent/stra/proc.go +++ b/src/modules/agentd/stra/proc.go @@ -8,12 +8,12 @@ import ( "strings" "time" + "github.com/didi/nightingale/v4/src/common/str" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/agentd/config" + "github.com/toolkits/pkg/file" "github.com/toolkits/pkg/logger" - - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/agent/config" - "github.com/didi/nightingale/src/toolkits/str" ) func NewProcCollect(method, name, tags string, step int, modTime time.Time) *models.ProcCollect { diff --git a/src/modules/agent/sys/config.go b/src/modules/agentd/sys/config.go similarity index 100% rename from src/modules/agent/sys/config.go rename to src/modules/agentd/sys/config.go diff --git a/src/modules/agent/sys/funcs/collector.go b/src/modules/agentd/sys/funcs/collector.go similarity index 58% rename from src/modules/agent/sys/funcs/collector.go rename to src/modules/agentd/sys/funcs/collector.go index 47d53475..1298b747 100644 --- a/src/modules/agent/sys/funcs/collector.go +++ b/src/modules/agentd/sys/funcs/collector.go @@ -1,8 +1,8 @@ package funcs import ( - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/agent/core" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/core" ) func CollectorMetrics() []*dataobj.MetricValue { diff --git a/src/modules/agent/sys/funcs/cpustat.go b/src/modules/agentd/sys/funcs/cpustat.go similarity index 98% rename from src/modules/agent/sys/funcs/cpustat.go rename to src/modules/agentd/sys/funcs/cpustat.go index 952f82ca..7719d4c7 100644 --- a/src/modules/agent/sys/funcs/cpustat.go +++ b/src/modules/agentd/sys/funcs/cpustat.go @@ -19,11 +19,11 @@ import ( "sync" "time" + 
"github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/core" + "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/nux" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/agent/core" ) const ( diff --git a/src/modules/agent/sys/funcs/cron.go b/src/modules/agentd/sys/funcs/cron.go similarity index 85% rename from src/modules/agent/sys/funcs/cron.go rename to src/modules/agentd/sys/funcs/cron.go index 3990bfb4..28578ff3 100644 --- a/src/modules/agent/sys/funcs/cron.go +++ b/src/modules/agentd/sys/funcs/cron.go @@ -17,10 +17,10 @@ package funcs import ( "time" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/agent/config" - "github.com/didi/nightingale/src/modules/agent/core" - "github.com/didi/nightingale/src/modules/agent/sys" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/config" + "github.com/didi/nightingale/v4/src/modules/agentd/core" + "github.com/didi/nightingale/v4/src/modules/agentd/sys" ) func Collect() { diff --git a/src/modules/agent/sys/funcs/dfstat.go b/src/modules/agentd/sys/funcs/dfstat.go similarity index 95% rename from src/modules/agent/sys/funcs/dfstat.go rename to src/modules/agentd/sys/funcs/dfstat.go index b764d3a0..f72c476e 100644 --- a/src/modules/agent/sys/funcs/dfstat.go +++ b/src/modules/agentd/sys/funcs/dfstat.go @@ -18,13 +18,13 @@ import ( "fmt" "strings" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/core" + "github.com/didi/nightingale/v4/src/modules/agentd/sys" + "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/nux" "github.com/toolkits/pkg/slice" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/agent/core" - "github.com/didi/nightingale/src/modules/agent/sys" ) func DeviceMetrics() []*dataobj.MetricValue { diff --git 
a/src/modules/agent/sys/funcs/diskstat.go b/src/modules/agentd/sys/funcs/diskstat.go similarity index 97% rename from src/modules/agent/sys/funcs/diskstat.go rename to src/modules/agentd/sys/funcs/diskstat.go index 6a17f47b..2442da49 100644 --- a/src/modules/agent/sys/funcs/diskstat.go +++ b/src/modules/agentd/sys/funcs/diskstat.go @@ -19,11 +19,11 @@ import ( "sync" "time" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/core" + "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/nux" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/agent/core" ) var ( diff --git a/src/modules/agent/sys/funcs/fsstat.go b/src/modules/agentd/sys/funcs/fsstat.go similarity index 95% rename from src/modules/agent/sys/funcs/fsstat.go rename to src/modules/agentd/sys/funcs/fsstat.go index 42d335d6..20682684 100644 --- a/src/modules/agent/sys/funcs/fsstat.go +++ b/src/modules/agentd/sys/funcs/fsstat.go @@ -23,13 +23,13 @@ import ( "path/filepath" "time" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/core" + "github.com/didi/nightingale/v4/src/modules/agentd/sys" + "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/nux" "github.com/toolkits/pkg/slice" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/agent/core" - "github.com/didi/nightingale/src/modules/agent/sys" ) func FsRWMetrics() []*dataobj.MetricValue { diff --git a/src/modules/agent/sys/funcs/funcs.go b/src/modules/agentd/sys/funcs/funcs.go similarity index 94% rename from src/modules/agent/sys/funcs/funcs.go rename to src/modules/agentd/sys/funcs/funcs.go index 6357cb29..65795867 100644 --- a/src/modules/agent/sys/funcs/funcs.go +++ b/src/modules/agentd/sys/funcs/funcs.go @@ -17,8 +17,8 @@ package funcs import ( "log" - "github.com/didi/nightingale/src/common/dataobj" - 
"github.com/didi/nightingale/src/modules/agent/sys" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/sys" ) type FuncsAndInterval struct { diff --git a/src/modules/agent/sys/funcs/ifstat.go b/src/modules/agentd/sys/funcs/ifstat.go similarity index 96% rename from src/modules/agent/sys/funcs/ifstat.go rename to src/modules/agentd/sys/funcs/ifstat.go index ff35c19b..097178e6 100644 --- a/src/modules/agent/sys/funcs/ifstat.go +++ b/src/modules/agentd/sys/funcs/ifstat.go @@ -19,9 +19,9 @@ import ( "strings" "time" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/agent/core" - "github.com/didi/nightingale/src/modules/agent/sys" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/core" + "github.com/didi/nightingale/v4/src/modules/agentd/sys" "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/nux" diff --git a/src/modules/agent/sys/funcs/loadavg.go b/src/modules/agentd/sys/funcs/loadavg.go similarity index 90% rename from src/modules/agent/sys/funcs/loadavg.go rename to src/modules/agentd/sys/funcs/loadavg.go index 15752220..d0707e33 100644 --- a/src/modules/agent/sys/funcs/loadavg.go +++ b/src/modules/agentd/sys/funcs/loadavg.go @@ -15,11 +15,11 @@ package funcs import ( + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/core" + "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/nux" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/agent/core" ) func LoadAvgMetrics() []*dataobj.MetricValue { diff --git a/src/modules/agent/sys/funcs/meminfo.go b/src/modules/agentd/sys/funcs/meminfo.go similarity index 93% rename from src/modules/agent/sys/funcs/meminfo.go rename to src/modules/agentd/sys/funcs/meminfo.go index 674f96a6..dcb42477 100644 --- a/src/modules/agent/sys/funcs/meminfo.go +++ 
b/src/modules/agentd/sys/funcs/meminfo.go @@ -15,11 +15,11 @@ package funcs import ( + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/core" + "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/nux" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/agent/core" ) func MemMetrics() []*dataobj.MetricValue { diff --git a/src/modules/agent/sys/funcs/netfilter.go b/src/modules/agentd/sys/funcs/netfilter.go similarity index 90% rename from src/modules/agent/sys/funcs/netfilter.go rename to src/modules/agentd/sys/funcs/netfilter.go index 3f812f10..2500002a 100644 --- a/src/modules/agent/sys/funcs/netfilter.go +++ b/src/modules/agentd/sys/funcs/netfilter.go @@ -1,11 +1,11 @@ package funcs import ( + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/core" + "github.com/toolkits/pkg/file" "github.com/toolkits/pkg/logger" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/agent/core" ) func NfMetrics() []*dataobj.MetricValue { diff --git a/src/modules/agent/sys/funcs/ntp.go b/src/modules/agentd/sys/funcs/ntp.go similarity index 89% rename from src/modules/agent/sys/funcs/ntp.go rename to src/modules/agentd/sys/funcs/ntp.go index 2768d405..402eb6aa 100644 --- a/src/modules/agent/sys/funcs/ntp.go +++ b/src/modules/agentd/sys/funcs/ntp.go @@ -3,9 +3,9 @@ package funcs import ( "time" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/agent/core" - "github.com/didi/nightingale/src/modules/agent/sys" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/core" + "github.com/didi/nightingale/v4/src/modules/agentd/sys" "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/nux" diff --git a/src/modules/agent/sys/funcs/snmp.go b/src/modules/agentd/sys/funcs/snmp.go similarity index 
92% rename from src/modules/agent/sys/funcs/snmp.go rename to src/modules/agentd/sys/funcs/snmp.go index a1751fb7..bede8bb1 100644 --- a/src/modules/agent/sys/funcs/snmp.go +++ b/src/modules/agentd/sys/funcs/snmp.go @@ -15,11 +15,11 @@ package funcs import ( + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/core" + "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/nux" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/agent/core" ) func UdpMetrics() []*dataobj.MetricValue { diff --git a/src/modules/agent/sys/funcs/sockstas.go b/src/modules/agentd/sys/funcs/sockstas.go similarity index 90% rename from src/modules/agent/sys/funcs/sockstas.go rename to src/modules/agentd/sys/funcs/sockstas.go index 4ea9809c..a8797310 100644 --- a/src/modules/agent/sys/funcs/sockstas.go +++ b/src/modules/agentd/sys/funcs/sockstas.go @@ -15,11 +15,11 @@ package funcs import ( + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/core" + "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/nux" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/agent/core" ) func SocketStatSummaryMetrics() []*dataobj.MetricValue { diff --git a/src/modules/agent/sys/funcs/sys.go b/src/modules/agentd/sys/funcs/sys.go similarity index 95% rename from src/modules/agent/sys/funcs/sys.go rename to src/modules/agentd/sys/funcs/sys.go index 54c37735..de527d9f 100644 --- a/src/modules/agent/sys/funcs/sys.go +++ b/src/modules/agentd/sys/funcs/sys.go @@ -19,12 +19,12 @@ import ( "strconv" "strings" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/core" + "github.com/toolkits/pkg/file" "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/nux" - - "github.com/didi/nightingale/src/common/dataobj" - 
"github.com/didi/nightingale/src/modules/agent/core" ) func FsKernelMetrics() []*dataobj.MetricValue { diff --git a/src/modules/agent/sys/plugins/cron.go b/src/modules/agentd/sys/plugins/cron.go similarity index 100% rename from src/modules/agent/sys/plugins/cron.go rename to src/modules/agentd/sys/plugins/cron.go diff --git a/src/modules/agent/sys/plugins/plugin.go b/src/modules/agentd/sys/plugins/plugin.go similarity index 100% rename from src/modules/agent/sys/plugins/plugin.go rename to src/modules/agentd/sys/plugins/plugin.go diff --git a/src/modules/agent/sys/plugins/reader.go b/src/modules/agentd/sys/plugins/reader.go similarity index 95% rename from src/modules/agent/sys/plugins/reader.go rename to src/modules/agentd/sys/plugins/reader.go index db3d288b..aa9515c3 100644 --- a/src/modules/agent/sys/plugins/reader.go +++ b/src/modules/agentd/sys/plugins/reader.go @@ -20,11 +20,11 @@ import ( "strconv" "strings" + "github.com/didi/nightingale/v4/src/modules/agentd/stra" + "github.com/didi/nightingale/v4/src/modules/agentd/sys" + "github.com/toolkits/pkg/file" "github.com/toolkits/pkg/logger" - - "github.com/didi/nightingale/src/modules/agent/stra" - "github.com/didi/nightingale/src/modules/agent/sys" ) // key: 60_ntp.py diff --git a/src/modules/agent/sys/plugins/scheduler.go b/src/modules/agentd/sys/plugins/scheduler.go similarity index 96% rename from src/modules/agent/sys/plugins/scheduler.go rename to src/modules/agentd/sys/plugins/scheduler.go index 23fb3336..5bc0e3bd 100644 --- a/src/modules/agent/sys/plugins/scheduler.go +++ b/src/modules/agentd/sys/plugins/scheduler.go @@ -23,12 +23,12 @@ import ( "strings" "time" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/core" + "github.com/toolkits/pkg/file" "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/sys" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/agent/core" ) type PluginScheduler 
struct { diff --git a/src/modules/agent/sys/ports/cron.go b/src/modules/agentd/sys/ports/cron.go similarity index 81% rename from src/modules/agent/sys/ports/cron.go rename to src/modules/agentd/sys/ports/cron.go index d811661c..1869a752 100644 --- a/src/modules/agent/sys/ports/cron.go +++ b/src/modules/agentd/sys/ports/cron.go @@ -3,7 +3,7 @@ package ports import ( "time" - "github.com/didi/nightingale/src/modules/agent/stra" + "github.com/didi/nightingale/v4/src/modules/agentd/stra" ) func Detect() { diff --git a/src/modules/agent/sys/ports/port.go b/src/modules/agentd/sys/ports/port.go similarity index 94% rename from src/modules/agent/sys/ports/port.go rename to src/modules/agentd/sys/ports/port.go index 807a004b..4025fe31 100644 --- a/src/modules/agent/sys/ports/port.go +++ b/src/modules/agentd/sys/ports/port.go @@ -1,7 +1,7 @@ package ports import ( - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" ) var ( diff --git a/src/modules/agent/sys/ports/scheduler.go b/src/modules/agentd/sys/ports/scheduler.go similarity index 86% rename from src/modules/agent/sys/ports/scheduler.go rename to src/modules/agentd/sys/ports/scheduler.go index 5a5c363c..28df000c 100644 --- a/src/modules/agent/sys/ports/scheduler.go +++ b/src/modules/agentd/sys/ports/scheduler.go @@ -7,11 +7,11 @@ import ( "github.com/toolkits/pkg/logger" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/agent/config" - "github.com/didi/nightingale/src/modules/agent/core" - "github.com/didi/nightingale/src/modules/agent/report" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/agentd/config" + "github.com/didi/nightingale/v4/src/modules/agentd/core" + "github.com/didi/nightingale/v4/src/modules/agentd/report" ) type PortScheduler struct { diff --git a/src/modules/agent/sys/procs/cron.go 
b/src/modules/agentd/sys/procs/cron.go similarity index 81% rename from src/modules/agent/sys/procs/cron.go rename to src/modules/agentd/sys/procs/cron.go index ae4320d0..317ab44f 100644 --- a/src/modules/agent/sys/procs/cron.go +++ b/src/modules/agentd/sys/procs/cron.go @@ -3,7 +3,7 @@ package procs import ( "time" - "github.com/didi/nightingale/src/modules/agent/stra" + "github.com/didi/nightingale/v4/src/modules/agentd/stra" ) func Detect() { diff --git a/src/modules/agent/sys/procs/proc.go b/src/modules/agentd/sys/procs/proc.go similarity index 95% rename from src/modules/agent/sys/procs/proc.go rename to src/modules/agentd/sys/procs/proc.go index c645d4d9..3cb8b2e3 100644 --- a/src/modules/agent/sys/procs/proc.go +++ b/src/modules/agentd/sys/procs/proc.go @@ -1,7 +1,7 @@ package procs import ( - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" ) var ( diff --git a/src/modules/agent/sys/procs/scheduler.go b/src/modules/agentd/sys/procs/scheduler.go similarity index 93% rename from src/modules/agent/sys/procs/scheduler.go rename to src/modules/agentd/sys/procs/scheduler.go index ff1a4e42..09961d6b 100644 --- a/src/modules/agent/sys/procs/scheduler.go +++ b/src/modules/agentd/sys/procs/scheduler.go @@ -4,10 +4,10 @@ import ( "strings" "time" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/agent/config" - "github.com/didi/nightingale/src/modules/agent/core" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/agentd/config" + "github.com/didi/nightingale/v4/src/modules/agentd/core" "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/nux" diff --git a/src/modules/agent/sys/procs/sys.go b/src/modules/agentd/sys/procs/sys.go similarity index 100% rename from src/modules/agent/sys/procs/sys.go rename to src/modules/agentd/sys/procs/sys.go diff 
--git a/src/modules/agent/timer/heartbeat.go b/src/modules/agentd/timer/heartbeat.go similarity index 81% rename from src/modules/agent/timer/heartbeat.go rename to src/modules/agentd/timer/heartbeat.go index 866794c6..ab004460 100644 --- a/src/modules/agent/timer/heartbeat.go +++ b/src/modules/agentd/timer/heartbeat.go @@ -4,11 +4,11 @@ import ( "math/rand" "time" - "github.com/toolkits/pkg/logger" + "github.com/didi/nightingale/v4/src/common/client" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/agentd/config" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/agent/client" - "github.com/didi/nightingale/src/modules/agent/config" + "github.com/toolkits/pkg/logger" ) func Heartbeat() { @@ -33,7 +33,7 @@ func heartbeat() { } var resp dataobj.ReportResponse - err := client.GetCli().Call("Scheduler.Report", req, &resp) + err := client.GetCli("server").Call("Server.Report", req, &resp) if err != nil { logger.Error("rpc call Scheduler.Report fail:", err) client.CloseCli() diff --git a/src/modules/agent/timer/killer.go b/src/modules/agentd/timer/killer.go similarity index 83% rename from src/modules/agent/timer/killer.go rename to src/modules/agentd/timer/killer.go index 53a0fd3c..a7402b31 100644 --- a/src/modules/agent/timer/killer.go +++ b/src/modules/agentd/timer/killer.go @@ -4,9 +4,9 @@ import ( "fmt" "strings" - "github.com/toolkits/pkg/sys" + "github.com/didi/nightingale/v4/src/modules/agentd/config" - "github.com/didi/nightingale/src/modules/agent/config" + "github.com/toolkits/pkg/sys" ) func KillProcessByTaskID(id int64) error { diff --git a/src/modules/agent/timer/task.go b/src/modules/agentd/timer/task.go similarity index 97% rename from src/modules/agent/timer/task.go rename to src/modules/agentd/timer/task.go index b6a43b72..6d33b868 100644 --- a/src/modules/agent/timer/task.go +++ b/src/modules/agentd/timer/task.go @@ -9,12 +9,12 @@ import ( "strings" "sync" + 
"github.com/didi/nightingale/v4/src/modules/agentd/config" + "github.com/didi/nightingale/v4/src/modules/agentd/core" + "github.com/toolkits/pkg/file" "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/sys" - - "github.com/didi/nightingale/src/modules/agent/client" - "github.com/didi/nightingale/src/modules/agent/config" ) type Task struct { @@ -146,7 +146,7 @@ func (t *Task) prepare() error { t.Account = account } else { // 从远端读取,再写入磁盘 - script, args, account, err := client.Meta(t.Id) + script, args, account, err := core.Meta(t.Id) if err != nil { log.Println("[E] query task meta fail:", err) return err diff --git a/src/modules/agent/timer/tasks.go b/src/modules/agentd/timer/tasks.go similarity index 97% rename from src/modules/agent/timer/tasks.go rename to src/modules/agentd/timer/tasks.go index 29b90463..0aeed6a5 100644 --- a/src/modules/agent/timer/tasks.go +++ b/src/modules/agentd/timer/tasks.go @@ -1,9 +1,9 @@ package timer import ( - "github.com/toolkits/pkg/logger" + "github.com/didi/nightingale/v4/src/common/dataobj" - "github.com/didi/nightingale/src/common/dataobj" + "github.com/toolkits/pkg/logger" ) type LocalTasksT struct { diff --git a/src/modules/agent/udp/handler.go b/src/modules/agentd/udp/handler.go similarity index 88% rename from src/modules/agent/udp/handler.go rename to src/modules/agentd/udp/handler.go index e0b22ecc..46b91453 100644 --- a/src/modules/agent/udp/handler.go +++ b/src/modules/agentd/udp/handler.go @@ -3,8 +3,8 @@ package udp import ( "sync" - "github.com/didi/nightingale/src/modules/agent/statsd" - "github.com/didi/nightingale/src/toolkits/exit" + "github.com/didi/nightingale/v4/src/common/exit" + "github.com/didi/nightingale/v4/src/modules/agentd/statsd" "github.com/toolkits/pkg/logger" ) diff --git a/src/modules/agent/udp/udp.go b/src/modules/agentd/udp/udp.go similarity index 92% rename from src/modules/agent/udp/udp.go rename to src/modules/agentd/udp/udp.go index 22ef0cbe..e54c1355 100644 --- 
a/src/modules/agent/udp/udp.go +++ b/src/modules/agentd/udp/udp.go @@ -5,7 +5,7 @@ import ( "log" "net" - "github.com/didi/nightingale/src/modules/agent/config" + "github.com/didi/nightingale/v4/src/modules/agentd/config" ) var ( diff --git a/src/modules/ams/ams.go b/src/modules/ams/ams.go deleted file mode 100644 index 523b8937..00000000 --- a/src/modules/ams/ams.go +++ /dev/null @@ -1,86 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" - "os/signal" - "syscall" - - _ "github.com/go-sql-driver/mysql" - - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/runner" - - "github.com/didi/nightingale/src/common/loggeri" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/ams/config" - "github.com/didi/nightingale/src/modules/ams/http" - "github.com/didi/nightingale/src/toolkits/i18n" -) - -var ( - vers *bool - help *bool - conf *string - - version = "No Version Provided" -) - -func init() { - vers = flag.Bool("v", false, "display the version.") - help = flag.Bool("h", false, "print this help.") - conf = flag.String("f", "", "specify configuration file.") - flag.Parse() - - if *vers { - fmt.Println("Version:", version) - os.Exit(0) - } - - if *help { - flag.Usage() - os.Exit(0) - } - - i18n.Init() - - runner.Init() - fmt.Println("runner.cwd:", runner.Cwd) - fmt.Println("runner.hostname:", runner.Hostname) -} - -func main() { - parseConf() - - loggeri.Init(config.Config.Logger) - - // 初始化数据库和相关数据 - models.InitMySQL("rdb", "ams") - - i18n.Init(config.Config.I18n) - - http.Start() - - endingProc() -} - -func parseConf() { - if err := config.Parse(); err != nil { - fmt.Println("cannot parse configuration file:", err) - os.Exit(1) - } -} - -func endingProc() { - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - select { - case <-c: - fmt.Printf("stop signal caught, stopping... 
pid=%d\n", os.Getpid()) - } - - logger.Close() - http.Shutdown() - fmt.Println("process stopped successfully") -} diff --git a/src/modules/ams/config/yaml.go b/src/modules/ams/config/yaml.go deleted file mode 100644 index b994c9ce..00000000 --- a/src/modules/ams/config/yaml.go +++ /dev/null @@ -1,66 +0,0 @@ -package config - -import ( - "fmt" - - "github.com/didi/nightingale/src/common/loggeri" - "github.com/didi/nightingale/src/toolkits/i18n" - - "github.com/toolkits/pkg/file" -) - -type ConfigT struct { - Logger loggeri.Config `yaml:"logger"` - HTTP httpSection `yaml:"http"` - Tokens []string `yaml:"tokens"` - I18n i18n.I18nSection `yaml:"i18n"` -} - -type httpSection struct { - Mode string `yaml:"mode"` - CookieName string `yaml:"cookieName"` - CookieDomain string `yaml:"cookieDomain"` -} - -var Config *ConfigT - -// Parse configuration file -func Parse() error { - ymlFile := getYmlFile() - if ymlFile == "" { - return fmt.Errorf("configuration file not found") - } - - var c ConfigT - err := file.ReadYaml(ymlFile, &c) - if err != nil { - return fmt.Errorf("cannot read yml[%s]: %v", ymlFile, err) - } - - Config = &c - if Config.I18n.DictPath == "" { - Config.I18n.DictPath = "etc/dict.json" - } - - if Config.I18n.Lang == "" { - Config.I18n.Lang = "zh" - } - - fmt.Println("config.file:", ymlFile) - - return nil -} - -func getYmlFile() string { - yml := "etc/ams.local.yml" - if file.IsExist(yml) { - return yml - } - - yml = "etc/ams.yml" - if file.IsExist(yml) { - return yml - } - - return "" -} diff --git a/src/modules/ams/http/http_middleware.go b/src/modules/ams/http/http_middleware.go deleted file mode 100644 index 3dc178eb..00000000 --- a/src/modules/ams/http/http_middleware.go +++ /dev/null @@ -1,97 +0,0 @@ -package http - -import ( - "github.com/gin-gonic/gin" - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/slice" - - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/ams/config" -) - -func shouldBeLogin() 
gin.HandlerFunc { - return func(c *gin.Context) { - c.Set("username", mustUsername(c)) - c.Next() - } -} - -func shouldBeRoot() gin.HandlerFunc { - return func(c *gin.Context) { - username := mustUsername(c) - - user, err := models.UserGet("username=?", username) - dangerous(err) - - if user.IsRoot != 1 { - bomb("forbidden") - } - - c.Set("username", username) - c.Set("user", user) - c.Next() - } -} - -func shouldBeService() gin.HandlerFunc { - return func(c *gin.Context) { - token := c.GetHeader("X-Srv-Token") - if token == "" { - bomb("X-Srv-Token is blank") - } - if !slice.ContainsString(config.Config.Tokens, token) { - bomb("X-Srv-Token[%s] invalid", token) - } - c.Next() - } -} - -func mustUsername(c *gin.Context) string { - username := sessionUsername(c) - if username == "" { - username = headerUsername(c) - } - - if username == "" { - bomb("unauthorized") - } - - return username -} - -func sessionUsername(c *gin.Context) string { - sess, err := models.SessionGetWithCache(readSessionId(c)) - if err != nil { - return "" - } - return sess.Username -} - -func headerUsername(c *gin.Context) string { - token := c.GetHeader("X-User-Token") - if token == "" { - return "" - } - - ut, err := models.UserTokenGet("token=?", token) - if err != nil { - logger.Warningf("UserTokenGet[%s] fail: %v", token, err) - return "" - } - - if ut == nil { - return "" - } - - return ut.Username -} - -// ------------ - -func readSessionId(c *gin.Context) string { - sid, err := c.Cookie(config.Config.HTTP.CookieName) - if err != nil { - return "" - } - return sid -} diff --git a/src/modules/ams/http/http_server.go b/src/modules/ams/http/http_server.go deleted file mode 100644 index 2fcf9a4b..00000000 --- a/src/modules/ams/http/http_server.go +++ /dev/null @@ -1,70 +0,0 @@ -package http - -import ( - "context" - "fmt" - "net/http" - "os" - "strings" - "time" - - "github.com/gin-gonic/gin" - - "github.com/didi/nightingale/src/common/address" - 
"github.com/didi/nightingale/src/common/middleware" - "github.com/didi/nightingale/src/modules/ams/config" -) - -var srv = &http.Server{ - ReadTimeout: 10 * time.Second, - WriteTimeout: 10 * time.Second, - MaxHeaderBytes: 1 << 20, -} - -var skipPaths = []string{} - -func Start() { - c := config.Config - - loggerMid := middleware.LoggerWithConfig(middleware.LoggerConfig{SkipPaths: skipPaths}) - recoveryMid := middleware.Recovery() - - if strings.ToLower(c.HTTP.Mode) == "release" { - gin.SetMode(gin.ReleaseMode) - middleware.DisableConsoleColor() - } - - r := gin.New() - r.Use(loggerMid, recoveryMid) - - Config(r) - - srv.Addr = address.GetHTTPListen("ams") - srv.Handler = r - - go func() { - fmt.Println("http.listening:", srv.Addr) - if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed { - fmt.Printf("listening %s occur error: %s\n", srv.Addr, err) - os.Exit(3) - } - }() -} - -// Shutdown http server -func Shutdown() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - if err := srv.Shutdown(ctx); err != nil { - fmt.Println("cannot shutdown http server:", err) - os.Exit(2) - } - - // catching ctx.Done(). timeout of 5 seconds. 
- select { - case <-ctx.Done(): - fmt.Println("shutdown http server timeout of 5 seconds.") - default: - fmt.Println("http server stopped") - } -} diff --git a/src/modules/ams/http/router.go b/src/modules/ams/http/router.go deleted file mode 100644 index d176e6ba..00000000 --- a/src/modules/ams/http/router.go +++ /dev/null @@ -1,36 +0,0 @@ -package http - -import "github.com/gin-gonic/gin" - -func Config(r *gin.Engine) { - notLogin := r.Group("/api/ams-ce") - { - notLogin.GET("/ping", ping) - } - - userLogin := r.Group("/api/ams-ce").Use(shouldBeLogin()) - { - userLogin.GET("/hosts", hostGets) - userLogin.POST("/hosts", hostPost) - userLogin.GET("/host/:id", hostGet) - userLogin.PUT("/hosts/tenant", hostTenantPut) - userLogin.PUT("/hosts/node", hostNodePut) - userLogin.PUT("/hosts/back", hostBackPut) - userLogin.PUT("/hosts/note", hostNotePut) - userLogin.PUT("/hosts/cate", hostCatePut) - userLogin.DELETE("/hosts", hostDel) - userLogin.GET("/hosts/search", hostSearchGets) - userLogin.POST("/hosts/fields", hostFieldNew) - userLogin.GET("/hosts/fields", hostFieldsGets) - userLogin.GET("/hosts/field/:id", hostFieldGet) - userLogin.PUT("/hosts/field/:id", hostFieldPut) - userLogin.DELETE("/hosts/field/:id", hostFieldDel) - userLogin.GET("/host/:id/fields", hostFieldGets) - userLogin.PUT("/host/:id/fields", hostFieldPuts) - } - - v1 := r.Group("/v1/ams-ce").Use(shouldBeService()) - { - v1.POST("/hosts/register", v1HostRegister) - } -} diff --git a/src/modules/ams/http/router_funcs.go b/src/modules/ams/http/router_funcs.go deleted file mode 100644 index 6d4dc8d3..00000000 --- a/src/modules/ams/http/router_funcs.go +++ /dev/null @@ -1,226 +0,0 @@ -package http - -import ( - "strconv" - - "github.com/gin-gonic/gin" - "github.com/toolkits/pkg/errors" - - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/toolkits/i18n" -) - -func dangerous(v interface{}) { - errors.Dangerous(v) -} - -func bomb(format string, a ...interface{}) { - 
errors.Bomb(i18n.Sprintf(format, a...)) -} - -func bind(c *gin.Context, ptr interface{}) { - dangerous(c.ShouldBindJSON(ptr)) -} - -func urlParamStr(c *gin.Context, field string) string { - val := c.Param(field) - - if val == "" { - bomb("url param[%s] is blank", field) - } - - return val -} - -func urlParamInt64(c *gin.Context, field string) int64 { - strval := urlParamStr(c, field) - intval, err := strconv.ParseInt(strval, 10, 64) - if err != nil { - bomb("cannot convert %s to int64", strval) - } - - return intval -} - -func urlParamInt(c *gin.Context, field string) int { - return int(urlParamInt64(c, field)) -} - -func queryStr(c *gin.Context, key string, defaultVal ...string) string { - val := c.Query(key) - if val != "" { - return val - } - - if len(defaultVal) == 0 { - bomb("query param[%s] is necessary", key) - } - - return defaultVal[0] -} - -func queryInt(c *gin.Context, key string, defaultVal ...int) int { - strv := c.Query(key) - if strv != "" { - intv, err := strconv.Atoi(strv) - if err != nil { - bomb("cannot convert [%s] to int", strv) - } - return intv - } - - if len(defaultVal) == 0 { - bomb("query param[%s] is necessary", key) - } - - return defaultVal[0] -} - -func queryInt64(c *gin.Context, key string, defaultVal ...int64) int64 { - strv := c.Query(key) - if strv != "" { - intv, err := strconv.ParseInt(strv, 10, 64) - if err != nil { - bomb("cannot convert [%s] to int64", strv) - } - return intv - } - - if len(defaultVal) == 0 { - bomb("query param[%s] is necessary", key) - } - - return defaultVal[0] -} - -func offset(c *gin.Context, limit int) int { - if limit <= 0 { - limit = 10 - } - - page := queryInt(c, "p", 1) - return (page - 1) * limit -} - -func renderMessage(c *gin.Context, v interface{}) { - if v == nil { - c.JSON(200, gin.H{"err": ""}) - return - } - - switch t := v.(type) { - case string: - c.JSON(200, gin.H{"err": t}) - case error: - c.JSON(200, gin.H{"err": t.Error()}) - } -} - -func renderData(c *gin.Context, data interface{}, err 
error) { - if err == nil { - c.JSON(200, gin.H{"dat": data, "err": ""}) - return - } - - renderMessage(c, err.Error()) -} - -func renderZeroPage(c *gin.Context) { - renderData(c, gin.H{ - "list": []int{}, - "total": 0, - }, nil) -} - -// ------------ - -type idsForm struct { - Ids []int64 `json:"ids"` -} - -// ------------ - -func loginUsername(c *gin.Context) string { - value, has := c.Get("username") - if !has { - bomb("unauthorized") - } - - if value == nil { - bomb("unauthorized") - } - - return value.(string) -} - -func loginUser(c *gin.Context) *models.User { - username := loginUsername(c) - - user, err := models.UserGet("username=?", username) - dangerous(err) - - if user == nil { - bomb("unauthorized") - } - - return user -} - -func loginRoot(c *gin.Context) *models.User { - value, has := c.Get("user") - if !has { - bomb("unauthorized") - } - - return value.(*models.User) -} - -func User(id int64) *models.User { - user, err := models.UserGet("id=?", id) - if err != nil { - bomb("cannot retrieve user[%d]: %v", id, err) - } - - if user == nil { - bomb("no such user[%d]", id) - } - - return user -} - -func Team(id int64) *models.Team { - team, err := models.TeamGet("id=?", id) - if err != nil { - bomb("cannot retrieve team[%d]: %v", id, err) - } - - if team == nil { - bomb("no such team[%d]", id) - } - - return team -} - -func Role(id int64) *models.Role { - role, err := models.RoleGet("id=?", id) - if err != nil { - bomb("cannot retrieve role[%d]: %v", id, err) - } - - if role == nil { - bomb("no such role[%d]", id) - } - - return role -} - -func Node(id int64) *models.Node { - node, err := models.NodeGet("id=?", id) - dangerous(err) - - if node == nil { - bomb("no such node[id:%d]", id) - } - - return node -} diff --git a/src/modules/ams/http/router_health.go b/src/modules/ams/http/router_health.go deleted file mode 100644 index 4b6a7523..00000000 --- a/src/modules/ams/http/router_health.go +++ /dev/null @@ -1,7 +0,0 @@ -package http - -import 
"github.com/gin-gonic/gin" - -func ping(c *gin.Context) { - c.String(200, "pong") -} diff --git a/src/modules/index/cache/counter_map.go b/src/modules/index/cache/counter_map.go deleted file mode 100644 index 926183c2..00000000 --- a/src/modules/index/cache/counter_map.go +++ /dev/null @@ -1,54 +0,0 @@ -package cache - -import ( - "sync" - - "github.com/didi/nightingale/src/toolkits/stats" - - "github.com/toolkits/pkg/logger" -) - -// Counter: sorted tags -type CounterTsMap struct { - sync.RWMutex - M map[string]int64 `json:"counters"` // map[counter]ts -} - -func NewCounterTsMap() *CounterTsMap { - return &CounterTsMap{M: make(map[string]int64)} -} - -func (c *CounterTsMap) Set(counter string, ts int64) { - c.Lock() - defer c.Unlock() - c.M[counter] = ts -} - -func (c *CounterTsMap) Clean(now, timeDuration int64, endpoint, metric string) { - c.Lock() - defer c.Unlock() - for counter, ts := range c.M { - if now-ts > timeDuration { - delete(c.M, counter) - stats.Counter.Set("counter.clean", 1) - - logger.Debugf("clean endpoint index:%s metric:%s counter:%s", endpoint, metric, counter) - } - } -} - -func (c *CounterTsMap) GetCounters() map[string]int64 { - c.RLock() - defer c.RUnlock() - m := make(map[string]int64) - for k, v := range c.M { - m[k] = v - } - return m -} - -func (c *CounterTsMap) Len() int { - c.RLock() - defer c.RUnlock() - return len(c.M) -} diff --git a/src/modules/index/cache/endpoint_map.go b/src/modules/index/cache/endpoint_map.go deleted file mode 100644 index 845c6668..00000000 --- a/src/modules/index/cache/endpoint_map.go +++ /dev/null @@ -1,150 +0,0 @@ -package cache - -import ( - "fmt" - "sync" - "time" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/toolkits/stats" - - "github.com/toolkits/pkg/logger" -) - -type EndpointIndexMap struct { - sync.RWMutex - M map[string]*MetricIndexMap `json:"endpoint_index"` //map[endpoint]metricMap{map[metric]Index} -} - -// Push 索引数据 -func (e *EndpointIndexMap) 
Push(item dataobj.IndexModel, now int64) { - tags := dataobj.SortedTags(item.Tags) - metric := item.Metric - var key string - if item.Nid != "" { - key = item.Nid - } else { - key = item.Endpoint - } - - // 先判断 endpoint 是否已经被记录,不存在则直接初始化 - metricIndexMap, exists := e.GetMetricIndexMap(key) - if !exists { - metricIndexMap = &MetricIndexMap{Data: make(map[string]*MetricIndex)} - metricIndexMap.SetMetricIndex(metric, NewMetricIndex(item, tags, now)) - e.SetMetricIndexMap(key, metricIndexMap) - - if item.Nid == "" { - NewEndpoints.PushFront(key) //必须在 metricIndexMap 成功之后再 push - } - return - } - - // 再判断该 endpoint 下的具体某个 metric 是否存在 - metricIndex, exists := metricIndexMap.GetMetricIndex(metric) - if !exists { - metricIndexMap.SetMetricIndex(metric, NewMetricIndex(item, tags, now)) - return - } - metricIndexMap.Lock() - metricIndex.Set(item, tags, now) - metricIndexMap.Unlock() -} - -func (e *EndpointIndexMap) Clean(timeDuration int64) { - endpoints := e.GetEndpoints() - now := time.Now().Unix() - for _, endpoint := range endpoints { - metricIndexMap, exists := e.GetMetricIndexMap(endpoint) - if !exists { - continue - } - - metricIndexMap.Clean(now, timeDuration, endpoint) - if metricIndexMap.Len() < 1 { - e.Lock() - delete(e.M, endpoint) - stats.Counter.Set("endpoint.clean", 1) - e.Unlock() - logger.Debug("clean index endpoint:", endpoint) - } - } -} - -func (e *EndpointIndexMap) GetMetricIndex(endpoint, metric string) (*MetricIndex, bool) { - e.RLock() - defer e.RUnlock() - - metricIndexMap, exists := e.M[endpoint] - if !exists { - return nil, false - } - return metricIndexMap.GetMetricIndex(metric) -} - -func (e *EndpointIndexMap) GetMetricIndexMap(endpoint string) (*MetricIndexMap, bool) { - e.RLock() - defer e.RUnlock() - - metricIndexMap, exists := e.M[endpoint] - return metricIndexMap, exists -} - -func (e *EndpointIndexMap) SetMetricIndexMap(endpoint string, metricIndex *MetricIndexMap) { - e.Lock() - defer e.Unlock() - - e.M[endpoint] = metricIndex -} - -func 
(e *EndpointIndexMap) GetMetricsBy(endpoint string) []string { - e.RLock() - defer e.RUnlock() - - if _, exists := e.M[endpoint]; !exists { - return []string{} - } - return e.M[endpoint].GetMetrics() -} - -func (e *EndpointIndexMap) GetIndexByClude(endpoint, metric string, include, exclude []*TagPair) ([]string, error) { - metricIndex, exists := e.GetMetricIndex(endpoint, metric) - if !exists { - return []string{}, nil - } - - tagkvs := metricIndex.TagkvMap.GetTagkvMap() - tags := getMatchedTags(tagkvs, include, exclude) - // 部分 tagk 的 tagv 全部被 exclude 或者 完全没有匹配的 - if len(tags) != len(tagkvs) || len(tags) == 0 { - return []string{}, nil - } - - if OverMaxLimit(tags, Config.MaxQueryCount) { - err := fmt.Errorf("xclude fullmatch get too much counters, endpoint:%s metric:%s, "+ - "include:%v, exclude:%v\n", endpoint, metric, include, exclude) - return []string{}, err - } - - return GetAllCounter(GetSortTags(tags)), nil -} - -func (e *EndpointIndexMap) GetEndpoints() []string { - e.RLock() - defer e.RUnlock() - - ret := make([]string, len(e.M)) - i := 0 - for endpoint := range e.M { - ret[i] = endpoint - i++ - } - return ret -} - -func (e *EndpointIndexMap) DelByEndpoint(endpoint string) { - e.Lock() - defer e.Unlock() - - delete(e.M, endpoint) -} diff --git a/src/modules/index/cache/indexdb.go b/src/modules/index/cache/indexdb.go deleted file mode 100644 index 83845517..00000000 --- a/src/modules/index/cache/indexdb.go +++ /dev/null @@ -1,346 +0,0 @@ -package cache - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "math/rand" - "net/http" - "os" - "time" - - "github.com/didi/nightingale/src/common/identity" - "github.com/didi/nightingale/src/common/report" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/toolkits/compress" - "github.com/didi/nightingale/src/toolkits/stats" - - "github.com/toolkits/pkg/concurrent/semaphore" - "github.com/toolkits/pkg/container/list" - "github.com/toolkits/pkg/file" - 
"github.com/toolkits/pkg/logger" -) - -type CacheSection struct { - CacheDuration int `yaml:"cacheDuration"` - CleanInterval int `yaml:"cleanInterval"` - PersistInterval int `yaml:"persistInterval"` - PersistDir string `yaml:"persistDir"` - RebuildWorker int `yaml:"rebuildWorker"` - MaxQueryCount int `yaml:"maxQueryCount"` - ReportEndpoint bool `yaml:"reportEndpoint"` - HbsMod string `yaml:"hbsMod"` -} - -var IndexDB *EndpointIndexMap -var NidIndexDB *EndpointIndexMap -var Config CacheSection -var NewEndpoints *list.SafeListLimited - -var semaPermanence = semaphore.NewSemaphore(1) - -func InitDB(cfg CacheSection) { - Config = cfg - - IndexDB = &EndpointIndexMap{M: make(map[string]*MetricIndexMap)} - NidIndexDB = &EndpointIndexMap{M: make(map[string]*MetricIndexMap)} - NewEndpoints = list.NewSafeListLimited(100000) - - Rebuild(Config.PersistDir, Config.RebuildWorker) - - go StartCleaner(Config.CleanInterval, Config.CacheDuration) - go StartPersist(Config.PersistInterval) - -} - -func StartCleaner(interval int, cacheDuration int) { - ticker := time.NewTicker(time.Duration(interval) * time.Second) - for { - <-ticker.C - - start := time.Now() - IndexDB.Clean(int64(cacheDuration)) - NidIndexDB.Clean(int64(cacheDuration)) - logger.Infof("clean took %.2f ms\n", float64(time.Since(start).Nanoseconds())*1e-6) - } -} - -func StartPersist(interval int) { - ticker := time.NewTicker(time.Duration(interval) * time.Second) - for { - <-ticker.C - - if err := Persist("normal"); err != nil { - logger.Errorf("persist error:%+v", err) - stats.Counter.Set("persist.err", 1) - } - } -} - -func Rebuild(persistenceDir string, concurrency int) { - var dbDir string - indexList := IndexList() - if len(indexList) > 0 { - err := getIndexFromRemote(indexList) - if err == nil { - dbDir = fmt.Sprintf("%s/%s", persistenceDir, "download") - } - } - - // dbDir 为空说明从远端下载索引失败,从本地读取 - if dbDir == "" { - logger.Debug("rebuild index from local disk") - dbDir = fmt.Sprintf("%s/%s", persistenceDir, "db") - 
} - - endpointDir := dbDir + "/endpoint" - nidDir := dbDir + "/nid" - - if err := RebuildFromDisk(IndexDB, endpointDir, concurrency); err != nil { - logger.Warningf("rebuild index from local disk error:%+v", err) - } - - if err := RebuildFromDisk(NidIndexDB, nidDir, concurrency); err != nil { - logger.Warningf("rebuild index from local disk error:%+v", err) - } -} - -func RebuildFromDisk(indexDB *EndpointIndexMap, indexFileDir string, concurrency int) error { - logger.Info("Try to rebuild index from disk") - if !file.IsExist(indexFileDir) { - return fmt.Errorf("index persistence dir [%s] don't exist", indexFileDir) - } - - // 遍历目录 - files, err := ioutil.ReadDir(indexFileDir) - if err != nil { - return err - } - logger.Infof("There're [%d] endpoints need to rebuild", len(files)) - - sema := semaphore.NewSemaphore(concurrency) - for _, fileObj := range files { - // 只处理文件 - if fileObj.IsDir() { - continue - } - endpoint := fileObj.Name() - - sema.Acquire() - go func(endpoint string) { - defer sema.Release() - - metricIndexMap, err := ReadIndexFromFile(indexFileDir, endpoint) - if err != nil { - logger.Errorf("read file error, [endpoint:%s][reason:%v]", endpoint, err) - return - } - // 没有标记上报过的 endpoint 需要重新上报给 monapi - if !metricIndexMap.IsReported() { - NewEndpoints.PushFront(endpoint) - } - - indexDB.Lock() - indexDB.M[endpoint] = metricIndexMap - indexDB.Unlock() - }(endpoint) - - } - logger.Infof("rebuild from disk done") - return nil -} - -func Persist(mode string) error { - indexFileDir := Config.PersistDir - - switch mode { - case "end": - semaPermanence.Acquire() - defer semaPermanence.Release() - case "normal", "download": - if !semaPermanence.TryAcquire() { - return fmt.Errorf("permanence operate is already running") - } - defer semaPermanence.Release() - default: - return fmt.Errorf("wrong mode:%s", mode) - } - - var tmpDir string - if mode == "download" { - tmpDir = fmt.Sprintf("%s/%s", indexFileDir, "download") - } else { - tmpDir = fmt.Sprintf("%s/%s", 
indexFileDir, "tmp") - } - - // write endpoint index data to disk - endpointDir := tmpDir + "/endpoint" - if err := os.RemoveAll(endpointDir); err != nil { - return err - } - // create tmp directory - if err := os.MkdirAll(endpointDir, 0777); err != nil { - return err - } - - endpoints := IndexDB.GetEndpoints() - epLength := len(endpoints) - logger.Infof("save index data to disk[num:%d][mode:%s]\n", epLength, mode) - - for i, endpoint := range endpoints { - logger.Infof("sync [%s] to disk, [%d%%] complete\n", endpoint, int((float64(i)/float64(epLength))*100)) - - if err := WriteIndexToFile("endpoint", endpointDir, endpoint); err != nil { - logger.Errorf("write %s index to file err:%v\n", endpoint, err) - } - } - - nidDir := tmpDir + "/nid" - if err := os.RemoveAll(nidDir); err != nil { - return err - } - // create tmp directory - if err := os.MkdirAll(nidDir, 0777); err != nil { - return err - } - - nids := NidIndexDB.GetEndpoints() - nidLength := len(nids) - logger.Infof("save index data to disk[num:%d][mode:%s]\n", nidLength, mode) - - for i, nid := range nids { - logger.Infof("sync [%s] to disk, [%d%%] complete\n", nid, int((float64(i)/float64(nidLength))*100)) - - if err := WriteIndexToFile("nid", nidDir, nid); err != nil { - logger.Errorf("write %s index to file err:%v\n", nid, err) - } - } - - logger.Info("finish syncing index data") - - if mode == "download" { - idxPath := fmt.Sprintf("%s/%s", indexFileDir, "db.tar.gz") - if err := compress.TarGz(idxPath, tmpDir); err != nil { - return err - } - } - - // clean up the discard directory - oleIndexDir := fmt.Sprintf("%s/%s", indexFileDir, "db") - if err := os.RemoveAll(oleIndexDir); err != nil { - return err - } - - // rename directory - if err := os.Rename(tmpDir, oleIndexDir); err != nil { - return err - } - - return nil -} - -func WriteIndexToFile(mod, indexDir, endpoint string) error { - var metricIndexMap *MetricIndexMap - var exists bool - if mod == "endpoint" { - metricIndexMap, exists = 
IndexDB.GetMetricIndexMap(endpoint) - if !exists || metricIndexMap == nil { - return fmt.Errorf("endpoint index doesn't found") - } - } else { - metricIndexMap, exists = NidIndexDB.GetMetricIndexMap(endpoint) - if !exists || metricIndexMap == nil { - return fmt.Errorf("endpoint index doesn't found") - } - } - - metricIndexMap.RLock() - body, err := json.Marshal(metricIndexMap) - stats.Counter.Set("write.file", 1) - metricIndexMap.RUnlock() - - if err != nil { - return fmt.Errorf("marshal struct to json failed:%v", err) - } - - err = ioutil.WriteFile(fmt.Sprintf("%s/%s", indexDir, endpoint), body, 0666) - return err -} - -func ReadIndexFromFile(indexDir, endpoint string) (*MetricIndexMap, error) { - metricIndexMap := new(MetricIndexMap) - - body, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", indexDir, endpoint)) - if err != nil { - return metricIndexMap, err - } - - err = json.Unmarshal(body, metricIndexMap) - return metricIndexMap, err -} - -func IndexList() []*models.Instance { - activeIndexes, err := report.GetAlive("index", Config.HbsMod) - if err != nil { - return []*models.Instance{} - } - - var instances []*models.Instance - for _, instance := range activeIndexes { - ident, _ := identity.GetIdent() - if instance.Identity != ident { - instances = append(instances, instance) - } - } - return instances -} - -func getIndexFromRemote(instances []*models.Instance) error { - filepath := "db.tar.gz" - request := func(idx int) error { - url := fmt.Sprintf("http://%s:%s/api/index/idxfile", instances[idx].Identity, instances[idx].HTTPPort) - resp, err := http.Get(url) - if err != nil { - logger.Warningf("get index from:%s err:%v", url, err) - return err - } - defer resp.Body.Close() - - // Create the file - out, err := os.Create(filepath) - if err != nil { - logger.Warningf("create file:%s err:%v", filepath, err) - return err - } - defer out.Close() - - // Write the body to file - _, err = io.Copy(out, resp.Body) - if err != nil { - logger.Warningf("io.Copy error:%+v", 
err) - return err - } - return nil - } - - perm := rand.Perm(len(instances)) - var err error - // retry - for i := range perm { - err = request(perm[i]) - if err == nil { - break - } - } - - if err != nil { - return err - } - - if err := compress.UnTarGz(filepath, "."); err != nil { - return err - } - - return os.Remove(filepath) -} diff --git a/src/modules/index/cache/metric_map.go b/src/modules/index/cache/metric_map.go deleted file mode 100644 index 114f0e10..00000000 --- a/src/modules/index/cache/metric_map.go +++ /dev/null @@ -1,128 +0,0 @@ -package cache - -import ( - "sync" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/toolkits/stats" -) - -type MetricIndex struct { - sync.RWMutex - Metric string `json:"metric"` - Step int `json:"step"` - DsType string `json:"dstype"` - TagkvMap *TagkvIndex `json:"tags"` - CounterMap *CounterTsMap `json:"counters"` - Ts int64 `json:"ts"` -} - -func NewMetricIndex(item dataobj.IndexModel, counter string, now int64) *MetricIndex { - metricIndex := &MetricIndex{ - Metric: item.Metric, - Step: item.Step, - DsType: item.DsType, - TagkvMap: NewTagkvIndex(), - CounterMap: NewCounterTsMap(), - Ts: now, - } - - for k, v := range item.Tags { - metricIndex.TagkvMap.Set(k, v, now) - } - - metricIndex.CounterMap.Set(counter, now) - - return metricIndex -} - -func (m *MetricIndex) Set(item dataobj.IndexModel, counter string, now int64) { - m.Lock() - defer m.Unlock() - - m.Step = item.Step - m.DsType = item.DsType - m.Ts = now - - for k, v := range item.Tags { - m.TagkvMap.Set(k, v, now) - } - - m.CounterMap.Set(counter, now) -} - -type MetricIndexMap struct { - sync.RWMutex - Reported bool // 用于判断 endpoint 是否已成功上报给 monapi - Data map[string]*MetricIndex -} - -func (m *MetricIndexMap) Clean(now, timeDuration int64, endpoint string) { - m.Lock() - defer m.Unlock() - - for metric, metricIndex := range m.Data { - // 删除过期 tagkv - if now-metricIndex.Ts > timeDuration { - 
stats.Counter.Set("metric.clean", 1) - delete(m.Data, metric) - continue - } - metricIndex.TagkvMap.Clean(now, timeDuration) - metricIndex.CounterMap.Clean(now, timeDuration, endpoint, metric) - } -} - -func (m *MetricIndexMap) DelMetric(metric string) { - m.Lock() - defer m.Unlock() - - delete(m.Data, metric) -} - -func (m *MetricIndexMap) Len() int { - m.RLock() - defer m.RUnlock() - - return len(m.Data) -} - -func (m *MetricIndexMap) GetMetricIndex(metric string) (*MetricIndex, bool) { - m.RLock() - defer m.RUnlock() - - metricIndex, exists := m.Data[metric] - return metricIndex, exists -} - -func (m *MetricIndexMap) SetMetricIndex(metric string, metricIndex *MetricIndex) { - m.Lock() - defer m.Unlock() - - m.Data[metric] = metricIndex -} - -func (m *MetricIndexMap) GetMetrics() []string { - m.RLock() - defer m.RUnlock() - - var metrics []string - for k := range m.Data { - metrics = append(metrics, k) - } - return metrics -} - -func (m *MetricIndexMap) SetReported() { - m.Lock() - defer m.Unlock() - - m.Reported = true -} - -func (m *MetricIndexMap) IsReported() bool { - m.RLock() - defer m.RUnlock() - - return m.Reported -} diff --git a/src/modules/index/cache/tag.go b/src/modules/index/cache/tag.go deleted file mode 100644 index a9d7206b..00000000 --- a/src/modules/index/cache/tag.go +++ /dev/null @@ -1,153 +0,0 @@ -package cache - -import ( - "sort" -) - -type TagPair struct { - Key string `json:"tagk"` // json 和变量不一致为了兼容前端 - Values []string `json:"tagv"` -} - -type TagPairs []*TagPair - -func (t TagPairs) Len() int { - return len(t) -} - -func (t TagPairs) Less(i, j int) bool { - return t[i].Key > t[j].Key -} - -func (t TagPairs) Swap(i, j int) { - t[i], t[j] = t[j], t[i] -} - -func getMatchedTags(tagsMap map[string][]string, include, exclude []*TagPair) map[string][]string { - inMap := make(map[string]map[string]bool) - exMap := make(map[string]map[string]bool) - - if len(include) > 0 { - for _, tagPair := range include { - // include 中的 tagKey 在 tags 
列表中不存在 - if _, exists := tagsMap[tagPair.Key]; !exists { - return nil - } - // tagKey 存在,初始化 map - if _, found := inMap[tagPair.Key]; !found { - inMap[tagPair.Key] = make(map[string]bool) - } - // 对存在的值进行标记 - for _, tagv := range tagPair.Values { - inMap[tagPair.Key][tagv] = true - } - } - } - - if len(exclude) > 0 { - for _, tagPair := range exclude { - if _, found := exMap[tagPair.Key]; !found { - exMap[tagPair.Key] = make(map[string]bool) - } - for _, tagv := range tagPair.Values { - exMap[tagPair.Key][tagv] = true - } - } - } - - fullMatch := make(map[string][]string) - for tagk, tagvs := range tagsMap { - for _, tagv := range tagvs { - // 排除必须排除的, exclude 的优先级高于 include - if _, tagkExists := exMap[tagk]; tagkExists { - if _, tagvExists := exMap[tagk][tagv]; tagvExists { - continue - } - } - // 包含必须包含的 - if _, tagkExists := inMap[tagk]; tagkExists { - if _, tagvExists := inMap[tagk][tagv]; tagvExists { - if _, found := fullMatch[tagk]; !found { - fullMatch[tagk] = make([]string, 0) - } - fullMatch[tagk] = append(fullMatch[tagk], tagv) - } - continue - } - // 除此之外全都包含 - if _, found := fullMatch[tagk]; !found { - fullMatch[tagk] = make([]string, 0) - } - fullMatch[tagk] = append(fullMatch[tagk], tagv) - } - } - - return fullMatch -} - -// GetAllCounter returns all possible tags combination. -// But not all of them will be in the CounterMaps. -func GetAllCounter(tags []*TagPair) []string { - if len(tags) == 0 { - return []string{} - } - - head := tags[0] - firstList := make([]string, len(head.Values)) - - for i, v := range head.Values { - firstList[i] = head.Key + "=" + v - } - - otherList := GetAllCounter(tags[1:]) - if len(otherList) == 0 { - return firstList - } - - rest := make([]string, len(otherList)*len(firstList)) - i := 0 - for _, firstV := range firstList { - for _, otherV := range otherList { - rest[i] = firstV + "," + otherV - i++ - } - } - - return rest -} - -// OverMaxLimit check whether it can over limit or not. 
-func OverMaxLimit(tagMap map[string][]string, limit int) bool { - multiRes := 1 - - for _, values := range tagMap { - multiRes = multiRes * len(values) - if multiRes > limit { - return true - } - } - - return false -} - -func TagPairToMap(tagPairs []*TagPair) map[string][]string { - tagMap := make(map[string][]string) - for _, tagPair := range tagPairs { - tagMap[tagPair.Key] = tagPair.Values - } - return tagMap -} - -func GetSortTags(tagMap map[string][]string) []*TagPair { - var keys []string - for key := range tagMap { - keys = append(keys, key) - } - sort.Strings(keys) - - newTags := make([]*TagPair, len(keys)) - for i, key := range keys { - newTags[i] = &TagPair{Key: key, Values: tagMap[key]} - } - return newTags -} diff --git a/src/modules/index/cache/tag_map.go b/src/modules/index/cache/tag_map.go deleted file mode 100644 index a57ff80c..00000000 --- a/src/modules/index/cache/tag_map.go +++ /dev/null @@ -1,99 +0,0 @@ -package cache - -import ( - "sync" -) - -// TagKeys -type TagkvIndex struct { - sync.RWMutex - Tagkv map[string]map[string]int64 `json:"tagkv"` // map[tagk]map[tagv]ts -} - -func NewTagkvIndex() *TagkvIndex { - return &TagkvIndex{ - Tagkv: make(map[string]map[string]int64), - } -} - -func (t *TagkvIndex) Set(tagk, tagv string, now int64) { - t.Lock() - defer t.Unlock() - - if _, exists := t.Tagkv[tagk]; !exists { - t.Tagkv[tagk] = make(map[string]int64) - } - t.Tagkv[tagk][tagv] = now -} - -func (t *TagkvIndex) GetTagkv() []*TagPair { - t.RLock() - defer t.RUnlock() - - var tagkvs []*TagPair - for k, vm := range t.Tagkv { - var vs []string - for v := range vm { - vs = append(vs, v) - } - tagkv := TagPair{ - Key: k, - Values: vs, - } - tagkvs = append(tagkvs, &tagkv) - } - - return tagkvs -} - -func (t *TagkvIndex) GetTagkvMap() map[string][]string { - t.RLock() - defer t.RUnlock() - - tagkvs := make(map[string][]string) - for k, vm := range t.Tagkv { - var vs []string - for v := range vm { - vs = append(vs, v) - } - tagkvs[k] = vs - } - - 
return tagkvs -} - -func (t *TagkvIndex) Clean(now, timeDuration int64) { - t.Lock() - defer t.Unlock() - - for k, vm := range t.Tagkv { - for v, ts := range vm { - if now-ts > timeDuration { - delete(t.Tagkv[k], v) - } - } - if len(t.Tagkv[k]) == 0 { - delete(t.Tagkv, k) - } - } -} - -func (t *TagkvIndex) DelTag(tagk, tagv string) { - t.Lock() - defer t.Unlock() - - if _, exists := t.Tagkv[tagk]; exists { - delete(t.Tagkv[tagk], tagv) - } - - if len(t.Tagkv[tagk]) == 0 { - delete(t.Tagkv, tagk) - } -} - -func (t *TagkvIndex) Len() int { - t.RLock() - defer t.RUnlock() - - return len(t.Tagkv) -} diff --git a/src/modules/index/config/config.go b/src/modules/index/config/config.go deleted file mode 100644 index 89a97d80..00000000 --- a/src/modules/index/config/config.go +++ /dev/null @@ -1,85 +0,0 @@ -package config - -import ( - "bytes" - "fmt" - "strconv" - - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/common/identity" - "github.com/didi/nightingale/src/common/loggeri" - "github.com/didi/nightingale/src/common/report" - "github.com/didi/nightingale/src/modules/index/cache" - - "github.com/spf13/viper" - "github.com/toolkits/pkg/file" -) - -type ConfYaml struct { - HTTP HTTPSection `yaml:"http"` - RPC RPCSection `yaml:"rpc"` - Cache cache.CacheSection `yaml:"cache"` - Logger loggeri.Config `yaml:"logger"` - Identity identity.Identity `yaml:"identity"` - Report report.ReportSection `yaml:"report"` -} - -type HTTPSection struct { - Enabled bool `yaml:"enabled"` -} - -type RPCSection struct { - Enabled bool `yaml:"enabled"` -} - -var ( - Config *ConfYaml -) - -func GetCfgYml() *ConfYaml { - return Config -} - -func Parse(conf string) error { - bs, err := file.ReadBytes(conf) - if err != nil { - return fmt.Errorf("cannot read yml[%s]: %v", conf, err) - } - - viper.SetConfigType("yaml") - err = viper.ReadConfig(bytes.NewBuffer(bs)) - if err != nil { - return fmt.Errorf("cannot read yml[%s]: %v", conf, err) - } - - 
viper.SetDefault("http.enabled", true) - viper.SetDefault("rpc.enabled", true) - - viper.SetDefault("cache.cacheDuration", 90000) - viper.SetDefault("cache.cleanInterval", 3600) //清理周期,单位秒 - viper.SetDefault("cache.persistInterval", 900) //数据落盘周期,单位秒 - viper.SetDefault("cache.persistDir", "./.index") //索引落盘目录 - viper.SetDefault("cache.rebuildWorker", 20) //从磁盘读取所以的数据的并发个数 - viper.SetDefault("cache.maxQueryCount", 100000) //clude接口支持查询的最大曲线个数 - viper.SetDefault("cache.reportEndpoint", true) - viper.SetDefault("cache.hbsMod", "rdb") - - viper.SetDefault("report", map[string]interface{}{ - "mod": "index", - "enabled": true, - "interval": 4000, - "timeout": 3000, - "api": "api/hbs/heartbeat", - "remark": "", - }) - - err = viper.Unmarshal(&Config) - if err != nil { - return fmt.Errorf("unmarshal %v", err) - } - - Config.Report.HTTPPort = strconv.Itoa(address.GetHTTPPort("index")) - Config.Report.RPCPort = strconv.Itoa(address.GetRPCPort("index")) - - return err -} diff --git a/src/modules/index/http/routes/health_router.go b/src/modules/index/http/routes/health_router.go deleted file mode 100644 index ff123f7f..00000000 --- a/src/modules/index/http/routes/health_router.go +++ /dev/null @@ -1,51 +0,0 @@ -package routes - -import ( - "fmt" - "os" - - "github.com/didi/nightingale/src/modules/index/cache" - "github.com/didi/nightingale/src/toolkits/http/render" - - "github.com/gin-gonic/gin" -) - -func ping(c *gin.Context) { - c.String(200, "pong") -} - -func addr(c *gin.Context) { - c.String(200, c.Request.RemoteAddr) -} - -func pid(c *gin.Context) { - c.String(200, fmt.Sprintf("%d", os.Getpid())) -} - -func indexTotal(c *gin.Context) { - var total int - total += getIndexDBCount(cache.IndexDB) - total += getIndexDBCount(cache.NidIndexDB) - render.Data(c, total, nil) -} - -func getIndexDBCount(indexDB *cache.EndpointIndexMap) int { - endpoints := indexDB.GetEndpoints() - var total int - for _, endpoint := range endpoints { - metricIndexMap, exists := 
cache.IndexDB.GetMetricIndexMap(endpoint) - if !exists || metricIndexMap == nil { - continue - } - - metrics := metricIndexMap.GetMetrics() - for _, metric := range metrics { - metricIndex, exists := metricIndexMap.GetMetricIndex(metric) - if !exists || metricIndex == nil { - continue - } - total += metricIndex.CounterMap.Len() - } - } - return total -} diff --git a/src/modules/index/http/routes/index_router.go b/src/modules/index/http/routes/index_router.go deleted file mode 100644 index 21410677..00000000 --- a/src/modules/index/http/routes/index_router.go +++ /dev/null @@ -1,624 +0,0 @@ -package routes - -import ( - "fmt" - "strings" - - "github.com/didi/nightingale/src/modules/index/cache" - "github.com/didi/nightingale/src/toolkits/http/render" - "github.com/didi/nightingale/src/toolkits/stats" - - "github.com/gin-gonic/gin" - "github.com/toolkits/pkg/errors" - "github.com/toolkits/pkg/logger" -) - -type EndpointsRecv struct { - Endpoints []string `json:"endpoints"` - Nids []string `json:"nids"` -} - -type MetricList struct { - Metrics []string `json:"metrics"` -} - -func GetMetrics(c *gin.Context) { - stats.Counter.Set("metric.qp10s", 1) - recv := EndpointsRecv{} - errors.Dangerous(c.ShouldBindJSON(&recv)) - - m := make(map[string]struct{}) - resp := MetricList{} - - if len(recv.Nids) > 0 { - for _, nid := range recv.Nids { - metrics := cache.NidIndexDB.GetMetricsBy(nid) - for _, metric := range metrics { - if _, exists := m[metric]; !exists { - m[metric] = struct{}{} - resp.Metrics = append(resp.Metrics, metric) - } - } - } - } else { - for _, endpoint := range recv.Endpoints { - metrics := cache.IndexDB.GetMetricsBy(endpoint) - for _, metric := range metrics { - if _, exists := m[metric]; !exists { - m[metric] = struct{}{} - resp.Metrics = append(resp.Metrics, metric) - } - } - } - } - - render.Data(c, resp, nil) -} - -type EndpointRecv struct { - Endpoints []string `json:"endpoints"` - Nids []string `json:"nids"` -} - -func DelIdxByEndpoint(c *gin.Context) 
{ - recv := EndpointRecv{} - errors.Dangerous(c.ShouldBindJSON(&recv)) - if len(recv.Nids) > 0 { - for _, nid := range recv.Nids { - cache.NidIndexDB.DelByEndpoint(nid) - } - } else { - for _, endpoint := range recv.Endpoints { - cache.IndexDB.DelByEndpoint(endpoint) - } - } - - render.Data(c, "ok", nil) -} - -type EndpointMetricRecv struct { - Endpoints []string `json:"endpoints"` - Nids []string `json:"nids"` - Metrics []string `json:"metrics"` -} - -func DelMetrics(c *gin.Context) { - recv := EndpointMetricRecv{} - errors.Dangerous(c.ShouldBindJSON(&recv)) - - if len(recv.Nids) > 0 { - for _, nid := range recv.Nids { - if metricIndexMap, exists := cache.NidIndexDB.GetMetricIndexMap(nid); exists { - for _, metric := range recv.Metrics { - metricIndexMap.DelMetric(metric) - } - } - } - } else { - for _, endpoint := range recv.Endpoints { - if metricIndexMap, exists := cache.IndexDB.GetMetricIndexMap(endpoint); exists { - for _, metric := range recv.Metrics { - metricIndexMap.DelMetric(metric) - } - } - } - } - - render.Data(c, "ok", nil) -} - -type IndexTagkvResp struct { - Endpoints []string `json:"endpoints"` - Nids []string `json:"nids"` - Metric string `json:"metric"` - Tagkv []*cache.TagPair `json:"tagkv"` -} - -func DelCounter(c *gin.Context) { - recv := IndexTagkvResp{} - errors.Dangerous(c.ShouldBindJSON(&recv)) - - if len(recv.Nids) > 0 { - for _, nid := range recv.Nids { - metricIndex, exists := cache.NidIndexDB.GetMetricIndex(nid, recv.Metric) - if !exists { - continue - } - - for _, tagPair := range recv.Tagkv { - for _, v := range tagPair.Values { - metricIndex.Lock() - metricIndex.TagkvMap.DelTag(tagPair.Key, v) - metricIndex.Unlock() - } - } - } - } else { - for _, endpoint := range recv.Endpoints { - metricIndex, exists := cache.IndexDB.GetMetricIndex(endpoint, recv.Metric) - if !exists { - continue - } - - for _, tagPair := range recv.Tagkv { - for _, v := range tagPair.Values { - metricIndex.Lock() - metricIndex.TagkvMap.DelTag(tagPair.Key, v) - 
metricIndex.Unlock() - } - } - } - } - - render.Data(c, "ok", nil) -} - -func GetTagPairs(c *gin.Context) { - stats.Counter.Set("tag.qp10s", 1) - recv := EndpointMetricRecv{} - errors.Dangerous(c.ShouldBindJSON(&recv)) - - var keys []string - var indexDB *cache.EndpointIndexMap - - if len(recv.Nids) > 0 { - indexDB = cache.NidIndexDB - for _, nid := range recv.Nids { - keys = append(keys, nid) - } - } else { - indexDB = cache.IndexDB - for _, endpoint := range recv.Endpoints { - keys = append(keys, endpoint) - } - } - - resp := make([]*IndexTagkvResp, 0) - for _, metric := range recv.Metrics { - tagkvFilter := make(map[string]map[string]struct{}) - tagkvs := make([]*cache.TagPair, 0) - - for _, key := range keys { - metricIndex, exists := indexDB.GetMetricIndex(key, metric) - if !exists { - logger.Debugf("index not found by %s %s", key, metric) - stats.Counter.Set("query.tag.miss", 1) - continue - } - - metricIndex.RLock() - tagkvMap := metricIndex.TagkvMap.GetTagkvMap() - metricIndex.RUnlock() - - for tagk, tagvs := range tagkvMap { - tagvFilter, exists := tagkvFilter[tagk] - if !exists { - tagvFilter = make(map[string]struct{}) - } - - for _, tagv := range tagvs { - if _, exists := tagvFilter[tagv]; !exists { - tagvFilter[tagv] = struct{}{} - } - } - - tagkvFilter[tagk] = tagvFilter - } - } - - for tagk, tagvFilter := range tagkvFilter { - var tagvs []string - for v := range tagvFilter { - tagvs = append(tagvs, v) - } - tagkv := &cache.TagPair{ - Key: tagk, - Values: tagvs, - } - tagkvs = append(tagkvs, tagkv) - } - - TagkvResp := IndexTagkvResp{ - Endpoints: recv.Endpoints, - Nids: recv.Nids, - Metric: metric, - Tagkv: tagkvs, - } - resp = append(resp, &TagkvResp) - } - render.Data(c, resp, nil) -} - -type GetIndexByFullTagsRecv struct { - Nids []string `json:"nids"` - Endpoints []string `json:"endpoints"` - Metric string `json:"metric"` - Tagkv []*cache.TagPair `json:"tagkv"` -} - -type GetIndexByFullTagsResp struct { - Endpoints []string `json:"endpoints"` - 
Nids []string `json:"nids"` - Metric string `json:"metric"` - Tags []string `json:"tags"` - Step int `json:"step"` - DsType string `json:"dstype"` -} - -type FullmathResp struct { - List []GetIndexByFullTagsResp `json:"list"` - Count int `json:"count"` -} - -func GetIndexByFullTags(c *gin.Context) { - stats.Counter.Set("counter.qp10s", 1) - recv := make([]GetIndexByFullTagsRecv, 0) - errors.Dangerous(c.ShouldBindJSON(&recv)) - - tagFilter := make(map[string]struct{}) - tagsList := make([]string, 0) - counterCount := 0 - var endpoints, nids []string - var mod string - var resp FullmathResp - var resp2 []GetIndexByFullTagsResp - - if strings.Contains(c.GetHeader("Referer"), "ccp/admin") { - for _, r := range recv { - var keys []string - var indexDB *cache.EndpointIndexMap - if len(r.Nids) > 0 { - mod = "nid" - indexDB = cache.NidIndexDB - for _, nid := range r.Nids { - keys = append(keys, nid) - } - } else { - mod = "endpoint" - indexDB = cache.IndexDB - for _, endpoint := range r.Endpoints { - keys = append(keys, endpoint) - } - } - - metric := r.Metric - tagkv := r.Tagkv - step := 0 - dsType := "" - - for _, key := range keys { - if key == "" { - logger.Debugf("invalid request: lack of key param:%v\n", r) - stats.Counter.Set("query.counter.miss", 1) - continue - } - if metric == "" { - logger.Debugf("invalid request: lack of metric param:%v\n", r) - stats.Counter.Set("query.counter.miss", 1) - continue - } - - metricIndex, exists := indexDB.GetMetricIndex(key, metric) - if !exists { - logger.Debugf("can't found index by key:%s metric:%v\n", key, metric) - stats.Counter.Set("query.counter.miss", 1) - continue - } - if mod == "nid" { - nids = append(nids, key) - } else { - endpoints = append(endpoints, key) - } - - metricIndex.RLock() - if step == 0 || dsType == "" { - step = metricIndex.Step - dsType = metricIndex.DsType - } - - countersMap := metricIndex.CounterMap.GetCounters() - metricIndex.RUnlock() - - tagPairs := cache.GetSortTags(cache.TagPairToMap(tagkv)) - 
tags := cache.GetAllCounter(tagPairs) - - if len(tags) == 0 { - counterCount++ - } - - for _, tag := range tags { - // 校验和 tag 有关的 counter 是否存在 - // 如果一个指标,比如 port.listen 有 name=uic,port=8056 和 name=hsp,port=8002。避免产生 4 个曲线 - if _, exists := countersMap[tag]; !exists { - stats.Counter.Set("query.counter.miss", 1) - logger.Debugf("can't found counters by key:%s metric:%v tags:%v\n", key, metric, tag) - continue - } - - counterCount++ - if _, exists := tagFilter[tag]; !exists { - tagsList = append(tagsList, tag) - tagFilter[tag] = struct{}{} - } - } - } - - resp2 = append(resp2, GetIndexByFullTagsResp{ - Endpoints: endpoints, - Nids: nids, - Metric: r.Metric, - Tags: tagsList, - Step: step, - DsType: dsType, - }) - - render.Data(c, resp2, nil) - return - } - } - - for _, r := range recv { - var keys []string - var indexDB *cache.EndpointIndexMap - if len(r.Nids) > 0 { - mod = "nid" - indexDB = cache.NidIndexDB - for _, nid := range r.Nids { - keys = append(keys, nid) - } - } else { - mod = "endpoint" - indexDB = cache.IndexDB - for _, endpoint := range r.Endpoints { - keys = append(keys, endpoint) - } - } - - metric := r.Metric - tagkv := r.Tagkv - step := 0 - dsType := "" - - for _, key := range keys { - if key == "" { - logger.Debugf("invalid request: lack of key param:%v\n", r) - stats.Counter.Set("query.counter.miss", 1) - continue - } - if metric == "" { - logger.Debugf("invalid request: lack of metric param:%v\n", r) - stats.Counter.Set("query.counter.miss", 1) - continue - } - - metricIndex, exists := indexDB.GetMetricIndex(key, metric) - if !exists { - logger.Debugf("can't found index by key:%s metric:%v\n", key, metric) - stats.Counter.Set("query.counter.miss", 1) - continue - } - if mod == "nid" { - nids = append(nids, key) - } else { - endpoints = append(endpoints, key) - } - - metricIndex.RLock() - if step == 0 || dsType == "" { - step = metricIndex.Step - dsType = metricIndex.DsType - } - - countersMap := metricIndex.CounterMap.GetCounters() - 
metricIndex.RUnlock() - - tagPairs := cache.GetSortTags(cache.TagPairToMap(tagkv)) - tags := cache.GetAllCounter(tagPairs) - - if len(tags) == 0 { - counterCount++ - } - - for _, tag := range tags { - // 校验和 tag 有关的 counter 是否存在 - // 如果一个指标,比如 port.listen 有 name=uic,port=8056 和 name=hsp,port=8002。避免产生 4 个曲线 - if _, exists := countersMap[tag]; !exists { - stats.Counter.Set("query.counter.miss", 1) - logger.Debugf("can't found counters by key:%s metric:%v tags:%v\n", key, metric, tag) - continue - } - - counterCount++ - if _, exists := tagFilter[tag]; !exists { - tagsList = append(tagsList, tag) - tagFilter[tag] = struct{}{} - } - } - } - - resp.List = append(resp.List, GetIndexByFullTagsResp{ - Endpoints: endpoints, - Nids: nids, - Metric: r.Metric, - Tags: tagsList, - Step: step, - DsType: dsType, - }) - } - - resp.Count = counterCount - render.Data(c, resp, nil) -} - -type CludeRecv struct { - Endpoints []string `json:"endpoints"` - Nids []string `json:"nids"` - Metric string `json:"metric"` - Include []*cache.TagPair `json:"include"` - Exclude []*cache.TagPair `json:"exclude"` -} - -type XcludeResp struct { - Endpoint string `json:"endpoint"` - Nid string `json:"nid"` - Metric string `json:"metric"` - Tags []string `json:"tags"` - Step int `json:"step"` - DsType string `json:"dstype"` -} - -func GetIndexByClude(c *gin.Context) { - stats.Counter.Set("xclude.qp10s", 1) - - recv := make([]CludeRecv, 0) - errors.Dangerous(c.ShouldBindJSON(&recv)) - - var resp []XcludeResp - - for _, r := range recv { - metric := r.Metric - includeList := r.Include - excludeList := r.Exclude - step := 0 - dsType := "" - - var keys []string - var indexDB *cache.EndpointIndexMap - if len(r.Nids) > 0 { - indexDB = cache.NidIndexDB - for _, nid := range r.Nids { - keys = append(keys, nid) - } - } else { - indexDB = cache.IndexDB - for _, endpoint := range r.Endpoints { - keys = append(keys, endpoint) - } - } - - for _, key := range keys { - tagList := make([]string, 0) - tagFilter := 
make(map[string]struct{}) - - if key == "" { - logger.Debugf("invalid request: lack of key param:%v\n", r) - stats.Counter.Set("xclude.miss", 1) - continue - } - if metric == "" { - logger.Debugf("invalid request: lack of metric param:%v\n", r) - stats.Counter.Set("xclude.miss", 1) - continue - } - - metricIndex, exists := indexDB.GetMetricIndex(key, metric) - if !exists { - tmp := XcludeResp{ - Metric: metric, - Tags: tagList, - Step: step, - DsType: dsType, - } - - if len(r.Nids) > 0 { - tmp.Nid = key - } else { - tmp.Endpoint = key - } - - resp = append(resp, tmp) - - logger.Debugf("can't found index by key:%s metric:%v\n", key, metric) - stats.Counter.Set("xclude.miss", 1) - - continue - } - - metricIndex.RLock() - if step == 0 || dsType == "" { - step = metricIndex.Step - dsType = metricIndex.DsType - } - - // 校验和 tag 有关的 counter 是否存在 - // 如果一个指标,比如 port.listen 有 name=uic,port=8056 和 name=hsp,port=8002。避免产生 4 个曲线 - counterMap := metricIndex.CounterMap.GetCounters() - metricIndex.RUnlock() - - var err error - var tags []string - if len(includeList) == 0 && len(excludeList) == 0 { - for counter := range counterMap { - tagList = append(tagList, counter) - } - tmp := XcludeResp{ - Metric: metric, - Tags: tagList, - Step: step, - DsType: dsType, - } - - if len(r.Nids) > 0 { - tmp.Nid = key - } else { - tmp.Endpoint = key - } - - resp = append(resp, tmp) - continue - } else { - tags, err = indexDB.GetIndexByClude(key, metric, includeList, excludeList) - if err != nil { - logger.Warning(err) - continue - } - } - - for _, tag := range tags { - //过滤掉空字符串 - if tag == "" { - continue - } - - // 校验和 tag 有关的 counter 是否存在 - // 如果一个指标,比如 port.listen 有 name=uic,port=8056 和 name=hsp,port=8002。避免产生 4 个曲线 - if _, exists := counterMap[tag]; !exists { - logger.Debugf("can't found counters by key:%s metric:%v tags:%v\n", key, metric, tag) - stats.Counter.Set("xclude.miss", 1) - continue - } - - if _, exists := tagFilter[tag]; !exists { - tagList = append(tagList, tag) - 
tagFilter[tag] = struct{}{} - } - } - - resp = append(resp, XcludeResp{ - Endpoint: key, - Nid: key, - Metric: metric, - Tags: tagList, - Step: step, - DsType: dsType, - }) - } - } - - render.Data(c, resp, nil) -} - -func DumpIndex(c *gin.Context) { - err := cache.Persist("normal") - errors.Dangerous(err) - - render.Data(c, "ok", nil) -} - -func GetIdxFile(c *gin.Context) { - err := cache.Persist("download") - errors.Dangerous(err) - - traGz := fmt.Sprintf("%s/%s", cache.Config.PersistDir, "db.tar.gz") - c.File(traGz) -} diff --git a/src/modules/index/http/routes/routes.go b/src/modules/index/http/routes/routes.go deleted file mode 100644 index c99e0246..00000000 --- a/src/modules/index/http/routes/routes.go +++ /dev/null @@ -1,29 +0,0 @@ -package routes - -import ( - "github.com/gin-contrib/pprof" - "github.com/gin-gonic/gin" -) - -// Config routes -func Config(r *gin.Engine) { - sys := r.Group("/api/index") - { - sys.GET("/ping", ping) - sys.GET("/pid", pid) - sys.GET("/addr", addr) - sys.GET("/index-total", indexTotal) - - sys.POST("/metrics", GetMetrics) - sys.DELETE("/metrics", DelMetrics) - sys.DELETE("/endpoints", DelIdxByEndpoint) - sys.DELETE("/counter", DelCounter) - sys.POST("/tagkv", GetTagPairs) - sys.POST("/counter/fullmatch", GetIndexByFullTags) - sys.POST("/counter/clude", GetIndexByClude) - sys.POST("/dump", DumpIndex) - sys.GET("/idxfile", GetIdxFile) - } - - pprof.Register(r, "/api/index/debug/pprof") -} diff --git a/src/modules/index/index.go b/src/modules/index/index.go deleted file mode 100644 index fc3f8fb8..00000000 --- a/src/modules/index/index.go +++ /dev/null @@ -1,119 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" - "os/signal" - "syscall" - - "github.com/didi/nightingale/src/common/identity" - "github.com/didi/nightingale/src/common/loggeri" - "github.com/didi/nightingale/src/common/report" - "github.com/didi/nightingale/src/modules/index/cache" - "github.com/didi/nightingale/src/modules/index/config" - 
"github.com/didi/nightingale/src/modules/index/http/routes" - "github.com/didi/nightingale/src/modules/index/rpc" - "github.com/didi/nightingale/src/toolkits/http" - "github.com/didi/nightingale/src/toolkits/stats" - - "github.com/gin-gonic/gin" - "github.com/toolkits/pkg/file" - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/runner" -) - -var ( - vers *bool - help *bool - conf *string - - version = "No Version Provided" -) - -func init() { - vers = flag.Bool("v", false, "display the version.") - help = flag.Bool("h", false, "print this help.") - conf = flag.String("f", "", "specify configuration file.") - flag.Parse() - - if *vers { - fmt.Println("Version:", version) - os.Exit(0) - } - - if *help { - flag.Usage() - os.Exit(0) - } -} - -func main() { - aconf() - pconf() - start() - - cfg := config.Config - - loggeri.Init(cfg.Logger) - go stats.Init("n9e.index") - - identity.Parse() - cache.InitDB(cfg.Cache) - - go report.Init(cfg.Report, "rdb") - go rpc.Start() - - r := gin.New() - routes.Config(r) - http.Start(r, "index", cfg.Logger.Level) - ending() -} - -// auto detect configuration file -func aconf() { - if *conf != "" && file.IsExist(*conf) { - return - } - - *conf = "etc/index.local.yml" - if file.IsExist(*conf) { - return - } - - *conf = "etc/index.yml" - if file.IsExist(*conf) { - return - } - - fmt.Println("no configuration file for index") - os.Exit(1) -} - -// parse configuration file -func pconf() { - if err := config.Parse(*conf); err != nil { - fmt.Println("cannot parse configuration file:", err) - os.Exit(1) - } -} - -func ending() { - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - select { - case <-c: - fmt.Printf("stop signal caught, stopping... 
pid=%d\n", os.Getpid()) - } - - logger.Close() - http.Shutdown() - fmt.Println("sender stopped successfully") -} - -func start() { - runner.Init() - fmt.Println("index start, use configuration file:", *conf) - fmt.Println("runner.Cwd:", runner.Cwd) - fmt.Println("runner.Hostname:", runner.Hostname) -} diff --git a/src/modules/index/rpc/push.go b/src/modules/index/rpc/push.go deleted file mode 100644 index 21735c92..00000000 --- a/src/modules/index/rpc/push.go +++ /dev/null @@ -1,47 +0,0 @@ -package rpc - -import ( - "time" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/index/cache" - "github.com/didi/nightingale/src/toolkits/stats" - - "github.com/toolkits/pkg/logger" -) - -func (idx *Index) Ping(args string, reply *string) error { - *reply = args - return nil -} - -func (idx *Index) IncrPush(args []*dataobj.IndexModel, reply *dataobj.IndexResp) error { - push(args, reply) - stats.Counter.Set("index.incr.in", len(args)) - return nil -} - -func (idx *Index) Push(args []*dataobj.IndexModel, reply *dataobj.IndexResp) error { - push(args, reply) - stats.Counter.Set("index.all.in", len(args)) - - return nil -} - -func push(args []*dataobj.IndexModel, reply *dataobj.IndexResp) { - start := time.Now() - reply.Invalid = 0 - now := time.Now().Unix() - for _, item := range args { - logger.Debugf("<---index %v", item) - - if item.Nid != "" { - cache.NidIndexDB.Push(*item, now) - } else { - cache.IndexDB.Push(*item, now) - } - } - - reply.Total = len(args) - reply.Latency = (time.Now().UnixNano() - start.UnixNano()) / 1000000 -} diff --git a/src/modules/index/rpc/rpc.go b/src/modules/index/rpc/rpc.go deleted file mode 100644 index e24f183b..00000000 --- a/src/modules/index/rpc/rpc.go +++ /dev/null @@ -1,52 +0,0 @@ -package rpc - -import ( - "bufio" - "io" - "net" - "net/rpc" - "os" - "reflect" - "time" - - "github.com/toolkits/pkg/logger" - "github.com/ugorji/go/codec" - - "github.com/didi/nightingale/src/common/address" -) - 
-type Index int - -func Start() { - addr := address.GetRPCListen("index") - - server := rpc.NewServer() - server.Register(new(Index)) - - l, e := net.Listen("tcp", addr) - if e != nil { - logger.Fatal("cannot listen ", addr, e) - os.Exit(1) - } - logger.Info("listening ", addr) - - var mh codec.MsgpackHandle - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - for { - conn, err := l.Accept() - if err != nil { - logger.Warning("listener accept error: ", err) - time.Sleep(time.Duration(100) * time.Millisecond) - continue - } - - var bufconn = struct { - io.Closer - *bufio.Reader - *bufio.Writer - }{conn, bufio.NewReader(conn), bufio.NewWriter(conn)} - - go server.ServeCodec(codec.MsgpackSpecRpc.ServerCodec(bufconn, &mh)) - } -} diff --git a/src/modules/job/config/config.go b/src/modules/job/config/config.go deleted file mode 100644 index f8067a5f..00000000 --- a/src/modules/job/config/config.go +++ /dev/null @@ -1,74 +0,0 @@ -package config - -import ( - "fmt" - - "github.com/toolkits/pkg/file" - - "github.com/didi/nightingale/src/common/identity" - "github.com/didi/nightingale/src/common/loggeri" - "github.com/didi/nightingale/src/toolkits/i18n" -) - -type ConfigT struct { - Logger loggeri.Config `yaml:"logger"` - HTTP httpSection `yaml:"http"` - Tokens []string `yaml:"tokens"` - Output outputSection `yaml:"output"` - I18n i18n.I18nSection `yaml:"i18n"` -} - -type httpSection struct { - Mode string `yaml:"mode"` - CookieName string `yaml:"cookieName"` - CookieDomain string `yaml:"cookieDomain"` -} - -type outputSection struct { - ComeFrom string `yaml:"comeFrom"` - RemotePort int `yaml:"remotePort"` -} - -var Config *ConfigT - -// Parse configuration file -func Parse() error { - ymlFile := getYmlFile() - if ymlFile == "" { - return fmt.Errorf("configuration file not found") - } - - var c ConfigT - err := file.ReadYaml(ymlFile, &c) - if err != nil { - return fmt.Errorf("cannot read yml[%s]: %v", ymlFile, err) - } - - Config = &c - - if Config.I18n.DictPath 
== "" { - Config.I18n.DictPath = "etc/dict.json" - } - - if Config.I18n.Lang == "" { - Config.I18n.Lang = "zh" - } - - fmt.Println("config.file:", ymlFile) - - return identity.Parse() -} - -func getYmlFile() string { - yml := "etc/job.local.yml" - if file.IsExist(yml) { - return yml - } - - yml = "etc/job.yml" - if file.IsExist(yml) { - return yml - } - - return "" -} diff --git a/src/modules/job/http/http_middleware.go b/src/modules/job/http/http_middleware.go deleted file mode 100644 index f5fb41ac..00000000 --- a/src/modules/job/http/http_middleware.go +++ /dev/null @@ -1,97 +0,0 @@ -package http - -import ( - "github.com/gin-gonic/gin" - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/slice" - - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/job/config" -) - -func shouldBeLogin() gin.HandlerFunc { - return func(c *gin.Context) { - c.Set("username", mustUsername(c)) - c.Next() - } -} - -func shouldBeRoot() gin.HandlerFunc { - return func(c *gin.Context) { - username := mustUsername(c) - - user, err := models.UserGet("username=?", username) - dangerous(err) - - if user.IsRoot != 1 { - bomb("forbidden") - } - - c.Set("username", username) - c.Set("user", user) - c.Next() - } -} - -func shouldBeService() gin.HandlerFunc { - return func(c *gin.Context) { - token := c.GetHeader("X-Srv-Token") - if token == "" { - bomb("X-Srv-Token is blank") - } - if !slice.ContainsString(config.Config.Tokens, token) { - bomb("X-Srv-Token[%s] invalid", token) - } - c.Next() - } -} - -func mustUsername(c *gin.Context) string { - username := sessionUsername(c) - if username == "" { - username = headerUsername(c) - } - - if username == "" { - bomb("unauthorized") - } - - return username -} - -func sessionUsername(c *gin.Context) string { - sess, err := models.SessionGetWithCache(readSessionId(c)) - if err != nil { - return "" - } - return sess.Username -} - -func headerUsername(c *gin.Context) string { - token := 
c.GetHeader("X-User-Token") - if token == "" { - return "" - } - - ut, err := models.UserTokenGet("token=?", token) - if err != nil { - logger.Warningf("UserTokenGet[%s] fail: %v", token, err) - return "" - } - - if ut == nil { - return "" - } - - return ut.Username -} - -// ------------ - -func readSessionId(c *gin.Context) string { - sid, err := c.Cookie(config.Config.HTTP.CookieName) - if err != nil { - return "" - } - return sid -} diff --git a/src/modules/job/http/http_server.go b/src/modules/job/http/http_server.go deleted file mode 100644 index 4a9d8bc6..00000000 --- a/src/modules/job/http/http_server.go +++ /dev/null @@ -1,70 +0,0 @@ -package http - -import ( - "context" - "fmt" - "net/http" - "os" - "strings" - "time" - - "github.com/gin-gonic/gin" - - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/common/middleware" - "github.com/didi/nightingale/src/modules/job/config" -) - -var srv = &http.Server{ - ReadTimeout: 10 * time.Second, - WriteTimeout: 10 * time.Second, - MaxHeaderBytes: 1 << 20, -} - -var skipPaths = []string{} - -func Start() { - c := config.Config - - loggerMid := middleware.LoggerWithConfig(middleware.LoggerConfig{SkipPaths: skipPaths}) - recoveryMid := middleware.Recovery() - - if strings.ToLower(c.HTTP.Mode) == "release" { - gin.SetMode(gin.ReleaseMode) - middleware.DisableConsoleColor() - } - - r := gin.New() - r.Use(loggerMid, recoveryMid) - - Config(r) - - srv.Addr = address.GetHTTPListen("job") - srv.Handler = r - - go func() { - fmt.Println("http.listening:", srv.Addr) - if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed { - fmt.Printf("listening %s occur error: %s\n", srv.Addr, err) - os.Exit(3) - } - }() -} - -// Shutdown http server -func Shutdown() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - if err := srv.Shutdown(ctx); err != nil { - fmt.Println("cannot shutdown http server:", err) - os.Exit(2) - } - - // catching 
ctx.Done(). timeout of 5 seconds. - select { - case <-ctx.Done(): - fmt.Println("shutdown http server timeout of 5 seconds.") - default: - fmt.Println("http server stopped") - } -} diff --git a/src/modules/job/http/router.go b/src/modules/job/http/router.go deleted file mode 100644 index fdadc6f7..00000000 --- a/src/modules/job/http/router.go +++ /dev/null @@ -1,43 +0,0 @@ -package http - -import "github.com/gin-gonic/gin" - -func Config(r *gin.Engine) { - notLogin := r.Group("/api/job-ce") - { - notLogin.GET("/ping", ping) - notLogin.POST("/callback", taskCallback) - notLogin.GET("/task/:id/stdout", taskStdout) - notLogin.GET("/task/:id/stderr", taskStderr) - notLogin.GET("/task/:id/state", apiTaskState) - notLogin.GET("/task/:id/result", apiTaskResult) - notLogin.GET("/task/:id/host/:host/output", taskHostOutput) - notLogin.GET("/task/:id/host/:host/stdout", taskHostStdout) - notLogin.GET("/task/:id/host/:host/stderr", taskHostStderr) - notLogin.GET("/task/:id/stdout.txt", taskStdoutTxt) - notLogin.GET("/task/:id/stderr.txt", taskStderrTxt) - notLogin.GET("/task/:id/stdout.json", apiTaskJSONStdouts) - notLogin.GET("/task/:id/stderr.json", apiTaskJSONStderrs) - } - - userLogin := r.Group("/api/job-ce").Use(shouldBeLogin()) - { - userLogin.GET("/task-tpls", taskTplGets) - userLogin.POST("/task-tpls", taskTplPost) - userLogin.GET("/task-tpl/:id", taskTplGet) - userLogin.PUT("/task-tpl/:id", taskTplPut) - userLogin.DELETE("/task-tpl/:id", taskTplDel) - userLogin.POST("/task-tpl/:id/run", taskTplRun) - userLogin.PUT("/task-tpls/tags", taskTplTagsPut) - userLogin.PUT("/task-tpls/node", taskTplNodePut) - - userLogin.POST("/tasks", taskPost) - userLogin.GET("/tasks", taskGets) - userLogin.GET("/task/:id", taskView) - userLogin.PUT("/task/:id/action", taskActionPut) - userLogin.PUT("/task/:id/host", taskHostPut) - - // 专门针对工单系统开发的接口 - userLogin.POST("/run/:id", taskRunForTT) - } -} diff --git a/src/modules/job/http/router_health.go b/src/modules/job/http/router_health.go 
deleted file mode 100644 index 4b6a7523..00000000 --- a/src/modules/job/http/router_health.go +++ /dev/null @@ -1,7 +0,0 @@ -package http - -import "github.com/gin-gonic/gin" - -func ping(c *gin.Context) { - c.String(200, "pong") -} diff --git a/src/modules/job/job.go b/src/modules/job/job.go deleted file mode 100644 index a98d90d1..00000000 --- a/src/modules/job/job.go +++ /dev/null @@ -1,112 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" - "os/signal" - "syscall" - - _ "github.com/go-sql-driver/mysql" - - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/runner" - - "github.com/didi/nightingale/src/common/identity" - "github.com/didi/nightingale/src/common/loggeri" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/job/config" - "github.com/didi/nightingale/src/modules/job/http" - "github.com/didi/nightingale/src/modules/job/rpc" - "github.com/didi/nightingale/src/modules/job/timer" - "github.com/didi/nightingale/src/toolkits/i18n" -) - -var ( - vers *bool - help *bool - conf *string - - version = "No Version Provided" -) - -func init() { - vers = flag.Bool("v", false, "display the version.") - help = flag.Bool("h", false, "print this help.") - conf = flag.String("f", "", "specify configuration file.") - flag.Parse() - - if *vers { - fmt.Println("Version:", version) - os.Exit(0) - } - - if *help { - flag.Usage() - os.Exit(0) - } - - runner.Init() - fmt.Println("runner.cwd:", runner.Cwd) - fmt.Println("runner.hostname:", runner.Hostname) -} - -func checkIdentity() { - ip, err := identity.GetIP() - if err != nil { - fmt.Println("cannot get ip:", err) - os.Exit(1) - } - - fmt.Println("ip:", ip) - - if ip == "127.0.0.1" { - fmt.Println("identity: 127.0.0.1, cannot work") - os.Exit(2) - } -} - -func main() { - parseConf() - - loggeri.Init(config.Config.Logger) - - checkIdentity() - - // 初始化数据库和相关数据 - models.InitMySQL("rdb", "job") - - go timer.Heartbeat() - go timer.Schedule() - go timer.CleanLong() - - // 
将task_host_doing表缓存到内存里,减少DB压力 - timer.CacheHostDoing() - - i18n.Init(config.Config.I18n) - - go rpc.Start() - http.Start() - - endingProc() -} - -func parseConf() { - if err := config.Parse(); err != nil { - fmt.Println("cannot parse configuration file:", err) - os.Exit(1) - } -} - -func endingProc() { - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - select { - case <-c: - fmt.Printf("stop signal caught, stopping... pid=%d\n", os.Getpid()) - } - - logger.Close() - http.Shutdown() - fmt.Println("process stopped successfully") -} diff --git a/src/modules/job/rpc/rpc.go b/src/modules/job/rpc/rpc.go deleted file mode 100644 index 36d48475..00000000 --- a/src/modules/job/rpc/rpc.go +++ /dev/null @@ -1,45 +0,0 @@ -package rpc - -import ( - "fmt" - "net" - "net/rpc" - "os" - - "github.com/toolkits/pkg/logger" - - "github.com/didi/nightingale/src/common/address" -) - -// Scheduler rpc cursor -type Scheduler int - -// Start rpc server -func Start() { - addr := address.GetRPCListen("job") - - tcpAddr, err := net.ResolveTCPAddr("tcp", addr) - if err != nil { - fmt.Println("net.ResolveTCPAddr fail:", err) - os.Exit(2) - } - - listener, err := net.ListenTCP("tcp", tcpAddr) - if err != nil { - fmt.Printf("listen %s fail: %s\n", addr, err) - os.Exit(3) - } else { - fmt.Println("rpc.listening:", addr) - } - - rpc.Register(new(Scheduler)) - - for { - conn, err := listener.Accept() - if err != nil { - logger.Warning("listener.Accept occur error:", err) - continue - } - go rpc.ServeConn(conn) - } -} diff --git a/src/modules/judge/backend/redi/funcs.go b/src/modules/judge/backend/redi/funcs.go deleted file mode 100644 index ef337bbb..00000000 --- a/src/modules/judge/backend/redi/funcs.go +++ /dev/null @@ -1,46 +0,0 @@ -package redi - -import ( - "encoding/json" - "errors" - "fmt" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/toolkits/stats" - - "github.com/toolkits/pkg/logger" -) - -func 
Push(event *dataobj.Event) error { - bytes, err := json.Marshal(event) - if err != nil { - err = fmt.Errorf("redis publish failed, error:%v", err) - return err - } - - succ := false - if len(RedisConnPools) == 0 { - return errors.New("redis publish failed: empty conn pool") - } - - for i := range RedisConnPools { - rc := RedisConnPools[i].Get() - defer rc.Close() - - // 如果写入用lpush 则读出应该用 rpop - // 如果写入用rpush 则读出应该用 lpop - stats.Counter.Set("redis.push", 1) - _, err = rc.Do("LPUSH", event.Partition, string(bytes)) - if err == nil { - succ = true - break - } - } - - if succ { - logger.Debugf("redis publish succ, event: %s", string(bytes)) - return nil - } - - return fmt.Errorf("redis publish failed finally:%v", err) -} diff --git a/src/modules/judge/backend/redi/redis.go b/src/modules/judge/backend/redi/redis.go deleted file mode 100644 index 83c9a0f9..00000000 --- a/src/modules/judge/backend/redi/redis.go +++ /dev/null @@ -1,96 +0,0 @@ -package redi - -import ( - "log" - "time" - - "github.com/didi/nightingale/src/toolkits/stats" - "github.com/garyburd/redigo/redis" - "github.com/toolkits/pkg/logger" -) - -var RedisConnPools []*redis.Pool -var Config RedisSection - -type RedisSection struct { - Addrs []string `yaml:"addrs"` - Pass string `yaml:"pass"` - DB int `yaml:"db"` - Idle int `yaml:"idle"` - Timeout TimeoutSection `yaml:"timeout"` - Prefix string `yaml:"prefix"` -} - -type TimeoutSection struct { - Conn int `yaml:"conn"` - Read int `yaml:"read"` - Write int `yaml:"write"` -} - -func Init(cfg RedisSection) { - Config = cfg - - addrs := cfg.Addrs - pass := cfg.Pass - db := cfg.DB - maxIdle := cfg.Idle - idleTimeout := 240 * time.Second - - connTimeout := time.Duration(cfg.Timeout.Conn) * time.Millisecond - readTimeout := time.Duration(cfg.Timeout.Read) * time.Millisecond - writeTimeout := time.Duration(cfg.Timeout.Write) * time.Millisecond - for i := range addrs { - addr := addrs[i] - redisConnPool := &redis.Pool{ - MaxIdle: maxIdle, - IdleTimeout: idleTimeout, 
- Dial: func() (redis.Conn, error) { - c, err := redis.Dial("tcp", addr, redis.DialConnectTimeout(connTimeout), redis.DialReadTimeout(readTimeout), redis.DialWriteTimeout(writeTimeout)) - if err != nil { - logger.Errorf("conn redis err:%v", err) - stats.Counter.Set("redis.conn.failed", 1) - return nil, err - } - - if pass != "" { - if _, err := c.Do("AUTH", pass); err != nil { - c.Close() - logger.Errorf("ERR: redis auth fail:%v", err) - stats.Counter.Set("redis.conn.failed", 1) - - return nil, err - } - } - - if db != 0 { - if _, err := c.Do("SELECT", db); err != nil { - c.Close() - logger.Error("redis select db fail, db: ", db) - stats.Counter.Set("redis.conn.failed", 1) - return nil, err - } - } - - return c, err - }, - TestOnBorrow: PingRedis, - } - RedisConnPools = append(RedisConnPools, redisConnPool) - } - -} - -func PingRedis(c redis.Conn, t time.Time) error { - _, err := c.Do("ping") - if err != nil { - log.Println("ERR: ping redis fail", err) - } - return err -} - -func CloseRedis() { - log.Println("INFO: closing redis...") - for i := range RedisConnPools { - RedisConnPools[i].Close() - } -} diff --git a/src/modules/judge/config/config.go b/src/modules/judge/config/config.go deleted file mode 100644 index a034db6a..00000000 --- a/src/modules/judge/config/config.go +++ /dev/null @@ -1,94 +0,0 @@ -package config - -import ( - "bytes" - "fmt" - "strconv" - - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/common/identity" - "github.com/didi/nightingale/src/common/loggeri" - "github.com/didi/nightingale/src/common/report" - "github.com/didi/nightingale/src/modules/judge/backend/query" - "github.com/didi/nightingale/src/modules/judge/backend/redi" - "github.com/didi/nightingale/src/modules/judge/stra" - - "github.com/spf13/viper" - "github.com/toolkits/pkg/file" -) - -type ConfYaml struct { - Logger loggeri.Config `yaml:"logger"` - Query query.SeriesQuerySection `yaml:"query"` - Redis redi.RedisSection `yaml:"redis"` - 
Strategy stra.StrategySection `yaml:"strategy"` - Identity identity.Identity `yaml:"identity"` - Report report.ReportSection `yaml:"report"` - NodataConcurrency int `yaml:"nodataConcurrency"` -} - -var ( - Config *ConfYaml -) - -func Parse(conf string) error { - bs, err := file.ReadBytes(conf) - if err != nil { - return fmt.Errorf("cannot read yml[%s]: %v", conf, err) - } - - viper.SetConfigType("yaml") - err = viper.ReadConfig(bytes.NewBuffer(bs)) - if err != nil { - return fmt.Errorf("cannot read yml[%s]: %v", conf, err) - } - - viper.SetDefault("query", map[string]interface{}{ - "maxConn": 100, - "maxIdle": 10, - "connTimeout": 1000, - "callTimeout": 2000, - "indexCallTimeout": 2000, - "indexMod": "index", - "indexPath": "/api/index/counter/clude", - }) - - viper.SetDefault("redis.idle", 5) - viper.SetDefault("redis.prefix", "/n9e") - viper.SetDefault("redis.timeout", map[string]int{ - "conn": 500, - "read": 3000, - "write": 3000, - }) - - viper.SetDefault("strategy", map[string]interface{}{ - "partitionApi": "/api/mon/stras/effective?instance=%s:%s", - "updateInterval": 9000, - "indexInterval": 60000, - "timeout": 5000, - "mod": "monapi", - "eventPrefix": "n9e", - }) - - viper.SetDefault("report", map[string]interface{}{ - "mod": "judge", - "enabled": true, - "interval": 4000, - "timeout": 3000, - "api": "api/hbs/heartbeat", - "remark": "", - }) - - viper.SetDefault("nodataConcurrency", 1000) - viper.SetDefault("pushUrl", "http://127.0.0.1:2058/v1/push") - - err = viper.Unmarshal(&Config) - if err != nil { - return fmt.Errorf("cannot read yml[%s]: %v\n", conf, err) - } - - Config.Report.HTTPPort = strconv.Itoa(address.GetHTTPPort("judge")) - Config.Report.RPCPort = strconv.Itoa(address.GetRPCPort("judge")) - - return err -} diff --git a/src/modules/judge/http/http.go b/src/modules/judge/http/http.go deleted file mode 100644 index f00d555a..00000000 --- a/src/modules/judge/http/http.go +++ /dev/null @@ -1,65 +0,0 @@ -package http - -import ( - "context" - "log" 
- "net/http" - _ "net/http/pprof" - "time" - - "github.com/didi/nightingale/src/modules/judge/http/routes" - "github.com/didi/nightingale/src/toolkits/http/middleware" - - "github.com/gin-gonic/gin" -) - -var srv = &http.Server{ - ReadTimeout: 10 * time.Second, - WriteTimeout: 10 * time.Second, - MaxHeaderBytes: 1 << 20, -} - -// Start http server -func Start(listen string, logLevel string) { - loggerMid := middleware.LoggerWithConfig(middleware.LoggerConfig{}) - recoveryMid := middleware.Recovery() - - if logLevel != "DEBUG" { - gin.SetMode(gin.ReleaseMode) - middleware.DisableConsoleColor() - } else { - srv.WriteTimeout = 120 * time.Second - } - - r := gin.New() - r.Use(loggerMid, recoveryMid) - - routes.Config(r) - - srv.Addr = listen - srv.Handler = r - - go func() { - log.Println("starting http server, listening on:", srv.Addr) - if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed { - log.Fatalf("listening %s occur error: %s\n", srv.Addr, err) - } - }() -} - -// Shutdown http server -func Shutdown() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - if err := srv.Shutdown(ctx); err != nil { - log.Fatalln("cannot shutdown http server:", err) - } - - // catching ctx.Done(). timeout of 5 seconds. 
- select { - case <-ctx.Done(): - log.Println("shutdown http server timeout of 5 seconds.") - default: - log.Println("http server stopped") - } -} diff --git a/src/modules/judge/http/routes/health.go b/src/modules/judge/http/routes/health.go deleted file mode 100644 index cfd2820e..00000000 --- a/src/modules/judge/http/routes/health.go +++ /dev/null @@ -1,47 +0,0 @@ -package routes - -import ( - "fmt" - "os" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/judge/cache" - "github.com/didi/nightingale/src/toolkits/http/render" - - "github.com/gin-gonic/gin" - "github.com/toolkits/pkg/errors" -) - -func ping(c *gin.Context) { - c.String(200, "pong") -} - -func addr(c *gin.Context) { - c.String(200, c.Request.RemoteAddr) -} - -func pid(c *gin.Context) { - c.String(200, fmt.Sprintf("%d", os.Getpid())) -} - -func getStra(c *gin.Context) { - sid := urlParamInt64(c, "id") - - stra, exists := cache.Strategy.Get(sid) - if exists { - render.Data(c, stra, nil) - return - } - - stra, _ = cache.NodataStra.Get(sid) - render.Data(c, stra, nil) -} - -func getData(c *gin.Context) { - var input dataobj.JudgeItem - errors.Dangerous(c.ShouldBind(&input)) - pk := input.MD5() - linkedList, _ := cache.HistoryBigMap[pk[0:2]].Get(pk) - data := linkedList.HistoryData() - render.Data(c, data, nil) -} diff --git a/src/modules/judge/http/routes/routes.go b/src/modules/judge/http/routes/routes.go deleted file mode 100644 index ef1c6f49..00000000 --- a/src/modules/judge/http/routes/routes.go +++ /dev/null @@ -1,43 +0,0 @@ -package routes - -import ( - "strconv" - - "github.com/gin-contrib/pprof" - "github.com/gin-gonic/gin" - "github.com/toolkits/pkg/errors" -) - -// Config routes -func Config(r *gin.Engine) { - sys := r.Group("/api/judge") - { - sys.GET("/ping", ping) - sys.GET("/pid", pid) - sys.GET("/addr", addr) - sys.GET("/stra/:id", getStra) - sys.POST("/data", getData) - } - - pprof.Register(r, "/api/judge/debug/pprof") -} - -func 
urlParamStr(c *gin.Context, field string) string { - val := c.Param(field) - - if val == "" { - errors.Bomb("[%s] is blank", field) - } - - return val -} - -func urlParamInt64(c *gin.Context, field string) int64 { - strval := urlParamStr(c, field) - intval, err := strconv.ParseInt(strval, 10, 64) - if err != nil { - errors.Bomb("cannot convert %s to int64", strval) - } - - return intval -} diff --git a/src/modules/judge/judge.go b/src/modules/judge/judge.go deleted file mode 100644 index e7e01100..00000000 --- a/src/modules/judge/judge.go +++ /dev/null @@ -1,137 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" - "os/signal" - "syscall" - - "github.com/didi/nightingale/src/common/identity" - "github.com/didi/nightingale/src/common/loggeri" - "github.com/didi/nightingale/src/common/report" - "github.com/didi/nightingale/src/modules/judge/backend/query" - "github.com/didi/nightingale/src/modules/judge/backend/redi" - "github.com/didi/nightingale/src/modules/judge/cache" - "github.com/didi/nightingale/src/modules/judge/config" - "github.com/didi/nightingale/src/modules/judge/http/routes" - "github.com/didi/nightingale/src/modules/judge/judge" - "github.com/didi/nightingale/src/modules/judge/rpc" - "github.com/didi/nightingale/src/modules/judge/stra" - "github.com/didi/nightingale/src/toolkits/http" - "github.com/didi/nightingale/src/toolkits/stats" - - "github.com/gin-gonic/gin" - "github.com/toolkits/pkg/file" - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/runner" -) - -var ( - vers *bool - help *bool - conf *string - - version = "No Version Provided" -) - -func init() { - vers = flag.Bool("v", false, "display the version.") - help = flag.Bool("h", false, "print this help.") - conf = flag.String("f", "", "specify configuration file.") - flag.Parse() - - if *vers { - fmt.Println("Version:", version) - os.Exit(0) - } - - if *help { - flag.Usage() - os.Exit(0) - } -} - -func main() { - aconf() - pconf() - start() - - cfg := config.Config - 
identity.Parse() - loggeri.Init(cfg.Logger) - go stats.Init("n9e.judge") - - query.Init(cfg.Query, "rdb") - redi.Init(cfg.Redis) - - cache.InitHistoryBigMap() - cache.Strategy = cache.NewStrategyMap() - cache.NodataStra = cache.NewStrategyMap() - cache.SeriesMap = cache.NewIndexMap() - - go rpc.Start() - - go stra.GetStrategy(cfg.Strategy) - go judge.NodataJudge(cfg.NodataConcurrency) - go report.Init(cfg.Report, "rdb") - - if cfg.Logger.Level != "DEBUG" { - gin.SetMode(gin.ReleaseMode) - } - - r := gin.New() - routes.Config(r) - go http.Start(r, "judge", cfg.Logger.Level) - - ending() -} - -// auto detect configuration file -func aconf() { - if *conf != "" && file.IsExist(*conf) { - return - } - - *conf = "etc/judge.local.yml" - if file.IsExist(*conf) { - return - } - - *conf = "etc/judge.yml" - if file.IsExist(*conf) { - return - } - - fmt.Println("no configuration file for judge") - os.Exit(1) -} - -// parse configuration file -func pconf() { - if err := config.Parse(*conf); err != nil { - fmt.Println("cannot parse configuration file:", err) - os.Exit(1) - } -} - -func start() { - runner.Init() - fmt.Println("judge start, use configuration file:", *conf) - fmt.Println("runner.Cwd:", runner.Cwd) - fmt.Println("runner.Hostname:", runner.Hostname) -} - -func ending() { - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - select { - case <-c: - fmt.Printf("stop signal caught, stopping... 
pid=%d\n", os.Getpid()) - } - - logger.Close() - http.Shutdown() - redi.CloseRedis() - fmt.Println("alarm stopped successfully") -} diff --git a/src/modules/judge/readme.md b/src/modules/judge/readme.md deleted file mode 100644 index aeaf5468..00000000 --- a/src/modules/judge/readme.md +++ /dev/null @@ -1,2 +0,0 @@ -push 数据 - diff --git a/src/modules/judge/rpc/push.go b/src/modules/judge/rpc/push.go deleted file mode 100644 index f93bdcee..00000000 --- a/src/modules/judge/rpc/push.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2017 Xiaomi, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rpc - -import ( - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/judge/cache" - "github.com/didi/nightingale/src/modules/judge/judge" - "github.com/didi/nightingale/src/toolkits/stats" - - "github.com/toolkits/pkg/logger" -) - -type Judge int - -func (j *Judge) Ping(req dataobj.NullRpcRequest, resp *dataobj.SimpleRpcResponse) error { - return nil -} - -func (j *Judge) Send(items []*dataobj.JudgeItem, resp *dataobj.SimpleRpcResponse) error { - // 把当前时间的计算放在最外层,是为了减少获取时间时的系统调用开销 - - for _, item := range items { - now := item.Timestamp - pk := item.MD5() - logger.Debugf("recv-->%+v", item) - stats.Counter.Set("push.in", 1) - - go judge.ToJudge(cache.HistoryBigMap[pk[0:2]], pk, item, now) - } - - return nil -} diff --git a/src/modules/judge/rpc/rpc.go b/src/modules/judge/rpc/rpc.go deleted file mode 100644 index d722a4ad..00000000 --- a/src/modules/judge/rpc/rpc.go +++ /dev/null @@ -1,68 +0,0 @@ -package rpc - -import ( - "bufio" - "io" - "net" - "net/rpc" - "os" - "reflect" - "time" - - "github.com/didi/nightingale/src/common/address" - - "github.com/toolkits/pkg/logger" - "github.com/ugorji/go/codec" -) - -var Close_chan, Close_done_chan chan int - -func init() { - Close_chan = make(chan int, 1) - Close_done_chan = make(chan int, 1) -} - -func Start() { - addr := address.GetRPCListen("judge") - - server := rpc.NewServer() - server.Register(new(Judge)) - - l, e := net.Listen("tcp", addr) - if e != nil { - logger.Fatal("cannot listen ", addr, e) - os.Exit(1) - } - logger.Info("rpc listening ", addr) - - var mh codec.MsgpackHandle - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - go func() { - for { - conn, err := l.Accept() - if err != nil { - logger.Error("listener accept error: ", err) - time.Sleep(time.Duration(100) * time.Millisecond) - continue - } - - var bufconn = struct { - io.Closer - *bufio.Reader - *bufio.Writer - }{conn, bufio.NewReader(conn), bufio.NewWriter(conn)} - - go 
server.ServeCodec(codec.MsgpackSpecRpc.ServerCodec(bufconn, &mh)) - } - }() - - select { - case <-Close_chan: - logger.Info("rpc, recv sigout and exiting...") - l.Close() - Close_done_chan <- 1 - - return - } -} diff --git a/src/modules/judge/stra/stra.go b/src/modules/judge/stra/stra.go deleted file mode 100644 index 7562415b..00000000 --- a/src/modules/judge/stra/stra.go +++ /dev/null @@ -1,101 +0,0 @@ -package stra - -import ( - "fmt" - "math/rand" - "time" - - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/net/httplib" - - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/common/identity" - "github.com/didi/nightingale/src/common/report" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/judge/cache" - "github.com/didi/nightingale/src/toolkits/stats" -) - -type StrategySection struct { - PartitionApi string `yaml:"partitionApi"` - Timeout int `yaml:"timeout"` - Token string `yaml:"token"` - UpdateInterval int `yaml:"updateInterval"` - IndexInterval int `yaml:"indexInterval"` - ReportInterval int `yaml:"reportInterval"` - Mod string `yaml:"mod"` -} - -type StrasResp struct { - Data []*models.Stra `json:"dat"` - Err string `json:"err"` -} - -func GetStrategy(cfg StrategySection) { - t1 := time.NewTicker(time.Duration(cfg.UpdateInterval) * time.Millisecond) - getStrategy(cfg) - for { - <-t1.C - getStrategy(cfg) - } -} - -func getStrategy(opts StrategySection) { - addrs := address.GetHTTPAddresses(opts.Mod) - if len(addrs) == 0 { - logger.Error("empty config addr") - return - } - - var resp StrasResp - perm := rand.Perm(len(addrs)) - for i := range perm { - ident, err := identity.GetIdent() - if err != nil { - logger.Error("err") - } - - url := fmt.Sprintf("http://%s"+opts.PartitionApi, addrs[perm[i]], ident, report.Config.RPCPort) - err = httplib.Get(url).SetTimeout(time.Duration(opts.Timeout) * time.Millisecond).ToJSON(&resp) - if err != nil { - logger.Warningf("get strategy from 
remote failed, error:%v", err) - stats.Counter.Set("stra.get.err", 1) - continue - } - - if resp.Err != "" { - logger.Warningf("get strategy from remote failed, error:%v", resp.Err) - stats.Counter.Set("stra.get.err", 1) - continue - } - - if len(resp.Data) > 0 { - break - } - } - - straCount := len(resp.Data) - stats.Counter.Set("stra.count", straCount) - if straCount == 0 { //获取策略数为0,不正常,不更新策略缓存 - return - } - - for _, stra := range resp.Data { - if len(stra.Exprs) < 1 { - logger.Warningf("strategy:%v exprs < 1", stra) - stats.Counter.Set("stra.illegal", 1) - continue - } - - if stra.Exprs[0].Func == "nodata" { - stats.Counter.Set("stra.nodata", 1) - cache.NodataStra.Set(stra.Id, stra) - } else { - stats.Counter.Set("stra.common", 1) - cache.Strategy.Set(stra.Id, stra) - } - } - - cache.NodataStra.Clean() - cache.Strategy.Clean() -} diff --git a/src/modules/monapi/acache/init.go b/src/modules/monapi/acache/init.go deleted file mode 100644 index 989c38aa..00000000 --- a/src/modules/monapi/acache/init.go +++ /dev/null @@ -1,6 +0,0 @@ -package acache - -func Init() { - MaskCache = NewMaskCache() - StraCache = NewStraCache() -} diff --git a/src/modules/monapi/acache/stra.go b/src/modules/monapi/acache/stra.go deleted file mode 100644 index b86b0451..00000000 --- a/src/modules/monapi/acache/stra.go +++ /dev/null @@ -1,35 +0,0 @@ -package acache - -import ( - "sync" - - "github.com/didi/nightingale/src/models" -) - -type StraCacheMap struct { - sync.RWMutex - Data map[int64]*models.Stra -} - -var StraCache *StraCacheMap - -func NewStraCache() *StraCacheMap { - return &StraCacheMap{ - Data: make(map[int64]*models.Stra), - } -} - -func (this *StraCacheMap) SetAll(m map[int64]*models.Stra) { - this.Lock() - defer this.Unlock() - this.Data = m -} - -func (this *StraCacheMap) GetById(id int64) (*models.Stra, bool) { - this.RLock() - defer this.RUnlock() - - value, exists := this.Data[id] - - return value, exists -} diff --git a/src/modules/monapi/config/const.go 
b/src/modules/monapi/config/const.go deleted file mode 100644 index a9179fb4..00000000 --- a/src/modules/monapi/config/const.go +++ /dev/null @@ -1,16 +0,0 @@ -package config - -const Version = 1 - -const JudgesReplicas = 500 -const ProbersReplicas = 500 -const DetectorReplicas = 500 - -const ( - RECOVERY = "recovery" - ALERT = "alert" -) - -var ( - EventTypeMap = map[string]string{RECOVERY: "恢复", ALERT: "报警"} -) diff --git a/src/modules/monapi/config/funcs.go b/src/modules/monapi/config/funcs.go deleted file mode 100644 index e031b64b..00000000 --- a/src/modules/monapi/config/funcs.go +++ /dev/null @@ -1,76 +0,0 @@ -package config - -import ( - "fmt" - "os" - "strings" - - "github.com/toolkits/pkg/logger" -) - -// InitLogger init logger toolkits -func InitLogger() { - c := Get().Logger - - lb, err := logger.NewFileBackend(c.Dir) - if err != nil { - fmt.Println("cannot init logger:", err) - os.Exit(1) - } - - lb.SetRotateByHour(true) - lb.SetKeepHours(c.KeepHours) - - logger.SetLogging(c.Level, lb) -} - -// slice set -func Set(s []string) []string { - m := make(map[string]interface{}) - for i := 0; i < len(s); i++ { - if strings.TrimSpace(s[i]) == "" { - continue - } - - m[s[i]] = 1 - } - - s2 := []string{} - for k := range m { - s2 = append(s2, k) - } - - return s2 -} - -func InSlice(val string, slice []string) bool { - for i := 0; i < len(slice); i++ { - if slice[i] == val { - return true - } - } - - return false -} - -func SplitN(m, n int) [][]int { - var res [][]int - - if n <= 0 { - return [][]int{[]int{0, m}} - } - - for i := 0; i < m; i = i + n { - var start, end int - start = i - end = i + n - - if end >= m { - end = m - } - - res = append(res, []int{start, end}) - - } - return res -} diff --git a/src/modules/monapi/config/yaml.go b/src/modules/monapi/config/yaml.go deleted file mode 100644 index a9d18010..00000000 --- a/src/modules/monapi/config/yaml.go +++ /dev/null @@ -1,195 +0,0 @@ -package config - -import ( - "bytes" - "fmt" - "sync" - - 
"github.com/didi/nightingale/src/toolkits/i18n" - - "github.com/spf13/viper" - "github.com/toolkits/pkg/file" -) - -type ConfYaml struct { - Tokens []string `yaml:"tokens"` - Logger loggerSection `yaml:"logger"` - HTTP httpSection `yaml:"http"` - Proxy proxySection `yaml:"proxy"` - Region []string `yaml:"region"` - Habits habitsSection `yaml:"habits"` - Report reportSection `yaml:"report"` - AlarmEnabled bool `yaml:"alarmEnabled"` - TicketEnabled bool `yaml:"ticketEnabled"` - Redis redisSection `yaml:"redis"` - Queue queueSection `yaml:"queue"` - Cleaner cleanerSection `yaml:"cleaner"` - Merge mergeSection `yaml:"merge"` - Notify map[string][]string `yaml:"notify"` - Link linkSection `yaml:"link"` - IndexMod string `yaml:"indexMod"` - I18n i18n.I18nSection `yaml:"i18n"` - Tpl tplSection `yaml:"tpl"` -} - -type tplSection struct { - AlertPath string `yaml:"alertPath"` - ScreenPath string `yaml:"screenPath"` -} - -type mergeSection struct { - Hash string `yaml:"hash"` - Max int `yaml:"max"` - Interval int `yaml:"interval"` -} - -type cleanerSection struct { - Days int `yaml:"days"` - Batch int `yaml:"batch"` - Converge bool `yaml:"converge"` -} - -type queueSection struct { - High []interface{} `yaml:"high"` - Low []interface{} `yaml:"low"` - Callback string `yaml:"callback"` -} - -type mvpSection struct { - URL string `yaml:"url"` - BID int `yaml:"bid"` - TPL map[string]string `yaml:"tpl"` -} - -type linkSection struct { - Stra string `yaml:"stra"` - Event string `yaml:"event"` - Claim string `yaml:"claim"` -} - -type redisSection struct { - Addr string `yaml:"addr"` - Pass string `yaml:"pass"` - Idle int `yaml:"idle"` - Timeout timeoutSection `yaml:"timeout"` -} - -type timeoutSection struct { - Conn int `yaml:"conn"` - Read int `yaml:"read"` - Write int `yaml:"write"` -} - -type identitySection struct { - Specify string `yaml:"specify"` - Shell string `yaml:"shell"` -} - -type reportSection struct { - Addrs []string `yaml:"addrs"` - Interval int `yaml:"interval"` 
-} - -type habitsSection struct { - Identity string `yaml:"identity"` -} - -type loggerSection struct { - Dir string `yaml:"dir"` - Level string `yaml:"level"` - KeepHours uint `yaml:"keepHours"` -} - -type httpSection struct { - Mode string `yaml:"mode"` - CookieName string `yaml:"cookieName"` - CookieDomain string `yaml:"cookieDomain"` -} - -type proxySection struct { - Transfer string `yaml:"transfer"` - Index string `yaml:"index"` -} - -var ( - yaml *ConfYaml - lock = new(sync.RWMutex) -) - -// Get configuration file -func Get() *ConfYaml { - lock.RLock() - defer lock.RUnlock() - return yaml -} - -// Parse configuration file -func Parse(ymlfile string) error { - bs, err := file.ReadBytes(ymlfile) - if err != nil { - return fmt.Errorf("cannot read yml[%s]: %v", ymlfile, err) - } - - lock.Lock() - defer lock.Unlock() - - viper.SetConfigType("yaml") - err = viper.ReadConfig(bytes.NewBuffer(bs)) - if err != nil { - return fmt.Errorf("cannot read yml[%s]: %v", ymlfile, err) - } - - viper.SetDefault("proxy", map[string]string{ - "transfer": "http://127.0.0.1:7900", - "index": "http://127.0.0.1:7904", - }) - - viper.SetDefault("report", map[string]interface{}{ - "interval": 4000, - }) - - viper.SetDefault("alarmEnabled", "true") - viper.SetDefault("indexMod", "index") - - viper.SetDefault("habits.identity", "ip") - - viper.SetDefault("i18n.dictPath", "etc/dict.json") - viper.SetDefault("i18n.lang", "zh") - - viper.SetDefault("redis.idle", 5) - viper.SetDefault("redis.timeout", map[string]int{ - "conn": 500, - "read": 3000, - "write": 3000, - }) - - viper.SetDefault("merge", map[string]interface{}{ - "hash": "mon-merge", - "max": 100, //merge的最大条数 - "interval": 10, //merge等待的数据,单位秒 - }) - - viper.SetDefault("queue", map[string]interface{}{ - "high": []string{"/n9e/event/p1"}, - "low": []string{"/n9e/event/p2", "/n9e/event/p3"}, - "callback": "/ecmc.io/alarm/callback", - }) - - viper.SetDefault("cleaner", map[string]interface{}{ - "days": 31, - "batch": 100, - 
"converge": true, // 历史告警的数据库表,对于已收敛的告警,默认删掉,不保留,省得告警太多 - }) - - viper.SetDefault("tpl", map[string]string{ - "alertPath": "./etc/alert", - "screenPath": "./etc/screen", - }) - - err = viper.Unmarshal(&yaml) - if err != nil { - return fmt.Errorf("Unmarshal %v", err) - } - - return nil -} diff --git a/src/modules/monapi/http/http_middleware.go b/src/modules/monapi/http/http_middleware.go deleted file mode 100644 index 2dd4dc61..00000000 --- a/src/modules/monapi/http/http_middleware.go +++ /dev/null @@ -1,96 +0,0 @@ -package http - -import ( - "fmt" - - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/config" - - "github.com/gin-gonic/gin" - "github.com/toolkits/pkg/errors" - "github.com/toolkits/pkg/slice" -) - -// GetCookieUser 从cookie中获取username -func GetCookieUser() gin.HandlerFunc { - return func(c *gin.Context) { - username := sessionUsername(c) - if username == "" { - username = headerUser(c) - } - - if username == "" { - bomb("unauthorized") - } - - c.Set("username", username) - c.Next() - } -} - -func headerUser(c *gin.Context) string { - token := c.GetHeader("X-User-Token") - if token == "" { - return "" - } - - user, err := getUserByToken(token) - errors.Dangerous(err) - - if user == nil { - return "" - } - - return user.Username -} - -const internalToken = "monapi-builtin-token" - -// CheckHeaderToken check thirdparty X-Srv-Token -func CheckHeaderToken() gin.HandlerFunc { - return func(c *gin.Context) { - token := c.GetHeader("X-Srv-Token") - if token != internalToken && !slice.ContainsString(config.Get().Tokens, token) { - bomb("token[%s] invalid", token) - } - c.Next() - } -} - -func getUserByToken(token string) (user *models.User, err error) { - ut, err := models.UserTokenGet("token=?", token) - if err != nil { - return - } - - if ut == nil { - return user, fmt.Errorf("token not found") - } - - user, err = models.UserGet("id=?", ut.UserId) - if err != nil { - return - } - - if user == nil { - return user, 
fmt.Errorf("user not found") - } - - return -} - -func sessionUsername(c *gin.Context) string { - sess, err := models.SessionGetWithCache(readSessionId(c)) - if err != nil { - return "" - } - return sess.Username -} - -func readSessionId(c *gin.Context) string { - sid, err := c.Cookie(config.Get().HTTP.CookieName) - if err != nil { - return "" - } - return sid -} diff --git a/src/modules/monapi/http/http_server.go b/src/modules/monapi/http/http_server.go deleted file mode 100644 index 3552ffff..00000000 --- a/src/modules/monapi/http/http_server.go +++ /dev/null @@ -1,76 +0,0 @@ -package http - -import ( - "context" - "fmt" - "net/http" - _ "net/http/pprof" - "os" - "strings" - "time" - - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/common/middleware" - "github.com/didi/nightingale/src/modules/monapi/config" - - "github.com/gin-gonic/gin" -) - -var srv = &http.Server{ - ReadTimeout: 10 * time.Second, - WriteTimeout: 10 * time.Second, - MaxHeaderBytes: 1 << 20, -} - -var skipPaths = []string{ - "/api/mon/ping", -} - -// Start http server -func Start() { - c := config.Get() - - loggerMid := middleware.LoggerWithConfig(middleware.LoggerConfig{SkipPaths: skipPaths}) - recoveryMid := middleware.Recovery() - - if strings.ToLower(c.HTTP.Mode) == "release" { - gin.SetMode(gin.ReleaseMode) - middleware.DisableConsoleColor() - } else { - srv.WriteTimeout = 120 * time.Second - } - - r := gin.New() - r.Use(loggerMid, recoveryMid) - - Config(r) - - srv.Addr = address.GetHTTPListen("monapi") - srv.Handler = r - - go func() { - fmt.Println("http.listening:", srv.Addr) - if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed { - fmt.Printf("listening %s occur error: %s\n", srv.Addr, err) - os.Exit(3) - } - }() -} - -// Shutdown http server -func Shutdown() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - if err := srv.Shutdown(ctx); err != nil { - fmt.Println("cannot shutdown 
http server:", err) - os.Exit(2) - } - - // catching ctx.Done(). timeout of 5 seconds. - select { - case <-ctx.Done(): - fmt.Println("shutdown http server timeout of 5 seconds.") - default: - fmt.Println("http server stopped") - } -} diff --git a/src/modules/monapi/http/router.go b/src/modules/monapi/http/router.go deleted file mode 100644 index f4a7902c..00000000 --- a/src/modules/monapi/http/router.go +++ /dev/null @@ -1,193 +0,0 @@ -package http - -import ( - "github.com/didi/nightingale/src/modules/monapi/config" - - "github.com/gin-contrib/pprof" - "github.com/gin-gonic/gin" -) - -// Config routes -func Config(r *gin.Engine) { - r.Static("/pub", "./pub") - r.StaticFile("/favicon.ico", "./pub/favicon.ico") - - sys := r.Group("/api/mon/sys") - { - sys.GET("/ping", ping) - sys.GET("/version", version) - sys.GET("/pid", pid) - sys.GET("/addr", addr) - } - - generic := r.Group("/api/mon").Use(GetCookieUser()) - { - generic.GET("/regions", func(c *gin.Context) { renderData(c, config.Get().Region, nil) }) - } - - node := r.Group("/api/mon/node").Use(GetCookieUser()) - { - node.GET("/:id/maskconf", maskconfGets) - node.GET("/:id/screen", screenGets) - node.POST("/:id/screen", screenPost) - } - - maskconf := r.Group("/api/mon/maskconf").Use(GetCookieUser()) - { - maskconf.POST("", maskconfPost) - maskconf.PUT("/:id", maskconfPut) - maskconf.DELETE("/:id", maskconfDel) - } - - screen := r.Group("/api/mon/screen").Use(GetCookieUser()) - { - screen.GET("/:id", screenGet) - screen.PUT("/:id", screenPut) - screen.DELETE("/:id", screenDel) - screen.GET("/:id/subclass", screenSubclassGets) - screen.POST("/:id/subclass", screenSubclassPost) - } - - subclass := r.Group("/api/mon/subclass").Use(GetCookieUser()) - { - subclass.PUT("", screenSubclassPut) - subclass.DELETE("/:id", screenSubclassDel) - subclass.GET("/:id/chart", chartGets) - subclass.POST("/:id/chart", chartPost) - } - - subclasses := r.Group("/api/mon/subclasses").Use(GetCookieUser()) - { - subclasses.PUT("/loc", 
screenSubclassLocPut) - } - - chart := r.Group("/api/mon/chart").Use(GetCookieUser()) - { - chart.PUT("/:id", chartPut) - chart.DELETE("/:id", chartDel) - } - - charts := r.Group("/api/mon/charts").Use(GetCookieUser()) - { - charts.PUT("/weights", chartWeightsPut) - } - - tmpchart := r.Group("/api/mon/tmpchart").Use(GetCookieUser()) - { - tmpchart.GET("", tmpChartGet) - tmpchart.POST("", tmpChartPost) - } - - event := r.Group("/api/mon/event").Use(GetCookieUser()) - { - event.GET("/cur", eventCurGets) - event.GET("/cur/:id", eventCurGetById) - event.DELETE("/cur/:id", eventCurDel) - event.GET("/his", eventHisGets) - event.GET("/his/:id", eventHisGetById) - event.POST("/cur/claim", eventCurClaim) - } - - // TODO: merge to collect-rule - collect := r.Group("/api/mon/collect").Use(GetCookieUser()) - { - collect.POST("", collectRulePost) // create a collect rule - collect.GET("/list", collectRulesGet) // get collect rules - collect.GET("", collectRuleGet) // get collect rule by type & id - collect.PUT("", collectRulePut) // update collect rule by type & id - collect.DELETE("", collectsRuleDel) // delete collect rules by type & ids - collect.POST("/check", regExpCheck) // check collect rule - } - - // TODO: merge to collect-rules, used by agent - collects := r.Group("/api/mon/collects") - { - collects.GET("/:endpoint", collectRulesGetByLocalEndpoint) // get collect rules by endpoint, for agent - collects.GET("", collectRulesGet) // get collect rules - } - - collectRules := r.Group("/api/mon/collect-rules").Use(GetCookieUser()) - { - collectRules.POST("", collectRulePost) // create a collect rule - collectRules.GET("/list", collectRulesGetV2) // get collect rules - collectRules.GET("", collectRuleGet) // get collect rule by type & id - collectRules.PUT("", collectRulePut) // update collect rule by type & id - collectRules.DELETE("", collectsRuleDel) // delete collect rules by type & ids - collectRules.POST("/check", regExpCheck) // check collect rule - 
collectRules.GET("/types", collectRuleTypesGet) // get collect types, category: local|remote - collectRules.GET("/types/:type/template", collectRuleTemplateGet) // get collect teplate by type - - } - - collectRulesAnonymous := r.Group("/api/mon/collect-rules") - { - collectRulesAnonymous.GET("/endpoints/:endpoint/local", collectRulesGetByLocalEndpoint) // for agent - } - - stra := r.Group("/api/mon/stra").Use(GetCookieUser()) - { - stra.POST("", straPost) - stra.PUT("", straPut) - stra.DELETE("", strasDel) - stra.GET("", strasGet) - stra.GET("/:sid", straGet) - } - - stras := r.Group("/api/mon/stras") - { - stras.GET("/effective", effectiveStrasGet) - stras.GET("", strasAll) - } - - aggr := r.Group("/api/mon/aggr").Use(GetCookieUser()) - { - aggr.POST("", aggrCalcPost) - aggr.PUT("", aggrCalcPut) - aggr.DELETE("", aggrCalcsDel) - aggr.GET("", aggrCalcsGet) - aggr.GET("/:id", aggrCalcGet) - } - - tpl := r.Group("/api/mon/tpl") - { - tpl.GET("", tplNameGets) - tpl.GET("/content", tplGet) - } - - aggrs := r.Group("/api/mon/aggrs").Use() - { - aggrs.GET("", aggrCalcsWithEndpointGet) - } - - index := r.Group("/api/mon/index") - { - index.POST("/metrics", getMetrics) - index.POST("/tagkv", getTagkvs) - } - - transferProxy := r.Group("/api/transfer") - { - transferProxy.GET("/req", transferReq) - transferProxy.POST("/data", transferReq) - transferProxy.POST("/data/ui", transferReq) - transferProxy.POST("/push", transferReq) - } - - indexProxy := r.Group("/api/index") - { - indexProxy.POST("/metrics", indexReq) - indexProxy.POST("/tagkv", indexReq) - indexProxy.POST("/counter/fullmatch", indexReq) - indexProxy.POST("/counter/clude", indexReq) - indexProxy.POST("/counter/detail", indexReq) - } - - v1 := r.Group("/v1/mon") - { - v1.GET("/collect-rules/endpoints/:endpoint/remote", collectRulesGetByRemoteEndpoint) // for prober - } - - if config.Get().Logger.Level == "DEBUG" { - pprof.Register(r, "/api/monapi/debug/pprof") - } -} diff --git 
a/src/modules/monapi/http/router_funcs.go b/src/modules/monapi/http/router_funcs.go deleted file mode 100644 index 00850f67..00000000 --- a/src/modules/monapi/http/router_funcs.go +++ /dev/null @@ -1,235 +0,0 @@ -package http - -import ( - "strconv" - - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/toolkits/i18n" - - "github.com/gin-gonic/gin" - "github.com/toolkits/pkg/errors" -) - -func dangerous(v interface{}) { - errors.Dangerous(v) -} - -func bomb(format string, a ...interface{}) { - errors.Bomb(i18n.Sprintf(format, a...)) -} - -func urlParamStr(c *gin.Context, field string) string { - val := c.Param(field) - - if val == "" { - bomb("[%s] is blank", field) - } - - return val -} - -func urlParamInt64(c *gin.Context, field string) int64 { - strval := urlParamStr(c, field) - intval, err := strconv.ParseInt(strval, 10, 64) - if err != nil { - bomb("cannot convert %s to int64", strval) - } - - return intval -} - -func urlParamInt(c *gin.Context, field string) int { - return int(urlParamInt64(c, field)) -} - -func queryStr(c *gin.Context, key string, defaultVal string) string { - val := c.Query(key) - if val == "" { - return defaultVal - } - - return val -} - -func mustQueryStr(c *gin.Context, key string) string { - val := c.Query(key) - if val == "" { - bomb("arg[%s] not found", key) - } - - return val -} - -func mustQueryInt(c *gin.Context, key string) int { - strv := mustQueryStr(c, key) - - intv, err := strconv.Atoi(strv) - if err != nil { - bomb("cannot convert [%s] to int", strv) - } - - return intv -} - -func mustQueryInt64(c *gin.Context, key string) int64 { - strv := mustQueryStr(c, key) - - intv, err := strconv.ParseInt(strv, 10, 64) - if err != nil { - bomb("cannot convert [%s] to int64", strv) - } - - return intv -} - -func queryInt(c *gin.Context, key string, defaultVal int) int { - strv := c.Query(key) - if strv == "" { - return defaultVal - } - - intv, err := strconv.Atoi(strv) - if err != nil { - bomb("cannot convert 
[%s] to int", strv) - } - - return intv -} - -func queryInt64(c *gin.Context, key string, defaultVal int64) int64 { - strv := c.Query(key) - if strv == "" { - return defaultVal - } - - intv, err := strconv.ParseInt(strv, 10, 64) - if err != nil { - bomb("cannot convert [%s] to int64", strv) - } - - return intv -} - -func offset(c *gin.Context, limit int, total interface{}) int { - if limit <= 0 { - limit = 10 - } - - page := queryInt(c, "p", 1) - return (page - 1) * limit -} - -func renderMessage(c *gin.Context, v interface{}) { - if v == nil { - c.JSON(200, gin.H{"err": ""}) - return - } - - switch t := v.(type) { - case string: - c.JSON(200, gin.H{"err": t}) - case error: - c.JSON(200, gin.H{"err": t.Error()}) - } -} - -func renderData(c *gin.Context, data interface{}, err error) { - if err == nil { - c.JSON(200, gin.H{"dat": data, "err": ""}) - return - } - - renderMessage(c, err.Error()) -} - -func loginUsername(c *gin.Context) string { - username1, has := c.Get("username") - if has { - return username1.(string) - } - - username2 := sessionUsername(c) - if username2 == "" { - bomb("unauthorized") - } - - return username2 -} - -func mustNode(id int64) *models.Node { - node, err := models.NodeGet("id=?", id) - if err != nil { - bomb("cannot retrieve node[%d]: %v", id, err) - } - - if node == nil { - bomb("no such node[%d]", id) - } - - return node -} - -func mustScreen(id int64) *models.Screen { - screen, err := models.ScreenGet("id", id) - if err != nil { - bomb("cannot retrieve screen[%d]: %v", id, err) - } - - if screen == nil { - bomb("no such screen[%d]", id) - } - - return screen -} - -func mustScreenSubclass(id int64) *models.ScreenSubclass { - subclass, err := models.ScreenSubclassGet("id", id) - if err != nil { - bomb("cannot retrieve subclass[%d]: %v", id, err) - } - - if subclass == nil { - bomb("no such subclass[%d]", id) - } - - return subclass -} - -func mustChart(id int64) *models.Chart { - chart, err := models.ChartGet("id", id) - if err != nil { 
- bomb("cannot retrieve chart[%d]: %v", id, err) - } - - if chart == nil { - bomb("no such chart[%d]", id) - } - - return chart -} - -func mustEventCur(id int64) *models.EventCur { - eventCur, err := models.EventCurGet("id", id) - if err != nil { - bomb("cannot retrieve eventCur[%d]: %v", id, err) - } - - if eventCur == nil { - bomb("no such eventCur[%d]", id) - } - - return eventCur -} - -func mustEvent(id int64) *models.Event { - eventCur, err := models.EventGet("id", id) - if err != nil { - bomb("cannot retrieve event[%d]: %v", id, err) - } - - if eventCur == nil { - bomb("no such event[%d]", id) - } - - return eventCur -} diff --git a/src/modules/monapi/http/router_proxy.go b/src/modules/monapi/http/router_proxy.go deleted file mode 100644 index c7bdfe98..00000000 --- a/src/modules/monapi/http/router_proxy.go +++ /dev/null @@ -1,31 +0,0 @@ -package http - -import ( - "net/http/httputil" - "net/url" - - "github.com/didi/nightingale/src/modules/monapi/config" - - "github.com/gin-gonic/gin" - "github.com/toolkits/pkg/errors" -) - -func transferReq(c *gin.Context) { - target, err := url.Parse(config.Get().Proxy.Transfer) - errors.Dangerous(err) - - proxy := httputil.NewSingleHostReverseProxy(target) - c.Request.Header.Set("X-Forwarded-Host", c.Request.Header.Get("Host")) - - proxy.ServeHTTP(c.Writer, c.Request) -} - -func indexReq(c *gin.Context) { - target, err := url.Parse(config.Get().Proxy.Index) - errors.Dangerous(err) - - proxy := httputil.NewSingleHostReverseProxy(target) - c.Request.Header.Set("X-Forwarded-Host", c.Request.Header.Get("Host")) - - proxy.ServeHTTP(c.Writer, c.Request) -} diff --git a/src/modules/monapi/monapi.go b/src/modules/monapi/monapi.go deleted file mode 100644 index 6dc7ba4a..00000000 --- a/src/modules/monapi/monapi.go +++ /dev/null @@ -1,152 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "os" - "os/signal" - "syscall" - "time" - - "github.com/didi/nightingale/src/models" - 
"github.com/didi/nightingale/src/modules/monapi/acache" - "github.com/didi/nightingale/src/modules/monapi/alarm" - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/monapi/config" - "github.com/didi/nightingale/src/modules/monapi/http" - "github.com/didi/nightingale/src/modules/monapi/redisc" - "github.com/didi/nightingale/src/modules/monapi/scache" - "github.com/didi/nightingale/src/toolkits/i18n" - - _ "github.com/didi/nightingale/src/modules/monapi/plugins/all" - _ "github.com/go-sql-driver/mysql" - - "github.com/toolkits/pkg/cache" - "github.com/toolkits/pkg/file" - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/runner" -) - -var ( - vers *bool - help *bool - conf *string - - version = "No Version Provided" -) - -func init() { - vers = flag.Bool("v", false, "display the version.") - help = flag.Bool("h", false, "print this help.") - conf = flag.String("f", "", "specify configuration file.") - flag.Parse() - - if *vers { - fmt.Println("Version:", version) - os.Exit(0) - } - - if *help { - flag.Usage() - os.Exit(0) - } - - runner.Init() - fmt.Println("monapi start, use configuration file:", *conf) - fmt.Println("runner.Cwd:", runner.Cwd) - fmt.Println("runner.Hostname:", runner.Hostname) -} - -func main() { - aconf() - pconf() - - cache.InitMemoryCache(time.Hour) - config.InitLogger() - models.InitMySQL("mon", "rdb") - - scache.Init() - - i18n.Init(config.Get().I18n) - - if err := scache.CheckJudge(); err != nil { - logger.Errorf("check judge fail: %v", err) - } - - if config.Get().AlarmEnabled { - acache.Init() - - if err := alarm.SyncMaskconf(); err != nil { - log.Fatalf("sync maskconf fail: %v", err) - } - - if err := alarm.SyncStra(); err != nil { - log.Fatalf("sync stra fail: %v", err) - } - - redisc.InitRedis() - - go alarm.SyncMaskconfLoop() - go alarm.SyncStraLoop() - go alarm.CleanStraLoop() - go alarm.ReadHighEvent() - go alarm.ReadLowEvent() - go alarm.CallbackConsumer() - go 
alarm.MergeEvent() - go alarm.CleanEventLoop() - } - - pluginInfo() - - http.Start() - ending() -} - -func ending() { - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - select { - case <-c: - fmt.Printf("stop signal caught, stopping... pid=%d\n", os.Getpid()) - } - - logger.Close() - http.Shutdown() - fmt.Println("monapi stopped successfully") -} - -// auto detect configuration file -func aconf() { - if *conf != "" && file.IsExist(*conf) { - return - } - - *conf = "etc/monapi.local.yml" - if file.IsExist(*conf) { - return - } - - *conf = "etc/monapi.yml" - if file.IsExist(*conf) { - return - } - - fmt.Println("no configuration file for monapi") - os.Exit(1) -} - -// parse configuration file -func pconf() { - if err := config.Parse(*conf); err != nil { - fmt.Println("cannot parse configuration file:", err) - os.Exit(1) - } -} - -func pluginInfo() { - fmt.Println("remote collector") - for k, v := range collector.GetRemoteCollectors() { - fmt.Printf(" %d %s\n", k, v) - } -} diff --git a/src/modules/monapi/plugins/all/all.go b/src/modules/monapi/plugins/all/all.go deleted file mode 100644 index 57dc4ab0..00000000 --- a/src/modules/monapi/plugins/all/all.go +++ /dev/null @@ -1,28 +0,0 @@ -package all - -import ( - // remote - // _ "github.com/didi/nightingale/src/modules/monapi/plugins/api" - // telegraf style - _ "github.com/didi/nightingale/src/modules/monapi/plugins/dns_query" - _ "github.com/didi/nightingale/src/modules/monapi/plugins/elasticsearch" - _ "github.com/didi/nightingale/src/modules/monapi/plugins/github" - _ "github.com/didi/nightingale/src/modules/monapi/plugins/haproxy" - _ "github.com/didi/nightingale/src/modules/monapi/plugins/http_response" - _ "github.com/didi/nightingale/src/modules/monapi/plugins/mongodb" - _ "github.com/didi/nightingale/src/modules/monapi/plugins/mysql" - _ "github.com/didi/nightingale/src/modules/monapi/plugins/net_response" - _ 
"github.com/didi/nightingale/src/modules/monapi/plugins/nginx" - _ "github.com/didi/nightingale/src/modules/monapi/plugins/ping" - _ "github.com/didi/nightingale/src/modules/monapi/plugins/prometheus" - _ "github.com/didi/nightingale/src/modules/monapi/plugins/rabbitmq" - _ "github.com/didi/nightingale/src/modules/monapi/plugins/redis" - _ "github.com/didi/nightingale/src/modules/monapi/plugins/tengine" - _ "github.com/didi/nightingale/src/modules/monapi/plugins/zookeeper" - - // local - _ "github.com/didi/nightingale/src/modules/monapi/plugins/log" - _ "github.com/didi/nightingale/src/modules/monapi/plugins/plugin" - _ "github.com/didi/nightingale/src/modules/monapi/plugins/port" - _ "github.com/didi/nightingale/src/modules/monapi/plugins/proc" -) diff --git a/src/modules/monapi/plugins/demo/lib/Makefile b/src/modules/monapi/plugins/demo/lib/Makefile deleted file mode 100644 index 067a0e3e..00000000 --- a/src/modules/monapi/plugins/demo/lib/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -all: lib.so - -lib.so: lib.go - go build -buildmode=plugin -o $@ diff --git a/src/modules/monapi/plugins/demo/lib/lib.go b/src/modules/monapi/plugins/demo/lib/lib.go deleted file mode 100644 index fe57764f..00000000 --- a/src/modules/monapi/plugins/demo/lib/lib.go +++ /dev/null @@ -1,5 +0,0 @@ -package main - -import ( - _ "github.com/didi/nightingale/src/modules/monapi/plugins/demo" -) diff --git a/src/modules/monapi/plugins/dns_query/dns_query.go b/src/modules/monapi/plugins/dns_query/dns_query.go deleted file mode 100644 index 7839b339..00000000 --- a/src/modules/monapi/plugins/dns_query/dns_query.go +++ /dev/null @@ -1,96 +0,0 @@ -package dns_query - -import ( - "fmt" - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/toolkits/i18n" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs/dns_query" -) - -func init() { - collector.CollectorRegister(NewCollector()) // for monapi - i18n.DictRegister(langDict) -} - 
-type Collector struct { - *collector.BaseCollector -} - -func NewCollector() *Collector { - return &Collector{BaseCollector: collector.NewBaseCollector( - "dns_query", - collector.RemoteCategory, - func() collector.TelegrafPlugin { return &Rule{} }, - )} -} - -var ( - langDict = map[string]map[string]string{ - "zh": map[string]string{ - "Servers": "DNS地址", - "Network": "协议", - "Domains": "域名", - "RecordType": "记录类型", - "Port": "端口", - "Timeout": "超时", - "List of DNS": "DNS服务器列表", - "Protocol, must be tcp or udp": "请求协议,必须是 tcp 或 udp", - "List of Domains": "解析域名列表", - "DNS Record Type": "DNS记录类型", - "Port, default is 53": "DNS端口号,默认是53", - "Set timeout": "设置超时,单位是秒", - }, - } -) - -type Rule struct { - Servers []string `label:"Servers" json:"servers,required" description:"List of DNS" example:"223.5.5.5"` - Network string `label:"Network" json:"network" description:"Protocol, must be tcp or udp" example:"udp"` - Domains []string `label:"Domains" json:"domains,required" description:"List of Domains", example:"www.baidu.com"` - RecordType string `label:"RecordType" json:"record_type" enum:"[\"A\", \"AAAA\", \"CNAME\", \"MX\", \"NS\", \"PTR\", \"TXT\", \"SOA\", \"SPF\", \"SRV\"]" description:"DNS Record Type"` - Port int `label:"Port" json:"port" default:"53" description:"Port"` - Timeout int `label:"Timeout" json:"timeout" default:"10" description:"Set timeout"` -} - -func (p *Rule) Validate() error { - if len(p.Servers) == 0 || p.Servers[0] == "" { - return fmt.Errorf("dns.rule.servers must be set") - } - if p.Network == "" { - p.Network = "udp" - } - if !(p.Network == "tcp" || p.Network == "udp") { - return fmt.Errorf("net_response.rule.Network must be tcp or udp") - } - if len(p.Domains) == 0 || p.Domains[0] == "" { - return fmt.Errorf("dns.rule.domians must be set") - } - if p.RecordType == "" { - p.RecordType = "A" - } - if p.Port == 0 { - p.Port = 53 - } - if p.Timeout == 0 { - p.Timeout = 10 - } - - return nil -} - - -func (p *Rule) TelegrafInput() 
(telegraf.Input, error) { - if err := p.Validate(); err != nil { - return nil, err - } - - return &dns_query.DnsQuery{ - Servers: p.Servers, - Network: p.Network, - Domains: p.Domains, - RecordType: p.RecordType, - Port: p.Port, - Timeout: p.Timeout, - }, nil -} diff --git a/src/modules/monapi/plugins/dns_query/dns_query_test.go b/src/modules/monapi/plugins/dns_query/dns_query_test.go deleted file mode 100644 index 38575e35..00000000 --- a/src/modules/monapi/plugins/dns_query/dns_query_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package dns_query - -import ( - "github.com/didi/nightingale/src/modules/monapi/plugins" - "testing" -) - -func TestCollect(t *testing.T) { - plugins.PluginTest(t, &Rule{ - Servers: []string{"223.5.5.5"}, - Domains: []string{"www.baidu.com"}, - }) -} diff --git a/src/modules/monapi/plugins/haproxy/haproxy.go b/src/modules/monapi/plugins/haproxy/haproxy.go deleted file mode 100644 index c122543a..00000000 --- a/src/modules/monapi/plugins/haproxy/haproxy.go +++ /dev/null @@ -1,71 +0,0 @@ -package haproxy - -import ( - "fmt" - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/monapi/plugins" - "github.com/didi/nightingale/src/toolkits/i18n" - "github.com/influxdata/telegraf" - "github.com/didi/nightingale/src/modules/monapi/plugins/haproxy/haproxy" -) - -func init() { - collector.CollectorRegister(NewHaproxyCollector()) // for monapi - i18n.DictRegister(langDict) -} - -type HaproxyCollector struct { - *collector.BaseCollector -} - -func NewHaproxyCollector() *HaproxyCollector { - return &HaproxyCollector{BaseCollector: collector.NewBaseCollector( - "haproxy", - collector.RemoteCategory, - func() collector.TelegrafPlugin { return &HaproxyRule{} }, - )} -} - -var ( - langDict = map[string]map[string]string{ - "zh": map[string]string{ - "Servers": "Servers", - "Username": "用户名", - "Password": "密码", - }, - } -) - -type HaproxyRule struct { - Servers []string `label:"Servers" json:"servers,required" 
example:"http://myhaproxy.com:1936/haproxy?stats"` - KeepFieldNames bool `label:"KeepFieldNames" json:"keepFieldNames" default:"false" description:"Setting this option to true results in the plugin keeping the original"` - Username string `label:"Username" json:"username" description:"specify username"` - Password string `label:"Password" json:"password" format:"password" description:"specify server password"` - - plugins.ClientConfig -} - -func (p *HaproxyRule) Validate() error { - if len(p.Servers) == 0 || p.Servers[0] == "" { - return fmt.Errorf("haproxy.rule.servers must be set") - } - return nil -} - -func (p *HaproxyRule) TelegrafInput() (telegraf.Input, error) { - if err := p.Validate(); err != nil { - return nil, err - } - - ha := &haproxy.Haproxy{ - - Servers: p.Servers, - KeepFieldNames: p.KeepFieldNames, - Username: p.Username, - Password: p.Password, - ClientConfig: p.ClientConfig.TlsClientConfig(), - } - - return ha, nil -} - diff --git a/src/modules/monapi/plugins/mysql/sample.out b/src/modules/monapi/plugins/mysql/sample.out deleted file mode 100644 index 75d07d1f..00000000 --- a/src/modules/monapi/plugins/mysql/sample.out +++ /dev/null @@ -1,417 +0,0 @@ -=== RUN TestCollect -2021-03-09 18:46:22.999601 DEBUG metric/metric.go:61 unable to convert field to float64 mysql_binlog_snapshot_file map[server:127.0.0.1:3306] value: mysql-bin.000062 -2021-03-09 18:46:23.000402 DEBUG metric/metric.go:61 unable to convert field to float64 mysql_rpl_status map[server:127.0.0.1:3306] value: AUTH_MASTER -2021-03-09 18:46:23.000455 DEBUG metric/metric.go:61 unable to convert field to float64 mysql_ssl_cipher_list map[server:127.0.0.1:3306] value: -2021-03-09 18:46:23.000463 DEBUG metric/metric.go:61 unable to convert field to float64 mysql_ssl_cipher map[server:127.0.0.1:3306] value: -2021-03-09 18:46:23.000516 DEBUG metric/metric.go:61 unable to convert field to float64 mysql_ssl_version map[server:127.0.0.1:3306] value: - util.go:96: 0 GAUGE 
/mysql_access_denied_errors/server=127.0.0.1:3306 13.000000 - util.go:96: 1 GAUGE /mysql_aria_pagecache_blocks_unused/server=127.0.0.1:3306 15737.000000 - util.go:96: 2 GAUGE /mysql_binlog_cache_disk_use/server=127.0.0.1:3306 9.000000 - util.go:96: 3 GAUGE /mysql_aria_pagecache_reads/server=127.0.0.1:3306 262305.000000 - util.go:96: 4 GAUGE /mysql_aria_transaction_log_syncs/server=127.0.0.1:3306 0.000000 - util.go:96: 5 GAUGE /mysql_aria_pagecache_write_requests/server=127.0.0.1:3306 530482.000000 - util.go:96: 6 GAUGE /mysql_aborted_clients/server=127.0.0.1:3306 1600.000000 - util.go:96: 7 GAUGE /mysql_aria_pagecache_blocks_not_flushed/server=127.0.0.1:3306 0.000000 - util.go:96: 8 GAUGE /mysql_binlog_cache_use/server=127.0.0.1:3306 5834920.000000 - util.go:96: 9 GAUGE /mysql_aborted_connects/server=127.0.0.1:3306 23.000000 - util.go:96: 10 GAUGE /mysql_binlog_group_commits/server=127.0.0.1:3306 5834836.000000 - util.go:96: 11 GAUGE /mysql_binlog_commits/server=127.0.0.1:3306 5834841.000000 - util.go:96: 12 GAUGE /mysql_binlog_stmt_cache_disk_use/server=127.0.0.1:3306 0.000000 - util.go:96: 13 GAUGE /mysql_aria_pagecache_read_requests/server=127.0.0.1:3306 12559583.000000 - util.go:96: 14 GAUGE /mysql_binlog_bytes_written/server=127.0.0.1:3306 3167439379.000000 - util.go:96: 15 GAUGE /mysql_aria_pagecache_writes/server=127.0.0.1:3306 0.000000 - util.go:96: 16 GAUGE /mysql_binlog_snapshot_position/server=127.0.0.1:3306 1019956335.000000 - util.go:96: 17 GAUGE /mysql_binlog_stmt_cache_use/server=127.0.0.1:3306 0.000000 - util.go:96: 18 GAUGE /mysql_aria_pagecache_blocks_used/server=127.0.0.1:3306 4.000000 - util.go:96: 19 GAUGE /mysql_com_alter_tablespace/server=127.0.0.1:3306 0.000000 - util.go:96: 20 GAUGE /mysql_com_begin/server=127.0.0.1:3306 372780.000000 - util.go:96: 21 GAUGE /mysql_com_binlog/server=127.0.0.1:3306 0.000000 - util.go:96: 22 GAUGE /mysql_busy_time/server=127.0.0.1:3306 0.000000 - util.go:96: 23 GAUGE 
/mysql_com_alter_server/server=127.0.0.1:3306 0.000000 - util.go:96: 24 GAUGE /mysql_bytes_sent/server=127.0.0.1:3306 1177364764708.000000 - util.go:96: 25 GAUGE /mysql_com_alter_function/server=127.0.0.1:3306 0.000000 - util.go:96: 26 GAUGE /mysql_bytes_received/server=127.0.0.1:3306 195471825078.000000 - util.go:96: 27 GAUGE /mysql_com_admin_commands/server=127.0.0.1:3306 47.000000 - util.go:96: 28 GAUGE /mysql_com_alter_event/server=127.0.0.1:3306 0.000000 - util.go:96: 29 GAUGE /mysql_com_call_procedure/server=127.0.0.1:3306 0.000000 - util.go:96: 30 GAUGE /mysql_com_change_master/server=127.0.0.1:3306 0.000000 - util.go:96: 31 GAUGE /mysql_com_alter_db_upgrade/server=127.0.0.1:3306 0.000000 - util.go:96: 32 GAUGE /mysql_com_alter_db/server=127.0.0.1:3306 0.000000 - util.go:96: 33 GAUGE /mysql_com_check/server=127.0.0.1:3306 0.000000 - util.go:96: 34 GAUGE /mysql_com_alter_procedure/server=127.0.0.1:3306 0.000000 - util.go:96: 35 GAUGE /mysql_com_alter_table/server=127.0.0.1:3306 0.000000 - util.go:96: 36 GAUGE /mysql_com_assign_to_keycache/server=127.0.0.1:3306 0.000000 - util.go:96: 37 GAUGE /mysql_com_analyze/server=127.0.0.1:3306 0.000000 - util.go:96: 38 GAUGE /mysql_com_change_db/server=127.0.0.1:3306 26.000000 - util.go:96: 39 GAUGE /mysql_com_create_db/server=127.0.0.1:3306 0.000000 - util.go:96: 40 GAUGE /mysql_com_delete_multi/server=127.0.0.1:3306 0.000000 - util.go:96: 41 GAUGE /mysql_com_commit/server=127.0.0.1:3306 372632.000000 - util.go:96: 42 GAUGE /mysql_com_create_event/server=127.0.0.1:3306 0.000000 - util.go:96: 43 GAUGE /mysql_com_drop_function/server=127.0.0.1:3306 0.000000 - util.go:96: 44 GAUGE /mysql_com_create_table/server=127.0.0.1:3306 0.000000 - util.go:96: 45 GAUGE /mysql_com_delete/server=127.0.0.1:3306 336504.000000 - util.go:96: 46 GAUGE /mysql_com_do/server=127.0.0.1:3306 0.000000 - util.go:96: 47 GAUGE /mysql_com_create_view/server=127.0.0.1:3306 0.000000 - util.go:96: 48 GAUGE /mysql_com_drop_db/server=127.0.0.1:3306 
0.000000 - util.go:96: 49 GAUGE /mysql_com_create_index/server=127.0.0.1:3306 0.000000 - util.go:96: 50 GAUGE /mysql_com_create_procedure/server=127.0.0.1:3306 0.000000 - util.go:96: 51 GAUGE /mysql_com_dealloc_sql/server=127.0.0.1:3306 0.000000 - util.go:96: 52 GAUGE /mysql_com_create_udf/server=127.0.0.1:3306 0.000000 - util.go:96: 53 GAUGE /mysql_com_create_user/server=127.0.0.1:3306 0.000000 - util.go:96: 54 GAUGE /mysql_com_drop_event/server=127.0.0.1:3306 0.000000 - util.go:96: 55 GAUGE /mysql_com_create_server/server=127.0.0.1:3306 0.000000 - util.go:96: 56 GAUGE /mysql_com_checksum/server=127.0.0.1:3306 0.000000 - util.go:96: 57 GAUGE /mysql_com_create_function/server=127.0.0.1:3306 0.000000 - util.go:96: 58 GAUGE /mysql_com_create_trigger/server=127.0.0.1:3306 0.000000 - util.go:96: 59 GAUGE /mysql_com_drop_server/server=127.0.0.1:3306 0.000000 - util.go:96: 60 GAUGE /mysql_com_drop_trigger/server=127.0.0.1:3306 0.000000 - util.go:96: 61 GAUGE /mysql_com_drop_user/server=127.0.0.1:3306 0.000000 - util.go:96: 62 GAUGE /mysql_com_flush/server=127.0.0.1:3306 0.000000 - util.go:96: 63 GAUGE /mysql_com_load/server=127.0.0.1:3306 0.000000 - util.go:96: 64 GAUGE /mysql_com_empty_query/server=127.0.0.1:3306 0.000000 - util.go:96: 65 GAUGE /mysql_com_ha_open/server=127.0.0.1:3306 0.000000 - util.go:96: 66 GAUGE /mysql_com_insert_select/server=127.0.0.1:3306 0.000000 - util.go:96: 67 GAUGE /mysql_com_help/server=127.0.0.1:3306 0.000000 - util.go:96: 68 GAUGE /mysql_com_drop_procedure/server=127.0.0.1:3306 0.000000 - util.go:96: 69 GAUGE /mysql_com_drop_table/server=127.0.0.1:3306 0.000000 - util.go:96: 70 GAUGE /mysql_com_ha_close/server=127.0.0.1:3306 0.000000 - util.go:96: 71 GAUGE /mysql_com_insert/server=127.0.0.1:3306 677505.000000 - util.go:96: 72 GAUGE /mysql_com_install_plugin/server=127.0.0.1:3306 0.000000 - util.go:96: 73 GAUGE /mysql_com_kill/server=127.0.0.1:3306 0.000000 - util.go:96: 74 GAUGE /mysql_com_ha_read/server=127.0.0.1:3306 0.000000 - 
util.go:96: 75 GAUGE /mysql_com_drop_index/server=127.0.0.1:3306 0.000000 - util.go:96: 76 GAUGE /mysql_com_execute_sql/server=127.0.0.1:3306 0.000000 - util.go:96: 77 GAUGE /mysql_com_grant/server=127.0.0.1:3306 0.000000 - util.go:96: 78 GAUGE /mysql_com_drop_view/server=127.0.0.1:3306 0.000000 - util.go:96: 79 GAUGE /mysql_com_lock_tables/server=127.0.0.1:3306 1.000000 - util.go:96: 80 GAUGE /mysql_com_purge_before_date/server=127.0.0.1:3306 0.000000 - util.go:96: 81 GAUGE /mysql_com_select/server=127.0.0.1:3306 1216792068.000000 - util.go:96: 82 GAUGE /mysql_com_rename_table/server=127.0.0.1:3306 0.000000 - util.go:96: 83 GAUGE /mysql_com_repair/server=127.0.0.1:3306 0.000000 - util.go:96: 84 GAUGE /mysql_com_revoke/server=127.0.0.1:3306 0.000000 - util.go:96: 85 GAUGE /mysql_com_rename_user/server=127.0.0.1:3306 0.000000 - util.go:96: 86 GAUGE /mysql_com_preload_keys/server=127.0.0.1:3306 0.000000 - util.go:96: 87 GAUGE /mysql_com_rollback/server=127.0.0.1:3306 148.000000 - util.go:96: 88 GAUGE /mysql_com_rollback_to_savepoint/server=127.0.0.1:3306 0.000000 - util.go:96: 89 GAUGE /mysql_com_resignal/server=127.0.0.1:3306 0.000000 - util.go:96: 90 GAUGE /mysql_com_prepare_sql/server=127.0.0.1:3306 0.000000 - util.go:96: 91 GAUGE /mysql_com_optimize/server=127.0.0.1:3306 0.000000 - util.go:96: 92 GAUGE /mysql_com_purge/server=127.0.0.1:3306 0.000000 - util.go:96: 93 GAUGE /mysql_com_replace_select/server=127.0.0.1:3306 0.000000 - util.go:96: 94 GAUGE /mysql_com_replace/server=127.0.0.1:3306 0.000000 - util.go:96: 95 GAUGE /mysql_com_reset/server=127.0.0.1:3306 0.000000 - util.go:96: 96 GAUGE /mysql_com_release_savepoint/server=127.0.0.1:3306 0.000000 - util.go:96: 97 GAUGE /mysql_com_savepoint/server=127.0.0.1:3306 0.000000 - util.go:96: 98 GAUGE /mysql_com_revoke_all/server=127.0.0.1:3306 0.000000 - util.go:96: 99 GAUGE /mysql_com_show_client_statistics/server=127.0.0.1:3306 0.000000 - util.go:96: 100 GAUGE /mysql_com_show_create_table/server=127.0.0.1:3306 
6.000000 - util.go:96: 101 GAUGE /mysql_com_show_create_db/server=127.0.0.1:3306 0.000000 - util.go:96: 102 GAUGE /mysql_com_show_create_proc/server=127.0.0.1:3306 0.000000 - util.go:96: 103 GAUGE /mysql_com_show_create_event/server=127.0.0.1:3306 0.000000 - util.go:96: 104 GAUGE /mysql_com_show_engine_status/server=127.0.0.1:3306 0.000000 - util.go:96: 105 GAUGE /mysql_com_show_binlogs/server=127.0.0.1:3306 131018.000000 - util.go:96: 106 GAUGE /mysql_com_set_option/server=127.0.0.1:3306 20801.000000 - util.go:96: 107 GAUGE /mysql_com_show_create_trigger/server=127.0.0.1:3306 0.000000 - util.go:96: 108 GAUGE /mysql_com_show_databases/server=127.0.0.1:3306 26.000000 - util.go:96: 109 GAUGE /mysql_com_show_authors/server=127.0.0.1:3306 0.000000 - util.go:96: 110 GAUGE /mysql_com_show_collations/server=127.0.0.1:3306 0.000000 - util.go:96: 111 GAUGE /mysql_com_show_contributors/server=127.0.0.1:3306 0.000000 - util.go:96: 112 GAUGE /mysql_com_show_events/server=127.0.0.1:3306 0.000000 - util.go:96: 113 GAUGE /mysql_com_show_engine_mutex/server=127.0.0.1:3306 0.000000 - util.go:96: 114 GAUGE /mysql_com_show_charsets/server=127.0.0.1:3306 0.000000 - util.go:96: 115 GAUGE /mysql_com_show_create_func/server=127.0.0.1:3306 0.000000 - util.go:96: 116 GAUGE /mysql_com_show_binlog_events/server=127.0.0.1:3306 0.000000 - util.go:96: 117 GAUGE /mysql_com_show_engine_logs/server=127.0.0.1:3306 0.000000 - util.go:96: 118 GAUGE /mysql_com_show_errors/server=127.0.0.1:3306 0.000000 - util.go:96: 119 GAUGE /mysql_com_show_storage_engines/server=127.0.0.1:3306 0.000000 - util.go:96: 120 GAUGE /mysql_com_show_index_statistics/server=127.0.0.1:3306 0.000000 - util.go:96: 121 GAUGE /mysql_com_show_function_status/server=127.0.0.1:3306 0.000000 - util.go:96: 122 GAUGE /mysql_com_show_keys/server=127.0.0.1:3306 0.000000 - util.go:96: 123 GAUGE /mysql_com_show_processlist/server=127.0.0.1:3306 0.000000 - util.go:96: 124 GAUGE /mysql_com_show_procedure_status/server=127.0.0.1:3306 0.000000 
- util.go:96: 125 GAUGE /mysql_com_show_profiles/server=127.0.0.1:3306 0.000000 - util.go:96: 126 GAUGE /mysql_com_show_table_statistics/server=127.0.0.1:3306 0.000000 - util.go:96: 127 GAUGE /mysql_com_show_grants/server=127.0.0.1:3306 0.000000 - util.go:96: 128 GAUGE /mysql_com_show_master_status/server=127.0.0.1:3306 0.000000 - util.go:96: 129 GAUGE /mysql_com_show_plugins/server=127.0.0.1:3306 0.000000 - util.go:96: 130 GAUGE /mysql_com_show_table_status/server=127.0.0.1:3306 6.000000 - util.go:96: 131 GAUGE /mysql_com_show_privileges/server=127.0.0.1:3306 0.000000 - util.go:96: 132 GAUGE /mysql_com_show_fields/server=127.0.0.1:3306 505.000000 - util.go:96: 133 GAUGE /mysql_com_show_open_tables/server=127.0.0.1:3306 0.000000 - util.go:96: 134 GAUGE /mysql_com_show_slave_status/server=127.0.0.1:3306 131023.000000 - util.go:96: 135 GAUGE /mysql_com_show_profile/server=127.0.0.1:3306 0.000000 - util.go:96: 136 GAUGE /mysql_com_show_relaylog_events/server=127.0.0.1:3306 0.000000 - util.go:96: 137 GAUGE /mysql_com_show_slave_hosts/server=127.0.0.1:3306 0.000000 - util.go:96: 138 GAUGE /mysql_com_show_status/server=127.0.0.1:3306 181406.000000 - util.go:96: 139 GAUGE /mysql_com_stmt_fetch/server=127.0.0.1:3306 0.000000 - util.go:96: 140 GAUGE /mysql_com_update_multi/server=127.0.0.1:3306 0.000000 - util.go:96: 141 GAUGE /mysql_com_stmt_close/server=127.0.0.1:3306 1209479745.000000 - util.go:96: 142 GAUGE /mysql_com_show_warnings/server=127.0.0.1:3306 0.000000 - util.go:96: 143 GAUGE /mysql_com_stmt_prepare/server=127.0.0.1:3306 1209481246.000000 - util.go:96: 144 GAUGE /mysql_com_show_user_statistics/server=127.0.0.1:3306 0.000000 - util.go:96: 145 GAUGE /mysql_com_truncate/server=127.0.0.1:3306 0.000000 - util.go:96: 146 GAUGE /mysql_com_slave_stop/server=127.0.0.1:3306 0.000000 - util.go:96: 147 GAUGE /mysql_com_unlock_tables/server=127.0.0.1:3306 1.000000 - util.go:96: 148 GAUGE /mysql_com_show_tables/server=127.0.0.1:3306 38.000000 - util.go:96: 149 GAUGE 
/mysql_com_signal/server=127.0.0.1:3306 0.000000 - util.go:96: 150 GAUGE /mysql_com_uninstall_plugin/server=127.0.0.1:3306 0.000000 - util.go:96: 151 GAUGE /mysql_com_show_triggers/server=127.0.0.1:3306 6.000000 - util.go:96: 152 GAUGE /mysql_com_stmt_reprepare/server=127.0.0.1:3306 0.000000 - util.go:96: 153 GAUGE /mysql_com_update/server=127.0.0.1:3306 5370110.000000 - util.go:96: 154 GAUGE /mysql_com_show_variables/server=127.0.0.1:3306 2103.000000 - util.go:96: 155 GAUGE /mysql_com_slave_start/server=127.0.0.1:3306 0.000000 - util.go:96: 156 GAUGE /mysql_com_stmt_send_long_data/server=127.0.0.1:3306 0.000000 - util.go:96: 157 GAUGE /mysql_com_stmt_execute/server=127.0.0.1:3306 1209479772.000000 - util.go:96: 158 GAUGE /mysql_com_stmt_reset/server=127.0.0.1:3306 0.000000 - util.go:96: 159 GAUGE /mysql_executed_triggers/server=127.0.0.1:3306 0.000000 - util.go:96: 160 GAUGE /mysql_com_xa_recover/server=127.0.0.1:3306 0.000000 - util.go:96: 161 GAUGE /mysql_com_xa_rollback/server=127.0.0.1:3306 0.000000 - util.go:96: 162 GAUGE /mysql_created_tmp_files/server=127.0.0.1:3306 15.000000 - util.go:96: 163 GAUGE /mysql_com_xa_prepare/server=127.0.0.1:3306 0.000000 - util.go:96: 164 GAUGE /mysql_created_tmp_disk_tables/server=127.0.0.1:3306 263176.000000 - util.go:96: 165 GAUGE /mysql_com_xa_commit/server=127.0.0.1:3306 0.000000 - util.go:96: 166 GAUGE /mysql_delayed_writes/server=127.0.0.1:3306 0.000000 - util.go:96: 167 GAUGE /mysql_created_tmp_tables/server=127.0.0.1:3306 864565.000000 - util.go:96: 168 GAUGE /mysql_com_xa_start/server=127.0.0.1:3306 0.000000 - util.go:96: 169 GAUGE /mysql_cpu_time/server=127.0.0.1:3306 0.000000 - util.go:96: 170 GAUGE /mysql_delayed_insert_threads/server=127.0.0.1:3306 0.000000 - util.go:96: 171 GAUGE /mysql_executed_events/server=127.0.0.1:3306 0.000000 - util.go:96: 172 GAUGE /mysql_com_xa_end/server=127.0.0.1:3306 0.000000 - util.go:96: 173 GAUGE /mysql_feature_dynamic_columns/server=127.0.0.1:3306 0.000000 - util.go:96: 174 GAUGE 
/mysql_compression/server=127.0.0.1:3306 0.000000 - util.go:96: 175 GAUGE /mysql_feature_fulltext/server=127.0.0.1:3306 0.000000 - util.go:96: 176 GAUGE /mysql_empty_queries/server=127.0.0.1:3306 190626062.000000 - util.go:96: 177 GAUGE /mysql_connections/server=127.0.0.1:3306 202585.000000 - util.go:96: 178 GAUGE /mysql_delayed_errors/server=127.0.0.1:3306 0.000000 - util.go:96: 179 GAUGE /mysql_handler_delete/server=127.0.0.1:3306 803073.000000 - util.go:96: 180 GAUGE /mysql_feature_trigger/server=127.0.0.1:3306 0.000000 - util.go:96: 181 GAUGE /mysql_handler_read_last/server=127.0.0.1:3306 657.000000 - util.go:96: 182 GAUGE /mysql_handler_mrr_init/server=127.0.0.1:3306 0.000000 - util.go:96: 183 GAUGE /mysql_handler_mrr_key_refills/server=127.0.0.1:3306 0.000000 - util.go:96: 184 GAUGE /mysql_handler_read_key/server=127.0.0.1:3306 1837811887.000000 - util.go:96: 185 GAUGE /mysql_feature_locale/server=127.0.0.1:3306 0.000000 - util.go:96: 186 GAUGE /mysql_handler_discover/server=127.0.0.1:3306 0.000000 - util.go:96: 187 GAUGE /mysql_handler_mrr_rowid_refills/server=127.0.0.1:3306 0.000000 - util.go:96: 188 GAUGE /mysql_handler_prepare/server=127.0.0.1:3306 12416798.000000 - util.go:96: 189 GAUGE /mysql_feature_xml/server=127.0.0.1:3306 0.000000 - util.go:96: 190 GAUGE /mysql_handler_icp_match/server=127.0.0.1:3306 6263850501.000000 - util.go:96: 191 GAUGE /mysql_feature_timezone/server=127.0.0.1:3306 2.000000 - util.go:96: 192 GAUGE /mysql_handler_commit/server=127.0.0.1:3306 1229364043.000000 - util.go:96: 193 GAUGE /mysql_handler_icp_attempts/server=127.0.0.1:3306 6263850820.000000 - util.go:96: 194 GAUGE /mysql_handler_read_next/server=127.0.0.1:3306 11304267626.000000 - util.go:96: 195 GAUGE /mysql_flush_commands/server=127.0.0.1:3306 2.000000 - util.go:96: 196 GAUGE /mysql_feature_subquery/server=127.0.0.1:3306 19689.000000 - util.go:96: 197 GAUGE /mysql_feature_gis/server=127.0.0.1:3306 0.000000 - util.go:96: 198 GAUGE 
/mysql_handler_read_first/server=127.0.0.1:3306 21981691.000000 - util.go:96: 199 GAUGE /mysql_innodb_adaptive_hash_non_hash_searches/server=127.0.0.1:3306 2067223006.000000 - util.go:96: 200 GAUGE /mysql_innodb_buffer_pool_bytes_data/server=127.0.0.1:3306 131481600.000000 - util.go:96: 201 GAUGE /mysql_handler_read_prev/server=127.0.0.1:3306 1963930.000000 - util.go:96: 202 GAUGE /mysql_handler_read_rnd_next/server=127.0.0.1:3306 5293489605.000000 - util.go:96: 203 GAUGE /mysql_handler_write/server=127.0.0.1:3306 677505.000000 - util.go:96: 204 GAUGE /mysql_handler_savepoint/server=127.0.0.1:3306 0.000000 - util.go:96: 205 GAUGE /mysql_innodb_adaptive_hash_cells/server=127.0.0.1:3306 276671.000000 - util.go:96: 206 GAUGE /mysql_innodb_buffer_pool_bytes_dirty/server=127.0.0.1:3306 2162688.000000 - util.go:96: 207 GAUGE /mysql_innodb_buffer_pool_pages_data/server=127.0.0.1:3306 8025.000000 - util.go:96: 208 GAUGE /mysql_innodb_adaptive_hash_hash_searches/server=127.0.0.1:3306 8730195172.000000 - util.go:96: 209 GAUGE /mysql_handler_update/server=127.0.0.1:3306 5357179.000000 - util.go:96: 210 GAUGE /mysql_innodb_adaptive_hash_heap_buffers/server=127.0.0.1:3306 163.000000 - util.go:96: 211 GAUGE /mysql_handler_read_rnd/server=127.0.0.1:3306 46602021.000000 - util.go:96: 212 GAUGE /mysql_handler_read_rnd_deleted/server=127.0.0.1:3306 75.000000 - util.go:96: 213 GAUGE /mysql_handler_tmp_update/server=127.0.0.1:3306 11613039.000000 - util.go:96: 214 GAUGE /mysql_handler_tmp_write/server=127.0.0.1:3306 88820066.000000 - util.go:96: 215 GAUGE /mysql_innodb_background_log_sync/server=127.0.0.1:3306 1313983.000000 - util.go:96: 216 GAUGE /mysql_innodb_buffer_pool_pages_dirty/server=127.0.0.1:3306 132.000000 - util.go:96: 217 GAUGE /mysql_handler_rollback/server=127.0.0.1:3306 273.000000 - util.go:96: 218 GAUGE /mysql_handler_savepoint_rollback/server=127.0.0.1:3306 0.000000 - util.go:96: 219 GAUGE /mysql_innodb_buffer_pool_read_requests/server=127.0.0.1:3306 
34499933436.000000 - util.go:96: 220 GAUGE /mysql_innodb_buffer_pool_pages_free/server=127.0.0.1:3306 2.000000 - util.go:96: 221 GAUGE /mysql_innodb_buffer_pool_pages_old/server=127.0.0.1:3306 2942.000000 - util.go:96: 222 GAUGE /mysql_innodb_buffer_pool_pages_total/server=127.0.0.1:3306 8191.000000 - util.go:96: 223 GAUGE /mysql_innodb_buffer_pool_pages_flushed/server=127.0.0.1:3306 16815509.000000 - util.go:96: 224 GAUGE /mysql_innodb_current_row_locks/server=127.0.0.1:3306 0.000000 - util.go:96: 225 GAUGE /mysql_innodb_buffer_pool_read_ahead_rnd/server=127.0.0.1:3306 0.000000 - util.go:96: 226 GAUGE /mysql_innodb_data_fsyncs/server=127.0.0.1:3306 13448619.000000 - util.go:96: 227 GAUGE /mysql_innodb_buffer_pool_pages_made_young/server=127.0.0.1:3306 6739112.000000 - util.go:96: 228 GAUGE /mysql_innodb_buffer_pool_read_ahead_evicted/server=127.0.0.1:3306 31126.000000 - util.go:96: 229 GAUGE /mysql_innodb_buffer_pool_reads/server=127.0.0.1:3306 1745575.000000 - util.go:96: 230 GAUGE /mysql_innodb_checkpoint_age/server=127.0.0.1:3306 145006.000000 - util.go:96: 231 GAUGE /mysql_innodb_buffer_pool_read_ahead/server=127.0.0.1:3306 4920963.000000 - util.go:96: 232 GAUGE /mysql_innodb_checkpoint_target_age/server=127.0.0.1:3306 7539162.000000 - util.go:96: 233 GAUGE /mysql_innodb_buffer_pool_write_requests/server=127.0.0.1:3306 84935775.000000 - util.go:96: 234 GAUGE /mysql_innodb_checkpoint_max_age/server=127.0.0.1:3306 7782360.000000 - util.go:96: 235 GAUGE /mysql_innodb_buffer_pool_pages_misc/server=127.0.0.1:3306 164.000000 - util.go:96: 236 GAUGE /mysql_innodb_buffer_pool_pages_lru_flushed/server=127.0.0.1:3306 13048.000000 - util.go:96: 237 GAUGE /mysql_innodb_buffer_pool_pages_made_not_young/server=127.0.0.1:3306 0.000000 - util.go:96: 238 GAUGE /mysql_innodb_buffer_pool_wait_free/server=127.0.0.1:3306 0.000000 - util.go:96: 239 GAUGE /mysql_innodb_dblwr_writes/server=127.0.0.1:3306 208562.000000 - util.go:96: 240 GAUGE 
/mysql_innodb_have_atomic_builtins/server=127.0.0.1:3306 1.000000 - util.go:96: 241 GAUGE /mysql_innodb_ibuf_discarded_delete_marks/server=127.0.0.1:3306 0.000000 - util.go:96: 242 GAUGE /mysql_innodb_dict_tables/server=127.0.0.1:3306 288.000000 - util.go:96: 243 GAUGE /mysql_innodb_data_read/server=127.0.0.1:3306 109548622336.000000 - util.go:96: 244 GAUGE /mysql_innodb_dblwr_pages_written/server=127.0.0.1:3306 16815509.000000 - util.go:96: 245 GAUGE /mysql_innodb_ibuf_merged_deletes/server=127.0.0.1:3306 2107.000000 - util.go:96: 246 GAUGE /mysql_innodb_data_written/server=127.0.0.1:3306 562594477568.000000 - util.go:96: 247 GAUGE /mysql_innodb_ibuf_discarded_inserts/server=127.0.0.1:3306 0.000000 - util.go:96: 248 GAUGE /mysql_innodb_descriptors_memory/server=127.0.0.1:3306 8000.000000 - util.go:96: 249 GAUGE /mysql_innodb_ibuf_free_list/server=127.0.0.1:3306 19.000000 - util.go:96: 250 GAUGE /mysql_innodb_ibuf_discarded_deletes/server=127.0.0.1:3306 0.000000 - util.go:96: 251 GAUGE /mysql_innodb_data_pending_fsyncs/server=127.0.0.1:3306 0.000000 - util.go:96: 252 GAUGE /mysql_innodb_data_pending_reads/server=127.0.0.1:3306 0.000000 - util.go:96: 253 GAUGE /mysql_innodb_deadlocks/server=127.0.0.1:3306 0.000000 - util.go:96: 254 GAUGE /mysql_innodb_ibuf_merged_delete_marks/server=127.0.0.1:3306 17841.000000 - util.go:96: 255 GAUGE /mysql_innodb_data_reads/server=127.0.0.1:3306 6686190.000000 - util.go:96: 256 GAUGE /mysql_innodb_history_list_length/server=127.0.0.1:3306 2339.000000 - util.go:96: 257 GAUGE /mysql_innodb_data_pending_writes/server=127.0.0.1:3306 0.000000 - util.go:96: 258 GAUGE /mysql_innodb_data_writes/server=127.0.0.1:3306 30213416.000000 - util.go:96: 259 GAUGE /mysql_innodb_lsn_flushed/server=127.0.0.1:3306 169256052958.000000 - util.go:96: 260 GAUGE /mysql_innodb_ibuf_size/server=127.0.0.1:3306 1.000000 - util.go:96: 261 GAUGE /mysql_innodb_log_write_requests/server=127.0.0.1:3306 11582262.000000 - util.go:96: 262 GAUGE 
/mysql_innodb_log_writes/server=127.0.0.1:3306 12898582.000000 - util.go:96: 263 GAUGE /mysql_innodb_master_thread_main_flush_loops/server=127.0.0.1:3306 17.000000 - util.go:96: 264 GAUGE /mysql_innodb_master_thread_background_loops/server=127.0.0.1:3306 17.000000 - util.go:96: 265 GAUGE /mysql_innodb_mem_total/server=127.0.0.1:3306 137756672.000000 - util.go:96: 266 GAUGE /mysql_innodb_mem_dictionary/server=127.0.0.1:3306 2431236.000000 - util.go:96: 267 GAUGE /mysql_innodb_lsn_current/server=127.0.0.1:3306 169256052958.000000 - util.go:96: 268 GAUGE /mysql_innodb_ibuf_merged_inserts/server=127.0.0.1:3306 16571.000000 - util.go:96: 269 GAUGE /mysql_innodb_master_thread_10_second_loops/server=127.0.0.1:3306 133759.000000 - util.go:96: 270 GAUGE /mysql_innodb_master_thread_sleeps/server=127.0.0.1:3306 1337603.000000 - util.go:96: 271 GAUGE /mysql_innodb_log_waits/server=127.0.0.1:3306 0.000000 - util.go:96: 272 GAUGE /mysql_innodb_master_thread_1_second_loops/server=127.0.0.1:3306 1337604.000000 - util.go:96: 273 GAUGE /mysql_innodb_max_trx_id/server=127.0.0.1:3306 12779359643.000000 - util.go:96: 274 GAUGE /mysql_innodb_ibuf_merges/server=127.0.0.1:3306 30302.000000 - util.go:96: 275 GAUGE /mysql_innodb_ibuf_segment_size/server=127.0.0.1:3306 21.000000 - util.go:96: 276 GAUGE /mysql_innodb_lsn_last_checkpoint/server=127.0.0.1:3306 169255907952.000000 - util.go:96: 277 GAUGE /mysql_innodb_mem_adaptive_hash/server=127.0.0.1:3306 4904560.000000 - util.go:96: 278 GAUGE /mysql_innodb_mutex_os_waits/server=127.0.0.1:3306 41490.000000 - util.go:96: 279 GAUGE /mysql_innodb_os_log_written/server=127.0.0.1:3306 11515828224.000000 - util.go:96: 280 GAUGE /mysql_innodb_pages_created/server=127.0.0.1:3306 36908.000000 - util.go:96: 281 GAUGE /mysql_innodb_pages_read/server=127.0.0.1:3306 6686188.000000 - util.go:96: 282 GAUGE /mysql_innodb_row_lock_waits/server=127.0.0.1:3306 9995.000000 - util.go:96: 283 GAUGE /mysql_innodb_row_lock_time_avg/server=127.0.0.1:3306 52.000000 - 
util.go:96: 284 GAUGE /mysql_innodb_row_lock_time/server=127.0.0.1:3306 520373.000000 - util.go:96: 285 GAUGE /mysql_innodb_pages_written/server=127.0.0.1:3306 16815509.000000 - util.go:96: 286 GAUGE /mysql_innodb_row_lock_current_waits/server=127.0.0.1:3306 0.000000 - util.go:96: 287 GAUGE /mysql_innodb_mutex_spin_waits/server=127.0.0.1:3306 27901952.000000 - util.go:96: 288 GAUGE /mysql_innodb_os_log_pending_fsyncs/server=127.0.0.1:3306 0.000000 - util.go:96: 289 GAUGE /mysql_innodb_page_size/server=127.0.0.1:3306 16384.000000 - util.go:96: 290 GAUGE /mysql_innodb_purge_trx_id/server=127.0.0.1:3306 12779359593.000000 - util.go:96: 291 GAUGE /mysql_innodb_read_views_memory/server=127.0.0.1:3306 6112.000000 - util.go:96: 292 GAUGE /mysql_innodb_os_log_pending_writes/server=127.0.0.1:3306 0.000000 - util.go:96: 293 GAUGE /mysql_innodb_row_lock_time_max/server=127.0.0.1:3306 32832.000000 - util.go:96: 294 GAUGE /mysql_innodb_mutex_spin_rounds/server=127.0.0.1:3306 21616613.000000 - util.go:96: 295 GAUGE /mysql_innodb_os_log_fsyncs/server=127.0.0.1:3306 13031540.000000 - util.go:96: 296 GAUGE /mysql_innodb_rows_deleted/server=127.0.0.1:3306 803073.000000 - util.go:96: 297 GAUGE /mysql_innodb_purge_undo_no/server=127.0.0.1:3306 0.000000 - util.go:96: 298 GAUGE /mysql_innodb_oldest_view_low_limit_trx_id/server=127.0.0.1:3306 12779359606.000000 - util.go:96: 299 GAUGE /mysql_innodb_s_lock_spin_rounds/server=127.0.0.1:3306 10655402.000000 - util.go:96: 300 GAUGE /mysql_innodb_rows_updated/server=127.0.0.1:3306 5357177.000000 - util.go:96: 301 GAUGE /mysql_key_blocks_not_flushed/server=127.0.0.1:3306 0.000000 - util.go:96: 302 GAUGE /mysql_innodb_truncated_status_writes/server=127.0.0.1:3306 0.000000 - util.go:96: 303 GAUGE /mysql_innodb_x_lock_os_waits/server=127.0.0.1:3306 10086.000000 - util.go:96: 304 GAUGE /mysql_key_writes/server=127.0.0.1:3306 0.000000 - util.go:96: 305 GAUGE /mysql_last_query_cost/server=127.0.0.1:3306 0.000000 - util.go:96: 306 GAUGE 
/mysql_innodb_s_lock_spin_waits/server=127.0.0.1:3306 1646305.000000 - util.go:96: 307 GAUGE /mysql_key_blocks_warm/server=127.0.0.1:3306 0.000000 - util.go:96: 308 GAUGE /mysql_innodb_x_lock_spin_waits/server=127.0.0.1:3306 495539.000000 - util.go:96: 309 GAUGE /mysql_key_write_requests/server=127.0.0.1:3306 0.000000 - util.go:96: 310 GAUGE /mysql_innodb_rows_read/server=127.0.0.1:3306 17879112016.000000 - util.go:96: 311 GAUGE /mysql_key_blocks_unused/server=127.0.0.1:3306 107171.000000 - util.go:96: 312 GAUGE /mysql_max_used_connections/server=127.0.0.1:3306 95.000000 - util.go:96: 313 GAUGE /mysql_key_blocks_used/server=127.0.0.1:3306 0.000000 - util.go:96: 314 GAUGE /mysql_innodb_rows_inserted/server=127.0.0.1:3306 677459.000000 - util.go:96: 315 GAUGE /mysql_key_reads/server=127.0.0.1:3306 0.000000 - util.go:96: 316 GAUGE /mysql_innodb_s_lock_os_waits/server=127.0.0.1:3306 179801.000000 - util.go:96: 317 GAUGE /mysql_innodb_x_lock_spin_rounds/server=127.0.0.1:3306 4386498.000000 - util.go:96: 318 GAUGE /mysql_key_read_requests/server=127.0.0.1:3306 0.000000 - util.go:96: 319 GAUGE /mysql_performance_schema_cond_instances_lost/server=127.0.0.1:3306 0.000000 - util.go:96: 320 GAUGE /mysql_performance_schema_rwlock_instances_lost/server=127.0.0.1:3306 0.000000 - util.go:96: 321 GAUGE /mysql_open_table_definitions/server=127.0.0.1:3306 310.000000 - util.go:96: 322 GAUGE /mysql_performance_schema_file_instances_lost/server=127.0.0.1:3306 0.000000 - util.go:96: 323 GAUGE /mysql_performance_schema_cond_classes_lost/server=127.0.0.1:3306 0.000000 - util.go:96: 324 GAUGE /mysql_performance_schema_file_handles_lost/server=127.0.0.1:3306 0.000000 - util.go:96: 325 GAUGE /mysql_open_streams/server=127.0.0.1:3306 0.000000 - util.go:96: 326 GAUGE /mysql_opened_table_definitions/server=127.0.0.1:3306 277.000000 - util.go:96: 327 GAUGE /mysql_performance_schema_locker_lost/server=127.0.0.1:3306 0.000000 - util.go:96: 328 GAUGE 
/mysql_performance_schema_mutex_instances_lost/server=127.0.0.1:3306 0.000000 - util.go:96: 329 GAUGE /mysql_not_flushed_delayed_rows/server=127.0.0.1:3306 0.000000 - util.go:96: 330 GAUGE /mysql_performance_schema_mutex_classes_lost/server=127.0.0.1:3306 0.000000 - util.go:96: 331 GAUGE /mysql_opened_tables/server=127.0.0.1:3306 433.000000 - util.go:96: 332 GAUGE /mysql_open_tables/server=127.0.0.1:3306 400.000000 - util.go:96: 333 GAUGE /mysql_open_files/server=127.0.0.1:3306 5.000000 - util.go:96: 334 GAUGE /mysql_performance_schema_rwlock_classes_lost/server=127.0.0.1:3306 0.000000 - util.go:96: 335 GAUGE /mysql_performance_schema_table_handles_lost/server=127.0.0.1:3306 0.000000 - util.go:96: 336 GAUGE /mysql_opened_files/server=127.0.0.1:3306 8861710.000000 - util.go:96: 337 GAUGE /mysql_performance_schema_file_classes_lost/server=127.0.0.1:3306 0.000000 - util.go:96: 338 GAUGE /mysql_opened_views/server=127.0.0.1:3306 0.000000 - util.go:96: 339 GAUGE /mysql_qcache_inserts/server=127.0.0.1:3306 0.000000 - util.go:96: 340 GAUGE /mysql_qcache_free_blocks/server=127.0.0.1:3306 0.000000 - util.go:96: 341 GAUGE /mysql_qcache_free_memory/server=127.0.0.1:3306 0.000000 - util.go:96: 342 GAUGE /mysql_performance_schema_table_instances_lost/server=127.0.0.1:3306 0.000000 - util.go:96: 343 GAUGE /mysql_questions/server=127.0.0.1:3306 1224720650.000000 - util.go:96: 344 GAUGE /mysql_select_full_join/server=127.0.0.1:3306 559.000000 - util.go:96: 345 GAUGE /mysql_qcache_total_blocks/server=127.0.0.1:3306 0.000000 - util.go:96: 346 GAUGE /mysql_rows_sent/server=127.0.0.1:3306 5505541906.000000 - util.go:96: 347 GAUGE /mysql_rows_tmp_read/server=127.0.0.1:3306 100358534.000000 - util.go:96: 348 GAUGE /mysql_qcache_hits/server=127.0.0.1:3306 0.000000 - util.go:96: 349 GAUGE /mysql_select_full_range_join/server=127.0.0.1:3306 1831.000000 - util.go:96: 350 GAUGE /mysql_qcache_lowmem_prunes/server=127.0.0.1:3306 0.000000 - util.go:96: 351 GAUGE 
/mysql_qcache_queries_in_cache/server=127.0.0.1:3306 0.000000 - util.go:96: 352 GAUGE /mysql_queries/server=127.0.0.1:3306 3643681642.000000 - util.go:96: 353 GAUGE /mysql_performance_schema_thread_instances_lost/server=127.0.0.1:3306 0.000000 - util.go:96: 354 GAUGE /mysql_qcache_not_cached/server=127.0.0.1:3306 0.000000 - util.go:96: 355 GAUGE /mysql_prepared_stmt_count/server=127.0.0.1:3306 1.000000 - util.go:96: 356 GAUGE /mysql_performance_schema_thread_classes_lost/server=127.0.0.1:3306 0.000000 - util.go:96: 357 GAUGE /mysql_rows_read/server=127.0.0.1:3306 17885689333.000000 - util.go:96: 358 GAUGE /mysql_select_scan/server=127.0.0.1:3306 47775862.000000 - util.go:96: 359 GAUGE /mysql_ssl_callback_cache_hits/server=127.0.0.1:3306 0.000000 - util.go:96: 360 GAUGE /mysql_slave_received_heartbeats/server=127.0.0.1:3306 0.000000 - util.go:96: 361 GAUGE /mysql_slave_heartbeat_period/server=127.0.0.1:3306 0.000000 - util.go:96: 362 GAUGE /mysql_slow_launch_threads/server=127.0.0.1:3306 0.000000 - util.go:96: 363 GAUGE /mysql_ssl_accept_renegotiates/server=127.0.0.1:3306 0.000000 - util.go:96: 364 GAUGE /mysql_ssl_accepts/server=127.0.0.1:3306 0.000000 - util.go:96: 365 GAUGE /mysql_slow_queries/server=127.0.0.1:3306 53.000000 - util.go:96: 366 GAUGE /mysql_slave_retried_transactions/server=127.0.0.1:3306 0.000000 - util.go:96: 367 GAUGE /mysql_sort_rows/server=127.0.0.1:3306 53690107.000000 - util.go:96: 368 GAUGE /mysql_sort_scan/server=127.0.0.1:3306 9059484.000000 - util.go:96: 369 GAUGE /mysql_slave_running/server=127.0.0.1:3306 0.000000 - util.go:96: 370 GAUGE /mysql_slave_open_temp_tables/server=127.0.0.1:3306 0.000000 - util.go:96: 371 GAUGE /mysql_select_range/server=127.0.0.1:3306 160819917.000000 - util.go:96: 372 GAUGE /mysql_sort_merge_passes/server=127.0.0.1:3306 0.000000 - util.go:96: 373 GAUGE /mysql_select_range_check/server=127.0.0.1:3306 0.000000 - util.go:96: 374 GAUGE /mysql_sort_range/server=127.0.0.1:3306 2021523.000000 - util.go:96: 375 
GAUGE /mysql_ssl_client_connects/server=127.0.0.1:3306 0.000000 - util.go:96: 376 GAUGE /mysql_ssl_session_cache_overflows/server=127.0.0.1:3306 0.000000 - util.go:96: 377 GAUGE /mysql_ssl_connect_renegotiates/server=127.0.0.1:3306 0.000000 - util.go:96: 378 GAUGE /mysql_subquery_cache_hit/server=127.0.0.1:3306 443088.000000 - util.go:96: 379 GAUGE /mysql_ssl_ctx_verify_depth/server=127.0.0.1:3306 0.000000 - util.go:96: 380 GAUGE /mysql_ssl_default_timeout/server=127.0.0.1:3306 0.000000 - util.go:96: 381 GAUGE /mysql_ssl_used_session_cache_entries/server=127.0.0.1:3306 0.000000 - util.go:96: 382 GAUGE /mysql_ssl_verify_mode/server=127.0.0.1:3306 0.000000 - util.go:96: 383 GAUGE /mysql_ssl_session_cache_timeouts/server=127.0.0.1:3306 0.000000 - util.go:96: 384 GAUGE /mysql_ssl_ctx_verify_mode/server=127.0.0.1:3306 0.000000 - util.go:96: 385 GAUGE /mysql_ssl_session_cache_hits/server=127.0.0.1:3306 0.000000 - util.go:96: 386 GAUGE /mysql_ssl_session_cache_mode/server=127.0.0.1:3306 0.000000 - util.go:96: 387 GAUGE /mysql_subquery_cache_miss/server=127.0.0.1:3306 277273.000000 - util.go:96: 388 GAUGE /mysql_ssl_finished_accepts/server=127.0.0.1:3306 0.000000 - util.go:96: 389 GAUGE /mysql_ssl_finished_connects/server=127.0.0.1:3306 0.000000 - util.go:96: 390 GAUGE /mysql_ssl_sessions_reused/server=127.0.0.1:3306 0.000000 - util.go:96: 391 GAUGE /mysql_ssl_verify_depth/server=127.0.0.1:3306 0.000000 - util.go:96: 392 GAUGE /mysql_syncs/server=127.0.0.1:3306 14.000000 - util.go:96: 393 GAUGE /mysql_ssl_session_cache_misses/server=127.0.0.1:3306 0.000000 - util.go:96: 394 GAUGE /mysql_ssl_session_cache_size/server=127.0.0.1:3306 0.000000 - util.go:96: 395 GAUGE /mysql_tc_log_max_pages_used/server=127.0.0.1:3306 0.000000 - util.go:96: 396 GAUGE /mysql_threadpool_idle_threads/server=127.0.0.1:3306 0.000000 - util.go:96: 397 GAUGE /mysql_table_locks_immediate/server=127.0.0.1:3306 1222884407.000000 - util.go:96: 398 GAUGE /mysql_threads_connected/server=127.0.0.1:3306 
56.000000 - util.go:96: 399 GAUGE /mysql_threads_running/server=127.0.0.1:3306 1.000000 - util.go:96: 400 GAUGE /mysql_tc_log_page_size/server=127.0.0.1:3306 0.000000 - util.go:96: 401 GAUGE /mysql_threadpool_threads/server=127.0.0.1:3306 0.000000 - util.go:96: 402 GAUGE /mysql_tc_log_page_waits/server=127.0.0.1:3306 0.000000 - util.go:96: 403 GAUGE /mysql_uptime/server=127.0.0.1:3306 1313798.000000 - util.go:96: 404 GAUGE /mysql_table_locks_waited/server=127.0.0.1:3306 0.000000 - util.go:96: 405 GAUGE /mysql_threads_cached/server=127.0.0.1:3306 0.000000 - util.go:96: 406 GAUGE /mysql_threads_created/server=127.0.0.1:3306 202584.000000 - util.go:96: 407 GAUGE /mysql_uptime_since_flush_status/server=127.0.0.1:3306 1313798.000000 ---- PASS: TestCollect (0.01s) -PASS -ok github.com/didi/nightingale/src/modules/monapi/plugins/mysql 0.011s diff --git a/src/modules/monapi/plugins/redis/sample.out b/src/modules/monapi/plugins/redis/sample.out deleted file mode 100644 index 57017aa8..00000000 --- a/src/modules/monapi/plugins/redis/sample.out +++ /dev/null @@ -1,129 +0,0 @@ -=== RUN TestCollect -2021-03-09 18:53:12.445852 WARNING redis/redis.go:198 Server URL found without scheme; please update your configuration file -2021-03-09 18:53:12.446842 DEBUG metric/metric.go:61 unable to convert field to float64 redis_aof_last_bgrewrite_status map[port:6379 replication_role:master server:127.0.0.1] value: ok -2021-03-09 18:53:12.446896 DEBUG metric/metric.go:61 unable to convert field to float64 redis_aof_last_write_status map[port:6379 replication_role:master server:127.0.0.1] value: ok -2021-03-09 18:53:12.446911 DEBUG metric/metric.go:61 unable to convert field to float64 redis_redis_version map[port:6379 replication_role:master server:127.0.0.1] value: 3.2.12 -2021-03-09 18:53:12.446918 DEBUG metric/metric.go:61 unable to convert field to float64 redis_maxmemory_policy map[port:6379 replication_role:master server:127.0.0.1] value: noeviction -2021-03-09 18:53:12.446933 DEBUG 
metric/metric.go:61 unable to convert field to float64 redis_rdb_last_bgsave_status map[port:6379 replication_role:master server:127.0.0.1] value: ok - util.go:96: 0 GAUGE /redis_cmdstat_usec_per_call/command=get,port=6379,replication_role=master,server=127.0.0.1 1.420000 - util.go:96: 1 GAUGE /redis_cmdstat_calls/command=get,port=6379,replication_role=master,server=127.0.0.1 25083.000000 - util.go:96: 2 GAUGE /redis_cmdstat_usec/command=get,port=6379,replication_role=master,server=127.0.0.1 35679.000000 - util.go:96: 3 GAUGE /redis_cmdstat_usec/command=set,port=6379,replication_role=master,server=127.0.0.1 33433.000000 - util.go:96: 4 GAUGE /redis_cmdstat_usec_per_call/command=set,port=6379,replication_role=master,server=127.0.0.1 3.940000 - util.go:96: 5 GAUGE /redis_cmdstat_calls/command=set,port=6379,replication_role=master,server=127.0.0.1 8484.000000 - util.go:96: 6 GAUGE /redis_cmdstat_calls/command=exists,port=6379,replication_role=master,server=127.0.0.1 372387.000000 - util.go:96: 7 GAUGE /redis_cmdstat_usec/command=exists,port=6379,replication_role=master,server=127.0.0.1 326854.000000 - util.go:96: 8 GAUGE /redis_cmdstat_usec_per_call/command=exists,port=6379,replication_role=master,server=127.0.0.1 0.880000 - util.go:96: 9 GAUGE /redis_cmdstat_usec/command=lpush,port=6379,replication_role=master,server=127.0.0.1 1184855.000000 - util.go:96: 10 GAUGE /redis_cmdstat_usec_per_call/command=lpush,port=6379,replication_role=master,server=127.0.0.1 4.100000 - util.go:96: 11 GAUGE /redis_cmdstat_calls/command=lpush,port=6379,replication_role=master,server=127.0.0.1 288752.000000 - util.go:96: 12 GAUGE /redis_cmdstat_calls/command=rpop,port=6379,replication_role=master,server=127.0.0.1 28499902.000000 - util.go:96: 13 GAUGE /redis_cmdstat_usec/command=rpop,port=6379,replication_role=master,server=127.0.0.1 10735311.000000 - util.go:96: 14 GAUGE /redis_cmdstat_usec_per_call/command=rpop,port=6379,replication_role=master,server=127.0.0.1 0.380000 - util.go:96: 15 
GAUGE /redis_cmdstat_usec/command=brpop,port=6379,replication_role=master,server=127.0.0.1 5646355.000000 - util.go:96: 16 GAUGE /redis_cmdstat_usec_per_call/command=brpop,port=6379,replication_role=master,server=127.0.0.1 4.990000 - util.go:96: 17 GAUGE /redis_cmdstat_calls/command=brpop,port=6379,replication_role=master,server=127.0.0.1 1132255.000000 - util.go:96: 18 GAUGE /redis_cmdstat_usec_per_call/command=zadd,port=6379,replication_role=master,server=127.0.0.1 6.070000 - util.go:96: 19 GAUGE /redis_cmdstat_calls/command=zadd,port=6379,replication_role=master,server=127.0.0.1 528916.000000 - util.go:96: 20 GAUGE /redis_cmdstat_usec/command=zadd,port=6379,replication_role=master,server=127.0.0.1 3208335.000000 - util.go:96: 21 GAUGE /redis_cmdstat_usec_per_call/command=zrangebyscore,port=6379,replication_role=master,server=127.0.0.1 67.000000 - util.go:96: 22 GAUGE /redis_cmdstat_calls/command=zrangebyscore,port=6379,replication_role=master,server=127.0.0.1 1.000000 - util.go:96: 23 GAUGE /redis_cmdstat_usec/command=zrangebyscore,port=6379,replication_role=master,server=127.0.0.1 67.000000 - util.go:96: 24 GAUGE /redis_cmdstat_calls/command=zscore,port=6379,replication_role=master,server=127.0.0.1 1185752.000000 - util.go:96: 25 GAUGE /redis_cmdstat_usec/command=zscore,port=6379,replication_role=master,server=127.0.0.1 8707384.000000 - util.go:96: 26 GAUGE /redis_cmdstat_usec_per_call/command=zscore,port=6379,replication_role=master,server=127.0.0.1 7.340000 - util.go:96: 27 GAUGE /redis_cmdstat_usec_per_call/command=hset,port=6379,replication_role=master,server=127.0.0.1 5.600000 - util.go:96: 28 GAUGE /redis_cmdstat_calls/command=hset,port=6379,replication_role=master,server=127.0.0.1 2795.000000 - util.go:96: 29 GAUGE /redis_cmdstat_usec/command=hset,port=6379,replication_role=master,server=127.0.0.1 15656.000000 - util.go:96: 30 GAUGE /redis_cmdstat_calls/command=hget,port=6379,replication_role=master,server=127.0.0.1 1862.000000 - util.go:96: 31 GAUGE 
/redis_cmdstat_usec/command=hget,port=6379,replication_role=master,server=127.0.0.1 6220.000000 - util.go:96: 32 GAUGE /redis_cmdstat_usec_per_call/command=hget,port=6379,replication_role=master,server=127.0.0.1 3.340000 - util.go:96: 33 GAUGE /redis_cmdstat_calls/command=hmget,port=6379,replication_role=master,server=127.0.0.1 5600.000000 - util.go:96: 34 GAUGE /redis_cmdstat_usec/command=hmget,port=6379,replication_role=master,server=127.0.0.1 40432.000000 - util.go:96: 35 GAUGE /redis_cmdstat_usec_per_call/command=hmget,port=6379,replication_role=master,server=127.0.0.1 7.220000 - util.go:96: 36 GAUGE /redis_cmdstat_usec/command=hincrbyfloat,port=6379,replication_role=master,server=127.0.0.1 35022977.000000 - util.go:96: 37 GAUGE /redis_cmdstat_usec_per_call/command=hincrbyfloat,port=6379,replication_role=master,server=127.0.0.1 8.280000 - util.go:96: 38 GAUGE /redis_cmdstat_calls/command=hincrbyfloat,port=6379,replication_role=master,server=127.0.0.1 4231328.000000 - util.go:96: 39 GAUGE /redis_cmdstat_calls/command=hdel,port=6379,replication_role=master,server=127.0.0.1 82.000000 - util.go:96: 40 GAUGE /redis_cmdstat_usec/command=hdel,port=6379,replication_role=master,server=127.0.0.1 284.000000 - util.go:96: 41 GAUGE /redis_cmdstat_usec_per_call/command=hdel,port=6379,replication_role=master,server=127.0.0.1 3.460000 - util.go:96: 42 GAUGE /redis_cmdstat_usec_per_call/command=hkeys,port=6379,replication_role=master,server=127.0.0.1 1.280000 - util.go:96: 43 GAUGE /redis_cmdstat_calls/command=hkeys,port=6379,replication_role=master,server=127.0.0.1 61289.000000 - util.go:96: 44 GAUGE /redis_cmdstat_usec/command=hkeys,port=6379,replication_role=master,server=127.0.0.1 78581.000000 - util.go:96: 45 GAUGE /redis_cmdstat_usec/command=select,port=6379,replication_role=master,server=127.0.0.1 35.000000 - util.go:96: 46 GAUGE /redis_cmdstat_usec_per_call/command=select,port=6379,replication_role=master,server=127.0.0.1 0.710000 - util.go:96: 47 GAUGE 
/redis_cmdstat_calls/command=select,port=6379,replication_role=master,server=127.0.0.1 49.000000 - util.go:96: 48 GAUGE /redis_cmdstat_calls/command=auth,port=6379,replication_role=master,server=127.0.0.1 52586.000000 - util.go:96: 49 GAUGE /redis_cmdstat_usec/command=auth,port=6379,replication_role=master,server=127.0.0.1 68536.000000 - util.go:96: 50 GAUGE /redis_cmdstat_usec_per_call/command=auth,port=6379,replication_role=master,server=127.0.0.1 1.300000 - util.go:96: 51 GAUGE /redis_cmdstat_usec_per_call/command=ping,port=6379,replication_role=master,server=127.0.0.1 0.260000 - util.go:96: 52 GAUGE /redis_cmdstat_calls/command=ping,port=6379,replication_role=master,server=127.0.0.1 36302293.000000 - util.go:96: 53 GAUGE /redis_cmdstat_usec/command=ping,port=6379,replication_role=master,server=127.0.0.1 9316429.000000 - util.go:96: 54 GAUGE /redis_cmdstat_calls/command=info,port=6379,replication_role=master,server=127.0.0.1 10216.000000 - util.go:96: 55 GAUGE /redis_cmdstat_usec/command=info,port=6379,replication_role=master,server=127.0.0.1 613444.000000 - util.go:96: 56 GAUGE /redis_cmdstat_usec_per_call/command=info,port=6379,replication_role=master,server=127.0.0.1 60.050000 - util.go:96: 57 GAUGE /redis_keyspace_keys/database=db0,port=6379,replication_role=master,server=127.0.0.1 20.000000 - util.go:96: 58 GAUGE /redis_keyspace_expires/database=db0,port=6379,replication_role=master,server=127.0.0.1 16.000000 - util.go:96: 59 GAUGE /redis_keyspace_avg_ttl/database=db0,port=6379,replication_role=master,server=127.0.0.1 2053042783.000000 - util.go:96: 60 GAUGE /redis_keyspace_avg_ttl/database=db1,port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 61 GAUGE /redis_keyspace_keys/database=db1,port=6379,replication_role=master,server=127.0.0.1 13.000000 - util.go:96: 62 GAUGE /redis_keyspace_expires/database=db1,port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 63 GAUGE 
/redis_cluster_enabled/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 64 GAUGE /redis_instantaneous_ops_per_sec/port=6379,replication_role=master,server=127.0.0.1 88.000000 - util.go:96: 65 GAUGE /redis_aof_last_rewrite_time_sec/port=6379,replication_role=master,server=127.0.0.1 -1.000000 - util.go:96: 66 GAUGE /redis_sync_partial_err/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 67 GAUGE /redis_used_memory_peak/port=6379,replication_role=master,server=127.0.0.1 35650504.000000 - util.go:96: 68 GAUGE /redis_rdb_bgsave_in_progress/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 69 GAUGE /redis_used_cpu_sys/port=6379,replication_role=master,server=127.0.0.1 1062.770000 - util.go:96: 70 GAUGE /redis_lru_clock/port=6379,replication_role=master,server=127.0.0.1 4674456.000000 - util.go:96: 71 GAUGE /redis_used_memory_lua/port=6379,replication_role=master,server=127.0.0.1 37888.000000 - util.go:96: 72 GAUGE /redis_client_biggest_input_buf/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 73 GAUGE /redis_maxmemory/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 74 GAUGE /redis_used_memory_rss/port=6379,replication_role=master,server=127.0.0.1 6713344.000000 - util.go:96: 75 GAUGE /redis_keyspace_hitrate/port=6379,replication_role=master,server=127.0.0.1 0.952114 - util.go:96: 76 GAUGE /redis_total_net_output_bytes/port=6379,replication_role=master,server=127.0.0.1 711227059.000000 - util.go:96: 77 GAUGE /redis_mem_fragmentation_ratio/port=6379,replication_role=master,server=127.0.0.1 1.090000 - util.go:96: 78 GAUGE /redis_used_cpu_user/port=6379,replication_role=master,server=127.0.0.1 401.600000 - util.go:96: 79 GAUGE /redis_keyspace_misses/port=6379,replication_role=master,server=127.0.0.1 61274.000000 - util.go:96: 80 GAUGE /redis_total_system_memory/port=6379,replication_role=master,server=127.0.0.1 134967951360.000000 - util.go:96: 81 
GAUGE /redis_rejected_connections/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 82 GAUGE /redis_aof_rewrite_scheduled/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 83 GAUGE /redis_blocked_clients/port=6379,replication_role=master,server=127.0.0.1 2.000000 - util.go:96: 84 GAUGE /redis_repl_backlog_histlen/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 85 GAUGE /redis_latest_fork_usec/port=6379,replication_role=master,server=127.0.0.1 295.000000 - util.go:96: 86 GAUGE /redis_connected_slaves/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 87 GAUGE /redis_instantaneous_output_kbps/port=6379,replication_role=master,server=127.0.0.1 0.990000 - util.go:96: 88 GAUGE /redis_used_cpu_sys_children/port=6379,replication_role=master,server=127.0.0.1 6.000000 - util.go:96: 89 GAUGE /redis_migrate_cached_sockets/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 90 GAUGE /redis_rdb_changes_since_last_save/port=6379,replication_role=master,server=127.0.0.1 2330.000000 - util.go:96: 91 GAUGE /redis_pubsub_patterns/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 92 GAUGE /redis_total_commands_processed/port=6379,replication_role=master,server=127.0.0.1 72709632.000000 - util.go:96: 93 GAUGE /redis_master_repl_offset/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 94 GAUGE /redis_used_memory/port=6379,replication_role=master,server=127.0.0.1 6133328.000000 - util.go:96: 95 GAUGE /redis_repl_backlog_first_byte_offset/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 96 GAUGE /redis_rdb_last_bgsave_time_sec/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 97 GAUGE /redis_pubsub_channels/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 98 GAUGE /redis_sync_full/port=6379,replication_role=master,server=127.0.0.1 0.000000 - 
util.go:96: 99 GAUGE /redis_aof_current_rewrite_time_sec/port=6379,replication_role=master,server=127.0.0.1 -1.000000 - util.go:96: 100 GAUGE /redis_loading/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 101 GAUGE /redis_rdb_current_bgsave_time_sec/port=6379,replication_role=master,server=127.0.0.1 -1.000000 - util.go:96: 102 GAUGE /redis_rdb_last_save_time_elapsed/port=6379,replication_role=master,server=127.0.0.1 242.000000 - util.go:96: 103 GAUGE /redis_aof_rewrite_in_progress/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 104 GAUGE /redis_repl_backlog_size/port=6379,replication_role=master,server=127.0.0.1 1048576.000000 - util.go:96: 105 GAUGE /redis_instantaneous_input_kbps/port=6379,replication_role=master,server=127.0.0.1 2.660000 - util.go:96: 106 GAUGE /redis_aof_enabled/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 107 GAUGE /redis_total_connections_received/port=6379,replication_role=master,server=127.0.0.1 52761.000000 - util.go:96: 108 GAUGE /redis_used_cpu_user_children/port=6379,replication_role=master,server=127.0.0.1 27.650000 - util.go:96: 109 GAUGE /redis_total_net_input_bytes/port=6379,replication_role=master,server=127.0.0.1 2238891986.000000 - util.go:96: 110 GAUGE /redis_client_longest_output_list/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 111 GAUGE /redis_evicted_keys/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 112 GAUGE /redis_expired_keys/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 113 GAUGE /redis_repl_backlog_active/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 114 GAUGE /redis_clients/port=6379,replication_role=master,server=127.0.0.1 53.000000 - util.go:96: 115 GAUGE /redis_rdb_last_save_time/port=6379,replication_role=master,server=127.0.0.1 1615286950.000000 - util.go:96: 116 GAUGE 
/redis_sync_partial_ok/port=6379,replication_role=master,server=127.0.0.1 0.000000 - util.go:96: 117 GAUGE /redis_keyspace_hits/port=6379,replication_role=master,server=127.0.0.1 1218313.000000 - util.go:96: 118 GAUGE /redis_uptime/port=6379,replication_role=master,server=127.0.0.1 612872.000000 ---- PASS: TestCollect (0.00s) -PASS -ok github.com/didi/nightingale/src/modules/monapi/plugins/redis 0.009s diff --git a/src/modules/monapi/redisc/funcs.go b/src/modules/monapi/redisc/funcs.go deleted file mode 100644 index eddd1e65..00000000 --- a/src/modules/monapi/redisc/funcs.go +++ /dev/null @@ -1,84 +0,0 @@ -package redisc - -import ( - "github.com/garyburd/redigo/redis" - "github.com/toolkits/pkg/logger" -) - -func HasKey(key string) bool { - rc := RedisConnPool.Get() - defer rc.Close() - - ret, _ := redis.Bool(rc.Do("EXISTS", key)) - - return ret -} - -func INCR(key string) int { - rc := RedisConnPool.Get() - defer rc.Close() - - ret, err := redis.Int(rc.Do("INCR", key)) - if err != nil { - logger.Errorf("incr %s error: %v", key, err) - } - - return ret -} - -func GET(key string) int64 { - rc := RedisConnPool.Get() - defer rc.Close() - - ret, err := redis.Int64(rc.Do("GET", key)) - if err != nil { - logger.Errorf("get %s error: %v", key, err) - } - - return ret -} - -func SetWithTTL(key string, value interface{}, ttl int) error { - rc := RedisConnPool.Get() - defer rc.Close() - - _, err := rc.Do("SET", key, value, "EX", ttl) - return err -} - -func Set(key string, value interface{}) error { - rc := RedisConnPool.Get() - defer rc.Close() - - _, err := rc.Do("SET", key, value) - return err -} - -func DelKey(key string) error { - rc := RedisConnPool.Get() - defer rc.Close() - - _, err := rc.Do("DEL", key) - return err -} - -func HSET(key string, field interface{}, value interface{}) (int64, error) { - rc := RedisConnPool.Get() - defer rc.Close() - - return redis.Int64(rc.Do("HSET", key, field, value)) -} - -func HKEYS(key string) ([]string, error) { - rc := 
RedisConnPool.Get() - defer rc.Close() - - return redis.Strings(rc.Do("HKEYS", key)) -} - -func HDEL(keys []interface{}) (int64, error) { - rc := RedisConnPool.Get() - defer rc.Close() - - return redis.Int64(rc.Do("HDEL", keys...)) -} diff --git a/src/modules/monapi/redisc/redis.go b/src/modules/monapi/redisc/redis.go deleted file mode 100644 index fa1c634c..00000000 --- a/src/modules/monapi/redisc/redis.go +++ /dev/null @@ -1,62 +0,0 @@ -package redisc - -import ( - "log" - "time" - - "github.com/didi/nightingale/src/modules/monapi/config" - - "github.com/garyburd/redigo/redis" - "github.com/toolkits/pkg/logger" -) - -var RedisConnPool *redis.Pool - -func InitRedis() { - cfg := config.Get() - - addr := cfg.Redis.Addr - pass := cfg.Redis.Pass - maxIdle := cfg.Redis.Idle - idleTimeout := 240 * time.Second - - connTimeout := time.Duration(cfg.Redis.Timeout.Conn) * time.Millisecond - readTimeout := time.Duration(cfg.Redis.Timeout.Read) * time.Millisecond - writeTimeout := time.Duration(cfg.Redis.Timeout.Write) * time.Millisecond - - RedisConnPool = &redis.Pool{ - MaxIdle: maxIdle, - IdleTimeout: idleTimeout, - Dial: func() (redis.Conn, error) { - c, err := redis.Dial("tcp", addr, redis.DialConnectTimeout(connTimeout), redis.DialReadTimeout(readTimeout), redis.DialWriteTimeout(writeTimeout)) - if err != nil { - logger.Errorf("conn redis err:%v", err) - return nil, err - } - - if pass != "" { - if _, err := c.Do("AUTH", pass); err != nil { - c.Close() - logger.Errorf("ERR: redis auth fail:%v", err) - return nil, err - } - } - - return c, err - }, - TestOnBorrow: PingRedis, - } -} - -func PingRedis(c redis.Conn, t time.Time) error { - _, err := c.Do("ping") - if err != nil { - log.Println("ERR: ping redis fail", err) - } - return err -} - -func CloseRedis() { - log.Println("INFO: closing redis...") - RedisConnPool.Close() -} diff --git a/src/modules/monapi/scache/collect_cache.go b/src/modules/monapi/scache/collect_cache.go deleted file mode 100644 index 
93f2b9f2..00000000 --- a/src/modules/monapi/scache/collect_cache.go +++ /dev/null @@ -1,41 +0,0 @@ -package scache - -import ( - "sync" - - "github.com/didi/nightingale/src/models" -) - -type CollectCacheMap struct { - sync.RWMutex - Data map[string]*models.Collect -} - -var CollectCache *CollectCacheMap - -func NewCollectCache() *CollectCacheMap { - return &CollectCacheMap{Data: make(map[string]*models.Collect)} -} - -func (c *CollectCacheMap) GetBy(endpoint string) *models.Collect { - c.RLock() - defer c.RUnlock() - - return c.Data[endpoint] -} - -func (c *CollectCacheMap) Set(endpoint string, collect *models.Collect) { - c.Lock() - defer c.Unlock() - - c.Data[endpoint] = collect - return -} - -func (c *CollectCacheMap) SetAll(strasMap map[string]*models.Collect) { - c.Lock() - defer c.Unlock() - - c.Data = strasMap - return -} diff --git a/src/modules/monapi/scache/init.go b/src/modules/monapi/scache/init.go deleted file mode 100644 index 28f3dd0a..00000000 --- a/src/modules/monapi/scache/init.go +++ /dev/null @@ -1,53 +0,0 @@ -package scache - -import ( - "context" - "strconv" - - "github.com/didi/nightingale/src/common/report" - "github.com/didi/nightingale/src/modules/monapi/config" - - "github.com/toolkits/pkg/logger" -) - -var CollectRuleCache *collectRuleCache -var JudgeHashRing *ConsistentHashRing -var ActiveJudgeNode = NewNodeMap() - -const CHECK_INTERVAL = 9 - -func Init() { - // 初始化默认参数 - StraCache = NewStraCache() - CollectCache = NewCollectCache() - AggrCalcStraCache = NewAggrCalcStraCache() - - InitJudgeHashRing() - - CollectRuleCache = NewCollectRuleCache() - CollectRuleCache.Start(context.Background()) - - go CheckJudgeNodes() - go SyncStras() - go SyncCollects() - go CleanCollectLoop() - go CleanStraLoop() - go SyncAggrCalcStras() -} - -func InitJudgeHashRing() { - JudgeHashRing = NewConsistentHashRing(int32(config.JudgesReplicas), []string{}) - - judges, err := report.GetAlive("judge", "rdb") - if err != nil { - logger.Warning("get judge err:", 
err) - } - - judgeNodes := []string{} - for _, j := range judges { - if j.Active { - judgeNodes = append(judgeNodes, strconv.FormatInt(j.Id, 10)) - } - } - JudgeHashRing = NewConsistentHashRing(int32(config.JudgesReplicas), judgeNodes) -} diff --git a/src/modules/monapi/tools/user.go b/src/modules/monapi/tools/user.go deleted file mode 100644 index 30a93cb1..00000000 --- a/src/modules/monapi/tools/user.go +++ /dev/null @@ -1,39 +0,0 @@ -package tools - -import ( - "time" - - "github.com/didi/nightingale/src/models" - - "github.com/toolkits/pkg/cache" - "github.com/toolkits/pkg/logger" -) - -func UsernameByUUID(uuid string) string { - if uuid == "" { - return "" - } - - var username string - if err := cache.Get("uuid."+uuid, &username); err == nil { - return username - } - - value := models.UsernameByUUID(uuid) - - if value != "" { - cache.Set("uuid."+uuid, value, time.Hour) - } else { - logger.Warningf("cannot get username by uuid:%v", uuid) - } - - return value -} - -func UserByUUID(uuid string) *models.User { - user, err := models.UserGet("uuid=?", uuid) - if err != nil { - logger.Warningf("cannot get username by uuid:%v err:%v", uuid, err) - } - return user -} diff --git a/src/modules/prober/cache/cache.go b/src/modules/prober/cache/cache.go index bd20cadc..187dfa6e 100644 --- a/src/modules/prober/cache/cache.go +++ b/src/modules/prober/cache/cache.go @@ -3,7 +3,7 @@ package cache import ( "context" - "github.com/didi/nightingale/src/modules/prober/config" + "github.com/didi/nightingale/v4/src/modules/prober/config" ) var CollectRule *CollectRuleCache // collectrule.go diff --git a/src/modules/prober/cache/collectrule.go b/src/modules/prober/cache/collectrule.go index 84a661c5..f8b591b0 100644 --- a/src/modules/prober/cache/collectrule.go +++ b/src/modules/prober/cache/collectrule.go @@ -3,18 +3,17 @@ package cache import ( "context" "fmt" - "math/rand" "sync" "time" - "github.com/didi/nightingale/src/common/address" - 
"github.com/didi/nightingale/src/common/identity" - "github.com/didi/nightingale/src/common/report" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/prober/config" - "github.com/didi/nightingale/src/toolkits/stats" + "github.com/didi/nightingale/v4/src/common/client" + "github.com/didi/nightingale/v4/src/common/identity" + "github.com/didi/nightingale/v4/src/common/report" + "github.com/didi/nightingale/v4/src/common/stats" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/prober/config" + "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/net/httplib" ) type CollectRuleCache struct { @@ -108,37 +107,18 @@ type collectRulesResp struct { } func (p *CollectRuleCache) syncCollectRule() error { - addrs := address.GetHTTPAddresses(p.Mod) - if len(addrs) == 0 { - return fmt.Errorf("empty config addr") - } - - var resp collectRulesResp - perm := rand.Perm(len(addrs)) - for i := range perm { - ident, err := identity.GetIdent() - if err != nil { - return fmt.Errorf("getIdent err %s", err) - } - url := fmt.Sprintf("http://%s/v1/mon/collect-rules/endpoints/%s:%s/remote", - addrs[perm[i]], ident, report.Config.HTTPPort) - if err = httplib.Get(url).SetTimeout(p.timeout). 
- Header("X-Srv-Token", p.token).ToJSON(&resp); err != nil { - logger.Warningf("get %s collect rule from remote failed, error:%v", url, err) - stats.Counter.Set("collectrule.get.err", 1) - continue - } - - if resp.Err != "" { - logger.Warningf("get collect rule from remote failed, error:%v", resp.Err) - stats.Counter.Set("collectrule.get.err", 1) - continue - } + ident, err := identity.GetIdent() + if err != nil { + return fmt.Errorf("getIdent err %s", err) + } - if len(resp.Data) > 0 { - break - } + endpoint := ident + ":" + report.Config.HTTPPort + var resp models.CollectRuleRpcResp + err = client.GetCli("server").Call("Server.GetProberCollectBy", endpoint, &resp) + if err != nil { + client.CloseCli() + return fmt.Errorf("Server.GetProberCollectBy err:%v", err) } collectRuleCount := len(resp.Data) diff --git a/src/modules/prober/cache/history.go b/src/modules/prober/cache/history.go index b636e688..d64a92aa 100644 --- a/src/modules/prober/cache/history.go +++ b/src/modules/prober/cache/history.go @@ -4,7 +4,7 @@ import ( "sync" "time" - "github.com/didi/nightingale/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/dataobj" ) func NewHistory() *history { diff --git a/src/modules/prober/config/config.go b/src/modules/prober/config/config.go index 9cc3a99f..d58d320a 100644 --- a/src/modules/prober/config/config.go +++ b/src/modules/prober/config/config.go @@ -5,11 +5,11 @@ import ( "fmt" "strconv" - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/common/loggeri" - "github.com/didi/nightingale/src/common/report" + "github.com/didi/nightingale/v4/src/common/address" + "github.com/didi/nightingale/v4/src/common/loggeri" + "github.com/didi/nightingale/v4/src/common/report" - // "github.com/didi/nightingale/src/modules/prober/backend/transfer" + // "github.com/didi/nightingale/v4/src/modules/prober/backend/transfer" "github.com/spf13/viper" "github.com/toolkits/pkg/file" diff --git a/src/modules/prober/config/plugin.go 
b/src/modules/prober/config/plugin.go index 9485651b..08039c5f 100644 --- a/src/modules/prober/config/plugin.go +++ b/src/modules/prober/config/plugin.go @@ -6,9 +6,10 @@ import ( "path/filepath" "strings" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/prober/expr" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/prober/expr" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/influxdata/telegraf" "github.com/toolkits/pkg/logger" "gopkg.in/yaml.v2" diff --git a/src/modules/prober/core/common.go b/src/modules/prober/core/common.go index b49fdec5..7cf2660a 100644 --- a/src/modules/prober/core/common.go +++ b/src/modules/prober/core/common.go @@ -3,7 +3,7 @@ package core import ( "strings" - "github.com/didi/nightingale/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/dataobj" ) func NewMetricValue(metric string, val interface{}, dataType string, tags ...string) *dataobj.MetricValue { diff --git a/src/modules/prober/core/push.go b/src/modules/prober/core/push.go index e69bb5bd..13353942 100644 --- a/src/modules/prober/core/push.go +++ b/src/modules/prober/core/push.go @@ -10,12 +10,12 @@ import ( "reflect" "time" + "github.com/didi/nightingale/v4/src/common/address" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/prober/cache" + "github.com/toolkits/pkg/logger" "github.com/ugorji/go/codec" - - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/prober/cache" ) func Push(metricItems []*dataobj.MetricValue) { @@ -48,7 +48,7 @@ func Push(metricItems []*dataobj.MetricValue) { items = append(items, item) } - addrs := address.GetRPCAddresses("transfer") + addrs := address.GetRPCAddresses("server") count := len(addrs) retry := 0 for { @@ 
-100,7 +100,7 @@ func rpcCall(addr string, items []*dataobj.MetricValue) (dataobj.TransferResp, e done := make(chan error, 1) go func() { - err := client.Call("Transfer.Push", items, &reply) + err := client.Call("Server.Push", items, &reply) done <- err }() diff --git a/src/modules/prober/expr/expr.go b/src/modules/prober/expr/expr.go index b0f85d0f..d8070596 100644 --- a/src/modules/prober/expr/expr.go +++ b/src/modules/prober/expr/expr.go @@ -7,7 +7,8 @@ import ( "go/token" "strconv" - "github.com/didi/nightingale/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/toolkits/pkg/logger" ) diff --git a/src/modules/prober/http/http_server.go b/src/modules/prober/http/http_server.go index e4c50a3a..15ca774f 100644 --- a/src/modules/prober/http/http_server.go +++ b/src/modules/prober/http/http_server.go @@ -8,11 +8,11 @@ import ( "strings" "time" - "github.com/gin-gonic/gin" + "github.com/didi/nightingale/v4/src/common/address" + "github.com/didi/nightingale/v4/src/common/middleware" + "github.com/didi/nightingale/v4/src/modules/prober/config" - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/common/middleware" - "github.com/didi/nightingale/src/modules/prober/config" + "github.com/gin-gonic/gin" ) var srv = &http.Server{ diff --git a/src/modules/prober/http/router.go b/src/modules/prober/http/router.go index e924b27f..67e1b365 100644 --- a/src/modules/prober/http/router.go +++ b/src/modules/prober/http/router.go @@ -4,7 +4,8 @@ import ( "fmt" "os" - "github.com/didi/nightingale/src/modules/prober/cache" + "github.com/didi/nightingale/v4/src/modules/prober/cache" + "github.com/gin-contrib/pprof" "github.com/gin-gonic/gin" ) diff --git a/src/modules/prober/http/router_funcs.go b/src/modules/prober/http/router_funcs.go index 2c9afe5d..c5ac497b 100644 --- a/src/modules/prober/http/router_funcs.go +++ b/src/modules/prober/http/router_funcs.go @@ -4,11 +4,11 @@ import ( "fmt" "strconv" + 
"github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/models" + "github.com/gin-gonic/gin" "github.com/toolkits/pkg/errors" - - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/toolkits/i18n" ) func dangerous(v interface{}) { diff --git a/src/modules/prober/manager/accumulator/accumulator.go b/src/modules/prober/manager/accumulator/accumulator.go index 6b1a6731..e7085993 100644 --- a/src/modules/prober/manager/accumulator/accumulator.go +++ b/src/modules/prober/manager/accumulator/accumulator.go @@ -6,8 +6,9 @@ import ( "sync" "time" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/prober/manager/metric" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/prober/manager/metric" + "github.com/influxdata/telegraf" "github.com/toolkits/pkg/logger" ) diff --git a/src/modules/prober/manager/collectrule.go b/src/modules/prober/manager/collectrule.go index df68c33a..d48d1ff9 100644 --- a/src/modules/prober/manager/collectrule.go +++ b/src/modules/prober/manager/collectrule.go @@ -5,11 +5,12 @@ import ( "strconv" "sync" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/prober/config" - "github.com/didi/nightingale/src/modules/prober/manager/accumulator" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/prober/config" + "github.com/didi/nightingale/v4/src/modules/prober/manager/accumulator" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/influxdata/telegraf" "github.com/toolkits/pkg/logger" ) diff --git a/src/modules/prober/manager/manager.go b/src/modules/prober/manager/manager.go index 4212f50d..a313ec88 100644 --- 
a/src/modules/prober/manager/manager.go +++ b/src/modules/prober/manager/manager.go @@ -7,11 +7,12 @@ import ( "log" "time" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/prober/cache" - "github.com/didi/nightingale/src/modules/prober/config" - "github.com/didi/nightingale/src/modules/prober/core" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/prober/cache" + "github.com/didi/nightingale/v4/src/modules/prober/config" + "github.com/didi/nightingale/v4/src/modules/prober/core" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/influxdata/telegraf" "github.com/toolkits/pkg/logger" ) diff --git a/src/modules/prober/manager/manager_test.go b/src/modules/prober/manager/manager_test.go index 153929c6..d113d0f3 100644 --- a/src/modules/prober/manager/manager_test.go +++ b/src/modules/prober/manager/manager_test.go @@ -9,9 +9,10 @@ import ( "testing" "time" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/prober/config" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/prober/config" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs/prometheus" ) diff --git a/src/modules/prober/prober.go b/src/modules/prober/prober.go index da581336..364da73f 100644 --- a/src/modules/prober/prober.go +++ b/src/modules/prober/prober.go @@ -8,19 +8,19 @@ import ( "os/signal" "syscall" - "github.com/didi/nightingale/src/common/identity" - "github.com/didi/nightingale/src/common/loggeri" - "github.com/didi/nightingale/src/common/report" - "github.com/didi/nightingale/src/toolkits/stats" - - "github.com/didi/nightingale/src/modules/prober/cache" - 
"github.com/didi/nightingale/src/modules/prober/config" - "github.com/didi/nightingale/src/modules/prober/core" - "github.com/didi/nightingale/src/modules/prober/http" - "github.com/didi/nightingale/src/modules/prober/manager" - - "github.com/didi/nightingale/src/modules/monapi/collector" - _ "github.com/didi/nightingale/src/modules/monapi/plugins/all" + "github.com/didi/nightingale/v4/src/common/identity" + "github.com/didi/nightingale/v4/src/common/loggeri" + "github.com/didi/nightingale/v4/src/common/report" + "github.com/didi/nightingale/v4/src/common/stats" + + "github.com/didi/nightingale/v4/src/modules/prober/cache" + "github.com/didi/nightingale/v4/src/modules/prober/config" + "github.com/didi/nightingale/v4/src/modules/prober/core" + "github.com/didi/nightingale/v4/src/modules/prober/http" + "github.com/didi/nightingale/v4/src/modules/prober/manager" + + "github.com/didi/nightingale/v4/src/modules/server/collector" + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/all" _ "github.com/go-sql-driver/mysql" "github.com/gin-gonic/gin" @@ -66,7 +66,7 @@ func main() { loggeri.Init(cfg.Logger) go stats.Init("n9e.prober") - go report.Init(cfg.Report, "rdb") + go report.Init(cfg.Report) cache.Init(ctx) diff --git a/src/modules/rdb/config/const.go b/src/modules/rdb/config/const.go deleted file mode 100644 index 0f01939d..00000000 --- a/src/modules/rdb/config/const.go +++ /dev/null @@ -1,8 +0,0 @@ -package config - -const ( - SMS_QUEUE_NAME = "/queue/rdb/sms" - MAIL_QUEUE_NAME = "/queue/rdb/mail" - VOICE_QUEUE_NAME = "/queue/rdb/voice" - IM_QUEUE_NAME = "/queue/rdb/im" -) diff --git a/src/modules/rdb/config/yaml.go b/src/modules/rdb/config/yaml.go deleted file mode 100644 index 636bbcfa..00000000 --- a/src/modules/rdb/config/yaml.go +++ /dev/null @@ -1,187 +0,0 @@ -package config - -import ( - "fmt" - - "github.com/toolkits/pkg/file" - - "github.com/didi/nightingale/src/common/loggeri" - "github.com/didi/nightingale/src/toolkits/i18n" -) - -type ConfigT 
struct { - Logger loggeri.Config `yaml:"logger"` - HTTP httpSection `yaml:"http"` - LDAP ldapSection `yaml:"ldap"` - SSO ssoSection `yaml:"sso"` - Tokens []string `yaml:"tokens"` - Redis redisSection `yaml:"redis"` - Sender map[string]senderSection `yaml:"sender"` - RabbitMQ rabbitmqSection `yaml:"rabbitmq"` - WeChat wechatSection `yaml:"wechat"` - I18n i18n.I18nSection `yaml:"i18n"` - Auth authSection `yaml:"auth"` - Webhook []webhook `yaml:"webhook"` -} - -type webhook struct { - Addr string `yaml:"addr"` - Token string `yaml:"token"` -} - -type authSection struct { - Captcha bool `yaml:"captcha"` - ExtraMode AuthExtraSection `yaml:"extraMode"` -} - -type AuthExtraSection struct { - Enable bool `yaml:"enable"` - Debug bool `yaml:"debug" description:"debug"` - DebugUser string `yaml:"debugUser" description:"debug username"` - WhiteList bool `yaml:"whiteList"` - FrozenDays int `yaml:"frozenDays"` - WritenOffDays int `yaml:"writenOffDays"` -} - -type wechatSection struct { - CorpID string `yaml:"corp_id"` - AgentID int `yaml:"agent_id"` - Secret string `yaml:"secret"` -} - -type ssoSection struct { - Enable bool `yaml:"enable"` - RedirectURL string `yaml:"redirectURL"` - SsoAddr string `yaml:"ssoAddr"` - ClientId string `yaml:"clientId"` - ClientSecret string `yaml:"clientSecret"` - ApiKey string `yaml:"apiKey"` - StateExpiresIn int64 `yaml:"stateExpiresIn"` - CoverAttributes bool `yaml:"coverAttributes"` - Attributes struct { - Dispname string `yaml:"dispname"` - Phone string `yaml:"phone"` - Email string `yaml:"email"` - Im string `yaml:"im"` - } `yaml:"attributes"` -} - -type httpSection struct { - Mode string `yaml:"mode"` - Session SessionSection `yaml:"session"` -} - -type SessionSection struct { - CookieName string `yaml:"cookieName"` - SidLength int `yaml:"sidLength"` - HttpOnly bool `yaml:"httpOnly"` - Domain string `yaml:"domain"` - GcInterval int64 `yaml:"gcInterval"` - CookieLifetime int64 `yaml:"cookieLifetime"` - Storage string `yaml:"storage" 
description:"mem|db(defualt)"` -} - -type ldapSection struct { - DefaultUse bool `yaml:"defaultUse"` - Host string `yaml:"host"` - Port int `yaml:"port"` - BaseDn string `yaml:"baseDn"` - BindUser string `yaml:"bindUser"` - BindPass string `yaml:"bindPass"` - AuthFilter string `yaml:"authFilter"` - Attributes ldapAttributes `yaml:"attributes"` - CoverAttributes bool `yaml:"coverAttributes"` - TLS bool `yaml:"tls"` - StartTLS bool `yaml:"startTLS"` -} - -type ldapAttributes struct { - Dispname string `yaml:"dispname"` - Phone string `yaml:"phone"` - Email string `yaml:"email"` - Im string `yaml:"im"` -} - -type senderSection struct { - Way string `yaml:"way"` - Worker int `yaml:"worker"` - API string `yaml:"api"` -} - -type redisSection struct { - Enable bool `yaml:"enable"` - Addr string `yaml:"addr"` - Pass string `yaml:"pass"` - Idle int `yaml:"idle"` - Timeout timeoutSection `yaml:"timeout"` -} - -type timeoutSection struct { - Conn int `yaml:"conn"` - Read int `yaml:"read"` - Write int `yaml:"write"` -} - -type rabbitmqSection struct { - Enable bool `yaml:"enable"` - Addr string `yaml:"addr"` - Queue string `yaml:"queue"` -} - -var Config *ConfigT - -// Parse configuration file -func Parse() error { - ymlFile := getYmlFile() - if ymlFile == "" { - return fmt.Errorf("configuration file not found") - } - - var c ConfigT - err := file.ReadYaml(ymlFile, &c) - if err != nil { - return fmt.Errorf("cannot read yml[%s]: %v", ymlFile, err) - } - - Config = &c - fmt.Println("config.file:", ymlFile) - - if Config.I18n.DictPath == "" { - Config.I18n.DictPath = "etc/dict.json" - } - - if Config.I18n.Lang == "" { - Config.I18n.Lang = "zh" - } - - if err = parseOps(); err != nil { - return err - } - - // if Config.HTTP.Session.CookieLifetime == 0 { - // Config.HTTP.Session.CookieLifetime = 24 * 3600 - // } - - if Config.HTTP.Session.GcInterval == 0 { - Config.HTTP.Session.GcInterval = 60 - } - - if Config.HTTP.Session.SidLength == 0 { - Config.HTTP.Session.SidLength = 32 - } 
- return nil -} - -func getYmlFile() string { - yml := "etc/rdb.local.yml" - if file.IsExist(yml) { - return yml - } - - yml = "etc/rdb.yml" - if file.IsExist(yml) { - return yml - } - - return "" -} diff --git a/src/modules/rdb/cron/sender_init.go b/src/modules/rdb/cron/sender_init.go deleted file mode 100644 index 5a3b8f2f..00000000 --- a/src/modules/rdb/cron/sender_init.go +++ /dev/null @@ -1,21 +0,0 @@ -package cron - -import "github.com/didi/nightingale/src/modules/rdb/config" - -var ( - SmsWorkerChan chan int - MailWorkerChan chan int - VoiceWorkerChan chan int - ImWorkerChan chan int -) - -func InitWorker() { - if !config.Config.Redis.Enable { - return - } - - SmsWorkerChan = make(chan int, config.Config.Sender["sms"].Worker) - MailWorkerChan = make(chan int, config.Config.Sender["mail"].Worker) - VoiceWorkerChan = make(chan int, config.Config.Sender["voice"].Worker) - ImWorkerChan = make(chan int, config.Config.Sender["im"].Worker) -} diff --git a/src/modules/rdb/http/router.go b/src/modules/rdb/http/router.go deleted file mode 100644 index 4f2e885a..00000000 --- a/src/modules/rdb/http/router.go +++ /dev/null @@ -1,235 +0,0 @@ -package http - -import ( - "github.com/gin-gonic/gin" -) - -func Config(r *gin.Engine) { - - notLogin := r.Group("/api/rdb") - { - notLogin.GET("/ping", ping) - notLogin.GET("/ldap/used", ldapUsed) - notLogin.GET("/ops/global", globalOpsGet) - notLogin.GET("/ops/local", localOpsGet) - notLogin.GET("/roles/global", globalRoleGet) - notLogin.GET("/roles/local", localRoleGet) - notLogin.POST("/users/invite", userInvitePost) - - notLogin.POST("/auth/send-login-code", sendLoginCode) - notLogin.POST("/auth/send-rst-code", sendRstCode) - notLogin.POST("/auth/rst-password", rstPassword) - notLogin.GET("/auth/captcha", captchaGet) - - notLogin.GET("/v2/nodes", nodeGets) - notLogin.GET("/pwd-rules", pwdRulesGet) - notLogin.GET("/counter", counterGet) - - } - - sessionStarted := r.Group("/api/rdb").Use(shouldStartSession()) - { - 
sessionStarted.POST("/auth/login", login) - sessionStarted.GET("/auth/logout", logout) - sessionStarted.GET("/auth/v2/authorize", authAuthorizeV2) - sessionStarted.GET("/auth/v2/callback", authCallbackV2) - sessionStarted.GET("/auth/v2/logout", logoutV2) - } - - hbs := r.Group("/api/hbs") - { - hbs.POST("/heartbeat", heartBeat) - hbs.GET("/instances", instanceGets) - } - - rootLogin := r.Group("/api/rdb").Use(shouldBeRoot()) - { - rootLogin.GET("/configs/smtp", smtpConfigsGet) - rootLogin.POST("/configs/smtp/test", smtpTest) - rootLogin.PUT("/configs/smtp", smtpConfigsPut) - - rootLogin.GET("/configs/auth", authConfigsGet) - rootLogin.PUT("/configs/auth", authConfigsPut) - rootLogin.POST("/auth/white-list", whiteListPost) - rootLogin.GET("/auth/white-list", whiteListsGet) - rootLogin.GET("/auth/white-list/:id", whiteListGet) - rootLogin.PUT("/auth/white-list/:id", whiteListPut) - rootLogin.DELETE("/auth/white-list/:id", whiteListDel) - - rootLogin.GET("/log/login", loginLogGets) - rootLogin.GET("/log/operation", operationLogGets) - - rootLogin.POST("/roles", roleAddPost) - rootLogin.PUT("/role/:id", rolePut) - rootLogin.DELETE("/role/:id", roleDel) - rootLogin.GET("/role/:id", roleDetail) - rootLogin.GET("/role/:id/users", roleGlobalUsersGet) - rootLogin.PUT("/role/:id/users/bind", roleGlobalUsersBind) - rootLogin.PUT("/role/:id/users/unbind", roleGlobalUsersUnbind) - - rootLogin.POST("/users", userAddPost) - rootLogin.GET("/user/:id/profile", userProfileGet) - rootLogin.PUT("/user/:id/profile", userProfilePut) - rootLogin.PUT("/user/:id/password", userPasswordPut) - rootLogin.DELETE("/user/:id", userDel) - - rootLogin.POST("/node-cates", nodeCatePost) - rootLogin.PUT("/node-cate/:id", nodeCatePut) - rootLogin.DELETE("/node-cate/:id", nodeCateDel) - rootLogin.POST("/node-cates/fields", nodeCateFieldNew) - rootLogin.PUT("/node-cates/field/:id", nodeCateFieldPut) - rootLogin.DELETE("/node-cates/field/:id", nodeCateFieldDel) - - rootLogin.GET("/nodes/trash", 
nodeTrashGets) - rootLogin.PUT("/nodes/trash/recycle", nodeTrashRecycle) - - rootLogin.POST("/sso/clients", ssoClientsPost) - rootLogin.GET("/sso/clients", ssoClientsGet) - rootLogin.GET("/sso/clients/:clientId", ssoClientGet) - rootLogin.PUT("/sso/clients/:clientId", ssoClientPut) - rootLogin.DELETE("/sso/clients/:clientId", ssoClientDel) - - rootLogin.GET("/resources/tenant-rank", tenantResourcesCountRank) - rootLogin.GET("/resources/project-rank", projectResourcesCountRank) - - rootLogin.GET("/root/users", userListGet) - rootLogin.GET("/root/teams/all", teamAllGet) - rootLogin.GET("/root/node-cates", nodeCateGets) - - } - - userLogin := r.Group("/api/rdb").Use(shouldBeLogin()) - { - userLogin.GET("/resoplogs", operationLogResGets) - - userLogin.GET("/self/profile", selfProfileGet) - userLogin.PUT("/self/profile", selfProfilePut) - userLogin.GET("/self/token", selfTokenGets) - userLogin.POST("/self/token", selfTokenPost) - userLogin.PUT("/self/token", selfTokenPut) - userLogin.GET("/self/perms/global", permGlobalOps) - userLogin.GET("/self/perms/local/node/:id", permLocalOps) - - notLogin.PUT("/self/password", selfPasswordPut) - - userLogin.GET("/users", userListGet) - userLogin.GET("/users/invite", userInviteGet) - - userLogin.GET("/teams/all", teamAllGet) - userLogin.GET("/teams/mine", teamMineGet) - userLogin.POST("/teams", teamAddPost) - userLogin.PUT("/team/:id", teamPut) - userLogin.GET("/team/:id", teamDetail) - userLogin.PUT("/team/:id/users/bind", teamUserBind) - userLogin.PUT("/team/:id/users/unbind", teamUserUnbind) - userLogin.DELETE("/team/:id", teamDel) - - userLogin.GET("/node-cates", nodeCateGets) - userLogin.GET("/node-cates/fields", nodeCateFieldGets) - userLogin.GET("/node-cates/field/:id", nodeCateFieldGet) - - userLogin.POST("/nodes", nodePost) - userLogin.GET("/nodes", nodeGets) - userLogin.GET("/node/:id", nodeGet) - userLogin.PUT("/node/:id", nodePut) - userLogin.DELETE("/node/:id", nodeDel) - userLogin.GET("/node/:id/fields", 
nodeFieldGets) - userLogin.PUT("/node/:id/fields", nodeFieldPuts) - userLogin.GET("/node/:id/roles", rolesUnderNodeGets) - userLogin.POST("/node/:id/roles", rolesUnderNodePost) - userLogin.DELETE("/node/:id/roles", rolesUnderNodeDel) - userLogin.DELETE("/node/:id/roles/try", rolesUnderNodeDelTry) - userLogin.GET("/node/:id/resources", resourceUnderNodeGet) - userLogin.GET("/node/:id/resources/cate-count", renderNodeResourcesCountByCate) - userLogin.POST("/node/:id/resources/bind", resourceBindNode) - userLogin.POST("/node/:id/resources/unbind", resourceUnbindNode) - userLogin.PUT("/node/:id/resources/note", resourceUnderNodeNotePut) - userLogin.PUT("/node/:id/resources/labels", resourceUnderNodeLabelsPut) - - userLogin.GET("/tree", treeUntilLeafGets) - userLogin.GET("/tree/projs", treeUntilProjectGets) - userLogin.GET("/tree/orgs", treeUntilOrganizationGets) - - userLogin.GET("/resources/search", resourceSearchGet) - userLogin.PUT("/resources/note", resourceNotePut) - userLogin.PUT("/resources/note/try", resourceNotePutTry) - userLogin.GET("/resources/bindings", resourceBindingsGet) - userLogin.GET("/resources/orphan", resourceOrphanGet) - - userLogin.GET("/resources/cate-count", renderAllResourcesCountByCate) - - // 是否在某个节点上有权限做某个操作(即资源权限点) - userLogin.GET("/can-do-node-op", v1CandoNodeOp) - // 同时校验多个操作权限点 - userLogin.GET("/can-do-node-ops", v1CandoNodeOps) - } - - v1 := r.Group("/v1/rdb").Use(shouldBeService()) - { - // 获取这个节点下的所有资源,跟给前端的API(/api/rdb/node/:id/resources会根据当前登陆用户获取有权限看到的资源列表)不同 - v1.GET("/node/:id/resources", v1ResourcesUnderNodeGet) - // RDB作为一个类似CMDB的东西,接收各个子系统注册过来的资源,其他资源都是依托于项目创建的,RDB会根据nid自动挂载资源到相应节点 - v1.POST("/resources/register", v1ResourcesRegisterPost) - // 资源销毁的时候,需要从库里清掉,同时需要把节点挂载关系也删除,一个资源可能挂载在多个节点,都要统统干掉 - v1.POST("/resources/unregister", v1ResourcesUnregisterPost) - - v1.POST("/containers/bind", v1ContainersBindPost) - v1.POST("/container/sync", v1ContainerSyncPost) - - // 发送邮件、短信、语音、即时通讯消息,这些都依赖客户那边的通道 - v1.POST("/sender/mail", 
v1SendMail) - v1.POST("/sender/sms", v1SendSms) - v1.POST("/sender/voice", v1SendVoice) - v1.POST("/sender/im", v1SendIm) - - v1.GET("/nodes", nodeGets) - v1.GET("/node/:id", nodeGet) - v1.GET("/node-include-trash/:id", nodeIncludeTrashGet) - v1.GET("/node/:id/projs", v1treeUntilProjectGetsByNid) - v1.GET("/tree/projs", v1TreeUntilProjectGets) - v1.GET("/tree", v1TreeUntilTypGets) - - // 外部系统推送一些操作日志过来,RDB统一存储,实际用MQ会更好一些 - v1.POST("/resoplogs", v1OperationLogResPost) - - // 是否有权限做一些全局操作(即页面权限点) - v1.GET("/can-do-global-op", v1CandoGlobalOp) - // 是否在某个节点上有权限做某个操作(即资源权限点) - v1.GET("/can-do-node-op", v1CandoNodeOp) - // 同时校验多个操作权限点 - v1.GET("/can-do-node-ops", v1CandoNodeOps) - - // 获取用户、团队相关信息 - v1.GET("/get-user-by-uuid", v1UserGetByUUID) - v1.GET("/get-users-by-uuids", v1UserGetByUUIDs) - v1.GET("/get-users-by-ids", v1UserGetByIds) - v1.GET("/get-users-by-names", v1UserGetByNames) - v1.GET("/get-user-by-token", v1UserGetByToken) - v1.GET("/get-users-by-query", userListGet) - v1.GET("/get-teams-by-ids", v1TeamGetByIds) - v1.GET("/get-user-ids-by-team-ids", v1UserIdsGetByTeamIds) - - v1.GET("/users", v1UserListGet) - - v1.POST("/login", v1Login) - v1.POST("/send-login-code", sendLoginCode) - - // 第三方系统获取某个用户的所有权限点 - v1.GET("/perms/global", v1PermGlobalOps) - - // session - v1.GET("/sessions/:sid", v1SessionGet) - v1.GET("/sessions/:sid/user", v1SessionGetUser) - v1.GET("/sessions", v1SessionListGet) - v1.DELETE("/sessions/:sid", v1SessionDelete) - - // token - v1.GET("/tokens/:token", v1TokenGet) - v1.GET("/tokens/:token/user", v1TokenGetUser) - v1.DELETE("/tokens/:token", v1TokenDelete) - - // 第三方系统同步权限表的数据 - v1.GET("/table/sync/role-operation", v1RoleOperationGets) - v1.GET("/table/sync/role-global-user", v1RoleGlobalUserGets) - } -} diff --git a/src/modules/rdb/http/router_funcs.go b/src/modules/rdb/http/router_funcs.go deleted file mode 100644 index 7f64801d..00000000 --- a/src/modules/rdb/http/router_funcs.go +++ /dev/null @@ -1,233 +0,0 @@ -package http - 
-import ( - "fmt" - "strconv" - - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/rdb/auth" - "github.com/didi/nightingale/src/toolkits/i18n" - "github.com/gin-gonic/gin" - "github.com/toolkits/pkg/errors" -) - -func dangerous(v interface{}) { - errors.Dangerous(v) -} - -func bomb(format string, a ...interface{}) { - errors.Bomb(i18n.Sprintf(format, a...)) -} - -func bind(c *gin.Context, ptr interface{}) { - dangerous(c.ShouldBindJSON(ptr)) -} - -func urlParamStr(c *gin.Context, field string) string { - val := c.Param(field) - - if val == "" { - bomb("url param[%s] is blank", field) - } - - return val -} - -func urlParamInt64(c *gin.Context, field string) int64 { - strval := urlParamStr(c, field) - intval, err := strconv.ParseInt(strval, 10, 64) - if err != nil { - bomb("cannot convert %s to int64", strval) - } - - return intval -} - -func urlParamInt(c *gin.Context, field string) int { - return int(urlParamInt64(c, field)) -} - -func queryStr(c *gin.Context, key string, defaultVal ...string) string { - val := c.Query(key) - if val != "" { - return val - } - - if len(defaultVal) == 0 { - bomb("query param[%s] is necessary", key) - } - - return defaultVal[0] -} - -func queryInt(c *gin.Context, key string, defaultVal ...int) int { - strv := c.Query(key) - if strv != "" { - intv, err := strconv.Atoi(strv) - if err != nil { - bomb("cannot convert [%s] to int", strv) - } - return intv - } - - if len(defaultVal) == 0 { - bomb("query param[%s] is necessary", key) - } - - return defaultVal[0] -} - -func queryInt64(c *gin.Context, key string, defaultVal ...int64) int64 { - strv := c.Query(key) - if strv != "" { - intv, err := strconv.ParseInt(strv, 10, 64) - if err != nil { - bomb("cannot convert [%s] to int64", strv) - } - return intv - } - - if len(defaultVal) == 0 { - bomb("query param[%s] is necessary", key) - } - - return defaultVal[0] -} - -func offset(c *gin.Context, limit int) int { - if limit <= 0 { - limit = 10 - } - - page := 
queryInt(c, "p", 1) - return (page - 1) * limit -} - -func renderMessage(c *gin.Context, v interface{}) { - if v == nil { - c.JSON(200, gin.H{"err": ""}) - return - } - - switch t := v.(type) { - case string: - c.JSON(200, gin.H{"err": i18n.Sprintf(t)}) - case error: - c.JSON(200, gin.H{"err": t.Error()}) - } -} - -func renderData(c *gin.Context, data interface{}, err error) { - if err == nil { - c.JSON(200, gin.H{"dat": data, "err": ""}) - return - } - - renderMessage(c, err.Error()) -} - -func renderZeroPage(c *gin.Context) { - renderData(c, gin.H{ - "list": []int{}, - "total": 0, - }, nil) -} - -type idsForm struct { - Ids []int64 `json:"ids"` -} - -func loginUsername(c *gin.Context) string { - value, has := c.Get("username") - if !has { - bomb("unauthorized") - } - - if value == nil { - bomb("unauthorized") - } - - return value.(string) -} - -func loginUser(c *gin.Context) *models.User { - username := loginUsername(c) - - user, err := models.UserGet("username=?", username) - dangerous(err) - - if user == nil { - bomb("unauthorized") - } - - auth.PrepareUser(user) - - return user -} - -func loginRoot(c *gin.Context) *models.User { - value, has := c.Get("user") - if !has { - bomb("unauthorized") - } - - return value.(*models.User) -} - -func User(id int64) *models.User { - user, err := models.UserGet("id=?", id) - if err != nil { - bomb("cannot retrieve user[%d]: %v", id, err) - } - - if user == nil { - bomb("no such user[%d]", id) - } - - return user -} - -func Team(id int64) *models.Team { - team, err := models.TeamGet("id=?", id) - if err != nil { - bomb("cannot retrieve team[%d]: %v", id, err) - } - - if team == nil { - bomb("no such team[%d]", id) - } - - return team -} - -func Role(id int64) *models.Role { - role, err := models.RoleGet("id=?", id) - if err != nil { - bomb("cannot retrieve role[%d]: %v", id, err) - } - - if role == nil { - bomb("no such role[%d]", id) - } - - return role -} - -func Node(id int64) *models.Node { - node, err := 
models.NodeGet("id=?", id) - dangerous(err) - - if node == nil { - bomb("no such node[%d]", id) - } - - return node -} - -func _e(format string, a ...interface{}) error { - return fmt.Errorf(i18n.Sprintf(format, a...)) -} - -func _s(format string, a ...interface{}) string { - return i18n.Sprintf(format, a...) -} diff --git a/src/modules/rdb/http/router_health.go b/src/modules/rdb/http/router_health.go deleted file mode 100644 index 4b6a7523..00000000 --- a/src/modules/rdb/http/router_health.go +++ /dev/null @@ -1,7 +0,0 @@ -package http - -import "github.com/gin-gonic/gin" - -func ping(c *gin.Context) { - c.String(200, "pong") -} diff --git a/src/modules/rdb/http/router_home.go b/src/modules/rdb/http/router_home.go deleted file mode 100644 index ede9ce9c..00000000 --- a/src/modules/rdb/http/router_home.go +++ /dev/null @@ -1,10 +0,0 @@ -package http - -import ( - "github.com/didi/nightingale/src/modules/rdb/config" - "github.com/gin-gonic/gin" -) - -func ldapUsed(c *gin.Context) { - renderData(c, config.Config.LDAP.DefaultUse, nil) -} diff --git a/src/modules/rdb/http/router_stats.go b/src/modules/rdb/http/router_stats.go deleted file mode 100644 index 35c07b67..00000000 --- a/src/modules/rdb/http/router_stats.go +++ /dev/null @@ -1,26 +0,0 @@ -package http - -import ( - "github.com/didi/nightingale/src/models" - "github.com/gin-gonic/gin" -) - -type rdbStats struct { - Login *models.Stats -} - -var ( - stats *rdbStats -) - -func initStats() { - stats = &rdbStats{ - Login: models.MustNewStats("login"), - } -} - -func counterGet(c *gin.Context) { - renderData(c, map[string]int64{ - "login": stats.Login.Get(), - }, nil) -} diff --git a/src/modules/rdb/rdb.go b/src/modules/rdb/rdb.go deleted file mode 100644 index a23ce404..00000000 --- a/src/modules/rdb/rdb.go +++ /dev/null @@ -1,122 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" - "os/signal" - "syscall" - - _ "github.com/go-sql-driver/mysql" - - "github.com/toolkits/pkg/logger" - 
"github.com/toolkits/pkg/runner" - - "github.com/didi/nightingale/src/common/loggeri" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/rdb/auth" - "github.com/didi/nightingale/src/modules/rdb/cache" - "github.com/didi/nightingale/src/modules/rdb/config" - "github.com/didi/nightingale/src/modules/rdb/cron" - "github.com/didi/nightingale/src/modules/rdb/http" - "github.com/didi/nightingale/src/modules/rdb/rabbitmq" - "github.com/didi/nightingale/src/modules/rdb/redisc" - "github.com/didi/nightingale/src/modules/rdb/session" - "github.com/didi/nightingale/src/modules/rdb/ssoc" - "github.com/didi/nightingale/src/toolkits/i18n" -) - -var ( - vers *bool - help *bool - conf *string - - version = "No Version Provided" -) - -func init() { - vers = flag.Bool("v", false, "display the version.") - help = flag.Bool("h", false, "print this help.") - conf = flag.String("f", "", "specify configuration file.") - flag.Parse() - - if *vers { - fmt.Println("Version:", version) - os.Exit(0) - } - - if *help { - flag.Usage() - os.Exit(0) - } - - runner.Init() - fmt.Println("runner.cwd:", runner.Cwd) - fmt.Println("runner.hostname:", runner.Hostname) -} - -func main() { - parseConf() - - loggeri.Init(config.Config.Logger) - - // 初始化数据库和相关数据 - models.InitMySQL("rdb", "hbs") - - if config.Config.SSO.Enable && config.Config.Auth.ExtraMode.Enable { - models.InitMySQL("sso") - } - models.InitSalt() - models.InitRooter() - - ssoc.InitSSO() - - // 初始化 redis 用来发送邮件短信等 - redisc.InitRedis() - cron.InitWorker() - i18n.Init(config.Config.I18n) - - // 初始化 rabbitmq 处理部分异步逻辑 - rabbitmq.Init() - - cache.Start() - session.Init() - - auth.Init(config.Config.Auth.ExtraMode) - auth.Start() - - go cron.ConsumeMail() - go cron.ConsumeSms() - go cron.ConsumeVoice() - go cron.ConsumeIm() - go cron.CleanerLoop() - - http.Start() - - endingProc() -} - -func parseConf() { - if err := config.Parse(); err != nil { - fmt.Println("cannot parse configuration file:", err) - 
os.Exit(1) - } -} - -func endingProc() { - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - select { - case <-c: - fmt.Printf("stop signal caught, stopping... pid=%d\n", os.Getpid()) - } - - logger.Close() - http.Shutdown() - redisc.CloseRedis() - rabbitmq.Shutdown() - session.Stop() - cache.Stop() - - fmt.Println("process stopped successfully") -} diff --git a/src/modules/rdb/redisc/reader.go b/src/modules/rdb/redisc/reader.go deleted file mode 100644 index e45f7d74..00000000 --- a/src/modules/rdb/redisc/reader.go +++ /dev/null @@ -1,42 +0,0 @@ -package redisc - -import ( - "encoding/json" - - "github.com/garyburd/redigo/redis" - "github.com/toolkits/pkg/logger" - - "github.com/didi/nightingale/src/common/dataobj" -) - -func Pop(count int, queue string) []*dataobj.Message { - var ret []*dataobj.Message - - rc := RedisConnPool.Get() - defer rc.Close() - - for i := 0; i < count; i++ { - reply, err := redis.String(rc.Do("RPOP", queue)) - if err != nil { - if err != redis.ErrNil { - logger.Errorf("rpop queue:%s failed, err: %v", queue, err) - } - break - } - - if reply == "" || reply == "nil" { - continue - } - - var message dataobj.Message - err = json.Unmarshal([]byte(reply), &message) - if err != nil { - logger.Errorf("unmarshal message failed, err: %v, redis reply: %v", err, reply) - continue - } - - ret = append(ret, &message) - } - - return ret -} diff --git a/src/modules/rdb/redisc/writer.go b/src/modules/rdb/redisc/writer.go deleted file mode 100644 index 61ddaa3a..00000000 --- a/src/modules/rdb/redisc/writer.go +++ /dev/null @@ -1,53 +0,0 @@ -package redisc - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/toolkits/pkg/logger" - - "github.com/didi/nightingale/src/common/dataobj" -) - -func lpush(queue, message string) error { - rc := RedisConnPool.Get() - defer rc.Close() - _, err := rc.Do("LPUSH", queue, message) - if err != nil { - logger.Errorf("LPUSH %s fail, message:%s, error:%v", queue, 
message, err) - } - return err -} - -// Write LPUSH message to redis -func Write(data *dataobj.Message, queue string) error { - if data == nil { - return fmt.Errorf("message is nil") - } - - data.Tos = removeEmptyString(data.Tos) - - bs, err := json.Marshal(data) - if err != nil { - logger.Errorf("marshal message failed, message: %+v, err: %v", data, err) - return err - } - - logger.Debugf("write message to queue, message:%+v, queue:%s", data, queue) - return lpush(queue, string(bs)) -} - -func removeEmptyString(s []string) []string { - cnt := len(s) - ss := make([]string, 0, cnt) - for i := 0; i < cnt; i++ { - if strings.TrimSpace(s[i]) == "" { - continue - } - - ss = append(ss, s[i]) - } - - return ss -} diff --git a/src/modules/transfer/aggr/aggr.go b/src/modules/server/aggr/aggr.go similarity index 95% rename from src/modules/transfer/aggr/aggr.go rename to src/modules/server/aggr/aggr.go index 89fbab64..4421b668 100644 --- a/src/modules/transfer/aggr/aggr.go +++ b/src/modules/server/aggr/aggr.go @@ -8,9 +8,9 @@ import ( "strings" "time" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/transfer/cache" - "github.com/didi/nightingale/src/toolkits/str" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/str" + "github.com/didi/nightingale/v4/src/modules/server/cache" "github.com/spaolacci/murmur3" "github.com/toolkits/pkg/logger" @@ -53,7 +53,7 @@ func SendToAggr(items []*dataobj.MetricValue) error { key = item.Endpoint } - validStrategys := cache.AggrCalcMap.GetByKey(str.MD5(key, item.Metric, "")) + validStrategys := cache.AggrCalcMap.GetByKey(str.ToMD5(key, item.Metric, "")) var stras []*dataobj.RawMetricAggrCalc for _, stra := range validStrategys { if !tagMatch(stra.TagFilters, item.TagsMap) { diff --git a/src/modules/transfer/aggr/kafka.go b/src/modules/server/aggr/kafka.go similarity index 100% rename from src/modules/transfer/aggr/kafka.go rename to 
src/modules/server/aggr/kafka.go diff --git a/src/modules/monapi/alarm/callback.go b/src/modules/server/alarm/callback.go similarity index 91% rename from src/modules/monapi/alarm/callback.go rename to src/modules/server/alarm/callback.go index 9c8d97c3..f699f800 100644 --- a/src/modules/monapi/alarm/callback.go +++ b/src/modules/server/alarm/callback.go @@ -6,10 +6,10 @@ import ( "strings" "time" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/acache" - "github.com/didi/nightingale/src/modules/monapi/config" - "github.com/didi/nightingale/src/modules/monapi/redisc" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" + "github.com/didi/nightingale/v4/src/modules/server/config" + "github.com/didi/nightingale/v4/src/modules/server/redisc" "github.com/garyburd/redigo/redis" "github.com/toolkits/pkg/logger" @@ -39,7 +39,7 @@ type CallbackEvent struct { } func CallbackConsumer() { - queue := config.Get().Queue.Callback + queue := config.Config.Monapi.Queue.Callback for { callbackEvent := PopCallbackEvent(queue) if callbackEvent == nil { @@ -52,7 +52,7 @@ func CallbackConsumer() { } func NeedCallback(sid int64) bool { - stra, exists := acache.StraCache.GetById(sid) + stra, exists := cache.AlarmStraCache.GetById(sid) if !exists { return false } @@ -64,7 +64,7 @@ func NeedCallback(sid int64) bool { } func PushCallbackEvent(event *models.Event) error { - callbackQueue := config.Get().Queue.Callback + callbackQueue := config.Config.Monapi.Queue.Callback es, err := json.Marshal(event) if err != nil { @@ -109,7 +109,7 @@ func PopCallbackEvent(queue string) *models.Event { } func doCallback(event *models.Event) { - stra, exists := acache.StraCache.GetById(event.Sid) + stra, exists := cache.AlarmStraCache.GetById(event.Sid) if !exists { logger.Errorf("sid not found, event: %v", event) return diff --git a/src/modules/monapi/alarm/event_cleaner.go 
b/src/modules/server/alarm/event_cleaner.go similarity index 78% rename from src/modules/monapi/alarm/event_cleaner.go rename to src/modules/server/alarm/event_cleaner.go index 94c53f78..b12f67fa 100644 --- a/src/modules/monapi/alarm/event_cleaner.go +++ b/src/modules/server/alarm/event_cleaner.go @@ -3,8 +3,8 @@ package alarm import ( "time" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/config" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/config" "github.com/toolkits/pkg/logger" ) @@ -17,7 +17,7 @@ func CleanEventLoop() { } func CleanEvent() { - cfg := config.Get().Cleaner + cfg := config.Config.Monapi.Cleaner days := cfg.Days batch := cfg.Batch diff --git a/src/modules/monapi/alarm/event_consumer.go b/src/modules/server/alarm/event_consumer.go similarity index 94% rename from src/modules/monapi/alarm/event_consumer.go rename to src/modules/server/alarm/event_consumer.go index e84adfd2..939dcee8 100644 --- a/src/modules/monapi/alarm/event_consumer.go +++ b/src/modules/server/alarm/event_consumer.go @@ -6,11 +6,11 @@ import ( "strings" "time" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/acache" - "github.com/didi/nightingale/src/modules/monapi/config" - "github.com/didi/nightingale/src/modules/monapi/notify" - "github.com/didi/nightingale/src/modules/monapi/redisc" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" + "github.com/didi/nightingale/v4/src/modules/server/config" + "github.com/didi/nightingale/v4/src/modules/server/notify" + "github.com/didi/nightingale/v4/src/modules/server/redisc" "github.com/toolkits/pkg/logger" ) @@ -109,7 +109,7 @@ func updatePriority(event *models.Event) error { return err } - if event.EventType == config.ALERT { + if event.EventType == models.ALERT { err := models.UpdateEventCurPriority(event.HashId, alertUpgrade.Level) if err != 
nil { logger.Errorf("UpdateEventCurPriority failed, err: %v, event: %+v", err, event) @@ -129,7 +129,7 @@ func updatePriority(event *models.Event) error { // 1. 用户配置了N秒之内只报警M次 // 2. 用户配置了不发送recovery报警 func isInConverge(event *models.Event) bool { - stra, exists := acache.StraCache.GetById(event.Sid) + stra, exists := cache.AlarmStraCache.GetById(event.Sid) if !exists { logger.Errorf("sid not found, event: %+v", event) return false @@ -139,7 +139,7 @@ func isInConverge(event *models.Event) bool { now := time.Now().Unix() - if event.EventType == config.RECOVERY { + if event.EventType == models.RECOVERY { redisc.SetWithTTL(eventString, now, 30*24*3600) if stra.RecoveryNotify == 0 { // 不发送recovery通知 @@ -195,7 +195,7 @@ func needUpgrade(event *models.Event) bool { alertUpgradeKey := ALERT_UPGRADE_PREFIX + fmt.Sprint(event.HashId) eventAlertKey := ALERT_TIME_PREFIX + fmt.Sprint(event.HashId) - if event.EventType == config.RECOVERY { + if event.EventType == models.RECOVERY { if redisc.HasKey(alertUpgradeKey) { err := redisc.DelKey(eventAlertKey) if err != nil { @@ -277,7 +277,7 @@ func needUpgrade(event *models.Event) bool { } func SetEventStatus(event *models.Event, status string) { - if event.EventType == config.ALERT { + if event.EventType == models.ALERT { if err := models.SaveEventCurStatus(event.HashId, status); err != nil { logger.Errorf("set event_cur status fail, event: %+v, status: %v, err:%v", event, status, err) } else { @@ -285,7 +285,7 @@ func SetEventStatus(event *models.Event, status string) { } } - if config.Get().Cleaner.Converge && status == models.STATUS_CONVERGE { + if config.Config.Monapi.Cleaner.Converge && status == models.STATUS_CONVERGE { // 已收敛的告警,直接从库里删了,不保留了 if err := models.EventDelById(event.Id); err != nil { logger.Errorf("converge_del fail, id: %v, hash id: %v, error: %v", event.Id, event.HashId, err) diff --git a/src/modules/monapi/alarm/event_merge.go b/src/modules/server/alarm/event_merge.go similarity index 85% rename from 
src/modules/monapi/alarm/event_merge.go rename to src/modules/server/alarm/event_merge.go index a2a97117..c5b96232 100644 --- a/src/modules/monapi/alarm/event_merge.go +++ b/src/modules/server/alarm/event_merge.go @@ -5,16 +5,17 @@ import ( "sort" "time" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/config" - "github.com/didi/nightingale/src/modules/monapi/notify" - "github.com/didi/nightingale/src/modules/monapi/redisc" + "github.com/didi/nightingale/v4/src/common/slice" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/config" + "github.com/didi/nightingale/v4/src/modules/server/notify" + "github.com/didi/nightingale/v4/src/modules/server/redisc" "github.com/toolkits/pkg/logger" ) func MergeEvent() { - mergeCfg := config.Get().Merge + mergeCfg := config.Config.Monapi.Merge for { eventMap := getAllEventFromMergeHash(mergeCfg.Hash) if eventMap != nil { @@ -53,7 +54,7 @@ func storeLowEvent(event *models.Event) { return } - mergeCfg := config.Get().Merge + mergeCfg := config.Config.Monapi.Merge if _, err := redisc.HSET(mergeCfg.Hash, string(es), ""); err != nil { logger.Errorf("hset event to %v failed, err: %v, event: %+v", mergeCfg.Hash, err, event) @@ -64,7 +65,7 @@ func storeLowEvent(event *models.Event) { } func parseMergeEvent(eventMap map[int64][]*models.Event) { - mergeCfg := config.Get().Merge + mergeCfg := config.Config.Monapi.Merge max := mergeCfg.Max @@ -81,7 +82,7 @@ func parseMergeEvent(eventMap map[int64][]*models.Event) { recoveryEvents := []*models.Event{} for _, event := range events { - if event.EventType == config.ALERT { + if event.EventType == models.ALERT { alertEvents = append(alertEvents, event) } else { recoveryEvents = append(recoveryEvents, event) @@ -96,7 +97,7 @@ func parseMergeEvent(eventMap map[int64][]*models.Event) { // continue //} - for _, bounds := range config.SplitN(len(alertEvents), max) { + for _, bounds := range 
slice.SplitN(len(alertEvents), max) { go notify.DoNotify(false, alertEvents[bounds[0]:bounds[1]]...) } @@ -118,7 +119,7 @@ func parseMergeEvent(eventMap map[int64][]*models.Event) { continue } - for _, bounds := range config.SplitN(len(recoveryEvents), max) { + for _, bounds := range slice.SplitN(len(recoveryEvents), max) { go notify.DoNotify(false, recoveryEvents[bounds[0]:bounds[1]]...) } diff --git a/src/modules/monapi/alarm/event_reader.go b/src/modules/server/alarm/event_reader.go similarity index 92% rename from src/modules/monapi/alarm/event_reader.go rename to src/modules/server/alarm/event_reader.go index b6d9cc48..4c122ceb 100644 --- a/src/modules/monapi/alarm/event_reader.go +++ b/src/modules/server/alarm/event_reader.go @@ -5,17 +5,17 @@ import ( "strconv" "time" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/acache" - "github.com/didi/nightingale/src/modules/monapi/config" - "github.com/didi/nightingale/src/modules/monapi/redisc" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" + "github.com/didi/nightingale/v4/src/modules/server/config" + "github.com/didi/nightingale/v4/src/modules/server/redisc" "github.com/garyburd/redigo/redis" "github.com/toolkits/pkg/logger" ) func ReadHighEvent() { - queues := config.Get().Queue.High + queues := config.Config.Monapi.Queue.High if len(queues) == 0 { return } @@ -33,7 +33,7 @@ func ReadHighEvent() { } func ReadLowEvent() { - queues := config.Get().Queue.Low + queues := config.Config.Monapi.Queue.Low if len(queues) == 0 { return } @@ -75,7 +75,7 @@ func popEvent(queues []interface{}) (*models.Event, bool) { return nil, false } - stra, has := acache.StraCache.GetById(event.Sid) + stra, has := cache.AlarmStraCache.GetById(event.Sid) if !has { // 可能策略已经删除了 logger.Errorf("stra not found, stra id: %d, event: %+v", event.Sid, event) @@ -184,7 +184,7 @@ func popEvent(queues []interface{}) (*models.Event, bool) { return 
event, true } - if event.EventType == config.ALERT { + if event.EventType == models.ALERT { eventCur := new(models.EventCur) if err = json.Unmarshal([]byte(reply[1]), eventCur); err != nil { logger.Errorf("unmarshal redis reply failed, err: %v, event: %+v", err, event) diff --git a/src/modules/monapi/alarm/mask.go b/src/modules/server/alarm/mask.go similarity index 86% rename from src/modules/monapi/alarm/mask.go rename to src/modules/server/alarm/mask.go index e9ac4f71..844349f3 100644 --- a/src/modules/monapi/alarm/mask.go +++ b/src/modules/server/alarm/mask.go @@ -5,8 +5,8 @@ import ( "strings" "time" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/acache" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" "github.com/toolkits/pkg/logger" ) @@ -58,7 +58,7 @@ func SyncMaskconf() error { } } - acache.MaskCache.SetAll(maskMap) + cache.MaskCache.SetAll(maskMap) return nil } @@ -81,15 +81,15 @@ func IsMaskEvent(event *models.Event) bool { var maskTagsList []string var exists bool if event.Category == 1 { - maskTagsList, exists = acache.MaskCache.GetByKey("#" + event.Endpoint) + maskTagsList, exists = cache.MaskCache.GetByKey("#" + event.Endpoint) if !exists { - maskTagsList, exists = acache.MaskCache.GetByKey(eventMetric + "#" + event.Endpoint) + maskTagsList, exists = cache.MaskCache.GetByKey(eventMetric + "#" + event.Endpoint) if !exists { continue } } } else { - maskTagsList, exists = acache.MaskCache.GetByKey(eventMetric + "#" + event.CurNid) + maskTagsList, exists = cache.MaskCache.GetByKey(eventMetric + "#" + event.CurNid) if !exists { continue } diff --git a/src/modules/monapi/alarm/stra.go b/src/modules/server/alarm/stra.go similarity index 88% rename from src/modules/monapi/alarm/stra.go rename to src/modules/server/alarm/stra.go index 190a5ddd..8c302c0a 100644 --- a/src/modules/monapi/alarm/stra.go +++ b/src/modules/server/alarm/stra.go @@ -4,8 +4,8 @@ import 
( "fmt" "time" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/acache" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" "github.com/toolkits/pkg/logger" ) @@ -29,7 +29,7 @@ func SyncStra() error { smap[list[i].Id] = list[i] } - acache.StraCache.SetAll(smap) + cache.AlarmStraCache.SetAll(smap) return nil } diff --git a/src/modules/rdb/auth/auth.go b/src/modules/server/auth/auth.go similarity index 85% rename from src/modules/rdb/auth/auth.go rename to src/modules/server/auth/auth.go index 8b755cb5..e6a1e585 100644 --- a/src/modules/rdb/auth/auth.go +++ b/src/modules/server/auth/auth.go @@ -1,9 +1,9 @@ package auth import ( - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/rdb/config" - "github.com/didi/nightingale/src/modules/rdb/ssoc" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/config" + "github.com/didi/nightingale/v4/src/modules/server/ssoc" ) var defaultAuth Authenticator diff --git a/src/modules/rdb/auth/authenticator.go b/src/modules/server/auth/authenticator.go similarity index 97% rename from src/modules/rdb/auth/authenticator.go rename to src/modules/server/auth/authenticator.go index f503a393..e290f4da 100644 --- a/src/modules/rdb/auth/authenticator.go +++ b/src/modules/server/auth/authenticator.go @@ -8,11 +8,12 @@ import ( "net/url" "time" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/rdb/cache" - "github.com/didi/nightingale/src/modules/rdb/config" - "github.com/didi/nightingale/src/modules/rdb/ssoc" - "github.com/didi/nightingale/src/toolkits/i18n" + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" + "github.com/didi/nightingale/v4/src/modules/server/config" + "github.com/didi/nightingale/v4/src/modules/server/ssoc" + 
pkgcache "github.com/toolkits/pkg/cache" "github.com/toolkits/pkg/logger" ) diff --git a/src/modules/transfer/backend/datasource.go b/src/modules/server/backend/datasource.go similarity index 97% rename from src/modules/transfer/backend/datasource.go rename to src/modules/server/backend/datasource.go index 09993ef0..b3eb4760 100644 --- a/src/modules/transfer/backend/datasource.go +++ b/src/modules/server/backend/datasource.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/didi/nightingale/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/dataobj" ) // send diff --git a/src/modules/transfer/backend/influxdb/influxdb.go b/src/modules/server/backend/influxdb/influxdb.go similarity index 97% rename from src/modules/transfer/backend/influxdb/influxdb.go rename to src/modules/server/backend/influxdb/influxdb.go index a42e1292..220fb660 100644 --- a/src/modules/transfer/backend/influxdb/influxdb.go +++ b/src/modules/server/backend/influxdb/influxdb.go @@ -3,8 +3,8 @@ package influxdb import ( "time" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/toolkits/stats" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/stats" client "github.com/influxdata/influxdb/client/v2" "github.com/toolkits/pkg/concurrent/semaphore" diff --git a/src/modules/transfer/backend/influxdb/model.go b/src/modules/server/backend/influxdb/model.go similarity index 98% rename from src/modules/transfer/backend/influxdb/model.go rename to src/modules/server/backend/influxdb/model.go index 9452923a..983f75cb 100644 --- a/src/modules/transfer/backend/influxdb/model.go +++ b/src/modules/server/backend/influxdb/model.go @@ -5,7 +5,7 @@ import ( "strings" "time" - "github.com/didi/nightingale/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/dataobj" "github.com/toolkits/pkg/logger" ) diff --git a/src/modules/transfer/backend/influxdb/query.go b/src/modules/server/backend/influxdb/query.go 
similarity index 99% rename from src/modules/transfer/backend/influxdb/query.go rename to src/modules/server/backend/influxdb/query.go index 127edb62..471c1cd6 100644 --- a/src/modules/transfer/backend/influxdb/query.go +++ b/src/modules/server/backend/influxdb/query.go @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/didi/nightingale/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/dataobj" client "github.com/influxdata/influxdb/client/v2" "github.com/influxdata/influxdb/models" diff --git a/src/modules/transfer/backend/init.go b/src/modules/server/backend/init.go similarity index 88% rename from src/modules/transfer/backend/init.go rename to src/modules/server/backend/init.go index 5c8f33dd..45d52464 100644 --- a/src/modules/transfer/backend/init.go +++ b/src/modules/server/backend/init.go @@ -3,16 +3,15 @@ package backend import ( "log" - "github.com/didi/nightingale/src/modules/transfer/backend/influxdb" - "github.com/didi/nightingale/src/modules/transfer/backend/m3db" - "github.com/didi/nightingale/src/modules/transfer/backend/tsdb" + "github.com/didi/nightingale/v4/src/modules/server/backend/influxdb" + "github.com/didi/nightingale/v4/src/modules/server/backend/m3db" + "github.com/didi/nightingale/v4/src/modules/server/backend/tsdb" ) type BackendSection struct { DataSource string `yaml:"datasource"` StraPath string `yaml:"straPath"` - Judge JudgeSection `yaml:"judge"` M3db m3db.M3dbSection `yaml:"m3db"` Tsdb tsdb.TsdbSection `yaml:"tsdb"` Influxdb influxdb.InfluxdbSection `yaml:"influxdb"` @@ -34,9 +33,6 @@ func Init(cfg BackendSection) { defaultDataSource = cfg.DataSource StraPath = cfg.StraPath - // init judge - InitJudge(cfg.Judge) - // init tsdb if cfg.Tsdb.Enabled { tsdbDataSource = &tsdb.TsdbDataSource{ diff --git a/src/modules/transfer/backend/kafka.go b/src/modules/server/backend/kafka.go similarity index 97% rename from src/modules/transfer/backend/kafka.go rename to src/modules/server/backend/kafka.go index 2570282d..317b038d 
100644 --- a/src/modules/transfer/backend/kafka.go +++ b/src/modules/server/backend/kafka.go @@ -8,8 +8,8 @@ import ( "strings" "time" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/toolkits/stats" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/stats" "github.com/Shopify/sarama" "github.com/toolkits/pkg/logger" diff --git a/src/modules/transfer/backend/m3db/.gitignore b/src/modules/server/backend/m3db/.gitignore similarity index 100% rename from src/modules/transfer/backend/m3db/.gitignore rename to src/modules/server/backend/m3db/.gitignore diff --git a/src/modules/transfer/backend/m3db/benchmark/benchmark.yml b/src/modules/server/backend/m3db/benchmark/benchmark.yml similarity index 100% rename from src/modules/transfer/backend/m3db/benchmark/benchmark.yml rename to src/modules/server/backend/m3db/benchmark/benchmark.yml diff --git a/src/modules/transfer/backend/m3db/benchmark/main.go b/src/modules/server/backend/m3db/benchmark/main.go similarity index 95% rename from src/modules/transfer/backend/m3db/benchmark/main.go rename to src/modules/server/backend/m3db/benchmark/main.go index 7d27e5ae..2e278f14 100644 --- a/src/modules/transfer/backend/m3db/benchmark/main.go +++ b/src/modules/server/backend/m3db/benchmark/main.go @@ -10,8 +10,9 @@ import ( "strconv" "time" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/transfer/backend/m3db" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/server/backend/m3db" + "github.com/toolkits/pkg/concurrent/semaphore" "gopkg.in/yaml.v2" ) diff --git a/src/modules/transfer/backend/m3db/convert.go b/src/modules/server/backend/m3db/convert.go similarity index 96% rename from src/modules/transfer/backend/m3db/convert.go rename to src/modules/server/backend/m3db/convert.go index 8fa7e197..3c2512e6 100644 --- 
a/src/modules/transfer/backend/m3db/convert.go +++ b/src/modules/server/backend/m3db/convert.go @@ -4,9 +4,10 @@ import ( "math" "strings" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/transfer/calc" - "github.com/didi/nightingale/src/toolkits/str" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/str" + "github.com/didi/nightingale/v4/src/modules/server/calc" + "github.com/m3db/m3/src/query/storage/m3/consolidators" "github.com/m3db/m3/src/x/ident" "github.com/toolkits/pkg/logger" @@ -16,7 +17,7 @@ func mvID(in *dataobj.MetricValue) ident.ID { if in.Nid != "" { in.Endpoint = dataobj.NidToEndpoint(in.Nid) } - return ident.StringID(str.MD5(in.Endpoint, in.Metric, str.SortedTags(in.TagsMap))) + return ident.StringID(str.ToMD5(in.Endpoint, in.Metric, str.SortedTags(in.TagsMap))) } func mvTags(item *dataobj.MetricValue) ident.Tags { diff --git a/src/modules/transfer/backend/m3db/m3db.go b/src/modules/server/backend/m3db/m3db.go similarity index 99% rename from src/modules/transfer/backend/m3db/m3db.go rename to src/modules/server/backend/m3db/m3db.go index da14d317..06f4ba68 100644 --- a/src/modules/transfer/backend/m3db/m3db.go +++ b/src/modules/server/backend/m3db/m3db.go @@ -6,8 +6,9 @@ import ( "sync/atomic" "time" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/toolkits/stats" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/stats" + "github.com/m3db/m3/src/dbnode/client" "github.com/m3db/m3/src/dbnode/encoding" "github.com/m3db/m3/src/dbnode/storage/index" diff --git a/src/modules/transfer/backend/m3db/query.go b/src/modules/server/backend/m3db/query.go similarity index 99% rename from src/modules/transfer/backend/m3db/query.go rename to src/modules/server/backend/m3db/query.go index b91e841d..5e3c8010 100644 --- a/src/modules/transfer/backend/m3db/query.go +++ 
b/src/modules/server/backend/m3db/query.go @@ -4,7 +4,8 @@ import ( "strings" "time" - "github.com/didi/nightingale/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/m3db/m3/src/dbnode/storage/index" "github.com/m3db/m3/src/m3ninx/idx" ) diff --git a/src/modules/transfer/backend/opentsdb.go b/src/modules/server/backend/opentsdb.go similarity index 95% rename from src/modules/transfer/backend/opentsdb.go rename to src/modules/server/backend/opentsdb.go index 615d3bcc..47557bc6 100644 --- a/src/modules/transfer/backend/opentsdb.go +++ b/src/modules/server/backend/opentsdb.go @@ -4,9 +4,9 @@ import ( "bytes" "time" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/toolkits/pools" - "github.com/didi/nightingale/src/toolkits/stats" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/pools" + "github.com/didi/nightingale/v4/src/common/stats" "github.com/toolkits/pkg/concurrent/semaphore" "github.com/toolkits/pkg/container/list" diff --git a/src/modules/transfer/backend/tsdb/index.go b/src/modules/server/backend/tsdb/index.go similarity index 84% rename from src/modules/transfer/backend/tsdb/index.go rename to src/modules/server/backend/tsdb/index.go index 5d08bfc4..54b5bf17 100644 --- a/src/modules/transfer/backend/tsdb/index.go +++ b/src/modules/server/backend/tsdb/index.go @@ -5,8 +5,8 @@ import ( "sync" "time" - "github.com/didi/nightingale/src/common/report" - "github.com/didi/nightingale/src/toolkits/stats" + "github.com/didi/nightingale/v4/src/common/stats" + "github.com/didi/nightingale/v4/src/models" "github.com/toolkits/pkg/logger" ) @@ -40,7 +40,7 @@ func GetIndexLoop() { } func GetIndex() { - instances, err := report.GetAlive("index", "rdb") + instances, err := models.GetAllInstances("index", 1) if err != nil { stats.Counter.Set("get.index.err", 1) logger.Warningf("get index list err:%v", err) diff --git 
a/src/modules/transfer/backend/tsdb/query.go b/src/modules/server/backend/tsdb/query.go similarity index 98% rename from src/modules/transfer/backend/tsdb/query.go rename to src/modules/server/backend/tsdb/query.go index 01081d26..b7ed4d55 100644 --- a/src/modules/transfer/backend/tsdb/query.go +++ b/src/modules/server/backend/tsdb/query.go @@ -20,10 +20,10 @@ import ( "math/rand" "time" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/transfer/calc" - "github.com/didi/nightingale/src/toolkits/pools" - "github.com/didi/nightingale/src/toolkits/stats" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/pools" + "github.com/didi/nightingale/v4/src/common/stats" + "github.com/didi/nightingale/v4/src/modules/server/calc" "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/net/httplib" diff --git a/src/modules/transfer/backend/tsdb/ring.go b/src/modules/server/backend/tsdb/ring.go similarity index 100% rename from src/modules/transfer/backend/tsdb/ring.go rename to src/modules/server/backend/tsdb/ring.go diff --git a/src/modules/transfer/backend/tsdb/tsdb.go b/src/modules/server/backend/tsdb/tsdb.go similarity index 97% rename from src/modules/transfer/backend/tsdb/tsdb.go rename to src/modules/server/backend/tsdb/tsdb.go index 0ce779de..da50395d 100644 --- a/src/modules/transfer/backend/tsdb/tsdb.go +++ b/src/modules/server/backend/tsdb/tsdb.go @@ -18,9 +18,9 @@ import ( "strings" "time" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/toolkits/pools" - "github.com/didi/nightingale/src/toolkits/stats" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/pools" + "github.com/didi/nightingale/v4/src/common/stats" "github.com/toolkits/pkg/concurrent/semaphore" "github.com/toolkits/pkg/container/list" diff --git a/src/modules/monapi/acache/mask.go b/src/modules/server/cache/alarm_mask.go 
similarity index 97% rename from src/modules/monapi/acache/mask.go rename to src/modules/server/cache/alarm_mask.go index f0dad4c4..52554406 100644 --- a/src/modules/monapi/acache/mask.go +++ b/src/modules/server/cache/alarm_mask.go @@ -1,4 +1,4 @@ -package acache +package cache import "sync" diff --git a/src/modules/server/cache/alarm_stra.go b/src/modules/server/cache/alarm_stra.go new file mode 100644 index 00000000..4aeca675 --- /dev/null +++ b/src/modules/server/cache/alarm_stra.go @@ -0,0 +1,35 @@ +package cache + +import ( + "sync" + + "github.com/didi/nightingale/v4/src/models" +) + +type AlarmStraCacheMap struct { + sync.RWMutex + Data map[int64]*models.Stra +} + +var AlarmStraCache *AlarmStraCacheMap + +func NewAlarmStraCache() *AlarmStraCacheMap { + return &AlarmStraCacheMap{ + Data: make(map[int64]*models.Stra), + } +} + +func (this *AlarmStraCacheMap) SetAll(m map[int64]*models.Stra) { + this.Lock() + defer this.Unlock() + this.Data = m +} + +func (this *AlarmStraCacheMap) GetById(id int64) (*models.Stra, bool) { + this.RLock() + defer this.RUnlock() + + value, exists := this.Data[id] + + return value, exists +} diff --git a/src/modules/rdb/cache/cache.go b/src/modules/server/cache/cache.go similarity index 94% rename from src/modules/rdb/cache/cache.go rename to src/modules/server/cache/cache.go index f195c933..5d7f8e9b 100644 --- a/src/modules/rdb/cache/cache.go +++ b/src/modules/server/cache/cache.go @@ -3,7 +3,7 @@ package cache import ( "context" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" ) var ( diff --git a/src/modules/rdb/cache/config.go b/src/modules/server/cache/config.go similarity index 95% rename from src/modules/rdb/cache/config.go rename to src/modules/server/cache/config.go index c3c5f32f..a4ef84d5 100644 --- a/src/modules/rdb/cache/config.go +++ b/src/modules/server/cache/config.go @@ -5,7 +5,7 @@ import ( "sync" "time" - "github.com/didi/nightingale/src/models" + 
"github.com/didi/nightingale/v4/src/models" "github.com/toolkits/pkg/logger" ) diff --git a/src/modules/server/cache/init.go b/src/modules/server/cache/init.go new file mode 100644 index 00000000..fba29930 --- /dev/null +++ b/src/modules/server/cache/init.go @@ -0,0 +1,117 @@ +package cache + +import ( + "context" + "strconv" + + "github.com/didi/nightingale/v4/src/models" + + "github.com/toolkits/pkg/logger" +) + +var CollectRuleCache *collectRuleCache +var JudgeHashRing *ConsistentHashRing +var ApiDetectorHashRing map[string]*ConsistentHashRing +var SnmpDetectorHashRing map[string]*ConsistentHashRing +var ActiveJudgeNode = NewNodeMap() + +const CHECK_INTERVAL = 9 + +func Init(regions []string) { + // 初始化默认参数 + StraCache = NewStraCache() + CollectCache = NewCollectCache() + AggrCalcStraCache = NewAggrCalcStraCache() + AlarmStraCache = NewAlarmStraCache() + MaskCache = NewMaskCache() + ApiCollectCache = NewApiCollectCache() + SnmpCollectCache = NewSnmpCollectCache() + SnmpHWCache = NewSnmpHWCache() + LoadMetrics() + + go InitJudgeHashRing() + go InitApiDetectorHashRing(regions) + go InitSnmpDetectorHashRing(regions) + + CollectRuleCache = NewCollectRuleCache(regions) + CollectRuleCache.Start(context.Background()) + + //judge + InitHistoryBigMap() + Strategy = NewStrategyMap() + NodataStra = NewStrategyMap() + SeriesMap = NewIndexMap() + + //rdb + Start() +} + +const JudgesReplicas = 500 + +func InitJudgeHashRing() { + JudgeHashRing = NewConsistentHashRing(int32(JudgesReplicas), []string{}) + + instances, err := models.GetAllInstances("server", 1) + if err != nil { + logger.Warning("get server err:", err) + } + + judgeNodes := []string{} + for _, j := range instances { + if j.Active { + judgeNodes = append(judgeNodes, strconv.FormatInt(j.Id, 10)) + } + } + JudgeHashRing = NewConsistentHashRing(int32(JudgesReplicas), judgeNodes) +} + +func InitApiDetectorHashRing(regions []string) { + ApiDetectorHashRing = make(map[string]*ConsistentHashRing) + for _, region := range 
regions { + ApiDetectorHashRing[region] = NewConsistentHashRing(int32(500), []string{}) + } + + detectors, err := models.GetAllInstances("api", 1) + if err != nil { + logger.Warning("get api err:", err) + } + + nodesMap := make(map[string][]string) + for _, d := range detectors { + if _, exists := nodesMap[d.Region]; exists { + nodesMap[d.Region] = append(nodesMap[d.Region], d.Identity) + } else { + nodesMap[d.Region] = []string{d.Identity} + } + } + + for region, nodes := range nodesMap { + ApiDetectorHashRing[region] = NewConsistentHashRing(int32(500), nodes) + } + +} + +func InitSnmpDetectorHashRing(regions []string) { + SnmpDetectorHashRing = make(map[string]*ConsistentHashRing) + for _, region := range regions { + SnmpDetectorHashRing[region] = NewConsistentHashRing(int32(500), []string{}) + } + + detectors, err := models.GetAllInstances("snmp", 1) + if err != nil { + logger.Warning("get snmp err:", err) + } + + nodesMap := make(map[string][]string) + for _, d := range detectors { + if _, exists := nodesMap[d.Region]; exists { + nodesMap[d.Region] = append(nodesMap[d.Region], d.Identity) + } else { + nodesMap[d.Region] = []string{d.Identity} + } + } + + for region, nodes := range nodesMap { + SnmpDetectorHashRing[region] = NewConsistentHashRing(int32(500), nodes) + } +} diff --git a/src/modules/judge/cache/event.go b/src/modules/server/cache/judge_event.go similarity index 89% rename from src/modules/judge/cache/event.go rename to src/modules/server/cache/judge_event.go index 560d7f5b..6c7e54f2 100644 --- a/src/modules/judge/cache/event.go +++ b/src/modules/server/cache/judge_event.go @@ -3,7 +3,7 @@ package cache import ( "sync" - "github.com/didi/nightingale/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/dataobj" ) type SafeEventMap struct { diff --git a/src/modules/judge/cache/history.go b/src/modules/server/cache/judge_history.go similarity index 97% rename from src/modules/judge/cache/history.go rename to 
src/modules/server/cache/judge_history.go index 610ca903..159e9355 100644 --- a/src/modules/judge/cache/history.go +++ b/src/modules/server/cache/judge_history.go @@ -17,7 +17,7 @@ package cache import ( "sync" - "github.com/didi/nightingale/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/dataobj" ) type JudgeItemMap struct { diff --git a/src/modules/judge/cache/index.go b/src/modules/server/cache/judge_index.go similarity index 100% rename from src/modules/judge/cache/index.go rename to src/modules/server/cache/judge_index.go diff --git a/src/modules/judge/cache/linkedlist.go b/src/modules/server/cache/judge_linkedlist.go similarity index 98% rename from src/modules/judge/cache/linkedlist.go rename to src/modules/server/cache/judge_linkedlist.go index b19c3031..cb794a9b 100644 --- a/src/modules/judge/cache/linkedlist.go +++ b/src/modules/server/cache/judge_linkedlist.go @@ -18,7 +18,7 @@ import ( "container/list" "sync" - "github.com/didi/nightingale/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/dataobj" ) type SafeLinkedList struct { diff --git a/src/modules/judge/cache/stra.go b/src/modules/server/cache/judge_stra.go similarity index 91% rename from src/modules/judge/cache/stra.go rename to src/modules/server/cache/judge_stra.go index 97e6232e..e4454bf5 100644 --- a/src/modules/judge/cache/stra.go +++ b/src/modules/server/cache/judge_stra.go @@ -4,8 +4,8 @@ import ( "sync" "time" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/toolkits/stats" + "github.com/didi/nightingale/v4/src/common/stats" + "github.com/didi/nightingale/v4/src/models" ) var Strategy *StrategyMap diff --git a/src/modules/monapi/scache/aggr_cache.go b/src/modules/server/cache/monapi_aggr.go similarity index 88% rename from src/modules/monapi/scache/aggr_cache.go rename to src/modules/server/cache/monapi_aggr.go index 216b3aff..78f51476 100644 --- a/src/modules/monapi/scache/aggr_cache.go +++ 
b/src/modules/server/cache/monapi_aggr.go @@ -1,9 +1,9 @@ -package scache +package cache import ( "sync" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" ) type AggrCalcCacheMap struct { diff --git a/src/modules/server/cache/monapi_collect.go b/src/modules/server/cache/monapi_collect.go new file mode 100644 index 00000000..8062b93f --- /dev/null +++ b/src/modules/server/cache/monapi_collect.go @@ -0,0 +1,144 @@ +package cache + +import ( + "sync" + + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/models" +) + +type CollectCacheMap struct { + sync.RWMutex + Data map[string]*models.Collect +} + +var CollectCache *CollectCacheMap + +func NewCollectCache() *CollectCacheMap { + return &CollectCacheMap{Data: make(map[string]*models.Collect)} +} + +func (c *CollectCacheMap) GetBy(endpoint string) *models.Collect { + c.RLock() + defer c.RUnlock() + + return c.Data[endpoint] +} + +func (c *CollectCacheMap) Set(endpoint string, collect *models.Collect) { + c.Lock() + defer c.Unlock() + + c.Data[endpoint] = collect + return +} + +func (c *CollectCacheMap) SetAll(strasMap map[string]*models.Collect) { + c.Lock() + defer c.Unlock() + + c.Data = strasMap + return +} + +type ApiCollectCacheMap struct { + sync.RWMutex + Data map[string][]*models.ApiCollect +} + +var ApiCollectCache *ApiCollectCacheMap + +func NewApiCollectCache() *ApiCollectCacheMap { + return &ApiCollectCacheMap{Data: make(map[string][]*models.ApiCollect)} +} + +func (c *ApiCollectCacheMap) GetBy(node string) []*models.ApiCollect { + c.RLock() + defer c.RUnlock() + + return c.Data[node] +} + +func (c *ApiCollectCacheMap) Set(node string, collects []*models.ApiCollect) { + c.Lock() + defer c.Unlock() + + c.Data[node] = collects + return +} + +func (c *ApiCollectCacheMap) SetAll(data map[string][]*models.ApiCollect) { + c.Lock() + defer c.Unlock() + + c.Data = data + return +} + +//snmp +type SnmpCollectCacheMap struct { + 
sync.RWMutex + Data map[string][]*dataobj.IPAndSnmp +} + +var SnmpCollectCache *SnmpCollectCacheMap +var SnmpHWCache *SnmpHWCacheMap + +func NewSnmpCollectCache() *SnmpCollectCacheMap { + return &SnmpCollectCacheMap{Data: make(map[string][]*dataobj.IPAndSnmp)} +} + +func (c *SnmpCollectCacheMap) GetBy(node string) []*dataobj.IPAndSnmp { + c.RLock() + defer c.RUnlock() + + return c.Data[node] +} + +func (c *SnmpCollectCacheMap) Set(node string, collects []*dataobj.IPAndSnmp) { + c.Lock() + defer c.Unlock() + + c.Data[node] = collects + return +} + +func (c *SnmpCollectCacheMap) SetAll(data map[string][]*dataobj.IPAndSnmp) { + c.Lock() + defer c.Unlock() + + c.Data = data + return +} + +type SnmpHWCacheMap struct { + sync.RWMutex + Data map[string][]*models.NetworkHardware +} + +func NewSnmpHWCache() *SnmpHWCacheMap { + return &SnmpHWCacheMap{Data: make(map[string][]*models.NetworkHardware)} +} + +func (c *SnmpHWCacheMap) GetBy(node string) []*models.NetworkHardware { + c.RLock() + defer c.RUnlock() + + return c.Data[node] +} + +func (c *SnmpHWCacheMap) Set(node string, hws []*models.NetworkHardware) { + c.Lock() + defer c.Unlock() + + c.Data[node] = hws + return +} + +func (c *SnmpHWCacheMap) SetAll(data map[string][]*models.NetworkHardware) { + c.Lock() + defer c.Unlock() + + c.Data = data + return +} diff --git a/src/modules/server/cache/monapi_snmp_metric.go b/src/modules/server/cache/monapi_snmp_metric.go new file mode 100644 index 00000000..658e7f6c --- /dev/null +++ b/src/modules/server/cache/monapi_snmp_metric.go @@ -0,0 +1,80 @@ +package cache + +import ( + "io/ioutil" + "sync" + + "github.com/didi/nightingale/v4/src/common/dataobj" + + "github.com/toolkits/pkg/logger" + "gopkg.in/yaml.v2" +) + +var CommonModule string + +var ModuleMetric ModuleMetricMap + +type ModuleMetricMap struct { + sync.RWMutex + Data map[string]map[string]*dataobj.Metric +} + +func (m *ModuleMetricMap) Set(module string, metrics map[string]*dataobj.Metric) { + m.Lock() + 
m.Data[module] = metrics + m.Unlock() +} + +func (m *ModuleMetricMap) Get(module, metricName string) (*dataobj.Metric, bool) { + m.RLock() + defer m.RUnlock() + + metrics, exists := m.Data[module] + if !exists { + return nil, false + } + + metric, exists := metrics[metricName] + return metric, exists +} + +func (m *ModuleMetricMap) GetByModule(module string) map[string]*dataobj.Metric { + m.RLock() + defer m.RUnlock() + + metricMap, _ := m.Data[module] + return metricMap +} + +func LoadMetrics() { + ModuleMetric = ModuleMetricMap{ + Data: make(map[string]map[string]*dataobj.Metric), + } + + metricsConfig, err := LoadFile() + if err != nil { + logger.Debug(err) + return + } + + for module, metrics := range metricsConfig { + metricMap := make(map[string]*dataobj.Metric) + for _, m := range metrics { + metricMap[m.Name] = m + } + ModuleMetric.Set(module, metricMap) + } +} + +func LoadFile() (map[string][]*dataobj.Metric, error) { + content, err := ioutil.ReadFile("./etc/snmp.yml") + if err != nil { + return nil, err + } + metrics := make(map[string][]*dataobj.Metric) + err = yaml.UnmarshalStrict(content, metrics) + if err != nil { + return nil, err + } + return metrics, nil +} diff --git a/src/modules/monapi/scache/stra_cache.go b/src/modules/server/cache/monapi_stra.go similarity index 94% rename from src/modules/monapi/scache/stra_cache.go rename to src/modules/server/cache/monapi_stra.go index 02ee25eb..1698c5b0 100644 --- a/src/modules/monapi/scache/stra_cache.go +++ b/src/modules/server/cache/monapi_stra.go @@ -1,9 +1,9 @@ -package scache +package cache import ( "sync" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" ) type StraCacheMap struct { diff --git a/src/modules/monapi/scache/collectrule.go b/src/modules/server/cache/prober_collect.go similarity index 89% rename from src/modules/monapi/scache/collectrule.go rename to src/modules/server/cache/prober_collect.go index 379f9e2f..69e1983f 100644 --- 
a/src/modules/monapi/scache/collectrule.go +++ b/src/modules/server/cache/prober_collect.go @@ -1,4 +1,4 @@ -package scache +package cache import ( "context" @@ -8,13 +8,15 @@ import ( "sync" "time" - "github.com/didi/nightingale/src/common/report" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/config" + "github.com/didi/nightingale/v4/src/models" + "github.com/toolkits/pkg/consistent" "github.com/toolkits/pkg/logger" ) +const ProbersReplicas = 500 +const DetectorReplicas = 500 + type collectRuleCache struct { sync.RWMutex Region []string @@ -22,9 +24,9 @@ type collectRuleCache struct { HashRing map[string]*ConsistentHashRing // map: region } -func NewCollectRuleCache() *collectRuleCache { +func NewCollectRuleCache(regions []string) *collectRuleCache { return &collectRuleCache{ - Region: config.Get().Region, + Region: regions, Data: make(map[string][]*models.CollectRule), HashRing: make(map[string]*ConsistentHashRing), } @@ -45,7 +47,7 @@ func (p *collectRuleCache) Start(ctx context.Context) { func (p *collectRuleCache) initHashRing() { for _, region := range p.Region { - p.HashRing[region] = NewConsistentHashRing(int32(config.DetectorReplicas), []string{}) + p.HashRing[region] = NewConsistentHashRing(int32(DetectorReplicas), []string{}) } } @@ -153,10 +155,10 @@ func (p *collectRuleCache) syncPlacementLoop(ctx context.Context) { } func (p *collectRuleCache) syncPlacement() error { - instances, err := report.GetAlive("prober", "rdb") + instances, err := models.GetAllInstances("prober", 1) if err != nil { logger.Warning("get prober err:", err) - return fmt.Errorf("report.GetAlive prober fail: %v", err) + return fmt.Errorf("GetAlive prober fail: %v", err) } logger.Debugf("get placement %d %s", len(instances), str(instances)) @@ -197,7 +199,7 @@ func (p *collectRuleCache) syncPlacement() error { if rehash { //重建 hash环 r := consistent.New() - r.NumberOfReplicas = config.DetectorReplicas + r.NumberOfReplicas = 
DetectorReplicas for node, _ := range nodes { r.Add(node) } diff --git a/src/modules/monapi/scache/ring.go b/src/modules/server/cache/ring.go similarity index 99% rename from src/modules/monapi/scache/ring.go rename to src/modules/server/cache/ring.go index a1bc8cd8..d9867158 100644 --- a/src/modules/monapi/scache/ring.go +++ b/src/modules/server/cache/ring.go @@ -1,4 +1,4 @@ -package scache +package cache import ( "sync" diff --git a/src/modules/rdb/cache/session.go b/src/modules/server/cache/session.go similarity index 100% rename from src/modules/rdb/cache/session.go rename to src/modules/server/cache/session.go diff --git a/src/modules/transfer/cache/aggr.go b/src/modules/server/cache/transfer_aggr.go similarity index 95% rename from src/modules/transfer/cache/aggr.go rename to src/modules/server/cache/transfer_aggr.go index 47446302..2d74268a 100644 --- a/src/modules/transfer/cache/aggr.go +++ b/src/modules/server/cache/transfer_aggr.go @@ -3,7 +3,7 @@ package cache import ( "sync" - "github.com/didi/nightingale/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/dataobj" ) type SafeAggrCalcMap struct { diff --git a/src/modules/transfer/cache/queue.go b/src/modules/server/cache/transfer_queue.go similarity index 100% rename from src/modules/transfer/cache/queue.go rename to src/modules/server/cache/transfer_queue.go diff --git a/src/modules/transfer/cache/stra.go b/src/modules/server/cache/transfer_stra.go similarity index 94% rename from src/modules/transfer/cache/stra.go rename to src/modules/server/cache/transfer_stra.go index da02cad0..daf9fc0f 100644 --- a/src/modules/transfer/cache/stra.go +++ b/src/modules/server/cache/transfer_stra.go @@ -3,7 +3,7 @@ package cache import ( "sync" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" ) type SafeStraMap struct { diff --git a/src/modules/transfer/calc/aggr.go b/src/modules/server/calc/aggr.go similarity index 98% rename from src/modules/transfer/calc/aggr.go 
rename to src/modules/server/calc/aggr.go index 7ae64a57..7b621ec5 100644 --- a/src/modules/transfer/calc/aggr.go +++ b/src/modules/server/calc/aggr.go @@ -4,7 +4,7 @@ import ( "math" "sort" - "github.com/didi/nightingale/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/dataobj" ) type AggrTsValue struct { diff --git a/src/modules/monapi/collector/basecollector.go b/src/modules/server/collector/basecollector.go similarity index 96% rename from src/modules/monapi/collector/basecollector.go rename to src/modules/server/collector/basecollector.go index 8fae9d67..8d3d0eb4 100644 --- a/src/modules/monapi/collector/basecollector.go +++ b/src/modules/server/collector/basecollector.go @@ -6,9 +6,10 @@ import ( "fmt" "time" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/prober/manager/accumulator" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/prober/manager/accumulator" + "github.com/influxdata/telegraf" ) diff --git a/src/modules/monapi/collector/collector.go b/src/modules/server/collector/collector.go similarity index 96% rename from src/modules/monapi/collector/collector.go rename to src/modules/server/collector/collector.go index 376dd556..51c36a61 100644 --- a/src/modules/monapi/collector/collector.go +++ b/src/modules/server/collector/collector.go @@ -4,8 +4,9 @@ import ( "errors" "fmt" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/toolkits/i18n" + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/models" + "github.com/influxdata/telegraf" ) diff --git a/src/modules/monapi/collector/template.go b/src/modules/server/collector/template.go similarity index 100% rename from src/modules/monapi/collector/template.go rename to src/modules/server/collector/template.go diff --git 
a/src/modules/server/config/config.go b/src/modules/server/config/config.go new file mode 100644 index 00000000..14a2541e --- /dev/null +++ b/src/modules/server/config/config.go @@ -0,0 +1,441 @@ +package config + +import ( + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/didi/nightingale/v4/src/common/address" + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/common/identity" + "github.com/didi/nightingale/v4/src/common/loggeri" + "github.com/didi/nightingale/v4/src/common/report" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/aggr" + "github.com/didi/nightingale/v4/src/modules/server/backend" + "github.com/didi/nightingale/v4/src/modules/server/backend/tsdb" + "github.com/didi/nightingale/v4/src/modules/server/cron" + "github.com/didi/nightingale/v4/src/modules/server/judge" + "github.com/didi/nightingale/v4/src/modules/server/judge/query" + "github.com/didi/nightingale/v4/src/modules/server/rabbitmq" + "github.com/didi/nightingale/v4/src/modules/server/redisc" + "github.com/didi/nightingale/v4/src/modules/server/wechat" + + "github.com/spf13/viper" + "github.com/toolkits/pkg/file" + "gopkg.in/yaml.v2" +) + +type ConfigT struct { + Logger loggeri.Config `yaml:"logger"` + HTTP httpSection `yaml:"http"` + Redis redisc.RedisSection `yaml:"redis"` + WeChat wechat.WechatSection `yaml:"wechat"` + RabbitMQ rabbitmq.RabbitmqSection `yaml:"rabbitmq"` + Tokens []string `yaml:"tokens"` + I18n i18n.I18nSection `yaml:"i18n"` + Report report.ReportSection `yaml:"report"` + Rdb rdbSection `yaml:"rdb"` + Job jobSection `yaml:"job"` + Transfer transferSection `yaml:"transfer"` + Monapi monapiSection `yaml:"monapi"` + Judge judgeSection `yaml:"judge"` + Nems nemsSection `yaml:"nems"` +} + +type judgeSection struct { + Query query.SeriesQuerySection `yaml:"query"` + Strategy cron.StrategySection `yaml:"strategy"` + NodataConcurrency int `yaml:"nodataConcurrency"` + Backend 
judge.JudgeSection `yaml:"backend"` +} + +type rdbSection struct { + Auth authSection `yaml:"auth"` + LDAP models.LDAPSection `yaml:"ldap"` + SSO ssoSection `yaml:"sso"` + Sender map[string]cron.SenderSection `yaml:"sender"` + Webhook []webhook `yaml:"webhook"` +} + +type webhook struct { + Addr string `yaml:"addr"` + Token string `yaml:"token"` +} + +type authSection struct { + Captcha bool `yaml:"captcha"` + ExtraMode AuthExtraSection `yaml:"extraMode"` +} + +type AuthExtraSection struct { + Enable bool `yaml:"enable"` + Debug bool `yaml:"debug" description:"debug"` + DebugUser string `yaml:"debugUser" description:"debug username"` + WhiteList bool `yaml:"whiteList"` + FrozenDays int `yaml:"frozenDays"` + WritenOffDays int `yaml:"writenOffDays"` +} + +type ssoSection struct { + Enable bool `yaml:"enable"` + RedirectURL string `yaml:"redirectURL"` + SsoAddr string `yaml:"ssoAddr"` + ClientId string `yaml:"clientId"` + ClientSecret string `yaml:"clientSecret"` + ApiKey string `yaml:"apiKey"` + StateExpiresIn int64 `yaml:"stateExpiresIn"` + CoverAttributes bool `yaml:"coverAttributes"` + Attributes struct { + Dispname string `yaml:"dispname"` + Phone string `yaml:"phone"` + Email string `yaml:"email"` + Im string `yaml:"im"` + } `yaml:"attributes"` +} + +type httpSection struct { + Mode string `yaml:"mode"` + ShowLog bool `yaml:"showLog"` + Session SessionSection `yaml:"session"` +} + +type SessionSection struct { + CookieName string `yaml:"cookieName"` + CookieDomain string `yaml:"cookieDomain"` + SidLength int `yaml:"sidLength"` + HttpOnly bool `yaml:"httpOnly"` + GcInterval int64 `yaml:"gcInterval"` + CookieLifetime int64 `yaml:"cookieLifetime"` + Storage string `yaml:"storage" description:"mem|db(defualt)"` +} + +type ldapSection struct { + DefaultUse bool `yaml:"defaultUse"` + Host string `yaml:"host"` + Port int `yaml:"port"` + BaseDn string `yaml:"baseDn"` + BindUser string `yaml:"bindUser"` + BindPass string `yaml:"bindPass"` + AuthFilter string 
`yaml:"authFilter"` + Attributes ldapAttributes `yaml:"attributes"` + CoverAttributes bool `yaml:"coverAttributes"` + TLS bool `yaml:"tls"` + StartTLS bool `yaml:"startTLS"` +} + +type ldapAttributes struct { + Dispname string `yaml:"dispname"` + Phone string `yaml:"phone"` + Email string `yaml:"email"` + Im string `yaml:"im"` +} + +type jobSection struct { + Enable bool `yaml:"enable"` + OutputComeFrom string `yaml:"outputComeFrom"` + RemoteAgtdPort int `yaml:"remoteAgtdPort"` +} + +type transferSection struct { + Aggr aggr.AggrSection `yaml:"aggr"` + Backend backend.BackendSection `yaml:"backend"` +} + +type nemsSection struct { + Enabled bool `yaml:"enabled"` + CheckTarget bool `yaml:"checkTarget"` +} + +type monapiSection struct { + Proxy proxySection `yaml:"proxy"` + Region []string `yaml:"region"` + Habits habitsSection `yaml:"habits"` + AlarmEnabled bool `yaml:"alarmEnabled"` + ApiDetectorEnabled bool `yaml:"apiDetectorEnabled"` + SnmpDetectorEnabled bool `yaml:"snmpDetectorEnabled"` + TicketEnabled bool `yaml:"ticketEnabled"` + Queue queueSection `yaml:"queue"` + Cleaner cleanerSection `yaml:"cleaner"` + Merge mergeSection `yaml:"merge"` + Notify map[string][]string `yaml:"notify"` + Link linkSection `yaml:"link"` + IndexMod string `yaml:"indexMod"` + Tpl tplSection `yaml:"tpl"` + SnmpConfig string `yaml:"snmpConfig"` +} + +type tplSection struct { + AlertPath string `yaml:"alertPath"` + ScreenPath string `yaml:"screenPath"` +} + +type linkSection struct { + Stra string `yaml:"stra"` + Event string `yaml:"event"` + Claim string `yaml:"claim"` +} + +type mergeSection struct { + Hash string `yaml:"hash"` + Max int `yaml:"max"` + Interval int `yaml:"interval"` +} + +type cleanerSection struct { + Days int `yaml:"days"` + Batch int `yaml:"batch"` + Converge bool `yaml:"converge"` +} + +type queueSection struct { + High []interface{} `yaml:"high"` + Low []interface{} `yaml:"low"` + Callback string `yaml:"callback"` +} + +type habitsSection struct { + Identity 
string `yaml:"identity"` +} + +type proxySection struct { + Transfer string `yaml:"transfer"` + Index string `yaml:"index"` +} + +var Config *ConfigT +var Ident string + +// Parse configuration file +func Parse() error { + ymlFile := getYmlFile() + if ymlFile == "" { + return fmt.Errorf("configuration file etc/server.yml not found") + } + + bs, err := file.ReadBytes(ymlFile) + if err != nil { + return fmt.Errorf("cannot read yml[%s]: %v", ymlFile, err) + } + + viper.SetConfigType("yaml") + err = viper.ReadConfig(bytes.NewBuffer(bs)) + if err != nil { + return fmt.Errorf("cannot read yml[%s]: %v", ymlFile, err) + } + + viper.SetDefault("i18n", map[string]string{ + "dictPath": "etc/dict.json", + "lang": "zh", + }) + + viper.SetDefault("report", map[string]interface{}{ + "mod": "server", + "enabled": true, + "interval": 4000, + "timeout": 3000, + "remark": "", + }) + + viper.SetDefault("redis.local.idle", 5) + viper.SetDefault("redis.local.timeout", map[string]int{ + "conn": 500, + "read": 3000, + "write": 3000, + }) + + viper.SetDefault("job", map[string]interface{}{ + "outputComeFrom": "database", + "remoteAgtdPort": 2080, + }) + + viper.SetDefault("transfer.backend", map[string]interface{}{ + "datasource": "m3db", + "straPath": "/api/mon/stras/effective?all=1", + }) + + viper.SetDefault("judge.backend", map[string]interface{}{ + "batch": 200, //每次拉取文件的个数 + "workerNum": 32, + "maxConns": 2000, //查询和推送数据的并发个数 + "maxIdle": 32, //建立的连接池的最大空闲数 + "connTimeout": 1000, //链接超时时间,单位毫秒 + "callTimeout": 3000, //访问超时时间,单位毫秒 + "hbsMod": "rdb", + "eventPrefix": "/n9e", + }) + + viper.SetDefault("transfer.backend.tsdb", map[string]interface{}{ + "enabled": false, + "name": "tsdb", + "batch": 200, //每次拉取文件的个数 + "workerNum": 32, + "maxConns": 2000, //查询和推送数据的并发个数 + "maxIdle": 32, //建立的连接池的最大空闲数 + "connTimeout": 1000, //链接超时时间,单位毫秒 + "callTimeout": 3000, //访问超时时间,单位毫秒 + "indexTimeout": 3000, //访问index超时时间,单位毫秒 + "replicas": 500, //一致性hash虚拟节点 + }) + + 
viper.SetDefault("transfer.aggr", map[string]interface{}{ + "enabled": false, + "apiTimeout": 3000, + "apiPath": "/api/mon/aggrs", + }) + + viper.SetDefault("transfer.backend.influxdb", map[string]interface{}{ + "enabled": false, + "name": "influxdb", + "batch": 200, //每次拉取文件的个数 + "maxRetry": 3, //重试次数 + "workerNum": 32, + "maxConns": 2000, //查询和推送数据的并发个数 + "timeout": 3000, //访问超时时间,单位毫秒 + }) + + viper.SetDefault("transfer.backend.opentsdb", map[string]interface{}{ + "enabled": false, + "name": "opentsdb", + "batch": 200, //每次拉取文件的个数 + "maxRetry": 3, //重试次数 + "workerNum": 32, + "maxConns": 2000, //查询和推送数据的并发个数 + "maxIdle": 32, //建立的连接池的最大空闲数 + "connTimeout": 1000, //链接超时时间,单位毫秒 + "callTimeout": 3000, //访问超时时间,单位毫秒 + }) + + viper.SetDefault("transfer.backend.kafka", map[string]interface{}{ + "enabled": false, + "name": "kafka", + "maxRetry": 3, //重试次数 + "connTimeout": 1000, //链接超时时间,单位毫秒 + "callTimeout": 3000, //访问超时时间,单位毫秒 + }) + + viper.SetDefault("monapi.proxy", map[string]string{ + "transfer": "http://127.0.0.1:7900", + "index": "http://127.0.0.1:7904", + }) + + viper.SetDefault("monapi.alarmEnabled", "true") + viper.SetDefault("monapi.indexMod", "index") + + viper.SetDefault("monapi.habits.identity", "ip") + + viper.SetDefault("monapi.merge", map[string]interface{}{ + "hash": "mon-merge", + "max": 100, //merge的最大条数 + "interval": 10, //merge等待的数据,单位秒 + }) + + viper.SetDefault("monapi.queue", map[string]interface{}{ + "high": []string{"/n9e/event/p1"}, + "low": []string{"/n9e/event/p2", "/n9e/event/p3"}, + "callback": "/ecmc.io/alarm/callback", + }) + + viper.SetDefault("monapi.cleaner", map[string]interface{}{ + "days": 31, + "batch": 100, + "converge": true, // 历史告警的数据库表,对于已收敛的告警,默认删掉,不保留,省得告警太多 + }) + + viper.SetDefault("monapi.tpl", map[string]string{ + "alertPath": "./etc/alert", + "screenPath": "./etc/screen", + }) + + //judge + viper.SetDefault("judge.nodataConcurrency", 1000) + viper.SetDefault("judge.query", map[string]interface{}{ + "maxConn": 2000, + 
"maxIdle": 100, + "connTimeout": 1000, + "callTimeout": 2000, + "indexCallTimeout": 2000, + "indexMod": "index", + "indexPath": "/api/index/counter/clude", + }) + + viper.SetDefault("judge.strategy", map[string]interface{}{ + "partitionApi": "/api/mon/stras/effective?instance=%s:%s", + "updateInterval": 9000, + "indexInterval": 60000, + "timeout": 5000, + "mod": "server", + "eventPrefix": "n9e", + }) + + err = viper.Unmarshal(&Config) + if err != nil { + return fmt.Errorf("cannot read yml[%s]: %v", ymlFile, err) + } + + Config.Transfer.Backend.Tsdb.ClusterList = formatClusterItems(Config.Transfer.Backend.Tsdb.Cluster) + + Config.Report.HTTPPort = strconv.Itoa(address.GetHTTPPort("server")) + Config.Report.RPCPort = strconv.Itoa(address.GetRPCPort("server")) + + if Config.HTTP.Session.GcInterval == 0 { + Config.HTTP.Session.GcInterval = 60 + } + + if Config.HTTP.Session.SidLength == 0 { + Config.HTTP.Session.SidLength = 32 + } + + if Config.Transfer.Backend.M3db.Enabled { + // viper.Unmarshal not compatible with yaml.Unmarshal + var b *ConfigT + err := yaml.Unmarshal([]byte(bs), &b) + if err != nil { + return err + } + Config.Transfer.Backend.M3db = b.Transfer.Backend.M3db + } + + fmt.Println("config.file:", ymlFile) + Ident, _ = identity.GetIdent() + + if err := parseOps(); err != nil { + return err + } + + return identity.Parse() +} + +func getYmlFile() string { + yml := "etc/server.local.yml" + if file.IsExist(yml) { + return yml + } + + yml = "etc/server.yml" + if file.IsExist(yml) { + return yml + } + + return "" +} + +// map["node"]="host1,host2" --> map["node"]=["host1", "host2"] +func formatClusterItems(cluster map[string]string) map[string]*tsdb.ClusterNode { + ret := make(map[string]*tsdb.ClusterNode) + for node, clusterStr := range cluster { + items := strings.Split(clusterStr, ",") + nitems := make([]string, 0) + for _, item := range items { + nitems = append(nitems, strings.TrimSpace(item)) + } + ret[node] = NewClusterNode(nitems) + } + + return ret +} 
+ +func NewClusterNode(addrs []string) *tsdb.ClusterNode { + return &tsdb.ClusterNode{Addrs: addrs} +} diff --git a/src/modules/index/config/const.go b/src/modules/server/config/const.go similarity index 100% rename from src/modules/index/config/const.go rename to src/modules/server/config/const.go diff --git a/src/modules/rdb/config/i18n.go b/src/modules/server/config/i18n.go similarity index 99% rename from src/modules/rdb/config/i18n.go rename to src/modules/server/config/i18n.go index 6a111293..beb157cf 100644 --- a/src/modules/rdb/config/i18n.go +++ b/src/modules/server/config/i18n.go @@ -1,6 +1,6 @@ package config -import "github.com/didi/nightingale/src/toolkits/i18n" +import "github.com/didi/nightingale/v4/src/common/i18n" var ( langDict = map[string]map[string]string{ diff --git a/src/modules/rdb/config/ops.go b/src/modules/server/config/ops.go similarity index 100% rename from src/modules/rdb/config/ops.go rename to src/modules/server/config/ops.go diff --git a/src/modules/rdb/cron/cleaner.go b/src/modules/server/cron/cleaner.go similarity index 82% rename from src/modules/rdb/cron/cleaner.go rename to src/modules/server/cron/cleaner.go index 74ca2686..5f993ac5 100644 --- a/src/modules/rdb/cron/cleaner.go +++ b/src/modules/server/cron/cleaner.go @@ -3,7 +3,7 @@ package cron import ( "time" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" ) const cleanerInterval = 3600 * time.Second diff --git a/src/modules/server/cron/init.go b/src/modules/server/cron/init.go new file mode 100644 index 00000000..b2865375 --- /dev/null +++ b/src/modules/server/cron/init.go @@ -0,0 +1,21 @@ +package cron + +func Init() { + go GetStrategy() + go RebuildJudgePool() + go UpdateJudgeQueue() + + //monapi + go CheckJudgeNodes() + go SyncStras() + go CleanStraLoop() + go SyncCollects() + go CleanCollectLoop() + + //rdb + go ConsumeMail() + go ConsumeSms() + go ConsumeVoice() + go ConsumeIm() + go CleanerLoop() +} diff --git 
a/src/modules/monapi/scache/judge.go b/src/modules/server/cron/judge_ring.go similarity index 58% rename from src/modules/monapi/scache/judge.go rename to src/modules/server/cron/judge_ring.go index 9a15563e..7d2381d0 100644 --- a/src/modules/monapi/scache/judge.go +++ b/src/modules/server/cron/judge_ring.go @@ -1,18 +1,22 @@ -package scache +package cron import ( "fmt" "strconv" "time" - "github.com/didi/nightingale/src/common/report" - "github.com/didi/nightingale/src/modules/monapi/config" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" "github.com/toolkits/pkg/consistent" "github.com/toolkits/pkg/logger" ) func CheckJudgeNodes() { + if err := CheckJudge(); err != nil { + logger.Errorf("check judge fail: %v", err) + } + t1 := time.NewTicker(time.Duration(3 * time.Second)) for { <-t1.C @@ -21,10 +25,10 @@ func CheckJudgeNodes() { } func CheckJudge() error { - judges, err := report.GetAlive("judge", "rdb") + judges, err := models.GetAllInstances("server", 1) if err != nil { logger.Warning("get judge err:", err) - return fmt.Errorf("report.GetAlive judge fail: %v", err) + return fmt.Errorf("GetAlive judge fail: %v", err) } if len(judges) < 1 { @@ -40,11 +44,11 @@ func CheckJudge() error { } rehash := false - if ActiveJudgeNode.Len() != len(judgeNode) { //scache.ActiveJudgeNode中的node数量和新获取的不同,重新rehash + if cache.ActiveJudgeNode.Len() != len(judgeNode) { //scache.cache.ActiveJudgeNode中的node数量和新获取的不同,重新rehash rehash = true } else { for node, instance := range judgeNode { - v, exists := ActiveJudgeNode.GetInstanceBy(node) + v, exists := cache.ActiveJudgeNode.GetInstanceBy(node) if !exists || (exists && instance != v) { rehash = true break @@ -52,17 +56,17 @@ func CheckJudge() error { } } if rehash { - ActiveJudgeNode.Set(judgeNode) + cache.ActiveJudgeNode.Set(judgeNode) //重建judge hash环 r := consistent.New() - r.NumberOfReplicas = config.JudgesReplicas - nodes := ActiveJudgeNode.GetNodes() + r.NumberOfReplicas = 
cache.JudgesReplicas + nodes := cache.ActiveJudgeNode.GetNodes() for _, node := range nodes { r.Add(node) } logger.Warning("judge hash ring rebuild ", r.Members()) - JudgeHashRing.Set(r) + cache.JudgeHashRing.Set(r) } return nil diff --git a/src/modules/server/cron/judge_stra.go b/src/modules/server/cron/judge_stra.go new file mode 100644 index 00000000..d511026e --- /dev/null +++ b/src/modules/server/cron/judge_stra.go @@ -0,0 +1,80 @@ +package cron + +import ( + "fmt" + "time" + + "github.com/didi/nightingale/v4/src/common/address" + "github.com/didi/nightingale/v4/src/common/identity" + "github.com/didi/nightingale/v4/src/common/stats" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" + + "github.com/toolkits/pkg/logger" +) + +type StrategySection struct { + PartitionApi string `yaml:"partitionApi"` + Timeout int `yaml:"timeout"` + Token string `yaml:"token"` + UpdateInterval int `yaml:"updateInterval"` + IndexInterval int `yaml:"indexInterval"` + ReportInterval int `yaml:"reportInterval"` + Mod string `yaml:"mod"` + EventPrefix string `yaml:"eventPrefix"` +} + +var JudgeStra StrategySection + +type StrasResp struct { + Data []*models.Stra `json:"dat"` + Err string `json:"err"` +} + +func InitStrategySection(cfg StrategySection) { + JudgeStra = cfg +} + +func GetJudgeStrategy(cfg StrategySection) { + t1 := time.NewTicker(time.Duration(cfg.UpdateInterval) * time.Millisecond) + ident, err := identity.GetIdent() + if err != nil { + logger.Fatalf("get ident err:%v", err) + return + } + + getJudgeStrategy(cfg, ident) + for { + <-t1.C + getJudgeStrategy(cfg, ident) + } +} + +func getJudgeStrategy(opts StrategySection, ident string) { + instance := fmt.Sprintf("%s:%d", ident, address.GetRPCPort("server")) + node, has := cache.ActiveJudgeNode.GetNodeBy(instance) + if !has { + logger.Errorf("%s get node err", instance) + return + } + + stras := cache.StraCache.GetByNode(node) + for _, stra := range stras { + if 
len(stra.Exprs) < 1 { + logger.Warningf("strategy:%v exprs < 1", stra) + stats.Counter.Set("stra.illegal", 1) + continue + } + + if stra.Exprs[0].Func == "nodata" { + stats.Counter.Set("stra.nodata", 1) + cache.NodataStra.Set(stra.Id, stra) + } else { + stats.Counter.Set("stra.common", 1) + cache.Strategy.Set(stra.Id, stra) + } + } + + cache.NodataStra.Clean() + cache.Strategy.Clean() +} diff --git a/src/modules/monapi/scache/aggr.go b/src/modules/server/cron/monapi_aggr.go similarity index 92% rename from src/modules/monapi/scache/aggr.go rename to src/modules/server/cron/monapi_aggr.go index 71c59b9e..020a0dae 100644 --- a/src/modules/monapi/scache/aggr.go +++ b/src/modules/server/cron/monapi_aggr.go @@ -1,16 +1,17 @@ -package scache +package cron import ( "strconv" "time" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" "github.com/toolkits/pkg/logger" ) func SyncAggrCalcStras() { - t1 := time.NewTicker(time.Duration(CHECK_INTERVAL) * time.Second) + t1 := time.NewTicker(time.Duration(cache.CHECK_INTERVAL) * time.Second) syncAggrCalcStras() logger.Info("[cron] sync stras start...") @@ -77,7 +78,7 @@ func syncAggrCalcStras() { } } - AggrCalcStraCache.Set(stras) + cache.AggrCalcStraCache.Set(stras) } func CleanAggrCalcStraLoop() { diff --git a/src/modules/server/cron/monapi_api_collect.go b/src/modules/server/cron/monapi_api_collect.go new file mode 100644 index 00000000..2d7d4399 --- /dev/null +++ b/src/modules/server/cron/monapi_api_collect.go @@ -0,0 +1,50 @@ +package cron + +import ( + "strconv" + "time" + + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" + "github.com/toolkits/pkg/logger" +) + +func SyncApiCollects() { + t1 := time.NewTicker(time.Duration(cache.CHECK_INTERVAL) * time.Second) + + syncApiCollects() + logger.Info("[cron] sync api collects start...") + for { + <-t1.C + syncApiCollects() + } +} 
+ +func syncApiCollects() { + apiConfigs, err := models.GetApiCollects() + if err != nil { + logger.Warningf("get log collects err:%v %v", err) + } + + configsMap := make(map[string][]*models.ApiCollect) + for _, api := range apiConfigs { + if _, exists := cache.ApiDetectorHashRing[api.Region]; !exists { + logger.Warningf("get node err, hash ring do noe exists %v", api) + continue + } + node, err := cache.ApiDetectorHashRing[api.Region].GetNode(strconv.FormatInt(api.Id, 10)) + if err != nil { + logger.Warningf("get node err:%v %v", err, api) + continue + } + api.Decode() + key := api.Region + "-" + node + if _, exists := configsMap[key]; exists { + configsMap[key] = append(configsMap[key], api) + } else { + configsMap[key] = []*models.ApiCollect{api} + } + } + + cache.ApiCollectCache.SetAll(configsMap) +} diff --git a/src/modules/server/cron/monapi_api_detector.go b/src/modules/server/cron/monapi_api_detector.go new file mode 100644 index 00000000..be06cef0 --- /dev/null +++ b/src/modules/server/cron/monapi_api_detector.go @@ -0,0 +1,75 @@ +package cron + +import ( + "time" + + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" + + "github.com/toolkits/pkg/consistent" + "github.com/toolkits/pkg/logger" +) + +func CheckDetectorNodes() { + t1 := time.NewTicker(time.Duration(cache.CHECK_INTERVAL * time.Second)) + checkDetectorNodes() + for { + <-t1.C + checkDetectorNodes() + } +} + +func checkDetectorNodes() { + detectors, err := models.GetAllInstances("api", 1) + if err != nil { + logger.Errorf("get api detector err:%v", err) + return + } + + if len(detectors) < 1 { + logger.Error("get api detector err: len(detectors) < 1 ") + return + } + + nodesMap := make(map[string]map[string]struct{}) + for _, d := range detectors { + if _, exists := nodesMap[d.Region]; exists { + nodesMap[d.Region][d.Identity] = struct{}{} + } else { + nodesMap[d.Region] = make(map[string]struct{}) + nodesMap[d.Region][d.Identity] = 
struct{}{} + } + } + + for region, nodes := range nodesMap { + rehash := false + if _, exists := cache.ApiDetectorHashRing[region]; !exists { + logger.Warningf("hash ring do not exists %v", region) + continue + } + oldNodes := cache.ApiDetectorHashRing[region].GetRing().Members() + if len(oldNodes) != len(nodes) { //ActiveNode中的node数量和新获取的不同,重新rehash + rehash = true + } else { + for _, node := range oldNodes { + if _, exists := nodes[node]; !exists { + rehash = true + break + } + } + } + + if rehash { + //重建 hash环 + r := consistent.New() + r.NumberOfReplicas = 500 + for node, _ := range nodes { + r.Add(node) + } + logger.Warningf("detector hash ring rebuild old:%v new:%v", oldNodes, r.Members()) + cache.ApiDetectorHashRing[region].Set(r) + } + } + + return +} diff --git a/src/modules/monapi/scache/collect.go b/src/modules/server/cron/monapi_collect.go similarity index 95% rename from src/modules/monapi/scache/collect.go rename to src/modules/server/cron/monapi_collect.go index 534795ec..25345359 100644 --- a/src/modules/monapi/scache/collect.go +++ b/src/modules/server/cron/monapi_collect.go @@ -1,16 +1,17 @@ -package scache +package cron import ( "fmt" "time" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" "github.com/toolkits/pkg/logger" ) func SyncCollects() { - t1 := time.NewTicker(time.Duration(CHECK_INTERVAL) * time.Second) + t1 := time.NewTicker(time.Duration(cache.CHECK_INTERVAL) * time.Second) syncCollects() logger.Info("[cron] sync collects start...") @@ -128,7 +129,7 @@ func syncCollects() { } } - CollectCache.SetAll(collectMap) + cache.CollectCache.SetAll(collectMap) } diff --git a/src/modules/server/cron/monapi_snmp_collect.go b/src/modules/server/cron/monapi_snmp_collect.go new file mode 100644 index 00000000..99d85aad --- /dev/null +++ b/src/modules/server/cron/monapi_snmp_collect.go @@ -0,0 +1,149 @@ +package cron + +import ( + "encoding/json" + 
"fmt" + "time" + + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" + + "github.com/toolkits/pkg/logger" +) + +func SyncSnmpCollects() { + t1 := time.NewTicker(time.Duration(cache.CHECK_INTERVAL) * time.Second) + + syncSnmpCollects() + logger.Info("[cron] sync snmp collects start...") + for { + <-t1.C + syncSnmpCollects() + } +} + +func syncSnmpCollects() { + snmpConfigs, err := models.GetSnmpCollects(0) + if err != nil { + logger.Warningf("get snmp collects err:%v", err) + return + } + + var snmpCollects []*dataobj.IPAndSnmp + configsMap := make(map[string][]*dataobj.IPAndSnmp) + for _, snmp := range snmpConfigs { + snmp.Decode() + hosts, err := HostUnderNode(snmp.Nid) + if err != nil { + logger.Warningf("get hosts err:%v %+v", err, snmp) + continue + } + + hws := models.GetHardwareInfoBy(hosts) + + for _, hw := range hws { + if hw.Region == "" { + continue + } + indexes := []*dataobj.Index{} + lookups := []*dataobj.Lookup{} + for _, snmpIdx := range snmp.Indexes { + if snmpIdx == nil { + continue + } + index := &dataobj.Index{ + Labelname: snmpIdx.TagKey, + Type: snmpIdx.Type, + } + indexes = append(indexes, index) + + for _, lookup := range snmpIdx.Lookups { + if lookup == nil { + continue + } + tmpLookup := &dataobj.Lookup{ + Labels: []string{snmpIdx.TagKey}, + Labelname: lookup.Labelname, + Oid: lookup.Oid, + Type: lookup.Type, + } + lookups = append(lookups, tmpLookup) + } + } + + var enumValues map[int]string + if snmp.OidType != 1 { + mib, err := models.MibGet("module=? 
and metric=?", snmp.Module, snmp.Metric) + if err != nil { + logger.Warningf("get mib err:%v %+v", err, snmp) + continue + } + + if mib.Metric != "" { + err = json.Unmarshal([]byte(mib.EnumValues), &enumValues) + if err != nil { + logger.Warningf("unmarshal enumValues err:%v %+v", err, mib) + } + } + } + + metric := dataobj.Metric{ + Name: snmp.Metric, + Oid: snmp.Oid, + Type: snmp.MetricType, + Help: snmp.Comment, + Indexes: indexes, + EnumValues: enumValues, + Lookups: lookups, + } + + if snmp.OidType == 1 { + if m, exists := cache.ModuleMetric.Get(dataobj.COMMON_MODULE, snmp.Metric); exists { + metric.Lookups = m.Lookups + metric.EnumValues = m.EnumValues + metric.Oid = m.Oid + metric.Indexes = m.Indexes + metric.Type = m.Type + } + } + + snmpCollect := &dataobj.IPAndSnmp{ + IP: hw.IP, + Version: hw.SnmpVersion, + Auth: hw.Auth, + Region: hw.Region, + Module: snmp.Module, + Step: snmp.Step, + Timeout: snmp.Timeout, + Port: snmp.Port, + Metric: metric, + LastUpdated: snmp.LastUpdated, + } + snmpCollects = append(snmpCollects, snmpCollect) + } + } + + for _, collect := range snmpCollects { + if _, exists := cache.SnmpDetectorHashRing[collect.Region]; !exists { + logger.Warningf("get node err, hash ring do noe exists %+v", collect) + continue + } + pk := fmt.Sprintf("%s-%s-%s", collect.IP, collect.Module, collect.Metric.Oid) + node, err := cache.SnmpDetectorHashRing[collect.Region].GetNode(pk) + if err != nil { + logger.Warningf("get node err:%v %v", err, collect) + continue + } + + key := collect.Region + "-" + node + if _, exists := configsMap[key]; exists { + configsMap[key] = append(configsMap[key], collect) + } else { + configsMap[key] = []*dataobj.IPAndSnmp{collect} + } + + cache.SnmpCollectCache.SetAll(configsMap) + } + +} diff --git a/src/modules/server/cron/monapi_snmp_detector.go b/src/modules/server/cron/monapi_snmp_detector.go new file mode 100644 index 00000000..9ebcada2 --- /dev/null +++ b/src/modules/server/cron/monapi_snmp_detector.go @@ -0,0 +1,77 
@@ +package cron + +import ( + "time" + + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" + + "github.com/toolkits/pkg/consistent" + "github.com/toolkits/pkg/logger" +) + +func CheckSnmpDetectorNodes() { + t1 := time.NewTicker(time.Duration(cache.CHECK_INTERVAL * time.Second)) + checkSnmpDetectorNodes() + for { + <-t1.C + checkSnmpDetectorNodes() + } +} + +func checkSnmpDetectorNodes() { + detectors, err := models.GetAllInstances("snmp", 1) + if err != nil { + logger.Errorf("get api detector err:%v", err) + return + } + + if len(detectors) < 1 { + logger.Error("get api detector err: len(detectors) < 1 ") + return + } + + nodesMap := make(map[string]map[string]struct{}) + for _, d := range detectors { + if d.Active { + if _, exists := nodesMap[d.Region]; exists { + nodesMap[d.Region][d.Identity] = struct{}{} + } else { + nodesMap[d.Region] = make(map[string]struct{}) + nodesMap[d.Region][d.Identity] = struct{}{} + } + } + } + + for region, nodes := range nodesMap { + rehash := false + if _, exists := cache.SnmpDetectorHashRing[region]; !exists { + logger.Warningf("hash ring do not exists %v", region) + continue + } + oldNodes := cache.SnmpDetectorHashRing[region].GetRing().Members() + if len(oldNodes) != len(nodes) { //ActiveNode中的node数量和新获取的不同,重新rehash + rehash = true + } else { + for _, node := range oldNodes { + if _, exists := nodes[node]; !exists { + rehash = true + break + } + } + } + + if rehash { + //重建 hash环 + r := consistent.New() + r.NumberOfReplicas = 500 + for node, _ := range nodes { + r.Add(node) + } + logger.Warningf("detector hash ring rebuild old:%v new:%v", oldNodes, r.Members()) + cache.SnmpDetectorHashRing[region].Set(r) + } + } + + return +} diff --git a/src/modules/server/cron/monapi_snmp_hw.go b/src/modules/server/cron/monapi_snmp_hw.go new file mode 100644 index 00000000..62454f04 --- /dev/null +++ b/src/modules/server/cron/monapi_snmp_hw.go @@ -0,0 +1,52 @@ +package cron + +import ( + 
"time" + + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" + + "github.com/toolkits/pkg/logger" +) + +func SyncHardwares() { + t1 := time.NewTicker(time.Duration(cache.CHECK_INTERVAL) * time.Second) + + syncHardwares() + logger.Info("[cron] sync snmp collects start...") + for { + <-t1.C + syncHardwares() + } +} + +func syncHardwares() { + configsMap := make(map[string][]*models.NetworkHardware) + + hwList, err := models.NetworkHardwareList("", 10000000, 0) + if err != nil { + logger.Warningf("get snmp hw err:%v", err) + return + } + + for i := range hwList { + if _, exists := cache.SnmpDetectorHashRing[hwList[i].Region]; !exists { + logger.Warningf("get node err, hash ring do noe exists %v", hwList[i]) + continue + } + node, err := cache.SnmpDetectorHashRing[hwList[i].Region].GetNode(hwList[i].IP) + if err != nil { + logger.Warningf("get node err:%v %v", err, hwList[i]) + continue + } + + key := hwList[i].Region + "-" + node + if _, exists := configsMap[key]; exists { + configsMap[key] = append(configsMap[key], &hwList[i]) + } else { + configsMap[key] = []*models.NetworkHardware{&hwList[i]} + } + } + + cache.SnmpHWCache.SetAll(configsMap) +} diff --git a/src/modules/monapi/scache/stra.go b/src/modules/server/cron/monapi_stra.go similarity index 89% rename from src/modules/monapi/scache/stra.go rename to src/modules/server/cron/monapi_stra.go index e764c4c1..2982e35a 100644 --- a/src/modules/monapi/scache/stra.go +++ b/src/modules/server/cron/monapi_stra.go @@ -1,16 +1,17 @@ -package scache +package cron import ( "strconv" "time" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" "github.com/toolkits/pkg/logger" ) func SyncStras() { - t1 := time.NewTicker(time.Duration(CHECK_INTERVAL) * time.Second) + t1 := time.NewTicker(time.Duration(cache.CHECK_INTERVAL) * time.Second) syncStras() for { @@ -80,7 +81,7 @@ func 
syncStras() { } } - node, err := JudgeHashRing.GetNode(strconv.FormatInt(stra.Id, 10)) + node, err := cache.JudgeHashRing.GetNode(strconv.FormatInt(stra.Id, 10)) if err != nil { logger.Warningf("get node err:%v %v", err, stra) continue @@ -91,7 +92,8 @@ func syncStras() { strasMap[node] = []*models.Stra{stra} } } - StraCache.SetAll(strasMap) + + cache.StraCache.SetAll(strasMap) logger.Infof("[cron] sync stras done, cost: %dms", time.Now().Sub(start).Milliseconds()) } diff --git a/src/modules/server/cron/report.go b/src/modules/server/cron/report.go new file mode 100644 index 00000000..5da605f0 --- /dev/null +++ b/src/modules/server/cron/report.go @@ -0,0 +1,39 @@ +package cron + +import ( + "time" + + "github.com/didi/nightingale/v4/src/common/identity" + "github.com/didi/nightingale/v4/src/common/report" + "github.com/didi/nightingale/v4/src/models" + + "github.com/toolkits/pkg/logger" +) + +var ReportConfig report.ReportSection + +func InitReportHeartBeat(cfg report.ReportSection) { + ReportConfig = cfg + ident, _ := identity.GetIdent() + for { + reportHeartBeat(ident) + time.Sleep(time.Duration(ReportConfig.Interval) * time.Millisecond) + } +} + +func reportHeartBeat(ident string) { + instance := models.Instance{ + Module: ReportConfig.Mod, + Identity: ident, + RPCPort: ReportConfig.RPCPort, + HTTPPort: ReportConfig.HTTPPort, + Remark: ReportConfig.Remark, + Region: ReportConfig.Region, + } + + err := models.ReportHeartBeat(instance) + if err != nil { + logger.Errorf("report instance:%+v err:%v", instance, err) + } + +} diff --git a/src/modules/rdb/cron/sender_im.go b/src/modules/server/cron/sender_im.go similarity index 84% rename from src/modules/rdb/cron/sender_im.go rename to src/modules/server/cron/sender_im.go index 894d8530..21dfb4f1 100644 --- a/src/modules/rdb/cron/sender_im.go +++ b/src/modules/server/cron/sender_im.go @@ -6,25 +6,20 @@ import ( "strings" "time" + "github.com/didi/nightingale/v4/src/common/dataobj" + 
"github.com/didi/nightingale/v4/src/modules/server/dingtalk" + "github.com/didi/nightingale/v4/src/modules/server/redisc" + "github.com/didi/nightingale/v4/src/modules/server/wechat" + "github.com/toolkits/pkg/file" "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/net/httplib" "github.com/toolkits/pkg/sys" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/rdb/config" - "github.com/didi/nightingale/src/modules/rdb/dingtalk" - "github.com/didi/nightingale/src/modules/rdb/redisc" - "github.com/didi/nightingale/src/modules/rdb/wechat" ) func ConsumeIm() { - if !config.Config.Redis.Enable { - return - } - for { - list := redisc.Pop(1, config.IM_QUEUE_NAME) + list := redisc.Pop(1, IM_QUEUE_NAME) if len(list) == 0 { time.Sleep(time.Millisecond * 200) continue @@ -45,7 +40,7 @@ func sendIm(message *dataobj.Message) { <-ImWorkerChan }() - switch config.Config.Sender["im"].Way { + switch Sender["im"].Way { case "api": sendImByAPI(message) case "shell": @@ -57,12 +52,12 @@ func sendIm(message *dataobj.Message) { case "dingtalk_robot": sendImByDingTalkRobot(message) default: - logger.Errorf("not support %s to send im, im: %+v", config.Config.Sender["im"].Way, message) + logger.Errorf("not support %s to send im, im: %+v", Sender["im"].Way, message) } } func sendImByAPI(message *dataobj.Message) { - api := config.Config.Sender["im"].API + api := Sender["im"].API res, code, err := httplib.PostJSON(api, time.Second, message, nil) logger.Infof("SendImByAPI, api:%s, im:%+v, error:%v, response:%s, statuscode:%d", api, message, err, string(res), code) } @@ -79,9 +74,9 @@ func sendImByShell(message *dataobj.Message) { } func sendImByWeChat(message *dataobj.Message) { - corpID := config.Config.WeChat.CorpID - agentID := config.Config.WeChat.AgentID - secret := config.Config.WeChat.Secret + corpID := wechat.WeChat.CorpID + agentID := wechat.WeChat.AgentID + secret := wechat.WeChat.Secret cnt := len(message.Tos) if cnt == 0 { @@ 
-168,7 +163,7 @@ func sendImByDingTalkRobot(message *dataobj.Message) { var atUser []string var tokenUser []string for user := range set { - if req.MatchString(user){ + if req.MatchString(user) { atUser = append(atUser, user) } else { tokenUser = append(tokenUser, user) diff --git a/src/modules/server/cron/sender_init.go b/src/modules/server/cron/sender_init.go new file mode 100644 index 00000000..71eaf135 --- /dev/null +++ b/src/modules/server/cron/sender_init.go @@ -0,0 +1,31 @@ +package cron + +var ( + SmsWorkerChan chan int + MailWorkerChan chan int + VoiceWorkerChan chan int + ImWorkerChan chan int +) + +const ( + SMS_QUEUE_NAME = "/queue/rdb/sms" + MAIL_QUEUE_NAME = "/queue/rdb/mail" + VOICE_QUEUE_NAME = "/queue/rdb/voice" + IM_QUEUE_NAME = "/queue/rdb/im" +) + +type SenderSection struct { + Way string `yaml:"way"` + Worker int `yaml:"worker"` + API string `yaml:"api"` +} + +var Sender map[string]SenderSection + +func InitWorker(sender map[string]SenderSection) { + Sender = sender + SmsWorkerChan = make(chan int, Sender["sms"].Worker) + MailWorkerChan = make(chan int, Sender["mail"].Worker) + VoiceWorkerChan = make(chan int, Sender["voice"].Worker) + ImWorkerChan = make(chan int, Sender["im"].Worker) +} diff --git a/src/modules/rdb/cron/sender_mail.go b/src/modules/server/cron/sender_mail.go similarity index 85% rename from src/modules/rdb/cron/sender_mail.go rename to src/modules/server/cron/sender_mail.go index 7884667a..f06aef61 100644 --- a/src/modules/rdb/cron/sender_mail.go +++ b/src/modules/server/cron/sender_mail.go @@ -8,26 +8,20 @@ import ( "strings" "time" - "gopkg.in/gomail.v2" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/redisc" "github.com/toolkits/pkg/file" "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/net/httplib" "github.com/toolkits/pkg/sys" - - "github.com/didi/nightingale/src/common/dataobj" - 
"github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/rdb/config" - "github.com/didi/nightingale/src/modules/rdb/redisc" + gomail "gopkg.in/gomail.v2" ) func ConsumeMail() { - if !config.Config.Redis.Enable { - return - } - for { - list := redisc.Pop(1, config.MAIL_QUEUE_NAME) + list := redisc.Pop(1, MAIL_QUEUE_NAME) if len(list) == 0 { time.Sleep(time.Millisecond * 200) continue @@ -65,7 +59,7 @@ func sendMail(message *dataobj.Message) { message.Tos = tos - switch config.Config.Sender["mail"].Way { + switch Sender["mail"].Way { case "api": sendMailByAPI(message) case "smtp": @@ -73,12 +67,12 @@ func sendMail(message *dataobj.Message) { case "shell": sendMailByShell(message) default: - logger.Errorf("not support %s to send mail, mail: %+v", config.Config.Sender["mail"].Way, message) + logger.Errorf("not support %s to send mail, mail: %+v", Sender["mail"].Way, message) } } func sendMailByAPI(message *dataobj.Message) { - api := config.Config.Sender["mail"].API + api := Sender["mail"].API res, code, err := httplib.PostJSON(api, time.Second, message, nil) logger.Infof("SendMailByAPI, api:%s, mail:%+v, error:%v, response:%s, statuscode:%d", api, message, err, string(res), code) } diff --git a/src/modules/rdb/cron/sender_sms.go b/src/modules/server/cron/sender_sms.go similarity index 74% rename from src/modules/rdb/cron/sender_sms.go rename to src/modules/server/cron/sender_sms.go index 5574694a..945dfc88 100644 --- a/src/modules/rdb/cron/sender_sms.go +++ b/src/modules/server/cron/sender_sms.go @@ -5,23 +5,18 @@ import ( "strings" "time" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/server/redisc" + "github.com/toolkits/pkg/file" "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/net/httplib" "github.com/toolkits/pkg/sys" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/rdb/config" - 
"github.com/didi/nightingale/src/modules/rdb/redisc" ) func ConsumeSms() { - if !config.Config.Redis.Enable { - return - } - for { - list := redisc.Pop(1, config.SMS_QUEUE_NAME) + list := redisc.Pop(1, SMS_QUEUE_NAME) if len(list) == 0 { time.Sleep(time.Millisecond * 200) continue @@ -42,18 +37,18 @@ func sendSms(message *dataobj.Message) { <-SmsWorkerChan }() - switch config.Config.Sender["sms"].Way { + switch Sender["sms"].Way { case "api": sendSmsByAPI(message) case "shell": sendSmsByShell(message) default: - logger.Errorf("not support %s to send sms, sms: %+v", config.Config.Sender["sms"].Way, message) + logger.Errorf("not support %s to send sms, sms: %+v", Sender["sms"].Way, message) } } func sendSmsByAPI(message *dataobj.Message) { - api := config.Config.Sender["sms"].API + api := Sender["sms"].API res, err := httplib.Post(api).JSONBodyQuiet(message).SetTimeout(time.Second * 3).String() logger.Infof("SendSmsByAPI, api:%s, sms:%+v, error:%v, response:%s", api, message, err, res) } diff --git a/src/modules/rdb/cron/sender_voice.go b/src/modules/server/cron/sender_voice.go similarity index 74% rename from src/modules/rdb/cron/sender_voice.go rename to src/modules/server/cron/sender_voice.go index 8967f292..a804ff03 100644 --- a/src/modules/rdb/cron/sender_voice.go +++ b/src/modules/server/cron/sender_voice.go @@ -5,23 +5,18 @@ import ( "strings" "time" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/server/redisc" + "github.com/toolkits/pkg/file" "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/net/httplib" "github.com/toolkits/pkg/sys" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/rdb/config" - "github.com/didi/nightingale/src/modules/rdb/redisc" ) func ConsumeVoice() { - if !config.Config.Redis.Enable { - return - } - for { - list := redisc.Pop(1, config.VOICE_QUEUE_NAME) + list := redisc.Pop(1, VOICE_QUEUE_NAME) if len(list) == 0 { 
time.Sleep(time.Millisecond * 200) continue @@ -42,18 +37,18 @@ func sendVoice(message *dataobj.Message) { <-VoiceWorkerChan }() - switch config.Config.Sender["voice"].Way { + switch Sender["voice"].Way { case "api": sendVoiceByAPI(message) case "shell": sendVoiceByShell(message) default: - logger.Errorf("not support %s to send voice, voice: %+v", config.Config.Sender["voice"].Way, message) + logger.Errorf("not support %s to send voice, voice: %+v", Sender["voice"].Way, message) } } func sendVoiceByAPI(message *dataobj.Message) { - api := config.Config.Sender["voice"].API + api := Sender["voice"].API res, code, err := httplib.PostJSON(api, time.Second, message, nil) logger.Infof("SendVoiceByAPI, api:%s, voice:%+v, error:%v, response:%s, statuscode:%d", api, message, err, string(res), code) } diff --git a/src/modules/transfer/cron/aggr.go b/src/modules/server/cron/transfer_aggr.go similarity index 58% rename from src/modules/transfer/cron/aggr.go rename to src/modules/server/cron/transfer_aggr.go index 772e755c..ecb43ab9 100644 --- a/src/modules/transfer/cron/aggr.go +++ b/src/modules/server/cron/transfer_aggr.go @@ -1,20 +1,14 @@ package cron import ( - "fmt" - "math/rand" "time" - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/transfer/aggr" - "github.com/didi/nightingale/src/modules/transfer/cache" - "github.com/didi/nightingale/src/toolkits/stats" - "github.com/didi/nightingale/src/toolkits/str" - - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/net/httplib" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/stats" + "github.com/didi/nightingale/v4/src/common/str" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/aggr" + "github.com/didi/nightingale/v4/src/modules/server/cache" ) type AggrCalcResp struct { @@ -36,45 
+30,12 @@ func GetAggrCalcStrategy() { } func getAggrCalcStrategy() { - addrs := address.GetHTTPAddresses("monapi") - if len(addrs) == 0 { - logger.Error("find no monapi address") - return - } - - var stras AggrCalcResp - perm := rand.Perm(len(addrs)) - var err error - for i := range perm { - url := fmt.Sprintf("http://%s%s", addrs[perm[i]], aggr.AggrConfig.ApiPath) - err = httplib.Get(url).SetTimeout(time.Duration(aggr.AggrConfig.ApiTimeout) * time.Millisecond).ToJSON(&stras) - - if err != nil { - logger.Warningf("get strategy from remote failed, error:%v", err) - continue - } - - if stras.Err != "" { - logger.Warningf("get strategy from remote failed, error:%v", stras.Err) - continue - } - if len(stras.Data) > 0 { - break - } - } - - if err != nil { - logger.Errorf("get stra err: %v", err) - stats.Counter.Set("stra.err", 1) - } - - if len(stras.Data) == 0 { //策略数为零,不更新缓存 - return - } straMap := make(map[string]map[string][]*dataobj.RawMetricAggrCalc) metricMap := make(map[int64]string) - for _, stra := range stras.Data { + + stras := cache.AggrCalcStraCache.Get() + for _, stra := range stras { stats.Counter.Set("stra.count", 1) metricMap[stra.Id] = stra.NewMetric @@ -82,8 +43,8 @@ func getAggrCalcStrategy() { metric := rawMetric.Name for _, nid := range rawMetric.Nids { - key := str.MD5(nid, metric, "") //TODO get straMap key, 此处需要优化 - k1 := key[0:2] //为了加快查找,增加一层 map,key 为计算出来的 hash 的前 2 位 + key := str.ToMD5(nid, metric, "") //TODO get straMap key, 此处需要优化 + k1 := key[0:2] //为了加快查找,增加一层 map,key 为计算出来的 hash 的前 2 位 if _, exists := straMap[k1]; !exists { straMap[k1] = make(map[string][]*dataobj.RawMetricAggrCalc) @@ -115,8 +76,8 @@ func getAggrCalcStrategy() { } for _, endpoint := range rawMetric.Endpoints { - key := str.MD5(endpoint, metric, "") //TODO get straMap key, 此处需要优化 - k1 := key[0:2] //为了加快查找,增加一层 map,key 为计算出来的 hash 的前 2 位 + key := str.ToMD5(endpoint, metric, "") //TODO get straMap key, 此处需要优化 + k1 := key[0:2] //为了加快查找,增加一层 map,key 为计算出来的 hash 的前 2 位 if 
_, exists := straMap[k1]; !exists { straMap[k1] = make(map[string][]*dataobj.RawMetricAggrCalc) diff --git a/src/modules/transfer/cron/pool.go b/src/modules/server/cron/transfer_pool.go similarity index 68% rename from src/modules/transfer/cron/pool.go rename to src/modules/server/cron/transfer_pool.go index 052a5cd4..c9319f42 100644 --- a/src/modules/transfer/cron/pool.go +++ b/src/modules/server/cron/transfer_pool.go @@ -3,19 +3,19 @@ package cron import ( "time" - "github.com/didi/nightingale/src/modules/transfer/backend" + "github.com/didi/nightingale/v4/src/modules/server/judge" ) func RebuildJudgePool() { ticker := time.NewTicker(time.Duration(8) * time.Second) for { <-ticker.C - judges := backend.GetJudges() + judges := judge.GetJudges() if len(judges) == 0 { //防止心跳服务故障导致 judge 不可用,如果 judges 个数为 0,先不更新 judge 连接池 continue } - backend.JudgeConnPools.UpdatePools(judges) + judge.JudgeConnPools.UpdatePools(judges) } } diff --git a/src/modules/server/cron/transfer_queue.go b/src/modules/server/cron/transfer_queue.go new file mode 100644 index 00000000..4db7194f --- /dev/null +++ b/src/modules/server/cron/transfer_queue.go @@ -0,0 +1,35 @@ +package cron + +import ( + "time" + + "github.com/didi/nightingale/v4/src/modules/server/judge" + + "github.com/toolkits/pkg/container/list" +) + +func UpdateJudgeQueue() { + ticker := time.NewTicker(time.Duration(8) * time.Second) + for { + <-ticker.C + updateJudgeQueue() + } +} + +func updateJudgeQueue() { + instances := judge.GetJudges() + if len(instances) == 0 { + return + } + + for _, instance := range instances { + if !judge.JudgeQueues.Exists(instance) { + q := list.NewSafeListLimited(judge.DefaultSendQueueMaxSize) + judge.JudgeQueues.Set(instance, q) + go judge.Send2JudgeTask(q, instance, judge.JudgeConfig.WorkerNum) + } else { + judge.JudgeQueues.UpdateTS(instance) + } + } + judge.JudgeQueues.Clean() +} diff --git a/src/modules/server/cron/transfer_stra.go b/src/modules/server/cron/transfer_stra.go new file mode 100644 
index 00000000..e2712275 --- /dev/null +++ b/src/modules/server/cron/transfer_stra.go @@ -0,0 +1,76 @@ +package cron + +import ( + "time" + + "github.com/didi/nightingale/v4/src/common/stats" + "github.com/didi/nightingale/v4/src/common/str" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" + + "github.com/toolkits/pkg/logger" +) + +type StraResp struct { + Data []*models.Stra `json:"dat"` + Err string `json:"err"` +} + +func GetStrategy() { + ticker := time.NewTicker(time.Duration(8) * time.Second) + getStrategy() + for { + <-ticker.C + getStrategy() + } +} + +func getStrategy() { + stras := cache.StraCache.GetAll() + straMap := make(map[string]map[string][]*models.Stra) + for _, stra := range stras { + stats.Counter.Set("stra.count", 1) + + if len(stra.Exprs) < 1 { + logger.Warningf("illegal stra:%v exprs", stra) + continue + } + + metric := stra.Exprs[0].Metric + for _, nid := range stra.Nids { + key := str.ToMD5(nid, metric, "") //TODO get straMap key, 此处需要优化 + k1 := key[0:2] //为了加快查找,增加一层 map,key 为计算出来的 hash 的前 2 位 + + if _, exists := straMap[k1]; !exists { + straMap[k1] = make(map[string][]*models.Stra) + } + + if _, exists := straMap[k1][key]; !exists { + straMap[k1][key] = []*models.Stra{stra} + stats.Counter.Set("stra.key", 1) + + } else { + straMap[k1][key] = append(straMap[k1][key], stra) + } + } + + for _, endpoint := range stra.Endpoints { + key := str.ToMD5(endpoint, metric, "") //TODO get straMap key, 此处需要优化 + k1 := key[0:2] //为了加快查找,增加一层 map,key 为计算出来的 hash 的前 2 位 + + if _, exists := straMap[k1]; !exists { + straMap[k1] = make(map[string][]*models.Stra) + } + + if _, exists := straMap[k1][key]; !exists { + straMap[k1][key] = []*models.Stra{stra} + stats.Counter.Set("stra.key", 1) + + } else { + straMap[k1][key] = append(straMap[k1][key], stra) + } + } + } + + cache.StraMap.ReInit(straMap) +} diff --git a/src/modules/rdb/dingtalk/dingtalk.go b/src/modules/server/dingtalk/dingtalk.go similarity 
index 93% rename from src/modules/rdb/dingtalk/dingtalk.go rename to src/modules/server/dingtalk/dingtalk.go index ba1b15ac..fa93dd70 100644 --- a/src/modules/rdb/dingtalk/dingtalk.go +++ b/src/modules/server/dingtalk/dingtalk.go @@ -16,7 +16,7 @@ type Result struct { type dingReqData struct { Msgtype string `json:"msgtype"` Text *textContent `json:"text"` - At *atContent `json:"at"` + At *atContent `json:"at"` } type textContent struct { @@ -24,8 +24,8 @@ type textContent struct { } type atContent struct { - AtMobiles []string `json:"atMobiles"` - IsAtAll bool `json:"isAtAll"` + AtMobiles []string `json:"atMobiles"` + IsAtAll bool `json:"isAtAll"` } // RobotSend robot发送信息 diff --git a/src/modules/job/http/router_funcs.go b/src/modules/server/http/http_funcs.go similarity index 65% rename from src/modules/job/http/router_funcs.go rename to src/modules/server/http/http_funcs.go index 82ed7919..da1e4b90 100644 --- a/src/modules/job/http/router_funcs.go +++ b/src/modules/server/http/http_funcs.go @@ -1,14 +1,16 @@ package http import ( + "fmt" "strconv" "strings" + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/auth" + "github.com/gin-gonic/gin" "github.com/toolkits/pkg/errors" - - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/toolkits/i18n" ) func dangerous(v interface{}) { @@ -18,6 +20,7 @@ func dangerous(v interface{}) { func bomb(format string, a ...interface{}) { errors.Bomb(i18n.Sprintf(format, a...)) } + func bind(c *gin.Context, ptr interface{}) { dangerous(c.ShouldBindJSON(ptr)) } @@ -129,7 +132,7 @@ func renderMessage(c *gin.Context, v interface{}) { switch t := v.(type) { case string: - c.JSON(200, gin.H{"err": t}) + c.JSON(200, gin.H{"err": i18n.Sprintf(t)}) case error: c.JSON(200, gin.H{"err": t.Error()}) } @@ -151,13 +154,15 @@ func renderZeroPage(c *gin.Context) { }, nil) } -// ------------ - type idsForm struct { Ids 
[]int64 `json:"ids"` } -// ------------ +func (f idsForm) Validate() { + if len(f.Ids) == 0 { + bomb("arg[ids] is empty") + } +} func loginUsername(c *gin.Context) string { value, has := c.Get("username") @@ -182,6 +187,8 @@ func loginUser(c *gin.Context) *models.User { bomb("unauthorized") } + auth.PrepareUser(user) + return user } @@ -194,30 +201,6 @@ func loginRoot(c *gin.Context) *models.User { return value.(*models.User) } -func User(id int64) *models.User { - user, err := models.UserGet("id=?", id) - if err != nil { - bomb("cannot retrieve user[%d]: %v", id, err) - } - - if user == nil { - bomb("no such user[%d]", id) - } - - return user -} - -func Node(id int64) *models.Node { - node, err := models.NodeGet("id=?", id) - dangerous(err) - - if node == nil { - bomb("no such node[%d]", id) - } - - return node -} - func TaskTpl(id int64) *models.TaskTpl { obj, err := models.TaskTplGet("id=?", id) dangerous(err) @@ -258,3 +241,139 @@ func cleanHosts(formHosts []string) []string { return arr } + +func User(id int64) *models.User { + user, err := models.UserGet("id=?", id) + if err != nil { + bomb("cannot retrieve user[%d]: %v", id, err) + } + + if user == nil { + bomb("no such user[%d]", id) + } + + return user +} + +func Team(id int64) *models.Team { + team, err := models.TeamGet("id=?", id) + if err != nil { + bomb("cannot retrieve team[%d]: %v", id, err) + } + + if team == nil { + bomb("no such team[%d]", id) + } + + return team +} + +func Role(id int64) *models.Role { + role, err := models.RoleGet("id=?", id) + if err != nil { + bomb("cannot retrieve role[%d]: %v", id, err) + } + + if role == nil { + bomb("no such role[%d]", id) + } + + return role +} + +func Node(id int64) *models.Node { + node, err := models.NodeGet("id=?", id) + dangerous(err) + + if node == nil { + bomb("no such node[%d]", id) + } + + return node +} + +func mustNode(id int64) *models.Node { + node, err := models.NodeGet("id=?", id) + if err != nil { + bomb("cannot retrieve node[%d]: %v", 
id, err) + } + + if node == nil { + bomb("no such node[%d]", id) + } + + return node +} + +func mustScreen(id int64) *models.Screen { + screen, err := models.ScreenGet("id", id) + if err != nil { + bomb("cannot retrieve screen[%d]: %v", id, err) + } + + if screen == nil { + bomb("no such screen[%d]", id) + } + + return screen +} + +func mustScreenSubclass(id int64) *models.ScreenSubclass { + subclass, err := models.ScreenSubclassGet("id", id) + if err != nil { + bomb("cannot retrieve subclass[%d]: %v", id, err) + } + + if subclass == nil { + bomb("no such subclass[%d]", id) + } + + return subclass +} + +func mustChart(id int64) *models.Chart { + chart, err := models.ChartGet("id", id) + if err != nil { + bomb("cannot retrieve chart[%d]: %v", id, err) + } + + if chart == nil { + bomb("no such chart[%d]", id) + } + + return chart +} + +func mustEventCur(id int64) *models.EventCur { + eventCur, err := models.EventCurGet("id", id) + if err != nil { + bomb("cannot retrieve eventCur[%d]: %v", id, err) + } + + if eventCur == nil { + bomb("no such eventCur[%d]", id) + } + + return eventCur +} + +func mustEvent(id int64) *models.Event { + eventCur, err := models.EventGet("id", id) + if err != nil { + bomb("cannot retrieve event[%d]: %v", id, err) + } + + if eventCur == nil { + bomb("no such event[%d]", id) + } + + return eventCur +} + +func _e(format string, a ...interface{}) error { + return fmt.Errorf(i18n.Sprintf(format, a...)) +} + +func _s(format string, a ...interface{}) string { + return i18n.Sprintf(format, a...) 
+} diff --git a/src/modules/rdb/http/http_middleware.go b/src/modules/server/http/http_middleware.go similarity index 91% rename from src/modules/rdb/http/http_middleware.go rename to src/modules/server/http/http_middleware.go index ad3d75b6..44c6a645 100644 --- a/src/modules/rdb/http/http_middleware.go +++ b/src/modules/server/http/http_middleware.go @@ -9,10 +9,10 @@ import ( "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/slice" - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/rdb/config" - "github.com/didi/nightingale/src/modules/rdb/session" + "github.com/didi/nightingale/v4/src/common/address" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/config" + "github.com/didi/nightingale/v4/src/modules/server/http/session" ) func shouldStartSession() gin.HandlerFunc { @@ -66,7 +66,7 @@ func shouldBeService() gin.HandlerFunc { return } - if ip != "" && slice.ContainsString(address.GetAddresses("rdb"), ip) { + if ip != "" && slice.ContainsString(address.GetAddresses("server"), ip) { c.Next() return } diff --git a/src/modules/rdb/http/http_server.go b/src/modules/server/http/http_server.go similarity index 82% rename from src/modules/rdb/http/http_server.go rename to src/modules/server/http/http_server.go index 188f0fbb..5204fd85 100644 --- a/src/modules/rdb/http/http_server.go +++ b/src/modules/server/http/http_server.go @@ -10,9 +10,9 @@ import ( "github.com/gin-gonic/gin" - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/common/middleware" - "github.com/didi/nightingale/src/modules/rdb/config" + "github.com/didi/nightingale/v4/src/common/address" + "github.com/didi/nightingale/v4/src/common/middleware" + "github.com/didi/nightingale/v4/src/modules/server/config" ) var srv = &http.Server{ @@ -35,13 +35,17 @@ func Start() { } r := gin.New() - r.Use(loggerMid, recoveryMid) + 
r.Use(recoveryMid) + + if c.HTTP.ShowLog { + r.Use(loggerMid) + } Config(r) initStats() - srv.Addr = address.GetHTTPListen("rdb") + srv.Addr = address.GetHTTPListen("server") srv.Handler = r go func() { diff --git a/src/modules/server/http/router.go b/src/modules/server/http/router.go new file mode 100644 index 00000000..cdfadf2c --- /dev/null +++ b/src/modules/server/http/router.go @@ -0,0 +1,551 @@ +package http + +// Config routes +import ( + "github.com/didi/nightingale/v4/src/modules/server/config" + + "github.com/gin-contrib/pprof" + "github.com/gin-gonic/gin" +) + +func Config(r *gin.Engine) { + r.Static("/pub", "./pub") + r.StaticFile("/favicon.ico", "./pub/favicon.ico") + + pprof.Register(r, "/api/server/debug/pprof") + + sys := r.Group("/api/server") + { + sys.GET("/ping", ping) + sys.GET("/pid", pid) + sys.GET("/addr", addr) + } + + hbs := r.Group("/api/hbs") + { + hbs.POST("/heartbeat", heartBeat) + hbs.GET("/instances", instanceGets) + } + + jobNotLogin := r.Group("/api/job-ce") + { + jobNotLogin.GET("/ping", ping) + jobNotLogin.POST("/callback", taskCallback) + jobNotLogin.GET("/task/:id/stdout", taskStdout) + jobNotLogin.GET("/task/:id/stderr", taskStderr) + jobNotLogin.GET("/task/:id/state", apiTaskState) + jobNotLogin.GET("/task/:id/result", apiTaskResult) + jobNotLogin.GET("/task/:id/host/:host/output", taskHostOutput) + jobNotLogin.GET("/task/:id/host/:host/stdout", taskHostStdout) + jobNotLogin.GET("/task/:id/host/:host/stderr", taskHostStderr) + jobNotLogin.GET("/task/:id/stdout.txt", taskStdoutTxt) + jobNotLogin.GET("/task/:id/stderr.txt", taskStderrTxt) + jobNotLogin.GET("/task/:id/stdout.json", apiTaskJSONStdouts) + jobNotLogin.GET("/task/:id/stderr.json", apiTaskJSONStderrs) + } + + jobUserLogin := r.Group("/api/job-ce").Use(shouldBeLogin()) + { + jobUserLogin.GET("/task-tpls", taskTplGets) + jobUserLogin.POST("/task-tpls", taskTplPost) + jobUserLogin.GET("/task-tpl/:id", taskTplGet) + jobUserLogin.PUT("/task-tpl/:id", taskTplPut) + 
jobUserLogin.DELETE("/task-tpl/:id", taskTplDel) + jobUserLogin.POST("/task-tpl/:id/run", taskTplRun) + jobUserLogin.PUT("/task-tpls/tags", taskTplTagsPut) + jobUserLogin.PUT("/task-tpls/node", taskTplNodePut) + + jobUserLogin.POST("/tasks", taskPost) + jobUserLogin.GET("/tasks", taskGets) + jobUserLogin.GET("/task/:id", taskView) + jobUserLogin.PUT("/task/:id/action", taskActionPut) + jobUserLogin.PUT("/task/:id/host", taskHostPut) + + // 专门针对工单系统开发的接口 + jobUserLogin.POST("/run/:id", taskRunForTT) + } + + rdbNotLogin := r.Group("/api/rdb") + { + rdbNotLogin.GET("/ping", ping) + rdbNotLogin.GET("/ldap/used", ldapUsed) + rdbNotLogin.GET("/ops/global", globalOpsGet) + rdbNotLogin.GET("/ops/local", localOpsGet) + rdbNotLogin.GET("/roles/global", globalRoleGet) + rdbNotLogin.GET("/roles/local", localRoleGet) + rdbNotLogin.POST("/users/invite", userInvitePost) + + rdbNotLogin.POST("/auth/send-login-code", sendLoginCode) + rdbNotLogin.POST("/auth/send-rst-code", sendRstCode) + rdbNotLogin.POST("/auth/rst-password", rstPassword) + rdbNotLogin.GET("/auth/captcha", captchaGet) + + rdbNotLogin.GET("/v2/nodes", nodeGets) + rdbNotLogin.GET("/pwd-rules", pwdRulesGet) + rdbNotLogin.GET("/counter", counterGet) + + rdbNotLogin.PUT("/self/password", selfPasswordPut) + + } + + rdbRootLogin := r.Group("/api/rdb").Use(shouldBeRoot()) + { + rdbRootLogin.GET("/configs/smtp", smtpConfigsGet) + rdbRootLogin.POST("/configs/smtp/test", smtpTest) + rdbRootLogin.PUT("/configs/smtp", smtpConfigsPut) + + rdbRootLogin.GET("/configs/auth", authConfigsGet) + rdbRootLogin.PUT("/configs/auth", authConfigsPut) + rdbRootLogin.POST("/auth/white-list", whiteListPost) + rdbRootLogin.GET("/auth/white-list", whiteListsGet) + rdbRootLogin.GET("/auth/white-list/:id", whiteListGet) + rdbRootLogin.PUT("/auth/white-list/:id", whiteListPut) + rdbRootLogin.DELETE("/auth/white-list/:id", whiteListDel) + + rdbRootLogin.GET("/log/login", loginLogGets) + rdbRootLogin.GET("/log/operation", operationLogGets) + + 
rdbRootLogin.POST("/roles", roleAddPost) + rdbRootLogin.PUT("/role/:id", rolePut) + rdbRootLogin.DELETE("/role/:id", roleDel) + rdbRootLogin.GET("/role/:id", roleDetail) + rdbRootLogin.GET("/role/:id/users", roleGlobalUsersGet) + rdbRootLogin.PUT("/role/:id/users/bind", roleGlobalUsersBind) + rdbRootLogin.PUT("/role/:id/users/unbind", roleGlobalUsersUnbind) + + rdbRootLogin.POST("/users", userAddPost) + rdbRootLogin.GET("/user/:id/profile", userProfileGet) + rdbRootLogin.PUT("/user/:id/profile", userProfilePut) + rdbRootLogin.PUT("/user/:id/password", userPasswordPut) + rdbRootLogin.DELETE("/user/:id", userDel) + + rdbRootLogin.POST("/node-cates", nodeCatePost) + rdbRootLogin.PUT("/node-cate/:id", nodeCatePut) + rdbRootLogin.DELETE("/node-cate/:id", nodeCateDel) + rdbRootLogin.POST("/node-cates/fields", nodeCateFieldNew) + rdbRootLogin.PUT("/node-cates/field/:id", nodeCateFieldPut) + rdbRootLogin.DELETE("/node-cates/field/:id", nodeCateFieldDel) + + rdbRootLogin.GET("/nodes/trash", nodeTrashGets) + rdbRootLogin.PUT("/nodes/trash/recycle", nodeTrashRecycle) + + rdbRootLogin.POST("/sso/clients", ssoClientsPost) + rdbRootLogin.GET("/sso/clients", ssoClientsGet) + rdbRootLogin.GET("/sso/clients/:clientId", ssoClientGet) + rdbRootLogin.PUT("/sso/clients/:clientId", ssoClientPut) + rdbRootLogin.DELETE("/sso/clients/:clientId", ssoClientDel) + + rdbRootLogin.GET("/resources/tenant-rank", tenantResourcesCountRank) + rdbRootLogin.GET("/resources/project-rank", projectResourcesCountRank) + + rdbRootLogin.GET("/root/users", userListGet) + rdbRootLogin.GET("/root/teams/all", teamAllGet) + rdbRootLogin.GET("/root/node-cates", nodeCateGets) + } + + rdbUserLogin := r.Group("/api/rdb").Use(shouldBeLogin()) + { + rdbUserLogin.GET("/resoplogs", operationLogResGets) + + rdbUserLogin.GET("/self/profile", selfProfileGet) + rdbUserLogin.PUT("/self/profile", selfProfilePut) + rdbUserLogin.GET("/self/token", selfTokenGets) + rdbUserLogin.POST("/self/token", selfTokenPost) + 
rdbUserLogin.PUT("/self/token", selfTokenPut) + rdbUserLogin.GET("/self/perms/global", permGlobalOps) + rdbUserLogin.GET("/self/perms/local/node/:id", permLocalOps) + + rdbUserLogin.GET("/users", userListGet) + rdbUserLogin.GET("/users/invite", userInviteGet) + + rdbUserLogin.GET("/teams/all", teamAllGet) + rdbUserLogin.GET("/teams/mine", teamMineGet) + rdbUserLogin.POST("/teams", teamAddPost) + rdbUserLogin.PUT("/team/:id", teamPut) + rdbUserLogin.GET("/team/:id", teamDetail) + rdbUserLogin.PUT("/team/:id/users/bind", teamUserBind) + rdbUserLogin.PUT("/team/:id/users/unbind", teamUserUnbind) + rdbUserLogin.DELETE("/team/:id", teamDel) + + rdbUserLogin.GET("/node-cates", nodeCateGets) + rdbUserLogin.GET("/node-cates/fields", nodeCateFieldGets) + rdbUserLogin.GET("/node-cates/field/:id", nodeCateFieldGet) + + rdbUserLogin.POST("/nodes", nodePost) + rdbUserLogin.GET("/nodes", nodeGets) + rdbUserLogin.GET("/node/:id", nodeGet) + rdbUserLogin.PUT("/node/:id", nodePut) + rdbUserLogin.DELETE("/node/:id", nodeDel) + rdbUserLogin.GET("/node/:id/fields", nodeFieldGets) + rdbUserLogin.PUT("/node/:id/fields", nodeFieldPuts) + rdbUserLogin.GET("/node/:id/roles", rolesUnderNodeGets) + rdbUserLogin.POST("/node/:id/roles", rolesUnderNodePost) + rdbUserLogin.DELETE("/node/:id/roles", rolesUnderNodeDel) + rdbUserLogin.DELETE("/node/:id/roles/try", rolesUnderNodeDelTry) + rdbUserLogin.GET("/node/:id/resources", resourceUnderNodeGet) + rdbUserLogin.GET("/node/:id/resources/cate-count", renderNodeResourcesCountByCate) + rdbUserLogin.POST("/node/:id/resources/bind", resourceBindNode) + rdbUserLogin.POST("/node/:id/resources/unbind", resourceUnbindNode) + rdbUserLogin.PUT("/node/:id/resources/note", resourceUnderNodeNotePut) + rdbUserLogin.PUT("/node/:id/resources/labels", resourceUnderNodeLabelsPut) + + rdbUserLogin.GET("/tree", treeUntilLeafGets) + rdbUserLogin.GET("/tree/projs", treeUntilProjectGets) + rdbUserLogin.GET("/tree/orgs", treeUntilOrganizationGets) + + 
rdbUserLogin.GET("/resources/search", resourceSearchGet) + rdbUserLogin.PUT("/resources/note", resourceNotePut) + rdbUserLogin.PUT("/resources/note/try", resourceNotePutTry) + rdbUserLogin.GET("/resources/bindings", resourceBindingsGet) + rdbUserLogin.GET("/resources/orphan", resourceOrphanGet) + + rdbUserLogin.GET("/resources/cate-count", renderAllResourcesCountByCate) + + // 是否在某个节点上有权限做某个操作(即资源权限点) + rdbUserLogin.GET("/can-do-node-op", v1CandoNodeOp) + // 同时校验多个操作权限点 + rdbUserLogin.GET("/can-do-node-ops", v1CandoNodeOps) + } + + sessionStarted := r.Group("/api/rdb").Use(shouldStartSession()) + { + sessionStarted.POST("/auth/login", login) + sessionStarted.GET("/auth/logout", logout) + sessionStarted.GET("/auth/v2/authorize", authAuthorizeV2) + sessionStarted.GET("/auth/v2/callback", authCallbackV2) + sessionStarted.GET("/auth/v2/logout", logoutV2) + } + + transfer := r.Group("/api/transfer") + { + transfer.POST("/stra", getStra) + transfer.POST("/which-tsdb", tsdbInstance) + transfer.POST("/which-judge", judgeInstance) + transfer.GET("/alive-judges", judges) + + transfer.POST("/push", PushData) + transfer.POST("/data", QueryData) + transfer.POST("/data/ui", QueryDataForUI) + } + + index := r.Group("/api/index") + { + index.POST("/metrics", GetMetrics) + index.POST("/tagkv", GetTagPairs) + index.POST("/counter/clude", GetIndexByClude) + index.POST("/counter/fullmatch", GetIndexByFullTags) + } + + generic := r.Group("/api/mon").Use(shouldBeLogin()) + { + generic.GET("/regions", func(c *gin.Context) { renderData(c, config.Config.Monapi.Region, nil) }) + } + + node := r.Group("/api/mon/node").Use(shouldBeLogin()) + { + node.GET("/:id/maskconf", maskconfGets) + node.GET("/:id/screen", screenGets) + node.POST("/:id/screen", screenPost) + } + + maskconf := r.Group("/api/mon/maskconf").Use(shouldBeLogin()) + { + maskconf.POST("", maskconfPost) + maskconf.PUT("/:id", maskconfPut) + maskconf.DELETE("/:id", maskconfDel) + } + + screen := 
r.Group("/api/mon/screen").Use(shouldBeLogin()) + { + screen.GET("/:id", screenGet) + screen.PUT("/:id", screenPut) + screen.DELETE("/:id", screenDel) + screen.GET("/:id/subclass", screenSubclassGets) + screen.POST("/:id/subclass", screenSubclassPost) + } + + subclass := r.Group("/api/mon/subclass").Use(shouldBeLogin()) + { + subclass.PUT("", screenSubclassPut) + subclass.DELETE("/:id", screenSubclassDel) + subclass.GET("/:id/chart", chartGets) + subclass.POST("/:id/chart", chartPost) + } + + subclasses := r.Group("/api/mon/subclasses").Use(shouldBeLogin()) + { + subclasses.PUT("/loc", screenSubclassLocPut) + } + + chart := r.Group("/api/mon/chart").Use(shouldBeLogin()) + { + chart.PUT("/:id", chartPut) + chart.DELETE("/:id", chartDel) + } + + charts := r.Group("/api/mon/charts").Use(shouldBeLogin()) + { + charts.PUT("/weights", chartWeightsPut) + } + + tmpchart := r.Group("/api/mon/tmpchart").Use(shouldBeLogin()) + { + tmpchart.GET("", tmpChartGet) + tmpchart.POST("", tmpChartPost) + } + + event := r.Group("/api/mon/event").Use(shouldBeLogin()) + { + event.GET("/cur", eventCurGets) + event.GET("/cur/:id", eventCurGetById) + event.DELETE("/cur/:id", eventCurDel) + event.GET("/his", eventHisGets) + event.GET("/his/:id", eventHisGetById) + event.POST("/cur/claim", eventCurClaim) + } + + // TODO: merge to collect-rule + collect := r.Group("/api/mon/collect").Use(shouldBeLogin()) + { + collect.POST("", collectRulePost) // create a collect rule + collect.GET("/list", collectRulesGet) // get collect rules + collect.GET("", collectRuleGet) // get collect rule by type & id + collect.PUT("", collectRulePut) // update collect rule by type & id + collect.DELETE("", collectsRuleDel) // delete collect rules by type & ids + collect.POST("/check", regExpCheck) // check collect rule + } + + collectEE := r.Group("/api/mon-ee/collect").Use(shouldBeLogin()) + { + collectEE.POST("", collectRulePost) + collectEE.GET("/list", collectRulesGet) + collectEE.GET("", collectRuleGet) + 
collectEE.PUT("", collectRulePut) + collectEE.DELETE("", collectsRuleDel) + collectEE.GET("/region", apiRegionGet) + } + + apicollects := r.Group("/api/mon-ee/apicollects") + { + apicollects.GET("", apiCollectsGet) + } + + snmpcollects := r.Group("/api/mon-ee/snmp") + { + snmpcollects.GET("/collects", snmpCollectsGet) + snmpcollects.GET("/hw", snmpHWsGet) + snmpcollects.GET("/mib/module", mibModuleGet) + snmpcollects.GET("/mib/metric", mibMetricGet) + snmpcollects.GET("/mib", mibGet) + snmpcollects.GET("/mibs", mibGets) + } + + // TODO: merge to collect-rules, used by agent + collects := r.Group("/api/mon/collects") + { + collects.GET("/:endpoint", collectRulesGetByLocalEndpoint) // get collect rules by endpoint, for agent + collects.GET("", collectRulesGet) // get collect rules + } + + collectRules := r.Group("/api/mon/collect-rules").Use(shouldBeLogin()) + { + collectRules.POST("", collectRulePost) // create a collect rule + collectRules.GET("/list", collectRulesGetV2) // get collect rules + collectRules.GET("", collectRuleGet) // get collect rule by type & id + collectRules.PUT("", collectRulePut) // update collect rule by type & id + collectRules.DELETE("", collectsRuleDel) // delete collect rules by type & ids + collectRules.POST("/check", regExpCheck) // check collect rule + collectRules.GET("/types", collectRuleTypesGet) // get collect types, category: local|remote + collectRules.GET("/types/:type/template", collectRuleTemplateGet) // get collect teplate by type + + } + + collectRulesAnonymous := r.Group("/api/mon/collect-rules") + { + collectRulesAnonymous.GET("/endpoints/:endpoint/local", collectRulesGetByLocalEndpoint) // for agent + } + + stra := r.Group("/api/mon/stra").Use(shouldBeLogin()) + { + stra.POST("", straPost) + stra.PUT("", straPut) + stra.DELETE("", strasDel) + stra.GET("", strasGet) + stra.GET("/:sid", straGet) + } + + stras := r.Group("/api/mon/stras") + { + stras.GET("/effective", effectiveStrasGet) + stras.GET("", strasAll) + } + + aggr 
:= r.Group("/api/mon/aggr").Use(shouldBeLogin()) + { + aggr.POST("", aggrCalcPost) + aggr.PUT("", aggrCalcPut) + aggr.DELETE("", aggrCalcsDel) + aggr.GET("", aggrCalcsGet) + aggr.GET("/:id", aggrCalcGet) + } + + tpl := r.Group("/api/mon/tpl") + { + tpl.GET("", tplNameGets) + tpl.GET("/content", tplGet) + } + + aggrs := r.Group("/api/mon/aggrs").Use() + { + aggrs.GET("", aggrCalcsWithEndpointGet) + } + + monIndex := r.Group("/api/mon/index") + { + monIndex.POST("/metrics", getMetrics) + monIndex.POST("/tagkv", getTagkvs) + } + + judge := r.Group("/api/judge") + { + judge.GET("/stra/:id", getStraInJudge) + judge.POST("/data", getData) + } + + monV1 := r.Group("/v1/mon") + { + monV1.GET("/collect-rules/endpoints/:endpoint/remote", collectRulesGetByRemoteEndpoint) // for prober + } + + nemsLogin := r.Group("/api/ams-ee").Use(shouldBeLogin()) + { + nemsLogin.POST("/nethws", networkHardwarePost) + nemsLogin.GET("/nethws", networkHardwareGets) + nemsLogin.DELETE("/nethws", networkHardwareDel) + nemsLogin.PUT("/nethw/obj/:id", networkHardwarePut) + nemsLogin.PUT("/nethw/note", mgrHWNotePut) + nemsLogin.PUT("/nethw/cate", mgrHWCatePut) + nemsLogin.PUT("/nethw/tenant", mgrHWTenantPut) + nemsLogin.GET("/nethw/cate", hwCateGet) + nemsLogin.GET("/nethw/region", snmpRegionGet) + nemsLogin.GET("/nethws/search", nwSearchGets) + + //md-20201223 + nemsLogin.GET("/nws", nwGets) + nemsLogin.PUT("/nws/back", nwBackPut) + nemsLogin.GET("/nws/search", nwSearchGets) + nemsLogin.DELETE("/nws", nwDel) + + nemsLogin.POST("/mibs", mibPost) + nemsLogin.GET("/mibs", mibGetsByQuery) + nemsLogin.DELETE("/mibs", mibDel) + nemsLogin.GET("/mib", mibGet) + } + + nemsV1 := r.Group("/v1/ams-ee").Use(shouldBeService()) + { + nemsV1.POST("/get-hw-by-ip", networkHardwareByIP) + nemsV1.GET("/get-hw", networkHardwareGetAll) + nemsV1.PUT("/nethws", networkHardwaresPut) + nemsV1.GET("/mib", mibGet) + nemsV1.GET("/mibs", mibGets) + nemsV1.GET("/mib/module", mibModuleGet) + nemsV1.GET("/mib/metric", 
mibMetricGet) + } + + v1 := r.Group("/v1/rdb").Use(shouldBeService()) + { + // 获取这个节点下的所有资源,跟给前端的API(/api/rdb/node/:id/resources会根据当前登陆用户获取有权限看到的资源列表)不同 + v1.GET("/node/:id/resources", v1ResourcesUnderNodeGet) + // RDB作为一个类似CMDB的东西,接收各个子系统注册过来的资源,其他资源都是依托于项目创建的,RDB会根据nid自动挂载资源到相应节点 + v1.POST("/resources/register", v1ResourcesRegisterPost) + // 资源销毁的时候,需要从库里清掉,同时需要把节点挂载关系也删除,一个资源可能挂载在多个节点,都要统统干掉 + v1.POST("/resources/unregister", v1ResourcesUnregisterPost) + + v1.POST("/containers/bind", v1ContainersBindPost) + v1.POST("/container/sync", v1ContainerSyncPost) + + // 发送邮件、短信、语音、即时通讯消息,这些都依赖客户那边的通道 + v1.POST("/sender/mail", v1SendMail) + v1.POST("/sender/sms", v1SendSms) + v1.POST("/sender/voice", v1SendVoice) + v1.POST("/sender/im", v1SendIm) + + v1.GET("/nodes", nodeGets) + v1.GET("/node-include-trash/:id", nodeIncludeTrashGet) + v1.GET("/node/:id", nodeGet) + v1.GET("/node/:id/projs", v1treeUntilProjectGetsByNid) + v1.GET("/tree/projs", v1TreeUntilProjectGets) + v1.GET("/tree", v1TreeUntilTypGets) + + // 外部系统推送一些操作日志过来,RDB统一存储,实际用MQ会更好一些 + v1.POST("/resoplogs", v1OperationLogResPost) + + // 是否有权限做一些全局操作(即页面权限点) + v1.GET("/can-do-global-op", v1CandoGlobalOp) + // 是否在某个节点上有权限做某个操作(即资源权限点) + v1.GET("/can-do-node-op", v1CandoNodeOp) + // 同时校验多个操作权限点 + v1.GET("/can-do-node-ops", v1CandoNodeOps) + + // 获取用户、团队相关信息 + v1.GET("/get-user-by-uuid", v1UserGetByUUID) + v1.GET("/get-users-by-uuids", v1UserGetByUUIDs) + v1.GET("/get-users-by-ids", v1UserGetByIds) + v1.GET("/get-users-by-names", v1UserGetByNames) + v1.GET("/get-user-by-token", v1UserGetByToken) + v1.GET("/get-users-by-query", userListGet) + v1.GET("/get-teams-by-ids", v1TeamGetByIds) + v1.GET("/get-user-ids-by-team-ids", v1UserIdsGetByTeamIds) + + v1.GET("/users", v1UserListGet) + + v1.POST("/login", v1Login) + v1.POST("/send-login-code", sendLoginCode) + + // 第三方系统获取某个用户的所有权限点 + v1.GET("/perms/global", v1PermGlobalOps) + + // session + v1.GET("/sessions/:sid", v1SessionGet) + v1.GET("/sessions/:sid/user", 
v1SessionGetUser) + v1.GET("/sessions", v1SessionListGet) + v1.DELETE("/sessions/:sid", v1SessionDelete) + + // token + v1.GET("/tokens/:token", v1TokenGet) + v1.GET("/tokens/:token/user", v1TokenGetUser) + v1.DELETE("/tokens/:token", v1TokenDelete) + + // 第三方系统同步权限表的数据 + v1.GET("/table/sync/role-operation", v1RoleOperationGets) + v1.GET("/table/sync/role-global-user", v1RoleGlobalUserGets) + } + + amsUserLogin := r.Group("/api/ams-ce").Use(shouldBeLogin()) + { + amsUserLogin.GET("/hosts", hostGets) + amsUserLogin.POST("/hosts", hostPost) + amsUserLogin.GET("/host/:id", hostGet) + amsUserLogin.PUT("/hosts/tenant", hostTenantPut) + amsUserLogin.PUT("/hosts/node", hostNodePut) + amsUserLogin.PUT("/hosts/back", hostBackPut) + amsUserLogin.PUT("/hosts/note", hostNotePut) + amsUserLogin.PUT("/hosts/cate", hostCatePut) + amsUserLogin.DELETE("/hosts", hostDel) + amsUserLogin.GET("/hosts/search", hostSearchGets) + amsUserLogin.POST("/hosts/fields", hostFieldNew) + amsUserLogin.GET("/hosts/fields", hostFieldsGets) + amsUserLogin.GET("/hosts/field/:id", hostFieldGet) + amsUserLogin.PUT("/hosts/field/:id", hostFieldPut) + amsUserLogin.DELETE("/hosts/field/:id", hostFieldDel) + amsUserLogin.GET("/host/:id/fields", hostFieldGets) + amsUserLogin.PUT("/host/:id/fields", hostFieldPuts) + } + + amsV1 := r.Group("/v1/ams-ce").Use(shouldBeService()) + { + amsV1.POST("/hosts/register", v1HostRegister) + } + +} diff --git a/src/modules/monapi/http/router_aggr.go b/src/modules/server/http/router_aggr.go similarity index 93% rename from src/modules/monapi/http/router_aggr.go rename to src/modules/server/http/router_aggr.go index c513876e..ef339c1a 100644 --- a/src/modules/monapi/http/router_aggr.go +++ b/src/modules/server/http/router_aggr.go @@ -1,8 +1,8 @@ package http import ( - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/scache" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" 
"github.com/gin-gonic/gin" "github.com/toolkits/pkg/errors" @@ -106,11 +106,11 @@ func aggrCalcGet(c *gin.Context) { func aggrCalcsGet(c *gin.Context) { name := queryStr(c, "name", "") - nid := mustQueryInt64(c, "nid") + nid := queryInt64(c, "nid") list, err := models.AggrCalcsList(name, nid) renderData(c, list, err) } func aggrCalcsWithEndpointGet(c *gin.Context) { - renderData(c, scache.AggrCalcStraCache.Get(), nil) + renderData(c, cache.AggrCalcStraCache.Get(), nil) } diff --git a/src/modules/rdb/http/router_auth.go b/src/modules/server/http/router_auth.go similarity index 94% rename from src/modules/rdb/http/router_auth.go rename to src/modules/server/http/router_auth.go index 38274327..80605c79 100644 --- a/src/modules/rdb/http/router_auth.go +++ b/src/modules/server/http/router_auth.go @@ -11,20 +11,21 @@ import ( "strings" "time" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/auth" + "github.com/didi/nightingale/v4/src/modules/server/cache" + "github.com/didi/nightingale/v4/src/modules/server/config" + "github.com/didi/nightingale/v4/src/modules/server/cron" + "github.com/didi/nightingale/v4/src/modules/server/http/session" + "github.com/didi/nightingale/v4/src/modules/server/redisc" + "github.com/didi/nightingale/v4/src/modules/server/ssoc" + "github.com/gin-gonic/gin" "github.com/mojocn/base64Captcha" "github.com/toolkits/pkg/file" "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/str" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/rdb/auth" - "github.com/didi/nightingale/src/modules/rdb/cache" - "github.com/didi/nightingale/src/modules/rdb/config" - "github.com/didi/nightingale/src/modules/rdb/redisc" - "github.com/didi/nightingale/src/modules/rdb/session" - "github.com/didi/nightingale/src/modules/rdb/ssoc" ) var ( @@ -80,11 +81,11 @@ func init() { if 
err != nil { log.Fatal(err) } + passwordChangedEmailTpl, err = template.ParseFiles(filename) if err != nil { log.Fatalf("open %s err: %s", filename, err) } - } // login for UI @@ -98,7 +99,7 @@ func login(c *gin.Context) { return err } - if config.Config.Auth.Captcha { + if config.Config.Rdb.Auth.Captcha { c, err := models.CaptchaGet("captcha_id=?", in.CaptchaId) if err != nil { return _e("Unable to get captcha") @@ -153,7 +154,7 @@ func authAuthorizeV2(c *gin.Context) { return &authRedirect{Redirect: redirect}, nil } - if !config.Config.SSO.Enable { + if !config.Config.Rdb.SSO.Enable { return &authRedirect{Redirect: "/login"}, nil } @@ -340,7 +341,7 @@ func sendLoginCode(c *gin.Context) { if err := in.Validate(); err != nil { return "", err } - if !config.Config.Redis.Enable { + if !config.Config.Redis.Local.Enable { return "", _e("sms/email sender is disabled") } @@ -369,13 +370,13 @@ func sendLoginCode(c *gin.Context) { if err := loginCodeSmsTpl.Execute(&buf, loginCode); err != nil { return "", err } - queueName = config.SMS_QUEUE_NAME + queueName = cron.SMS_QUEUE_NAME case models.LOGIN_T_EMAIL: user, _ = models.UserGet("email=?", in.Arg) if err := loginCodeEmailTpl.Execute(&buf, loginCode); err != nil { return "", err } - queueName = config.MAIL_QUEUE_NAME + queueName = cron.MAIL_QUEUE_NAME default: return "", _e("Invalid code type %s", in.Type) } @@ -393,7 +394,7 @@ func sendLoginCode(c *gin.Context) { return "", err } - if config.Config.Auth.ExtraMode.Debug { + if config.Config.Rdb.Auth.ExtraMode.Debug { return fmt.Sprintf("[debug]: %s", buf.String()), nil } @@ -412,7 +413,8 @@ func sendRstCode(c *gin.Context) { if err := in.Validate(); err != nil { return "", err } - if !config.Config.Redis.Enable { + + if !config.Config.Redis.Local.Enable { return "", _e("email/sms sender is disabled") } @@ -441,13 +443,13 @@ func sendRstCode(c *gin.Context) { if err := loginCodeSmsTpl.Execute(&buf, loginCode); err != nil { return "", err } - queueName = 
config.SMS_QUEUE_NAME + queueName = cron.SMS_QUEUE_NAME case models.LOGIN_T_EMAIL: user, _ = models.UserGet("email=?", in.Arg) if err := loginCodeEmailTpl.Execute(&buf, loginCode); err != nil { return "", err } - queueName = config.MAIL_QUEUE_NAME + queueName = cron.MAIL_QUEUE_NAME default: return "", _e("Invalid code type %s", in.Type) } @@ -465,7 +467,7 @@ func sendRstCode(c *gin.Context) { return "", err } - if config.Config.Auth.ExtraMode.Debug { + if config.Config.Rdb.Auth.ExtraMode.Debug { return fmt.Sprintf("[debug] msg: %s", buf.String()), nil } @@ -540,6 +542,7 @@ func rstPassword(c *gin.Context) { if err := passwordChangedNotify(user); err != nil { logger.Warningf("password changed notify error %s", err) } + lc.Del() return nil }() @@ -553,7 +556,7 @@ func rstPassword(c *gin.Context) { func captchaGet(c *gin.Context) { ret, err := func() (*models.Captcha, error) { - if !config.Config.Auth.Captcha { + if !config.Config.Rdb.Auth.Captcha { return nil, errUnsupportCaptcha } @@ -585,7 +588,7 @@ func authSettings(c *gin.Context) { renderData(c, struct { Sso bool `json:"sso"` }{ - Sso: config.Config.SSO.Enable, + Sso: config.Config.Rdb.SSO.Enable, }, nil) } @@ -778,5 +781,5 @@ func passwordChangedNotify(user *models.User) error { return err } - return redisc.Write(&dataobj.Message{Tos: []string{user.Email}, Subject: _s("[Notify] Password Changed"), Content: buf.String()}, config.MAIL_QUEUE_NAME) + return redisc.Write(&dataobj.Message{Tos: []string{user.Email}, Subject: _s("[Notify] Password Changed"), Content: buf.String()}, cron.MAIL_QUEUE_NAME) } diff --git a/src/modules/monapi/http/router_chart.go b/src/modules/server/http/router_chart.go similarity index 98% rename from src/modules/monapi/http/router_chart.go rename to src/modules/server/http/router_chart.go index b0ca9811..2ece2918 100644 --- a/src/modules/monapi/http/router_chart.go +++ b/src/modules/server/http/router_chart.go @@ -1,7 +1,7 @@ package http import ( - 
"github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" "github.com/gin-gonic/gin" "github.com/toolkits/pkg/errors" diff --git a/src/modules/monapi/http/router_collect.go b/src/modules/server/http/router_collect.go similarity index 89% rename from src/modules/monapi/http/router_collect.go rename to src/modules/server/http/router_collect.go index 5e957d1a..3d2e8a20 100644 --- a/src/modules/monapi/http/router_collect.go +++ b/src/modules/server/http/router_collect.go @@ -7,9 +7,10 @@ import ( "regexp" "strings" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/monapi/scache" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/didi/nightingale/v4/src/modules/server/config" "github.com/gin-gonic/gin" "github.com/toolkits/pkg/errors" @@ -45,13 +46,13 @@ func collectRulePost(c *gin.Context) { } func collectRulesGetByLocalEndpoint(c *gin.Context) { - collect := scache.CollectCache.GetBy(urlParamStr(c, "endpoint")) + collect := cache.CollectCache.GetBy(urlParamStr(c, "endpoint")) renderData(c, collect, nil) } func collectRuleGet(c *gin.Context) { - t := mustQueryStr(c, "type") - id := mustQueryInt64(c, "id") + t := queryStr(c, "type") + id := queryInt64(c, "id") cl, err := collector.GetCollector(t) errors.Dangerous(err) @@ -96,7 +97,7 @@ func collectRulesGetV2(c *gin.Context) { limit := queryInt(c, "limit", 20) typ := queryStr(c, "type", "") - total, list, err := models.GetCollectRules(typ, nid, limit, offset(c, limit, 0)) + total, list, err := models.GetCollectRules(typ, nid, limit, offset(c, limit)) renderData(c, map[string]interface{}{ "total": total, @@ -149,7 +150,7 @@ func collectsRuleDel(c *gin.Context) { } func collectRuleTypesGet(c *gin.Context) { - category := mustQueryStr(c, "category") + category := 
queryStr(c, "category") switch category { case "remote": renderData(c, collector.GetRemoteCollectors(), nil) @@ -378,7 +379,35 @@ func genIllegalCharErrMsg() string { } func collectRulesGetByRemoteEndpoint(c *gin.Context) { - rules := scache.CollectRuleCache.GetBy(urlParamStr(c, "endpoint")) + rules := cache.CollectRuleCache.GetBy(urlParamStr(c, "endpoint")) renderData(c, rules, nil) } + +func apiCollectsGet(c *gin.Context) { + node := queryStr(c, "node") + region := queryStr(c, "region") + key := region + "-" + node + collects := cache.ApiCollectCache.GetBy(key) + renderData(c, collects, nil) +} + +func snmpCollectsGet(c *gin.Context) { + node := queryStr(c, "node") + region := queryStr(c, "region") + key := region + "-" + node + collects := cache.SnmpCollectCache.GetBy(key) + renderData(c, collects, nil) +} + +func snmpHWsGet(c *gin.Context) { + node := queryStr(c, "node") + region := queryStr(c, "region") + key := region + "-" + node + hws := cache.SnmpHWCache.GetBy(key) + renderData(c, hws, nil) +} + +func apiRegionGet(c *gin.Context) { + renderData(c, config.Config.Monapi.Region, nil) +} diff --git a/src/modules/rdb/http/router_configs.go b/src/modules/server/http/router_configs.go similarity index 96% rename from src/modules/rdb/http/router_configs.go rename to src/modules/server/http/router_configs.go index 687dbe2c..c2015a36 100644 --- a/src/modules/rdb/http/router_configs.go +++ b/src/modules/server/http/router_configs.go @@ -6,10 +6,10 @@ import ( "strconv" "time" - "gopkg.in/gomail.v2" + "github.com/didi/nightingale/v4/src/models" - "github.com/didi/nightingale/src/models" "github.com/gin-gonic/gin" + gomail "gopkg.in/gomail.v2" ) func smtpConfigsGet(c *gin.Context) { diff --git a/src/modules/rdb/http/router_container.go b/src/modules/server/http/router_container.go similarity index 100% rename from src/modules/rdb/http/router_container.go rename to src/modules/server/http/router_container.go diff --git a/src/modules/monapi/http/router_event.go 
b/src/modules/server/http/router_event.go similarity index 99% rename from src/modules/monapi/http/router_event.go rename to src/modules/server/http/router_event.go index 124518f0..a26aa827 100644 --- a/src/modules/monapi/http/router_event.go +++ b/src/modules/server/http/router_event.go @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" "github.com/gin-gonic/gin" "github.com/toolkits/pkg/errors" @@ -79,7 +79,7 @@ func eventCurGets(c *gin.Context) { total, err := models.EventCurTotal(stime, etime, nodePath, query, strings.Split(priorities, ","), strings.Split(sendtypes, ",")) errors.Dangerous(err) - events, err := models.EventCurGets(stime, etime, nodePath, query, strings.Split(priorities, ","), strings.Split(sendtypes, ","), limit, offset(c, limit, total)) + events, err := models.EventCurGets(stime, etime, nodePath, query, strings.Split(priorities, ","), strings.Split(sendtypes, ","), limit, offset(c, limit)) errors.Dangerous(err) datList := []eventData{} @@ -190,7 +190,7 @@ func eventHisGets(c *gin.Context) { total, err := models.EventTotal(stime, etime, nodePath, query, eventType, strings.Split(priorities, ","), strings.Split(sendtypes, ",")) errors.Dangerous(err) - events, err := models.EventGets(stime, etime, nodePath, query, eventType, strings.Split(priorities, ","), strings.Split(sendtypes, ","), limit, offset(c, limit, total)) + events, err := models.EventGets(stime, etime, nodePath, query, eventType, strings.Split(priorities, ","), strings.Split(sendtypes, ","), limit, offset(c, limit)) errors.Dangerous(err) datList := []eventData{} diff --git a/src/modules/rdb/http/router_hbs.go b/src/modules/server/http/router_hbs.go similarity index 96% rename from src/modules/rdb/http/router_hbs.go rename to src/modules/server/http/router_hbs.go index 241f37f4..a6b5a6d9 100644 --- a/src/modules/rdb/http/router_hbs.go +++ b/src/modules/server/http/router_hbs.go @@ -3,7 +3,7 @@ package http 
import ( "time" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" "github.com/gin-gonic/gin" "github.com/toolkits/pkg/errors" diff --git a/src/modules/monapi/http/router_sys.go b/src/modules/server/http/router_health.go similarity index 56% rename from src/modules/monapi/http/router_sys.go rename to src/modules/server/http/router_health.go index f93ff37b..cc49f066 100644 --- a/src/modules/monapi/http/router_sys.go +++ b/src/modules/server/http/router_health.go @@ -1,10 +1,9 @@ package http import ( + "fmt" "os" - "github.com/didi/nightingale/src/modules/monapi/config" - "github.com/gin-gonic/gin" ) @@ -12,14 +11,10 @@ func ping(c *gin.Context) { c.String(200, "pong") } -func version(c *gin.Context) { - c.String(200, "%d", config.Version) +func addr(c *gin.Context) { + c.String(200, c.Request.RemoteAddr) } func pid(c *gin.Context) { - c.String(200, "%d", os.Getpid()) -} - -func addr(c *gin.Context) { - c.String(200, c.Request.RemoteAddr) + c.String(200, fmt.Sprintf("%d", os.Getpid())) } diff --git a/src/modules/server/http/router_home.go b/src/modules/server/http/router_home.go new file mode 100644 index 00000000..793ef109 --- /dev/null +++ b/src/modules/server/http/router_home.go @@ -0,0 +1,11 @@ +package http + +import ( + "github.com/didi/nightingale/v4/src/modules/server/config" + + "github.com/gin-gonic/gin" +) + +func ldapUsed(c *gin.Context) { + renderData(c, config.Config.Rdb.LDAP.DefaultUse, nil) +} diff --git a/src/modules/ams/http/router_host.go b/src/modules/server/http/router_host.go similarity index 57% rename from src/modules/ams/http/router_host.go rename to src/modules/server/http/router_host.go index cbd61cc2..fe041285 100644 --- a/src/modules/ams/http/router_host.go +++ b/src/modules/server/http/router_host.go @@ -1,16 +1,12 @@ package http import ( - "encoding/json" "fmt" "strings" - "time" - "github.com/gin-gonic/gin" - "github.com/toolkits/pkg/cache" - "github.com/toolkits/pkg/logger" + 
"github.com/didi/nightingale/v4/src/models" - "github.com/didi/nightingale/src/models" + "github.com/gin-gonic/gin" ) // 管理员在主机设备管理页面查看列表 @@ -296,215 +292,12 @@ func hostSearchGets(c *gin.Context) { renderData(c, list, err) } -type hostRegisterForm struct { - SN string `json:"sn"` - IP string `json:"ip"` - Ident string `json:"ident"` - Name string `json:"name"` - Cate string `json:"cate"` - UniqKey string `json:"uniqkey"` - Fields map[string]interface{} `json:"fields"` - Digest string `json:"digest"` -} - -func (f hostRegisterForm) Validate() { - if f.IP == "" { - bomb("ip is blank") - } - - if f.UniqKey == "" { - bomb("uniqkey is blank") - } - - if f.Digest == "" { - bomb("digest is blank") - } -} - -// mapKeyClear map key clear -func mapKeyClear(src map[string]interface{}, save map[string]struct{}) { - var dels []string - for k := range src { - if _, ok := save[k]; !ok { - dels = append(dels, k) - } - } - - for i := 0; i < len(dels); i++ { - delete(src, dels[i]) - } -} - // agent主动上报注册信息 func v1HostRegister(c *gin.Context) { - var f hostRegisterForm + var f models.HostRegisterForm bind(c, &f) f.Validate() - oldFields := make(map[string]interface{}, len(f.Fields)) - for k, v := range f.Fields { - oldFields[k] = v - } - - uniqValue := "" - - if f.UniqKey == "sn" { - uniqValue = f.SN - } - - if f.UniqKey == "ip" { - uniqValue = f.IP - } - - if f.UniqKey == "ident" { - uniqValue = f.Ident - } - - if f.UniqKey == "name" { - uniqValue = f.Name - } - - if uniqValue == "" { - bomb("%s is blank", f.UniqKey) - } - - cacheKey := "/host/info/" + f.UniqKey + "/" + uniqValue - - var val string - if err := cache.Get(cacheKey, &val); err == nil { - if f.Digest == val { - // 说明客户端采集到的各个字段信息并无变化,无需更新DB - renderMessage(c, nil) - return - } - } else { - if err.Error() != cache.ErrCacheMiss.Error() { - msg := "get cache err" - logger.Error(err) - renderMessage(c, msg) - return - } - } - - host, err := models.HostGet(f.UniqKey+" = ?", uniqValue) - dangerous(err) - - hFixed := 
map[string]struct{}{ - "cpu": struct{}{}, - "mem": struct{}{}, - "disk": struct{}{}, - } - - mapKeyClear(f.Fields, hFixed) - - if host == nil { - msg := "create host failed" - host, err = models.HostNew(f.SN, f.IP, f.Ident, f.Name, f.Cate, f.Fields) - if err != nil { - logger.Error(err) - renderMessage(c, msg) - return - } - - if host == nil { - logger.Errorf("%s, report info:%v", msg, f) - renderMessage(c, msg) - return - } - } else { - f.Fields["sn"] = f.SN - f.Fields["ip"] = f.IP - f.Fields["ident"] = f.Ident - f.Fields["name"] = f.Name - f.Fields["cate"] = f.Cate - f.Fields["clock"] = time.Now().Unix() - - err = host.Update(f.Fields) - if err != nil { - logger.Error(err) - msg := "update host err" - renderMessage(c, msg) - return - } - } - - if v, ok := oldFields["tenant"]; ok { - vStr := v.(string) - if vStr != "" { - err = models.HostUpdateTenant([]int64{host.Id}, vStr) - if err != nil { - logger.Error(err) - msg := "update host tenant err" - renderMessage(c, msg) - return - } - - err = models.ResourceRegister([]models.Host{*host}, vStr) - if err != nil { - logger.Error(err) - msg := "register resource err" - renderMessage(c, msg) - return - } - } - } - - if host.Tenant != "" { - // 已经分配给某个租户了,那肯定对应某个resource,需要更新resource的信息 - res, err := models.ResourceGet("uuid=?", fmt.Sprintf("host-%d", host.Id)) - dangerous(err) - - if res == nil { - // 数据不干净,ams里有这个host,而且是已分配状态,但是resource表里没有,重新注册一下 - dangerous(models.ResourceRegister([]models.Host{*host}, host.Tenant)) - - // 注册完了,重新查询一下试试 - res, err = models.ResourceGet("uuid=?", fmt.Sprintf("host-%d", host.Id)) - dangerous(err) - - if res == nil { - bomb("resource register fail, unknown error") - } - } - - res.Ident = f.Ident - res.Name = f.Name - res.Cate = f.Cate - - mapKeyClear(f.Fields, hFixed) - - js, err := json.Marshal(f.Fields) - dangerous(err) - - res.Extend = string(js) - - dangerous(res.Update("ident", "name", "cate", "extend")) - } - - var objs []models.HostFieldValue - for k, v := range oldFields { - if 
k == "tenant" { - continue - } - - if _, ok := hFixed[k]; !ok { - tmp := models.HostFieldValue{HostId: host.Id, FieldIdent: k, FieldValue: v.(string)} - objs = append(objs, tmp) - } - } - - if len(objs) > 0 { - err = models.HostFieldValuePuts(host.Id, objs) - dangerous(err) - } - - err = cache.Set(cacheKey, f.Digest, cache.DEFAULT) - if err != nil { - msg := "set cache err" - logger.Error(err) - renderMessage(c, msg) - return - } - - renderMessage(c, nil) + err := models.HostRegister(f) + renderMessage(c, err) } diff --git a/src/modules/ams/http/router_host_field.go b/src/modules/server/http/router_host_field.go similarity index 97% rename from src/modules/ams/http/router_host_field.go rename to src/modules/server/http/router_host_field.go index 5b7d3320..7158788f 100644 --- a/src/modules/ams/http/router_host_field.go +++ b/src/modules/server/http/router_host_field.go @@ -1,9 +1,9 @@ package http import ( - "github.com/gin-gonic/gin" + "github.com/didi/nightingale/v4/src/models" - "github.com/didi/nightingale/src/models" + "github.com/gin-gonic/gin" ) func hostFieldNew(c *gin.Context) { diff --git a/src/modules/monapi/http/router_index.go b/src/modules/server/http/router_index.go similarity index 90% rename from src/modules/monapi/http/router_index.go rename to src/modules/server/http/router_index.go index 8b9569b0..87c69238 100644 --- a/src/modules/monapi/http/router_index.go +++ b/src/modules/server/http/router_index.go @@ -6,10 +6,9 @@ import ( "strconv" "time" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/common/report" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/config" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/config" "github.com/gin-gonic/gin" "github.com/toolkits/pkg/errors" @@ -129,13 +128,13 @@ func Tagkv(request NidMetricRecv) ([]IndexTagkvResp, error) { 
func GetIndexes() []string { var indexInstances []string - instances, err := report.GetAlive(config.Get().IndexMod, "rdb") + instances, err := models.GetAllInstances(config.Config.Monapi.IndexMod, 1) if err != nil { return indexInstances } for _, instance := range instances { - judgeInstance := instance.Identity + ":" + instance.HTTPPort - indexInstances = append(indexInstances, judgeInstance) + indexInstance := instance.Identity + ":" + instance.HTTPPort + indexInstances = append(indexInstances, indexInstance) } return indexInstances } diff --git a/src/modules/server/http/router_judge.go b/src/modules/server/http/router_judge.go new file mode 100644 index 00000000..14a827ae --- /dev/null +++ b/src/modules/server/http/router_judge.go @@ -0,0 +1,31 @@ +package http + +import ( + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/server/cache" + + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/errors" +) + +func getStraInJudge(c *gin.Context) { + sid := urlParamInt64(c, "id") + + stra, exists := cache.Strategy.Get(sid) + if exists { + renderData(c, stra, nil) + return + } + + stra, _ = cache.NodataStra.Get(sid) + renderData(c, stra, nil) +} + +func getData(c *gin.Context) { + var input dataobj.JudgeItem + errors.Dangerous(c.ShouldBind(&input)) + pk := input.MD5() + linkedList, _ := cache.HistoryBigMap[pk[0:2]].Get(pk) + data := linkedList.HistoryData() + renderData(c, data, nil) +} diff --git a/src/modules/rdb/http/router_log.go b/src/modules/server/http/router_log.go similarity index 97% rename from src/modules/rdb/http/router_log.go rename to src/modules/server/http/router_log.go index 1c4e2708..5fc85589 100644 --- a/src/modules/rdb/http/router_log.go +++ b/src/modules/server/http/router_log.go @@ -1,7 +1,8 @@ package http import ( - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" + "github.com/gin-gonic/gin" ) diff --git a/src/modules/monapi/http/router_maskconf.go 
b/src/modules/server/http/router_maskconf.go similarity index 98% rename from src/modules/monapi/http/router_maskconf.go rename to src/modules/server/http/router_maskconf.go index fc5206ee..466f52d2 100644 --- a/src/modules/monapi/http/router_maskconf.go +++ b/src/modules/server/http/router_maskconf.go @@ -3,7 +3,7 @@ package http import ( "strings" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" "github.com/gin-gonic/gin" "github.com/toolkits/pkg/errors" diff --git a/src/modules/server/http/router_mib.go b/src/modules/server/http/router_mib.go new file mode 100644 index 00000000..3a89a574 --- /dev/null +++ b/src/modules/server/http/router_mib.go @@ -0,0 +1,184 @@ +package http + +import ( + "encoding/json" + "os" + "strings" + "time" + + "github.com/didi/nightingale/v4/src/common/compress" + "github.com/didi/nightingale/v4/src/models" + + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/file" + "github.com/toolkits/pkg/logger" + "github.com/toolkits/pkg/sys" +) + +var MIBS string + +func mibPost(c *gin.Context) { + MIBS = "./mibs/current_mib" + file.EnsureDir(MIBS) + username := loginUsername(c) + can, err := models.UsernameCandoGlobalOp(username, "nems_network_ops") + dangerous(err) + if !can { + bomb("no privilege") + } + + module := c.PostForm("module") + if module == "" { + bomb("module is blank") + } + + formFile, err := c.FormFile("file") + dangerous(err) + + if !strings.HasSuffix(formFile.Filename, ".tar.gz") { + bomb("file suffix only support .tar.gz") + } + + dirOfFile := "./mibs" + file.EnsureDir(dirOfFile) + + pathOfFile := dirOfFile + "/" + formFile.Filename + if err := c.SaveUploadedFile(formFile, pathOfFile); err != nil { + bomb("upload file err: %v", err) + } + + err = compress.UnCompress(pathOfFile, MIBS) + dangerous(err) + + absPath, err := file.RealPath(MIBS) + dangerous(err) + + out, err, isTimeout := sys.CmdRunT(5*time.Second, "./parse-mib", absPath) + if err != nil { + logger.Error(err) + 
bomb("parse mib err") + } + if isTimeout { + bomb("parse mib err: timeout") + } + + var mibs []*models.Metric + err = json.Unmarshal([]byte(out), &mibs) + if err != nil { + logger.Error(err, out) + bomb("parse mib err") + } + os.RemoveAll(MIBS) + + for _, m := range mibs { + newMib := models.NewMib(module, m) + oldMib, err := models.MibGet("module=? and oid=?", module, m.Oid) + if err != nil { + logger.Warning("get mib err:", err) + continue + } + if oldMib == nil { + err := newMib.Save() + if err != nil { + logger.Warning("save mib err:", err) + } + } + } + renderMessage(c, err) +} + +func mibModuleGet(c *gin.Context) { + mibs, err := models.MibGetsGroupBy("module", "") + dangerous(err) + + var modules []string + for _, m := range mibs { + if m.Module == "" { + continue + } + modules = append(modules, m.Module) + } + + renderData(c, modules, err) +} + +func mibMetricGet(c *gin.Context) { + module := queryStr(c, "module") + + mibs, err := models.MibGetsGroupBy("metric", "module=?", module) + dangerous(err) + + var metric []string + for _, m := range mibs { + metric = append(metric, m.Metric) + } + renderData(c, metric, err) +} + +func mibGet(c *gin.Context) { + module := queryStr(c, "module", "") + metric := queryStr(c, "metric", "") + + mib, err := models.MibGet("module=? and metric=?", module, metric) + renderData(c, mib, err) +} + +func mibGets(c *gin.Context) { + module := queryStr(c, "module", "") + metric := queryStr(c, "metric", "") + + var param []interface{} + var sql string + sql = "1 = 1" + if module != "" { + sql += " and module=?" + param = append(param, module) + } + if metric != "" { + sql += " and metric=?" + param = append(param, metric) + } + + mibs, err := models.MibGets(sql, param...) 
+ renderData(c, mibs, err) +} + +func mibGetsByQuery(c *gin.Context) { + loginUsername(c) + + limit := queryInt(c, "limit", 20) + query := queryStr(c, "query", "") + + total, err := models.MibTotal(query) + dangerous(err) + + list, err := models.MibGetsByQuery(query, limit, offset(c, limit)) + dangerous(err) + + renderData(c, gin.H{ + "list": list, + "total": total, + }, nil) +} + +type MibsDelRev struct { + Ids []int64 `json:"ids"` +} + +func mibDel(c *gin.Context) { + username := loginUsername(c) + can, err := models.UsernameCandoGlobalOp(username, "nems_network_ops") + dangerous(err) + if !can { + bomb("no privilege") + } + + var recv MibsDelRev + dangerous(c.ShouldBind(&recv)) + for i := 0; i < len(recv.Ids); i++ { + err = models.MibDel(recv.Ids[i]) + dangerous(err) + } + + renderMessage(c, err) +} diff --git a/src/modules/server/http/router_nethw.go b/src/modules/server/http/router_nethw.go new file mode 100644 index 00000000..0dcc8b23 --- /dev/null +++ b/src/modules/server/http/router_nethw.go @@ -0,0 +1,402 @@ +package http + +import ( + "fmt" + "strings" + + goping "github.com/didi/nightingale/v4/src/common/ping" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/config" + + "github.com/gaochao1/sw" + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/logger" +) + +type networkHardwareForm struct { + IPs string `json:"ips"` + Cate string `json:"cate"` + SnmpVersion string `json:"snmp_version"` + Auth string `json:"auth"` + Region string `json:"region"` + Note string `json:"note"` +} + +// agent上报的接口 +func networkHardwarePost(c *gin.Context) { + username := loginUsername(c) + can, err := models.UsernameCandoGlobalOp(username, "nems_network_ops") + dangerous(err) + if !can { + bomb("no privilege") + } + + var f networkHardwareForm + bind(c, &f) + var ipList []string + ips := strings.Split(f.IPs, "\n") + for _, ip := range ips { + list := sw.ParseIp(ip) + + ipList = append(ipList, list...) 
+ } + + if config.Config.Nems.CheckTarget { + ipList = goping.FilterIP(ipList) + } + for _, ip := range ipList { + err := models.NetworkHardwareNew(models.MakeNetworkHardware(ip, f.Cate, f.SnmpVersion, f.Auth, f.Region, f.Note)) + if err != nil { + logger.Warning(err) + } + } + + renderMessage(c, nil) +} + +type networkHardwarePutForm struct { + IP string `json:"ip"` + Cate string `json:"cate"` + SnmpVersion string `json:"snmp_version"` + Auth string `json:"auth"` + Region string `json:"region"` + Note string `json:"note"` +} + +func networkHardwarePut(c *gin.Context) { + username := loginUsername(c) + can, err := models.UsernameCandoGlobalOp(username, "nems_network_ops") + dangerous(err) + if !can { + bomb("no privilege") + } + + var f networkHardwarePutForm + bind(c, &f) + + id := urlParamInt64(c, "id") + nh, err := models.NetworkHardwareGet("id=?", id) + dangerous(err) + nh.IP = f.IP + nh.Cate = f.Cate + nh.SnmpVersion = f.SnmpVersion + nh.Auth = f.Auth + nh.Region = f.Region + nh.Note = f.Note + err = nh.Update() + renderData(c, nh, err) +} + +type IPForm struct { + IPs []string `json:"ips"` +} + +func networkHardwareByIP(c *gin.Context) { + var f IPForm + bind(c, &f) + + renderData(c, models.GetHardwareInfoBy(f.IPs), nil) +} + +func networkHardwareGetAll(c *gin.Context) { + list, err := models.NetworkHardwareList("", 10000000, 0) + renderData(c, list, err) +} + +func networkHardwareGets(c *gin.Context) { + limit := queryInt(c, "limit", 20) + query := queryStr(c, "query", "") + + username := loginUsername(c) + can, err := models.UsernameCandoGlobalOp(username, "nems_network_ops") + dangerous(err) + if !can { + bomb("no privilege") + } + + total, err := models.NetworkHardwareTotal(query) + dangerous(err) + + list, err := models.NetworkHardwareList(query, limit, offset(c, limit)) + dangerous(err) + + renderData(c, gin.H{ + "list": list, + "total": total, + }, nil) +} + +type mgrHWNoteForm struct { + Ids []int64 `json:"ids" binding:"required"` + Note string 
`json:"note" binding:"required"` +} + +func (f mgrHWNoteForm) Validate() { + if len(f.Ids) == 0 { + bomb("arg[ids] is empty") + } +} + +func mgrHWNotePut(c *gin.Context) { + username := loginUsername(c) + can, err := models.UsernameCandoGlobalOp(username, "nems_network_ops") + dangerous(err) + if !can { + bomb("no privilege") + } + + var f mgrHWNoteForm + bind(c, &f) + f.Validate() + + for i := 0; i < len(f.Ids); i++ { + hw, err := models.NetworkHardwareGet("id=?", f.Ids[i]) + dangerous(err) + + if hw == nil { + continue + } + + if hw.Note == f.Note { + continue + } + + hw.Note = f.Note + dangerous(hw.Update("note")) + } + + renderMessage(c, nil) +} + +type mgrHWCateForm struct { + Ids []int64 `json:"ids" binding:"required"` + Cate string `json:"cate" binding:"required"` +} + +func (f mgrHWCateForm) Validate() { + if len(f.Ids) == 0 { + bomb("arg[ids] is empty") + } +} + +func mgrHWCatePut(c *gin.Context) { + username := loginUsername(c) + can, err := models.UsernameCandoGlobalOp(username, "nems_network_ops") + dangerous(err) + if !can { + bomb("no privilege") + } + + var f mgrHWCateForm + bind(c, &f) + f.Validate() + + for i := 0; i < len(f.Ids); i++ { + hw, err := models.NetworkHardwareGet("id=?", f.Ids[i]) + dangerous(err) + + if hw == nil { + continue + } + + if hw.Cate == f.Cate { + continue + } + + hw.Cate = f.Cate + dangerous(hw.Update("cate")) + } + + renderMessage(c, nil) +} + +type mgrHWTenantForm struct { + Ids []int64 `json:"ids" binding:"required"` + Tenant string `json:"tenant" binding:"required"` +} + +func (f mgrHWTenantForm) Validate() { + if len(f.Ids) == 0 { + bomb("arg[ids] is empty") + } +} + +func mgrHWTenantPut(c *gin.Context) { + username := loginUsername(c) + can, err := models.UsernameCandoGlobalOp(username, "nems_network_ops") + dangerous(err) + if !can { + bomb("no privilege") + } + + var f mgrHWTenantForm + bind(c, &f) + f.Validate() + + var hws []*models.NetworkHardware + + for i := 0; i < len(f.Ids); i++ { + hw, err := 
models.NetworkHardwareGet("id=?", f.Ids[i]) + dangerous(err) + + if hw == nil { + continue + } + + if hw.Tenant == f.Tenant { + continue + } + + hw.Tenant = f.Tenant + dangerous(hw.Update("tenant")) + + hws = append(hws, hw) + } + + dangerous(models.NetworkHardwareResourceRegister(hws, f.Tenant)) + + renderMessage(c, nil) +} + +func networkHardwaresPut(c *gin.Context) { + var hws []*models.NetworkHardware + bind(c, &hws) + + for i := 0; i < len(hws); i++ { + hw, err := models.NetworkHardwareGet("id=?", hws[i].Id) + dangerous(err) + + if hw == nil { + continue + } + + hw.Name = hws[i].Name + hw.SN = hws[i].SN + hw.Uptime = hws[i].Uptime + hw.Info = hws[i].Info + + dangerous(hw.Update("name", "sn", "info", "uptime")) + } + + renderMessage(c, nil) +} + +func hwCateGet(c *gin.Context) { + cates := []string{"sw", "fw"} + renderData(c, cates, nil) +} + +type hwsDelRev struct { + Ids []int64 `json:"ids"` +} + +func networkHardwareDel(c *gin.Context) { + username := loginUsername(c) + can, err := models.UsernameCandoGlobalOp(username, "nems_network_ops") + dangerous(err) + if !can { + bomb("no privilege") + } + + var recv hwsDelRev + dangerous(c.ShouldBind(&recv)) + for i := 0; i < len(recv.Ids); i++ { + err = models.NetworkHardwareDel(recv.Ids[i]) + dangerous(err) + } + + renderMessage(c, err) +} + +func snmpRegionGet(c *gin.Context) { + renderData(c, config.Config.Monapi.Region, nil) +} + +// 从某个租户手上回收资源 +func nwBackPut(c *gin.Context) { + var f idsForm + bind(c, &f) + f.Validate() + + loginUser(c).CheckPermGlobal("nems_network_ops") + + count := len(f.Ids) + for i := 0; i < count; i++ { + nw, err := models.NetworkHardwareGet("id=?", f.Ids[i]) + dangerous(err) + + if nw == nil { + logger.Warningf("network hardware %d not exist", f.Ids[i]) + continue + } + + nw.Tenant = "" + dangerous(nw.Update("tenant")) + dangerous(models.ResourceUnregister([]string{nw.SN})) + } + + renderMessage(c, nil) +} + +// 普通用户在批量搜索页面搜索网络设备 +func nwSearchGets(c *gin.Context) { + batch := 
queryStr(c, "batch") + field := queryStr(c, "field") // ip,sn,name + list, err := models.NwSearch(batch, field) + renderData(c, list, err) +} + +// 管理员在主机网络设备管理页面查看列表 +func nwGets(c *gin.Context) { + tenant := queryStr(c, "tenant", "") + page := queryInt(c, "p", 1) + limit := queryInt(c, "limit", 20) + query := queryStr(c, "query", "") + batch := queryStr(c, "batch", "") + field := queryStr(c, "field", "ip") + + if page < 1 || limit < 1 { + dangerous(fmt.Errorf("param p or limit < 1")) + } + + total, err := models.NwTotalForAdmin(tenant, query, batch, field) + dangerous(err) + + start := (page - 1) * limit + list, err := models.NwGetsForAdmin(tenant, query, batch, field, limit, start) + dangerous(err) + + renderData(c, gin.H{ + "list": list, + "total": total, + }, nil) +} + +func nwDel(c *gin.Context) { + var f idsForm + bind(c, &f) + f.Validate() + + loginUser(c).CheckPermGlobal("nems_network_ops") + + count := len(f.Ids) + for i := 0; i < count; i++ { + id := f.Ids[i] + nw, err := models.NetworkHardwareGet("id=?", id) + dangerous(err) + + if nw == nil { + logger.Warningf("network hardware %d not exist", id) + continue + } + + if nw.Tenant != "" { + bomb("network_hardware[ip:%s, name:%s] belongs to tenant[:%s], cannot delete", nw.IP, nw.Name, nw.Tenant) + } + + dangerous(models.ResourceUnregister([]string{nw.SN})) + dangerous(nw.Del()) + } + + renderMessage(c, nil) +} diff --git a/src/modules/rdb/http/router_node.go b/src/modules/server/http/router_node.go similarity index 97% rename from src/modules/rdb/http/router_node.go rename to src/modules/server/http/router_node.go index cb928d27..7be3a2a9 100644 --- a/src/modules/rdb/http/router_node.go +++ b/src/modules/server/http/router_node.go @@ -4,13 +4,13 @@ import ( "fmt" "time" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/config" + "github.com/gin-gonic/gin" "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/net/httplib" "github.com/toolkits/pkg/str" - - 
"github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/rdb/config" ) func nodeGet(c *gin.Context) { @@ -235,7 +235,7 @@ type eventEntity struct { } func nodeEvent(action string, node *models.Node) { - for _, backend := range config.Config.Webhook { + for _, backend := range config.Config.Rdb.Webhook { header := map[string]string{} if backend.Token != "" { header["Authorization"] = "Bearer " + backend.Token diff --git a/src/modules/rdb/http/router_node_cate.go b/src/modules/server/http/router_node_cate.go similarity index 97% rename from src/modules/rdb/http/router_node_cate.go rename to src/modules/server/http/router_node_cate.go index ea1b0306..c1515910 100644 --- a/src/modules/rdb/http/router_node_cate.go +++ b/src/modules/server/http/router_node_cate.go @@ -1,10 +1,10 @@ package http import ( + "github.com/didi/nightingale/v4/src/models" + "github.com/gin-gonic/gin" "github.com/toolkits/pkg/str" - - "github.com/didi/nightingale/src/models" ) func nodeCateGets(c *gin.Context) { diff --git a/src/modules/rdb/http/router_node_cate_field.go b/src/modules/server/http/router_node_cate_field.go similarity index 96% rename from src/modules/rdb/http/router_node_cate_field.go rename to src/modules/server/http/router_node_cate_field.go index 06898679..da51ac2c 100644 --- a/src/modules/rdb/http/router_node_cate_field.go +++ b/src/modules/server/http/router_node_cate_field.go @@ -1,7 +1,8 @@ package http import ( - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" + "github.com/gin-gonic/gin" ) diff --git a/src/modules/rdb/http/router_node_field_value.go b/src/modules/server/http/router_node_field_value.go similarity index 90% rename from src/modules/rdb/http/router_node_field_value.go rename to src/modules/server/http/router_node_field_value.go index d9809bad..7d6ef080 100644 --- a/src/modules/rdb/http/router_node_field_value.go +++ b/src/modules/server/http/router_node_field_value.go @@ -1,7 +1,8 @@ package 
http import ( - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" + "github.com/gin-gonic/gin" ) diff --git a/src/modules/rdb/http/router_node_role.go b/src/modules/server/http/router_node_role.go similarity index 98% rename from src/modules/rdb/http/router_node_role.go rename to src/modules/server/http/router_node_role.go index d7dd9eed..b919c090 100644 --- a/src/modules/rdb/http/router_node_role.go +++ b/src/modules/server/http/router_node_role.go @@ -3,7 +3,8 @@ package http import ( "fmt" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" + "github.com/gin-gonic/gin" ) diff --git a/src/modules/rdb/http/router_node_trash.go b/src/modules/server/http/router_node_trash.go similarity index 91% rename from src/modules/rdb/http/router_node_trash.go rename to src/modules/server/http/router_node_trash.go index 5abb3854..ace09950 100644 --- a/src/modules/rdb/http/router_node_trash.go +++ b/src/modules/server/http/router_node_trash.go @@ -1,7 +1,8 @@ package http import ( - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" + "github.com/gin-gonic/gin" ) diff --git a/src/modules/rdb/http/router_ops.go b/src/modules/server/http/router_ops.go similarity index 77% rename from src/modules/rdb/http/router_ops.go rename to src/modules/server/http/router_ops.go index 8af62b0a..72cb19ca 100644 --- a/src/modules/rdb/http/router_ops.go +++ b/src/modules/server/http/router_ops.go @@ -1,7 +1,8 @@ package http import ( - "github.com/didi/nightingale/src/modules/rdb/config" + "github.com/didi/nightingale/v4/src/modules/server/config" + "github.com/gin-gonic/gin" ) diff --git a/src/modules/rdb/http/router_perm.go b/src/modules/server/http/router_perm.go similarity index 95% rename from src/modules/rdb/http/router_perm.go rename to src/modules/server/http/router_perm.go index bc31ee59..55d10e8b 100644 --- a/src/modules/rdb/http/router_perm.go +++ 
b/src/modules/server/http/router_perm.go @@ -1,9 +1,11 @@ package http import ( - "github.com/didi/nightingale/src/models" - "github.com/gin-gonic/gin" + "github.com/didi/nightingale/v4/src/models" + "strings" + + "github.com/gin-gonic/gin" ) func v1CandoGlobalOp(c *gin.Context) { diff --git a/src/modules/transfer/http/push_router.go b/src/modules/server/http/router_push.go similarity index 50% rename from src/modules/transfer/http/push_router.go rename to src/modules/server/http/router_push.go index 59503501..0d248933 100644 --- a/src/modules/transfer/http/push_router.go +++ b/src/modules/server/http/router_push.go @@ -1,10 +1,9 @@ package http import ( - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/transfer/rpc" - "github.com/didi/nightingale/src/toolkits/http/render" - "github.com/didi/nightingale/src/toolkits/stats" + "github.com/didi/nightingale/v4/src/common/dataobj" + statsd "github.com/didi/nightingale/v4/src/common/stats" + "github.com/didi/nightingale/v4/src/modules/server/rpc" "github.com/gin-gonic/gin" "github.com/toolkits/pkg/errors" @@ -12,7 +11,7 @@ import ( func PushData(c *gin.Context) { if c.Request.ContentLength == 0 { - render.Message(c, "blank body") + renderMessage(c, "blank body") return } @@ -20,11 +19,11 @@ func PushData(c *gin.Context) { errors.Dangerous(c.ShouldBindJSON(&recvMetricValues)) errCount, errMsg := rpc.PushData(recvMetricValues) - stats.Counter.Set("http.points.in.err", errCount) + statsd.Counter.Set("http.points.in.err", errCount) if errMsg != "" { - render.Message(c, errMsg) + renderMessage(c, errMsg) return } - render.Data(c, "ok", nil) + renderData(c, "ok", nil) } diff --git a/src/modules/transfer/http/query_router.go b/src/modules/server/http/router_query.go similarity index 79% rename from src/modules/transfer/http/query_router.go rename to src/modules/server/http/router_query.go index e2a8d6d8..ece852a4 100644 --- a/src/modules/transfer/http/query_router.go +++ 
b/src/modules/server/http/router_query.go @@ -1,10 +1,9 @@ package http import ( - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/transfer/backend" - "github.com/didi/nightingale/src/toolkits/http/render" - "github.com/didi/nightingale/src/toolkits/stats" + "github.com/didi/nightingale/v4/src/common/dataobj" + statsd "github.com/didi/nightingale/v4/src/common/stats" + "github.com/didi/nightingale/v4/src/modules/server/backend" "github.com/gin-gonic/gin" "github.com/toolkits/pkg/errors" @@ -12,23 +11,23 @@ import ( ) func QueryData(c *gin.Context) { - stats.Counter.Set("data.api.qp10s", 1) + statsd.Counter.Set("data.api.qp10s", 1) dataSource, err := backend.GetDataSourceFor("") if err != nil { logger.Warningf("could not find datasource") - render.Message(c, err) + renderMessage(c, err) return } var input []dataobj.QueryData errors.Dangerous(c.ShouldBindJSON(&input)) resp := dataSource.QueryData(input) - render.Data(c, resp, nil) + renderData(c, resp, nil) } func QueryDataForUI(c *gin.Context) { - stats.Counter.Set("data.ui.qp10s", 1) + statsd.Counter.Set("data.ui.qp10s", 1) var input dataobj.QueryDataForUI var respData []*dataobj.QueryDataForUIResp @@ -39,7 +38,7 @@ func QueryDataForUI(c *gin.Context) { dataSource, err := backend.GetDataSourceFor("") if err != nil { logger.Warningf("could not find datasource") - render.Message(c, err) + renderMessage(c, err) return } resp := dataSource.QueryDataForUI(input) @@ -84,72 +83,72 @@ func QueryDataForUI(c *gin.Context) { } } - render.Data(c, respData, nil) + renderData(c, respData, nil) } func GetMetrics(c *gin.Context) { - stats.Counter.Set("metric.qp10s", 1) + statsd.Counter.Set("metric.qp10s", 1) recv := dataobj.EndpointsRecv{} errors.Dangerous(c.ShouldBindJSON(&recv)) dataSource, err := backend.GetDataSourceFor("") if err != nil { logger.Warningf("could not find datasource") - render.Message(c, err) + renderMessage(c, err) return } resp := dataSource.QueryMetrics(recv) - 
render.Data(c, resp, nil) + renderData(c, resp, nil) } func GetTagPairs(c *gin.Context) { - stats.Counter.Set("tag.qp10s", 1) + statsd.Counter.Set("tag.qp10s", 1) recv := dataobj.EndpointMetricRecv{} errors.Dangerous(c.ShouldBindJSON(&recv)) dataSource, err := backend.GetDataSourceFor("") if err != nil { logger.Warningf("could not find datasource") - render.Message(c, err) + renderMessage(c, err) return } resp := dataSource.QueryTagPairs(recv) - render.Data(c, resp, nil) + renderData(c, resp, nil) } func GetIndexByClude(c *gin.Context) { - stats.Counter.Set("xclude.qp10s", 1) + statsd.Counter.Set("xclude.qp10s", 1) recvs := make([]dataobj.CludeRecv, 0) errors.Dangerous(c.ShouldBindJSON(&recvs)) dataSource, err := backend.GetDataSourceFor("") if err != nil { logger.Warningf("could not find datasource") - render.Message(c, err) + renderMessage(c, err) return } resp := dataSource.QueryIndexByClude(recvs) - render.Data(c, resp, nil) + renderData(c, resp, nil) } func GetIndexByFullTags(c *gin.Context) { - stats.Counter.Set("counter.qp10s", 1) + statsd.Counter.Set("counter.qp10s", 1) recvs := make([]dataobj.IndexByFullTagsRecv, 0) errors.Dangerous(c.ShouldBindJSON(&recvs)) dataSource, err := backend.GetDataSourceFor("") if err != nil { logger.Warningf("could not find datasource") - render.Message(c, err) + renderMessage(c, err) return } resp, count := dataSource.QueryIndexByFullTags(recvs) - render.Data(c, &listResp{List: resp, Count: count}, nil) + renderData(c, &listResp{List: resp, Count: count}, nil) } type listResp struct { diff --git a/src/modules/rdb/http/router_resource.go b/src/modules/server/http/router_resource.go similarity index 99% rename from src/modules/rdb/http/router_resource.go rename to src/modules/server/http/router_resource.go index e4069bc2..27a190ec 100644 --- a/src/modules/rdb/http/router_resource.go +++ b/src/modules/server/http/router_resource.go @@ -5,12 +5,12 @@ import ( "sort" "strings" + "github.com/didi/nightingale/v4/src/models" + 
"github.com/gin-gonic/gin" "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/slice" "github.com/toolkits/pkg/str" - - "github.com/didi/nightingale/src/models" ) func resourceSearchGet(c *gin.Context) { diff --git a/src/modules/rdb/http/router_role.go b/src/modules/server/http/router_role.go similarity index 98% rename from src/modules/rdb/http/router_role.go rename to src/modules/server/http/router_role.go index fad2f020..0ffe797e 100644 --- a/src/modules/rdb/http/router_role.go +++ b/src/modules/server/http/router_role.go @@ -1,7 +1,8 @@ package http import ( - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" + "github.com/gin-gonic/gin" ) diff --git a/src/modules/monapi/http/router_screen.go b/src/modules/server/http/router_screen.go similarity index 99% rename from src/modules/monapi/http/router_screen.go rename to src/modules/server/http/router_screen.go index 6bf7e28f..3728a1cb 100644 --- a/src/modules/monapi/http/router_screen.go +++ b/src/modules/server/http/router_screen.go @@ -3,7 +3,7 @@ package http import ( "strings" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" "github.com/gin-gonic/gin" "github.com/toolkits/pkg/errors" diff --git a/src/modules/rdb/http/router_self.go b/src/modules/server/http/router_self.go similarity index 93% rename from src/modules/rdb/http/router_self.go rename to src/modules/server/http/router_self.go index 98e6d4d7..e5f31e50 100644 --- a/src/modules/rdb/http/router_self.go +++ b/src/modules/server/http/router_self.go @@ -1,9 +1,10 @@ package http import ( - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/rdb/auth" - "github.com/didi/nightingale/src/modules/rdb/config" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/auth" + "github.com/didi/nightingale/v4/src/modules/server/config" + "github.com/gin-gonic/gin" "github.com/toolkits/pkg/logger" ) @@ -132,7 
+133,7 @@ func permLocalOps(c *gin.Context) { user := loginUser(c) node := Node(urlParamInt64(c, "id")) - operations, err := user.PermByNode(node) + operations, err := user.PermByNode(node, config.LocalOpsList) renderData(c, operations, err) } diff --git a/src/modules/rdb/http/router_sender.go b/src/modules/server/http/router_sender.go similarity index 75% rename from src/modules/rdb/http/router_sender.go rename to src/modules/server/http/router_sender.go index 7f895890..037d5a76 100644 --- a/src/modules/rdb/http/router_sender.go +++ b/src/modules/server/http/router_sender.go @@ -3,11 +3,11 @@ package http import ( "strings" - "github.com/gin-gonic/gin" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/server/cron" + "github.com/didi/nightingale/v4/src/modules/server/redisc" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/rdb/config" - "github.com/didi/nightingale/src/modules/rdb/redisc" + "github.com/gin-gonic/gin" ) func v1SendMail(c *gin.Context) { @@ -29,7 +29,7 @@ func v1SendMail(c *gin.Context) { bomb("content is blank") } - renderMessage(c, redisc.Write(&message, config.MAIL_QUEUE_NAME)) + renderMessage(c, redisc.Write(&message, cron.MAIL_QUEUE_NAME)) } func v1SendSms(c *gin.Context) { @@ -46,7 +46,7 @@ func v1SendSms(c *gin.Context) { bomb("content is blank") } - renderMessage(c, redisc.Write(&message, config.SMS_QUEUE_NAME)) + renderMessage(c, redisc.Write(&message, cron.SMS_QUEUE_NAME)) } func v1SendVoice(c *gin.Context) { @@ -63,7 +63,7 @@ func v1SendVoice(c *gin.Context) { bomb("content is blank") } - renderMessage(c, redisc.Write(&message, config.VOICE_QUEUE_NAME)) + renderMessage(c, redisc.Write(&message, cron.VOICE_QUEUE_NAME)) } func v1SendIm(c *gin.Context) { @@ -80,5 +80,5 @@ func v1SendIm(c *gin.Context) { bomb("content is blank") } - renderMessage(c, redisc.Write(&message, config.IM_QUEUE_NAME)) + renderMessage(c, redisc.Write(&message, 
cron.IM_QUEUE_NAME)) } diff --git a/src/modules/rdb/http/router_sso.go b/src/modules/server/http/router_sso.go similarity index 91% rename from src/modules/rdb/http/router_sso.go rename to src/modules/server/http/router_sso.go index 06a069c3..d8862e14 100644 --- a/src/modules/rdb/http/router_sso.go +++ b/src/modules/server/http/router_sso.go @@ -1,7 +1,8 @@ package http import ( - "github.com/didi/nightingale/src/modules/rdb/ssoc" + "github.com/didi/nightingale/v4/src/modules/server/ssoc" + "github.com/gin-gonic/gin" ) diff --git a/src/modules/transfer/http/health_router.go b/src/modules/server/http/router_stats.go similarity index 57% rename from src/modules/transfer/http/health_router.go rename to src/modules/server/http/router_stats.go index a9de83be..839506c7 100644 --- a/src/modules/transfer/http/health_router.go +++ b/src/modules/server/http/router_stats.go @@ -1,29 +1,36 @@ package http import ( - "fmt" - "os" - - "github.com/didi/nightingale/src/modules/transfer/backend" - "github.com/didi/nightingale/src/modules/transfer/cache" - "github.com/didi/nightingale/src/modules/transfer/config" - "github.com/didi/nightingale/src/toolkits/http/render" - "github.com/didi/nightingale/src/toolkits/str" + "github.com/didi/nightingale/v4/src/common/str" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/backend" + "github.com/didi/nightingale/v4/src/modules/server/cache" + "github.com/didi/nightingale/v4/src/modules/server/config" + "github.com/didi/nightingale/v4/src/modules/server/judge" + "github.com/gin-gonic/gin" "github.com/toolkits/pkg/errors" "github.com/toolkits/pkg/logger" ) -func ping(c *gin.Context) { - c.String(200, "pong") +type rdbStats struct { + Login *models.Stats } -func addr(c *gin.Context) { - c.String(200, c.Request.RemoteAddr) +var ( + stats *rdbStats +) + +func initStats() { + stats = &rdbStats{ + Login: models.MustNewStats("login"), + } } -func pid(c *gin.Context) { - c.String(200, 
fmt.Sprintf("%d", os.Getpid())) +func counterGet(c *gin.Context) { + renderData(c, map[string]int64{ + "login": stats.Login.Get(), + }, nil) } type getStraReq struct { @@ -34,10 +41,10 @@ type getStraReq struct { func getStra(c *gin.Context) { var input getStraReq errors.Dangerous(c.ShouldBindJSON(&input)) - key := str.MD5(input.Endpoint, input.Metric, "") + key := str.ToMD5(input.Endpoint, input.Metric, "") stras := cache.StraMap.GetByKey(key) - render.Data(c, stras, nil) + renderData(c, stras, nil) } type tsdbInstanceRecv struct { @@ -50,15 +57,15 @@ func tsdbInstance(c *gin.Context) { var input tsdbInstanceRecv errors.Dangerous(c.ShouldBindJSON(&input)) - dataSource, err := backend.GetDataSourceFor(config.Config.Backend.DataSource) + dataSource, err := backend.GetDataSourceFor(config.Config.Transfer.Backend.DataSource) if err != nil { logger.Warningf("could not find datasource") - render.Message(c, err) + renderMessage(c, err) return } addrs := dataSource.GetInstance(input.Metric, input.Endpoint, input.TagMap) - render.Data(c, addrs, nil) + renderData(c, addrs, nil) } type judgeInstanceRecv struct { @@ -73,18 +80,18 @@ func judgeInstance(c *gin.Context) { var input judgeInstanceRecv errors.Dangerous(c.ShouldBindJSON(&input)) var instance string - key := str.MD5(input.Endpoint, input.Metric, "") + key := str.ToMD5(input.Endpoint, input.Metric, "") stras := cache.StraMap.GetByKey(key) for _, stra := range stras { - if input.Sid != stra.Id || !backend.TagMatch(stra.Tags, input.TagMap) { + if input.Sid != stra.Id || !judge.TagMatch(stra.Tags, input.TagMap) { continue } instance = stra.JudgeInstance } - render.Data(c, instance, nil) + renderData(c, instance, nil) } func judges(c *gin.Context) { - render.Data(c, backend.GetJudges(), nil) + renderData(c, judge.GetJudges(), nil) } diff --git a/src/modules/monapi/http/router_stra.go b/src/modules/server/http/router_stra.go similarity index 92% rename from src/modules/monapi/http/router_stra.go rename to 
src/modules/server/http/router_stra.go index 784125b9..33f347c7 100644 --- a/src/modules/monapi/http/router_stra.go +++ b/src/modules/server/http/router_stra.go @@ -1,8 +1,8 @@ package http import ( - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/scache" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" "github.com/gin-gonic/gin" "github.com/toolkits/pkg/errors" @@ -131,12 +131,12 @@ func effectiveStrasGet(c *gin.Context) { instance := queryStr(c, "instance", "") if queryInt(c, "all", 0) == 1 { - stras = scache.StraCache.GetAll() + stras = cache.StraCache.GetAll() } else if instance != "" { - node, err := scache.ActiveJudgeNode.GetNodeBy(instance) + node, err := cache.ActiveJudgeNode.GetNodeBy(instance) errors.Dangerous(err) - stras = scache.StraCache.GetByNode(node) + stras = cache.StraCache.GetByNode(node) } renderData(c, stras, nil) } diff --git a/src/modules/job/http/router_task.go b/src/modules/server/http/router_task.go similarity index 96% rename from src/modules/job/http/router_task.go rename to src/modules/server/http/router_task.go index 9d818fbb..b367135d 100644 --- a/src/modules/job/http/router_task.go +++ b/src/modules/server/http/router_task.go @@ -12,9 +12,9 @@ import ( "github.com/toolkits/pkg/net/httplib" "github.com/toolkits/pkg/slice" - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/job/config" + "github.com/didi/nightingale/v4/src/common/address" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/config" ) type taskForm struct { @@ -280,18 +280,18 @@ func taskHostStdout(c *gin.Context) { id := urlParamInt64(c, "id") host := urlParamStr(c, "host") - if config.Config.Output.ComeFrom == "database" || config.Config.Output.ComeFrom == "" { + if config.Config.Job.OutputComeFrom == "database" || 
config.Config.Job.OutputComeFrom == "" { obj, err := models.TaskHostGet(id, host) renderData(c, obj.Stdout, err) return } - if config.Config.Output.RemotePort <= 0 || config.Config.Output.RemotePort > 65535 { - renderMessage(c, fmt.Errorf("remotePort[%d] invalid", config.Config.Output.RemotePort)) + if config.Config.Job.RemoteAgtdPort <= 0 || config.Config.Job.RemoteAgtdPort > 65535 { + renderMessage(c, fmt.Errorf("remotePort[%d] invalid", config.Config.Job.RemoteAgtdPort)) return } - url := fmt.Sprintf("http://%s:%d/output/%d/stdout.json", host, config.Config.Output.RemotePort, id) + url := fmt.Sprintf("http://%s:%d/output/%d/stdout.json", host, config.Config.Job.RemoteAgtdPort, id) client := &http.Client{ CheckRedirect: func(req *http.Request, via []*http.Request) error { return http.ErrUseLastResponse @@ -313,18 +313,18 @@ func taskHostStderr(c *gin.Context) { id := urlParamInt64(c, "id") host := urlParamStr(c, "host") - if config.Config.Output.ComeFrom == "database" || config.Config.Output.ComeFrom == "" { + if config.Config.Job.OutputComeFrom == "database" || config.Config.Job.OutputComeFrom == "" { obj, err := models.TaskHostGet(id, host) renderData(c, obj.Stderr, err) return } - if config.Config.Output.RemotePort <= 0 || config.Config.Output.RemotePort > 65535 { - renderMessage(c, fmt.Errorf("remotePort[%d] invalid", config.Config.Output.RemotePort)) + if config.Config.Job.RemoteAgtdPort <= 0 || config.Config.Job.RemoteAgtdPort > 65535 { + renderMessage(c, fmt.Errorf("remotePort[%d] invalid", config.Config.Job.RemoteAgtdPort)) return } - url := fmt.Sprintf("http://%s:%d/output/%d/stderr.json", host, config.Config.Output.RemotePort, id) + url := fmt.Sprintf("http://%s:%d/output/%d/stderr.json", host, config.Config.Job.RemoteAgtdPort, id) client := &http.Client{ CheckRedirect: func(req *http.Request, via []*http.Request) error { return http.ErrUseLastResponse diff --git a/src/modules/job/http/router_task_tpl.go b/src/modules/server/http/router_task_tpl.go 
similarity index 99% rename from src/modules/job/http/router_task_tpl.go rename to src/modules/server/http/router_task_tpl.go index 495713be..e593f38d 100644 --- a/src/modules/job/http/router_task_tpl.go +++ b/src/modules/server/http/router_task_tpl.go @@ -7,7 +7,7 @@ import ( "github.com/gin-gonic/gin" "github.com/toolkits/pkg/str" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" ) func taskTplGets(c *gin.Context) { diff --git a/src/modules/rdb/http/router_team.go b/src/modules/server/http/router_team.go similarity index 99% rename from src/modules/rdb/http/router_team.go rename to src/modules/server/http/router_team.go index b1454b22..c545eb56 100644 --- a/src/modules/rdb/http/router_team.go +++ b/src/modules/server/http/router_team.go @@ -5,10 +5,10 @@ import ( "strconv" "strings" + "github.com/didi/nightingale/v4/src/models" + "github.com/gin-gonic/gin" "github.com/toolkits/pkg/str" - - "github.com/didi/nightingale/src/models" ) func teamAllGet(c *gin.Context) { diff --git a/src/modules/monapi/http/router_tmpchart.go b/src/modules/server/http/router_tmpchar.go similarity index 91% rename from src/modules/monapi/http/router_tmpchart.go rename to src/modules/server/http/router_tmpchar.go index 49ca1f0d..87593911 100644 --- a/src/modules/monapi/http/router_tmpchart.go +++ b/src/modules/server/http/router_tmpchar.go @@ -4,7 +4,7 @@ import ( "strconv" "strings" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" "github.com/gin-gonic/gin" "github.com/toolkits/pkg/errors" @@ -34,7 +34,7 @@ func tmpChartPost(c *gin.Context) { func tmpChartGet(c *gin.Context) { objs := []*models.TmpChart{} - idStr := mustQueryStr(c, "ids") + idStr := queryStr(c, "ids") ids := strings.Split(idStr, ",") for _, id := range ids { i, err := strconv.ParseInt(id, 10, 64) diff --git a/src/modules/monapi/http/router_tpl.go b/src/modules/server/http/router_tpl.go similarity index 57% rename from 
src/modules/monapi/http/router_tpl.go rename to src/modules/server/http/router_tpl.go index e5ad822c..ca06132d 100644 --- a/src/modules/monapi/http/router_tpl.go +++ b/src/modules/server/http/router_tpl.go @@ -3,23 +3,23 @@ package http import ( "path" - "github.com/didi/nightingale/src/modules/monapi/config" + "github.com/didi/nightingale/v4/src/modules/server/config" "github.com/gin-gonic/gin" "github.com/toolkits/pkg/file" ) func tplNameGets(c *gin.Context) { - tplType := mustQueryStr(c, "tplType") + tplType := queryStr(c, "tplType") var files []string var err error switch tplType { case "alert": - files, err = file.FilesUnder(config.Get().Tpl.AlertPath) + files, err = file.FilesUnder(config.Config.Monapi.Tpl.AlertPath) dangerous(err) case "screen": - files, err = file.FilesUnder(config.Get().Tpl.ScreenPath) + files, err = file.FilesUnder(config.Config.Monapi.Tpl.ScreenPath) dangerous(err) default: bomb("tpl type not found") @@ -29,15 +29,15 @@ func tplNameGets(c *gin.Context) { } func tplGet(c *gin.Context) { - tplName := path.Base(mustQueryStr(c, "tplName")) - tplType := mustQueryStr(c, "tplType") + tplName := path.Base(queryStr(c, "tplName")) + tplType := queryStr(c, "tplType") var filePath string switch tplType { case "alert": - filePath = config.Get().Tpl.AlertPath + "/" + tplName + filePath = config.Config.Monapi.Tpl.AlertPath + "/" + tplName case "screen": - filePath = config.Get().Tpl.ScreenPath + "/" + tplName + filePath = config.Config.Monapi.Tpl.ScreenPath + "/" + tplName default: bomb("tpl type not found") } diff --git a/src/modules/rdb/http/router_tree.go b/src/modules/server/http/router_tree.go similarity index 99% rename from src/modules/rdb/http/router_tree.go rename to src/modules/server/http/router_tree.go index fee286e4..2bb5dd60 100644 --- a/src/modules/rdb/http/router_tree.go +++ b/src/modules/server/http/router_tree.go @@ -3,9 +3,9 @@ package http import ( "strings" - "github.com/gin-gonic/gin" + "github.com/didi/nightingale/v4/src/models" 
- "github.com/didi/nightingale/src/models" + "github.com/gin-gonic/gin" ) func treeUntilLeafGets(c *gin.Context) { diff --git a/src/modules/rdb/http/router_user.go b/src/modules/server/http/router_user.go similarity index 98% rename from src/modules/rdb/http/router_user.go rename to src/modules/server/http/router_user.go index 263cb042..b409ae51 100644 --- a/src/modules/rdb/http/router_user.go +++ b/src/modules/server/http/router_user.go @@ -6,12 +6,12 @@ import ( "strings" "time" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/auth" + "github.com/gin-gonic/gin" "github.com/toolkits/pkg/logger" "github.com/toolkits/pkg/str" - - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/rdb/auth" ) // 通讯录,只要登录用户就可以看,超管要修改某个用户的信息,也是调用这个接口获取列表先 @@ -204,6 +204,7 @@ func userPasswordPut(c *gin.Context) { if err := passwordChangedNotify(user); err != nil { logger.Warningf("password changed notify error %s", err) } + go models.OperationLogNew(root.Username, "user", user.Id, fmt.Sprintf("UserChangePassword %s", user.Username)) } renderMessage(c, err) diff --git a/src/modules/rdb/session/session.go b/src/modules/server/http/session/session.go similarity index 96% rename from src/modules/rdb/session/session.go rename to src/modules/server/http/session/session.go index a4f44d1b..2e018d62 100644 --- a/src/modules/rdb/session/session.go +++ b/src/modules/server/http/session/session.go @@ -8,8 +8,9 @@ import ( "sync" "time" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/rdb/config" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/config" + "github.com/google/uuid" "github.com/toolkits/pkg/logger" ) @@ -30,6 +31,7 @@ func Init() { var err error DefaultSession, err = StartSession(&config.Config.HTTP.Session) if err != nil { + logger.Errorf("start session err:%v", err) panic(err) } } @@ -125,7 +127,7 @@ func (p 
*Manager) Start(w http.ResponseWriter, r *http.Request) (store *SessionS Value: url.QueryEscape(sid), Path: "/", HttpOnly: p.config.HttpOnly, - Domain: p.config.Domain, + Domain: p.config.CookieDomain, } if p.config.CookieLifetime > 0 { cookie.MaxAge = int(p.config.CookieLifetime) diff --git a/src/modules/rdb/session/session_db.go b/src/modules/server/http/session/session_db.go similarity index 93% rename from src/modules/rdb/session/session_db.go rename to src/modules/server/http/session/session_db.go index 3f5fb179..9a54e21c 100644 --- a/src/modules/rdb/session/session_db.go +++ b/src/modules/server/http/session/session_db.go @@ -3,8 +3,9 @@ package session import ( "time" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/rdb/config" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/config" + "github.com/toolkits/pkg/logger" ) diff --git a/src/modules/rdb/session/session_mem.go b/src/modules/server/http/session/session_mem.go similarity index 90% rename from src/modules/rdb/session/session_mem.go rename to src/modules/server/http/session/session_mem.go index d0604a66..77d74e6a 100644 --- a/src/modules/rdb/session/session_mem.go +++ b/src/modules/server/http/session/session_mem.go @@ -5,9 +5,9 @@ import ( "sync" "time" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/rdb/cache" - "github.com/didi/nightingale/src/modules/rdb/config" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" + "github.com/didi/nightingale/v4/src/modules/server/config" ) func newMemStorage(cf *config.SessionSection, opts *options) (storage, error) { diff --git a/src/modules/rdb/session/session_options.go b/src/modules/server/http/session/session_options.go similarity index 100% rename from src/modules/rdb/session/session_options.go rename to src/modules/server/http/session/session_options.go diff --git 
a/src/modules/rdb/session/session_test.go b/src/modules/server/http/session/session_test.go similarity index 100% rename from src/modules/rdb/session/session_test.go rename to src/modules/server/http/session/session_test.go diff --git a/src/modules/transfer/backend/judge.go b/src/modules/server/judge/backend.go similarity index 77% rename from src/modules/transfer/backend/judge.go rename to src/modules/server/judge/backend.go index fd558c3d..c646c2dc 100644 --- a/src/modules/transfer/backend/judge.go +++ b/src/modules/server/judge/backend.go @@ -12,18 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -package backend +package judge import ( + "strings" "time" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/common/report" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/transfer/cache" - "github.com/didi/nightingale/src/toolkits/pools" - "github.com/didi/nightingale/src/toolkits/stats" - "github.com/didi/nightingale/src/toolkits/str" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/pools" + "github.com/didi/nightingale/v4/src/common/stats" + "github.com/didi/nightingale/v4/src/common/str" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" "github.com/toolkits/pkg/concurrent/semaphore" "github.com/toolkits/pkg/container/list" @@ -38,26 +38,33 @@ type JudgeSection struct { MaxConns int `yaml:"maxConns"` MaxIdle int `yaml:"maxIdle"` HbsMod string `yaml:"hbsMod"` + EventPrefix string `yaml:"eventPrefix"` } var ( // config - Judge JudgeSection + JudgeConfig JudgeSection + Ident string // 连接池 node_address -> connection_pool JudgeConnPools *pools.ConnPools // queue JudgeQueues = cache.SafeJudgeQueue{} + + DefaultSendTaskSleepInterval = time.Millisecond * 50 //默认睡眠间隔为50ms + DefaultSendQueueMaxSize = 102400 + MaxSendRetry = 10 ) -func 
InitJudge(section JudgeSection) { - Judge = section +func InitJudge(section JudgeSection, ident string) { + JudgeConfig = section + Ident = ident judges := GetJudges() // init connPool - JudgeConnPools = pools.NewConnPools(Judge.MaxConns, Judge.MaxIdle, Judge.ConnTimeout, Judge.CallTimeout, judges) + JudgeConnPools = pools.NewConnPools(JudgeConfig.MaxConns, JudgeConfig.MaxIdle, JudgeConfig.ConnTimeout, JudgeConfig.CallTimeout, judges) // init queue JudgeQueues = cache.NewJudgeQueue() @@ -66,7 +73,7 @@ func InitJudge(section JudgeSection) { } // start task - judgeConcurrent := Judge.WorkerNum + judgeConcurrent := JudgeConfig.WorkerNum if judgeConcurrent < 1 { judgeConcurrent = 1 } @@ -78,7 +85,7 @@ func InitJudge(section JudgeSection) { } func Send2JudgeTask(Q *list.SafeListLimited, addr string, concurrent int) { - batch := Judge.Batch + batch := JudgeConfig.Batch sema := semaphore.NewSemaphore(concurrent) for { @@ -99,11 +106,16 @@ func Send2JudgeTask(Q *list.SafeListLimited, addr string, concurrent int) { go func(addr string, judgeItems []*dataobj.JudgeItem, count int) { defer sema.Release() + if strings.Contains(addr, Ident) { + Send(judgeItems) + return + } + resp := &dataobj.SimpleRpcResponse{} var err error sendOk := false for i := 0; i < MaxSendRetry; i++ { - err = JudgeConnPools.Call(addr, "Judge.Send", judgeItems, resp) + err = JudgeConnPools.Call(addr, "Server.Send", judgeItems, resp) if err == nil { sendOk = true break @@ -128,9 +140,9 @@ func Push2JudgeQueue(items []*dataobj.MetricValue) { for _, item := range items { var key string if item.Nid != "" { - key = str.MD5(item.Nid, item.Metric, "") + key = str.ToMD5(item.Nid, item.Metric, "") } else { - key = str.MD5(item.Endpoint, item.Metric, "") + key = str.ToMD5(item.Endpoint, item.Metric, "") } stras := cache.StraMap.GetByKey(key) @@ -160,7 +172,7 @@ func Push2JudgeQueue(items []*dataobj.MetricValue) { } } } - stats.Counter.Set("judge.queue.err", errCnt) + stats.Counter.Set("judgeConfig.queue.err", 
errCnt) } func alignTs(ts int64, period int64) int64 { @@ -199,9 +211,9 @@ func TagMatch(straTags []models.Tag, tag map[string]string) bool { func GetJudges() []string { var judgeInstances []string - instances, err := report.GetAlive("judge", Judge.HbsMod) + instances, err := models.GetAllInstances("server", 1) if err != nil { - stats.Counter.Set("judge.get.err", 1) + stats.Counter.Set("server.get.err", 1) return judgeInstances } for _, instance := range instances { diff --git a/src/modules/judge/judge/func.go b/src/modules/server/judge/func.go similarity index 99% rename from src/modules/judge/judge/func.go rename to src/modules/server/judge/func.go index 7087435f..49ebcda3 100644 --- a/src/modules/judge/judge/func.go +++ b/src/modules/server/judge/func.go @@ -18,7 +18,7 @@ import ( "fmt" "math" - "github.com/didi/nightingale/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/dataobj" ) type Function interface { diff --git a/src/modules/judge/judge/judge.go b/src/modules/server/judge/judge.go similarity index 93% rename from src/modules/judge/judge/judge.go rename to src/modules/server/judge/judge.go index 182f75d6..73f79ae4 100644 --- a/src/modules/judge/judge/judge.go +++ b/src/modules/server/judge/judge.go @@ -25,13 +25,13 @@ import ( "sync" "time" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/judge/backend/query" - "github.com/didi/nightingale/src/modules/judge/backend/redi" - "github.com/didi/nightingale/src/modules/judge/cache" - "github.com/didi/nightingale/src/toolkits/stats" - "github.com/didi/nightingale/src/toolkits/str" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/stats" + "github.com/didi/nightingale/v4/src/common/str" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" + "github.com/didi/nightingale/v4/src/modules/server/judge/query" + 
"github.com/didi/nightingale/v4/src/modules/server/redisc" "github.com/spaolacci/murmur3" "github.com/toolkits/pkg/logger" @@ -54,6 +54,17 @@ func GetStra(sid int64) (*models.Stra, bool) { return nil, false } +func Send(items []*dataobj.JudgeItem) { + for _, item := range items { + now := item.Timestamp + pk := item.MD5() + logger.Debugf("recv-->%+v", item) + stats.Counter.Set("push.in", 1) + + go ToJudge(cache.HistoryBigMap[pk[0:2]], pk, item, now) + } +} + func ToJudge(historyMap *cache.JudgeItemMap, key string, val *dataobj.JudgeItem, now int64) { stra, exists := GetStra(val.Sid) if !exists { @@ -139,7 +150,7 @@ func ToJudge(historyMap *cache.JudgeItemMap, key string, val *dataobj.JudgeItem, Info: eventInfo, Detail: string(bs), Value: value, - Partition: redi.Config.Prefix + "/event/p" + strconv.Itoa(stra.Priority), + Partition: JudgeConfig.EventPrefix + "/event/p" + strconv.Itoa(stra.Priority), Sid: stra.Id, Hashid: getHashId(stra.Id, val), } @@ -423,7 +434,7 @@ func sendEvent(event *dataobj.Event) { // update last event cache.LastEvents.Set(event.ID, event) - err := redi.Push(event) + err := redisc.Push(event) if err != nil { stats.Counter.Set("redis.push.failed", 1) logger.Errorf("push event:%v err:%v", event, err) @@ -461,8 +472,8 @@ func getTags(counter string) (tags string) { func getHash(idx query.IndexData, tag string) string { if idx.Nid != "" { - return str.MD5(idx.Nid, idx.Metric, tag) + return str.ToMD5(idx.Nid, idx.Metric, tag) } - return str.MD5(idx.Endpoint, idx.Metric, tag) + return str.ToMD5(idx.Endpoint, idx.Metric, tag) } diff --git a/src/modules/judge/judge/nodata.go b/src/modules/server/judge/nodata.go similarity index 92% rename from src/modules/judge/judge/nodata.go rename to src/modules/server/judge/nodata.go index 9c0ece89..204c988e 100644 --- a/src/modules/judge/judge/nodata.go +++ b/src/modules/server/judge/nodata.go @@ -7,10 +7,9 @@ import ( "strings" "time" - "github.com/didi/nightingale/src/common/dataobj" - 
"github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/judge/backend/redi" - "github.com/didi/nightingale/src/modules/judge/cache" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" "github.com/toolkits/pkg/concurrent/semaphore" "github.com/toolkits/pkg/logger" @@ -112,7 +111,7 @@ func AsyncJudge(sema *semaphore.Semaphore, stra *models.Stra, exps []models.Exp, Info: eventInfo, Detail: string(bs), Value: value, - Partition: redi.Config.Prefix + "/event/p" + strconv.Itoa(stra.Priority), + Partition: JudgeConfig.EventPrefix + "/event/p" + strconv.Itoa(stra.Priority), Sid: stra.Id, Hashid: getHashId(stra.Id, firstItem), } diff --git a/src/modules/judge/backend/query/index.go b/src/modules/server/judge/query/index.go similarity index 74% rename from src/modules/judge/backend/query/index.go rename to src/modules/server/judge/query/index.go index c3b2ce1c..5156403c 100644 --- a/src/modules/judge/backend/query/index.go +++ b/src/modules/server/judge/query/index.go @@ -5,8 +5,8 @@ import ( "sync" "time" - "github.com/didi/nightingale/src/common/report" - "github.com/didi/nightingale/src/toolkits/stats" + "github.com/didi/nightingale/v4/src/common/stats" + "github.com/didi/nightingale/v4/src/models" "github.com/toolkits/pkg/logger" ) @@ -30,17 +30,17 @@ func (i *IndexAddrs) Get() []string { return i.Data } -func GetIndexLoop(hbsMod string) { +func GetIndexLoop() { t1 := time.NewTicker(time.Duration(9) * time.Second) - GetIndex(hbsMod) + GetIndex() for { <-t1.C - GetIndex(hbsMod) + GetIndex() } } -func GetIndex(hbsMod string) { - instances, err := report.GetAlive(Config.IndexMod, hbsMod) +func GetIndex() { + instances, err := models.GetAllInstances(Config.IndexMod, 1) if err != nil { stats.Counter.Set("get.index.err", 1) logger.Warningf("get index list err:%v", err) diff --git a/src/modules/judge/backend/query/init.go 
b/src/modules/server/judge/query/init.go similarity index 75% rename from src/modules/judge/backend/query/init.go rename to src/modules/server/judge/query/init.go index 7d4c2ba3..908ca900 100644 --- a/src/modules/judge/backend/query/init.go +++ b/src/modules/server/judge/query/init.go @@ -1,8 +1,8 @@ package query import ( - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/toolkits/pools" + "github.com/didi/nightingale/v4/src/common/address" + "github.com/didi/nightingale/v4/src/common/pools" ) var ( @@ -24,11 +24,11 @@ type SeriesQuerySection struct { IndexCallTimeout int `json:"indexCallTimeout"` // 请求超时 } -func Init(cfg SeriesQuerySection, hbsMod string) { +func Init(cfg SeriesQuerySection) { Config = cfg TransferConnPools = pools.NewConnPools( - Config.MaxConn, Config.MaxIdle, Config.ConnTimeout, Config.CallTimeout, address.GetRPCAddresses("transfer"), + Config.MaxConn, Config.MaxIdle, Config.ConnTimeout, Config.CallTimeout, address.GetRPCAddresses("server"), ) - go GetIndexLoop(hbsMod) + go GetIndexLoop() } diff --git a/src/modules/judge/backend/query/query.go b/src/modules/server/judge/query/query.go similarity index 95% rename from src/modules/judge/backend/query/query.go rename to src/modules/server/judge/query/query.go index 9dbe7253..5e91ca5e 100644 --- a/src/modules/judge/backend/query/query.go +++ b/src/modules/server/judge/query/query.go @@ -8,11 +8,11 @@ import ( "strings" "time" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/judge/cache" - "github.com/didi/nightingale/src/toolkits/stats" - "github.com/didi/nightingale/src/toolkits/str" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/stats" + "github.com/didi/nightingale/v4/src/common/str" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" "github.com/toolkits/pkg/logger" 
"github.com/toolkits/pkg/net/httplib" @@ -36,7 +36,7 @@ func Query(reqs []*dataobj.QueryData, stra *models.Stra, expFunc string) []*data if len(newReqs) > 0 { stats.Counter.Set("query.data.by.transfer", 1) for i := 0; i < 3; i++ { - err = TransferConnPools.Call("", "Transfer.Query", newReqs, &resp) + err = TransferConnPools.Call("", "Server.Query", newReqs, &resp) if err == nil { break } diff --git a/src/modules/monapi/notify/notify.go b/src/modules/server/notify/notify.go similarity index 83% rename from src/modules/monapi/notify/notify.go rename to src/modules/server/notify/notify.go index 5f7c8665..1e591b37 100644 --- a/src/modules/monapi/notify/notify.go +++ b/src/modules/server/notify/notify.go @@ -10,10 +10,13 @@ import ( "strings" "time" - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/config" + "github.com/didi/nightingale/v4/src/common/address" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/slice" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/config" + "github.com/didi/nightingale/v4/src/modules/server/cron" + "github.com/didi/nightingale/v4/src/modules/server/redisc" "github.com/toolkits/pkg/file" "github.com/toolkits/pkg/logger" @@ -49,17 +52,17 @@ func DoNotify(isUpgrade bool, events ...*models.Event) { return } - notifyTypes := config.Get().Notify[prio] + notifyTypes := config.Config.Monapi.Notify[prio] for i := 0; i < len(notifyTypes); i++ { switch notifyTypes[i] { case "voice": - if events[0].EventType == config.ALERT { + if events[0].EventType == models.ALERT { tos := []string{} for j := 0; j < len(users); j++ { tos = append(tos, users[j].Phone) } - send(config.Set(tos), events[0].Sname, "", "voice") + send(slice.Set(tos), events[0].Sname, "", "voice") } case "sms": tos := []string{} @@ -67,14 +70,14 @@ 
func DoNotify(isUpgrade bool, events ...*models.Event) { tos = append(tos, users[j].Phone) } - send(config.Set(tos), content, "", "sms") + send(slice.Set(tos), content, "", "sms") case "mail": tos := []string{} for j := 0; j < len(users); j++ { tos = append(tos, users[j].Email) } - if err := send(config.Set(tos), mailContent, subject, "mail"); err == nil { + if err := send(slice.Set(tos), mailContent, subject, "mail"); err == nil { logger.Infof("sendMail: %+v", events[0]) } case "im": @@ -83,7 +86,7 @@ func DoNotify(isUpgrade bool, events ...*models.Event) { tos = append(tos, users[j].Im) } - send(config.Set(tos), content, "", "im") + send(slice.Set(tos), content, "", "im") default: logger.Errorf("not support %s to send notify, events: %+v", notifyTypes[i], events) } @@ -96,7 +99,7 @@ func genContent(isUpgrade bool, events []*models.Event) (string, string) { return "", "" } - cfg := config.Get() + cfg := config.Config.Monapi metricList := []string{} detail, err := events[cnt-1].GetEventDetail() @@ -125,7 +128,7 @@ func genContent(isUpgrade bool, events []*models.Event) (string, string) { clink := "" curNodePath := events[cnt-1].CurNodePath - if events[0].EventType == config.ALERT { + if events[0].EventType == models.ALERT { clink = genClaimLink(events) } @@ -141,7 +144,7 @@ func genContent(isUpgrade bool, events []*models.Event) (string, string) { isAlert := false hasClaim := false isMachineDep := false - if events[0].EventType == config.ALERT { + if events[0].EventType == models.ALERT { isAlert = true } @@ -227,7 +230,7 @@ func genClaimLink(events []*models.Event) string { continue } - return fmt.Sprintf(config.Get().Link.Claim, eventCur.Id) + return fmt.Sprintf(config.Config.Monapi.Link.Claim, eventCur.Id) } return "" } @@ -241,9 +244,9 @@ func genSubject(isUpgrade bool, events []*models.Event) string { } if cnt > 1 { - subject += fmt.Sprintf("[P%d 聚合%s]%s", events[cnt-1].Priority, config.EventTypeMap[events[cnt-1].EventType], events[cnt-1].Sname) + subject += 
fmt.Sprintf("[P%d 聚合%s]%s", events[cnt-1].Priority, models.EventTypeMap[events[cnt-1].EventType], events[cnt-1].Sname) } else { - subject += fmt.Sprintf("[P%d %s]%s", events[cnt-1].Priority, config.EventTypeMap[events[cnt-1].EventType], events[cnt-1].Sname) + subject += fmt.Sprintf("[P%d %s]%s", events[cnt-1].Priority, models.EventTypeMap[events[cnt-1].EventType], events[cnt-1].Sname) } return subject + " - " + genEndpoint(events) @@ -251,7 +254,7 @@ func genSubject(isUpgrade bool, events []*models.Event) string { func genStatus(events []*models.Event) string { cnt := len(events) - status := fmt.Sprintf("P%d %s", events[cnt-1].Priority, config.EventTypeMap[events[cnt-1].EventType]) + status := fmt.Sprintf("P%d %s", events[cnt-1].Priority, models.EventTypeMap[events[cnt-1].EventType]) if cnt > 1 { status += "(聚合)" @@ -306,8 +309,8 @@ func genNameAndNoteByResources(resources []models.Resource) (name, note string) names = append(names, resources[i].Name) notes = append(notes, resources[i].Note) } - names = config.Set(names) - notes = config.Set(notes) + names = slice.Set(names) + notes = slice.Set(notes) if len(resources) == 1 { if len(names) > 0 { @@ -329,7 +332,7 @@ func getEndpoint(events []*models.Event) []string { endpointList = append(endpointList, events[i].Endpoint) } - endpointList = config.Set(endpointList) + endpointList = slice.Set(endpointList) return endpointList } @@ -339,7 +342,7 @@ func genEndpoint(events []*models.Event) string { endpointList = append(endpointList, events[i].Endpoint) } - endpointList = config.Set(endpointList) + endpointList = slice.Set(endpointList) if len(endpointList) == 1 { return endpointList[0] @@ -357,7 +360,7 @@ func genTags(events []*models.Event) string { } if len(detail) > 0 { for k, v := range detail[0].Tags { - if !config.InSlice(v, tagsMap[k]) { + if !slice.InSlice(v, tagsMap[k]) { tagsMap[k] = append(tagsMap[k], v) } } @@ -402,41 +405,36 @@ func genEtime(events []*models.Event) string { } func send(tos []string, 
content, subject, notifyType string) error { - addrs := address.GetHTTPAddresses("rdb") - perm := rand.Perm(len(addrs)) - - var err error - for i := range perm { - data := dataobj.Notify{ - Tos: tos, - Subject: subject, - Content: content, - } - - url := fmt.Sprintf("%s/v1/rdb/sender/%s", addrs[perm[i]], notifyType) - if !(strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://")) { - url = "http://" + url - } + var message dataobj.Message - res, code, err := httplib.PostJSON(url, time.Second*5, data, map[string]string{"X-Srv-Token": "rdb-builtin-token"}) - if err != nil { - logger.Errorf("call sender api failed, server: %v, data: %+v, err: %v, resp:%v, status code:%d", url, data, err, string(res), code) - continue - } + if tos == nil || len(tos) == 0 { + return fmt.Errorf("tos is empty") + } - if code != 200 { - logger.Errorf("call sender api failed, server: %v, data: %+v, resp:%v, code:%d", url, data, string(res), code) - continue - } + message.Content = strings.TrimSpace(content) + if message.Content == "" { + return fmt.Errorf("content is blank") + } - if err == nil { - break + if notifyType == "email" { + message.Subject = strings.TrimSpace(message.Subject) + if message.Subject == "" { + return fmt.Errorf("subject is blank") } + } - logger.Infof("curl %s response: %s", url, string(res)) + switch notifyType { + case "voice": + redisc.Write(&message, cron.VOICE_QUEUE_NAME) + case "sms": + redisc.Write(&message, cron.SMS_QUEUE_NAME) + case "email": + redisc.Write(&message, cron.MAIL_QUEUE_NAME) + case "im": + redisc.Write(&message, cron.IM_QUEUE_NAME) } - return err + return nil } type TicketInfo struct { @@ -454,7 +452,7 @@ type TicketReq struct { } func send2Ticket(content, subject, hashId string, prio int, eventType string, workGroups []int) { - if !config.Get().TicketEnabled { + if !config.Config.Monapi.TicketEnabled { return } diff --git a/src/modules/server/plugins/all/all.go b/src/modules/server/plugins/all/all.go new file mode 100644 index 
00000000..9149b618 --- /dev/null +++ b/src/modules/server/plugins/all/all.go @@ -0,0 +1,28 @@ +package all + +import ( + // remote + // _ "github.com/didi/nightingale/v4/src/modules/server/plugins/api" + // telegraf style + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/dns_query" + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/elasticsearch" + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/github" + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/haproxy" + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/http_response" + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/mongodb" + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/mysql" + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/net_response" + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/nginx" + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/ping" + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/prometheus" + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/rabbitmq" + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/redis" + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/tengine" + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/zookeeper" + + // local + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/log" + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/plugin" + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/port" + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/proc" +) diff --git a/src/modules/monapi/plugins/all/dlopen.go b/src/modules/server/plugins/all/dlopen.go similarity index 100% rename from src/modules/monapi/plugins/all/dlopen.go rename to src/modules/server/plugins/all/dlopen.go diff --git a/src/modules/monapi/plugins/api/api.go b/src/modules/server/plugins/api/api.go similarity index 95% rename from src/modules/monapi/plugins/api/api.go 
rename to src/modules/server/plugins/api/api.go index 2630fdd2..67c32406 100644 --- a/src/modules/monapi/plugins/api/api.go +++ b/src/modules/server/plugins/api/api.go @@ -5,8 +5,8 @@ import ( "errors" "fmt" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/collector" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/collector" "github.com/influxdata/telegraf" ) @@ -82,7 +82,7 @@ func (p ApiCollector) Create(data []byte, username string) error { if old != nil { return fmt.Errorf("同节点下策略名称 %s 已存在", name) } - return models.CreateCollect(p.Name(), username, collect) + return models.CreateCollect(p.Name(), username, collect, false) } func (p ApiCollector) Update(data []byte, username string) error { diff --git a/src/modules/monapi/plugins/demo/demo.go b/src/modules/server/plugins/demo/demo.go similarity index 77% rename from src/modules/monapi/plugins/demo/demo.go rename to src/modules/server/plugins/demo/demo.go index 721e76a1..432241ce 100644 --- a/src/modules/monapi/plugins/demo/demo.go +++ b/src/modules/server/plugins/demo/demo.go @@ -1,9 +1,9 @@ package demo import ( - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/monapi/plugins/demo/demo" - "github.com/didi/nightingale/src/toolkits/i18n" + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/didi/nightingale/v4/src/modules/server/plugins/demo/demo" "github.com/influxdata/telegraf" ) @@ -27,10 +27,10 @@ func NewDemoCollector() *DemoCollector { var ( langDict = map[string]map[string]string{ "zh": map[string]string{ - "Period": "周期", + "Period": "周期", "The period of the function, in seconds": "函数周期,单位 秒", - "Count": "数量", - "The Count of the series": "指标数量", + "Count": "数量", + "The Count of the series": "指标数量", }, } ) diff --git a/src/modules/monapi/plugins/demo/demo/demo.go 
b/src/modules/server/plugins/demo/demo/demo.go similarity index 100% rename from src/modules/monapi/plugins/demo/demo/demo.go rename to src/modules/server/plugins/demo/demo/demo.go diff --git a/src/modules/monapi/plugins/demo/demo_test.go b/src/modules/server/plugins/demo/demo_test.go similarity index 69% rename from src/modules/monapi/plugins/demo/demo_test.go rename to src/modules/server/plugins/demo/demo_test.go index 729b9e9f..585115b1 100644 --- a/src/modules/monapi/plugins/demo/demo_test.go +++ b/src/modules/server/plugins/demo/demo_test.go @@ -3,7 +3,7 @@ package demo import ( "testing" - "github.com/didi/nightingale/src/modules/monapi/plugins" + "github.com/didi/nightingale/v4/src/modules/server/plugins" ) func TestCollect(t *testing.T) { diff --git a/src/modules/server/plugins/demo/lib/lib.go b/src/modules/server/plugins/demo/lib/lib.go new file mode 100644 index 00000000..72656579 --- /dev/null +++ b/src/modules/server/plugins/demo/lib/lib.go @@ -0,0 +1,5 @@ +package main + +import ( + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/demo" +) diff --git a/src/modules/server/plugins/dns_query/dns_query.go b/src/modules/server/plugins/dns_query/dns_query.go new file mode 100644 index 00000000..56ecb2b9 --- /dev/null +++ b/src/modules/server/plugins/dns_query/dns_query.go @@ -0,0 +1,96 @@ +package dns_query + +import ( + "fmt" + + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs/dns_query" +) + +func init() { + collector.CollectorRegister(NewCollector()) // for monapi + i18n.DictRegister(langDict) +} + +type Collector struct { + *collector.BaseCollector +} + +func NewCollector() *Collector { + return &Collector{BaseCollector: collector.NewBaseCollector( + "dns_query", + collector.RemoteCategory, + func() collector.TelegrafPlugin { return &Rule{} }, + )} +} + +var ( + langDict = 
map[string]map[string]string{ + "zh": map[string]string{ + "Servers": "DNS地址", + "Network": "协议", + "Domains": "域名", + "RecordType": "记录类型", + "Port": "端口", + "Timeout": "超时", + "List of DNS": "DNS服务器列表", + "Protocol, must be tcp or udp": "请求协议,必须是 tcp 或 udp", + "List of Domains": "解析域名列表", + "DNS Record Type": "DNS记录类型", + "Port, default is 53": "DNS端口号,默认是53", + "Set timeout": "设置超时,单位是秒", + }, + } +) + +type Rule struct { + Servers []string `label:"Servers" json:"servers,required" description:"List of DNS" example:"223.5.5.5"` + Network string `label:"Network" json:"network" description:"Protocol, must be tcp or udp" example:"udp"` + Domains []string `label:"Domains" json:"domains,required" description:"List of Domains", example:"www.baidu.com"` + RecordType string `label:"RecordType" json:"record_type" enum:"[\"A\", \"AAAA\", \"CNAME\", \"MX\", \"NS\", \"PTR\", \"TXT\", \"SOA\", \"SPF\", \"SRV\"]" description:"DNS Record Type"` + Port int `label:"Port" json:"port" default:"53" description:"Port"` + Timeout int `label:"Timeout" json:"timeout" default:"10" description:"Set timeout"` +} + +func (p *Rule) Validate() error { + if len(p.Servers) == 0 || p.Servers[0] == "" { + return fmt.Errorf("dns.rule.servers must be set") + } + if p.Network == "" { + p.Network = "udp" + } + if !(p.Network == "tcp" || p.Network == "udp") { + return fmt.Errorf("net_response.rule.Network must be tcp or udp") + } + if len(p.Domains) == 0 || p.Domains[0] == "" { + return fmt.Errorf("dns.rule.domians must be set") + } + if p.RecordType == "" { + p.RecordType = "A" + } + if p.Port == 0 { + p.Port = 53 + } + if p.Timeout == 0 { + p.Timeout = 10 + } + + return nil +} + +func (p *Rule) TelegrafInput() (telegraf.Input, error) { + if err := p.Validate(); err != nil { + return nil, err + } + + return &dns_query.DnsQuery{ + Servers: p.Servers, + Network: p.Network, + Domains: p.Domains, + RecordType: p.RecordType, + Port: p.Port, + Timeout: p.Timeout, + }, nil +} diff --git 
a/src/modules/server/plugins/dns_query/dns_query_test.go b/src/modules/server/plugins/dns_query/dns_query_test.go new file mode 100644 index 00000000..dfc95d5e --- /dev/null +++ b/src/modules/server/plugins/dns_query/dns_query_test.go @@ -0,0 +1,14 @@ +package dns_query + +import ( + "testing" + + "github.com/didi/nightingale/v4/src/modules/server/plugins" +) + +func TestCollect(t *testing.T) { + plugins.PluginTest(t, &Rule{ + Servers: []string{"223.5.5.5"}, + Domains: []string{"www.baidu.com"}, + }) +} diff --git a/src/modules/monapi/plugins/elasticsearch/elasticsearch.go b/src/modules/server/plugins/elasticsearch/elasticsearch.go similarity index 95% rename from src/modules/monapi/plugins/elasticsearch/elasticsearch.go rename to src/modules/server/plugins/elasticsearch/elasticsearch.go index c1519ca9..88582a47 100644 --- a/src/modules/monapi/plugins/elasticsearch/elasticsearch.go +++ b/src/modules/server/plugins/elasticsearch/elasticsearch.go @@ -2,12 +2,13 @@ package elasticsearch import ( "fmt" - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/monapi/plugins" - "github.com/didi/nightingale/src/toolkits/i18n" + "time" + + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/didi/nightingale/v4/src/modules/server/plugins" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs/elasticsearch" - "time" ) func init() { @@ -38,7 +39,7 @@ var ( "Timeout for HTTP requests": "http请求超时时间, 单位: 秒", "ClusterHealth": "集群健康状态", "Set cluster_health to true when you want to obtain cluster health stats": "是否获取集群健康状况统计信息", - "ClusterHealthLevel": "健康状况等级", + "ClusterHealthLevel": "健康状况等级", "Adjust cluster_health_level when you want to obtain detailed health stats
The options are
- indices (default)
- cluster": "统计健康状况等级。可选(indices, cluster)", "ClusterStats": "集群运行状态", "Set cluster_stats to true when you want to obtain cluster stats.": "是否收集集群运行状态", diff --git a/src/modules/monapi/plugins/elasticsearch/elasticsearch_test.go b/src/modules/server/plugins/elasticsearch/elasticsearch_test.go similarity index 79% rename from src/modules/monapi/plugins/elasticsearch/elasticsearch_test.go rename to src/modules/server/plugins/elasticsearch/elasticsearch_test.go index f9d8bfdc..319f2632 100644 --- a/src/modules/monapi/plugins/elasticsearch/elasticsearch_test.go +++ b/src/modules/server/plugins/elasticsearch/elasticsearch_test.go @@ -1,9 +1,10 @@ package elasticsearch import ( - "github.com/didi/nightingale/src/modules/monapi/plugins" "testing" "time" + + "github.com/didi/nightingale/v4/src/modules/server/plugins" ) func TestCollect(t *testing.T) { diff --git a/src/modules/monapi/plugins/github/github.go b/src/modules/server/plugins/github/github.go similarity index 86% rename from src/modules/monapi/plugins/github/github.go rename to src/modules/server/plugins/github/github.go index 90608748..0de69905 100644 --- a/src/modules/monapi/plugins/github/github.go +++ b/src/modules/server/plugins/github/github.go @@ -4,9 +4,9 @@ import ( "fmt" "time" - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/monapi/plugins" - "github.com/didi/nightingale/src/toolkits/i18n" + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/didi/nightingale/v4/src/modules/server/plugins" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs/github" ) @@ -22,8 +22,8 @@ var ( "Repositories": "代码仓库", "List of repositories to monitor": "要监视的代码仓库存列表", "Access token": "访问令牌", - "Github API access token. Unauthenticated requests are limited to 60 per hour": "Github 接口的访问令牌. 
匿名状态下,每小时请求限制为60", - "Enterprise base url": "Github 企业版地址", + "Github API access token. Unauthenticated requests are limited to 60 per hour": "Github 接口的访问令牌. 匿名状态下,每小时请求限制为60", + "Enterprise base url": "Github 企业版地址", "Github API enterprise url. Github Enterprise accounts must specify their base url": "如果使用Github企业版,请配置企业版API地址", "HTTP timeout": "请求超时时间", "Timeout for HTTP requests": "http请求超时时间, 单位: 秒", diff --git a/src/modules/monapi/plugins/github/github_test.go b/src/modules/server/plugins/github/github_test.go similarity index 72% rename from src/modules/monapi/plugins/github/github_test.go rename to src/modules/server/plugins/github/github_test.go index 0a04a64e..6e4a4b83 100644 --- a/src/modules/monapi/plugins/github/github_test.go +++ b/src/modules/server/plugins/github/github_test.go @@ -3,7 +3,7 @@ package github import ( "testing" - "github.com/didi/nightingale/src/modules/monapi/plugins" + "github.com/didi/nightingale/v4/src/modules/server/plugins" ) func TestCollect(t *testing.T) { diff --git a/src/modules/server/plugins/haproxy/haproxy.go b/src/modules/server/plugins/haproxy/haproxy.go new file mode 100644 index 00000000..069718cb --- /dev/null +++ b/src/modules/server/plugins/haproxy/haproxy.go @@ -0,0 +1,71 @@ +package haproxy + +import ( + "fmt" + + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/didi/nightingale/v4/src/modules/server/plugins" + "github.com/didi/nightingale/v4/src/modules/server/plugins/haproxy/haproxy" + "github.com/influxdata/telegraf" +) + +func init() { + collector.CollectorRegister(NewHaproxyCollector()) // for monapi + i18n.DictRegister(langDict) +} + +type HaproxyCollector struct { + *collector.BaseCollector +} + +func NewHaproxyCollector() *HaproxyCollector { + return &HaproxyCollector{BaseCollector: collector.NewBaseCollector( + "haproxy", + collector.RemoteCategory, + func() collector.TelegrafPlugin { return &HaproxyRule{} }, + )} +} + +var ( 
+ langDict = map[string]map[string]string{ + "zh": map[string]string{ + "Servers": "Servers", + "Username": "用户名", + "Password": "密码", + }, + } +) + +type HaproxyRule struct { + Servers []string `label:"Servers" json:"servers,required" example:"http://myhaproxy.com:1936/haproxy?stats"` + KeepFieldNames bool `label:"KeepFieldNames" json:"keepFieldNames" default:"false" description:"Setting this option to true results in the plugin keeping the original"` + Username string `label:"Username" json:"username" description:"specify username"` + Password string `label:"Password" json:"password" format:"password" description:"specify server password"` + + plugins.ClientConfig +} + +func (p *HaproxyRule) Validate() error { + if len(p.Servers) == 0 || p.Servers[0] == "" { + return fmt.Errorf("haproxy.rule.servers must be set") + } + return nil +} + +func (p *HaproxyRule) TelegrafInput() (telegraf.Input, error) { + if err := p.Validate(); err != nil { + return nil, err + } + + ha := &haproxy.Haproxy{ + + Servers: p.Servers, + KeepFieldNames: p.KeepFieldNames, + Username: p.Username, + Password: p.Password, + ClientConfig: p.ClientConfig.TlsClientConfig(), + } + + return ha, nil +} diff --git a/src/modules/monapi/plugins/haproxy/haproxy/haproxy.go b/src/modules/server/plugins/haproxy/haproxy/haproxy.go similarity index 100% rename from src/modules/monapi/plugins/haproxy/haproxy/haproxy.go rename to src/modules/server/plugins/haproxy/haproxy/haproxy.go diff --git a/src/modules/monapi/plugins/http_response/http_response.go b/src/modules/server/plugins/http_response/http_response.go similarity index 76% rename from src/modules/monapi/plugins/http_response/http_response.go rename to src/modules/server/plugins/http_response/http_response.go index 181b4d76..8339976e 100644 --- a/src/modules/monapi/plugins/http_response/http_response.go +++ b/src/modules/server/plugins/http_response/http_response.go @@ -6,9 +6,9 @@ import ( "strings" "time" - 
"github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/monapi/plugins" - "github.com/didi/nightingale/src/toolkits/i18n" + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/didi/nightingale/v4/src/modules/server/plugins" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs/http_response" ) @@ -33,22 +33,22 @@ func NewCollector() *Collector { var ( langDict = map[string]map[string]string{ "zh": map[string]string{ - "URLS": "地址", - "Method": "方法", - "ResponseTimeout": "响应超时", - "Headers": "Headers", - "Username": "用户名", - "Password": "密码", - "Body": "Body", - "ResponseBodyMaxSize": "ResponseBodyMaxSize", - "ResponseStringMatch": "ResponseStringMatch", - "ResponseStatusCode": "ResponseStatusCode", - "Interface": "Interface", - "HTTPProxy": "HTTPProxy", - "FollowRedirects": "FollowRedirects", - "List of urls to query": "要监测的URL地址", - "HTTP Request Method, default GET": "HTTP 的请求方法,默认是 GET", - "HTTP Request Headers": "HTTP 请求的的 Headers", + "URLS": "地址", + "Method": "方法", + "ResponseTimeout": "响应超时", + "Headers": "Headers", + "Username": "用户名", + "Password": "密码", + "Body": "Body", + "ResponseBodyMaxSize": "ResponseBodyMaxSize", + "ResponseStringMatch": "ResponseStringMatch", + "ResponseStatusCode": "ResponseStatusCode", + "Interface": "Interface", + "HTTPProxy": "HTTPProxy", + "FollowRedirects": "FollowRedirects", + "List of urls to query": "要监测的URL地址", + "HTTP Request Method, default GET": "HTTP 的请求方法,默认是 GET", + "HTTP Request Headers": "HTTP 请求的的 Headers", "Optional HTTP Basic Auth Credentials, Username": "HTTP Basic 认证的用户名", "Optional HTTP Basic Auth Credentials, Password": "HTTP Basic 认证的密码", "Optional HTTP Request Body": "HTTP 请求的 Body", diff --git a/src/modules/monapi/plugins/http_response/http_response_test.go b/src/modules/server/plugins/http_response/http_response_test.go similarity index 79% rename from 
src/modules/monapi/plugins/http_response/http_response_test.go rename to src/modules/server/plugins/http_response/http_response_test.go index da7faf19..f5a704a9 100644 --- a/src/modules/monapi/plugins/http_response/http_response_test.go +++ b/src/modules/server/plugins/http_response/http_response_test.go @@ -3,7 +3,7 @@ package http_response import ( "testing" - "github.com/didi/nightingale/src/modules/monapi/plugins" + "github.com/didi/nightingale/v4/src/modules/server/plugins" ) func TestCollect(t *testing.T) { diff --git a/src/modules/monapi/plugins/log/log.go b/src/modules/server/plugins/log/log.go similarity index 97% rename from src/modules/monapi/plugins/log/log.go rename to src/modules/server/plugins/log/log.go index df2f0092..319d9960 100644 --- a/src/modules/monapi/plugins/log/log.go +++ b/src/modules/server/plugins/log/log.go @@ -5,8 +5,8 @@ import ( "errors" "fmt" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/collector" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/collector" "github.com/influxdata/telegraf" ) diff --git a/src/modules/monapi/plugins/mongodb/mongodb.go b/src/modules/server/plugins/mongodb/mongodb.go similarity index 94% rename from src/modules/monapi/plugins/mongodb/mongodb.go rename to src/modules/server/plugins/mongodb/mongodb.go index 42400cb5..433b7f33 100644 --- a/src/modules/monapi/plugins/mongodb/mongodb.go +++ b/src/modules/server/plugins/mongodb/mongodb.go @@ -5,9 +5,9 @@ import ( "reflect" "unsafe" - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/monapi/plugins" - "github.com/didi/nightingale/src/toolkits/i18n" + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/didi/nightingale/v4/src/modules/server/plugins" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs/mongodb" ) 
diff --git a/src/modules/monapi/plugins/mongodb/mongodb_test.go b/src/modules/server/plugins/mongodb/mongodb_test.go similarity index 85% rename from src/modules/monapi/plugins/mongodb/mongodb_test.go rename to src/modules/server/plugins/mongodb/mongodb_test.go index f545f7e1..7d9684da 100644 --- a/src/modules/monapi/plugins/mongodb/mongodb_test.go +++ b/src/modules/server/plugins/mongodb/mongodb_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/didi/nightingale/src/modules/monapi/plugins" + "github.com/didi/nightingale/v4/src/modules/server/plugins" ) func TestCollect(t *testing.T) { diff --git a/src/modules/monapi/plugins/mysql/mysql.go b/src/modules/server/plugins/mysql/mysql.go similarity index 82% rename from src/modules/monapi/plugins/mysql/mysql.go rename to src/modules/server/plugins/mysql/mysql.go index 13a67305..363a4052 100644 --- a/src/modules/monapi/plugins/mysql/mysql.go +++ b/src/modules/server/plugins/mysql/mysql.go @@ -3,9 +3,10 @@ package mysql import ( "fmt" - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/monapi/plugins" - "github.com/didi/nightingale/src/toolkits/i18n" + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/didi/nightingale/v4/src/modules/server/plugins" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs/mysql" ) @@ -35,33 +36,33 @@ var ( "if the list is empty, then metrics are gathered from all database tables": "如果列表为空,则收集所有数据库表", "Process List": "进程列表", "gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST": "采集 INFORMATION_SCHEMA.PROCESSLIST", - "User Statistics": "用户统计", + "User Statistics": "用户统计", "gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS": "采集 INFORMATION_SCHEMA.USER_STATISTICS", "Auto Increment": "自动递增变量", "gather auto_increment columns and max values from information schema": "采集 auto_increment 和 max values", - 
"Innodb Metrics": "Innodb统计", + "Innodb Metrics": "Innodb统计", "gather metrics from INFORMATION_SCHEMA.INNODB_METRICS": "采集 INFORMATION_SCHEMA.INNODB_METRICS", - "Slave Status": "Slave状态", - "gather metrics from SHOW SLAVE STATUS command output": "采集 SHOW SLAVE STATUS", + "Slave Status": "Slave状态", + "gather metrics from SHOW SLAVE STATUS command output": "采集 SHOW SLAVE STATUS", "Binary Logs": "Binary Logs", "gather metrics from SHOW BINARY LOGS command output": "采集 SHOW BINARY LOGS", - "Table IO Waits": "Table IO Waits", + "Table IO Waits": "Table IO Waits", "gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE": "采集 PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE", - "Table Lock Waits": "Table Lock Waits", + "Table Lock Waits": "Table Lock Waits", "gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS": "采集 PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS", - "Index IO Waits": "Index IO Waits", + "Index IO Waits": "Index IO Waits", "gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE": "采集 PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE", "Event Waits": "Event Waits", "gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS": "采集 PERFORMANCE_SCHEMA.EVENT_WAITS", "Tables": "Tables", "gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list": "采集 INFORMATION_SCHEMA.TABLES", - "File Events Stats": "File Events Stats", - "gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME": "采集 PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME", - "Perf Events Statements Digest Text Limit": "标准语句的最大长度", - "Perf Events Statements Limit": "根据响应时间限制语句的事件数量", - "Perf Events Statements Timelimit": "限制最后出现的事件", - "Perf Events Statements": "采集 PERFORMANCE_SCHEMA", - "gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST": "采集 PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST", + "File Events Stats": "File Events Stats", + "gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME": "采集 
PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME", + "Perf Events Statements Digest Text Limit": "标准语句的最大长度", + "Perf Events Statements Limit": "根据响应时间限制语句的事件数量", + "Perf Events Statements Timelimit": "限制最后出现的事件", + "Perf Events Statements": "采集 PERFORMANCE_SCHEMA", + "gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST": "采集 PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST", "specify servers via a url matching
[username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
see https://github.com/go-sql-driver/mysql#dsn-data-source-name": "通过URL设置指定服务器
[username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
参考 https://github.com/go-sql-driver/mysql#dsn-data-source-name", "Interval Slow": "周期限制", "Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)": "限制一些查询的最小间隔(比如 SHOW GLOBAL VARIABLES)", @@ -121,7 +122,7 @@ func (p *MysqlRule) TelegrafInput() (telegraf.Input, error) { } return &mysql.Mysql{ - Servers: p.Servers, + Servers: p.Servers, PerfEventsStatementsDigestTextLimit: p.PerfEventsStatementsDigestTextLimit, PerfEventsStatementsLimit: p.PerfEventsStatementsLimit, PerfEventsStatementsTimeLimit: p.PerfEventsStatementsTimeLimit, diff --git a/src/modules/monapi/plugins/mysql/mysql_test.go b/src/modules/server/plugins/mysql/mysql_test.go similarity index 82% rename from src/modules/monapi/plugins/mysql/mysql_test.go rename to src/modules/server/plugins/mysql/mysql_test.go index 831bb4fb..6720261f 100644 --- a/src/modules/monapi/plugins/mysql/mysql_test.go +++ b/src/modules/server/plugins/mysql/mysql_test.go @@ -4,7 +4,7 @@ import ( "os" "testing" - "github.com/didi/nightingale/src/modules/monapi/plugins" + "github.com/didi/nightingale/v4/src/modules/server/plugins" ) func TestCollect(t *testing.T) { diff --git a/src/modules/monapi/plugins/net_response/net_response.go b/src/modules/server/plugins/net_response/net_response.go similarity index 87% rename from src/modules/monapi/plugins/net_response/net_response.go rename to src/modules/server/plugins/net_response/net_response.go index a9f1cbdc..a1cb95b9 100644 --- a/src/modules/monapi/plugins/net_response/net_response.go +++ b/src/modules/server/plugins/net_response/net_response.go @@ -4,9 +4,10 @@ import ( "fmt" "time" - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/monapi/plugins" - "github.com/didi/nightingale/src/toolkits/i18n" + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/didi/nightingale/v4/src/modules/server/plugins" + "github.com/influxdata/telegraf" 
"github.com/influxdata/telegraf/plugins/inputs/net_response" ) @@ -38,8 +39,8 @@ var ( "Send": "Send", "Expect": "Expect", "readme - https://github.com/influxdata/telegraf/tree/master/plugins/inputs/net_response": "更多说明详细详见 https://github.com/influxdata/telegraf/tree/master/plugins/inputs/net_response", - "Protocol, must be tcp or udp": "请求协议,必须是 tcp 或 udp", - "Set timeout": "设置超时,单位是秒", + "Protocol, must be tcp or udp": "请求协议,必须是 tcp 或 udp", + "Set timeout": "设置超时,单位是秒", "Set read timeout (only used if expecting a response)": "设置读取的超时(仅当配置了 expect response 时使用),单位是秒", "string sent to the server, udp required": "发送给服务器的字符串,udp 必须", "expected string in answer, udp required": "期待服务器返回的字符串(部分),udp 必须", diff --git a/src/modules/monapi/plugins/net_response/net_response_test.go b/src/modules/server/plugins/net_response/net_response_test.go similarity index 70% rename from src/modules/monapi/plugins/net_response/net_response_test.go rename to src/modules/server/plugins/net_response/net_response_test.go index 6d20ec6e..95faf419 100644 --- a/src/modules/monapi/plugins/net_response/net_response_test.go +++ b/src/modules/server/plugins/net_response/net_response_test.go @@ -3,7 +3,7 @@ package net_response import ( "testing" - "github.com/didi/nightingale/src/modules/monapi/plugins" + "github.com/didi/nightingale/v4/src/modules/server/plugins" ) func TestCollect(t *testing.T) { diff --git a/src/modules/monapi/plugins/nginx/nginx.go b/src/modules/server/plugins/nginx/nginx.go similarity index 64% rename from src/modules/monapi/plugins/nginx/nginx.go rename to src/modules/server/plugins/nginx/nginx.go index b5f65ba9..26cc7607 100644 --- a/src/modules/monapi/plugins/nginx/nginx.go +++ b/src/modules/server/plugins/nginx/nginx.go @@ -2,12 +2,14 @@ package nginx import ( "fmt" - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/monapi/plugins" - "github.com/didi/nightingale/src/toolkits/i18n" + "time" + + 
"github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/didi/nightingale/v4/src/modules/server/plugins" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs/nginx" - "time" ) func init() { @@ -32,15 +34,15 @@ var ( "zh": map[string]string{ "Urls": "服务", "An array of Nginx stub_status URI to gather stats.": "查看Nginx状态的地址", - "ResponseTimeout":"响应超时时间", - "HTTP response timeout (default: 5s)": "HTTP响应超时时间(单位: 秒),默认5秒", + "ResponseTimeout": "响应超时时间", + "HTTP response timeout (default: 5s)": "HTTP响应超时时间(单位: 秒),默认5秒", }, } ) type Rule struct { - Urls []string `label:"Urls" json:"urls,required" description:"An array of Nginx stub_status URI to gather stats." example:"http://localhost/status"` - ResponseTimeout int `label:"ResponseTimeout" json:"response_timeout" default:"5" description:"HTTP response timeout (default: 5s)"` + Urls []string `label:"Urls" json:"urls,required" description:"An array of Nginx stub_status URI to gather stats." 
example:"http://localhost/status"` + ResponseTimeout int `label:"ResponseTimeout" json:"response_timeout" default:"5" description:"HTTP response timeout (default: 5s)"` plugins.ClientConfig } diff --git a/src/modules/monapi/plugins/nginx/nginx_test.go b/src/modules/server/plugins/nginx/nginx_test.go similarity index 79% rename from src/modules/monapi/plugins/nginx/nginx_test.go rename to src/modules/server/plugins/nginx/nginx_test.go index 39b4c507..532ff9ca 100644 --- a/src/modules/monapi/plugins/nginx/nginx_test.go +++ b/src/modules/server/plugins/nginx/nginx_test.go @@ -1,9 +1,10 @@ package nginx import ( - "github.com/didi/nightingale/src/modules/monapi/plugins" "testing" "time" + + "github.com/didi/nightingale/v4/src/modules/server/plugins" ) func TestCollect(t *testing.T) { diff --git a/src/modules/monapi/plugins/ping/ping.go b/src/modules/server/plugins/ping/ping.go similarity index 94% rename from src/modules/monapi/plugins/ping/ping.go rename to src/modules/server/plugins/ping/ping.go index cc784fe9..04d3e41c 100644 --- a/src/modules/monapi/plugins/ping/ping.go +++ b/src/modules/server/plugins/ping/ping.go @@ -3,10 +3,11 @@ package ping import ( "fmt" - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/monapi/plugins" - "github.com/didi/nightingale/src/modules/monapi/plugins/ping/ping" - "github.com/didi/nightingale/src/toolkits/i18n" + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/didi/nightingale/v4/src/modules/server/plugins" + "github.com/didi/nightingale/v4/src/modules/server/plugins/ping/ping" + "github.com/influxdata/telegraf" ) diff --git a/src/modules/monapi/plugins/ping/ping/exec.go b/src/modules/server/plugins/ping/ping/exec.go similarity index 100% rename from src/modules/monapi/plugins/ping/ping/exec.go rename to src/modules/server/plugins/ping/ping/exec.go diff --git 
a/src/modules/monapi/plugins/ping/ping/exec_unix.go b/src/modules/server/plugins/ping/ping/exec_unix.go similarity index 100% rename from src/modules/monapi/plugins/ping/ping/exec_unix.go rename to src/modules/server/plugins/ping/ping/exec_unix.go diff --git a/src/modules/monapi/plugins/ping/ping/exec_windows.go b/src/modules/server/plugins/ping/ping/exec_windows.go similarity index 100% rename from src/modules/monapi/plugins/ping/ping/exec_windows.go rename to src/modules/server/plugins/ping/ping/exec_windows.go diff --git a/src/modules/monapi/plugins/ping/ping/ping.go b/src/modules/server/plugins/ping/ping/ping.go similarity index 100% rename from src/modules/monapi/plugins/ping/ping/ping.go rename to src/modules/server/plugins/ping/ping/ping.go diff --git a/src/modules/monapi/plugins/ping/ping/ping_notwindows.go b/src/modules/server/plugins/ping/ping/ping_notwindows.go similarity index 100% rename from src/modules/monapi/plugins/ping/ping/ping_notwindows.go rename to src/modules/server/plugins/ping/ping/ping_notwindows.go diff --git a/src/modules/monapi/plugins/ping/ping/ping_test.go b/src/modules/server/plugins/ping/ping/ping_test.go similarity index 100% rename from src/modules/monapi/plugins/ping/ping/ping_test.go rename to src/modules/server/plugins/ping/ping/ping_test.go diff --git a/src/modules/monapi/plugins/ping/ping/ping_windows.go b/src/modules/server/plugins/ping/ping/ping_windows.go similarity index 100% rename from src/modules/monapi/plugins/ping/ping/ping_windows.go rename to src/modules/server/plugins/ping/ping/ping_windows.go diff --git a/src/modules/monapi/plugins/ping/ping/ping_windows_test.go b/src/modules/server/plugins/ping/ping/ping_windows_test.go similarity index 100% rename from src/modules/monapi/plugins/ping/ping/ping_windows_test.go rename to src/modules/server/plugins/ping/ping/ping_windows_test.go diff --git a/src/modules/monapi/plugins/ping/ping_test.go b/src/modules/server/plugins/ping/ping_test.go similarity index 71% rename from 
src/modules/monapi/plugins/ping/ping_test.go rename to src/modules/server/plugins/ping/ping_test.go index 4ce5e346..4b8f162a 100644 --- a/src/modules/monapi/plugins/ping/ping_test.go +++ b/src/modules/server/plugins/ping/ping_test.go @@ -1,8 +1,9 @@ package ping import ( - "github.com/didi/nightingale/src/modules/monapi/plugins" "testing" + + "github.com/didi/nightingale/v4/src/modules/server/plugins" ) func TestCollect(t *testing.T) { diff --git a/src/modules/monapi/plugins/plugin/plugin.go b/src/modules/server/plugins/plugin/plugin.go similarity index 97% rename from src/modules/monapi/plugins/plugin/plugin.go rename to src/modules/server/plugins/plugin/plugin.go index 419ce7cd..a7390464 100644 --- a/src/modules/monapi/plugins/plugin/plugin.go +++ b/src/modules/server/plugins/plugin/plugin.go @@ -5,8 +5,9 @@ import ( "errors" "fmt" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/collector" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/influxdata/telegraf" ) diff --git a/src/modules/monapi/plugins/port/port.go b/src/modules/server/plugins/port/port.go similarity index 97% rename from src/modules/monapi/plugins/port/port.go rename to src/modules/server/plugins/port/port.go index 05377e3f..19162f6e 100644 --- a/src/modules/monapi/plugins/port/port.go +++ b/src/modules/server/plugins/port/port.go @@ -5,8 +5,9 @@ import ( "errors" "fmt" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/collector" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/influxdata/telegraf" ) diff --git a/src/modules/monapi/plugins/proc/proc.go b/src/modules/server/plugins/proc/proc.go similarity index 97% rename from src/modules/monapi/plugins/proc/proc.go rename to src/modules/server/plugins/proc/proc.go index a86b8c32..5819194f 100644 --- 
a/src/modules/monapi/plugins/proc/proc.go +++ b/src/modules/server/plugins/proc/proc.go @@ -5,8 +5,9 @@ import ( "errors" "fmt" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/monapi/collector" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/influxdata/telegraf" ) diff --git a/src/modules/monapi/plugins/prometheus/prometheus.go b/src/modules/server/plugins/prometheus/prometheus.go similarity index 94% rename from src/modules/monapi/plugins/prometheus/prometheus.go rename to src/modules/server/plugins/prometheus/prometheus.go index 7da1c55c..92ea203f 100644 --- a/src/modules/monapi/plugins/prometheus/prometheus.go +++ b/src/modules/server/plugins/prometheus/prometheus.go @@ -4,9 +4,10 @@ import ( "fmt" "time" - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/monapi/plugins" - "github.com/didi/nightingale/src/toolkits/i18n" + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/didi/nightingale/v4/src/modules/server/plugins" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs/prometheus" ) @@ -29,7 +30,7 @@ var ( "HTTP Basic Authentication username": "HTTP认证用户名", "HTTP Basic Authentication password": "HTTP认证密码", "RESP Timeout": "请求超时时间", - "Specify timeout duration for slower prometheus clients": "k8s请求超时时间, 单位: 秒", + "Specify timeout duration for slower prometheus clients": "k8s请求超时时间, 单位: 秒", }, } ) diff --git a/src/modules/monapi/plugins/prometheus/prometheus_test.go b/src/modules/server/plugins/prometheus/prometheus_test.go similarity index 97% rename from src/modules/monapi/plugins/prometheus/prometheus_test.go rename to src/modules/server/plugins/prometheus/prometheus_test.go index 68102120..e6c154c5 100644 --- a/src/modules/monapi/plugins/prometheus/prometheus_test.go +++ 
b/src/modules/server/plugins/prometheus/prometheus_test.go @@ -6,7 +6,7 @@ import ( "net/http/httptest" "testing" - "github.com/didi/nightingale/src/modules/monapi/plugins" + "github.com/didi/nightingale/v4/src/modules/server/plugins" ) const sampleTextFormat = `# HELP test_metric An untyped metric with a timestamp diff --git a/src/modules/monapi/plugins/rabbitmq/rabbitmq.go b/src/modules/server/plugins/rabbitmq/rabbitmq.go similarity index 50% rename from src/modules/monapi/plugins/rabbitmq/rabbitmq.go rename to src/modules/server/plugins/rabbitmq/rabbitmq.go index 0844752f..496ab5a1 100644 --- a/src/modules/monapi/plugins/rabbitmq/rabbitmq.go +++ b/src/modules/server/plugins/rabbitmq/rabbitmq.go @@ -2,11 +2,13 @@ package rabbitmq import ( "fmt" - "time" "reflect" - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/monapi/plugins" - "github.com/didi/nightingale/src/toolkits/i18n" + "time" + + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/didi/nightingale/v4/src/modules/server/plugins" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs/rabbitmq" ) @@ -42,23 +44,22 @@ var ( "exchanges": "Exchange交换机", "QueueNameInclude": "包含队列", "QueueNameExclude": "排除队列", - }, } ) type RabbitMQRule struct { - URL string `label:"URL" json:"url,required" example:"http://localhost:15672"` - Name string `label:"Name" json:"Name" description:"Tag added to rabbitmq_overview series"` - Username string `label:"Username" json:"username,required" description:"specify username"` - Password string `label:"Password" json:"password,required" format:"password" description:"specify server password"` - ResponseHeaderTimeout int `label:"header time out" json:"header_timeout" default:"3" description:"for a server's response headers after fully writing the request"` - ClientTimeout int `label:"client time out" json:"client_timeout" 
default:"4" description:"for a server's response headers after fully writing the request"` - Nodes []string `label:"nodes" json:"nodes" description:"A list of nodes to gather as the rabbitmq_node measurement"` - Queues []string `label:"queues" json:"queues" description:"A list of queues to gather as the rabbitmq_queue measurement"` - Exchanges []string `label:"exchanges" json:"exchanges" description:"A list of exchanges to gather as the rabbitmq_exchange measurement"` - QueueNameInclude []string `label:"queue name include" json:"queue_name_include" description:"Queues to include."` - QueueNameExclude []string `label:"queue name exclude" json:"queue_name_exclude" description:"Queues to exclude."` + URL string `label:"URL" json:"url,required" example:"http://localhost:15672"` + Name string `label:"Name" json:"Name" description:"Tag added to rabbitmq_overview series"` + Username string `label:"Username" json:"username,required" description:"specify username"` + Password string `label:"Password" json:"password,required" format:"password" description:"specify server password"` + ResponseHeaderTimeout int `label:"header time out" json:"header_timeout" default:"3" description:"for a server's response headers after fully writing the request"` + ClientTimeout int `label:"client time out" json:"client_timeout" default:"4" description:"for a server's response headers after fully writing the request"` + Nodes []string `label:"nodes" json:"nodes" description:"A list of nodes to gather as the rabbitmq_node measurement"` + Queues []string `label:"queues" json:"queues" description:"A list of queues to gather as the rabbitmq_queue measurement"` + Exchanges []string `label:"exchanges" json:"exchanges" description:"A list of exchanges to gather as the rabbitmq_exchange measurement"` + QueueNameInclude []string `label:"queue name include" json:"queue_name_include" description:"Queues to include."` + QueueNameExclude []string `label:"queue name exclude" json:"queue_name_exclude" 
description:"Queues to exclude."` FederationUpstreamInclude []string `label:"FederationUpstreamInclude" json:"federation_upstream_include" description:"exchange filters include"` FederationUpstreamExclude []string `label:"FederationUpstreamExclude" json:"federation_upstream_exclude" description:"exchange filters exclude"` plugins.ClientConfig @@ -78,16 +79,16 @@ func (p *RabbitMQRule) TelegrafInput() (telegraf.Input, error) { mq := &rabbitmq.RabbitMQ{ - URL: p.URL, - Name: p.Name, - Username: p.Username, - Password: p.Password, - Nodes: p.Nodes, - Queues: p.Queues, - Exchanges: p.Exchanges, - QueueInclude: p.QueueNameInclude, - QueueExclude: p.QueueNameExclude, - ClientConfig: p.ClientConfig.TlsClientConfig(), + URL: p.URL, + Name: p.Name, + Username: p.Username, + Password: p.Password, + Nodes: p.Nodes, + Queues: p.Queues, + Exchanges: p.Exchanges, + QueueInclude: p.QueueNameInclude, + QueueExclude: p.QueueNameExclude, + ClientConfig: p.ClientConfig.TlsClientConfig(), } v := reflect.ValueOf(&(mq.ResponseHeaderTimeout.Duration)).Elem() v.Set(reflect.ValueOf(time.Second * time.Duration(p.ResponseHeaderTimeout))) @@ -95,4 +96,3 @@ func (p *RabbitMQRule) TelegrafInput() (telegraf.Input, error) { v1.Set(reflect.ValueOf(time.Second * time.Duration(p.ClientTimeout))) return mq, nil } - diff --git a/src/modules/monapi/plugins/redis/redis.go b/src/modules/server/plugins/redis/redis.go similarity index 94% rename from src/modules/monapi/plugins/redis/redis.go rename to src/modules/server/plugins/redis/redis.go index c9279aad..42df13e0 100644 --- a/src/modules/monapi/plugins/redis/redis.go +++ b/src/modules/server/plugins/redis/redis.go @@ -4,9 +4,10 @@ import ( "fmt" "strings" - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/monapi/plugins" - "github.com/didi/nightingale/src/toolkits/i18n" + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/modules/server/collector" + 
"github.com/didi/nightingale/v4/src/modules/server/plugins" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs/redis" ) diff --git a/src/modules/monapi/plugins/redis/redist_test.go b/src/modules/server/plugins/redis/redist_test.go similarity index 83% rename from src/modules/monapi/plugins/redis/redist_test.go rename to src/modules/server/plugins/redis/redist_test.go index e3fb5343..dc45dc35 100644 --- a/src/modules/monapi/plugins/redis/redist_test.go +++ b/src/modules/server/plugins/redis/redist_test.go @@ -4,7 +4,7 @@ import ( "os" "testing" - "github.com/didi/nightingale/src/modules/monapi/plugins" + "github.com/didi/nightingale/v4/src/modules/server/plugins" ) func TestCollect(t *testing.T) { diff --git a/src/modules/monapi/plugins/tengine/tengine.go b/src/modules/server/plugins/tengine/tengine.go similarity index 80% rename from src/modules/monapi/plugins/tengine/tengine.go rename to src/modules/server/plugins/tengine/tengine.go index 84b0fc0c..59dbf58b 100644 --- a/src/modules/monapi/plugins/tengine/tengine.go +++ b/src/modules/server/plugins/tengine/tengine.go @@ -2,12 +2,14 @@ package tengine import ( "fmt" - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/monapi/plugins" - "github.com/didi/nightingale/src/toolkits/i18n" + "time" + + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/didi/nightingale/v4/src/modules/server/plugins" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs/tengine" - "time" ) func init() { @@ -32,8 +34,8 @@ var ( "zh": map[string]string{ "Urls": "服务", "An array of Tengine reqstat module URI to gather stats.": "查看Tengine状态的地址", - "ResponseTimeout": "响应超时时间", - "HTTP response timeout (default: 5s)": "HTTP响应超时时间(单位: 秒),默认5秒", + "ResponseTimeout": "响应超时时间", + "HTTP response timeout (default: 5s)": "HTTP响应超时时间(单位: 秒),默认5秒", }, } ) diff --git 
a/src/modules/monapi/plugins/tengine/tengine_test.go b/src/modules/server/plugins/tengine/tengine_test.go similarity index 78% rename from src/modules/monapi/plugins/tengine/tengine_test.go rename to src/modules/server/plugins/tengine/tengine_test.go index a8c6968a..bf36bb6d 100644 --- a/src/modules/monapi/plugins/tengine/tengine_test.go +++ b/src/modules/server/plugins/tengine/tengine_test.go @@ -1,9 +1,10 @@ package tengine import ( - "github.com/didi/nightingale/src/modules/monapi/plugins" "testing" "time" + + "github.com/didi/nightingale/v4/src/modules/server/plugins" ) func TestCollect(t *testing.T) { diff --git a/src/modules/monapi/plugins/types.go b/src/modules/server/plugins/types.go similarity index 96% rename from src/modules/monapi/plugins/types.go rename to src/modules/server/plugins/types.go index a35fdde0..e56df631 100644 --- a/src/modules/monapi/plugins/types.go +++ b/src/modules/server/plugins/types.go @@ -1,7 +1,8 @@ package plugins import ( - "github.com/didi/nightingale/src/toolkits/i18n" + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/influxdata/telegraf/plugins/common/tls" ) diff --git a/src/modules/monapi/plugins/util.go b/src/modules/server/plugins/util.go similarity index 94% rename from src/modules/monapi/plugins/util.go rename to src/modules/server/plugins/util.go index b31982fb..31f8ae4a 100644 --- a/src/modules/monapi/plugins/util.go +++ b/src/modules/server/plugins/util.go @@ -6,8 +6,9 @@ import ( "strings" "testing" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/prober/manager/accumulator" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/prober/manager/accumulator" + "github.com/influxdata/telegraf" "github.com/toolkits/pkg/logger" ) diff --git a/src/modules/monapi/plugins/zookeeper/zookeeper.go b/src/modules/server/plugins/zookeeper/zookeeper.go similarity index 91% rename from 
src/modules/monapi/plugins/zookeeper/zookeeper.go rename to src/modules/server/plugins/zookeeper/zookeeper.go index 12a0572c..a4de4df9 100644 --- a/src/modules/monapi/plugins/zookeeper/zookeeper.go +++ b/src/modules/server/plugins/zookeeper/zookeeper.go @@ -2,12 +2,14 @@ package zookeeper import ( "fmt" - "github.com/didi/nightingale/src/modules/monapi/collector" - "github.com/didi/nightingale/src/modules/monapi/plugins" - "github.com/didi/nightingale/src/toolkits/i18n" + "time" + + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/didi/nightingale/v4/src/modules/server/plugins" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs/zookeeper" - "time" ) func init() { diff --git a/src/modules/monapi/plugins/zookeeper/zookeeper_test.go b/src/modules/server/plugins/zookeeper/zookeeper_test.go similarity index 78% rename from src/modules/monapi/plugins/zookeeper/zookeeper_test.go rename to src/modules/server/plugins/zookeeper/zookeeper_test.go index f9498c0f..21257b34 100644 --- a/src/modules/monapi/plugins/zookeeper/zookeeper_test.go +++ b/src/modules/server/plugins/zookeeper/zookeeper_test.go @@ -1,9 +1,10 @@ package zookeeper import ( - "github.com/didi/nightingale/src/modules/monapi/plugins" "testing" "time" + + "github.com/didi/nightingale/v4/src/modules/server/plugins" ) func TestCollect(t *testing.T) { diff --git a/src/modules/rdb/rabbitmq/conn.go b/src/modules/server/rabbitmq/conn.go similarity index 74% rename from src/modules/rdb/rabbitmq/conn.go rename to src/modules/server/rabbitmq/conn.go index 57a115b1..d15539f0 100644 --- a/src/modules/rdb/rabbitmq/conn.go +++ b/src/modules/server/rabbitmq/conn.go @@ -6,19 +6,25 @@ import ( "github.com/streadway/amqp" "github.com/toolkits/pkg/logger" - - "github.com/didi/nightingale/src/modules/rdb/config" ) var ( - conn *amqp.Connection - exit = make(chan bool) + conn *amqp.Connection + exit = make(chan bool) + 
RabbitMQ RabbitmqSection ) -func Init() { - if config.Config.RabbitMQ.Enable { - dial(config.Config.RabbitMQ.Addr) - go Consume(config.Config.RabbitMQ.Addr, config.Config.RabbitMQ.Queue) +type RabbitmqSection struct { + Enable bool `yaml:"enable"` + Addr string `yaml:"addr"` + Queue string `yaml:"queue"` +} + +func Init(rabbitMQ RabbitmqSection) { + RabbitMQ = rabbitMQ + if RabbitMQ.Enable { + dial(RabbitMQ.Addr) + go Consume(RabbitMQ.Addr, RabbitMQ.Queue) } } @@ -79,7 +85,7 @@ func close() { } func Shutdown() { - if config.Config.RabbitMQ.Enable { + if RabbitMQ.Enable { conn.Close() exit <- true } diff --git a/src/modules/rdb/rabbitmq/queue_consume.go b/src/modules/server/rabbitmq/queue_consume.go similarity index 100% rename from src/modules/rdb/rabbitmq/queue_consume.go rename to src/modules/server/rabbitmq/queue_consume.go diff --git a/src/modules/rdb/rabbitmq/request_handler.go b/src/modules/server/rabbitmq/request_handler.go similarity index 98% rename from src/modules/rdb/rabbitmq/request_handler.go rename to src/modules/server/rabbitmq/request_handler.go index 0beba2da..68931d7e 100644 --- a/src/modules/rdb/rabbitmq/request_handler.go +++ b/src/modules/server/rabbitmq/request_handler.go @@ -5,7 +5,7 @@ import ( "github.com/toolkits/pkg/logger" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" ) type MQRequest struct { diff --git a/src/modules/server/redisc/funcs.go b/src/modules/server/redisc/funcs.go new file mode 100644 index 00000000..37e83d71 --- /dev/null +++ b/src/modules/server/redisc/funcs.go @@ -0,0 +1,187 @@ +package redisc + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/stats" + + "github.com/garyburd/redigo/redis" + "github.com/toolkits/pkg/logger" +) + +func HasKey(key string) bool { + rc := RedisConnPool.Get() + defer rc.Close() + + ret, _ := redis.Bool(rc.Do("EXISTS", key)) + + return ret +} + +func 
INCR(key string) int { + rc := RedisConnPool.Get() + defer rc.Close() + + ret, err := redis.Int(rc.Do("INCR", key)) + if err != nil { + logger.Errorf("incr %s error: %v", key, err) + } + + return ret +} + +func GET(key string) int64 { + rc := RedisConnPool.Get() + defer rc.Close() + + ret, err := redis.Int64(rc.Do("GET", key)) + if err != nil { + logger.Errorf("get %s error: %v", key, err) + } + + return ret +} + +func SetWithTTL(key string, value interface{}, ttl int) error { + rc := RedisConnPool.Get() + defer rc.Close() + + _, err := rc.Do("SET", key, value, "EX", ttl) + return err +} + +func Set(key string, value interface{}) error { + rc := RedisConnPool.Get() + defer rc.Close() + + _, err := rc.Do("SET", key, value) + return err +} + +func DelKey(key string) error { + rc := RedisConnPool.Get() + defer rc.Close() + + _, err := rc.Do("DEL", key) + return err +} + +func HSET(key string, field interface{}, value interface{}) (int64, error) { + rc := RedisConnPool.Get() + defer rc.Close() + + return redis.Int64(rc.Do("HSET", key, field, value)) +} + +func HKEYS(key string) ([]string, error) { + rc := RedisConnPool.Get() + defer rc.Close() + + return redis.Strings(rc.Do("HKEYS", key)) +} + +func HDEL(keys []interface{}) (int64, error) { + rc := RedisConnPool.Get() + defer rc.Close() + + return redis.Int64(rc.Do("HDEL", keys...)) +} + +func Push(event *dataobj.Event) error { + bytes, err := json.Marshal(event) + if err != nil { + err = fmt.Errorf("redis publish failed, error:%v", err) + return err + } + + rc := RedisConnPool.Get() + defer rc.Close() + + // 如果写入用lpush 则读出应该用 rpop + // 如果写入用rpush 则读出应该用 lpop + stats.Counter.Set("redis.push", 1) + _, err = rc.Do("LPUSH", event.Partition, string(bytes)) + if err == nil { + logger.Debugf("redis publish succ, event: %s", string(bytes)) + return nil + } + + return fmt.Errorf("redis publish failed finally:%v", err) +} + +func Pop(count int, queue string) []*dataobj.Message { + var ret []*dataobj.Message + + rc := 
RedisConnPool.Get() + defer rc.Close() + + for i := 0; i < count; i++ { + reply, err := redis.String(rc.Do("RPOP", queue)) + if err != nil { + if err != redis.ErrNil { + logger.Errorf("rpop queue:%s failed, err: %v", queue, err) + } + break + } + + if reply == "" || reply == "nil" { + continue + } + + var message dataobj.Message + err = json.Unmarshal([]byte(reply), &message) + if err != nil { + logger.Errorf("unmarshal message failed, err: %v, redis reply: %v", err, reply) + continue + } + + ret = append(ret, &message) + } + + return ret +} + +func lpush(queue, message string) error { + rc := RedisConnPool.Get() + defer rc.Close() + _, err := rc.Do("LPUSH", queue, message) + if err != nil { + logger.Errorf("LPUSH %s fail, message:%s, error:%v", queue, message, err) + } + return err +} + +// Write LPUSH message to redis +func Write(data *dataobj.Message, queue string) error { + if data == nil { + return fmt.Errorf("message is nil") + } + + data.Tos = removeEmptyString(data.Tos) + + bs, err := json.Marshal(data) + if err != nil { + logger.Errorf("marshal message failed, message: %+v, err: %v", data, err) + return err + } + + logger.Debugf("write message to queue, message:%+v, queue:%s", data, queue) + return lpush(queue, string(bs)) +} + +func removeEmptyString(s []string) []string { + cnt := len(s) + ss := make([]string, 0, cnt) + for i := 0; i < cnt; i++ { + if strings.TrimSpace(s[i]) == "" { + continue + } + + ss = append(ss, s[i]) + } + + return ss +} diff --git a/src/modules/rdb/redisc/redis.go b/src/modules/server/redisc/redis.go similarity index 57% rename from src/modules/rdb/redisc/redis.go rename to src/modules/server/redisc/redis.go index 51f48a2c..e45a6d4d 100644 --- a/src/modules/rdb/redisc/redis.go +++ b/src/modules/server/redisc/redis.go @@ -4,28 +4,41 @@ import ( "time" "github.com/garyburd/redigo/redis" - "github.com/toolkits/pkg/logger" - "github.com/didi/nightingale/src/modules/rdb/config" + "github.com/toolkits/pkg/logger" ) var RedisConnPool 
*redis.Pool -func InitRedis() { - cfg := config.Config +type RedisSection struct { + Local localRedis `yaml:"local"` +} - if !cfg.Redis.Enable { - return - } +type localRedis struct { + Enable bool `yaml:"enable"` + Addr string `yaml:"addr"` + Pass string `yaml:"pass"` + Idle int `yaml:"idle"` + Timeout timeoutSection `yaml:"timeout"` +} - addr := cfg.Redis.Addr - pass := cfg.Redis.Pass - maxIdle := cfg.Redis.Idle +type timeoutSection struct { + Conn int `yaml:"conn"` + Read int `yaml:"read"` + Write int `yaml:"write"` +} + +func InitRedis(r RedisSection) { + cfg := r.Local + + addr := cfg.Addr + pass := cfg.Pass + maxIdle := cfg.Idle idleTimeout := 240 * time.Second - connTimeout := time.Duration(cfg.Redis.Timeout.Conn) * time.Millisecond - readTimeout := time.Duration(cfg.Redis.Timeout.Read) * time.Millisecond - writeTimeout := time.Duration(cfg.Redis.Timeout.Write) * time.Millisecond + connTimeout := time.Duration(cfg.Timeout.Conn) * time.Millisecond + readTimeout := time.Duration(cfg.Timeout.Read) * time.Millisecond + writeTimeout := time.Duration(cfg.Timeout.Write) * time.Millisecond RedisConnPool = &redis.Pool{ MaxIdle: maxIdle, @@ -59,9 +72,6 @@ func PingRedis(c redis.Conn, t time.Time) error { } func CloseRedis() { - if !config.Config.Redis.Enable { - return - } logger.Info("closing redis...") RedisConnPool.Close() } diff --git a/src/modules/server/rpc/ams_host.go b/src/modules/server/rpc/ams_host.go new file mode 100644 index 00000000..40d3166e --- /dev/null +++ b/src/modules/server/rpc/ams_host.go @@ -0,0 +1,17 @@ +package rpc + +import ( + "fmt" + + "github.com/didi/nightingale/v4/src/models" +) + +func (*Server) HostRegister(host models.HostRegisterForm, output *string) error { + host.Validate() + err := models.HostRegister(host) + if err != nil { + *output = fmt.Sprintf("%v", err) + } + + return nil +} diff --git a/src/modules/server/rpc/hbs_heartbeat.go b/src/modules/server/rpc/hbs_heartbeat.go new file mode 100644 index 00000000..30edba95 --- 
/dev/null +++ b/src/modules/server/rpc/hbs_heartbeat.go @@ -0,0 +1,26 @@ +package rpc + +import ( + "fmt" + + "github.com/didi/nightingale/v4/src/models" +) + +func (*Server) HeartBeat(rev models.Instance, output *string) error { + err := models.ReportHeartBeat(rev) + if err != nil { + *output = fmt.Sprintf("%v", err) + } + + return nil +} + +func (*Server) InstanceGets(mod string, instancesResp *models.InstancesResp) error { + var err error + instancesResp.Data, err = models.GetAllInstances(mod, 1) + if err != nil { + instancesResp.Msg = fmt.Sprintf("get %s installs err:%v", mod, err) + } + + return nil +} diff --git a/src/modules/job/rpc/meta.go b/src/modules/server/rpc/job_meta.go similarity index 56% rename from src/modules/job/rpc/meta.go rename to src/modules/server/rpc/job_meta.go index 1ed3549f..469f1260 100644 --- a/src/modules/job/rpc/meta.go +++ b/src/modules/server/rpc/job_meta.go @@ -3,12 +3,12 @@ package rpc import ( "fmt" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/models" ) -// GetTaskMeta 获取任务元信息,自带缓存,executor使用 -func (*Scheduler) GetTaskMeta(id int64, resp *dataobj.TaskMetaResponse) error { +// GetTaskMeta 获取任务元信息,自带缓存 +func (*Server) GetTaskMeta(id int64, resp *dataobj.TaskMetaResponse) error { meta, err := models.TaskMetaGetByID(id) if err != nil { resp.Message = err.Error() diff --git a/src/modules/job/rpc/report.go b/src/modules/server/rpc/job_report.go similarity index 82% rename from src/modules/job/rpc/report.go rename to src/modules/server/rpc/job_report.go index 81823aae..e80ba23a 100644 --- a/src/modules/job/rpc/report.go +++ b/src/modules/server/rpc/job_report.go @@ -1,13 +1,13 @@ package rpc import ( - "github.com/toolkits/pkg/logger" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/models" - "github.com/didi/nightingale/src/common/dataobj" - 
"github.com/didi/nightingale/src/models" + "github.com/toolkits/pkg/logger" ) -func (*Scheduler) Report(req dataobj.ReportRequest, resp *dataobj.ReportResponse) error { +func (*Server) Report(req dataobj.ReportRequest, resp *dataobj.ReportResponse) error { if req.ReportTasks != nil && len(req.ReportTasks) > 0 { err := handleDoneTask(req) if err != nil { diff --git a/src/modules/server/rpc/judge_send.go b/src/modules/server/rpc/judge_send.go new file mode 100644 index 00000000..4a066781 --- /dev/null +++ b/src/modules/server/rpc/judge_send.go @@ -0,0 +1,13 @@ +package rpc + +import ( + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/modules/server/judge" +) + +func (*Server) Send(items []*dataobj.JudgeItem, resp *dataobj.SimpleRpcResponse) error { + // 把当前时间的计算放在最外层,是为了减少获取时间时的系统调用开销 + judge.Send(items) + + return nil +} diff --git a/src/modules/server/rpc/mon_collect.go b/src/modules/server/rpc/mon_collect.go new file mode 100644 index 00000000..465feb4d --- /dev/null +++ b/src/modules/server/rpc/mon_collect.go @@ -0,0 +1,30 @@ +package rpc + +import ( + "encoding/json" + + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" + + "github.com/toolkits/pkg/logger" +) + +func (*Server) GetCollectBy(endpoint string, resp *string) error { + collect := cache.CollectCache.GetBy(endpoint) + collectByte, _ := json.Marshal(collect) + *resp = string(collectByte) + + logger.Debugf("agent %s get collect %+v %s", endpoint, collect, *resp) + + return nil +} + +func (*Server) GetProberCollectBy(endpoint string, resp *models.CollectRuleRpcResp) error { + resp.Data = cache.CollectRuleCache.GetBy(endpoint) + return nil +} + +func (*Server) GetApiCollectBy(key string, resp *models.ApiCollectRpcResp) error { + resp.Data = cache.ApiCollectCache.GetBy(key) + return nil +} diff --git a/src/modules/server/rpc/mon_snmp.go b/src/modules/server/rpc/mon_snmp.go new file mode 100644 index 
00000000..06250b11 --- /dev/null +++ b/src/modules/server/rpc/mon_snmp.go @@ -0,0 +1,51 @@ +package rpc + +import ( + "encoding/json" + + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/cache" + "github.com/toolkits/pkg/logger" +) + +func (*Server) SnmpCollectsGet(key string, resp *string) error { + data := cache.SnmpCollectCache.GetBy(key) + b, err := json.Marshal(data) + if err != nil { + logger.Warningf("get collect err:%v", err) + } + + *resp = string(b) + return nil +} + +func (*Server) HWsGet(key string, resp *models.NetworkHardwareRpcResp) error { + resp.Data = cache.SnmpHWCache.GetBy(key) + return nil +} + +func (*Server) HWsPut(hws []*models.NetworkHardware, resp *string) error { + for i := 0; i < len(hws); i++ { + hw, err := models.NetworkHardwareGet("id=?", hws[i].Id) + if err != nil { + logger.Warningf("get hw:%+v err:%v", hws[i], err) + continue + } + + if hw == nil { + continue + } + + hw.Name = hws[i].Name + hw.SN = hws[i].SN + hw.Uptime = hws[i].Uptime + hw.Info = hws[i].Info + + err = hw.Update("name", "sn", "info", "uptime") + if err != nil { + logger.Warningf("get hw:%+v err:%v", hws[i], err) + continue + } + } + return nil +} diff --git a/src/modules/job/rpc/ping.go b/src/modules/server/rpc/ping.go similarity index 59% rename from src/modules/job/rpc/ping.go rename to src/modules/server/rpc/ping.go index 0e771daa..ef37e779 100644 --- a/src/modules/job/rpc/ping.go +++ b/src/modules/server/rpc/ping.go @@ -1,7 +1,7 @@ package rpc // Ping return string 'pong', just for test -func (*Scheduler) Ping(input string, output *string) error { +func (*Server) Ping(input string, output *string) error { *output = "pong" return nil } diff --git a/src/modules/transfer/rpc/rpc.go b/src/modules/server/rpc/rpc.go similarity index 75% rename from src/modules/transfer/rpc/rpc.go rename to src/modules/server/rpc/rpc.go index d324d827..4c3ad4c0 100644 --- a/src/modules/transfer/rpc/rpc.go +++ 
b/src/modules/server/rpc/rpc.go @@ -9,20 +9,19 @@ import ( "reflect" "time" - "github.com/didi/nightingale/src/common/address" + "github.com/didi/nightingale/v4/src/common/address" "github.com/toolkits/pkg/logger" "github.com/ugorji/go/codec" ) -type Transfer int +type Server int func Start() { - go consumer() - addr := address.GetRPCListen("transfer") + addr := address.GetRPCListen("server") server := rpc.NewServer() - server.Register(new(Transfer)) + server.Register(new(Server)) l, err := net.Listen("tcp", addr) if err != nil { @@ -42,12 +41,12 @@ func Start() { continue } - var bufconn = struct { + var bufConn = struct { io.Closer *bufio.Reader *bufio.Writer }{conn, bufio.NewReader(conn), bufio.NewWriter(conn)} - go server.ServeCodec(codec.MsgpackSpecRpc.ServerCodec(bufconn, &mh)) + go server.ServeCodec(codec.MsgpackSpecRpc.ServerCodec(bufConn, &mh)) } } diff --git a/src/modules/transfer/rpc/consumer.go b/src/modules/server/rpc/transfer_consumer.go similarity index 90% rename from src/modules/transfer/rpc/consumer.go rename to src/modules/server/rpc/transfer_consumer.go index 9321d37a..292f3123 100644 --- a/src/modules/transfer/rpc/consumer.go +++ b/src/modules/server/rpc/transfer_consumer.go @@ -6,10 +6,10 @@ import ( "os" "os/signal" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/transfer/aggr" - "github.com/didi/nightingale/src/modules/transfer/cache" - "github.com/didi/nightingale/src/toolkits/stats" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/stats" + "github.com/didi/nightingale/v4/src/modules/server/aggr" + "github.com/didi/nightingale/v4/src/modules/server/cache" "github.com/Shopify/sarama" "github.com/toolkits/pkg/logger" diff --git a/src/modules/transfer/rpc/push.go b/src/modules/server/rpc/transfer_push.go similarity index 84% rename from src/modules/transfer/rpc/push.go rename to src/modules/server/rpc/transfer_push.go index 320b649e..97cc5261 100644 
--- a/src/modules/transfer/rpc/push.go +++ b/src/modules/server/rpc/transfer_push.go @@ -18,20 +18,16 @@ import ( "fmt" "time" - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/transfer/aggr" - "github.com/didi/nightingale/src/modules/transfer/backend" - "github.com/didi/nightingale/src/toolkits/stats" + "github.com/didi/nightingale/v4/src/common/dataobj" + "github.com/didi/nightingale/v4/src/common/stats" + "github.com/didi/nightingale/v4/src/modules/server/aggr" + "github.com/didi/nightingale/v4/src/modules/server/backend" + "github.com/didi/nightingale/v4/src/modules/server/judge" "github.com/toolkits/pkg/logger" ) -func (t *Transfer) Ping(args string, reply *string) error { - *reply = args - return nil -} - -func (t *Transfer) Push(args []*dataobj.MetricValue, reply *dataobj.TransferResp) error { +func (t *Server) Push(args []*dataobj.MetricValue, reply *dataobj.TransferResp) error { start := time.Now() reply.Invalid = 0 @@ -52,7 +48,7 @@ func (t *Transfer) Push(args []*dataobj.MetricValue, reply *dataobj.TransferResp } // send to judge - backend.Push2JudgeQueue(items) + judge.Push2JudgeQueue(items) if aggr.AggrConfig.Enabled { go aggr.SendToAggr(items) @@ -99,7 +95,7 @@ func PushData(args []*dataobj.MetricValue) (int, string) { } // send to judge - backend.Push2JudgeQueue(items) + judge.Push2JudgeQueue(items) if aggr.AggrConfig.Enabled { go aggr.SendToAggr(items) diff --git a/src/modules/transfer/rpc/query.go b/src/modules/server/rpc/transfer_query.go similarity index 54% rename from src/modules/transfer/rpc/query.go rename to src/modules/server/rpc/transfer_query.go index c3a493f5..e59d10fe 100644 --- a/src/modules/transfer/rpc/query.go +++ b/src/modules/server/rpc/transfer_query.go @@ -1,13 +1,13 @@ package rpc import ( - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/transfer/backend" + "github.com/didi/nightingale/v4/src/common/dataobj" + 
"github.com/didi/nightingale/v4/src/modules/server/backend" "github.com/toolkits/pkg/logger" ) -func (t *Transfer) Query(args []dataobj.QueryData, reply *dataobj.QueryDataResp) error { +func (t *Server) Query(args []dataobj.QueryData, reply *dataobj.QueryDataResp) error { dataSource, err := backend.GetDataSourceFor("") if err != nil { logger.Warningf("could not find datasource") diff --git a/src/modules/server/server.go b/src/modules/server/server.go new file mode 100644 index 00000000..5a76f028 --- /dev/null +++ b/src/modules/server/server.go @@ -0,0 +1,186 @@ +package main + +import ( + "fmt" + "log" + "os" + "os/signal" + "syscall" + "time" + + "github.com/didi/nightingale/v4/src/common/i18n" + "github.com/didi/nightingale/v4/src/common/identity" + "github.com/didi/nightingale/v4/src/common/loggeri" + "github.com/didi/nightingale/v4/src/common/stats" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/aggr" + "github.com/didi/nightingale/v4/src/modules/server/alarm" + "github.com/didi/nightingale/v4/src/modules/server/auth" + "github.com/didi/nightingale/v4/src/modules/server/backend" + "github.com/didi/nightingale/v4/src/modules/server/cache" + "github.com/didi/nightingale/v4/src/modules/server/collector" + "github.com/didi/nightingale/v4/src/modules/server/config" + "github.com/didi/nightingale/v4/src/modules/server/cron" + "github.com/didi/nightingale/v4/src/modules/server/http" + "github.com/didi/nightingale/v4/src/modules/server/http/session" + "github.com/didi/nightingale/v4/src/modules/server/judge" + "github.com/didi/nightingale/v4/src/modules/server/judge/query" + "github.com/didi/nightingale/v4/src/modules/server/rabbitmq" + "github.com/didi/nightingale/v4/src/modules/server/redisc" + "github.com/didi/nightingale/v4/src/modules/server/rpc" + "github.com/didi/nightingale/v4/src/modules/server/ssoc" + "github.com/didi/nightingale/v4/src/modules/server/timer" + 
"github.com/didi/nightingale/v4/src/modules/server/wechat" + + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/all" + _ "github.com/didi/nightingale/v4/src/modules/server/plugins/api" + + _ "github.com/go-sql-driver/mysql" + pcache "github.com/toolkits/pkg/cache" + "github.com/toolkits/pkg/logger" +) + +func main() { + parseConf() + conf := config.Config + + loggeri.Init(conf.Logger) + i18n.Init() + pcache.InitMemoryCache(time.Hour) + + checkIdentity() + + // 初始化数据库和相关数据 + models.InitMySQL("rdb", "mon", "ams", "hbs") + if config.Config.Nems.Enabled { + models.InitMySQL("nems") + go cron.SyncSnmpCollects() + go cron.SyncHardwares() + } + + if conf.Rdb.SSO.Enable && conf.Rdb.Auth.ExtraMode.Enable { + models.InitMySQL("sso") + } + models.InitSalt() + models.InitRooter() + + ssoc.InitSSO() + + // 初始化 redis 用来处理告警事件、发送邮件短信等 + redisc.InitRedis(conf.Redis) + + // 初始化 rabbitmq 处理部分异步逻辑 + wechat.Init(conf.WeChat) + rabbitmq.Init(conf.RabbitMQ) + session.Init() + + auth.Init(conf.Rdb.Auth.ExtraMode) + auth.Start() + + models.InitLDAP(conf.Rdb.LDAP) + go stats.Init("n9e") + + if conf.Job.Enable { + models.InitMySQL("job") + timer.CacheHostDoing() + go timer.Heartbeat() + go timer.Schedule() + go timer.CleanLong() + } + + aggr.Init(conf.Transfer.Aggr) + backend.Init(conf.Transfer.Backend) + // init judge + go judge.InitJudge(conf.Judge.Backend, config.Ident) + + cache.Init(conf.Monapi.Region) + cron.Init() + go cron.InitWorker(conf.Rdb.Sender) + go cron.InitReportHeartBeat(conf.Report) + + //judge + go query.Init(conf.Judge.Query) + go cron.GetJudgeStrategy(conf.Judge.Strategy) + go judge.NodataJudge(conf.Judge.NodataConcurrency) + + if conf.Monapi.AlarmEnabled { + if err := alarm.SyncMaskconf(); err != nil { + log.Fatalf("sync maskconf fail: %v", err) + } + + if err := alarm.SyncStra(); err != nil { + log.Fatalf("sync stra fail: %v", err) + } + + go alarm.SyncMaskconfLoop() + go alarm.SyncStraLoop() + go alarm.CleanStraLoop() + go alarm.ReadHighEvent() + go 
alarm.ReadLowEvent() + go alarm.CallbackConsumer() + go alarm.MergeEvent() + go alarm.CleanEventLoop() + } + + if conf.Monapi.ApiDetectorEnabled { + go cron.CheckDetectorNodes() + go cron.SyncApiCollects() + } + + if conf.Monapi.SnmpDetectorEnabled { + go cron.CheckSnmpDetectorNodes() + } + + if conf.Transfer.Aggr.Enabled { + go cron.SyncAggrCalcStras() + go cron.GetAggrCalcStrategy() + } + + pluginInfo() + + go rpc.Start() + + http.Start() + + endingProc() +} + +func parseConf() { + if err := config.Parse(); err != nil { + fmt.Println("cannot parse configuration file:", err) + os.Exit(1) + } +} + +func checkIdentity() { + ip, err := identity.GetIP() + if err != nil { + fmt.Println("cannot get ip:", err) + os.Exit(1) + } + + if ip == "127.0.0.1" { + fmt.Println("identity: 127.0.0.1, cannot work") + os.Exit(2) + } +} + +func endingProc() { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + select { + case <-c: + fmt.Printf("stop signal caught, stopping... 
pid=%d\n", os.Getpid()) + } + + logger.Close() + http.Shutdown() + fmt.Println("process stopped successfully") +} + +func pluginInfo() { + fmt.Println("remote collector") + for k, v := range collector.GetRemoteCollectors() { + fmt.Printf(" %d %s\n", k, v) + } +} diff --git a/src/modules/job/service/scheduler.go b/src/modules/server/service/job_scheduler.go similarity index 98% rename from src/modules/job/service/scheduler.go rename to src/modules/server/service/job_scheduler.go index 840c70cd..eab04af2 100644 --- a/src/modules/job/service/scheduler.go +++ b/src/modules/server/service/job_scheduler.go @@ -5,7 +5,7 @@ import ( "github.com/toolkits/pkg/slice" "github.com/toolkits/pkg/str" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" ) func ScheduleTask(id int64) { diff --git a/src/modules/job/service/timeout.go b/src/modules/server/service/job_timeout.go similarity index 95% rename from src/modules/job/service/timeout.go rename to src/modules/server/service/job_timeout.go index ae4640e4..b7c6e693 100644 --- a/src/modules/job/service/timeout.go +++ b/src/modules/server/service/job_timeout.go @@ -5,7 +5,7 @@ import ( "github.com/toolkits/pkg/logger" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" ) func CheckTimeout(id int64) { diff --git a/src/modules/rdb/ssoc/sso.go b/src/modules/server/ssoc/sso.go similarity index 97% rename from src/modules/rdb/ssoc/sso.go rename to src/modules/server/ssoc/sso.go index 61252301..e8e02147 100644 --- a/src/modules/rdb/ssoc/sso.go +++ b/src/modules/server/ssoc/sso.go @@ -13,9 +13,10 @@ import ( "strings" "time" - "github.com/coreos/go-oidc" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/rdb/config" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/config" + + oidc "github.com/coreos/go-oidc" "github.com/google/uuid" "golang.org/x/oauth2" ) @@ -47,7 +48,7 @@ var ( ) 
func InitSSO() { - cf := config.Config.SSO + cf := config.Config.Rdb.SSO if !cf.Enable { return diff --git a/src/modules/job/timer/heartbeat.go b/src/modules/server/timer/job_heartbeat.go similarity index 93% rename from src/modules/job/timer/heartbeat.go rename to src/modules/server/timer/job_heartbeat.go index d0fac57c..e59c06d0 100644 --- a/src/modules/job/timer/heartbeat.go +++ b/src/modules/server/timer/job_heartbeat.go @@ -5,8 +5,8 @@ import ( "github.com/toolkits/pkg/logger" - "github.com/didi/nightingale/src/common/identity" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/common/identity" + "github.com/didi/nightingale/v4/src/models" ) func Heartbeat() { diff --git a/src/modules/job/timer/host_doing.go b/src/modules/server/timer/job_host_doing.go similarity index 94% rename from src/modules/job/timer/host_doing.go rename to src/modules/server/timer/job_host_doing.go index 93399cf3..f30ad5f4 100644 --- a/src/modules/job/timer/host_doing.go +++ b/src/modules/server/timer/job_host_doing.go @@ -7,7 +7,7 @@ import ( "github.com/toolkits/pkg/logger" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" ) // CacheHostDoing 缓存task_host_doing表全部内容,减轻DB压力 diff --git a/src/modules/job/timer/scheduler.go b/src/modules/server/timer/job_scheduler.go similarity index 89% rename from src/modules/job/timer/scheduler.go rename to src/modules/server/timer/job_scheduler.go index 8c35a8db..05038400 100644 --- a/src/modules/job/timer/scheduler.go +++ b/src/modules/server/timer/job_scheduler.go @@ -5,9 +5,9 @@ import ( "github.com/toolkits/pkg/logger" - "github.com/didi/nightingale/src/common/identity" - "github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/job/service" + "github.com/didi/nightingale/v4/src/common/identity" + "github.com/didi/nightingale/v4/src/models" + "github.com/didi/nightingale/v4/src/modules/server/service" ) func Schedule() { diff --git 
a/src/modules/job/timer/cleaner.go b/src/modules/server/timer/job_task_cleaner.go similarity index 92% rename from src/modules/job/timer/cleaner.go rename to src/modules/server/timer/job_task_cleaner.go index 02f7f859..cdf271b1 100644 --- a/src/modules/job/timer/cleaner.go +++ b/src/modules/server/timer/job_task_cleaner.go @@ -5,7 +5,7 @@ import ( "github.com/toolkits/pkg/logger" - "github.com/didi/nightingale/src/models" + "github.com/didi/nightingale/v4/src/models" ) func CleanLong() { diff --git a/src/modules/rdb/wechat/wechat.go b/src/modules/server/wechat/wechat.go similarity index 96% rename from src/modules/rdb/wechat/wechat.go rename to src/modules/server/wechat/wechat.go index cd57011a..fd038589 100644 --- a/src/modules/rdb/wechat/wechat.go +++ b/src/modules/server/wechat/wechat.go @@ -10,6 +10,18 @@ import ( "time" ) +var WeChat WechatSection + +func Init(wechat WechatSection) { + WeChat = wechat +} + +type WechatSection struct { + CorpID string `yaml:"corp_id"` + AgentID int `yaml:"agent_id"` + Secret string `yaml:"secret"` +} + // Err 微信返回错误 type Err struct { ErrCode int `json:"errcode"` diff --git a/src/modules/transfer/config/config.go b/src/modules/transfer/config/config.go deleted file mode 100644 index 7dad2e4e..00000000 --- a/src/modules/transfer/config/config.go +++ /dev/null @@ -1,187 +0,0 @@ -package config - -import ( - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/common/identity" - "github.com/didi/nightingale/src/common/loggeri" - "github.com/didi/nightingale/src/common/report" - "github.com/didi/nightingale/src/modules/transfer/aggr" - "github.com/didi/nightingale/src/modules/transfer/backend" - "github.com/didi/nightingale/src/modules/transfer/backend/tsdb" - - "github.com/spf13/viper" - "github.com/toolkits/pkg/file" - "gopkg.in/yaml.v2" -) - -type ConfYaml struct { - Debug bool `yaml:"debug"` - MinStep int `yaml:"minStep"` - Logger loggeri.Config 
`yaml:"logger"` - Backend backend.BackendSection `yaml:"backend"` - HTTP HTTPSection `yaml:"http"` - RPC RPCSection `yaml:"rpc"` - Identity identity.Identity `yaml:"identity"` - Report report.ReportSection `yaml:"report"` - Aggr aggr.AggrSection `yaml:"aggr"` -} - -type IndexSection struct { - Path string `yaml:"path"` - Timeout int `yaml:"timeout"` -} - -type LoggerSection struct { - Dir string `yaml:"dir"` - Level string `yaml:"level"` - KeepHours uint `yaml:"keepHours"` -} - -type HTTPSection struct { - Mode string `yaml:"mode"` - CookieName string `yaml:"cookieName"` - CookieDomain string `yaml:"cookieDomain"` -} - -type RPCSection struct { - Enabled bool `yaml:"enabled"` -} - -var ( - Config *ConfYaml -) - -func NewClusterNode(addrs []string) *tsdb.ClusterNode { - return &tsdb.ClusterNode{Addrs: addrs} -} - -// map["node"]="host1,host2" --> map["node"]=["host1", "host2"] -func formatClusterItems(cluster map[string]string) map[string]*tsdb.ClusterNode { - ret := make(map[string]*tsdb.ClusterNode) - for node, clusterStr := range cluster { - items := strings.Split(clusterStr, ",") - nitems := make([]string, 0) - for _, item := range items { - nitems = append(nitems, strings.TrimSpace(item)) - } - ret[node] = NewClusterNode(nitems) - } - - return ret -} - -func Parse(conf string) error { - bs, err := file.ReadBytes(conf) - if err != nil { - return fmt.Errorf("cannot read yml[%s]: %v", conf, err) - } - - viper.SetConfigType("yaml") - err = viper.ReadConfig(bytes.NewBuffer(bs)) - if err != nil { - return fmt.Errorf("cannot read yml[%s]: %v", conf, err) - } - - viper.SetDefault("http.enabled", true) - viper.SetDefault("minStep", 1) - - viper.SetDefault("backend", map[string]interface{}{ - "datasource": "tsdb", - "straPath": "/api/mon/stras/effective?all=1", - }) - - viper.SetDefault("backend.judge", map[string]interface{}{ - "batch": 200, //每次拉取文件的个数 - "workerNum": 32, - "maxConns": 2000, //查询和推送数据的并发个数 - "maxIdle": 32, //建立的连接池的最大空闲数 - "connTimeout": 1000, 
//链接超时时间,单位毫秒 - "callTimeout": 3000, //访问超时时间,单位毫秒 - "hbsMod": "rdb", - }) - - viper.SetDefault("backend.tsdb", map[string]interface{}{ - "enabled": true, - "name": "tsdb", - "batch": 200, //每次拉取文件的个数 - "workerNum": 32, - "maxConns": 2000, //查询和推送数据的并发个数 - "maxIdle": 32, //建立的连接池的最大空闲数 - "connTimeout": 1000, //链接超时时间,单位毫秒 - "callTimeout": 3000, //访问超时时间,单位毫秒 - "indexTimeout": 3000, //访问index超时时间,单位毫秒 - "replicas": 500, //一致性hash虚拟节点 - }) - - viper.SetDefault("aggr", map[string]interface{}{ - "enabled": false, - "apiTimeout": 3000, - "apiPath": "/api/mon/aggrs", - }) - - viper.SetDefault("backend.influxdb", map[string]interface{}{ - "enabled": false, - "name": "influxdb", - "batch": 200, //每次拉取文件的个数 - "maxRetry": 3, //重试次数 - "workerNum": 32, - "maxConns": 2000, //查询和推送数据的并发个数 - "timeout": 3000, //访问超时时间,单位毫秒 - }) - - viper.SetDefault("backend.opentsdb", map[string]interface{}{ - "enabled": false, - "name": "opentsdb", - "batch": 200, //每次拉取文件的个数 - "maxRetry": 3, //重试次数 - "workerNum": 32, - "maxConns": 2000, //查询和推送数据的并发个数 - "maxIdle": 32, //建立的连接池的最大空闲数 - "connTimeout": 1000, //链接超时时间,单位毫秒 - "callTimeout": 3000, //访问超时时间,单位毫秒 - }) - - viper.SetDefault("backend.kafka", map[string]interface{}{ - "enabled": false, - "name": "kafka", - "maxRetry": 3, //重试次数 - "connTimeout": 1000, //链接超时时间,单位毫秒 - "callTimeout": 3000, //访问超时时间,单位毫秒 - }) - - viper.SetDefault("report", map[string]interface{}{ - "mod": "transfer", - "enabled": true, - "interval": 4000, - "timeout": 3000, - "api": "api/hbs/heartbeat", - "remark": "", - }) - - err = viper.Unmarshal(&Config) - if err != nil { - return fmt.Errorf("cannot read yml[%s]: %v", conf, err) - } - - Config.Backend.Tsdb.ClusterList = formatClusterItems(Config.Backend.Tsdb.Cluster) - - Config.Report.HTTPPort = strconv.Itoa(address.GetHTTPPort("transfer")) - Config.Report.RPCPort = strconv.Itoa(address.GetRPCPort("transfer")) - - if Config.Backend.M3db.Enabled { - // viper.Unmarshal not compatible with yaml.Unmarshal - var b *ConfYaml - 
err := yaml.Unmarshal([]byte(bs), &b) - if err != nil { - return err - } - Config.Backend.M3db = b.Backend.M3db - } - - return identity.Parse() -} diff --git a/src/modules/transfer/config/const.go b/src/modules/transfer/config/const.go deleted file mode 100644 index c3d935d8..00000000 --- a/src/modules/transfer/config/const.go +++ /dev/null @@ -1,5 +0,0 @@ -package config - -const ( - Version = 1 -) diff --git a/src/modules/transfer/cron/init.go b/src/modules/transfer/cron/init.go deleted file mode 100644 index e6c6c8b9..00000000 --- a/src/modules/transfer/cron/init.go +++ /dev/null @@ -1,8 +0,0 @@ -package cron - -func Init() { - go GetStrategy() - go RebuildJudgePool() - go UpdateJudgeQueue() - go GetAggrCalcStrategy() -} diff --git a/src/modules/transfer/cron/queue.go b/src/modules/transfer/cron/queue.go deleted file mode 100644 index 487dde6d..00000000 --- a/src/modules/transfer/cron/queue.go +++ /dev/null @@ -1,35 +0,0 @@ -package cron - -import ( - "time" - - "github.com/didi/nightingale/src/modules/transfer/backend" - - "github.com/toolkits/pkg/container/list" -) - -func UpdateJudgeQueue() { - ticker := time.NewTicker(time.Duration(8) * time.Second) - for { - <-ticker.C - updateJudgeQueue() - } -} - -func updateJudgeQueue() { - instances := backend.GetJudges() - if len(instances) == 0 { - return - } - - for _, instance := range instances { - if !backend.JudgeQueues.Exists(instance) { - q := list.NewSafeListLimited(backend.DefaultSendQueueMaxSize) - backend.JudgeQueues.Set(instance, q) - go backend.Send2JudgeTask(q, instance, backend.Judge.WorkerNum) - } else { - backend.JudgeQueues.UpdateTS(instance) - } - } - backend.JudgeQueues.Clean() -} diff --git a/src/modules/transfer/cron/stra.go b/src/modules/transfer/cron/stra.go deleted file mode 100644 index 7643d95b..00000000 --- a/src/modules/transfer/cron/stra.go +++ /dev/null @@ -1,116 +0,0 @@ -package cron - -import ( - "fmt" - "math/rand" - "time" - - "github.com/didi/nightingale/src/common/address" - 
"github.com/didi/nightingale/src/models" - "github.com/didi/nightingale/src/modules/transfer/backend" - "github.com/didi/nightingale/src/modules/transfer/cache" - "github.com/didi/nightingale/src/toolkits/stats" - "github.com/didi/nightingale/src/toolkits/str" - - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/net/httplib" -) - -type StraResp struct { - Data []*models.Stra `json:"dat"` - Err string `json:"err"` -} - -func GetStrategy() { - ticker := time.NewTicker(time.Duration(8) * time.Second) - getStrategy() - for { - <-ticker.C - getStrategy() - } -} - -func getStrategy() { - addrs := address.GetHTTPAddresses("monapi") - if len(addrs) == 0 { - logger.Error("find no monapi address") - return - } - - var stras StraResp - perm := rand.Perm(len(addrs)) - var err error - for i := range perm { - url := fmt.Sprintf("http://%s%s", addrs[perm[i]], backend.StraPath) - err = httplib.Get(url).SetTimeout(time.Duration(3000) * time.Millisecond).ToJSON(&stras) - - if err != nil { - logger.Warningf("get strategy from remote failed, error:%v", err) - continue - } - - if stras.Err != "" { - logger.Warningf("get strategy from remote failed, error:%v", stras.Err) - continue - } - if len(stras.Data) > 0 { - break - } - } - - if err != nil { - logger.Errorf("get stra err: %v", err) - stats.Counter.Set("stra.err", 1) - } - - if len(stras.Data) == 0 { //策略数为零,不更新缓存 - return - } - - straMap := make(map[string]map[string][]*models.Stra) - for _, stra := range stras.Data { - stats.Counter.Set("stra.count", 1) - - if len(stra.Exprs) < 1 { - logger.Warningf("illegal stra:%v exprs", stra) - continue - } - - metric := stra.Exprs[0].Metric - for _, nid := range stra.Nids { - key := str.MD5(nid, metric, "") //TODO get straMap key, 此处需要优化 - k1 := key[0:2] //为了加快查找,增加一层 map,key 为计算出来的 hash 的前 2 位 - - if _, exists := straMap[k1]; !exists { - straMap[k1] = make(map[string][]*models.Stra) - } - - if _, exists := straMap[k1][key]; !exists { - straMap[k1][key] = []*models.Stra{stra} - 
stats.Counter.Set("stra.key", 1) - - } else { - straMap[k1][key] = append(straMap[k1][key], stra) - } - } - - for _, endpoint := range stra.Endpoints { - key := str.MD5(endpoint, metric, "") //TODO get straMap key, 此处需要优化 - k1 := key[0:2] //为了加快查找,增加一层 map,key 为计算出来的 hash 的前 2 位 - - if _, exists := straMap[k1]; !exists { - straMap[k1] = make(map[string][]*models.Stra) - } - - if _, exists := straMap[k1][key]; !exists { - straMap[k1][key] = []*models.Stra{stra} - stats.Counter.Set("stra.key", 1) - - } else { - straMap[k1][key] = append(straMap[k1][key], stra) - } - } - } - - cache.StraMap.ReInit(straMap) -} diff --git a/src/modules/transfer/http/http_server.go b/src/modules/transfer/http/http_server.go deleted file mode 100644 index b26f3f6d..00000000 --- a/src/modules/transfer/http/http_server.go +++ /dev/null @@ -1,67 +0,0 @@ -package http - -import ( - "context" - "fmt" - "net/http" - "os" - "strings" - "time" - - "github.com/gin-gonic/gin" - - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/common/middleware" - "github.com/didi/nightingale/src/modules/transfer/config" -) - -var srv = &http.Server{ - ReadTimeout: 10 * time.Second, - WriteTimeout: 10 * time.Second, - MaxHeaderBytes: 1 << 20, -} - -func Start() { - c := config.Config - - recoveryMid := middleware.Recovery() - - if strings.ToLower(c.HTTP.Mode) == "release" { - gin.SetMode(gin.ReleaseMode) - middleware.DisableConsoleColor() - } - - r := gin.New() - r.Use(recoveryMid) - - Config(r) - - srv.Addr = address.GetHTTPListen("transfer") - srv.Handler = r - - go func() { - fmt.Println("http.listening:", srv.Addr) - if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed { - fmt.Printf("listening %s occur error: %s\n", srv.Addr, err) - os.Exit(3) - } - }() -} - -// Shutdown http server -func Shutdown() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - if err := srv.Shutdown(ctx); err != nil { - 
fmt.Println("cannot shutdown http server:", err) - os.Exit(2) - } - - // catching ctx.Done(). timeout of 5 seconds. - select { - case <-ctx.Done(): - fmt.Println("shutdown http server timeout of 5 seconds.") - default: - fmt.Println("http server stopped") - } -} diff --git a/src/modules/transfer/http/router_funcs.go b/src/modules/transfer/http/router_funcs.go deleted file mode 100644 index b58f5f1a..00000000 --- a/src/modules/transfer/http/router_funcs.go +++ /dev/null @@ -1,130 +0,0 @@ -package http - -import ( - "strconv" - - "github.com/gin-gonic/gin" - "github.com/toolkits/pkg/errors" -) - -func dangerous(v interface{}) { - errors.Dangerous(v) -} - -func bomb(format string, a ...interface{}) { - errors.Bomb(format, a...) -} - -func bind(c *gin.Context, ptr interface{}) { - dangerous(c.ShouldBindJSON(ptr)) -} - -func urlParamStr(c *gin.Context, field string) string { - val := c.Param(field) - - if val == "" { - bomb("url param[%s] is blank", field) - } - - return val -} - -func urlParamInt64(c *gin.Context, field string) int64 { - strval := urlParamStr(c, field) - intval, err := strconv.ParseInt(strval, 10, 64) - if err != nil { - bomb("cannot convert %s to int64", strval) - } - - return intval -} - -func urlParamInt(c *gin.Context, field string) int { - return int(urlParamInt64(c, field)) -} - -func queryStr(c *gin.Context, key string, defaultVal ...string) string { - val := c.Query(key) - if val != "" { - return val - } - - if len(defaultVal) == 0 { - bomb("query param[%s] is necessary", key) - } - - return defaultVal[0] -} - -func queryInt(c *gin.Context, key string, defaultVal ...int) int { - strv := c.Query(key) - if strv != "" { - intv, err := strconv.Atoi(strv) - if err != nil { - bomb("cannot convert [%s] to int", strv) - } - return intv - } - - if len(defaultVal) == 0 { - bomb("query param[%s] is necessary", key) - } - - return defaultVal[0] -} - -func queryInt64(c *gin.Context, key string, defaultVal ...int64) int64 { - strv := c.Query(key) - if strv 
!= "" { - intv, err := strconv.ParseInt(strv, 10, 64) - if err != nil { - bomb("cannot convert [%s] to int64", strv) - } - return intv - } - - if len(defaultVal) == 0 { - bomb("query param[%s] is necessary", key) - } - - return defaultVal[0] -} - -func offset(c *gin.Context, limit int) int { - if limit <= 0 { - limit = 10 - } - - page := queryInt(c, "p", 1) - return (page - 1) * limit -} - -func renderMessage(c *gin.Context, v interface{}) { - if v == nil { - c.JSON(200, gin.H{"err": ""}) - return - } - - switch t := v.(type) { - case string: - c.JSON(200, gin.H{"err": t}) - case error: - c.JSON(200, gin.H{"err": t.Error()}) - } -} - -func renderData(c *gin.Context, data interface{}, err error) { - if err == nil { - c.JSON(200, gin.H{"dat": data, "err": ""}) - return - } - - renderMessage(c, err.Error()) -} - -func renderZeroPage(c *gin.Context) { - renderData(c, gin.H{ - "list": []int{}, - "total": 0, - }, nil) -} diff --git a/src/modules/transfer/http/routes.go b/src/modules/transfer/http/routes.go deleted file mode 100644 index f45af474..00000000 --- a/src/modules/transfer/http/routes.go +++ /dev/null @@ -1,34 +0,0 @@ -package http - -import ( - "github.com/gin-contrib/pprof" - "github.com/gin-gonic/gin" -) - -// Config routes -func Config(r *gin.Engine) { - sys := r.Group("/api/transfer") - { - sys.GET("/ping", ping) - sys.GET("/pid", pid) - sys.GET("/addr", addr) - sys.POST("/stra", getStra) - sys.POST("/which-tsdb", tsdbInstance) - sys.POST("/which-judge", judgeInstance) - sys.GET("/alive-judges", judges) - - sys.POST("/push", PushData) - sys.POST("/data", QueryData) - sys.POST("/data/ui", QueryDataForUI) - } - - index := r.Group("/api/index") - { - index.POST("/metrics", GetMetrics) - index.POST("/tagkv", GetTagPairs) - index.POST("/counter/clude", GetIndexByClude) - index.POST("/counter/fullmatch", GetIndexByFullTags) - } - - pprof.Register(r, "/api/transfer/debug/pprof") -} diff --git a/src/modules/transfer/transfer.go b/src/modules/transfer/transfer.go 
deleted file mode 100644 index 82c4003b..00000000 --- a/src/modules/transfer/transfer.go +++ /dev/null @@ -1,118 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" - "os/signal" - "syscall" - - "github.com/didi/nightingale/src/common/loggeri" - "github.com/didi/nightingale/src/common/report" - "github.com/didi/nightingale/src/modules/transfer/aggr" - "github.com/didi/nightingale/src/modules/transfer/backend" - "github.com/didi/nightingale/src/modules/transfer/config" - "github.com/didi/nightingale/src/modules/transfer/cron" - "github.com/didi/nightingale/src/modules/transfer/http" - "github.com/didi/nightingale/src/modules/transfer/rpc" - "github.com/didi/nightingale/src/toolkits/stats" - - "github.com/toolkits/pkg/file" - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/runner" -) - -var ( - vers *bool - help *bool - conf *string - - version = "No Version Provided" -) - -func init() { - vers = flag.Bool("v", false, "display the version.") - help = flag.Bool("h", false, "print this help.") - conf = flag.String("f", "", "specify configuration file.") - flag.Parse() - - if *vers { - fmt.Println("Version:", version) - os.Exit(0) - } - - if *help { - flag.Usage() - os.Exit(0) - } -} - -func main() { - aconf() - pconf() - start() - - cfg := config.Config - - loggeri.Init(cfg.Logger) - go stats.Init("n9e.transfer") - - aggr.Init(cfg.Aggr) - backend.Init(cfg.Backend) - cron.Init() - - go report.Init(cfg.Report, "rdb") - go rpc.Start() - - http.Start() - - cleanup() -} - -// auto detect configuration file -func aconf() { - if *conf != "" && file.IsExist(*conf) { - return - } - - *conf = "etc/transfer.local.yml" - if file.IsExist(*conf) { - return - } - - *conf = "etc/transfer.yml" - if file.IsExist(*conf) { - return - } - - fmt.Println("no configuration file for transfer") - os.Exit(1) -} - -// parse configuration file -func pconf() { - if err := config.Parse(*conf); err != nil { - fmt.Println("cannot parse configuration file:", err) - os.Exit(1) - } -} - 
-func cleanup() { - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - select { - case <-c: - fmt.Println("stop signal caught, stopping... pid=", os.Getpid()) - } - - logger.Close() - http.Shutdown() - fmt.Println("sender stopped successfully") -} - -func start() { - runner.Init() - fmt.Println("transfer started, use configuration file:", *conf) - fmt.Println("runner.Cwd:", runner.Cwd) - fmt.Println("runner.Hostname:", runner.Hostname) -} diff --git a/src/modules/tsdb/backend/rpc/init.go b/src/modules/tsdb/backend/rpc/init.go deleted file mode 100644 index 61a8827d..00000000 --- a/src/modules/tsdb/backend/rpc/init.go +++ /dev/null @@ -1,27 +0,0 @@ -package rpc - -import ( - "github.com/didi/nightingale/src/toolkits/pools" -) - -var ( - // 连接池 node_address -> connection_pool - IndexConnPools *pools.ConnPools - Config RpcClientSection -) - -type RpcClientSection struct { - MaxConns int `yaml:"maxConns"` - MaxIdle int `yaml:"maxIdle"` - ConnTimeout int `yaml:"connTimeout"` - CallTimeout int `yaml:"callTimeout"` -} - -func Init(cfg RpcClientSection, indexes []string) { - Config = cfg - IndexConnPools = pools.NewConnPools(cfg.MaxConns, cfg.MaxIdle, cfg.ConnTimeout, cfg.CallTimeout, indexes) -} - -func ReNewPools(indexes []string) []string { - return IndexConnPools.UpdatePools(indexes) -} diff --git a/src/modules/tsdb/backend/rpc/push.go b/src/modules/tsdb/backend/rpc/push.go deleted file mode 100644 index 8365d055..00000000 --- a/src/modules/tsdb/backend/rpc/push.go +++ /dev/null @@ -1,76 +0,0 @@ -package rpc - -import ( - "time" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/toolkits/stats" - - "github.com/toolkits/pkg/logger" -) - -const ( - ALLINDEX = 0 - INCRINDEX = 1 -) - -func Push2Index(mode int, items []*dataobj.TsdbItem, indexAddrs []string) { - for _, addr := range indexAddrs { - push(mode, addr, items) - } -} - -func push(mode int, addr string, tsdbItems 
[]*dataobj.TsdbItem) { - resp := &dataobj.IndexResp{} - var err error - sendOk := false - - if len(tsdbItems) == 0 { - return - } - - itemCount := int64(len(tsdbItems)) - - bodyList := make([]*dataobj.IndexModel, itemCount) - for i, item := range tsdbItems { - logger.Debugf("mode:%d push index:%v to:%s", mode, item, addr) - - var tmp dataobj.IndexModel - tmp.Endpoint = item.Endpoint - tmp.Nid = item.Nid - tmp.Metric = item.Metric - tmp.Step = item.Step - tmp.DsType = item.DsType - if len(item.TagsMap) == 0 { - tmp.Tags = make(map[string]string) - } else { - tmp.Tags = item.TagsMap - } - tmp.Timestamp = item.Timestamp - bodyList[i] = &tmp - } - - for i := 0; i < 3; i++ { //最多重试3次 - if mode == INCRINDEX { - err = IndexConnPools.Call(addr, "Index.IncrPush", bodyList, resp) - stats.Counter.Set("index.out.incr", int(itemCount)) - } else { - err = IndexConnPools.Call(addr, "Index.Push", bodyList, resp) - stats.Counter.Set("index.out", int(itemCount)) - } - if err == nil { - sendOk = true - break - } - if resp.Msg != "" { - logger.Warning(resp.Msg) - } - time.Sleep(time.Millisecond * 10) - } - - if !sendOk { - stats.Counter.Set("index.out.err", int(itemCount)) - - logger.Errorf("send %v to index %s fail: %v", bodyList, addr, err) - } -} diff --git a/src/modules/tsdb/cache/cache.go b/src/modules/tsdb/cache/cache.go deleted file mode 100644 index dfca2142..00000000 --- a/src/modules/tsdb/cache/cache.go +++ /dev/null @@ -1,234 +0,0 @@ -package cache - -import ( - "fmt" - "log" - "sync" - "sync/atomic" - "time" - - "github.com/didi/nightingale/src/modules/tsdb/utils" - "github.com/didi/nightingale/src/toolkits/stats" - - "github.com/toolkits/pkg/logger" -) - -type CacheSection struct { - KeepMinutes int `yaml:"keepMinutes"` - SpanInSeconds int `yaml:"spanInSeconds"` - NumOfChunks int `yaml:"numOfChunks"` - DoCleanInMinutes int `yaml:"doCleanInMinutes"` - FlushDiskStepMs int `yaml:"flushDiskStepMs"` -} - -const SHARD_COUNT = 256 - -var ( - Caches caches - Config CacheSection 
-) - -var ( - TotalCount int64 - cleaning bool -) - -type ( - caches []*cache -) - -type cache struct { - Items map[string]*CS // [counter]ts,value - sync.RWMutex -} - -func Init(cfg CacheSection) { - Config = cfg - - //根据内存保存曲线的时长,计算出需要几个chunk - //如果内存保留2个小时数据,+1为了查询2个小时内的数据一定落在内存中 - Config.NumOfChunks = Config.KeepMinutes*60/Config.SpanInSeconds + 1 - - InitCaches() - go StartCleanup() -} - -func InitCaches() { - Caches = NewCaches() -} - -func InitChunkSlot() { - size := Config.SpanInSeconds * 1000 / Config.FlushDiskStepMs - if size < 0 { - log.Panicf("store.init, bad size %d\n", size) - } - - ChunksSlots = &ChunksSlot{ - Data: make([]map[string][]*Chunk, size), - Size: size, - } - for i := 0; i < size; i++ { - ChunksSlots.Data[i] = make(map[string][]*Chunk) - } -} - -func NewCaches() caches { - c := make(caches, SHARD_COUNT) - for i := 0; i < SHARD_COUNT; i++ { - c[i] = &cache{Items: make(map[string]*CS)} - } - return c -} - -func StartCleanup() { - cfg := Config - t := time.NewTicker(time.Minute * time.Duration(cfg.DoCleanInMinutes)) - cleaning = false - - for { - select { - case <-t.C: - if !cleaning { - go Caches.Cleanup(cfg.KeepMinutes) - } else { - logger.Warning("cleanup() is working, may be it's too slow") - } - } - } -} - -func (c *caches) Push(seriesID string, ts int64, value float64) error { - shard := c.getShard(seriesID) - existC, exist := Caches.exist(seriesID) - if exist { - shard.Lock() - err := existC.Push(seriesID, ts, value) - shard.Unlock() - return err - } - newC := Caches.create(seriesID) - shard.Lock() - err := newC.Push(seriesID, ts, value) - shard.Unlock() - - return err -} - -func (c *caches) Get(seriesID string, from, to int64) ([]Iter, error) { - existC, exist := Caches.exist(seriesID) - - if !exist { - return nil, fmt.Errorf("non series exist") - } - - res := existC.Get(from, to) - if res == nil { - return nil, fmt.Errorf("non enough data") - } - - return res, nil -} - -func (c *caches) SetFlag(seriesID string, flag uint32) error { - 
existC, exist := Caches.exist(seriesID) - if !exist { - return fmt.Errorf("non series exist") - } - existC.SetFlag(flag) - return nil -} - -func (c *caches) GetFlag(seriesID string) uint32 { - existC, exist := Caches.exist(seriesID) - if !exist { - return 0 - } - return existC.GetFlag() -} - -func (c *caches) create(seriesID string) *CS { - atomic.AddInt64(&TotalCount, 1) - shard := c.getShard(seriesID) - shard.Lock() - newC := NewChunks(Config.NumOfChunks) - shard.Items[seriesID] = newC - shard.Unlock() - - return newC -} - -func (c *caches) exist(seriesID string) (*CS, bool) { - shard := c.getShard(seriesID) - shard.RLock() - existC, exist := shard.Items[seriesID] - shard.RUnlock() - - return existC, exist -} - -func (c *caches) GetCurrentChunk(seriesID string) (*Chunk, bool) { - shard := c.getShard(seriesID) - if shard == nil { - return nil, false - } - shard.RLock() - existC, exists := shard.Items[seriesID] - shard.RUnlock() - if exists { - chunk := existC.GetChunk(existC.CurrentChunkPos) - return chunk, exists - } - return nil, exists -} - -func (c caches) Count() int64 { - return atomic.LoadInt64(&TotalCount) -} - -func (c caches) Remove(seriesID string) { - atomic.AddInt64(&TotalCount, -1) - shard := c.getShard(seriesID) - shard.Lock() - delete(shard.Items, seriesID) - shard.Unlock() -} - -func (c caches) Cleanup(expiresInMinutes int) { - now := time.Now() - done := make(chan struct{}) - var count int64 - cleaning = true - defer func() { cleaning = false }() - - go func() { - wg := sync.WaitGroup{} - wg.Add(SHARD_COUNT) - - for _, shard := range c { - go func(shard *cache) { - shard.RLock() - for key, chunks := range shard.Items { - _, lastTs := chunks.GetInfoUnsafe() - if int64(lastTs) < now.Unix()-60*int64(expiresInMinutes) { - atomic.AddInt64(&count, 1) - shard.RUnlock() - c.Remove(key) - stats.Counter.Set("series.delete", 1) - shard.RLock() - } - } - shard.RUnlock() - wg.Done() - }(shard) - } - wg.Wait() - done <- struct{}{} - }() - - <-done - 
logger.Infof("cleanup %v Items, took %.2f ms\n", count, float64(time.Since(now).Nanoseconds())*1e-6) -} - -func (c caches) getShard(key string) *cache { - return c[utils.HashKey(key)%SHARD_COUNT] -} diff --git a/src/modules/tsdb/cache/chunk.go b/src/modules/tsdb/cache/chunk.go deleted file mode 100644 index b5c5bd4f..00000000 --- a/src/modules/tsdb/cache/chunk.go +++ /dev/null @@ -1,135 +0,0 @@ -package cache - -import ( - "fmt" - "sync" - - "github.com/didi/nightingale/src/modules/tsdb/utils" - - tsz "github.com/dgryski/go-tsz" - "github.com/toolkits/pkg/logger" -) - -var FlushDoneChan chan int - -func init() { - FlushDoneChan = make(chan int, 1) -} - -type Chunk struct { - tsz.Series - FirstTs uint32 - LastTs uint32 - NumPoints uint32 - Closed bool -} - -func NewChunk(t0 uint32) *Chunk { - return &Chunk{ - Series: *tsz.New(t0), - FirstTs: 0, - LastTs: 0, - NumPoints: 0, - Closed: false, - } -} - -func (c *Chunk) Push(t uint32, v float64) error { - if t <= c.LastTs { - return fmt.Errorf("Point must be newer than already added points. 
t:%d v:%v,lastTs: %d\n", t, v, c.LastTs) - } - c.Series.Push(t, v) - c.NumPoints += 1 - c.LastTs = t - - return nil -} - -func (c *Chunk) FinishSync() { - c.Closed = true //存在panic的可能 - c.Series.Finish() -} - -var ChunksSlots *ChunksSlot - -type ChunksSlot struct { - sync.RWMutex - Data []map[string][]*Chunk - Size int -} - -func (c *ChunksSlot) Len(idx int) int { - c.Lock() - defer c.Unlock() - - return len(c.Data[idx]) -} - -func (c *ChunksSlot) Get(idx int) map[string][]*Chunk { - c.Lock() - defer c.Unlock() - - items := c.Data[idx] - ret := make(map[string][]*Chunk) - for k, v := range items { - ret[k] = v - } - c.Data[idx] = make(map[string][]*Chunk) - - return ret -} - -func (c *ChunksSlot) GetChunks(key string) ([]*Chunk, bool) { - c.Lock() - defer c.Unlock() - - idx, err := GetChunkIndex(key, c.Size) - if err != nil { - logger.Error(err) - return nil, false - } - - val, ok := c.Data[idx][key] - if ok { - delete(c.Data[idx], key) - } - return val, ok -} - -func (c *ChunksSlot) PushChunks(key string, vals []*Chunk) { - c.Lock() - defer c.Unlock() - idx, err := GetChunkIndex(key, c.Size) - if err != nil { - logger.Error(err) - return - } - if _, exists := c.Data[idx][key]; !exists { - c.Data[idx][key] = make([]*Chunk, 0) - } else { - for _, v := range c.Data[idx][key] { - vals = append(vals, v) - } - } - - c.Data[idx][key] = vals -} - -func (c *ChunksSlot) Push(key string, val *Chunk) { - c.Lock() - defer c.Unlock() - idx, err := GetChunkIndex(key, c.Size) - if err != nil { - logger.Error(err) - return - } - if _, exists := c.Data[idx][key]; !exists { - c.Data[idx][key] = make([]*Chunk, 0) - } - - c.Data[idx][key] = append(c.Data[idx][key], val) -} - -func GetChunkIndex(key string, size int) (uint32, error) { - return utils.HashKey(key) % uint32(size), nil -} diff --git a/src/modules/tsdb/cache/chunks.go b/src/modules/tsdb/cache/chunks.go deleted file mode 100644 index 39969291..00000000 --- a/src/modules/tsdb/cache/chunks.go +++ /dev/null @@ -1,223 +0,0 @@ 
-package cache - -import ( - "fmt" - "sync" - - "github.com/toolkits/pkg/logger" -) - -type CS struct { - Chunks []*Chunk - CurrentChunkPos int - flag uint32 - - sync.RWMutex -} - -func NewChunks(numOfChunks int) *CS { - cs := make([]*Chunk, 0, numOfChunks) - - return &CS{Chunks: cs} -} - -func (cs *CS) Push(seriesID string, ts int64, value float64) error { - //找到当前chunk的起始时间 - t0 := uint32(ts - (ts % int64(Config.SpanInSeconds))) - - // 尚无chunk - if len(cs.Chunks) == 0 { - c := NewChunk(uint32(t0)) - c.FirstTs = uint32(ts) - cs.Chunks = append(cs.Chunks, c) - - return cs.Chunks[0].Push(uint32(ts), value) - } - - // push到当前chunk - currentChunk := cs.GetChunk(cs.CurrentChunkPos) - if t0 == currentChunk.T0 { - if currentChunk.Closed { - return fmt.Errorf("push to closed chunk") - } - - return currentChunk.Push(uint32(ts), value) - } - - if t0 < currentChunk.T0 { - return fmt.Errorf("data @%v, timestamp old than previous chunk. currentchunk t0: %v\n", t0, currentChunk.T0) - } - - // 需要新建chunk - // 先finish掉现有chunk - if !currentChunk.Closed { - currentChunk.FinishSync() - ChunksSlots.Push(seriesID, currentChunk) - } - - // 超过chunks限制, pos回绕到0 - cs.CurrentChunkPos++ - if cs.CurrentChunkPos >= int(Config.NumOfChunks) { - cs.CurrentChunkPos = 0 - } - - // chunks未满, 直接append即可 - if len(cs.Chunks) < int(Config.NumOfChunks) { - c := NewChunk(uint32(t0)) - c.FirstTs = uint32(ts) - cs.Chunks = append(cs.Chunks, c) - - return cs.Chunks[cs.CurrentChunkPos].Push(uint32(ts), value) - } else { - c := NewChunk(uint32(t0)) - c.FirstTs = uint32(ts) - cs.Chunks[cs.CurrentChunkPos] = c - - return cs.Chunks[cs.CurrentChunkPos].Push(uint32(ts), value) - } - - return nil -} - -func (cs *CS) Get(from, to int64) []Iter { - // 这种case不应该发生 - if from >= to { - return nil - } - - cs.RLock() - defer cs.RUnlock() - - // cache server还没有数据 - if len(cs.Chunks) == 0 { - return nil - } - - var iters []Iter - - // from 超出最新chunk可能达到的最新点, 这种case不应该发生 - newestChunk := cs.GetChunk(cs.CurrentChunkPos) - if 
from >= int64(newestChunk.T0)+int64(Config.SpanInSeconds) { - return nil - } - - // 假设共有2个chunk - // len = 1, CurrentChunkPos = 0, oldestPos = 0 - // len = 2, CurrentChunkPos = 0, oldestPos = 1 - // len = 2, CurrentChunkPos = 1, oldestPos = 0 - oldestPos := cs.CurrentChunkPos + 1 - if oldestPos >= len(cs.Chunks) { - oldestPos = 0 - } - oldestChunk := cs.GetChunk(oldestPos) - if oldestChunk == nil { - logger.Error("unexpected nil chunk") - return nil - } - - // to 太老了, 这种case不应发生, 应由query处理 - if to <= int64(oldestChunk.FirstTs) { - return nil - } - - // 找from所在的chunk - for from >= int64(oldestChunk.T0)+int64(Config.SpanInSeconds) { - oldestPos++ - if oldestPos >= len(cs.Chunks) { - oldestPos = 0 - } - oldestChunk = cs.GetChunk(oldestPos) - if oldestChunk == nil { - logger.Error("unexpected nil chunk") - return nil - } - } - - // 找to所在的trunk - newestPos := cs.CurrentChunkPos - for to <= int64(newestChunk.T0) { - newestPos-- - if newestPos < 0 { - newestPos += len(cs.Chunks) - } - newestChunk = cs.GetChunk(newestPos) - if newestChunk == nil { - logger.Error("unexpected nil chunk") - return nil - } - } - - for { - c := cs.GetChunk(oldestPos) - iters = append(iters, NewIter(c.Iter())) - if oldestPos == newestPos { - break - } - oldestPos++ - if oldestPos >= len(cs.Chunks) { - oldestPos = 0 - } - } - - return iters -} - -// GetInfo get oldest ts and newest ts in cache -func (cs *CS) GetInfo() (uint32, uint32) { - cs.RLock() - defer cs.RUnlock() - - return cs.GetInfoUnsafe() -} - -func (cs *CS) GetInfoUnsafe() (uint32, uint32) { - var oldestTs, newestTs uint32 - - if len(cs.Chunks) == 0 { - return 0, 0 - } - - newestChunk := cs.GetChunk(cs.CurrentChunkPos) - if newestChunk == nil { - newestTs = 0 - } else { - newestTs = newestChunk.LastTs - } - - oldestPos := cs.CurrentChunkPos + 1 - if oldestPos >= len(cs.Chunks) { - oldestPos = 0 - } - - oldestChunk := cs.GetChunk(oldestPos) - if oldestChunk == nil { - oldestTs = 0 - } else { - oldestTs = oldestChunk.FirstTs - } - - 
return oldestTs, newestTs -} - -func (cs *CS) GetFlag() uint32 { - cs.RLock() - defer cs.RUnlock() - - return cs.flag -} - -func (cs *CS) SetFlag(flag uint32) { - cs.Lock() - defer cs.Unlock() - - cs.flag = flag - return -} - -func (cs CS) GetChunk(pos int) *Chunk { - if pos < 0 || pos >= len(cs.Chunks) { - return cs.Chunks[0] - } - - return cs.Chunks[pos] -} diff --git a/src/modules/tsdb/cache/iter.go b/src/modules/tsdb/cache/iter.go deleted file mode 100644 index 9375a40e..00000000 --- a/src/modules/tsdb/cache/iter.go +++ /dev/null @@ -1,13 +0,0 @@ -package cache - -import tsz "github.com/dgryski/go-tsz" - -type Iter struct { - *tsz.Iter -} - -func NewIter(i *tsz.Iter) Iter { - return Iter{ - i, - } -} diff --git a/src/modules/tsdb/cache/point.go b/src/modules/tsdb/cache/point.go deleted file mode 100644 index c0bd58b0..00000000 --- a/src/modules/tsdb/cache/point.go +++ /dev/null @@ -1,7 +0,0 @@ -package cache - -type Point struct { - Key string `msg:"key"` - Timestamp int64 `msg:"timestamp"` - Value float64 `msg:"value"` -} diff --git a/src/modules/tsdb/config/config.go b/src/modules/tsdb/config/config.go deleted file mode 100644 index 67e1b935..00000000 --- a/src/modules/tsdb/config/config.go +++ /dev/null @@ -1,134 +0,0 @@ -package config - -import ( - "bytes" - "fmt" - - "github.com/didi/nightingale/src/common/loggeri" - "github.com/didi/nightingale/src/modules/tsdb/backend/rpc" - "github.com/didi/nightingale/src/modules/tsdb/cache" - "github.com/didi/nightingale/src/modules/tsdb/index" - "github.com/didi/nightingale/src/modules/tsdb/migrate" - "github.com/didi/nightingale/src/modules/tsdb/rrdtool" - - "github.com/spf13/viper" - "github.com/toolkits/pkg/file" -) - -type File struct { - Filename string - Body []byte -} - -type ConfYaml struct { - Http *HttpSection `yaml:"http"` - Rpc *RpcSection `yaml:"rpc"` - RRD rrdtool.RRDSection `yaml:"rrd"` - Logger loggeri.Config `yaml:"logger"` - Migrate migrate.MigrateSection `yaml:"migrate"` - Index index.IndexSection 
`yaml:"index"` - RpcClient rpc.RpcClientSection `yaml:"rpcClient"` - Cache cache.CacheSection `yaml:"cache"` - CallTimeout int `yaml:"callTimeout"` - IOWorkerNum int `yaml:"ioWorkerNum"` - FirstBytesSize int `yaml:"firstBytesSize"` -} - -type HttpSection struct { - Enabled bool `yaml:"enabled"` -} - -type RpcSection struct { - Enabled bool `yaml:"enabled"` -} - -var ( - Config *ConfYaml -) - -func GetCfgYml() *ConfYaml { - return Config -} - -func Parse(conf string) error { - bs, err := file.ReadBytes(conf) - if err != nil { - return fmt.Errorf("cannot read yml[%s]: %v", conf, err) - } - - viper.SetConfigType("yaml") - err = viper.ReadConfig(bytes.NewBuffer(bs)) - if err != nil { - return fmt.Errorf("cannot read yml[%s]: %v", conf, err) - } - - viper.SetDefault("http.enabled", true) - viper.SetDefault("rpc.enabled", true) - - viper.SetDefault("rrd.rra", map[int]int{ - 1: 720, // 原始点,假设10s一个点,则存2h,即720个点 - 6: 4320, // 6个点归档为一个点,即1min一个点,3天的话是4320个点 - 180: 1440, // 180个点归档为一个点,即30min一个点,1个月30天是1440个点 - 1080: 2880, // 1080个点归档为一个点,即6h一个点存1年,按照360天算是2880个点 - }) - - viper.SetDefault("rrd.enabled", true) - viper.SetDefault("rrd.wait", true) - viper.SetDefault("rrd.enabled", 100) //每次从待落盘队列中间等待间隔,单位毫秒 - viper.SetDefault("rrd.batch", 100) //每次从待落盘队列中获取数据的个数 - viper.SetDefault("rrd.concurrency", 20) //每次从待落盘队列中获取数据的个数 - viper.SetDefault("rrd.ioWorkerNum", 64) //同时落盘的io并发个数 - - viper.SetDefault("cache.keepMinutes", 120) - viper.SetDefault("cache.spanInSeconds", 900) //每个数据块保存数据的时间范围,单位秒 - viper.SetDefault("cache.doCleanInMinutes", 10) //清理过期数据的周期,单位分钟 - viper.SetDefault("cache.flushDiskStepMs", 1000) - - viper.SetDefault("migrate.enabled", false) - viper.SetDefault("migrate.concurrency", 2) - viper.SetDefault("migrate.batch", 200) - viper.SetDefault("migrate.replicas", 500) - viper.SetDefault("migrate.connTimeout", 1000) - viper.SetDefault("migrate.callTimeout", 3000) - viper.SetDefault("migrate.maxConns", 32) - viper.SetDefault("migrate.maxIdle", 32) - - 
viper.SetDefault("index.activeDuration", 90000) //索引最大的保留时间,超过此数值,索引不会被重建,默认是1天+1小时 - viper.SetDefault("index.rebuildInterval", 21600) //重建索引的周期,单位为秒,默认是6h - viper.SetDefault("index.hbsMod", "rdb") //获取index心跳的模块 - - viper.SetDefault("rpcClient", map[string]int{ - "maxConns": 320, //查询和推送数据的并发个数 - "maxIdle": 320, //建立的连接池的最大空闲数 - "connTimeout": 1000, //链接超时时间,单位毫秒 - "callTimeout": 3000, //访问超时时间,单位毫秒 - }) - - err = viper.Unmarshal(&Config) - if err != nil { - return fmt.Errorf("Unmarshal %v", err) - } - - return err -} - -func GetInt(defaultVal, val int) int { - if val != 0 { - return val - } - return defaultVal -} - -func GetString(defaultVal, val string) string { - if val != "" { - return val - } - return defaultVal -} - -func GetBool(defaultVal, val bool) bool { - if val != false { - return val - } - return defaultVal -} diff --git a/src/modules/tsdb/config/const.go b/src/modules/tsdb/config/const.go deleted file mode 100644 index f819659e..00000000 --- a/src/modules/tsdb/config/const.go +++ /dev/null @@ -1,5 +0,0 @@ -package config - -const ( - VERSION = "0.0.1" -) diff --git a/src/modules/tsdb/http/http.go b/src/modules/tsdb/http/http.go deleted file mode 100644 index abd8637e..00000000 --- a/src/modules/tsdb/http/http.go +++ /dev/null @@ -1,84 +0,0 @@ -package http - -import ( - "log" - "net" - "net/http" - _ "net/http/pprof" - "time" - - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/modules/tsdb/http/middleware" - "github.com/didi/nightingale/src/modules/tsdb/http/render" - "github.com/didi/nightingale/src/modules/tsdb/http/routes" - - "github.com/codegangsta/negroni" - "github.com/gorilla/mux" - "github.com/toolkits/pkg/logger" -) - -var Close_chan, Close_done_chan chan int - -func init() { - Close_chan = make(chan int, 1) - Close_done_chan = make(chan int, 1) -} - -// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted -// connections. 
It's used by ListenAndServe and ListenAndServeTLS so -// dead TCP connections (e.g. closing laptop mid-download) eventually -// go away. -type TcpKeepAliveListener struct { - *net.TCPListener -} - -func (ln TcpKeepAliveListener) Accept() (c net.Conn, err error) { - tc, err := ln.AcceptTCP() - if err != nil { - return - } - tc.SetKeepAlive(true) - tc.SetKeepAlivePeriod(3 * time.Minute) - return tc, nil -} - -func Start() { - render.Init() - - r := mux.NewRouter().StrictSlash(false) - routes.ConfigRoutes(r) - - n := negroni.New() - n.Use(middleware.NewLogger()) - n.Use(middleware.NewRecovery()) - - n.UseHandler(r) - - addr := address.GetHTTPListen("tsdb") - if addr == "" { - return - } - s := &http.Server{ - Addr: addr, - MaxHeaderBytes: 1 << 30, - Handler: n, - } - logger.Info("http listening", addr) - - ln, err := net.Listen("tcp", addr) - if err != nil { - log.Fatalln(err) - return - } - l := ln.(*net.TCPListener) - go s.Serve(TcpKeepAliveListener{l}) - - select { - case <-Close_chan: - log.Println("http recv sigout and exit...") - l.Close() - Close_done_chan <- 1 - return - } - -} diff --git a/src/modules/tsdb/http/middleware/logger.go b/src/modules/tsdb/http/middleware/logger.go deleted file mode 100644 index fc602956..00000000 --- a/src/modules/tsdb/http/middleware/logger.go +++ /dev/null @@ -1,29 +0,0 @@ -package middleware - -import ( - "log" - "net/http" - "time" - - "github.com/codegangsta/negroni" - "github.com/toolkits/pkg/logger" -) - -// Logger is a middleware handler that logs the request as it goes in and the response as it goes out. 
-type Logger struct { - // Logger inherits from log.Logger used to log messages with the Logger middleware - *log.Logger -} - -// NewLogger returns a new Logger instance -func NewLogger() *Logger { - return &Logger{} -} - -func (l *Logger) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - start := time.Now() - next(rw, r) - - res := rw.(negroni.ResponseWriter) - logger.Debugf("%v [method:%s][uri:%s][status:%d][use:%v][from:%s]", time.Now().Format("2006/01/02 15:04:05"), r.Method, r.URL.Path, res.Status(), time.Since(start), r.RemoteAddr) -} diff --git a/src/modules/tsdb/http/middleware/recovery.go b/src/modules/tsdb/http/middleware/recovery.go deleted file mode 100644 index f3f65dab..00000000 --- a/src/modules/tsdb/http/middleware/recovery.go +++ /dev/null @@ -1,54 +0,0 @@ -package middleware - -import ( - "net/http" - "runtime" - - "github.com/didi/nightingale/src/modules/tsdb/http/render" - - "github.com/toolkits/pkg/logger" -) - -// Recovery is a Negroni middleware that recovers from any panics and writes a 500 if there was one. 
-type Recovery struct { - StackAll bool - StackSize int -} - -type Error struct { - Code int `json:"code"` - Msg string `json:"msg"` - Time string `json:"time"` - File string `json:"file"` - Line int `json:"line"` -} - -// NewRecovery returns a new instance of Recovery -func NewRecovery() *Recovery { - return &Recovery{ - StackAll: false, - StackSize: 1024 * 8, - } -} - -func (rec *Recovery) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - defer func() { - if err := recover(); err != nil { - if e, ok := err.(Error); ok { - logger.Errorf("[%s:%d] %s [Error:]%s", e.File, e.Line, e.Time, e.Msg) - - render.Message(w, e.Msg) - return - } - - // Negroni part - w.WriteHeader(http.StatusInternalServerError) - stack := make([]byte, rec.StackSize) - stack = stack[:runtime.Stack(stack, rec.StackAll)] - - logger.Errorf("PANIC: %s\n%s", err, stack) - } - }() - - next(w, r) -} diff --git a/src/modules/tsdb/http/render/render.go b/src/modules/tsdb/http/render/render.go deleted file mode 100644 index 3c1f13e8..00000000 --- a/src/modules/tsdb/http/render/render.go +++ /dev/null @@ -1,40 +0,0 @@ -package render - -import ( - "net/http" - - "github.com/unrolled/render" -) - -var Render *render.Render - -func Init() { - Render = render.New(render.Options{ - Directory: "tsdb", - Extensions: []string{".html"}, - Delims: render.Delims{"{{", "}}"}, - IndentJSON: false, - }) -} - -func Message(w http.ResponseWriter, v interface{}) { - if v == nil { - Render.JSON(w, http.StatusOK, map[string]string{"err": ""}) - return - } - - switch t := v.(type) { - case string: - Render.JSON(w, http.StatusOK, map[string]string{"err": t}) - case error: - Render.JSON(w, http.StatusOK, map[string]string{"err": t.Error()}) - } -} - -func Data(w http.ResponseWriter, v interface{}, err error) { - if err != nil { - Render.JSON(w, http.StatusOK, map[string]interface{}{"err": err.Error(), "dat": v}) - } else { - Render.JSON(w, http.StatusOK, map[string]interface{}{"err": "", "dat": v}) 
- } -} diff --git a/src/modules/tsdb/http/routes/health_router.go b/src/modules/tsdb/http/routes/health_router.go deleted file mode 100644 index 100fe2d3..00000000 --- a/src/modules/tsdb/http/routes/health_router.go +++ /dev/null @@ -1,19 +0,0 @@ -package routes - -import ( - "fmt" - "net/http" - "os" -) - -func ping(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, "pong") -} - -func addr(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, r.RemoteAddr) -} - -func pid(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, fmt.Sprintf("%d", os.Getpid())) -} diff --git a/src/modules/tsdb/http/routes/op_router.go b/src/modules/tsdb/http/routes/op_router.go deleted file mode 100644 index f5e3dd06..00000000 --- a/src/modules/tsdb/http/routes/op_router.go +++ /dev/null @@ -1,70 +0,0 @@ -package routes - -import ( - "net/http" - "sync/atomic" - - "github.com/didi/nightingale/src/modules/tsdb/cache" - "github.com/didi/nightingale/src/modules/tsdb/http/render" - "github.com/didi/nightingale/src/modules/tsdb/index" - "github.com/didi/nightingale/src/modules/tsdb/rrdtool" - "github.com/didi/nightingale/src/modules/tsdb/utils" - "github.com/didi/nightingale/src/toolkits/str" - - "github.com/toolkits/pkg/file" -) - -func getItemBySeriesID(w http.ResponseWriter, r *http.Request) { - seriesID, err := String(r, "series_id", "") - if err != nil { - render.Message(w, err) - return - } - - item := index.GetItemFronIndex(seriesID) - render.Data(w, item, nil) -} - -func indexTotal(w http.ResponseWriter, r *http.Request) { - var total int - for _, indexMap := range index.IndexedItemCacheBigMap { - total += indexMap.Size() - } - - render.Data(w, total, nil) -} - -func seriesTotal(w http.ResponseWriter, r *http.Request) { - render.Data(w, atomic.LoadInt64(&cache.TotalCount), nil) -} - -type delRRDRecv struct { - Endpoint string `json:"endpoint"` - Metric string `json:"metric"` - TagsMap map[string]string `json:"tags"` - Step int `json:"step"` -} - -func 
delRRDByCounter(w http.ResponseWriter, r *http.Request) { - var inputs []delRRDRecv - err := BindJson(r, &inputs) - if err != nil { - render.Message(w, err) - return - } - - for _, input := range inputs { - seriesId := str.Checksum(input.Endpoint, input.Metric, str.SortedTags(input.TagsMap)) - index.DeleteItemFronIndex(seriesId) - - cache.Caches.Remove(seriesId) - - filename := utils.RrdFileName(rrdtool.Config.Storage, seriesId, "GAUGE", input.Step) - err = file.Remove(filename) - } - render.Data(w, "ok", err) -} - -func indexList(w http.ResponseWriter, r *http.Request) { - render.Data(w, index.IndexList.Get(), nil) -} diff --git a/src/modules/tsdb/http/routes/rotuer.go b/src/modules/tsdb/http/routes/rotuer.go deleted file mode 100644 index 65514168..00000000 --- a/src/modules/tsdb/http/routes/rotuer.go +++ /dev/null @@ -1,71 +0,0 @@ -package routes - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - - "github.com/gorilla/mux" - - "github.com/didi/nightingale/src/modules/tsdb/http/render" - "github.com/didi/nightingale/src/modules/tsdb/index" -) - -func ConfigRoutes(r *mux.Router) { - r.HandleFunc("/api/tsdb/ping", ping) - r.HandleFunc("/api/tsdb/addr", addr) - r.HandleFunc("/api/tsdb/pid", pid) - - r.HandleFunc("/api/tsdb/get-item-by-series-id", getItemBySeriesID) - r.HandleFunc("/api/tsdb/update-index", rebuildIndex) - r.HandleFunc("/api/tsdb/index-total", indexTotal) - r.HandleFunc("/api/tsdb/series-total", seriesTotal) - r.HandleFunc("/api/tsdb/del-rrd-by-counter", delRRDByCounter) - r.HandleFunc("/api/tsdb/alive-index", indexList) - - r.PathPrefix("/debug").Handler(http.DefaultServeMux) -} - -func rebuildIndex(w http.ResponseWriter, r *http.Request) { - go index.RebuildAllIndex() - render.Data(w, "ok", nil) -} - -func String(r *http.Request, key string, defVal string) (string, error) { - if val, ok := r.URL.Query()[key]; ok { - if val[0] == "" { - return defVal, nil - } - return strings.TrimSpace(val[0]), nil - } - - if r.Form == nil 
{ - err := r.ParseForm() - if err != nil { - return "", err - } - } - - val := r.Form.Get(key) - if val == "" { - return defVal, nil - } - - return strings.TrimSpace(val), nil -} - -func BindJson(r *http.Request, obj interface{}) error { - if r.Body == nil { - return fmt.Errorf("Empty request body") - } - defer r.Body.Close() - body, _ := ioutil.ReadAll(r.Body) - - err := json.Unmarshal(body, obj) - if err != nil { - return fmt.Errorf("unmarshal body %s err:%v", string(body), err) - } - return err -} diff --git a/src/modules/tsdb/index/cache.go b/src/modules/tsdb/index/cache.go deleted file mode 100644 index cd09aadd..00000000 --- a/src/modules/tsdb/index/cache.go +++ /dev/null @@ -1,84 +0,0 @@ -package index - -import ( - "sync" - - "github.com/didi/nightingale/src/common/dataobj" -) - -const ( - DefaultMaxCacheSize = 5000000 // 默认 最多500w个,太大了内存会耗尽 -) - -type DsTypeAndStep struct { - DsType string `json:"dstype"` - Step int `json:"step"` -} - -// 索引缓存的元素数据结构 -type IndexCacheItem struct { - UUID string - Item *dataobj.TsdbItem -} - -func NewIndexCacheItem(uuid string, item *dataobj.TsdbItem) *IndexCacheItem { - return &IndexCacheItem{UUID: uuid, Item: item} -} - -// 索引缓存-基本缓存容器 -type IndexCacheBase struct { - sync.RWMutex - maxSize int - data map[string]*dataobj.TsdbItem -} - -func NewIndexCacheBase(max int) *IndexCacheBase { - return &IndexCacheBase{maxSize: max, data: make(map[string]*dataobj.TsdbItem)} -} - -func (i *IndexCacheBase) Put(key string, item *dataobj.TsdbItem) { - i.Lock() - defer i.Unlock() - i.data[key] = item -} - -func (i *IndexCacheBase) Get(key string) *dataobj.TsdbItem { - i.RLock() - defer i.RUnlock() - return i.data[key] -} - -func (i *IndexCacheBase) ContainsKey(key string) bool { - i.RLock() - defer i.RUnlock() - return i.data[key] != nil -} - -func (i *IndexCacheBase) Size() int { - i.RLock() - defer i.RUnlock() - return len(i.data) -} - -func (i *IndexCacheBase) Keys() []string { - i.RLock() - defer i.RUnlock() - - count := len(i.data) - 
if count == 0 { - return []string{} - } - - keys := make([]string, 0, count) - for key := range i.data { - keys = append(keys, key) - } - - return keys -} - -func (i *IndexCacheBase) Remove(key string) { - i.Lock() - defer i.Unlock() - delete(i.data, key) -} diff --git a/src/modules/tsdb/index/index.go b/src/modules/tsdb/index/index.go deleted file mode 100644 index c22b96c7..00000000 --- a/src/modules/tsdb/index/index.go +++ /dev/null @@ -1,62 +0,0 @@ -package index - -import ( - "fmt" - "sync" - "time" - - "github.com/didi/nightingale/src/common/report" - "github.com/didi/nightingale/src/modules/tsdb/backend/rpc" - "github.com/didi/nightingale/src/toolkits/stats" - - "github.com/toolkits/pkg/logger" -) - -var IndexList IndexAddrs - -type IndexAddrs struct { - sync.RWMutex - Data []string -} - -func (i *IndexAddrs) Set(addrs []string) { - i.Lock() - defer i.Unlock() - i.Data = addrs -} - -func (i *IndexAddrs) Get() []string { - i.RLock() - defer i.RUnlock() - return i.Data -} - -func GetIndexLoop() { - t1 := time.NewTicker(time.Duration(9) * time.Second) - GetIndex() - for { - <-t1.C - GetIndex() - addrs := rpc.ReNewPools(IndexList.Get()) - if len(addrs) > 0 { - RebuildAllIndex(addrs) //addrs为新增的index实例列表,重新推一遍全量索引 - } - } -} - -func GetIndex() { - instances, err := report.GetAlive("index", Config.HbsMod) - if err != nil { - stats.Counter.Set("get.index.err", 1) - logger.Warningf("get index list err:%v", err) - return - } - - activeIndexs := []string{} - for _, instance := range instances { - activeIndexs = append(activeIndexs, fmt.Sprintf("%s:%s", instance.Identity, instance.RPCPort)) - } - - IndexList.Set(activeIndexs) - return -} diff --git a/src/modules/tsdb/index/init.go b/src/modules/tsdb/index/init.go deleted file mode 100644 index f703a05c..00000000 --- a/src/modules/tsdb/index/init.go +++ /dev/null @@ -1,89 +0,0 @@ -package index - -import ( - "reflect" - - "github.com/didi/nightingale/src/common/dataobj" - 
"github.com/didi/nightingale/src/modules/tsdb/utils" - "github.com/didi/nightingale/src/toolkits/stats" - - "github.com/toolkits/pkg/concurrent/semaphore" - "github.com/toolkits/pkg/logger" -) - -type IndexSection struct { - ActiveDuration int64 `yaml:"activeDuration"` //内存索引保留时间 - RebuildInterval int64 `yaml:"rebuildInterval"` //索引重建周期 - HbsMod string `yaml:"hbsMod"` -} - -//重建索引全局锁 -var UpdateIndexLock = semaphore.NewSemaphore(1) -var Config IndexSection - -const INDEX_SHARD = 256 - -var IndexedItemCacheBigMap = make([]*IndexCacheBase, INDEX_SHARD) -var UnIndexedItemCacheBigMap = make([]*IndexCacheBase, INDEX_SHARD) - -// 初始化索引功能模块 -func Init(cfg IndexSection) { - Config = cfg - for i := 0; i < INDEX_SHARD; i++ { - IndexedItemCacheBigMap[i] = NewIndexCacheBase(DefaultMaxCacheSize) - UnIndexedItemCacheBigMap[i] = NewIndexCacheBase(DefaultMaxCacheSize) - } - - go GetIndexLoop() - go StartIndexUpdateIncrTask() - go StartUpdateIndexTask() - logger.Info("index.Start ok") -} - -func GetItemFronIndex(hash string) *dataobj.TsdbItem { - indexedItemCache := IndexedItemCacheBigMap[utils.HashKey(hash)%INDEX_SHARD] - return indexedItemCache.Get(hash) -} - -func DeleteItemFronIndex(hash string) { - indexedItemCache := IndexedItemCacheBigMap[utils.HashKey(hash)%INDEX_SHARD] - indexedItemCache.Remove(hash) - return -} - -// index收到一条新上报的监控数据,尝试用于增量更新索引 -func ReceiveItem(item *dataobj.TsdbItem, hash string) { - if item == nil { - return - } - var indexedItemCache *IndexCacheBase - var unIndexedItemCache *IndexCacheBase - - indexedItemCache = IndexedItemCacheBigMap[int(hashKey(hash)%INDEX_SHARD)] - unIndexedItemCache = UnIndexedItemCacheBigMap[int(hashKey(hash)%INDEX_SHARD)] - - if indexedItemCache == nil { - stats.Counter.Set("index.in.err", 1) - logger.Error("indexedItemCache: ", reflect.TypeOf(hash), hash) - } - // 已上报过的数据 - stats.Counter.Set("index.in", 1) - if indexedItemCache.ContainsKey(hash) { - indexedItemCache.Put(hash, item) - return - } - 
stats.Counter.Set("index.incr.in", 1) - // 缓存未命中, 放入增量更新队列 - unIndexedItemCache.Put(hash, item) - indexedItemCache.Put(hash, item) -} - -func hashKey(key string) uint32 { - hash := uint32(2166136261) - const prime32 = uint32(16777619) - for i := 0; i < len(key); i++ { - hash *= prime32 - hash ^= uint32(key[i]) - } - return hash -} diff --git a/src/modules/tsdb/index/update_all.go b/src/modules/tsdb/index/update_all.go deleted file mode 100644 index 21c1d0bf..00000000 --- a/src/modules/tsdb/index/update_all.go +++ /dev/null @@ -1,83 +0,0 @@ -package index - -import ( - "fmt" - "time" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/tsdb/backend/rpc" - "github.com/didi/nightingale/src/toolkits/stats" - - "github.com/toolkits/pkg/logger" -) - -func StartUpdateIndexTask() { - - t1 := time.NewTicker(time.Duration(Config.RebuildInterval) * time.Second) - for { - <-t1.C - - RebuildAllIndex() - } -} - -func RebuildAllIndex(params ...[]string) error { - var addrs []string - if len(params) > 0 { - addrs = params[0] - } else { - addrs = IndexList.Get() - } - //postTms := time.Now().Unix() - start := time.Now().Unix() - lastTs := start - Config.ActiveDuration - aggrNum := 200 - - if !UpdateIndexLock.TryAcquire() { - return fmt.Errorf("RebuildAllIndex already Rebuiding..") - } else { - defer UpdateIndexLock.Release() - var pushCnt = 0 - var oldCnt = 0 - for idx := range IndexedItemCacheBigMap { - keys := IndexedItemCacheBigMap[idx].Keys() - - i := 0 - tmpList := make([]*dataobj.TsdbItem, aggrNum) - - for _, key := range keys { - item := IndexedItemCacheBigMap[idx].Get(key) - if item == nil { - continue - } - - if item.Timestamp < lastTs { //缓存中的数据太旧了,不能用于索引的全量更新 - IndexedItemCacheBigMap[idx].Remove(key) - logger.Debug("push index remove:", item) - oldCnt++ - continue - } - logger.Debug("push index:", item) - pushCnt++ - tmpList[i] = item - i = i + 1 - - if i == aggrNum { - rpc.Push2Index(rpc.ALLINDEX, tmpList, addrs) - i = 0 - } - 
} - - if i != 0 { - rpc.Push2Index(rpc.ALLINDEX, tmpList[:i], addrs) - } - } - - stats.Counter.Set("index.delete", oldCnt) - - end := time.Now().Unix() - logger.Infof("RebuildAllIndex end : start_ts[%d] latency[%d] old/success/all[%d/%d/%d]", start, end-start, oldCnt, pushCnt, oldCnt+pushCnt) - } - - return nil -} diff --git a/src/modules/tsdb/index/update_incr.go b/src/modules/tsdb/index/update_incr.go deleted file mode 100644 index 6bde7801..00000000 --- a/src/modules/tsdb/index/update_incr.go +++ /dev/null @@ -1,69 +0,0 @@ -package index - -import ( - "time" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/tsdb/backend/rpc" - "github.com/didi/nightingale/src/toolkits/str" - - "github.com/toolkits/pkg/logger" -) - -const ( - IndexUpdateIncrTaskSleepInterval = time.Duration(10) * time.Second // 增量更新间隔时间, 默认30s -) - -// 启动索引的 异步、增量更新 任务, 每隔一定时间,刷新cache中的数据到数据库中 -func StartIndexUpdateIncrTask() { - - t1 := time.NewTicker(IndexUpdateIncrTaskSleepInterval) - for { - <-t1.C - - startTs := time.Now().Unix() - cnt := updateIndexIncr() - endTs := time.Now().Unix() - - logger.Debugf("UpdateIncrIndex, count %d, lastStartTs %s, lastTimeConsumingInSec %d\n", - cnt, str.UnixTsFormat(startTs), endTs-startTs) - } -} - -func updateIndexIncr() int { - ret := 0 - aggrNum := 200 - - for idx := range UnIndexedItemCacheBigMap { - if UnIndexedItemCacheBigMap[idx] == nil || UnIndexedItemCacheBigMap[idx].Size() <= 0 { - continue - } - - keys := UnIndexedItemCacheBigMap[idx].Keys() - i := 0 - tmpList := make([]*dataobj.TsdbItem, aggrNum) - - for _, key := range keys { - item := UnIndexedItemCacheBigMap[idx].Get(key) - UnIndexedItemCacheBigMap[idx].Remove(key) - if item == nil { - continue - } - - ret++ - tmpList[i] = item - i = i + 1 - if i == aggrNum { - rpc.Push2Index(rpc.INCRINDEX, tmpList, IndexList.Get()) - i = 0 - } - } - - if i != 0 { - rpc.Push2Index(rpc.INCRINDEX, tmpList[:i], IndexList.Get()) - } - - } - - return ret -} diff --git 
a/src/modules/tsdb/migrate/init.go b/src/modules/tsdb/migrate/init.go deleted file mode 100644 index 1a745920..00000000 --- a/src/modules/tsdb/migrate/init.go +++ /dev/null @@ -1,117 +0,0 @@ -package migrate - -import ( - "sync" - - "github.com/toolkits/pkg/container/list" - "github.com/toolkits/pkg/container/set" - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/str" - - "github.com/didi/nightingale/src/toolkits/pools" -) - -type MigrateSection struct { - Batch int `yaml:"batch"` - Concurrency int `yaml:"concurrency"` //number of multiple worker per node - Enabled bool `yaml:"enabled"` - Replicas int `yaml:"replicas"` - OldCluster map[string]string `yaml:"oldCluster"` - NewCluster map[string]string `yaml:"newCluster"` - MaxConns int `yaml:"maxConns"` - MaxIdle int `yaml:"maxIdle"` - ConnTimeout int `yaml:"connTimeout"` - CallTimeout int `yaml:"callTimeout"` -} - -const ( - DefaultSendQueueMaxSize = 102400 //10.24w -) - -var ( - Config MigrateSection - QueueCheck = QueueFilter{Data: make(map[string]struct{})} - - TsdbQueues = make(map[string]*list.SafeListLimited) - NewTsdbQueues = make(map[string]*list.SafeListLimited) - RRDFileQueues = make(map[string]*list.SafeListLimited) - // 服务节点的一致性哈希环 pk -> node - TsdbNodeRing *ConsistentHashRing - NewTsdbNodeRing *ConsistentHashRing - - // 连接池 node_address -> connection_pool - TsdbConnPools *pools.ConnPools - NewTsdbConnPools *pools.ConnPools -) - -type QueueFilter struct { - Data map[string]struct{} - sync.RWMutex -} - -func (q *QueueFilter) Exists(key string) bool { - q.RLock() - defer q.RUnlock() - - _, exsits := q.Data[key] - return exsits -} - -func (q *QueueFilter) Set(key string) { - q.Lock() - defer q.Unlock() - - q.Data[key] = struct{}{} - return -} - -func Init(cfg MigrateSection) { - logger.Info("migrate start...") - Config = cfg - if !Config.Enabled { - return - } - initHashRing() - initConnPools() - initQueues() - StartMigrate() -} - -func initHashRing() { - TsdbNodeRing = 
NewConsistentHashRing(int32(Config.Replicas), str.KeysOfMap(Config.OldCluster)) - NewTsdbNodeRing = NewConsistentHashRing(int32(Config.Replicas), str.KeysOfMap(Config.NewCluster)) -} - -func initConnPools() { - // tsdb - tsdbInstances := set.NewSafeSet() - for _, addr := range Config.OldCluster { - tsdbInstances.Add(addr) - } - TsdbConnPools = pools.NewConnPools( - Config.MaxConns, Config.MaxIdle, Config.ConnTimeout, Config.CallTimeout, tsdbInstances.ToSlice(), - ) - - // tsdb - newTsdbInstances := set.NewSafeSet() - for _, addr := range Config.NewCluster { - newTsdbInstances.Add(addr) - } - NewTsdbConnPools = pools.NewConnPools( - Config.MaxConns, Config.MaxIdle, Config.ConnTimeout, Config.CallTimeout, newTsdbInstances.ToSlice(), - ) -} - -func initQueues() { - for node := range Config.OldCluster { - RRDFileQueues[node] = list.NewSafeListLimited(DefaultSendQueueMaxSize) - } - - for node := range Config.OldCluster { - TsdbQueues[node] = list.NewSafeListLimited(DefaultSendQueueMaxSize) - } - - for node := range Config.NewCluster { - NewTsdbQueues[node] = list.NewSafeListLimited(DefaultSendQueueMaxSize) - } -} diff --git a/src/modules/tsdb/migrate/push.go b/src/modules/tsdb/migrate/push.go deleted file mode 100644 index 11f10189..00000000 --- a/src/modules/tsdb/migrate/push.go +++ /dev/null @@ -1,48 +0,0 @@ -package migrate - -import ( - "github.com/didi/nightingale/src/common/dataobj" - - "github.com/toolkits/pkg/logger" -) - -// 将数据 打入 某个Tsdb的发送缓存队列, 具体是哪一个Tsdb 由一致性哈希 决定 -func Push2OldTsdbSendQueue(item *dataobj.TsdbItem) { - var errCnt int - node, err := TsdbNodeRing.GetNode(item.PrimaryKey()) - if err != nil { - logger.Error("E:", err) - return - } - - Q := TsdbQueues[node] - logger.Debug("->push queue: ", item) - if !Q.PushFront(item) { - errCnt += 1 - } - - // statistics - if errCnt > 0 { - logger.Error("Push2TsdbSendQueue err num: ", errCnt) - } -} - -func Push2NewTsdbSendQueue(item *dataobj.TsdbItem) { - var errCnt int - node, err := 
NewTsdbNodeRing.GetNode(item.PrimaryKey()) - if err != nil { - logger.Error("E:", err) - return - } - - Q := NewTsdbQueues[node] - logger.Debug("->push queue: ", item) - if !Q.PushFront(item) { - errCnt += 1 - } - - // statistics - if errCnt > 0 { - logger.Error("Push2TsdbSendQueue err num: ", errCnt) - } -} diff --git a/src/modules/tsdb/migrate/query.go b/src/modules/tsdb/migrate/query.go deleted file mode 100644 index ae84a657..00000000 --- a/src/modules/tsdb/migrate/query.go +++ /dev/null @@ -1,146 +0,0 @@ -package migrate - -import ( - "errors" - "fmt" - "math" - "time" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/toolkits/pools" - "github.com/didi/nightingale/src/toolkits/str" - - "github.com/toolkits/pkg/pool" -) - -func FetchData(start, end int64, consolFun, endpoint, counter string, step int) ([]*dataobj.RRDData, error) { - var err error - if step <= 0 { - step, err = getCounterStep(endpoint, counter) - if err != nil { - return nil, err - } - } - - qparm := GenQParam(start, end, consolFun, endpoint, counter, step) - resp, err := QueryOne(qparm) - if err != nil { - return []*dataobj.RRDData{}, err - } - - if len(resp.Values) < 1 { - ts := start - start%int64(60) - count := (end - start) / 60 - if count > 730 { - count = 730 - } - - if count <= 0 { - return []*dataobj.RRDData{}, nil - } - - step := (end - start) / count // integer divide by zero - for i := 0; i < int(count); i++ { - resp.Values = append(resp.Values, &dataobj.RRDData{Timestamp: ts, Value: dataobj.JsonFloat(math.NaN())}) - ts += int64(step) - } - } - - return resp.Values, nil -} -func getCounterStep(endpoint, counter string) (step int, err error) { - //从内存中获取 - return -} - -func GenQParam(start, end int64, consolFunc, endpoint, counter string, step int) dataobj.TsdbQueryParam { - return dataobj.TsdbQueryParam{ - Start: start, - End: end, - ConsolFunc: consolFunc, - Endpoint: endpoint, - Counter: counter, - Step: step, - } -} - -func QueryOne(para 
dataobj.TsdbQueryParam) (resp *dataobj.TsdbQueryResponse, err error) { - start, end := para.Start, para.End - resp = &dataobj.TsdbQueryResponse{} - - pk := str.PK(para.Endpoint, para.Counter) - onePool, addr, err := selectPoolByPK(pk) - if err != nil { - return resp, err - } - - conn, err := onePool.Fetch() - if err != nil { - return resp, err - } - - rpcConn := conn.(pools.RpcClient) - if rpcConn.Closed() { - onePool.ForceClose(conn) - return resp, errors.New("conn closed") - } - - type ChResult struct { - Err error - Resp *dataobj.TsdbQueryResponse - } - - ch := make(chan *ChResult, 1) - go func() { - resp := &dataobj.TsdbQueryResponse{} - err := rpcConn.Call("Tsdb.Query", para, resp) - ch <- &ChResult{Err: err, Resp: resp} - }() - - select { - case <-time.After(time.Duration(Config.CallTimeout) * time.Millisecond): - onePool.ForceClose(conn) - return nil, fmt.Errorf("%s, call timeout. proc: %s", addr, onePool.Proc()) - case r := <-ch: - if r.Err != nil { - onePool.ForceClose(conn) - return r.Resp, fmt.Errorf("%s, call failed, err %v. 
proc: %s", addr, r.Err, onePool.Proc()) - } else { - onePool.Release(conn) - if len(r.Resp.Values) < 1 { - r.Resp.Values = []*dataobj.RRDData{} - return r.Resp, nil - } - - fixed := make([]*dataobj.RRDData, 0) - for _, v := range r.Resp.Values { - if v == nil || !(v.Timestamp >= start && v.Timestamp <= end) { - continue - } - - fixed = append(fixed, v) - } - r.Resp.Values = fixed - } - return r.Resp, nil - } -} - -func selectPoolByPK(pk string) (*pool.ConnPool, string, error) { - node, err := TsdbNodeRing.GetNode(pk) - if err != nil { - return nil, "", err - } - - addr, found := Config.OldCluster[node] - if !found { - return nil, "", errors.New("node not found") - } - - onePool, found := TsdbConnPools.Get(addr) - if !found { - return nil, "", errors.New("addr not found") - } - - return onePool, addr, nil -} diff --git a/src/modules/tsdb/migrate/ring.go b/src/modules/tsdb/migrate/ring.go deleted file mode 100644 index 66597820..00000000 --- a/src/modules/tsdb/migrate/ring.go +++ /dev/null @@ -1,51 +0,0 @@ -package migrate - -import ( - "sync" - - "github.com/toolkits/pkg/consistent" -) - -type ConsistentHashRing struct { - sync.RWMutex - ring *consistent.Consistent -} - -func (c *ConsistentHashRing) GetNode(pk string) (string, error) { - c.RLock() - defer c.RUnlock() - - return c.ring.Get(pk) -} - -func (c *ConsistentHashRing) Set(r *consistent.Consistent) { - c.Lock() - defer c.Unlock() - c.ring = r - return -} - -func (c *ConsistentHashRing) GetRing() *consistent.Consistent { - c.RLock() - defer c.RUnlock() - - return c.ring -} - -func NewConsistentHashRing(replicas int32, nodes []string) *ConsistentHashRing { - ret := &ConsistentHashRing{ring: consistent.New()} - ret.ring.NumberOfReplicas = int(replicas) - for i := 0; i < len(nodes); i++ { - ret.ring.Add(nodes[i]) - } - return ret -} - -func RebuildConsistentHashRing(hashRing *ConsistentHashRing, nodes []string, replicas int) { - r := consistent.New() - r.NumberOfReplicas = replicas - for i := 0; i < len(nodes); 
i++ { - r.Add(nodes[i]) - } - hashRing.Set(r) -} diff --git a/src/modules/tsdb/migrate/worker.go b/src/modules/tsdb/migrate/worker.go deleted file mode 100644 index 4d5b66f0..00000000 --- a/src/modules/tsdb/migrate/worker.go +++ /dev/null @@ -1,206 +0,0 @@ -package migrate - -import ( - "strings" - "time" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/tsdb/cache" - "github.com/didi/nightingale/src/modules/tsdb/rrdtool" - "github.com/didi/nightingale/src/modules/tsdb/utils" - "github.com/didi/nightingale/src/toolkits/stats" - "github.com/didi/nightingale/src/toolkits/str" - - "github.com/toolkits/pkg/concurrent/semaphore" - "github.com/toolkits/pkg/file" - "github.com/toolkits/pkg/logger" -) - -// send -const ( - DefaultSendTaskSleepInterval = time.Millisecond * 50 //默认睡眠间隔为50ms -) - -func StartMigrate() { - for node, addr := range Config.OldCluster { - go pullRRD(node, addr, Config.Concurrency) - } - - for node, addr := range Config.NewCluster { - go send2NewTsdbTask(node, addr, Config.Concurrency) - } - - for node, addr := range Config.OldCluster { - go send2OldTsdbTask(node, addr, Config.Concurrency) - } -} - -func pullRRD(node string, addr string, concurrent int) { - batch := Config.Batch // 一次发送,最多batch条数据 - Q := RRDFileQueues[node] - - sema := semaphore.NewSemaphore(concurrent) - for { - fnames := Q.PopBackBy(batch) - count := len(fnames) - - if count == 0 { - time.Sleep(10 * time.Millisecond) - continue - } - stats.Counter.Set("pull.rrd", count) - - filenames := make([]dataobj.RRDFile, count) - for i := 0; i < count; i++ { - filenames[i] = fnames[i].(dataobj.RRDFile) - cache.Caches.SetFlag(str.GetKey(filenames[i].Filename), rrdtool.ITEM_TO_PULLRRD) - } - - //控制并发 - sema.Acquire() - go func(addr string, filenames []dataobj.RRDFile, count int) { - defer sema.Release() - - req := dataobj.RRDFileQuery{Files: filenames} - resp := &dataobj.RRDFileResp{} - var err error - sendOk := false - for i := 0; i < 3; i++ { 
//最多重试3次 - err = TsdbConnPools.Call(addr, "Tsdb.GetRRD", req, resp) - if err == nil { - sendOk = true - break - } - time.Sleep(time.Millisecond * 10) - } - for _, f := range resp.Files { - filePath := rrdtool.Config.Storage + "/" + f.Filename - - paths := strings.Split(f.Filename, "/") - if len(paths) != 2 { - logger.Errorf("write rrd file err %v filename:%s", err, f.Filename) - stats.Counter.Set("pull.rrd.err", count) - continue - } - file.EnsureDir(rrdtool.Config.Storage + "/" + paths[0]) - err = utils.WriteFile(filePath, f.Body, 0644) - if err != nil { - stats.Counter.Set("pull.rrd.err", count) - logger.Errorf("write rrd file err %v filename:%s", err, f.Filename) - } - - cache.Caches.SetFlag(str.GetKey(f.Filename), 0) //重置曲线标志位 - } - - // statistics - if !sendOk { - logger.Errorf("get %v from old tsdb %s:%s fail: %v", filenames, node, addr, err) - } else { - logger.Infof("get %v from old tsdb %s:%s ok", filenames, node, addr) - } - }(addr, filenames, count) - } -} - -func send2OldTsdbTask(node string, addr string, concurrent int) { - batch := Config.Batch // 一次发送,最多batch条数据 - Q := TsdbQueues[node] - - sema := semaphore.NewSemaphore(concurrent) - - for { - items := Q.PopBackBy(batch) - count := len(items) - - if count == 0 { - time.Sleep(DefaultSendTaskSleepInterval) - continue - } - - tsdbItems := make([]*dataobj.TsdbItem, count) - for i := 0; i < count; i++ { - tsdbItems[i] = items[i].(*dataobj.TsdbItem) - tsdbItems[i].From = dataobj.GRAPH - stats.Counter.Set("migrate.old.out", 1) - - logger.Debug("send to old tsdb->: ", tsdbItems[i]) - } - - //控制并发 - sema.Acquire() - go func(addr string, tsdbItems []*dataobj.TsdbItem, count int) { - defer sema.Release() - - resp := &dataobj.SimpleRpcResponse{} - var err error - sendOk := false - for i := 0; i < 3; i++ { //最多重试3次 - err = TsdbConnPools.Call(addr, "Tsdb.Send", tsdbItems, resp) - if err == nil { - sendOk = true - break - } - time.Sleep(time.Millisecond * 10) - } - - // statistics - 
//atomic.AddInt64(&PointOut2Tsdb, int64(count)) - if !sendOk { - logger.Errorf("send %v to tsdb %s:%s fail: %v", tsdbItems, node, addr, err) - } else { - logger.Infof("send to tsdb %s:%s ok", node, addr) - } - }(addr, tsdbItems, count) - } -} - -func send2NewTsdbTask(node string, addr string, concurrent int) { - batch := Config.Batch // 一次发送,最多batch条数据 - Q := NewTsdbQueues[node] - - sema := semaphore.NewSemaphore(concurrent) - - for { - items := Q.PopBackBy(batch) - count := len(items) - - if count == 0 { - time.Sleep(DefaultSendTaskSleepInterval) - continue - } - - tsdbItems := make([]*dataobj.TsdbItem, count) - for i := 0; i < count; i++ { - tsdbItems[i] = items[i].(*dataobj.TsdbItem) - tsdbItems[i].From = dataobj.GRAPH - stats.Counter.Set("migrate.new.out", 1) - logger.Debug("send to new tsdb->: ", tsdbItems[i]) - } - - //控制并发 - sema.Acquire() - go func(addr string, tsdbItems []*dataobj.TsdbItem, count int) { - defer sema.Release() - - resp := &dataobj.SimpleRpcResponse{} - var err error - sendOk := false - for i := 0; i < 3; i++ { //最多重试3次 - err = NewTsdbConnPools.Call(addr, "Tsdb.Send", tsdbItems, resp) - if err == nil { - sendOk = true - break - } - time.Sleep(time.Millisecond * 10) - } - - // statistics - //atomic.AddInt64(&PointOut2Tsdb, int64(count)) - if !sendOk { - logger.Errorf("send %v to tsdb %s:%s fail: %v", tsdbItems, node, addr, err) - } else { - logger.Infof("send to tsdb %s:%s ok", node, addr) - } - }(addr, tsdbItems, count) - } -} diff --git a/src/modules/tsdb/rpc/push.go b/src/modules/tsdb/rpc/push.go deleted file mode 100644 index faaed628..00000000 --- a/src/modules/tsdb/rpc/push.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2017 Xiaomi, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rpc - -import ( - "math" - "time" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/tsdb/cache" - "github.com/didi/nightingale/src/modules/tsdb/index" - "github.com/didi/nightingale/src/modules/tsdb/migrate" - "github.com/didi/nightingale/src/modules/tsdb/rrdtool" - "github.com/didi/nightingale/src/modules/tsdb/utils" - "github.com/didi/nightingale/src/toolkits/stats" - "github.com/didi/nightingale/src/toolkits/str" - - "github.com/toolkits/pkg/file" - "github.com/toolkits/pkg/logger" -) - -const MaxRRAPointCnt = 730 // 每次查询最多返回的点数 - -type Tsdb int - -func (t *Tsdb) Ping(req dataobj.NullRpcRequest, resp *dataobj.SimpleRpcResponse) error { - return nil -} - -func (t *Tsdb) Send(items []*dataobj.TsdbItem, resp *dataobj.SimpleRpcResponse) error { - stats.Counter.Set("push.qp10s", 1) - - go handleItems(items) - return nil -} - -// 供外部调用、处理接收到的数据 的接口 -func HandleItems(items []*dataobj.TsdbItem) error { - - handleItems(items) - return nil -} - -func handleItems(items []*dataobj.TsdbItem) { - count := len(items) - - if items == nil || count == 0 { - logger.Warning("items is null") - return - } - - var cnt, fail int64 - for i := 0; i < count; i++ { - if items[i] == nil { - continue - } - stats.Counter.Set("points.in", 1) - - item := convert2CacheServerItem(items[i]) - - if err := cache.Caches.Push(item.Key, item.Timestamp, item.Value); err != nil { - stats.Counter.Set("points.in.err", 1) - logger.Warningf("push obj error, obj: %v, error: %v\n", items[i], err) - fail++ - } - cnt++ - - 
index.ReceiveItem(items[i], item.Key) - - if migrate.Config.Enabled { - //曲线要迁移到新的存储实例,将数据转发给新存储实例 - if cache.Caches.GetFlag(item.Key) == rrdtool.ITEM_TO_SEND && items[i].From != dataobj.GRAPH { //转发数据 - migrate.Push2NewTsdbSendQueue(items[i]) - } else { - rrdFile := utils.RrdFileName(rrdtool.Config.Storage, item.Key, items[i].DsType, items[i].Step) - //本地文件不存在,应该是新实例,去旧实例拉取文件 - if !file.IsExist(rrdFile) && !migrate.QueueCheck.Exists(item.Key) { - //在新实例rrd文件没有拉取到本地之前,数据要从旧实例查询,要保证旧实例数据完整性 - if items[i].From != dataobj.GRAPH { - migrate.Push2OldTsdbSendQueue(items[i]) - } - node, err := migrate.TsdbNodeRing.GetNode(items[i].PrimaryKey()) - if err != nil { - logger.Error("E:", err) - continue - } - filename := utils.QueryRrdFile(item.Key, items[i].DsType, items[i].Step) - if filename == "" { - continue - } - Q := migrate.RRDFileQueues[node] - body := dataobj.RRDFile{ - Key: item.Key, - Filename: filename, - } - Q.PushFront(body) - } - } - } - } -} - -func convert2CacheServerItem(d *dataobj.TsdbItem) cache.Point { - if d.Nid != "" { - d.Endpoint = dataobj.NidToEndpoint(d.Nid) - } - p := cache.Point{ - Key: str.Checksum(d.Endpoint, d.Metric, str.SortedTags(d.TagsMap)), - Timestamp: d.Timestamp, - Value: d.Value, - } - return p -} - -func GetNeedStep(startTime int64, step int, realStep int) int { - now := time.Now().Unix() - realDataDurationStart := now - int64(step*720) - if startTime > realDataDurationStart { - return step * 6 - } - return realStep -} - -func isNumber(v dataobj.JsonFloat) bool { - f := float64(v) - if math.IsNaN(f) || math.IsInf(f, 0) { - return false - } - return true -} - -func alignTs(ts int64, period int64) int64 { - return ts - ts%period -} diff --git a/src/modules/tsdb/rpc/query.go b/src/modules/tsdb/rpc/query.go deleted file mode 100644 index bbf1eba9..00000000 --- a/src/modules/tsdb/rpc/query.go +++ /dev/null @@ -1,447 +0,0 @@ -// Copyright 2017 Xiaomi, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rpc - -import ( - "math" - "time" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/tsdb/cache" - "github.com/didi/nightingale/src/modules/tsdb/index" - "github.com/didi/nightingale/src/modules/tsdb/migrate" - "github.com/didi/nightingale/src/modules/tsdb/rrdtool" - "github.com/didi/nightingale/src/modules/tsdb/utils" - "github.com/didi/nightingale/src/toolkits/stats" - "github.com/didi/nightingale/src/toolkits/str" - - "github.com/toolkits/pkg/file" - "github.com/toolkits/pkg/logger" -) - -func (g *Tsdb) Query(param dataobj.TsdbQueryParam, resp *dataobj.TsdbQueryResponse) error { - stats.Counter.Set("query.qp10s", 1) - - var ( - rrdDatas []*dataobj.RRDData - datasSize int - rrdFile string - cachePointsSize int - err error - ) - - // form empty response - resp.Values = []*dataobj.RRDData{} - resp.Endpoint = param.Endpoint - resp.Counter = param.Counter - resp.Nid = param.Nid - if param.Nid != "" { - param.Endpoint = dataobj.NidToEndpoint(param.Nid) - } - - needStep := 0 - dsType := param.DsType - - step := param.Step - seriesID := str.Checksum(param.Endpoint, param.Counter, "") - - if param.ConsolFunc == "" { - param.ConsolFunc = "AVERAGE" - } - - if dsType == "" || step == 0 { - item := index.GetItemFronIndex(seriesID) - if item == nil { - dsType = "GAUGE" - step = 10 - } else { - dsType = item.DsType - step = item.Step - } - } - - resp.DsType = dsType 
- resp.Step = step - - startTs := param.Start - param.Start%int64(step) - endTs := param.End - param.End%int64(step) + int64(step) - if endTs-startTs-int64(step) < 1 { - logger.Debug("time duration error", param) - return nil - } - nowTs := time.Now().Unix() - - cachePoints := make([]*dataobj.RRDData, 0) - cacheFirstTs := nowTs - nowTs%int64(step) - 3600 //存在cache的时间点 - if endTs > cacheFirstTs { //最后的时间点在cache范围内 - iters, err := cache.Caches.Get(seriesID, startTs, endTs) - if err != nil { - logger.Debug("get %v cache by %v err:%v", seriesID, param, err) - stats.Counter.Set("query.miss", 1) - } - for _, iter := range iters { - for iter.Next() { - t, v := iter.Values() - if int64(t) < startTs || int64(t) > endTs { - //不在查询范围内 - continue - } - cachePoints = append(cachePoints, dataobj.NewRRDData(int64(t), v)) - } - } - //logger.Debugf("query %d cache count:%d detail:%v", seriesID, len(cachePoints), cachePoints) - - cachePointsSize = len(cachePoints) - //查询起始时间在cache范围内,直接返回结果 - if cachePointsSize > 0 && param.Start >= cachePoints[0].Timestamp { - resp.Values = cachePoints - stats.Counter.Set("query.hit.cache", 1) - goto _RETURN_OK - } - } - - rrdFile = utils.RrdFileName(rrdtool.Config.Storage, seriesID, dsType, step) - if migrate.Config.Enabled && !file.IsExist(rrdFile) { - rrdDatas, err = migrate.FetchData(startTs-int64(step), endTs, param.ConsolFunc, param.Endpoint, param.Counter, step) - - if !migrate.QueueCheck.Exists(seriesID) { - node, err := migrate.TsdbNodeRing.GetNode(param.PK()) - if err != nil { - logger.Error("E:", err) - } else { - filename := utils.QueryRrdFile(seriesID, dsType, step) - Q := migrate.RRDFileQueues[node] - body := dataobj.RRDFile{ - Key: seriesID, - Filename: filename, - } - Q.PushFront(body) - } - } - } else { - // read data from rrd file - // 从RRD中获取数据不包含起始时间点 - // 例: startTs=1484651400,step=60,则第一个数据时间为1484651460) - stats.Counter.Set("query.hit.file", 1) - rrdDatas, err = rrdtool.Fetch(rrdFile, seriesID, param.ConsolFunc, 
startTs-int64(step), endTs, step) - if err != nil { - logger.Warningf("fetch rrd data err:%v seriesID:%v, param:%v", err, seriesID, param) - } - datasSize = len(rrdDatas) - //logger.Debugf("query %d rrd items count:%d detail:%v ", seriesID, len(rrdDatas), rrdDatas) - } - - if datasSize < 1 { - resp.Values = cachePoints - goto _RETURN_OK - } - - if datasSize > 2 { - step = int(rrdDatas[1].Timestamp - rrdDatas[0].Timestamp) - } - - if endTs < cacheFirstTs { - //请求结束时间不在cache时间范围内,直接返回磁盘数据 - - resp.Values = rrdDatas - goto _RETURN_OK - } - - if cachePointsSize < 1 { - //cache数据为空,直接返回磁盘数据 - resp.Values = rrdDatas - goto _RETURN_OK - } - - // merge - { - // fmt cached items - var val dataobj.JsonFloat - dataPoints := make([]*dataobj.RRDData, 0) - - ts := cachePoints[0].Timestamp - cacheTs := ts - - //和磁盘中取出来的数据对齐时间戳 - if deta := ts % int64(step); deta != 0 { - cacheTs = ts - deta + int64(step) - } - - itemEndTs := cachePoints[cachePointsSize-1].Timestamp - itemIdx := 0 //时间戳游标 - for cacheTs <= itemEndTs { - vals := dataobj.JsonFloat(0.0) - cnt := 0 - - for ; itemIdx < cachePointsSize; itemIdx += 1 { - // 依赖: cache的数据按照时间升序排列 - if cachePoints[itemIdx].Timestamp > cacheTs { //超过一个step范围,跳出去 - break - } - if isNumber(cachePoints[itemIdx].Value) { - vals += dataobj.JsonFloat(cachePoints[itemIdx].Value) - cnt += 1 - } - } - - //cache内多个点合成一个点 - if cnt > 0 { - val = vals / dataobj.JsonFloat(cnt) - } else { - val = dataobj.JsonFloat(math.NaN()) - } - - dataPoints = append(dataPoints, &dataobj.RRDData{Timestamp: cacheTs, Value: val}) - cacheTs += int64(step) - } - cacheSize := len(dataPoints) - - //将磁盘中的数据存到 merged - merged := make([]*dataobj.RRDData, 0) - if datasSize > 0 { - for _, val := range rrdDatas { - if val.Timestamp >= startTs && val.Timestamp <= endTs { - // 依赖: rrdtool返回的数据,时间戳是连续的、不会有跳点的情况 - merged = append(merged, val) - } - } - } - - if cacheSize > 0 { - rrdDataSize := len(merged) - lastTs := dataPoints[0].Timestamp - - // 找到merged中第一个时间戳比lastTs小的点 - rrdDataIdx 
:= 0 - for rrdDataIdx = rrdDataSize - 1; rrdDataIdx >= 0; rrdDataIdx-- { - if merged[rrdDataIdx].Timestamp < dataPoints[0].Timestamp { - lastTs = merged[rrdDataIdx].Timestamp - break - } - } - - // fix missing - for ts := lastTs + int64(step); ts < dataPoints[0].Timestamp; ts += int64(step) { - merged = append(merged, &dataobj.RRDData{Timestamp: ts, Value: dataobj.JsonFloat(math.NaN())}) - } - - // merge cached items to result - rrdDataIdx += 1 - for cacheIdx := 0; cacheIdx < cacheSize; cacheIdx++ { - // 从 rrdDataIdx 开始往后面追加数据 - if rrdDataIdx < rrdDataSize { - if !math.IsNaN(float64(dataPoints[cacheIdx].Value)) { - merged[rrdDataIdx] = dataPoints[cacheIdx] // 优先使用cache的数据 - } - } else { - merged = append(merged, dataPoints[cacheIdx]) - } - - rrdDataIdx++ - } - } - - //logger.Debugf("query %d merged items count:%d detail:%v ", seriesID, len(merged), merged) - - mergedSize := len(merged) - // fmt result - retSize := int((endTs - startTs) / int64(step)) - retSize += 1 - ret := make([]*dataobj.RRDData, retSize, retSize) - mergedIdx := 0 - ts = startTs - startTs%int64(step) - for i := 0; i < retSize; i++ { - if mergedIdx < mergedSize && ts == merged[mergedIdx].Timestamp { - ret[i] = merged[mergedIdx] - mergedIdx++ - } else { - ret[i] = &dataobj.RRDData{Timestamp: ts, Value: dataobj.JsonFloat(math.NaN())} - } - ts += int64(step) - } - resp.Values = ret - } - - //logger.Debugf("-->query data: %v <--data from cache %v <--data from disk %v <--merged data:%v", param, items, datas, resp.Values) - -_RETURN_OK: - rsize := len(resp.Values) - realStep := 0 - - if rsize > 2 { - realStep = int(resp.Values[1].Timestamp - resp.Values[0].Timestamp) - } - if rsize > MaxRRAPointCnt || needStep != 0 { - - var sampleRate, sampleSize, sampleStep int - if rsize > MaxRRAPointCnt { - sampleRate = int(rsize/MaxRRAPointCnt) + 1 - sampleSize = int(rsize / sampleRate) - sampleStep = sampleRate * realStep - //logger.Debugf("rsize:%d sampleRate:%d sampleSize:%d sampleStep:%d", rsize, sampleRate, 
sampleSize, sampleStep) - } - - // needStep 不为空则按照指定的step降采样,同环比会用到此功能 - if needStep != 0 && realStep != 0 { - needStep = GetNeedStep(param.Start, param.Step, realStep) //统一环比1天,7天时间跨度,后期待优化 - - sampleRate = int(needStep / realStep) - if sampleRate == 0 { - logger.Error("sampleRate is 0", param) - sampleRate = 1 - } - sampleSize = int(rsize / sampleRate) - sampleStep = needStep - //logger.Debugf("sampleRate:%d sampleSize:%d sampleStep:%d", sampleRate, sampleSize, sampleStep) - } - - if sampleStep > 0 { - // get offset - offset := 0 - for i := 0; i < sampleRate && i < rsize; i++ { - if resp.Values[i].Timestamp%int64(sampleStep) == 0 { - offset = i - break - } - } - - // set data - sampled := make([]*dataobj.RRDData, 0) - for i := 1; i < sampleSize; i++ { - sv := &dataobj.RRDData{Timestamp: 0, Value: 0.0} - cnt := 0 - jend := i*sampleRate + offset - jstart := jend - sampleRate + 1 - - if jend > rsize { - break // 扔掉最后一个不完整的降采样周期 - } - sv.Timestamp = resp.Values[jend].Timestamp - for j := jstart; j <= jend && j < rsize; j++ { - if j < 0 { - continue - } - - if !isNumber(resp.Values[j].Value) { - continue - } - - if !(startTs <= resp.Values[j].Timestamp && - endTs >= resp.Values[j].Timestamp) { - // 时间范围不合法 - continue - } - - sv.Value = sv.Value + dataobj.JsonFloat(resp.Values[j].Value) - cnt += 1 - } - - if cnt == 0 { - sv.Value = dataobj.JsonFloat(math.NaN()) - } else { - sv.Value = sv.Value / dataobj.JsonFloat(cnt) - } - if sv.Timestamp >= param.Start && sv.Timestamp <= param.End { - sampled = append(sampled, sv) - } - } - - resp.Step = sampleStep - resp.Values = sampled - } else if sampleStep <= 0 { - logger.Errorf("zero step, %v", resp) - } - } else { - tmpList := make([]*dataobj.RRDData, 0) - //cache需要补null - for _, dat := range resp.Values { - if dat.Timestamp >= param.Start && dat.Timestamp <= param.End { - tmpList = append(tmpList, &dataobj.RRDData{Timestamp: dat.Timestamp, Value: dat.Value}) - } - } - resp.Values = tmpList - } - - // statistics - return nil 
-} - -func (g *Tsdb) GetRRD(param dataobj.RRDFileQuery, resp *dataobj.RRDFileResp) (err error) { - go func() { //异步更新flag - for _, f := range param.Files { - err := cache.Caches.SetFlag(str.GetKey(f.Filename), rrdtool.ITEM_TO_SEND) - if err != nil { - logger.Errorf("key:%v file:%s set flag error:%v", f.Key, f.Filename, err) - } - } - }() - - workerNum := 100 - worker := make(chan struct{}, workerNum) //控制goroutine并发数 - dataChan := make(chan *dataobj.File, 1000) - - for _, f := range param.Files { - worker <- struct{}{} - go getRRD(f, worker, dataChan) - } - - //等待所有goroutine执行完成 - for i := 0; i < workerNum; i++ { - worker <- struct{}{} - } - - close(dataChan) - for { - d, ok := <-dataChan - if !ok { - break - } - resp.Files = append(resp.Files, *d) - } - return -} - -func getRRD(f dataobj.RRDFile, worker chan struct{}, dataChan chan *dataobj.File) { - defer func() { - <-worker - }() - - filePath := rrdtool.Config.Storage + "/" + f.Filename - //将内存中的数据落盘 - key := str.GetKey(f.Filename) - if c, exists := cache.Caches.GetCurrentChunk(key); exists { - cache.ChunksSlots.Push(key, c) - } - - chunks, exists := cache.ChunksSlots.GetChunks(key) - if exists { - m := make(map[string][]*cache.Chunk) - m[key] = chunks - rrdtool.FlushRRD(m) - } - - body, err := rrdtool.ReadFile(filePath, filePath) - if err != nil { - logger.Error(err) - return - } - tmp := dataobj.File{ - Key: key, - Filename: f.Filename, - Body: body, - } - dataChan <- &tmp - return -} diff --git a/src/modules/tsdb/rpc/rpc.go b/src/modules/tsdb/rpc/rpc.go deleted file mode 100644 index 99dba76b..00000000 --- a/src/modules/tsdb/rpc/rpc.go +++ /dev/null @@ -1,74 +0,0 @@ -package rpc - -import ( - "bufio" - "io" - "log" - "net" - "net/rpc" - "os" - "reflect" - "time" - - "github.com/didi/nightingale/src/common/address" - - "github.com/toolkits/pkg/logger" - "github.com/ugorji/go/codec" -) - -var Close_chan, Close_done_chan chan int - -func init() { - Close_chan = make(chan int, 1) - Close_done_chan = make(chan 
int, 1) -} - -func Start() { - addr := address.GetRPCListen("tsdb") - var closeFlag = false - server := rpc.NewServer() - server.Register(new(Tsdb)) - - l, e := net.Listen("tcp", addr) - if e != nil { - logger.Fatal("cannot listen ", addr, e) - os.Exit(1) - } - - logger.Info("rpc listening ", addr) - - var mh codec.MsgpackHandle - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - go func() { - for { - conn, err := l.Accept() - if err != nil { - if closeFlag { - break - } - logger.Warning("listener accept error: ", err) - time.Sleep(time.Duration(100) * time.Millisecond) - continue - } - - var bufconn = struct { - io.Closer - *bufio.Reader - *bufio.Writer - }{conn, bufio.NewReader(conn), bufio.NewWriter(conn)} - - go server.ServeCodec(codec.MsgpackSpecRpc.ServerCodec(bufconn, &mh)) - } - }() - - select { - case <-Close_chan: - log.Println("rpc, recv sigout and exiting...") - closeFlag = true - l.Close() - Close_done_chan <- 1 - - return - } -} diff --git a/src/modules/tsdb/rrdtool/rrdtool.go b/src/modules/tsdb/rrdtool/rrdtool.go deleted file mode 100644 index d00d4337..00000000 --- a/src/modules/tsdb/rrdtool/rrdtool.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2017 Xiaomi, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rrdtool - -import ( - "errors" - "io" - "math" - "os" - "time" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/tsdb/index" - "github.com/didi/nightingale/src/modules/tsdb/utils" - - "github.com/open-falcon/rrdlite" - "github.com/toolkits/pkg/file" -) - -func create(filename string, item *dataobj.TsdbItem) error { - now := time.Now() - start := now.Add(time.Duration(-24) * time.Hour) - step := uint(item.Step) - - c := rrdlite.NewCreator(filename, start, step) - c.DS("metric", item.DsType, item.Heartbeat, item.Min, item.Max) - - // 设置各种归档策略 - // 10s一个点存 12小时 - - for archive, cnt := range Config.RRA { - if archive == 1 { - c.RRA("AVERAGE", 0, archive, cnt) - } else { - c.RRA("AVERAGE", 0, archive, cnt) - c.RRA("MAX", 0, archive, cnt) - c.RRA("MIN", 0, archive, cnt) - } - } - - return c.Create(true) -} - -func update(filename string, items []*dataobj.TsdbItem) error { - u := rrdlite.NewUpdater(filename) - - for _, item := range items { - v := math.Abs(item.Value) - if v > 1e+300 || (v < 1e-300 && v > 0) { - continue - } - u.Cache(item.Timestamp, item.Value) - } - - return u.Update() -} - -// flush to disk from memory -// 最新的数据在列表的最后面 -func Flushrrd(seriesID string, items []*dataobj.TsdbItem) error { - item := index.GetItemFronIndex(seriesID) - if items == nil || len(items) == 0 || item == nil { - return errors.New("empty items") - } - - filename := utils.RrdFileName(Config.Storage, seriesID, item.DsType, item.Step) - if !file.IsExist(filename) { - baseDir := file.Dir(filename) - - err := file.InsureDir(baseDir) - if err != nil { - return err - } - - err = create(filename, item) - if err != nil { - return err - } - } - - return update(filename, items) -} - -func fetch(filename string, cf string, start, end int64, step int) ([]*dataobj.RRDData, error) { - start_t := time.Unix(start, 0) - end_t := time.Unix(end, 0) - step_t := time.Duration(step) * time.Second - - fetchRes, err := rrdlite.Fetch(filename, cf, 
start_t, end_t, step_t) - if err != nil { - return []*dataobj.RRDData{}, err - } - - defer fetchRes.FreeValues() - - values := fetchRes.Values() - size := len(values) - ret := make([]*dataobj.RRDData, size) - - start_ts := fetchRes.Start.Unix() - step_s := fetchRes.Step.Seconds() - - for i, val := range values { - ts := start_ts + int64(i+1)*int64(step_s) - d := &dataobj.RRDData{ - Timestamp: ts, - Value: dataobj.JsonFloat(val), - } - ret[i] = d - } - - return ret, nil -} - -// WriteFile writes data to a file named by filename. -// file must not exist -func writeFile(filename string, data []byte, perm os.FileMode) error { - f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} diff --git a/src/modules/tsdb/rrdtool/sync_disk.go b/src/modules/tsdb/rrdtool/sync_disk.go deleted file mode 100644 index ce492595..00000000 --- a/src/modules/tsdb/rrdtool/sync_disk.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2017 Xiaomi, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rrdtool - -import ( - "io/ioutil" - "sync" - "sync/atomic" - "time" - - "github.com/didi/nightingale/src/common/dataobj" - "github.com/didi/nightingale/src/modules/tsdb/cache" - "github.com/didi/nightingale/src/modules/tsdb/index" - "github.com/didi/nightingale/src/modules/tsdb/utils" - "github.com/didi/nightingale/src/toolkits/stats" - - "github.com/toolkits/pkg/concurrent/semaphore" - "github.com/toolkits/pkg/file" - "github.com/toolkits/pkg/logger" -) - -var bufferPool = sync.Pool{New: func() interface{} { return new(dataobj.TsdbItem) }} - -var ( - disk_counter uint64 - net_counter uint64 -) - -const ( - ITEM_TO_SEND = 1 - ITEM_TO_PULLRRD = 2 -) - -const ( - _ = iota - IO_TASK_M_READ - IO_TASK_M_WRITE - IO_TASK_M_FLUSH - IO_TASK_M_FETCH -) - -type File struct { - Filename string - Body []byte -} - -type fetch_t struct { - filename string - cf string - start int64 - end int64 - step int - data []*dataobj.RRDData -} - -type flushfile_t struct { - seriesID string - items []*dataobj.TsdbItem -} - -type readfile_t struct { - filename string - data []byte -} - -type io_task_t struct { - method int - args interface{} - done chan error -} - -var ( - Out_done_chan chan int - io_task_chans []chan *io_task_t - flushrrd_timeout int32 - - Config RRDSection -) - -type RRDSection struct { - Enabled bool `yaml:"enabled"` - Migrate bool `yaml:"enabled"` - Storage string `yaml:"storage"` - Batch int `yaml:"batch"` - Concurrency int `yaml:"concurrency"` - Wait int `yaml:"wait"` - RRA map[int]int `yaml:"rra"` - IOWorkerNum int `yaml:"ioWorkerNum"` -} - -func Init(cfg RRDSection) { - Config = cfg - InitChannel() - Start() - - go FlushFinishd2Disk() -} - -func InitChannel() { //初始化io池 - Out_done_chan = make(chan int, 1) - ioWorkerNum := Config.IOWorkerNum - io_task_chans = make([]chan *io_task_t, ioWorkerNum) - for i := 0; i < ioWorkerNum; i++ { - io_task_chans[i] = make(chan *io_task_t, 16) - } -} - -func Start() { - var err error - // check data dir - if err = 
file.EnsureDirRW(Config.Storage); err != nil { - logger.Fatal("rrdtool.Start error, bad data dir "+Config.Storage+",", err) - } - - // sync disk - go ioWorker() - logger.Info("rrdtool.Start ok") -} - -func ioWorker() { - ioWorkerNum := Config.IOWorkerNum - for i := 0; i < ioWorkerNum; i++ { - go func(i int) { - var err error - for { - select { - case task := <-io_task_chans[i]: - if task.method == IO_TASK_M_READ { - if args, ok := task.args.(*readfile_t); ok { - args.data, err = ioutil.ReadFile(args.filename) - task.done <- err - } - } else if task.method == IO_TASK_M_WRITE { - //filename must not exist - if args, ok := task.args.(*File); ok { - baseDir := file.Dir(args.Filename) - if err = file.InsureDir(baseDir); err != nil { - task.done <- err - } - task.done <- writeFile(args.Filename, args.Body, 0644) - } - } else if task.method == IO_TASK_M_FLUSH { - if args, ok := task.args.(*flushfile_t); ok { - task.done <- Flushrrd(args.seriesID, args.items) - } - } else if task.method == IO_TASK_M_FETCH { - if args, ok := task.args.(*fetch_t); ok { - args.data, err = fetch(args.filename, args.cf, args.start, args.end, args.step) - task.done <- err - } - } - } - } - }(i) - } -} - -func FlushFinishd2Disk() { - var idx int = 0 - //time.Sleep(time.Second * time.Duration(cache.Config.SpanInSeconds)) - ticker := time.NewTicker(time.Millisecond * time.Duration(cache.Config.FlushDiskStepMs)).C - slotNum := cache.Config.SpanInSeconds * 1000 / cache.Config.FlushDiskStepMs - for { - select { - case <-ticker: - idx = idx % slotNum - chunks := cache.ChunksSlots.Get(idx) - flushChunks := make(map[string][]*cache.Chunk, 0) - for key, cs := range chunks { - if Config.Migrate { - item := index.GetItemFronIndex(key) - rrdFile := utils.RrdFileName(Config.Storage, key, item.DsType, item.Step) - //在扩容期间,当新实例内存中的曲线对应的rrd文件还没有从旧实例获取并落盘时,先在内存中继续保持 - if !file.IsExist(rrdFile) && cache.Caches.GetFlag(key) == ITEM_TO_PULLRRD { - cache.ChunksSlots.PushChunks(key, cs) - continue - } - } - 
flushChunks[key] = cs - } - FlushRRD(flushChunks) - idx += 1 - case <-cache.FlushDoneChan: - logger.Info("FlushFinishd2Disk recv sigout and exit...") - return - } - } -} - -func Persist() { - logger.Info("start Persist") - - for _, shard := range cache.Caches { - if len(shard.Items) == 0 { - continue - } - for id, chunks := range shard.Items { - cache.ChunksSlots.Push(id, chunks.GetChunk(chunks.CurrentChunkPos)) - } - } - - for i := 0; i < cache.ChunksSlots.Size; i++ { - FlushRRD(cache.ChunksSlots.Get(i)) - } - - return -} - -func FlushRRD(flushChunks map[string][]*cache.Chunk) { - sema := semaphore.NewSemaphore(Config.Concurrency) - var wg sync.WaitGroup - for key, chunks := range flushChunks { - //控制并发 - sema.Acquire() - wg.Add(1) - go func(seriesID string, chunks []*cache.Chunk) { - defer sema.Release() - defer wg.Done() - for _, c := range chunks { - iter := c.Iter() - items := []*dataobj.TsdbItem{} - for iter.Next() { - t, v := iter.Values() - d := bufferPool.Get().(*dataobj.TsdbItem) - d = &dataobj.TsdbItem{ - Timestamp: int64(t), - Value: v, - } - items = append(items, d) - bufferPool.Put(d) - } - - err := FlushFile(seriesID, items) - if err != nil { - stats.Counter.Set("flush.rrd.err", 1) - logger.Errorf("flush %v data to rrd err:%v", seriesID, err) - continue - } - } - }(key, chunks) - } - wg.Wait() -} - -//todo items数据结构优化 -func Commit(seriesID string, items []*dataobj.TsdbItem) { - FlushFile(seriesID, items) -} - -func FlushFile(seriesID string, items []*dataobj.TsdbItem) error { - done := make(chan error, 1) - index, err := getIndex(seriesID) - if err != nil { - return err - } - io_task_chans[index] <- &io_task_t{ - method: IO_TASK_M_FLUSH, - args: &flushfile_t{ - seriesID: seriesID, - items: items, - }, - done: done, - } - stats.Counter.Set("series.write", 1) - atomic.AddUint64(&disk_counter, 1) - return <-done -} - -func Fetch(filename string, seriesID string, cf string, start, end int64, step int) ([]*dataobj.RRDData, error) { - done := make(chan 
error, 1) - task := &io_task_t{ - method: IO_TASK_M_FETCH, - args: &fetch_t{ - filename: filename, - cf: cf, - start: start, - end: end, - step: step, - }, - done: done, - } - index, err := getIndex(seriesID) - if err != nil { - return nil, err - } - - io_task_chans[index] <- task - err = <-done - return task.args.(*fetch_t).data, err -} - -func getIndex(seriesID string) (index int, err error) { - batchNum := Config.IOWorkerNum - - if batchNum <= 1 { - return 0, nil - } - - return int(utils.HashKey(seriesID) % uint32(batchNum)), nil -} - -func ReadFile(filename string, seriesID string) ([]byte, error) { - done := make(chan error, 1) - task := &io_task_t{ - method: IO_TASK_M_READ, - args: &readfile_t{filename: filename}, - done: done, - } - - index, err := getIndex(seriesID) - if err != nil { - return nil, err - } - - io_task_chans[index] <- task - err = <-done - return task.args.(*readfile_t).data, err -} diff --git a/src/modules/tsdb/tsdb.go b/src/modules/tsdb/tsdb.go deleted file mode 100644 index f7f1c790..00000000 --- a/src/modules/tsdb/tsdb.go +++ /dev/null @@ -1,145 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "os" - "os/signal" - "syscall" - - "github.com/didi/nightingale/src/common/loggeri" - brpc "github.com/didi/nightingale/src/modules/tsdb/backend/rpc" - "github.com/didi/nightingale/src/modules/tsdb/cache" - "github.com/didi/nightingale/src/modules/tsdb/config" - "github.com/didi/nightingale/src/modules/tsdb/http" - "github.com/didi/nightingale/src/modules/tsdb/index" - "github.com/didi/nightingale/src/modules/tsdb/migrate" - "github.com/didi/nightingale/src/modules/tsdb/rpc" - "github.com/didi/nightingale/src/modules/tsdb/rrdtool" - "github.com/didi/nightingale/src/toolkits/stats" - - "github.com/toolkits/pkg/file" - "github.com/toolkits/pkg/runner" -) - -var ( - vers *bool - help *bool - conf *string - - version = "No Version Provided" -) - -func init() { - vers = flag.Bool("v", false, "display the version.") - help = flag.Bool("h", 
false, "print this help.") - conf = flag.String("f", "", "specify configuration file.") - flag.Parse() - - if *vers { - fmt.Println("Version:", version) - os.Exit(0) - } - - if *help { - flag.Usage() - os.Exit(0) - } -} - -func main() { - aconf() - pconf() - start() - - cfg := config.Config - - loggeri.Init(cfg.Logger) - go stats.Init("n9e.tsdb") - - // INIT - cache.Init(cfg.Cache) - index.Init(cfg.Index) - brpc.Init(cfg.RpcClient, index.IndexList.Get()) - - cache.InitChunkSlot() - rrdtool.Init(cfg.RRD) - - migrate.Init(cfg.Migrate) //读数据加队列 - - go http.Start() - go rpc.Start() - - startSignal(os.Getpid()) -} - -// auto detect configuration file -func aconf() { - if *conf != "" && file.IsExist(*conf) { - return - } - - *conf = "etc/tsdb.local.yml" - if file.IsExist(*conf) { - return - } - - *conf = "etc/tsdb.yml" - if file.IsExist(*conf) { - return - } - - fmt.Println("no configuration file for tsdb") - os.Exit(1) -} - -// parse configuration file -func pconf() { - if err := config.Parse(*conf); err != nil { - fmt.Println("cannot parse configuration file:", err) - os.Exit(1) - } -} - -func start() { - runner.Init() - fmt.Println("tsdb start, use configuration file:", *conf) - fmt.Println("runner.Cwd:", runner.Cwd) - fmt.Println("runner.Hostname:", runner.Hostname) -} - -func startSignal(pid int) { - cfg := config.Config - sigs := make(chan os.Signal, 1) - log.Printf("%d register signal notify", pid) - signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - - for { - s := <-sigs - log.Println("recv", s) - - switch s { - case syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT: - log.Println("graceful shut down") - - if cfg.Http.Enabled { - http.Close_chan <- 1 - <-http.Close_done_chan - } - log.Println("http stop ok") - - if cfg.Rpc.Enabled { - rpc.Close_chan <- 1 - <-rpc.Close_done_chan - } - log.Println("rpc stop ok") - - cache.FlushDoneChan <- 1 - rrdtool.Persist() - log.Println("====================== tsdb stop ok ======================") - 
log.Println(pid, "exit") - os.Exit(0) - } - } -} diff --git a/src/modules/tsdb/utils/utils.go b/src/modules/tsdb/utils/utils.go deleted file mode 100644 index 1a01eb8d..00000000 --- a/src/modules/tsdb/utils/utils.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2017 Xiaomi, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "io" - "os" - "strconv" -) - -// RRDTOOL UTILS -// 监控数据对应的rrd文件名称 - -const RRDDIRS uint64 = 1000 - -func QueryRrdFile(seriesID string, dsType string, step int) string { - return seriesID[0:2] + "/" + seriesID + "_" + dsType + "_" + strconv.Itoa(step) + ".rrd" -} - -func RrdFileName(baseDir string, seriesID string, dsType string, step int) string { - return baseDir + "/" + seriesID[0:2] + "/" + seriesID + "_" + dsType + "_" + strconv.Itoa(step) + ".rrd" -} - -// WriteFile writes data to a file named by filename. 
-// file must not exist -func WriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -func HashKey(key string) uint32 { - hash := uint32(2166136261) - const prime32 = uint32(16777619) - for i := 0; i < len(key); i++ { - hash *= prime32 - hash ^= uint32(key[i]) - } - return hash -} diff --git a/src/toolkits/compress/tar.go b/src/toolkits/compress/tar.go deleted file mode 100644 index 99f69463..00000000 --- a/src/toolkits/compress/tar.go +++ /dev/null @@ -1,127 +0,0 @@ -package compress - -import ( - "archive/tar" - "compress/gzip" - "io" - "os" - "path" -) - -func TarGzWrite(recPath string, tw *tar.Writer, fi os.FileInfo) error { - fr, err := os.Open(recPath) - if err != nil { - return err - } - defer fr.Close() - - h := new(tar.Header) - h.Name = recPath - h.Size = fi.Size() - h.Mode = int64(fi.Mode()) - h.ModTime = fi.ModTime() - - err = tw.WriteHeader(h) - if err != nil { - return err - } - - _, err = io.Copy(tw, fr) - return err -} - -func IterDirectory(dirPath string, tw *tar.Writer) error { - dir, err := os.Open(dirPath) - if err != nil { - return err - } - defer dir.Close() - - fis, err := dir.Readdir(0) - if err != nil { - return err - } - for _, fi := range fis { - curPath := dirPath + "/" + fi.Name() - if fi.IsDir() { - //TarGzWrite( curPath, tw, fi ) - err = IterDirectory(curPath, tw) - if err != nil { - return err - } - } else { - //fmt.Printf("adding... 
%s\n", curPath) - err = TarGzWrite(curPath, tw, fi) - if err != nil { - return err - } - } - } - return err -} - -func TarGz(outFilePath string, inPath string) error { - // file write - fw, err := os.Create(outFilePath) - if err != nil { - return err - } - defer fw.Close() - - // gzip write - gw := gzip.NewWriter(fw) - defer gw.Close() - - // tar write - tw := tar.NewWriter(gw) - defer tw.Close() - - err = IterDirectory(inPath, tw) - return err - -} - -func UnTarGz(srcFilePath string, destDirPath string) error { - // Create destination directory - os.Mkdir(destDirPath, os.ModePerm) - - fr, err := os.Open(srcFilePath) - if err != nil { - return err - } - defer fr.Close() - - // Gzip reader - gr, err := gzip.NewReader(fr) - if err != nil { - return err - } - - // Tar reader - tr := tar.NewReader(gr) - - for { - hdr, err := tr.Next() - if err == io.EOF { - // End of tar archive - break - } - - // Check if it is diretory or file - if hdr.Typeflag != tar.TypeDir { - // Get files from archive - // Create diretory before create file - os.MkdirAll(destDirPath+"/"+path.Dir(hdr.Name), os.ModePerm) - // Write data to file - fw, err := os.Create(destDirPath + "/" + hdr.Name) - if err != nil { - return err - } - _, err = io.Copy(fw, tr) - if err != nil { - return err - } - } - } - return err -} diff --git a/src/toolkits/http/http.go b/src/toolkits/http/http.go deleted file mode 100644 index 543d677a..00000000 --- a/src/toolkits/http/http.go +++ /dev/null @@ -1,61 +0,0 @@ -package http - -import ( - "context" - "log" - "net/http" - _ "net/http/pprof" - "time" - - "github.com/didi/nightingale/src/common/address" - "github.com/didi/nightingale/src/toolkits/http/middleware" - - "github.com/gin-gonic/gin" -) - -var srv = &http.Server{ - ReadTimeout: 10 * time.Second, - WriteTimeout: 10 * time.Second, - MaxHeaderBytes: 1 << 20, -} - -// Start http server -func Start(r *gin.Engine, mod string, level string) { - loggerMid := middleware.LoggerWithConfig(middleware.LoggerConfig{}) - 
recoveryMid := middleware.Recovery() - - if level != "DEBUG" { - middleware.DisableConsoleColor() - } else { - srv.WriteTimeout = 120 * time.Second - } - - r.Use(loggerMid, recoveryMid) - - srv.Addr = address.GetHTTPListen(mod) - srv.Handler = r - - go func() { - log.Println("starting http server, listening on:", srv.Addr) - if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed { - log.Fatalf("listening %s occur error: %s\n", srv.Addr, err) - } - }() -} - -// Shutdown http server -func Shutdown() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - if err := srv.Shutdown(ctx); err != nil { - log.Fatalln("cannot shutdown http server:", err) - } - - // catching ctx.Done(). timeout of 5 seconds. - select { - case <-ctx.Done(): - log.Println("shutdown http server timeout of 5 seconds.") - default: - log.Println("http server stopped") - } -} diff --git a/src/toolkits/http/middleware/logger.go b/src/toolkits/http/middleware/logger.go deleted file mode 100644 index d8f49d40..00000000 --- a/src/toolkits/http/middleware/logger.go +++ /dev/null @@ -1,295 +0,0 @@ -package middleware - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "time" - - "github.com/gin-gonic/gin" - "github.com/mattn/go-isatty" - "github.com/toolkits/pkg/logger" -) - -type consoleColorModeValue int - -const ( - autoColor consoleColorModeValue = iota - disableColor - forceColor -) - -var ( - green = string([]byte{27, 91, 57, 55, 59, 52, 50, 109}) - white = string([]byte{27, 91, 57, 48, 59, 52, 55, 109}) - yellow = string([]byte{27, 91, 57, 48, 59, 52, 51, 109}) - red = string([]byte{27, 91, 57, 55, 59, 52, 49, 109}) - blue = string([]byte{27, 91, 57, 55, 59, 52, 52, 109}) - magenta = string([]byte{27, 91, 57, 55, 59, 52, 53, 109}) - cyan = string([]byte{27, 91, 57, 55, 59, 52, 54, 109}) - reset = string([]byte{27, 91, 48, 109}) - consoleColorMode = autoColor -) - -// LoggerConfig defines the config for Logger middleware. 
-type LoggerConfig struct { - // Optional. Default value is gin.defaultLogFormatter - Formatter LogFormatter - - // Output is a writer where logs are written. - // Optional. Default value is gin.DefaultWriter. - Output io.Writer - - // SkipPaths is a url path array which logs are not written. - // Optional. - SkipPaths []string -} - -// LogFormatter gives the signature of the formatter function passed to LoggerWithFormatter -type LogFormatter func(params LogFormatterParams) string - -// LogFormatterParams is the structure any formatter will be handed when time to log comes -type LogFormatterParams struct { - Request *http.Request - - // TimeStamp shows the time after the server returns a response. - TimeStamp time.Time - // StatusCode is HTTP response code. - StatusCode int - // Latency is how much time the server cost to process a certain request. - Latency time.Duration - // ClientIP equals Context's ClientIP method. - ClientIP string - // Method is the HTTP method given to the request. - Method string - // Path is a path the client requests. - Path string - // ErrorMessage is set if error has occurred in processing the request. - ErrorMessage string - // isTerm shows whether does gin's output descriptor refers to a terminal. - isTerm bool - // BodySize is the size of the Response Body - BodySize int - // Keys are the keys set on the request's context. - Keys map[string]interface{} -} - -// StatusCodeColor is the ANSI color for appropriately logging http status code to a terminal. -func (p *LogFormatterParams) StatusCodeColor() string { - code := p.StatusCode - - switch { - case code >= http.StatusOK && code < http.StatusMultipleChoices: - return green - case code >= http.StatusMultipleChoices && code < http.StatusBadRequest: - return white - case code >= http.StatusBadRequest && code < http.StatusInternalServerError: - return yellow - default: - return red - } -} - -// MethodColor is the ANSI color for appropriately logging http method to a terminal. 
-func (p *LogFormatterParams) MethodColor() string { - method := p.Method - - switch method { - case "GET": - return blue - case "POST": - return cyan - case "PUT": - return yellow - case "DELETE": - return red - case "PATCH": - return green - case "HEAD": - return magenta - case "OPTIONS": - return white - default: - return reset - } -} - -// ResetColor resets all escape attributes. -func (p *LogFormatterParams) ResetColor() string { - return reset -} - -// IsOutputColor indicates whether can colors be outputted to the log. -func (p *LogFormatterParams) IsOutputColor() bool { - return consoleColorMode == forceColor || (consoleColorMode == autoColor && p.isTerm) -} - -// defaultLogFormatter is the default log format function Logger middleware uses. -var defaultLogFormatter = func(param LogFormatterParams) string { - var statusColor, methodColor, resetColor string - if param.IsOutputColor() { - statusColor = param.StatusCodeColor() - methodColor = param.MethodColor() - resetColor = param.ResetColor() - } - - if param.Latency > time.Minute { - // Truncate in a golang < 1.8 safe way - param.Latency = param.Latency - param.Latency%time.Second - } - return fmt.Sprintf("[GIN] |%s %3d %s| %13v | %15s |%s %-7s %s %s\n%s", - statusColor, param.StatusCode, resetColor, - param.Latency, - param.ClientIP, - methodColor, param.Method, resetColor, - param.Path, - param.ErrorMessage, - ) -} - -// DisableConsoleColor disables color output in the console. -func DisableConsoleColor() { - consoleColorMode = disableColor -} - -// ForceConsoleColor force color output in the console. -func ForceConsoleColor() { - consoleColorMode = forceColor -} - -// ErrorLogger returns a handlerfunc for any error type. -func ErrorLogger() gin.HandlerFunc { - return ErrorLoggerT(gin.ErrorTypeAny) -} - -// ErrorLoggerT returns a handlerfunc for a given error type. 
-func ErrorLoggerT(typ gin.ErrorType) gin.HandlerFunc { - return func(c *gin.Context) { - c.Next() - errors := c.Errors.ByType(typ) - if len(errors) > 0 { - c.JSON(-1, errors) - } - } -} - -// Logger instances a Logger middleware that will write the logs to gin.DefaultWriter. -// By default gin.DefaultWriter = os.Stdout. -func Logger() gin.HandlerFunc { - return LoggerWithConfig(LoggerConfig{}) -} - -// LoggerWithFormatter instance a Logger middleware with the specified log format function. -func LoggerWithFormatter(f LogFormatter) gin.HandlerFunc { - return LoggerWithConfig(LoggerConfig{ - Formatter: f, - }) -} - -// LoggerWithWriter instance a Logger middleware with the specified writer buffer. -// Example: os.Stdout, a file opened in write mode, a socket... -func LoggerWithWriter(out io.Writer, notlogged ...string) gin.HandlerFunc { - return LoggerWithConfig(LoggerConfig{ - Output: out, - SkipPaths: notlogged, - }) -} - -// LoggerWithConfig instance a Logger middleware with config. -func LoggerWithConfig(conf LoggerConfig) gin.HandlerFunc { - formatter := conf.Formatter - if formatter == nil { - formatter = defaultLogFormatter - } - - out := conf.Output - if out == nil { - out = os.Stdout - } - - notlogged := conf.SkipPaths - - isTerm := true - - if w, ok := out.(*os.File); !ok || os.Getenv("TERM") == "dumb" || - (!isatty.IsTerminal(w.Fd()) && !isatty.IsCygwinTerminal(w.Fd())) { - isTerm = false - } - - var skip map[string]struct{} - - if length := len(notlogged); length > 0 { - skip = make(map[string]struct{}, length) - - for _, path := range notlogged { - skip[path] = struct{}{} - } - } - - return func(c *gin.Context) { - // Start timer - start := time.Now() - path := c.Request.URL.Path - raw := c.Request.URL.RawQuery - - var ( - rdr1 io.ReadCloser - rdr2 io.ReadCloser - ) - - if c.Request.Method != "GET" { - buf, _ := ioutil.ReadAll(c.Request.Body) - rdr1 = ioutil.NopCloser(bytes.NewBuffer(buf)) - rdr2 = ioutil.NopCloser(bytes.NewBuffer(buf)) - - 
c.Request.Body = rdr2 - } - - // Process request - c.Next() - - // Log only when path is not being skipped - if _, ok := skip[path]; !ok { - param := LogFormatterParams{ - Request: c.Request, - isTerm: isTerm, - Keys: c.Keys, - } - - // Stop timer - param.TimeStamp = time.Now() - param.Latency = param.TimeStamp.Sub(start) - - param.ClientIP = c.ClientIP() - param.Method = c.Request.Method - param.StatusCode = c.Writer.Status() - param.ErrorMessage = c.Errors.ByType(gin.ErrorTypePrivate).String() - - param.BodySize = c.Writer.Size() - - if raw != "" { - path = path + "?" + raw - } - - param.Path = path - - // fmt.Fprint(out, formatter(param)) - logger.Info(formatter(param)) - - if c.Request.Method != "GET" { - logger.Info(readBody(rdr1)) - } - } - } -} - -func readBody(reader io.Reader) string { - buf := new(bytes.Buffer) - buf.ReadFrom(reader) - - s := buf.String() - return s -} diff --git a/src/toolkits/http/middleware/recovery.go b/src/toolkits/http/middleware/recovery.go deleted file mode 100644 index 7c1d5110..00000000 --- a/src/toolkits/http/middleware/recovery.go +++ /dev/null @@ -1,160 +0,0 @@ -package middleware - -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/http/httputil" - "os" - "runtime" - "strings" - "time" - - "github.com/gin-gonic/gin" - "github.com/toolkits/pkg/errors" -) - -var ( - dunno = []byte("???") - centerDot = []byte("·") - dot = []byte(".") - slash = []byte("/") -) - -// Recovery returns a middleware that recovers from any panics and writes a 500 if there was one. -func Recovery() gin.HandlerFunc { - return RecoveryWithWriter(gin.DefaultErrorWriter) -} - -// RecoveryWithWriter returns a middleware for a given writer that recovers from any panics and writes a 500 if there was one. 
-func RecoveryWithWriter(out io.Writer) gin.HandlerFunc { - var logger *log.Logger - if out != nil { - logger = log.New(out, "\n\n\x1b[31m", log.LstdFlags) - } - return func(c *gin.Context) { - defer func() { - if err := recover(); err != nil { - // custom error - if e, ok := err.(errors.PageError); ok { - c.JSON(200, gin.H{"err": e.Message}) - c.Abort() - return - } - - // Check for a broken connection, as it is not really a - // condition that warrants a panic stack trace. - var brokenPipe bool - if ne, ok := err.(*net.OpError); ok { - if se, ok := ne.Err.(*os.SyscallError); ok { - if strings.Contains(strings.ToLower(se.Error()), "broken pipe") || strings.Contains(strings.ToLower(se.Error()), "connection reset by peer") { - brokenPipe = true - } - } - } - if logger != nil { - stack := stack(3) - httpRequest, _ := httputil.DumpRequest(c.Request, false) - headers := strings.Split(string(httpRequest), "\r\n") - for idx, header := range headers { - current := strings.Split(header, ":") - if current[0] == "Authorization" { - headers[idx] = current[0] + ": *" - } - } - if brokenPipe { - logger.Printf("%s\n%s%s", err, string(httpRequest), reset) - } else if gin.IsDebugging() { - logger.Printf("[Recovery] %s panic recovered:\n%s\n%s\n%s%s", - timeFormat(time.Now()), strings.Join(headers, "\r\n"), err, stack, reset) - } else { - logger.Printf("[Recovery] %s panic recovered:\n%s\n%s%s", - timeFormat(time.Now()), err, stack, reset) - } - } - - // If the connection is dead, we can't write a status to it. - if brokenPipe { - c.Error(err.(error)) // nolint: errcheck - c.Abort() - } else { - c.AbortWithStatus(http.StatusInternalServerError) - } - } - }() - c.Next() - } -} - -// stack returns a nicely formatted stack frame, skipping skip frames. -func stack(skip int) []byte { - buf := new(bytes.Buffer) // the returned data - // As we loop, we open files and read them. These variables record the currently - // loaded file. 
- var lines [][]byte - var lastFile string - for i := skip; ; i++ { // Skip the expected number of frames - pc, file, line, ok := runtime.Caller(i) - if !ok { - break - } - // Print this much at least. If we can't find the source, it won't show. - fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc) - if file != lastFile { - data, err := ioutil.ReadFile(file) - if err != nil { - continue - } - lines = bytes.Split(data, []byte{'\n'}) - lastFile = file - } - fmt.Fprintf(buf, "\t%s: %s\n", function(pc), source(lines, line)) - } - return buf.Bytes() -} - -// source returns a space-trimmed slice of the n'th line. -func source(lines [][]byte, n int) []byte { - n-- // in stack trace, lines are 1-indexed but our array is 0-indexed - if n < 0 || n >= len(lines) { - return dunno - } - return bytes.TrimSpace(lines[n]) -} - -// function returns, if possible, the name of the function containing the PC. -func function(pc uintptr) []byte { - fn := runtime.FuncForPC(pc) - if fn == nil { - return dunno - } - name := []byte(fn.Name()) - // The name includes the path name to the package, which is unnecessary - // since the file name is already included. Plus, it has center dots. - // That is, we see - // runtime/debug.*T·ptrmethod - // and want - // *T.ptrmethod - // Also the package path might contains dot (e.g. 
code.google.com/...), - // so first eliminate the path prefix - if lastSlash := bytes.LastIndex(name, slash); lastSlash >= 0 { - name = name[lastSlash+1:] - } - if period := bytes.Index(name, dot); period >= 0 { - name = name[period+1:] - } - name = bytes.Replace(name, centerDot, dot, -1) - return name -} - -func timeFormat(t time.Time) string { - return t.Format("2006/01/02 - 15:04:05") -} diff --git a/src/toolkits/http/render/funcs.go b/src/toolkits/http/render/funcs.go deleted file mode 100644 index 798e38d6..00000000 --- a/src/toolkits/http/render/funcs.go +++ /dev/null @@ -1,26 +0,0 @@ -package render - -import "github.com/gin-gonic/gin" - -func Message(c *gin.Context, v interface{}) { - if v == nil { - c.JSON(200, gin.H{"err": ""}) - return - } - - switch t := v.(type) { - case string: - c.JSON(200, gin.H{"err": t}) - case error: - c.JSON(200, gin.H{"err": t.Error()}) - } -} - -func Data(c *gin.Context, data interface{}, err error) { - if err == nil { - c.JSON(200, gin.H{"dat": data, "err": ""}) - return - } - - Message(c, err.Error()) -} diff --git a/vendor/github.com/alouca/gologger/.gitignore b/vendor/github.com/alouca/gologger/.gitignore new file mode 100644 index 00000000..00268614 --- /dev/null +++ b/vendor/github.com/alouca/gologger/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/alouca/gologger/README.md b/vendor/github.com/alouca/gologger/README.md new file mode 100644 index 00000000..cef25ae5 --- /dev/null +++ b/vendor/github.com/alouca/gologger/README.md @@ -0,0 +1,4 @@ +gologger +======== + +A simple CLI/File logger for Go \ No newline at end of file diff --git a/vendor/github.com/alouca/gologger/logger.go b/vendor/github.com/alouca/gologger/logger.go new file mode 
100644 index 00000000..f85b0ebb --- /dev/null +++ b/vendor/github.com/alouca/gologger/logger.go @@ -0,0 +1,88 @@ +package logger + +import ( + "fmt" + "log" + "os" + "runtime" + "strings" +) + +type Logger struct { + DebugFlag bool + VerboseFlag bool + *log.Logger +} + +var ( + def *Logger +) + +func CreateLogger(verbose, debug bool) *Logger { + def = InitLogger(verbose, debug, log.Ldate|log.Ltime) + + return def +} + +func CreateLoggerWithFile(verbose, debug bool, file string) *Logger { + def = InitLoggerWithFile(verbose, debug, file, log.Ldate|log.Ltime) + + return def +} + +func GetDefaultLogger() *Logger { + return def +} + +func InitLogger(debug, verbose bool, flag int) *Logger { + l := &Logger{debug, verbose, log.New(os.Stdout, "", flag)} + def = l + return l +} + +func InitLoggerWithFile(debug, verbose bool, file string, flag int) *Logger { + f, e := os.OpenFile(file, os.O_WRONLY, 0666) + + if e != nil { + panic("Unable to open log file.") + } + + l := &Logger{debug, verbose, log.New(f, "", flag)} + def = l + return l +} + +func (x *Logger) Debug(format string, v ...interface{}) { + if x.DebugFlag { + x.Printf("[DEBUG] " + x.getVerboseInfo() + fmt.Sprintf(format, v...)) + } +} + +func (x *Logger) Info(format string, v ...interface{}) { + x.getVerboseInfo() + x.Printf("[INFO] " + x.getVerboseInfo() + fmt.Sprintf(format, v...)) +} + +func (x *Logger) Error(format string, v ...interface{}) { + x.Printf("[ERROR] " + x.getVerboseInfo() + fmt.Sprintf(format, v...)) +} + +func (x *Logger) Fatal(format string, v ...interface{}) { + x.Printf("[FATAL] " + x.getVerboseInfo() + fmt.Sprintf(format, v...)) +} + +func (x *Logger) getVerboseInfo() string { + var verboseInfo string + // If verbose info is enabled + if x.VerboseFlag { + // Retrieve 3 stacks behind to get the actual caller. 
+ pc := make([]uintptr, 1) + ret := runtime.Callers(3, pc) + if ret > 0 { + f := runtime.FuncForPC(pc[0]) + file, line := f.FileLine(pc[0]) + verboseInfo = fmt.Sprintf("%s:%d (%s) ", file[strings.LastIndex(file, "/")+1:], line, f.Name()) + } + } + return verboseInfo +} diff --git a/vendor/github.com/codegangsta/negroni/.gitignore b/vendor/github.com/codegangsta/negroni/.gitignore deleted file mode 100644 index 3f2bc474..00000000 --- a/vendor/github.com/codegangsta/negroni/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/coverage.txt diff --git a/vendor/github.com/codegangsta/negroni/.travis.yml b/vendor/github.com/codegangsta/negroni/.travis.yml deleted file mode 100644 index 18104334..00000000 --- a/vendor/github.com/codegangsta/negroni/.travis.yml +++ /dev/null @@ -1,27 +0,0 @@ -language: go - -sudo: false -dist: trusty - -go: -- 1.x -- 1.2.x -- 1.3.x -- 1.4.x -- 1.5.x -- 1.6.x -- 1.7.x -- 1.8.x -- master - -before_install: -- find "${GOPATH%%:*}" -name '*.a' -delete -- rm -rf "${GOPATH%%:*}/src/golang.org" -- go get golang.org/x/tools/cover -- go get golang.org/x/tools/cmd/cover - -script: -- go test -race -coverprofile=coverage.txt -covermode=atomic - -after_success: -- bash <(curl -s "https://codecov.io/bash") diff --git a/vendor/github.com/codegangsta/negroni/CHANGELOG.md b/vendor/github.com/codegangsta/negroni/CHANGELOG.md deleted file mode 100644 index 9a6d638e..00000000 --- a/vendor/github.com/codegangsta/negroni/CHANGELOG.md +++ /dev/null @@ -1,69 +0,0 @@ -# Change Log - -**ATTN**: This project uses [semantic versioning](http://semver.org/). 
- -## [Unreleased] - - -## [1.0.0] - 2018-09-01 - -### Fixed -- `Logger` middleware now correctly handles paths containing a `%` instead of trying to treat it as a format specifier - -## [0.3.0] - 2017-11-11 -### Added -- `With()` helper for building a new `Negroni` struct chaining handlers from - existing `Negroni` structs -- Format log output in `Logger` middleware via a configurable `text/template` - string injectable via `.SetFormat`. Added `LoggerDefaultFormat` and - `LoggerDefaultDateFormat` to configure the default template and date format - used by the `Logger` middleware. -- Support for HTTP/2 pusher support via `http.Pusher` interface for Go 1.8+. -- `WrapFunc` to convert `http.HandlerFunc` into a `negroni.Handler` -- `Formatter` field added to `Recovery` middleware to allow configuring how - `panic`s are output. Default of `TextFormatter` (how it was output in - `0.2.0`) used. `HTMLPanicFormatter` also added to allow easy outputing of - `panic`s as HTML. - -### Fixed -- `Written()` correct returns `false` if no response header has been written -- Only implement `http.CloseNotifier` with the `negroni.ResponseWriter` if the - underlying `http.ResponseWriter` implements it (previously would always - implement it and panic if the underlying `http.ResponseWriter` did not. - -### Changed -- Set default status to `0` in the case that no handler writes status -- was - previously `200` (in 0.2.0, before that it was `0` so this reestablishes that - behavior) -- Catch `panic`s thrown by callbacks provided to the `Recovery` handler -- Recovery middleware will set `text/plain` content-type if none is set -- `ALogger` interface to allow custom logger outputs to be used with the - `Logger` middleware. Changes embeded field in `negroni.Logger` from `Logger` - to `ALogger`. 
-- Default `Logger` middleware output changed to be more structure and verbose - (also now configurable, see `Added`) -- Automatically bind to port specified in `$PORT` in `.Run()` if an address is - not passed in. Fall back to binding to `:8080` if no address specified - (configuable via `DefaultAddress`). -- `PanicHandlerFunc` added to `Recovery` middleware to enhance custom handling - of `panic`s by providing additional information to the handler including the - stack and the `http.Request`. `Recovery.ErrorHandlerFunc` was also added, but - deprecated in favor of the new `PanicHandlerFunc`. - -## [0.2.0] - 2016-05-10 -### Added -- Support for variadic handlers in `New()` -- Added `Negroni.Handlers()` to fetch all of the handlers for a given chain -- Allowed size in `Recovery` handler was bumped to 8k -- `Negroni.UseFunc` to push another handler onto the chain - -### Changed -- Set the status before calling `beforeFuncs` so the information is available to them -- Set default status to `200` in the case that no handler writes status -- was previously `0` -- Panic if `nil` handler is given to `negroni.Use` - -## 0.1.0 - 2013-07-22 -### Added -- Initial implementation. 
- -[Unreleased]: https://github.com/urfave/negroni/compare/v0.2.0...HEAD -[0.2.0]: https://github.com/urfave/negroni/compare/v0.1.0...v0.2.0 diff --git a/vendor/github.com/codegangsta/negroni/README.md b/vendor/github.com/codegangsta/negroni/README.md deleted file mode 100644 index 17298559..00000000 --- a/vendor/github.com/codegangsta/negroni/README.md +++ /dev/null @@ -1,549 +0,0 @@ -# Negroni -[![GoDoc](https://godoc.org/github.com/urfave/negroni?status.svg)](http://godoc.org/github.com/urfave/negroni) -[![Build Status](https://travis-ci.org/urfave/negroni.svg?branch=master)](https://travis-ci.org/urfave/negroni) -[![codebeat](https://codebeat.co/badges/47d320b1-209e-45e8-bd99-9094bc5111e2)](https://codebeat.co/projects/github-com-urfave-negroni) -[![codecov](https://codecov.io/gh/urfave/negroni/branch/master/graph/badge.svg)](https://codecov.io/gh/urfave/negroni) - -**Notice:** This is the library formerly known as -`github.com/codegangsta/negroni` -- Github will automatically redirect requests -to this repository, but we recommend updating your references for clarity. - -Negroni is an idiomatic approach to web middleware in Go. It is tiny, -non-intrusive, and encourages use of `net/http` Handlers. - -If you like the idea of [Martini](https://github.com/go-martini/martini), but -you think it contains too much magic, then Negroni is a great fit. - -Language Translations: -* [Deutsch (de_DE)](translations/README_de_de.md) -* [Português Brasileiro (pt_BR)](translations/README_pt_br.md) -* [简体中文 (zh_CN)](translations/README_zh_CN.md) -* [繁體中文 (zh_TW)](translations/README_zh_tw.md) -* [日本語 (ja_JP)](translations/README_ja_JP.md) -* [Français (fr_FR)](translations/README_fr_FR.md) - -## Getting Started - -After installing Go and setting up your -[GOPATH](http://golang.org/doc/code.html#GOPATH), create your first `.go` file. -We'll call it `server.go`. 
- - -``` go -package main - -import ( - "fmt" - "net/http" - - "github.com/urfave/negroni" -) - -func main() { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - fmt.Fprintf(w, "Welcome to the home page!") - }) - - n := negroni.Classic() // Includes some default middlewares - n.UseHandler(mux) - - http.ListenAndServe(":3000", n) -} -``` - -Then install the Negroni package (**NOTE**: >= **go 1.1** is required): - -``` -go get github.com/urfave/negroni -``` - -Then run your server: - -``` -go run server.go -``` - -You will now have a Go `net/http` webserver running on `localhost:3000`. - -### Packaging - -If you are on Debian, `negroni` is also available as [a -package](https://packages.debian.org/sid/golang-github-urfave-negroni-dev) that -you can install via `apt install golang-github-urfave-negroni-dev` (at the time -of writing, it is in the `sid` repositories). - -## Is Negroni a Framework? - -Negroni is **not** a framework. It is a middleware-focused library that is -designed to work directly with `net/http`. - -## Routing? - -Negroni is BYOR (Bring your own Router). The Go community already has a number -of great http routers available, and Negroni tries to play well with all of them -by fully supporting `net/http`. For instance, integrating with [Gorilla Mux] -looks like so: - -``` go -router := mux.NewRouter() -router.HandleFunc("/", HomeHandler) - -n := negroni.New(Middleware1, Middleware2) -// Or use a middleware with the Use() function -n.Use(Middleware3) -// router goes last -n.UseHandler(router) - -http.ListenAndServe(":3001", n) -``` - -## `negroni.Classic()` - -`negroni.Classic()` provides some default middleware that is useful for most -applications: - -* [`negroni.Recovery`](#recovery) - Panic Recovery Middleware. -* [`negroni.Logger`](#logger) - Request/Response Logger Middleware. -* [`negroni.Static`](#static) - Static File serving under the "public" - directory. 
- -This makes it really easy to get started with some useful features from Negroni. - -## Handlers - -Negroni provides a bidirectional middleware flow. This is done through the -`negroni.Handler` interface: - -``` go -type Handler interface { - ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) -} -``` - -If a middleware hasn't already written to the `ResponseWriter`, it should call -the next `http.HandlerFunc` in the chain to yield to the next middleware -handler. This can be used for great good: - -``` go -func MyMiddleware(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - // do some stuff before - next(rw, r) - // do some stuff after -} -``` - -And you can map it to the handler chain with the `Use` function: - -``` go -n := negroni.New() -n.Use(negroni.HandlerFunc(MyMiddleware)) -``` - -You can also map plain old `http.Handler`s: - -``` go -n := negroni.New() - -mux := http.NewServeMux() -// map your routes - -n.UseHandler(mux) - -http.ListenAndServe(":3000", n) -``` - -## `With()` - -Negroni has a convenience function called `With`. `With` takes one or more -`Handler` instances and returns a new `Negroni` with the combination of the -receiver's handlers and the new handlers. - -```go -// middleware we want to reuse -common := negroni.New() -common.Use(MyMiddleware1) -common.Use(MyMiddleware2) - -// `specific` is a new negroni with the handlers from `common` combined with the -// the handlers passed in -specific := common.With( - SpecificMiddleware1, - SpecificMiddleware2 -) -``` - -## `Run()` - -Negroni has a convenience function called `Run`. `Run` takes an addr string -identical to [`http.ListenAndServe`](https://godoc.org/net/http#ListenAndServe). - - -``` go -package main - -import ( - "github.com/urfave/negroni" -) - -func main() { - n := negroni.Classic() - n.Run(":8080") -} -``` -If no address is provided, the `PORT` environment variable is used instead. 
-If the `PORT` environment variable is not defined, the default address will be used. -See [Run](https://godoc.org/github.com/urfave/negroni#Negroni.Run) for a complete description. - -In general, you will want to use `net/http` methods and pass `negroni` as a -`Handler`, as this is more flexible, e.g.: - - -``` go -package main - -import ( - "fmt" - "log" - "net/http" - "time" - - "github.com/urfave/negroni" -) - -func main() { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - fmt.Fprintf(w, "Welcome to the home page!") - }) - - n := negroni.Classic() // Includes some default middlewares - n.UseHandler(mux) - - s := &http.Server{ - Addr: ":8080", - Handler: n, - ReadTimeout: 10 * time.Second, - WriteTimeout: 10 * time.Second, - MaxHeaderBytes: 1 << 20, - } - log.Fatal(s.ListenAndServe()) -} -``` - -## Route Specific Middleware - -If you have a route group of routes that need specific middleware to be -executed, you can simply create a new Negroni instance and use it as your route -handler. - -``` go -router := mux.NewRouter() -adminRoutes := mux.NewRouter() -// add admin routes here - -// Create a new negroni for the admin middleware -router.PathPrefix("/admin").Handler(negroni.New( - Middleware1, - Middleware2, - negroni.Wrap(adminRoutes), -)) -``` - -If you are using [Gorilla Mux], here is an example using a subrouter: - -``` go -router := mux.NewRouter() -subRouter := mux.NewRouter().PathPrefix("/subpath").Subrouter().StrictSlash(true) -subRouter.HandleFunc("/", someSubpathHandler) // "/subpath/" -subRouter.HandleFunc("/:id", someSubpathHandler) // "/subpath/:id" - -// "/subpath" is necessary to ensure the subRouter and main router linkup -router.PathPrefix("/subpath").Handler(negroni.New( - Middleware1, - Middleware2, - negroni.Wrap(subRouter), -)) -``` - -`With()` can be used to eliminate redundancy for middlewares shared across -routes. 
- -``` go -router := mux.NewRouter() -apiRoutes := mux.NewRouter() -// add api routes here -webRoutes := mux.NewRouter() -// add web routes here - -// create common middleware to be shared across routes -common := negroni.New( - Middleware1, - Middleware2, -) - -// create a new negroni for the api middleware -// using the common middleware as a base -router.PathPrefix("/api").Handler(common.With( - APIMiddleware1, - negroni.Wrap(apiRoutes), -)) -// create a new negroni for the web middleware -// using the common middleware as a base -router.PathPrefix("/web").Handler(common.With( - WebMiddleware1, - negroni.Wrap(webRoutes), -)) -``` - -## Bundled Middleware - -### Static - -This middleware will serve files on the filesystem. If the files do not exist, -it proxies the request to the next middleware. If you want the requests for -non-existent files to return a `404 File Not Found` to the user you should look -at using [http.FileServer](https://golang.org/pkg/net/http/#FileServer) as -a handler. - -Example: - - -``` go -package main - -import ( - "fmt" - "net/http" - - "github.com/urfave/negroni" -) - -func main() { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - fmt.Fprintf(w, "Welcome to the home page!") - }) - - // Example of using a http.FileServer if you want "server-like" rather than "middleware" behavior - // mux.Handle("/public", http.FileServer(http.Dir("/home/public"))) - - n := negroni.New() - n.Use(negroni.NewStatic(http.Dir("/tmp"))) - n.UseHandler(mux) - - http.ListenAndServe(":3002", n) -} -``` - -Will serve files from the `/tmp` directory first, but proxy calls to the next -handler if the request does not match a file on the filesystem. - -### Recovery - -This middleware catches `panic`s and responds with a `500` response code. 
If -any other middleware has written a response code or body, this middleware will -fail to properly send a 500 to the client, as the client has already received -the HTTP response code. Additionally, an `PanicHandlerFunc` can be attached -to report 500's to an error reporting service such as Sentry or Airbrake. - -Example: - - -``` go -package main - -import ( - "net/http" - - "github.com/urfave/negroni" -) - -func main() { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - panic("oh no") - }) - - n := negroni.New() - n.Use(negroni.NewRecovery()) - n.UseHandler(mux) - - http.ListenAndServe(":3003", n) -} -``` - -Will return a `500 Internal Server Error` to each request. It will also log the -stack traces as well as print the stack trace to the requester if `PrintStack` -is set to `true` (the default). - -Example with error handler: - -``` go -package main - -import ( - "net/http" - - "github.com/urfave/negroni" -) - -func main() { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - panic("oh no") - }) - - n := negroni.New() - recovery := negroni.NewRecovery() - recovery.PanicHandlerFunc = reportToSentry - n.Use(recovery) - n.UseHandler(mux) - - http.ListenAndServe(":3003", n) -} - -func reportToSentry(info *negroni.PanicInformation) { - // write code here to report error to Sentry -} -``` - -The middleware simply output the informations on STDOUT by default. -You can customize the output process by using the `SetFormatter()` function. - -You can use also the `HTMLPanicFormatter` to display a pretty HTML when a crash occurs. 
- - -``` go -package main - -import ( - "net/http" - - "github.com/urfave/negroni" -) - -func main() { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - panic("oh no") - }) - - n := negroni.New() - recovery := negroni.NewRecovery() - recovery.Formatter = &negroni.HTMLPanicFormatter{} - n.Use(recovery) - n.UseHandler(mux) - - http.ListenAndServe(":3003", n) -} -``` - -## Logger - -This middleware logs each incoming request and response. - -Example: - - -``` go -package main - -import ( - "fmt" - "net/http" - - "github.com/urfave/negroni" -) - -func main() { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - fmt.Fprintf(w, "Welcome to the home page!") - }) - - n := negroni.New() - n.Use(negroni.NewLogger()) - n.UseHandler(mux) - - http.ListenAndServe(":3004", n) -} -``` - -Will print a log similar to: - -``` -[negroni] 2017-10-04T14:56:25+02:00 | 200 | 378µs | localhost:3004 | GET / -``` - -on each request. - -You can also set your own log format by calling the `SetFormat` function. The format is a template string with fields as mentioned in the `LoggerEntry` struct. So, as an example - - -```go -l.SetFormat("[{{.Status}} {{.Duration}}] - {{.Request.UserAgent}}") -``` - -will show something like - `[200 18.263µs] - Go-User-Agent/1.1 ` - -## Third Party Middleware - -Here is a current list of Negroni compatible middlware. 
Feel free to put up a PR -linking your middleware if you have built one: - -| Middleware | Author | Description | -| -----------|--------|-------------| -| [authz](https://github.com/casbin/negroni-authz) | [Yang Luo](https://github.com/hsluoyz) | ACL, RBAC, ABAC Authorization middlware based on [Casbin](https://github.com/casbin/casbin) | -| [binding](https://github.com/mholt/binding) | [Matt Holt](https://github.com/mholt) | Data binding from HTTP requests into structs | -| [cloudwatch](https://github.com/cvillecsteele/negroni-cloudwatch) | [Colin Steele](https://github.com/cvillecsteele) | AWS cloudwatch metrics middleware | -| [cors](https://github.com/rs/cors) | [Olivier Poitrey](https://github.com/rs) | [Cross Origin Resource Sharing](http://www.w3.org/TR/cors/) (CORS) support | -| [csp](https://github.com/awakenetworks/csp) | [Awake Networks](https://github.com/awakenetworks) | [Content Security Policy](https://www.w3.org/TR/CSP2/) (CSP) support | -| [delay](https://github.com/jeffbmartinez/delay) | [Jeff Martinez](https://github.com/jeffbmartinez) | Add delays/latency to endpoints. 
Useful when testing effects of high latency | -| [New Relic Go Agent](https://github.com/yadvendar/negroni-newrelic-go-agent) | [Yadvendar Champawat](https://github.com/yadvendar) | Official [New Relic Go Agent](https://github.com/newrelic/go-agent) (currently in beta) | -| [gorelic](https://github.com/jingweno/negroni-gorelic) | [Jingwen Owen Ou](https://github.com/jingweno) | New Relic agent for Go runtime | -| [Graceful](https://github.com/tylerb/graceful) | [Tyler Bunnell](https://github.com/tylerb) | Graceful HTTP Shutdown | -| [gzip](https://github.com/phyber/negroni-gzip) | [phyber](https://github.com/phyber) | GZIP response compression | -| [JWT Middleware](https://github.com/auth0/go-jwt-middleware) | [Auth0](https://github.com/auth0) | Middleware checks for a JWT on the `Authorization` header on incoming requests and decodes it| -| [JWT Middleware](https://github.com/mfuentesg/go-jwtmiddleware) | [Marcelo Fuentes](https://github.com/mfuentesg) | JWT middleware for golang | -| [logrus](https://github.com/meatballhat/negroni-logrus) | [Dan Buch](https://github.com/meatballhat) | Logrus-based logger | -| [oauth2](https://github.com/goincremental/negroni-oauth2) | [David Bochenski](https://github.com/bochenski) | oAuth2 middleware | -| [onthefly](https://github.com/xyproto/onthefly) | [Alexander Rødseth](https://github.com/xyproto) | Generate TinySVG, HTML and CSS on the fly | -| [permissions2](https://github.com/xyproto/permissions2) | [Alexander Rødseth](https://github.com/xyproto) | Cookies, users and permissions | -| [prometheus](https://github.com/zbindenren/negroni-prometheus) | [Rene Zbinden](https://github.com/zbindenren) | Easily create metrics endpoint for the [prometheus](http://prometheus.io) instrumentation tool | -| [render](https://github.com/unrolled/render) | [Cory Jacobsen](https://github.com/unrolled) | Render JSON, XML and HTML templates | -| [RestGate](https://github.com/pjebs/restgate) | [Prasanga Siripala](https://github.com/pjebs) | 
Secure authentication for REST API endpoints | -| [secure](https://github.com/unrolled/secure) | [Cory Jacobsen](https://github.com/unrolled) | Middleware that implements a few quick security wins | -| [sessions](https://github.com/goincremental/negroni-sessions) | [David Bochenski](https://github.com/bochenski) | Session Management | -| [stats](https://github.com/thoas/stats) | [Florent Messa](https://github.com/thoas) | Store information about your web application (response time, etc.) | -| [VanGoH](https://github.com/auroratechnologies/vangoh) | [Taylor Wrobel](https://github.com/twrobel3) | Configurable [AWS-Style](http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) HMAC authentication middleware | -| [xrequestid](https://github.com/pilu/xrequestid) | [Andrea Franz](https://github.com/pilu) | Middleware that assigns a random X-Request-Id header to each request | -| [mgo session](https://github.com/joeljames/nigroni-mgo-session) | [Joel James](https://github.com/joeljames) | Middleware that handles creating and closing mgo sessions per request | -| [digits](https://github.com/bamarni/digits) | [Bilal Amarni](https://github.com/bamarni) | Middleware that handles [Twitter Digits](https://get.digits.com/) authentication | -| [stats](https://github.com/guptachirag/stats) | [Chirag Gupta](https://github.com/guptachirag/stats) | Middleware that manages qps and latency stats for your endpoints and asynchronously flushes them to influx db | -| [Chaos](https://github.com/falzm/chaos) | [Marc Falzon](https://github.com/falzm) | Middleware for injecting chaotic behavior into application in a programmatic way | - -## Examples - -[Alexander Rødseth](https://github.com/xyproto) created -[mooseware](https://github.com/xyproto/mooseware), a skeleton for writing a -Negroni middleware handler. 
- -[Prasanga Siripala](https://github.com/pjebs) created an effective skeleton structure for web-based Go/Negroni projects: [Go-Skeleton](https://github.com/pjebs/go-skeleton) - -## Live code reload? - -[gin](https://github.com/codegangsta/gin) and -[fresh](https://github.com/pilu/fresh) both live reload negroni apps. - -## Essential Reading for Beginners of Go & Negroni - -* [Using a Context to pass information from middleware to end handler](http://elithrar.github.io/article/map-string-interface/) -* [Understanding middleware](https://mattstauffer.co/blog/laravel-5.0-middleware-filter-style) - -## About - -Negroni is obsessively designed by none other than the [Code -Gangsta](https://codegangsta.io/) - -[Gorilla Mux]: https://github.com/gorilla/mux -[`http.FileSystem`]: https://godoc.org/net/http#FileSystem diff --git a/vendor/github.com/codegangsta/negroni/doc.go b/vendor/github.com/codegangsta/negroni/doc.go deleted file mode 100644 index add1ed9f..00000000 --- a/vendor/github.com/codegangsta/negroni/doc.go +++ /dev/null @@ -1,25 +0,0 @@ -// Package negroni is an idiomatic approach to web middleware in Go. It is tiny, non-intrusive, and encourages use of net/http Handlers. -// -// If you like the idea of Martini, but you think it contains too much magic, then Negroni is a great fit. 
-// -// For a full guide visit http://github.com/urfave/negroni -// -// package main -// -// import ( -// "github.com/urfave/negroni" -// "net/http" -// "fmt" -// ) -// -// func main() { -// mux := http.NewServeMux() -// mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { -// fmt.Fprintf(w, "Welcome to the home page!") -// }) -// -// n := negroni.Classic() -// n.UseHandler(mux) -// n.Run(":3000") -// } -package negroni diff --git a/vendor/github.com/codegangsta/negroni/logger.go b/vendor/github.com/codegangsta/negroni/logger.go deleted file mode 100644 index 9c2216ba..00000000 --- a/vendor/github.com/codegangsta/negroni/logger.go +++ /dev/null @@ -1,78 +0,0 @@ -package negroni - -import ( - "bytes" - - "log" - "net/http" - "os" - "text/template" - "time" -) - -// LoggerEntry is the structure passed to the template. -type LoggerEntry struct { - StartTime string - Status int - Duration time.Duration - Hostname string - Method string - Path string - Request *http.Request -} - -// LoggerDefaultFormat is the format logged used by the default Logger instance. -var LoggerDefaultFormat = "{{.StartTime}} | {{.Status}} | \t {{.Duration}} | {{.Hostname}} | {{.Method}} {{.Path}}" - -// LoggerDefaultDateFormat is the format used for date by the default Logger instance. -var LoggerDefaultDateFormat = time.RFC3339 - -// ALogger interface -type ALogger interface { - Println(v ...interface{}) - Printf(format string, v ...interface{}) -} - -// Logger is a middleware handler that logs the request as it goes in and the response as it goes out. 
-type Logger struct { - // ALogger implements just enough log.Logger interface to be compatible with other implementations - ALogger - dateFormat string - template *template.Template -} - -// NewLogger returns a new Logger instance -func NewLogger() *Logger { - logger := &Logger{ALogger: log.New(os.Stdout, "[negroni] ", 0), dateFormat: LoggerDefaultDateFormat} - logger.SetFormat(LoggerDefaultFormat) - return logger -} - -func (l *Logger) SetFormat(format string) { - l.template = template.Must(template.New("negroni_parser").Parse(format)) -} - -func (l *Logger) SetDateFormat(format string) { - l.dateFormat = format -} - -func (l *Logger) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - start := time.Now() - - next(rw, r) - - res := rw.(ResponseWriter) - log := LoggerEntry{ - StartTime: start.Format(l.dateFormat), - Status: res.Status(), - Duration: time.Since(start), - Hostname: r.Host, - Method: r.Method, - Path: r.URL.Path, - Request: r, - } - - buff := &bytes.Buffer{} - l.template.Execute(buff, log) - l.Println(buff.String()) -} diff --git a/vendor/github.com/codegangsta/negroni/negroni.go b/vendor/github.com/codegangsta/negroni/negroni.go deleted file mode 100644 index d1d77820..00000000 --- a/vendor/github.com/codegangsta/negroni/negroni.go +++ /dev/null @@ -1,169 +0,0 @@ -package negroni - -import ( - "log" - "net/http" - "os" -) - -const ( - // DefaultAddress is used if no other is specified. - DefaultAddress = ":8080" -) - -// Handler handler is an interface that objects can implement to be registered to serve as middleware -// in the Negroni middleware stack. -// ServeHTTP should yield to the next middleware in the chain by invoking the next http.HandlerFunc -// passed in. -// -// If the Handler writes to the ResponseWriter, the next http.HandlerFunc should not be invoked. 
-type Handler interface { - ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) -} - -// HandlerFunc is an adapter to allow the use of ordinary functions as Negroni handlers. -// If f is a function with the appropriate signature, HandlerFunc(f) is a Handler object that calls f. -type HandlerFunc func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) - -func (h HandlerFunc) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - h(rw, r, next) -} - -type middleware struct { - handler Handler - next *middleware -} - -func (m middleware) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - m.handler.ServeHTTP(rw, r, m.next.ServeHTTP) -} - -// Wrap converts a http.Handler into a negroni.Handler so it can be used as a Negroni -// middleware. The next http.HandlerFunc is automatically called after the Handler -// is executed. -func Wrap(handler http.Handler) Handler { - return HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - handler.ServeHTTP(rw, r) - next(rw, r) - }) -} - -// WrapFunc converts a http.HandlerFunc into a negroni.Handler so it can be used as a Negroni -// middleware. The next http.HandlerFunc is automatically called after the Handler -// is executed. -func WrapFunc(handlerFunc http.HandlerFunc) Handler { - return HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - handlerFunc(rw, r) - next(rw, r) - }) -} - -// Negroni is a stack of Middleware Handlers that can be invoked as an http.Handler. -// Negroni middleware is evaluated in the order that they are added to the stack using -// the Use and UseHandler methods. -type Negroni struct { - middleware middleware - handlers []Handler -} - -// New returns a new Negroni instance with no middleware preconfigured. 
-func New(handlers ...Handler) *Negroni { - return &Negroni{ - handlers: handlers, - middleware: build(handlers), - } -} - -// With returns a new Negroni instance that is a combination of the negroni -// receiver's handlers and the provided handlers. -func (n *Negroni) With(handlers ...Handler) *Negroni { - return New( - append(n.handlers, handlers...)..., - ) -} - -// Classic returns a new Negroni instance with the default middleware already -// in the stack. -// -// Recovery - Panic Recovery Middleware -// Logger - Request/Response Logging -// Static - Static File Serving -func Classic() *Negroni { - return New(NewRecovery(), NewLogger(), NewStatic(http.Dir("public"))) -} - -func (n *Negroni) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - n.middleware.ServeHTTP(NewResponseWriter(rw), r) -} - -// Use adds a Handler onto the middleware stack. Handlers are invoked in the order they are added to a Negroni. -func (n *Negroni) Use(handler Handler) { - if handler == nil { - panic("handler cannot be nil") - } - - n.handlers = append(n.handlers, handler) - n.middleware = build(n.handlers) -} - -// UseFunc adds a Negroni-style handler function onto the middleware stack. -func (n *Negroni) UseFunc(handlerFunc func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc)) { - n.Use(HandlerFunc(handlerFunc)) -} - -// UseHandler adds a http.Handler onto the middleware stack. Handlers are invoked in the order they are added to a Negroni. -func (n *Negroni) UseHandler(handler http.Handler) { - n.Use(Wrap(handler)) -} - -// UseHandlerFunc adds a http.HandlerFunc-style handler function onto the middleware stack. -func (n *Negroni) UseHandlerFunc(handlerFunc func(rw http.ResponseWriter, r *http.Request)) { - n.UseHandler(http.HandlerFunc(handlerFunc)) -} - -// Run is a convenience function that runs the negroni stack as an HTTP -// server. The addr string, if provided, takes the same format as http.ListenAndServe. 
-// If no address is provided but the PORT environment variable is set, the PORT value is used. -// If neither is provided, the address' value will equal the DefaultAddress constant. -func (n *Negroni) Run(addr ...string) { - l := log.New(os.Stdout, "[negroni] ", 0) - finalAddr := detectAddress(addr...) - l.Printf("listening on %s", finalAddr) - l.Fatal(http.ListenAndServe(finalAddr, n)) -} - -func detectAddress(addr ...string) string { - if len(addr) > 0 { - return addr[0] - } - if port := os.Getenv("PORT"); port != "" { - return ":" + port - } - return DefaultAddress -} - -// Returns a list of all the handlers in the current Negroni middleware chain. -func (n *Negroni) Handlers() []Handler { - return n.handlers -} - -func build(handlers []Handler) middleware { - var next middleware - - if len(handlers) == 0 { - return voidMiddleware() - } else if len(handlers) > 1 { - next = build(handlers[1:]) - } else { - next = voidMiddleware() - } - - return middleware{handlers[0], &next} -} - -func voidMiddleware() middleware { - return middleware{ - HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {}), - &middleware{}, - } -} diff --git a/vendor/github.com/codegangsta/negroni/recovery.go b/vendor/github.com/codegangsta/negroni/recovery.go deleted file mode 100644 index c6fc24ef..00000000 --- a/vendor/github.com/codegangsta/negroni/recovery.go +++ /dev/null @@ -1,194 +0,0 @@ -package negroni - -import ( - "fmt" - "log" - "net/http" - "os" - "runtime" - "runtime/debug" - "text/template" -) - -const ( - panicText = "PANIC: %s\n%s" - panicHTML = ` -PANIC: {{.RecoveredPanic}} - - -

Negroni - PANIC

- -
-

{{.RequestDescription}}

- Runtime error: {{.RecoveredPanic}} -
- -{{ if .Stack }} -
-

Runtime Stack

-
{{.StackAsString}}
-
-{{ end }} - - -` - nilRequestMessage = "Request is nil" -) - -var panicHTMLTemplate = template.Must(template.New("PanicPage").Parse(panicHTML)) - -// PanicInformation contains all -// elements for printing stack informations. -type PanicInformation struct { - RecoveredPanic interface{} - Stack []byte - Request *http.Request -} - -// StackAsString returns a printable version of the stack -func (p *PanicInformation) StackAsString() string { - return string(p.Stack) -} - -// RequestDescription returns a printable description of the url -func (p *PanicInformation) RequestDescription() string { - - if p.Request == nil { - return nilRequestMessage - } - - var queryOutput string - if p.Request.URL.RawQuery != "" { - queryOutput = "?" + p.Request.URL.RawQuery - } - return fmt.Sprintf("%s %s%s", p.Request.Method, p.Request.URL.Path, queryOutput) -} - -// PanicFormatter is an interface on object can implement -// to be able to output the stack trace -type PanicFormatter interface { - // FormatPanicError output the stack for a given answer/response. - // In case the the middleware should not output the stack trace, - // the field `Stack` of the passed `PanicInformation` instance equals `[]byte{}`. - FormatPanicError(rw http.ResponseWriter, r *http.Request, infos *PanicInformation) -} - -// TextPanicFormatter output the stack -// as simple text on os.Stdout. If no `Content-Type` is set, -// it will output the data as `text/plain; charset=utf-8`. -// Otherwise, the origin `Content-Type` is kept. -type TextPanicFormatter struct{} - -func (t *TextPanicFormatter) FormatPanicError(rw http.ResponseWriter, r *http.Request, infos *PanicInformation) { - if rw.Header().Get("Content-Type") == "" { - rw.Header().Set("Content-Type", "text/plain; charset=utf-8") - } - fmt.Fprintf(rw, panicText, infos.RecoveredPanic, infos.Stack) -} - -// HTMLPanicFormatter output the stack inside -// an HTML page. This has been largely inspired by -// https://github.com/go-martini/martini/pull/156/commits. 
-type HTMLPanicFormatter struct{} - -func (t *HTMLPanicFormatter) FormatPanicError(rw http.ResponseWriter, r *http.Request, infos *PanicInformation) { - if rw.Header().Get("Content-Type") == "" { - rw.Header().Set("Content-Type", "text/html; charset=utf-8") - } - panicHTMLTemplate.Execute(rw, infos) -} - -// Recovery is a Negroni middleware that recovers from any panics and writes a 500 if there was one. -type Recovery struct { - Logger ALogger - PrintStack bool - PanicHandlerFunc func(*PanicInformation) - StackAll bool - StackSize int - Formatter PanicFormatter - - // Deprecated: Use PanicHandlerFunc instead to receive panic - // error with additional information (see PanicInformation) - ErrorHandlerFunc func(interface{}) -} - -// NewRecovery returns a new instance of Recovery -func NewRecovery() *Recovery { - return &Recovery{ - Logger: log.New(os.Stdout, "[negroni] ", 0), - PrintStack: true, - StackAll: false, - StackSize: 1024 * 8, - Formatter: &TextPanicFormatter{}, - } -} - -func (rec *Recovery) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - defer func() { - if err := recover(); err != nil { - rw.WriteHeader(http.StatusInternalServerError) - - stack := make([]byte, rec.StackSize) - stack = stack[:runtime.Stack(stack, rec.StackAll)] - infos := &PanicInformation{RecoveredPanic: err, Request: r} - - if rec.PrintStack { - infos.Stack = stack - } - rec.Logger.Printf(panicText, err, stack) - rec.Formatter.FormatPanicError(rw, r, infos) - - if rec.ErrorHandlerFunc != nil { - func() { - defer func() { - if err := recover(); err != nil { - rec.Logger.Printf("provided ErrorHandlerFunc panic'd: %s, trace:\n%s", err, debug.Stack()) - rec.Logger.Printf("%s\n", debug.Stack()) - } - }() - rec.ErrorHandlerFunc(err) - }() - } - if rec.PanicHandlerFunc != nil { - func() { - defer func() { - if err := recover(); err != nil { - rec.Logger.Printf("provided PanicHandlerFunc panic'd: %s, trace:\n%s", err, debug.Stack()) - rec.Logger.Printf("%s\n", 
debug.Stack()) - } - }() - rec.PanicHandlerFunc(infos) - }() - } - } - }() - - next(rw, r) -} diff --git a/vendor/github.com/codegangsta/negroni/response_writer.go b/vendor/github.com/codegangsta/negroni/response_writer.go deleted file mode 100644 index cc507eb4..00000000 --- a/vendor/github.com/codegangsta/negroni/response_writer.go +++ /dev/null @@ -1,113 +0,0 @@ -package negroni - -import ( - "bufio" - "fmt" - "net" - "net/http" -) - -// ResponseWriter is a wrapper around http.ResponseWriter that provides extra information about -// the response. It is recommended that middleware handlers use this construct to wrap a responsewriter -// if the functionality calls for it. -type ResponseWriter interface { - http.ResponseWriter - http.Flusher - // Status returns the status code of the response or 0 if the response has - // not been written - Status() int - // Written returns whether or not the ResponseWriter has been written. - Written() bool - // Size returns the size of the response body. - Size() int - // Before allows for a function to be called before the ResponseWriter has been written to. This is - // useful for setting headers or any other operations that must happen before a response has been written. 
- Before(func(ResponseWriter)) -} - -type beforeFunc func(ResponseWriter) - -// NewResponseWriter creates a ResponseWriter that wraps an http.ResponseWriter -func NewResponseWriter(rw http.ResponseWriter) ResponseWriter { - nrw := &responseWriter{ - ResponseWriter: rw, - } - - if _, ok := rw.(http.CloseNotifier); ok { - return &responseWriterCloseNotifer{nrw} - } - - return nrw -} - -type responseWriter struct { - http.ResponseWriter - status int - size int - beforeFuncs []beforeFunc -} - -func (rw *responseWriter) WriteHeader(s int) { - rw.status = s - rw.callBefore() - rw.ResponseWriter.WriteHeader(s) -} - -func (rw *responseWriter) Write(b []byte) (int, error) { - if !rw.Written() { - // The status will be StatusOK if WriteHeader has not been called yet - rw.WriteHeader(http.StatusOK) - } - size, err := rw.ResponseWriter.Write(b) - rw.size += size - return size, err -} - -func (rw *responseWriter) Status() int { - return rw.status -} - -func (rw *responseWriter) Size() int { - return rw.size -} - -func (rw *responseWriter) Written() bool { - return rw.status != 0 -} - -func (rw *responseWriter) Before(before func(ResponseWriter)) { - rw.beforeFuncs = append(rw.beforeFuncs, before) -} - -func (rw *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - hijacker, ok := rw.ResponseWriter.(http.Hijacker) - if !ok { - return nil, nil, fmt.Errorf("the ResponseWriter doesn't support the Hijacker interface") - } - return hijacker.Hijack() -} - -func (rw *responseWriter) callBefore() { - for i := len(rw.beforeFuncs) - 1; i >= 0; i-- { - rw.beforeFuncs[i](rw) - } -} - -func (rw *responseWriter) Flush() { - flusher, ok := rw.ResponseWriter.(http.Flusher) - if ok { - if !rw.Written() { - // The status will be StatusOK if WriteHeader has not been called yet - rw.WriteHeader(http.StatusOK) - } - flusher.Flush() - } -} - -type responseWriterCloseNotifer struct { - *responseWriter -} - -func (rw *responseWriterCloseNotifer) CloseNotify() <-chan bool { - return 
rw.ResponseWriter.(http.CloseNotifier).CloseNotify() -} diff --git a/vendor/github.com/codegangsta/negroni/response_writer_pusher.go b/vendor/github.com/codegangsta/negroni/response_writer_pusher.go deleted file mode 100644 index 213cb35f..00000000 --- a/vendor/github.com/codegangsta/negroni/response_writer_pusher.go +++ /dev/null @@ -1,16 +0,0 @@ -//+build go1.8 - -package negroni - -import ( - "fmt" - "net/http" -) - -func (rw *responseWriter) Push(target string, opts *http.PushOptions) error { - pusher, ok := rw.ResponseWriter.(http.Pusher) - if ok { - return pusher.Push(target, opts) - } - return fmt.Errorf("the ResponseWriter doesn't support the Pusher interface") -} diff --git a/vendor/github.com/codegangsta/negroni/static.go b/vendor/github.com/codegangsta/negroni/static.go deleted file mode 100644 index 34be967c..00000000 --- a/vendor/github.com/codegangsta/negroni/static.go +++ /dev/null @@ -1,88 +0,0 @@ -package negroni - -import ( - "net/http" - "path" - "strings" -) - -// Static is a middleware handler that serves static files in the given -// directory/filesystem. If the file does not exist on the filesystem, it -// passes along to the next middleware in the chain. If you desire "fileserver" -// type behavior where it returns a 404 for unfound files, you should consider -// using http.FileServer from the Go stdlib. -type Static struct { - // Dir is the directory to serve static files from - Dir http.FileSystem - // Prefix is the optional prefix used to serve the static directory content - Prefix string - // IndexFile defines which file to serve as index if it exists. 
- IndexFile string -} - -// NewStatic returns a new instance of Static -func NewStatic(directory http.FileSystem) *Static { - return &Static{ - Dir: directory, - Prefix: "", - IndexFile: "index.html", - } -} - -func (s *Static) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - if r.Method != "GET" && r.Method != "HEAD" { - next(rw, r) - return - } - file := r.URL.Path - // if we have a prefix, filter requests by stripping the prefix - if s.Prefix != "" { - if !strings.HasPrefix(file, s.Prefix) { - next(rw, r) - return - } - file = file[len(s.Prefix):] - if file != "" && file[0] != '/' { - next(rw, r) - return - } - } - f, err := s.Dir.Open(file) - if err != nil { - // discard the error? - next(rw, r) - return - } - defer f.Close() - - fi, err := f.Stat() - if err != nil { - next(rw, r) - return - } - - // try to serve index file - if fi.IsDir() { - // redirect if missing trailing slash - if !strings.HasSuffix(r.URL.Path, "/") { - http.Redirect(rw, r, r.URL.Path+"/", http.StatusFound) - return - } - - file = path.Join(file, s.IndexFile) - f, err = s.Dir.Open(file) - if err != nil { - next(rw, r) - return - } - defer f.Close() - - fi, err = f.Stat() - if err != nil || fi.IsDir() { - next(rw, r) - return - } - } - - http.ServeContent(rw, r, file, fi.ModTime(), f) -} diff --git a/vendor/github.com/dgryski/go-tsz/.gitignore b/vendor/github.com/dgryski/go-tsz/.gitignore deleted file mode 100644 index 28bb1653..00000000 --- a/vendor/github.com/dgryski/go-tsz/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -eval/eval -/target -*.test diff --git a/vendor/github.com/dgryski/go-tsz/.travis.yml b/vendor/github.com/dgryski/go-tsz/.travis.yml deleted file mode 100644 index 10c8a832..00000000 --- a/vendor/github.com/dgryski/go-tsz/.travis.yml +++ /dev/null @@ -1,38 +0,0 @@ -language: go - -sudo: false - -branches: - except: - - release - -branches: - only: - - master - - develop - - travis - -go: - - 1.9 - - tip - -matrix: - allow_failures: - - go: tip - 
-before_install: - - if [ -n "$GH_USER" ]; then git config --global github.user ${GH_USER}; fi; - - if [ -n "$GH_TOKEN" ]; then git config --global github.token ${GH_TOKEN}; fi; - - go get github.com/mattn/goveralls - -before_script: - - make deps - -script: - - make qa - -after_failure: - - cat ./target/test/report.xml - -after_success: - - if [ "$TRAVIS_GO_VERSION" = "1.9" ]; then $HOME/gopath/bin/goveralls -covermode=count -coverprofile=target/report/coverage.out -service=travis-ci; fi; diff --git a/vendor/github.com/dgryski/go-tsz/LICENSE b/vendor/github.com/dgryski/go-tsz/LICENSE deleted file mode 100644 index 97b198a4..00000000 --- a/vendor/github.com/dgryski/go-tsz/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2015,2016 Damian Gryski -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/dgryski/go-tsz/Makefile b/vendor/github.com/dgryski/go-tsz/Makefile deleted file mode 100644 index 230f9a75..00000000 --- a/vendor/github.com/dgryski/go-tsz/Makefile +++ /dev/null @@ -1,203 +0,0 @@ -# MAKEFILE -# -# @author Nicola Asuni -# @link https://github.com/dgryski/go-tsz -# -# This file is intended to be executed in a Linux-compatible system. -# It also assumes that the project has been cloned in the right path under GOPATH: -# $GOPATH/src/github.com/dgryski/go-tsz -# -# ------------------------------------------------------------------------------ - -# List special make targets that are not associated with files -.PHONY: help all test format fmtcheck vet lint coverage cyclo ineffassign misspell structcheck varcheck errcheck gosimple astscan qa deps clean nuke - -# Use bash as shell (Note: Ubuntu now uses dash which doesn't support PIPESTATUS). 
-SHELL=/bin/bash - -# CVS path (path to the parent dir containing the project) -CVSPATH=github.com/dgryski - -# Project owner -OWNER=dgryski - -# Project vendor -VENDOR=dgryski - -# Project name -PROJECT=go-tsz - -# Project version -VERSION=$(shell cat VERSION) - -# Name of RPM or DEB package -PKGNAME=${VENDOR}-${PROJECT} - -# Current directory -CURRENTDIR=$(shell pwd) - -# GO lang path -ifneq ($(GOPATH),) - ifeq ($(findstring $(GOPATH),$(CURRENTDIR)),) - # the defined GOPATH is not valid - GOPATH= - endif -endif -ifeq ($(GOPATH),) - # extract the GOPATH - GOPATH=$(firstword $(subst /src/, ,$(CURRENTDIR))) -endif - -# --- MAKE TARGETS --- - -# Display general help about this command -help: - @echo "" - @echo "$(PROJECT) Makefile." - @echo "GOPATH=$(GOPATH)" - @echo "The following commands are available:" - @echo "" - @echo " make qa : Run all the tests" - @echo " make test : Run the unit tests" - @echo "" - @echo " make format : Format the source code" - @echo " make fmtcheck : Check if the source code has been formatted" - @echo " make vet : Check for suspicious constructs" - @echo " make lint : Check for style errors" - @echo " make coverage : Generate the coverage report" - @echo " make cyclo : Generate the cyclomatic complexity report" - @echo " make ineffassign : Detect ineffectual assignments" - @echo " make misspell : Detect commonly misspelled words in source files" - @echo " make structcheck : Find unused struct fields" - @echo " make varcheck : Find unused global variables and constants" - @echo " make errcheck : Check that error return values are used" - @echo " make gosimple : Suggest code simplifications" - @echo " make astscan : GO AST scanner" - @echo "" - @echo " make docs : Generate source code documentation" - @echo "" - @echo " make deps : Get the dependencies" - @echo " make clean : Remove any build artifact" - @echo " make nuke : Deletes any intermediate file" - @echo "" - - -# Alias for help target -all: help - -# Run the unit tests -test: - 
@mkdir -p target/test - @mkdir -p target/report - GOPATH=$(GOPATH) \ - go test \ - -covermode=atomic \ - -bench=. \ - -race \ - -cpuprofile=target/report/cpu.out \ - -memprofile=target/report/mem.out \ - -mutexprofile=target/report/mutex.out \ - -coverprofile=target/report/coverage.out \ - -v . | \ - tee >(PATH=$(GOPATH)/bin:$(PATH) go-junit-report > target/test/report.xml); \ - test $${PIPESTATUS[0]} -eq 0 - -# Format the source code -format: - @find . -type f -name "*.go" -exec gofmt -s -w {} \; - -# Check if the source code has been formatted -fmtcheck: - @mkdir -p target - @find . -type f -name "*.go" -exec gofmt -s -d {} \; | tee target/format.diff - @test ! -s target/format.diff || { echo "ERROR: the source code has not been formatted - please use 'make format' or 'gofmt'"; exit 1; } - -# Check for syntax errors -vet: - GOPATH=$(GOPATH) go vet . - -# Check for style errors -lint: - GOPATH=$(GOPATH) PATH=$(GOPATH)/bin:$(PATH) golint . - -# Generate the coverage report -coverage: - @mkdir -p target/report - GOPATH=$(GOPATH) \ - go tool cover -html=target/report/coverage.out -o target/report/coverage.html - -# Report cyclomatic complexity -cyclo: - @mkdir -p target/report - GOPATH=$(GOPATH) gocyclo -avg ./ | tee target/report/cyclo.txt ; test $${PIPESTATUS[0]} -eq 0 - -# Detect ineffectual assignments -ineffassign: - @mkdir -p target/report - GOPATH=$(GOPATH) ineffassign ./ | tee target/report/ineffassign.txt ; test $${PIPESTATUS[0]} -eq 0 - -# Detect commonly misspelled words in source files -misspell: - @mkdir -p target/report - GOPATH=$(GOPATH) misspell -error ./ | tee target/report/misspell.txt ; test $${PIPESTATUS[0]} -eq 0 - -# Find unused struct fields -structcheck: - @mkdir -p target/report - GOPATH=$(GOPATH) structcheck -a ./ | tee target/report/structcheck.txt - -# Find unused global variables and constants -varcheck: - @mkdir -p target/report - GOPATH=$(GOPATH) varcheck -e ./ | tee target/report/varcheck.txt - -# Check that error return values are 
used -errcheck: - @mkdir -p target/report - GOPATH=$(GOPATH) errcheck ./ | tee target/report/errcheck.txt - -# Suggest code simplifications -gosimple: - @mkdir -p target/report - GOPATH=$(GOPATH) gosimple ./ | tee target/report/gosimple.txt - -# AST scanner -astscan: - @mkdir -p target/report - GOPATH=$(GOPATH) gas .//*.go | tee target/report/astscan.txt - -# Generate source docs -docs: - @mkdir -p target/docs - nohup sh -c 'GOPATH=$(GOPATH) godoc -http=127.0.0.1:6060' > target/godoc_server.log 2>&1 & - wget --directory-prefix=target/docs/ --execute robots=off --retry-connrefused --recursive --no-parent --adjust-extension --page-requisites --convert-links http://127.0.0.1:6060/pkg/github.com/${VENDOR}/${PROJECT}/ ; kill -9 `lsof -ti :6060` - @echo ''${PKGNAME}' Documentation ...' > target/docs/index.html - -# Alias to run all quality-assurance checks -qa: fmtcheck test vet lint coverage cyclo ineffassign misspell structcheck varcheck errcheck gosimple astscan - -# --- INSTALL --- - -# Get the dependencies -deps: - GOPATH=$(GOPATH) go get ./... - GOPATH=$(GOPATH) go get github.com/golang/lint/golint - GOPATH=$(GOPATH) go get github.com/jstemmer/go-junit-report - GOPATH=$(GOPATH) go get github.com/axw/gocov/gocov - GOPATH=$(GOPATH) go get github.com/fzipp/gocyclo - GOPATH=$(GOPATH) go get github.com/gordonklaus/ineffassign - GOPATH=$(GOPATH) go get github.com/client9/misspell/cmd/misspell - GOPATH=$(GOPATH) go get github.com/opennota/check/cmd/structcheck - GOPATH=$(GOPATH) go get github.com/opennota/check/cmd/varcheck - GOPATH=$(GOPATH) go get github.com/kisielk/errcheck - GOPATH=$(GOPATH) go get honnef.co/go/tools/cmd/gosimple - GOPATH=$(GOPATH) go get github.com/GoASTScanner/gas - -# Remove any build artifact -clean: - GOPATH=$(GOPATH) go clean ./... - -# Deletes any intermediate file -nuke: - rm -rf ./target - GOPATH=$(GOPATH) go clean -i ./... 
diff --git a/vendor/github.com/dgryski/go-tsz/README.md b/vendor/github.com/dgryski/go-tsz/README.md deleted file mode 100644 index 88cc72ec..00000000 --- a/vendor/github.com/dgryski/go-tsz/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# go-tsz - -* Package tsz implement time-series compression http://www.vldb.org/pvldb/vol8/p1816-teller.pdf in Go* - -[![Master Branch](https://img.shields.io/badge/branch-master-lightgray.svg)](https://github.com/dgryski/go-tsz/tree/master) -[![Master Build Status](https://secure.travis-ci.org/dgryski/go-tsz.svg?branch=master)](https://travis-ci.org/dgryski/go-tsz?branch=master) -[![Master Coverage Status](https://coveralls.io/repos/dgryski/go-tsz/badge.svg?branch=master&service=github)](https://coveralls.io/github/dgryski/go-tsz?branch=master) -[![Go Report Card](https://goreportcard.com/badge/github.com/dgryski/go-tsz)](https://goreportcard.com/report/github.com/dgryski/go-tsz) -[![GoDoc](https://godoc.org/github.com/dgryski/go-tsz?status.svg)](http://godoc.org/github.com/dgryski/go-tsz) - -## Description - -Package tsz implement the Gorilla Time Series Databasetime-series compression as described in: -http://www.vldb.org/pvldb/vol8/p1816-teller.pdf - - -## Getting started - -This application is written in Go language, please refer to the guides in https://golang.org for getting started. - -This project include a Makefile that allows you to test and build the project with simple commands. 
-To see all available options: -```bash -make help -``` - -## Running all tests - -Before committing the code, please check if it passes all tests using -```bash -make qa -``` diff --git a/vendor/github.com/dgryski/go-tsz/VERSION b/vendor/github.com/dgryski/go-tsz/VERSION deleted file mode 100644 index 3eefcb9d..00000000 --- a/vendor/github.com/dgryski/go-tsz/VERSION +++ /dev/null @@ -1 +0,0 @@ -1.0.0 diff --git a/vendor/github.com/dgryski/go-tsz/bstream.go b/vendor/github.com/dgryski/go-tsz/bstream.go deleted file mode 100644 index 5f754cb1..00000000 --- a/vendor/github.com/dgryski/go-tsz/bstream.go +++ /dev/null @@ -1,205 +0,0 @@ -package tsz - -import ( - "bytes" - "encoding/binary" - "io" -) - -// bstream is a stream of bits -type bstream struct { - // the data stream - stream []byte - - // how many bits are valid in current byte - count uint8 -} - -func newBReader(b []byte) *bstream { - return &bstream{stream: b, count: 8} -} - -func newBWriter(size int) *bstream { - return &bstream{stream: make([]byte, 0, size), count: 0} -} - -func (b *bstream) clone() *bstream { - d := make([]byte, len(b.stream)) - copy(d, b.stream) - return &bstream{stream: d, count: b.count} -} - -func (b *bstream) bytes() []byte { - return b.stream -} - -type bit bool - -const ( - zero bit = false - one bit = true -) - -func (b *bstream) writeBit(bit bit) { - - if b.count == 0 { - b.stream = append(b.stream, 0) - b.count = 8 - } - - i := len(b.stream) - 1 - - if bit { - b.stream[i] |= 1 << (b.count - 1) - } - - b.count-- -} - -func (b *bstream) writeByte(byt byte) { - - if b.count == 0 { - b.stream = append(b.stream, 0) - b.count = 8 - } - - i := len(b.stream) - 1 - - // fill up b.b with b.count bits from byt - b.stream[i] |= byt >> (8 - b.count) - - b.stream = append(b.stream, 0) - i++ - b.stream[i] = byt << b.count -} - -func (b *bstream) writeBits(u uint64, nbits int) { - u <<= (64 - uint(nbits)) - for nbits >= 8 { - byt := byte(u >> 56) - b.writeByte(byt) - u <<= 8 - nbits -= 8 - } - 
- for nbits > 0 { - b.writeBit((u >> 63) == 1) - u <<= 1 - nbits-- - } -} - -func (b *bstream) readBit() (bit, error) { - - if len(b.stream) == 0 { - return false, io.EOF - } - - if b.count == 0 { - b.stream = b.stream[1:] - // did we just run out of stuff to read? - if len(b.stream) == 0 { - return false, io.EOF - } - b.count = 8 - } - - b.count-- - d := b.stream[0] & 0x80 - b.stream[0] <<= 1 - return d != 0, nil -} - -func (b *bstream) readByte() (byte, error) { - - if len(b.stream) == 0 { - return 0, io.EOF - } - - if b.count == 0 { - b.stream = b.stream[1:] - - if len(b.stream) == 0 { - return 0, io.EOF - } - - b.count = 8 - } - - if b.count == 8 { - b.count = 0 - return b.stream[0], nil - } - - byt := b.stream[0] - b.stream = b.stream[1:] - - if len(b.stream) == 0 { - return 0, io.EOF - } - - byt |= b.stream[0] >> b.count - b.stream[0] <<= (8 - b.count) - - return byt, nil -} - -func (b *bstream) readBits(nbits int) (uint64, error) { - - var u uint64 - - for nbits >= 8 { - byt, err := b.readByte() - if err != nil { - return 0, err - } - - u = (u << 8) | uint64(byt) - nbits -= 8 - } - - if nbits == 0 { - return u, nil - } - - if nbits > int(b.count) { - u = (u << uint(b.count)) | uint64(b.stream[0]>>(8-b.count)) - nbits -= int(b.count) - b.stream = b.stream[1:] - - if len(b.stream) == 0 { - return 0, io.EOF - } - b.count = 8 - } - - u = (u << uint(nbits)) | uint64(b.stream[0]>>(8-uint(nbits))) - b.stream[0] <<= uint(nbits) - b.count -= uint8(nbits) - return u, nil -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface -func (b *bstream) MarshalBinary() ([]byte, error) { - buf := new(bytes.Buffer) - err := binary.Write(buf, binary.BigEndian, b.count) - if err != nil { - return nil, err - } - err = binary.Write(buf, binary.BigEndian, b.stream) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface -func (b *bstream) UnmarshalBinary(bIn []byte) error { - buf 
:= bytes.NewReader(bIn) - err := binary.Read(buf, binary.BigEndian, &b.count) - if err != nil { - return err - } - b.stream = make([]byte, buf.Len()) - return binary.Read(buf, binary.BigEndian, &b.stream) -} diff --git a/vendor/github.com/dgryski/go-tsz/fuzz.go b/vendor/github.com/dgryski/go-tsz/fuzz.go deleted file mode 100644 index ffcf7e9e..00000000 --- a/vendor/github.com/dgryski/go-tsz/fuzz.go +++ /dev/null @@ -1,69 +0,0 @@ -// +build gofuzz - -package tsz - -import ( - "encoding/binary" - "fmt" - "math" - - "github.com/dgryski/go-tsz/testdata" -) - -func Fuzz(data []byte) int { - - fuzzUnpack(data) - - if len(data) < 9 { - return 0 - } - - t0 := uint32(1456236677) - - v := float64(10000) - - var vals []testdata.Point - s := New(t0) - t := t0 - for len(data) >= 10 { - tdelta := uint32(binary.LittleEndian.Uint16(data)) - if t == t0 { - tdelta &= (1 << 14) - 1 - } - t += tdelta - data = data[2:] - v += float64(int16(binary.LittleEndian.Uint16(data))) + float64(binary.LittleEndian.Uint16(data[2:]))/float64(math.MaxUint16) - data = data[8:] - vals = append(vals, testdata.Point{V: v, T: t}) - s.Push(t, v) - } - - it := s.Iter() - - var i int - for it.Next() { - gt, gv := it.Values() - if gt != vals[i].T || (gv != vals[i].V || math.IsNaN(gv) && math.IsNaN(vals[i].V)) { - panic(fmt.Sprintf("failure: gt=%v vals[i].T=%v gv=%v vals[i].V=%v", gt, vals[i].T, gv, vals[i].V)) - } - i++ - } - - if i != len(vals) { - panic("extra data") - } - - return 1 -} - -func fuzzUnpack(data []byte) { - - it, err := NewIterator(data) - if err != nil { - return - } - - for it.Next() { - _, _ = it.Values() - } -} diff --git a/vendor/github.com/dgryski/go-tsz/testdata/data.go b/vendor/github.com/dgryski/go-tsz/testdata/data.go deleted file mode 100644 index f6682c1e..00000000 --- a/vendor/github.com/dgryski/go-tsz/testdata/data.go +++ /dev/null @@ -1,34 +0,0 @@ -package testdata - -type Point struct { - V float64 - T uint32 -} - -// 120 points every 60s -var TwoHoursData = []Point{ - {761, 
1440583200}, {727, 1440583260}, {765, 1440583320}, {706, 1440583380}, {700, 1440583440}, - {679, 1440583500}, {757, 1440583560}, {708, 1440583620}, {739, 1440583680}, {707, 1440583740}, - {699, 1440583800}, {740, 1440583860}, {729, 1440583920}, {766, 1440583980}, {730, 1440584040}, - {715, 1440584100}, {705, 1440584160}, {693, 1440584220}, {765, 1440584280}, {724, 1440584340}, - {799, 1440584400}, {761, 1440584460}, {737, 1440584520}, {766, 1440584580}, {756, 1440584640}, - {719, 1440584700}, {722, 1440584760}, {801, 1440584820}, {747, 1440584880}, {731, 1440584940}, - {742, 1440585000}, {744, 1440585060}, {791, 1440585120}, {750, 1440585180}, {759, 1440585240}, - {809, 1440585300}, {751, 1440585360}, {705, 1440585420}, {770, 1440585480}, {792, 1440585540}, - {727, 1440585600}, {762, 1440585660}, {772, 1440585720}, {721, 1440585780}, {748, 1440585840}, - {753, 1440585900}, {744, 1440585960}, {716, 1440586020}, {776, 1440586080}, {659, 1440586140}, - {789, 1440586200}, {766, 1440586260}, {758, 1440586320}, {690, 1440586380}, {795, 1440586440}, - {770, 1440586500}, {758, 1440586560}, {723, 1440586620}, {767, 1440586680}, {765, 1440586740}, - {693, 1440586800}, {706, 1440586860}, {681, 1440586920}, {727, 1440586980}, {724, 1440587040}, - {780, 1440587100}, {678, 1440587160}, {696, 1440587220}, {758, 1440587280}, {740, 1440587340}, - {735, 1440587400}, {700, 1440587460}, {742, 1440587520}, {747, 1440587580}, {752, 1440587640}, - {734, 1440587700}, {743, 1440587760}, {732, 1440587820}, {746, 1440587880}, {770, 1440587940}, - {780, 1440588000}, {710, 1440588060}, {731, 1440588120}, {712, 1440588180}, {712, 1440588240}, - {741, 1440588300}, {770, 1440588360}, {770, 1440588420}, {754, 1440588480}, {718, 1440588540}, - {670, 1440588600}, {775, 1440588660}, {749, 1440588720}, {795, 1440588780}, {756, 1440588840}, - {741, 1440588900}, {787, 1440588960}, {721, 1440589020}, {745, 1440589080}, {782, 1440589140}, - {765, 1440589200}, {780, 1440589260}, {811, 1440589320}, {790, 
1440589380}, {836, 1440589440}, - {743, 1440589500}, {858, 1440589560}, {739, 1440589620}, {762, 1440589680}, {770, 1440589740}, - {752, 1440589800}, {763, 1440589860}, {795, 1440589920}, {792, 1440589980}, {746, 1440590040}, - {786, 1440590100}, {785, 1440590160}, {774, 1440590220}, {786, 1440590280}, {718, 1440590340}, -} diff --git a/vendor/github.com/dgryski/go-tsz/tsz.go b/vendor/github.com/dgryski/go-tsz/tsz.go deleted file mode 100644 index 3b9d9a0e..00000000 --- a/vendor/github.com/dgryski/go-tsz/tsz.go +++ /dev/null @@ -1,408 +0,0 @@ -// Package tsz implement time-series compression -/* - -http://www.vldb.org/pvldb/vol8/p1816-teller.pdf - -*/ -package tsz - -import ( - "bytes" - "encoding/binary" - "io" - "math" - "math/bits" - "sync" -) - -// Series is the basic series primitive -// you can concurrently put values, finish the stream, and create iterators -type Series struct { - sync.Mutex - - // TODO(dgryski): timestamps in the paper are uint64 - T0 uint32 - t uint32 - val float64 - - bw bstream - leading uint8 - trailing uint8 - finished bool - - tDelta uint32 -} - -// New series -func New(t0 uint32) *Series { - s := Series{ - T0: t0, - leading: ^uint8(0), - } - - // block header - s.bw.writeBits(uint64(t0), 32) - - return &s - -} - -// Bytes value of the series stream -func (s *Series) Bytes() []byte { - s.Lock() - defer s.Unlock() - return s.bw.bytes() -} - -func finish(w *bstream) { - // write an end-of-stream record - w.writeBits(0x0f, 4) - w.writeBits(0xffffffff, 32) - w.writeBit(zero) -} - -// Finish the series by writing an end-of-stream record -func (s *Series) Finish() { - s.Lock() - if !s.finished { - finish(&s.bw) - s.finished = true - } - s.Unlock() -} - -// Push a timestamp and value to the series -func (s *Series) Push(t uint32, v float64) { - s.Lock() - defer s.Unlock() - - if s.t == 0 { - // first point - s.t = t - s.val = v - s.tDelta = t - s.T0 - s.bw.writeBits(uint64(s.tDelta), 14) - s.bw.writeBits(math.Float64bits(v), 64) - return - } 
- - tDelta := t - s.t - dod := int32(tDelta - s.tDelta) - - switch { - case dod == 0: - s.bw.writeBit(zero) - case -63 <= dod && dod <= 64: - s.bw.writeBits(0x02, 2) // '10' - s.bw.writeBits(uint64(dod), 7) - case -255 <= dod && dod <= 256: - s.bw.writeBits(0x06, 3) // '110' - s.bw.writeBits(uint64(dod), 9) - case -2047 <= dod && dod <= 2048: - s.bw.writeBits(0x0e, 4) // '1110' - s.bw.writeBits(uint64(dod), 12) - default: - s.bw.writeBits(0x0f, 4) // '1111' - s.bw.writeBits(uint64(dod), 32) - } - - vDelta := math.Float64bits(v) ^ math.Float64bits(s.val) - - if vDelta == 0 { - s.bw.writeBit(zero) - } else { - s.bw.writeBit(one) - - leading := uint8(bits.LeadingZeros64(vDelta)) - trailing := uint8(bits.TrailingZeros64(vDelta)) - - // clamp number of leading zeros to avoid overflow when encoding - if leading >= 32 { - leading = 31 - } - - // TODO(dgryski): check if it's 'cheaper' to reset the leading/trailing bits instead - if s.leading != ^uint8(0) && leading >= s.leading && trailing >= s.trailing { - s.bw.writeBit(zero) - s.bw.writeBits(vDelta>>s.trailing, 64-int(s.leading)-int(s.trailing)) - } else { - s.leading, s.trailing = leading, trailing - - s.bw.writeBit(one) - s.bw.writeBits(uint64(leading), 5) - - // Note that if leading == trailing == 0, then sigbits == 64. But that value doesn't actually fit into the 6 bits we have. - // Luckily, we never need to encode 0 significant bits, since that would put us in the other case (vdelta == 0). - // So instead we write out a 0 and adjust it back to 64 on unpacking. - sigbits := 64 - leading - trailing - s.bw.writeBits(uint64(sigbits), 6) - s.bw.writeBits(vDelta>>trailing, int(sigbits)) - } - } - - s.tDelta = tDelta - s.t = t - s.val = v - -} - -// Iter lets you iterate over a series. It is not concurrency-safe. -func (s *Series) Iter() *Iter { - s.Lock() - w := s.bw.clone() - s.Unlock() - - finish(w) - iter, _ := bstreamIterator(w) - return iter -} - -// Iter lets you iterate over a series. It is not concurrency-safe. 
-type Iter struct { - T0 uint32 - - t uint32 - val float64 - - br bstream - leading uint8 - trailing uint8 - - finished bool - - tDelta uint32 - err error -} - -func bstreamIterator(br *bstream) (*Iter, error) { - - br.count = 8 - - t0, err := br.readBits(32) - if err != nil { - return nil, err - } - - return &Iter{ - T0: uint32(t0), - br: *br, - }, nil -} - -// NewIterator for the series -func NewIterator(b []byte) (*Iter, error) { - return bstreamIterator(newBReader(b)) -} - -// Next iteration of the series iterator -func (it *Iter) Next() bool { - - if it.err != nil || it.finished { - return false - } - - if it.t == 0 { - // read first t and v - tDelta, err := it.br.readBits(14) - if err != nil { - it.err = err - return false - } - it.tDelta = uint32(tDelta) - it.t = it.T0 + it.tDelta - v, err := it.br.readBits(64) - if err != nil { - it.err = err - return false - } - - it.val = math.Float64frombits(v) - - return true - } - - // read delta-of-delta - var d byte - for i := 0; i < 4; i++ { - d <<= 1 - bit, err := it.br.readBit() - if err != nil { - it.err = err - return false - } - if bit == zero { - break - } - d |= 1 - } - - var dod int32 - var sz uint - switch d { - case 0x00: - // dod == 0 - case 0x02: - sz = 7 - case 0x06: - sz = 9 - case 0x0e: - sz = 12 - case 0x0f: - bits, err := it.br.readBits(32) - if err != nil { - it.err = err - return false - } - - // end of stream - if bits == 0xffffffff { - it.finished = true - return false - } - - dod = int32(bits) - } - - if sz != 0 { - bits, err := it.br.readBits(int(sz)) - if err != nil { - it.err = err - return false - } - if bits > (1 << (sz - 1)) { - // or something - bits = bits - (1 << sz) - } - dod = int32(bits) - } - - tDelta := it.tDelta + uint32(dod) - - it.tDelta = tDelta - it.t = it.t + it.tDelta - - // read compressed value - bit, err := it.br.readBit() - if err != nil { - it.err = err - return false - } - - if bit == zero { - // it.val = it.val - } else { - bit, itErr := it.br.readBit() - if itErr != 
nil { - it.err = err - return false - } - if bit == zero { - // reuse leading/trailing zero bits - // it.leading, it.trailing = it.leading, it.trailing - } else { - bits, err := it.br.readBits(5) - if err != nil { - it.err = err - return false - } - it.leading = uint8(bits) - - bits, err = it.br.readBits(6) - if err != nil { - it.err = err - return false - } - mbits := uint8(bits) - // 0 significant bits here means we overflowed and we actually need 64; see comment in encoder - if mbits == 0 { - mbits = 64 - } - it.trailing = 64 - it.leading - mbits - } - - mbits := int(64 - it.leading - it.trailing) - bits, err := it.br.readBits(mbits) - if err != nil { - it.err = err - return false - } - vbits := math.Float64bits(it.val) - vbits ^= (bits << it.trailing) - it.val = math.Float64frombits(vbits) - } - - return true -} - -// Values at the current iterator position -func (it *Iter) Values() (uint32, float64) { - return it.t, it.val -} - -// Err error at the current iterator position -func (it *Iter) Err() error { - return it.err -} - -type errMarshal struct { - w io.Writer - r io.Reader - err error -} - -func (em *errMarshal) write(t interface{}) { - if em.err != nil { - return - } - em.err = binary.Write(em.w, binary.BigEndian, t) -} - -func (em *errMarshal) read(t interface{}) { - if em.err != nil { - return - } - em.err = binary.Read(em.r, binary.BigEndian, t) -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface -func (s *Series) MarshalBinary() ([]byte, error) { - buf := new(bytes.Buffer) - em := &errMarshal{w: buf} - em.write(s.T0) - em.write(s.leading) - em.write(s.t) - em.write(s.tDelta) - em.write(s.trailing) - em.write(s.val) - bStream, err := s.bw.MarshalBinary() - if err != nil { - return nil, err - } - em.write(bStream) - if em.err != nil { - return nil, em.err - } - return buf.Bytes(), nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface -func (s *Series) UnmarshalBinary(b []byte) error { - buf := 
bytes.NewReader(b) - em := &errMarshal{r: buf} - em.read(&s.T0) - em.read(&s.leading) - em.read(&s.t) - em.read(&s.tDelta) - em.read(&s.trailing) - em.read(&s.val) - outBuf := make([]byte, buf.Len()) - em.read(outBuf) - err := s.bw.UnmarshalBinary(outBuf) - if err != nil { - return err - } - if em.err != nil { - return em.err - } - return nil -} diff --git a/vendor/github.com/unrolled/render/.gitignore b/vendor/github.com/freedomkk-qfeng/go-fastping/.gitignore similarity index 91% rename from vendor/github.com/unrolled/render/.gitignore rename to vendor/github.com/freedomkk-qfeng/go-fastping/.gitignore index 05f4eaf6..494eb387 100644 --- a/vendor/github.com/unrolled/render/.gitignore +++ b/vendor/github.com/freedomkk-qfeng/go-fastping/.gitignore @@ -20,8 +20,8 @@ _cgo_export.* _testmain.go *.exe -*.test +cover.out -*.pem -.DS_Store +ping +!ping/ diff --git a/vendor/github.com/unrolled/render/LICENSE b/vendor/github.com/freedomkk-qfeng/go-fastping/LICENSE similarity index 96% rename from vendor/github.com/unrolled/render/LICENSE rename to vendor/github.com/freedomkk-qfeng/go-fastping/LICENSE index 9c62063e..abb8deee 100644 --- a/vendor/github.com/unrolled/render/LICENSE +++ b/vendor/github.com/freedomkk-qfeng/go-fastping/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2014 Cory Jacobsen +Copyright (c) 2013 Tatsushi Demachi Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/vendor/github.com/freedomkk-qfeng/go-fastping/README.md b/vendor/github.com/freedomkk-qfeng/go-fastping/README.md new file mode 100644 index 00000000..d77a2619 --- /dev/null +++ b/vendor/github.com/freedomkk-qfeng/go-fastping/README.md @@ -0,0 +1,54 @@ +go-fastping +=========== + +go-fastping is a Go language ICMP ping library, inspired by the `AnyEvent::FastPing` +Perl module, for quickly sending ICMP ECHO REQUEST packets. 
Original Perl module +is available at http://search.cpan.org/~mlehmann/AnyEvent-FastPing-2.01/ + +All original functions haven't been implemented yet. + +[![GoDoc](https://godoc.org/github.com/tatsushid/go-fastping?status.svg)](https://godoc.org/github.com/tatsushid/go-fastping) + +## Installation + +Install and update with `go get -u github.com/tatsushid/go-fastping` + +## Examples + +Import this package and write + +```go +p := fastping.NewPinger() +ra, err := net.ResolveIPAddr("ip4:icmp", os.Args[1]) +if err != nil { + fmt.Println(err) + os.Exit(1) +} +p.AddIPAddr(ra) +p.OnRecv = func(addr *net.IPAddr, rtt time.Duration) { + fmt.Printf("IP Addr: %s receive, RTT: %v\n", addr.String(), rtt) +} +p.OnIdle = func() { + fmt.Println("finish") +} +err = p.Run() +if err != nil { + fmt.Println(err) +} +``` + +The example sends an ICMP packet and waits for a response. If it receives a +response, it calls the "receive" callback. After that, once MaxRTT time has +passed, it calls the "idle" callback. For more details, +refer [to the godoc][godoc], and if you need more examples, +please see "cmd/ping/ping.go". + +## Caution +This package implements ICMP ping using both raw socket and UDP. If your program +uses this package in raw socket mode, it needs to be run as a root user. + +## License +go-fastping is under MIT License. See the [LICENSE][license] file for details. + +[godoc]: http://godoc.org/github.com/tatsushid/go-fastping +[license]: https://github.com/tatsushid/go-fastping/blob/master/LICENSE diff --git a/vendor/github.com/freedomkk-qfeng/go-fastping/fastping.go b/vendor/github.com/freedomkk-qfeng/go-fastping/fastping.go new file mode 100644 index 00000000..96950ca0 --- /dev/null +++ b/vendor/github.com/freedomkk-qfeng/go-fastping/fastping.go @@ -0,0 +1,685 @@ +// Package fastping is an ICMP ping library inspired by AnyEvent::FastPing Perl +// module to send ICMP ECHO REQUEST packets quickly. 
Original Perl module is +// available at +// http://search.cpan.org/~mlehmann/AnyEvent-FastPing-2.01/ +// +// It hasn't been fully implemented original functions yet. +// +// Here is an example: +// +// p := fastping.NewPinger() +// ra, err := net.ResolveIPAddr("ip4:icmp", os.Args[1]) +// if err != nil { +// fmt.Println(err) +// os.Exit(1) +// } +// p.AddIPAddr(ra) +// p.OnRecv = func(addr *net.IPAddr, rtt time.Duration) { +// fmt.Printf("IP Addr: %s receive, RTT: %v\n", addr.String(), rtt) +// } +// p.OnIdle = func() { +// fmt.Println("finish") +// } +// err = p.Run() +// if err != nil { +// fmt.Println(err) +// } +// +// It sends an ICMP packet and wait a response. If it receives a response, +// it calls "receive" callback. After that, MaxRTT time passed, it calls +// "idle" callback. If you need more example, please see "cmd/ping/ping.go". +// +// This library needs to run as a superuser for sending ICMP packets when +// privileged raw ICMP endpoints is used so in such a case, to run go test +// for the package, please run like a following +// +// sudo go test +// +package fastping + +import ( + "errors" + "fmt" + "log" + "math/rand" + "net" + "sync" + "syscall" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +const ( + TimeSliceLength = 8 + ProtocolICMP = 1 + ProtocolIPv6ICMP = 58 +) + +var ( + ipv4Proto = map[string]string{"ip": "ip4:icmp", "udp": "udp4"} + ipv6Proto = map[string]string{"ip": "ip6:ipv6-icmp", "udp": "udp6"} +) + +func byteSliceOfSize(n int) []byte { + b := make([]byte, n) + for i := 0; i < len(b); i++ { + b[i] = 1 + } + + return b +} + +func timeToBytes(t time.Time) []byte { + nsec := t.UnixNano() + b := make([]byte, 8) + for i := uint8(0); i < 8; i++ { + b[i] = byte((nsec >> ((7 - i) * 8)) & 0xff) + } + return b +} + +func bytesToTime(b []byte) time.Time { + var nsec int64 + for i := uint8(0); i < 8; i++ { + nsec += int64(b[i]) << ((7 - i) * 8) + } + return time.Unix(nsec/1000000000, 
nsec%1000000000) +} + +func isIPv4(ip net.IP) bool { + return len(ip.To4()) == net.IPv4len +} + +func isIPv6(ip net.IP) bool { + return len(ip) == net.IPv6len +} + +func ipv4Payload(b []byte) []byte { + if len(b) < ipv4.HeaderLen { + return b + } + hdrlen := int(b[0]&0x0f) << 2 + return b[hdrlen:] +} + +type packet struct { + bytes []byte + addr net.Addr +} + +type context struct { + stop chan bool + done chan bool + err error +} + +func newContext() *context { + return &context{ + stop: make(chan bool), + done: make(chan bool), + } +} + +// Pinger represents ICMP packet sender/receiver +type Pinger struct { + id int + seq int + // key string is IPAddr.String() + addrs map[string]*net.IPAddr + network string + source string + source6 string + hasIPv4 bool + hasIPv6 bool + ctx *context + mu sync.Mutex + + // Size in bytes of the payload to send + Size int + // Number of (nano,milli)seconds of an idle timeout. Once it passed, + // the library calls an idle callback function. It is also used for an + // interval time of RunLoop() method + MaxRTT time.Duration + // OnRecv is called with a response packet's source address and its + // elapsed time when Pinger receives a response packet. + OnRecv func(*net.IPAddr, time.Duration) + // OnIdle is called when MaxRTT time passed + OnIdle func() + // If Debug is true, it prints debug messages to stdout. + Debug bool +} + +// NewPinger returns a new Pinger struct pointer +func NewPinger() *Pinger { + rand.Seed(time.Now().UnixNano()) + return &Pinger{ + id: rand.Intn(0xffff), + seq: rand.Intn(0xffff), + addrs: make(map[string]*net.IPAddr), + network: "ip", + source: "", + source6: "", + hasIPv4: false, + hasIPv6: false, + Size: TimeSliceLength, + MaxRTT: time.Second, + OnRecv: nil, + OnIdle: nil, + Debug: false, + } +} + +// Network sets a network endpoints for ICMP ping and returns the previous +// setting. network arg should be "ip" or "udp" string or if others are +// specified, it returns an error. 
If this function isn't called, Pinger +// uses "ip" as default. +func (p *Pinger) Network(network string) (string, error) { + origNet := p.network + switch network { + case "ip": + fallthrough + case "udp": + p.network = network + default: + return origNet, errors.New(network + " can't be used as ICMP endpoint") + } + return origNet, nil +} + +// Source sets ipv4/ipv6 source IP for sending ICMP packets and returns the previous +// setting. Empty value indicates to use system default one (for both ipv4 and ipv6). +func (p *Pinger) Source(source string) (string, error) { + // using ipv4 previous value for new empty one + origSource := p.source + if "" == source { + p.mu.Lock() + p.source = "" + p.source6 = "" + p.mu.Unlock() + return origSource, nil + } + + addr := net.ParseIP(source) + if addr == nil { + return origSource, errors.New(source + " is not a valid textual representation of an IPv4/IPv6 address") + } + + if isIPv4(addr) { + p.mu.Lock() + p.source = source + p.mu.Unlock() + } else if isIPv6(addr) { + origSource = p.source6 + p.mu.Lock() + p.source6 = source + p.mu.Unlock() + } else { + return origSource, errors.New(source + " is not a valid textual representation of an IPv4/IPv6 address") + } + + return origSource, nil +} + +// AddIP adds an IP address to Pinger. ipaddr arg should be a string like +// "192.0.2.1". +func (p *Pinger) AddIP(ipaddr string) error { + addr := net.ParseIP(ipaddr) + if addr == nil { + return fmt.Errorf("%s is not a valid textual representation of an IP address", ipaddr) + } + p.mu.Lock() + p.addrs[addr.String()] = &net.IPAddr{IP: addr} + if isIPv4(addr) { + p.hasIPv4 = true + } else if isIPv6(addr) { + p.hasIPv6 = true + } + p.mu.Unlock() + return nil +} + +// AddIPAddr adds an IP address to Pinger. ip arg should be a net.IPAddr +// pointer. 
+func (p *Pinger) AddIPAddr(ip *net.IPAddr) { + p.mu.Lock() + p.addrs[ip.String()] = ip + if isIPv4(ip.IP) { + p.hasIPv4 = true + } else if isIPv6(ip.IP) { + p.hasIPv6 = true + } + p.mu.Unlock() +} + +// RemoveIP removes an IP address from Pinger. ipaddr arg should be a string +// like "192.0.2.1". +func (p *Pinger) RemoveIP(ipaddr string) error { + addr := net.ParseIP(ipaddr) + if addr == nil { + return fmt.Errorf("%s is not a valid textual representation of an IP address", ipaddr) + } + p.mu.Lock() + delete(p.addrs, addr.String()) + p.mu.Unlock() + return nil +} + +// RemoveIPAddr removes an IP address from Pinger. ip arg should be a net.IPAddr +// pointer. +func (p *Pinger) RemoveIPAddr(ip *net.IPAddr) { + p.mu.Lock() + delete(p.addrs, ip.String()) + p.mu.Unlock() +} + +// AddHandler adds event handler to Pinger. event arg should be "receive" or +// "idle" string. +// +// **CAUTION** This function is deprecated. Please use OnRecv and OnIdle field +// of Pinger struct to set following handlers. +// +// "receive" handler should be +// +// func(addr *net.IPAddr, rtt time.Duration) +// +// type function. The handler is called with a response packet's source address +// and its elapsed time when Pinger receives a response packet. +// +// "idle" handler should be +// +// func() +// +// type function. The handler is called when MaxRTT time passed. For more +// detail, please see Run() and RunLoop(). 
+func (p *Pinger) AddHandler(event string, handler interface{}) error { + switch event { + case "receive": + if hdl, ok := handler.(func(*net.IPAddr, time.Duration)); ok { + p.mu.Lock() + p.OnRecv = hdl + p.mu.Unlock() + return nil + } + return errors.New("receive event handler should be `func(*net.IPAddr, time.Duration)`") + case "idle": + if hdl, ok := handler.(func()); ok { + p.mu.Lock() + p.OnIdle = hdl + p.mu.Unlock() + return nil + } + return errors.New("idle event handler should be `func()`") + } + return errors.New("No such event: " + event) +} + +// Run invokes a single send/receive procedure. It sends packets to all hosts +// which have already been added by AddIP() etc. and wait those responses. When +// it receives a response, it calls "receive" handler registered by AddHander(). +// After MaxRTT seconds, it calls "idle" handler and returns to caller with +// an error value. It means it blocks until MaxRTT seconds passed. For the +// purpose of sending/receiving packets over and over, use RunLoop(). +func (p *Pinger) Run() error { + p.mu.Lock() + p.ctx = newContext() + p.mu.Unlock() + p.run(true) + p.mu.Lock() + defer p.mu.Unlock() + return p.ctx.err +} + +// RunLoop invokes send/receive procedure repeatedly. It sends packets to all +// hosts which have already been added by AddIP() etc. and wait those responses. +// When it receives a response, it calls "receive" handler registered by +// AddHander(). After MaxRTT seconds, it calls "idle" handler, resend packets +// and wait those response. MaxRTT works as an interval time. +// +// This is a non-blocking method so immediately returns. If you want to monitor +// and stop sending packets, use Done() and Stop() methods. 
For example, +// +// p.RunLoop() +// ticker := time.NewTicker(time.Millisecond * 250) +// select { +// case <-p.Done(): +// if err := p.Err(); err != nil { +// log.Fatalf("Ping failed: %v", err) +// } +// case <-ticker.C: +// break +// } +// ticker.Stop() +// p.Stop() +// +// For more details, please see "cmd/ping/ping.go". +func (p *Pinger) RunLoop() { + p.mu.Lock() + p.ctx = newContext() + p.mu.Unlock() + go p.run(false) +} + +// Done returns a channel that is closed when RunLoop() is stopped by an error +// or Stop(). It must be called after RunLoop() call. If not, it causes panic. +func (p *Pinger) Done() <-chan bool { + return p.ctx.done +} + +// Stop stops RunLoop(). It must be called after RunLoop(). If not, it causes +// panic. +func (p *Pinger) Stop() { + p.debugln("Stop(): close(p.ctx.stop)") + close(p.ctx.stop) + p.debugln("Stop(): <-p.ctx.done") + <-p.ctx.done +} + +// Err returns an error that is set by RunLoop(). It must be called after +// RunLoop(). If not, it causes panic. 
+func (p *Pinger) Err() error { + p.mu.Lock() + defer p.mu.Unlock() + return p.ctx.err +} + +func (p *Pinger) listen(netProto string, source string) *icmp.PacketConn { + conn, err := icmp.ListenPacket(netProto, source) + if err != nil { + p.mu.Lock() + p.ctx.err = err + p.mu.Unlock() + p.debugln("Run(): close(p.ctx.done)") + close(p.ctx.done) + return nil + } + return conn +} + +func (p *Pinger) run(once bool) { + p.debugln("Run(): Start") + var conn, conn6 *icmp.PacketConn + if p.hasIPv4 { + if conn = p.listen(ipv4Proto[p.network], p.source); conn == nil { + return + } + defer conn.Close() + } + + if p.hasIPv6 { + if conn6 = p.listen(ipv6Proto[p.network], p.source6); conn6 == nil { + return + } + defer conn6.Close() + } + + recv := make(chan *packet, 1) + recvCtx := newContext() + wg := new(sync.WaitGroup) + + p.debugln("Run(): call recvICMP()") + if conn != nil { + wg.Add(1) + go p.recvICMP(conn, recv, recvCtx, wg) + } + if conn6 != nil { + wg.Add(1) + go p.recvICMP(conn6, recv, recvCtx, wg) + } + + p.debugln("Run(): call sendICMP()") + queue, err := p.sendICMP(conn, conn6) + + ticker := time.NewTicker(p.MaxRTT) + +mainloop: + for { + select { + case <-p.ctx.stop: + p.debugln("Run(): <-p.ctx.stop") + break mainloop + case <-recvCtx.done: + p.debugln("Run(): <-recvCtx.done") + p.mu.Lock() + err = recvCtx.err + p.mu.Unlock() + break mainloop + case <-ticker.C: + p.mu.Lock() + handler := p.OnIdle + p.mu.Unlock() + if handler != nil { + handler() + } + if once || err != nil { + break mainloop + } + p.debugln("Run(): call sendICMP()") + queue, err = p.sendICMP(conn, conn6) + case r := <-recv: + p.debugln("Run(): <-recv") + p.procRecv(r, queue) + } + } + + ticker.Stop() + + p.debugln("Run(): close(recvCtx.stop)") + close(recvCtx.stop) + p.debugln("Run(): wait recvICMP()") + wg.Wait() + + p.mu.Lock() + p.ctx.err = err + p.mu.Unlock() + + p.debugln("Run(): close(p.ctx.done)") + close(p.ctx.done) + p.debugln("Run(): End") +} + +func (p *Pinger) sendICMP(conn, conn6 
*icmp.PacketConn) (map[string]*net.IPAddr, error) { + p.debugln("sendICMP(): Start") + p.mu.Lock() + p.id = rand.Intn(0xffff) + p.seq = rand.Intn(0xffff) + p.mu.Unlock() + queue := make(map[string]*net.IPAddr) + wg := new(sync.WaitGroup) + for key, addr := range p.addrs { + var typ icmp.Type + var cn *icmp.PacketConn + if isIPv4(addr.IP) { + typ = ipv4.ICMPTypeEcho + cn = conn + } else if isIPv6(addr.IP) { + typ = ipv6.ICMPTypeEchoRequest + cn = conn6 + } else { + continue + } + if cn == nil { + continue + } + + t := timeToBytes(time.Now()) + + if p.Size-TimeSliceLength != 0 { + t = append(t, byteSliceOfSize(p.Size-TimeSliceLength)...) + } + + p.mu.Lock() + bytes, err := (&icmp.Message{ + Type: typ, Code: 0, + Body: &icmp.Echo{ + ID: p.id, Seq: p.seq, + Data: t, + }, + }).Marshal(nil) + p.mu.Unlock() + if err != nil { + wg.Wait() + return queue, err + } + + queue[key] = addr + var dst net.Addr = addr + if p.network == "udp" { + dst = &net.UDPAddr{IP: addr.IP, Zone: addr.Zone} + } + + p.debugln("sendICMP(): Invoke goroutine") + wg.Add(1) + go func(conn *icmp.PacketConn, ra net.Addr, b []byte) { + for { + if _, err := conn.WriteTo(bytes, ra); err != nil { + if neterr, ok := err.(*net.OpError); ok { + if neterr.Err == syscall.ENOBUFS { + continue + } + } + } + break + } + p.debugln("sendICMP(): WriteTo End") + wg.Done() + }(cn, dst, bytes) + } + wg.Wait() + p.debugln("sendICMP(): End") + return queue, nil +} + +func (p *Pinger) recvICMP(conn *icmp.PacketConn, recv chan<- *packet, ctx *context, wg *sync.WaitGroup) { + p.debugln("recvICMP(): Start") + for { + select { + case <-ctx.stop: + p.debugln("recvICMP(): <-ctx.stop") + wg.Done() + p.debugln("recvICMP(): wg.Done()") + return + default: + } + + bytes := make([]byte, 512) + conn.SetReadDeadline(time.Now().Add(time.Millisecond * 100)) + p.debugln("recvICMP(): ReadFrom Start") + _, ra, err := conn.ReadFrom(bytes) + p.debugln("recvICMP(): ReadFrom End") + if err != nil { + if neterr, ok := err.(*net.OpError); ok { + if 
neterr.Timeout() { + p.debugln("recvICMP(): Read Timeout") + continue + } else { + p.debugln("recvICMP(): OpError happen", err) + p.mu.Lock() + ctx.err = err + p.mu.Unlock() + p.debugln("recvICMP(): close(ctx.done)") + close(ctx.done) + p.debugln("recvICMP(): wg.Done()") + wg.Done() + return + } + } + } + p.debugln("recvICMP(): p.recv <- packet") + + select { + case recv <- &packet{bytes: bytes, addr: ra}: + case <-ctx.stop: + p.debugln("recvICMP(): <-ctx.stop") + wg.Done() + p.debugln("recvICMP(): wg.Done()") + return + } + } +} + +func (p *Pinger) procRecv(recv *packet, queue map[string]*net.IPAddr) { + var ipaddr *net.IPAddr + switch adr := recv.addr.(type) { + case *net.IPAddr: + ipaddr = adr + case *net.UDPAddr: + ipaddr = &net.IPAddr{IP: adr.IP, Zone: adr.Zone} + default: + return + } + + addr := ipaddr.String() + p.mu.Lock() + if _, ok := p.addrs[addr]; !ok { + p.mu.Unlock() + return + } + p.mu.Unlock() + + var bytes []byte + var proto int + if isIPv4(ipaddr.IP) { + if p.network == "ip" { + bytes = ipv4Payload(recv.bytes) + } else { + bytes = recv.bytes + } + proto = ProtocolICMP + } else if isIPv6(ipaddr.IP) { + bytes = recv.bytes + proto = ProtocolIPv6ICMP + } else { + return + } + + var m *icmp.Message + var err error + if m, err = icmp.ParseMessage(proto, bytes); err != nil { + return + } + + if m.Type != ipv4.ICMPTypeEchoReply && m.Type != ipv6.ICMPTypeEchoReply { + return + } + + var rtt time.Duration + switch pkt := m.Body.(type) { + case *icmp.Echo: + p.mu.Lock() + if pkt.ID == p.id && pkt.Seq == p.seq { + rtt = time.Since(bytesToTime(pkt.Data[:TimeSliceLength])) + } + p.mu.Unlock() + default: + return + } + + if _, ok := queue[addr]; ok { + delete(queue, addr) + p.mu.Lock() + handler := p.OnRecv + p.mu.Unlock() + if handler != nil { + handler(ipaddr, rtt) + } + } +} + +func (p *Pinger) debugln(args ...interface{}) { + p.mu.Lock() + defer p.mu.Unlock() + if p.Debug { + log.Println(args...) 
+ } +} + +func (p *Pinger) debugf(format string, args ...interface{}) { + p.mu.Lock() + defer p.mu.Unlock() + if p.Debug { + log.Printf(format, args...) + } +} diff --git a/vendor/github.com/gaochao1/gosnmp/.gitignore b/vendor/github.com/gaochao1/gosnmp/.gitignore new file mode 100644 index 00000000..61f5ce4f --- /dev/null +++ b/vendor/github.com/gaochao1/gosnmp/.gitignore @@ -0,0 +1,28 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +# Editor Files +.*.sw* + +# Mac OS X crap +.DS_Store \ No newline at end of file diff --git a/vendor/github.com/gosnmp/gosnmp/LICENSE b/vendor/github.com/gaochao1/gosnmp/LICENSE similarity index 89% rename from vendor/github.com/gosnmp/gosnmp/LICENSE rename to vendor/github.com/gaochao1/gosnmp/LICENSE index e03e94e8..12b9bc4b 100644 --- a/vendor/github.com/gosnmp/gosnmp/LICENSE +++ b/vendor/github.com/gaochao1/gosnmp/LICENSE @@ -1,14 +1,13 @@ -Copyright 2012-2020 The GoSNMP Authors. All rights reserved. Use of this -rights reserved. Use of this source code is governed by a BSD-style -license that can be found in the LICENSE file. +Copyright (c) 2012-2013, Andreas Louca +All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
@@ -24,7 +23,7 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -Parts of the gosnmp code are from GoLang ASN.1 Library +Parts of the gosnmp code are from GoLang ASN.1 Library (as marked in the source code). For those part of code the following license applies: @@ -54,4 +53,4 @@ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/gaochao1/gosnmp/README.md b/vendor/github.com/gaochao1/gosnmp/README.md new file mode 100644 index 00000000..2e6e1cc8 --- /dev/null +++ b/vendor/github.com/gaochao1/gosnmp/README.md @@ -0,0 +1,77 @@ +gosnmp +====== + +GoSNMP is a simple SNMP client library, written fully in Go. Currently it supports only GetRequest (with the rest GetNextRequest, SetRequest in the pipe line). Support for traps is also in the plans. + + +Install +------- + +The easiest way to install is via go get: + + go get github.com/alouca/gosnmp + +License +------- + +Some parts of the code are borrowed by the Golang project (specifically some functions for unmarshaling BER responses), which are under the same terms and conditions as the Go language, which are marked appropriately in the source code. The rest of the code is under the BSD license. + +See the LICENSE file for more details. 
+ +Usage +----- +The library usage is pretty simple: + + // Connect to 192.168.0.1 with timeout of 5 seconds + + import ( + "github.com/alouca/gosnmp" + "log" + ) + + s, err := gosnmp.NewGoSNMP("61.147.69.87", "public", gosnmp.Version2c, 5) + if err != nil { + log.Fatal(err) + } + resp, err := s.Get(".1.3.6.1.2.1.1.1.0") + if err == nil { + for _, v := range resp.Variables { + switch v.Type { + case gosnmp.OctetString: + log.Printf("Response: %s : %s : %s \n", v.Name, v.Value.(string), v.Type.String()) + } + } + } + +The response value is always given as an interface{} depending on the PDU response from the SNMP server. For an example checkout examples/example.go. + +Responses are a struct of the following format: + + type Variable struct { + Name asn1.ObjectIdentifier + Type Asn1BER + Value interface{} + } + +Where Name is the OID encoded as an object identifier, Type is the encoding type of the response and Value is an interface{} type, with the response appropriately decoded. + +SNMP BER Types can be one of the following: + + type Asn1BER byte + + const ( + Integer Asn1BER = 0x02 + BitString = 0x03 + OctetString = 0x04 + Null = 0x05 + ObjectIdentifier = 0x06 + Counter32 = 0x41 + Gauge32 = 0x42 + TimeTicks = 0x43 + Opaque = 0x44 + NsapAddress = 0x45 + Counter64 = 0x46 + Uinteger32 = 0x47 + ) + +GoSNMP supports most of the above values, subsequent releases will support all of them. diff --git a/vendor/github.com/gaochao1/gosnmp/decode.go b/vendor/github.com/gaochao1/gosnmp/decode.go new file mode 100644 index 00000000..911d26b9 --- /dev/null +++ b/vendor/github.com/gaochao1/gosnmp/decode.go @@ -0,0 +1,177 @@ +// Copyright 2012 Andreas Louca. All rights reserved. +// Use of this source code is goverend by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gosnmp + +import ( + "fmt" + "net" +) + +type Asn1BER byte + +// SNMP Data Types +const ( + Integer Asn1BER = 0x02 + BitString = 0x03 + OctetString = 0x04 + Null = 0x05 + ObjectIdentifier = 0x06 + Sequence = 0x30 + IpAddress = 0x40 + Counter32 = 0x41 + Gauge32 = 0x42 + TimeTicks = 0x43 + Opaque = 0x44 + NsapAddress = 0x45 + Counter64 = 0x46 + Uinteger32 = 0x47 + NoSuchObject = 0x80 + NoSuchInstance = 0x81 + GetRequest = 0xa0 + GetNextRequest = 0xa1 + GetResponse = 0xa2 + SetRequest = 0xa3 + Trap = 0xa4 + GetBulkRequest = 0xa5 + EndOfMibView = 0x82 +) + +// String representations of each SNMP Data Type +var dataTypeStrings = map[Asn1BER]string{ + Integer: "Integer", + BitString: "BitString", + OctetString: "OctetString", + Null: "Null", + ObjectIdentifier: "ObjectIdentifier", + IpAddress: "IpAddress", + Sequence: "Sequence", + Counter32: "Counter32", + Gauge32: "Gauge32", + TimeTicks: "TimeTicks", + Opaque: "Opaque", + NsapAddress: "NsapAddress", + Counter64: "Counter64", + Uinteger32: "Uinteger32", + NoSuchObject: "NoSuchObject", + NoSuchInstance: "NoSuchInstance", + GetRequest: "GetRequest", + GetNextRequest: "GetNextRequest", + GetResponse: "GetResponse", + SetRequest: "SetRequest", + Trap: "Trap", + GetBulkRequest: "GetBulkRequest", + EndOfMibView: "endOfMib", +} + +func (dataType Asn1BER) String() string { + str, ok := dataTypeStrings[dataType] + + if !ok { + str = "Unknown" + } + + return str +} + +type Variable struct { + Name []int + Type Asn1BER + Size uint64 + Value interface{} +} + +func decodeValue(valueType Asn1BER, data []byte) (retVal *Variable, err error) { + retVal = new(Variable) + retVal.Size = uint64(len(data)) + + switch Asn1BER(valueType) { + + // Integer + case Integer: + ret, err := parseInt(data) + if err != nil { + break + } + retVal.Type = Integer + retVal.Value = ret + // Octet + case OctetString: + retVal.Type = OctetString + retVal.Value = string(data) + case ObjectIdentifier: + retVal.Type = ObjectIdentifier + retVal.Value, 
_ = parseObjectIdentifier(data) + // IpAddress + case IpAddress: + retVal.Type = IpAddress + retVal.Value = net.IP{data[0], data[1], data[2], data[3]} + // Counter32 + case Counter32: + ret, err := parseInt(data) + if err != nil { + break + } + retVal.Type = Counter32 + retVal.Value = ret + case TimeTicks: + ret, err := parseInt(data) + if err != nil { + break + } + retVal.Type = TimeTicks + retVal.Value = ret + // Gauge32 + case Gauge32: + ret, err := parseInt(data) + if err != nil { + break + } + retVal.Type = Gauge32 + retVal.Value = ret + case Counter64: + ret, err := parseInt64(data) + + // Decode it + if err != nil { + break + } + + retVal.Type = Counter64 + retVal.Value = ret + case Null: + retVal.Value = nil + case Sequence: + // NOOP + retVal.Value = data + case GetResponse: + // NOOP + retVal.Value = data + case GetRequest: + // NOOP + retVal.Value = data + case EndOfMibView: + retVal.Type = EndOfMibView + retVal.Value = "endOfMib" + case GetBulkRequest: + // NOOP + retVal.Value = data + case NoSuchInstance: + return nil, fmt.Errorf("No such instance") + case NoSuchObject: + return nil, fmt.Errorf("No such object") + default: + err = fmt.Errorf("Unable to decode %s %#v - not implemented", valueType, valueType) + } + + return retVal, err +} + +// Parses UINT16 +func ParseUint16(content []byte) int { + number := uint8(content[1]) | uint8(content[0])<<8 + //fmt.Printf("\t%d\n", number) + + return int(number) +} diff --git a/vendor/github.com/gaochao1/gosnmp/gosnmp.go b/vendor/github.com/gaochao1/gosnmp/gosnmp.go new file mode 100644 index 00000000..cca7afa9 --- /dev/null +++ b/vendor/github.com/gaochao1/gosnmp/gosnmp.go @@ -0,0 +1,284 @@ +// Copyright 2012 Andreas Louca. All rights reserved. +// Use of this source code is goverend by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gosnmp + +import ( + "fmt" + l "github.com/alouca/gologger" + "net" + "strings" + "time" +) + +type GoSNMP struct { + Target string + Community string + Version SnmpVersion + Timeout time.Duration + conn net.Conn + Log *l.Logger +} + +var DEFAULT_PORT = 161 + +// Creates a new SNMP Client. Target is the IP address, Community the SNMP Community String and Version the SNMP version. +// Currently only v2c is supported. Timeout parameter is measured in seconds. +func NewGoSNMP(target, community string, version SnmpVersion, timeout int64) (*GoSNMP, error) { + if !strings.Contains(target, ":") { + target = fmt.Sprintf("%s:%d", target, DEFAULT_PORT) + } + + // Open a UDP connection to the target + conn, err := net.DialTimeout("udp", target, time.Duration(timeout)*time.Millisecond) + + if err != nil { + return nil, fmt.Errorf("Error establishing connection to host: %s\n", err.Error()) + } + s := &GoSNMP{target, community, version, time.Duration(timeout) * time.Second, conn, l.CreateLogger(false, false)} + + return s, nil +} + +// Enables verbose logging +func (x *GoSNMP) SetVerbose(v bool) { + x.Log.VerboseFlag = v +} + +// Enables debugging +func (x *GoSNMP) SetDebug(d bool) { + x.Log.DebugFlag = d +} + +// Sets the timeout for network read/write functions. Defaults to 5 seconds. 
+func (x *GoSNMP) SetTimeout(seconds int64) { + if seconds <= 0 { + seconds = 5 + } + x.Timeout = time.Duration(seconds) * time.Millisecond +} + +// StreamWalk will start walking a specified OID, and push through a channel the results +// as it receives them, without waiting for the whole process to finish to return the +// results +func (x *GoSNMP) StreamWalk(oid string, c chan *Variable) error { + + return nil +} + +func (x *GoSNMP) BulkWalk(max_repetitions uint8, oid string) (results []SnmpPDU, err error) { + if oid == "" { + return nil, fmt.Errorf("No OID given\n") + } + return x._bulkWalk(max_repetitions, oid, oid) +} +func (x *GoSNMP) _bulkWalk(max_repetitions uint8, searching_oid string, root_oid string) (results []SnmpPDU, err error) { + response, err := x.GetBulk(0, max_repetitions, searching_oid) + if err != nil { + return + } + for i, v := range response.Variables { + if v.Value == "endOfMib" { + return + } + // is this variable still in the requested oid range + if strings.HasPrefix(v.Name, root_oid) { + results = append(results, v) + // is the last oid received still in the requested range + if i == len(response.Variables)-1 { + var sub_results []SnmpPDU + sub_results, err = x._bulkWalk(max_repetitions, v.Name, root_oid) + if err != nil { + return + } + results = append(results, sub_results...) 
+ } + } + } + return +} + +// Walk will SNMP walk the target, blocking until the process is complete +func (x *GoSNMP) Walk(oid string) (results []SnmpPDU, err error) { + if oid == "" { + return nil, fmt.Errorf("No OID given\n") + } + results = make([]SnmpPDU, 0) + requestOid := oid + + for { + res, err := x.GetNext(oid) + if err != nil { + return results, err + } + if res != nil { + if len(res.Variables) > 0 { + if strings.Index(res.Variables[0].Name, requestOid) > -1 { + results = append(results, res.Variables[0]) + // Set to the next + oid = res.Variables[0].Name + x.Log.Debug("Moving to %s\n", oid) + } else { + x.Log.Debug("Root OID mismatch, stopping walk\n") + break + } + } else { + break + } + } else { + break + } + + } + return +} + +// Marshals & send an SNMP request. Unmarshals the response and returns back the parsed +// SNMP packet +func (x *GoSNMP) sendPacket(packet *SnmpPacket) (*SnmpPacket, error) { + // Set timeouts on the connection + deadline := time.Now() + x.conn.SetDeadline(deadline.Add(x.Timeout)) + + // Marshal it + fBuf, err := packet.marshal() + + if err != nil { + return nil, err + } + + // Send the packet! + _, err = x.conn.Write(fBuf) + if err != nil { + return nil, fmt.Errorf("Error writing to socket: %s\n", err.Error()) + } + // Try to read the response + resp := make([]byte, 8192, 8192) + n, err := x.conn.Read(resp) + + if err != nil { + return nil, fmt.Errorf("Error reading from UDP: %s\n", err.Error()) + } + + // Unmarshal the read bytes + pdu, err := Unmarshal(resp[:n]) + + if err != nil { + return nil, fmt.Errorf("Unable to decode packet: %s\n", err.Error()) + } else { + if len(pdu.Variables) < 1 { + return nil, fmt.Errorf("No responses received.") + } else { + return pdu, nil + } + } + + return nil, nil +} + +// Sends an SNMP Get Next Request to the target. 
Returns the next variable response from the OID given or an error +func (x *GoSNMP) GetNext(oid string) (*SnmpPacket, error) { + var err error + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + }() + + // Create the packet + packet := new(SnmpPacket) + + packet.Community = x.Community + packet.Error = 0 + packet.ErrorIndex = 0 + packet.RequestType = GetNextRequest + packet.Version = 1 // version 2 + packet.Variables = []SnmpPDU{SnmpPDU{Name: oid, Type: Null}} + + return x.sendPacket(packet) +} + +// Debug function. Unmarshals raw bytes and returns the result without the network part +func (x *GoSNMP) Debug(data []byte) (*SnmpPacket, error) { + packet, err := Unmarshal(data) + + if err != nil { + return nil, fmt.Errorf("Unable to decode packet: %s\n", err.Error()) + } + return packet, nil +} + +// Sends an SNMP BULK-GET request to the target. Returns a Variable with the response or an error +func (x *GoSNMP) GetBulk(non_repeaters, max_repetitions uint8, oids ...string) (*SnmpPacket, error) { + var err error + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + }() + + // Create the packet + packet := new(SnmpPacket) + + packet.Community = x.Community + packet.NonRepeaters = non_repeaters + packet.MaxRepetitions = max_repetitions + packet.RequestType = GetBulkRequest + packet.Version = 1 // version 2 + packet.Variables = make([]SnmpPDU, len(oids)) + + for i, oid := range oids { + packet.Variables[i] = SnmpPDU{Name: oid, Type: Null} + } + + return x.sendPacket(packet) +} + +// Sends an SNMP GET request to the target. 
Returns a Variable with the response or an error +func (x *GoSNMP) Get(oid string) (*SnmpPacket, error) { + var err error + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + }() + + // Create the packet + packet := new(SnmpPacket) + + packet.Community = x.Community + packet.Error = 0 + packet.ErrorIndex = 0 + packet.RequestType = GetRequest + packet.Version = 1 // version 2 + packet.Variables = []SnmpPDU{SnmpPDU{Name: oid, Type: Null}} + + return x.sendPacket(packet) +} + +// Sends an SNMP GET request to the target. Returns a Variable with the response or an error +func (x *GoSNMP) GetMulti(oids []string) (*SnmpPacket, error) { + var err error + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + }() + + // Create the packet + packet := new(SnmpPacket) + + packet.Community = x.Community + packet.Error = 0 + packet.ErrorIndex = 0 + packet.RequestType = GetRequest + packet.Version = 1 // version 2 + packet.Variables = make([]SnmpPDU, len(oids)) + + for i, oid := range oids { + packet.Variables[i] = SnmpPDU{Name: oid, Type: Null} + } + + return x.sendPacket(packet) +} diff --git a/vendor/github.com/gaochao1/gosnmp/helper.go b/vendor/github.com/gaochao1/gosnmp/helper.go new file mode 100644 index 00000000..b2393e57 --- /dev/null +++ b/vendor/github.com/gaochao1/gosnmp/helper.go @@ -0,0 +1,208 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gosnmp + +import ( + "bytes" + "errors" + "fmt" +) + +func marshalObjectIdentifier(oid []int) (ret []byte, err error) { + out := bytes.NewBuffer(make([]byte, 0, 128)) + if len(oid) < 2 || oid[0] > 6 || oid[1] >= 40 { + return nil, errors.New("invalid object identifier") + } + + err = out.WriteByte(byte(oid[0]*40 + oid[1])) + if err != nil { + return + } + for i := 2; i < len(oid); i++ { + err = marshalBase128Int(out, int64(oid[i])) + if err != nil { + return + } + } + + ret = out.Bytes() + + return +} + +// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and +// returns it. An object identifier is a sequence of variable length integers +// that are assigned in a hierarchy. +func parseObjectIdentifier(bytes []byte) (s []int, err error) { + if len(bytes) == 0 { + err = fmt.Errorf("zero length OBJECT IDENTIFIER") + return + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + s = make([]int, len(bytes)+1) + + // The first byte is 40*value1 + value2: + s[0] = int(bytes[0]) / 40 + s[1] = int(bytes[0]) % 40 + i := 2 + for offset := 1; offset < len(bytes); i++ { + var v int + v, offset, err = parseBase128Int(bytes, offset) + if err != nil { + return + } + s[i] = v + } + s = s[0:i] + return +} + +// parseBase128Int parses a base-128 encoded int from the given offset in the +// given byte slice. It returns the value and the new offset. 
+func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) { + offset = initOffset + for shifted := 0; offset < len(bytes); shifted++ { + if shifted > 4 { + err = fmt.Errorf("Structural Error: base 128 integer too large") + return + } + ret <<= 7 + b := bytes[offset] + ret |= int(b & 0x7f) + offset++ + if b&0x80 == 0 { + return + } + } + err = fmt.Errorf("Syntax Error: truncated base 128 integer") + return +} + +func marshalBase128Int(out *bytes.Buffer, n int64) (err error) { + if n == 0 { + err = out.WriteByte(0) + return + } + + l := 0 + for i := n; i > 0; i >>= 7 { + l++ + } + + for i := l - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + err = out.WriteByte(o) + if err != nil { + return + } + } + + return nil +} + +// parseInt64 treats the given bytes as a big-endian, signed integer and +// returns the result. +func parseInt64(bytes []byte) (ret uint64, err error) { + if len(bytes) > 8 { + // We'll overflow an int64 in this case. + err = errors.New("integer too large") + return + } + for bytesRead := 0; bytesRead < len(bytes); bytesRead++ { + ret <<= 8 + ret |= uint64(bytes[bytesRead]) + } + + // Shift up and down in order to sign extend the result. + ret <<= 64 - uint8(len(bytes))*8 + ret >>= 64 - uint8(len(bytes))*8 + return +} + +// parseInt treats the given bytes as a big-endian, signed integer and returns +// the result. +func parseInt(bytes []byte) (int, error) { + ret64, err := parseInt64(bytes) + if err != nil { + return 0, err + } + if ret64 != uint64(int(ret64)) { + return 0, errors.New("integer too large") + } + return int(ret64), nil +} + +func Uvarint(buf []byte) (x uint64) { + for i, b := range buf { + x = x<<8 + uint64(b) + if i == 7 { + return + } + } + return +} + +// BIT STRING + +// BitStringValue is the structure to use when you want an ASN.1 BIT STRING type. A +// bit string is padded up to the nearest byte in memory and the number of +// valid bits is recorded. 
Padding bits will be zero. +type BitStringValue struct { + Bytes []byte // bits packed into bytes. + BitLength int // length in bits. +} + +// At returns the bit at the given index. If the index is out of range it +// returns false. +func (b BitStringValue) At(i int) int { + if i < 0 || i >= b.BitLength { + return 0 + } + x := i / 8 + y := 7 - uint(i%8) + return int(b.Bytes[x]>>y) & 1 +} + +// RightAlign returns a slice where the padding bits are at the beginning. The +// slice may share memory with the BitString. +func (b BitStringValue) RightAlign() []byte { + shift := uint(8 - (b.BitLength % 8)) + if shift == 8 || len(b.Bytes) == 0 { + return b.Bytes + } + + a := make([]byte, len(b.Bytes)) + a[0] = b.Bytes[0] >> shift + for i := 1; i < len(b.Bytes); i++ { + a[i] = b.Bytes[i-1] << (8 - shift) + a[i] |= b.Bytes[i] >> shift + } + + return a +} + +// parseBitString parses an ASN.1 bit string from the given byte slice and returns it. +func parseBitString(bytes []byte) (ret BitStringValue, err error) { + if len(bytes) == 0 { + err = errors.New("zero length BIT STRING") + return + } + paddingBits := int(bytes[0]) + if paddingBits > 7 || + len(bytes) == 1 && paddingBits > 0 || + bytes[len(bytes)-1]&((1< 0x80 { + length = length - 0x80 + log.Debug("Field length is padded to %d bytes\n", length) + ber.DataLength = Uvarint(data[2 : 2+length]) + log.Debug("Decoded final length: %d\n", ber.DataLength) + + ber.HeaderLength = 2 + uint64(length) + + } else { + ber.HeaderLength = 2 + ber.DataLength = uint64(length) + } + + // Do sanity checks + if ber.DataLength > uint64(len(data)) { + return nil, fmt.Errorf("Unable to parse BER: provided data length is longer than actual data (%d vs %d)", ber.DataLength, len(data)) + } + + ber.Data = data[ber.HeaderLength : ber.HeaderLength+ber.DataLength] + + ber.BERVariable, err = decodeValue(ber.Type, ber.Data) + + if err != nil { + return nil, fmt.Errorf("Unable to decode value: %s\n", err.Error()) + } + + return ber, nil +} + +func (packet 
*SnmpPacket) marshal() ([]byte, error) { + // Prepare the buffer to send + buffer := make([]byte, 0, 1024) + buf := bytes.NewBuffer(buffer) + + // Write the packet header (Message type 0x30) & Version = 2 + buf.Write([]byte{byte(Sequence), 0, 2, 1, byte(packet.Version)}) + + // Write Community + buf.Write([]byte{4, uint8(len(packet.Community))}) + buf.WriteString(packet.Community) + + // Marshal the SNMP PDU + snmpPduBuffer := make([]byte, 0, 1024) + snmpPduBuf := bytes.NewBuffer(snmpPduBuffer) + + snmpPduBuf.Write([]byte{byte(packet.RequestType), 0, 2, 1, packet.RequestID}) + + switch packet.RequestType { + case GetBulkRequest: + snmpPduBuf.Write([]byte{ + 2, 1, packet.NonRepeaters, + 2, 1, packet.MaxRepetitions, + }) + default: + snmpPduBuf.Write([]byte{ + 2, 1, packet.Error, + 2, 1, packet.ErrorIndex, + }) + } + + snmpPduBuf.Write([]byte{byte(Sequence), 0}) + + pduLength := 0 + for _, varlist := range packet.Variables { + pdu, err := marshalPDU(&varlist) + + if err != nil { + return nil, err + } + pduLength += len(pdu) + snmpPduBuf.Write(pdu) + } + + pduBytes := snmpPduBuf.Bytes() + // Varbind list length + pduBytes[12] = byte(pduLength) + // SNMP PDU length (PDU header + varbind list length) + pduBytes[1] = byte(pduLength + 11) + + buf.Write(pduBytes) + + // Write the + //buf.Write([]byte{packet.RequestType, uint8(17 + len(mOid)), 2, 1, 1, 2, 1, 0, 2, 1, 0, 0x30, uint8(6 + len(mOid)), 0x30, uint8(4 + len(mOid)), 6, uint8(len(mOid))}) + //buf.Write(mOid) + //buf.Write([]byte{5, 0}) + + ret := buf.Bytes() + + // Set the packet size + ret[1] = uint8(len(ret) - 2) + + return ret, nil +} + +func marshalPDU(pdu *SnmpPDU) ([]byte, error) { + oid, err := marshalOID(pdu.Name) + if err != nil { + return nil, err + } + + pduBuffer := make([]byte, 0, 1024) + pduBuf := bytes.NewBuffer(pduBuffer) + + // Mashal the PDU type into the appropriate BER + switch pdu.Type { + case Null: + pduBuf.Write([]byte{byte(Sequence), byte(len(oid) + 4)}) + 
pduBuf.Write([]byte{byte(ObjectIdentifier), byte(len(oid))}) + pduBuf.Write(oid) + pduBuf.Write([]byte{Null, 0x00}) + default: + return nil, fmt.Errorf("Unable to marshal PDU: unknown BER type %d", pdu.Type) + } + + return pduBuf.Bytes(), nil +} + +func oidToString(oid []int) (ret string) { + values := make([]interface{}, len(oid)) + for i, v := range oid { + values[i] = v + } + return fmt.Sprintf(strings.Repeat(".%d", len(oid)), values...) +} + +func marshalOID(oid string) ([]byte, error) { + var err error + + // Encode the oid + oid = strings.Trim(oid, ".") + oidParts := strings.Split(oid, ".") + oidBytes := make([]int, len(oidParts)) + + // Convert the string OID to an array of integers + for i := 0; i < len(oidParts); i++ { + oidBytes[i], err = strconv.Atoi(oidParts[i]) + if err != nil { + return nil, fmt.Errorf("Unable to parse OID: %s\n", err.Error()) + } + } + + mOid, err := marshalObjectIdentifier(oidBytes) + + if err != nil { + return nil, fmt.Errorf("Unable to marshal OID: %s\n", err.Error()) + } + + return mOid, err +} diff --git a/vendor/github.com/gaochao1/sw/README.md b/vendor/github.com/gaochao1/sw/README.md new file mode 100644 index 00000000..040e9eed --- /dev/null +++ b/vendor/github.com/gaochao1/sw/README.md @@ -0,0 +1 @@ +swcollector的工具库 \ No newline at end of file diff --git a/vendor/github.com/gaochao1/sw/conn.go b/vendor/github.com/gaochao1/sw/conn.go new file mode 100644 index 00000000..28817fae --- /dev/null +++ b/vendor/github.com/gaochao1/sw/conn.go @@ -0,0 +1,42 @@ +package sw + +import ( + "log" + "time" + + "github.com/gaochao1/gosnmp" +) + +func ConnectionStat(ip, community string, timeout, retry int) (int, error) { + defer func() { + if r := recover(); r != nil { + log.Println(ip+" Recovered in Conntilization", r) + } + }() + vendor, err := SysVendor(ip, community, timeout) + method := "get" + var oid string + switch vendor { + case "Cisco_ASA", "Cisco_ASA_OLD": + oid = "1.3.6.1.4.1.9.9.147.1.2.2.2.1.5.40.6" + default: + return 0, 
err + } + + var snmpPDUs []gosnmp.SnmpPDU + for i := 0; i < retry; i++ { + snmpPDUs, err = RunSnmp(ip, community, oid, method, timeout) + if len(snmpPDUs) > 0 { + break + } + time.Sleep(100 * time.Millisecond) + } + + if err == nil { + for _, pdu := range snmpPDUs { + return pdu.Value.(int), err + } + } + + return 0, err +} diff --git a/vendor/github.com/gaochao1/sw/cpustat.go b/vendor/github.com/gaochao1/sw/cpustat.go new file mode 100644 index 00000000..e0b010e2 --- /dev/null +++ b/vendor/github.com/gaochao1/sw/cpustat.go @@ -0,0 +1,208 @@ +package sw + +import ( + "errors" + "log" + "strings" + "time" + + "github.com/gaochao1/gosnmp" +) + +func CpuUtilization(ip, community string, timeout, retry int) (int, error) { + defer func() { + if r := recover(); r != nil { + log.Println(ip+" Recovered in CPUtilization", r) + } + }() + vendor, err := SysVendor(ip, community, timeout) + method := "get" + var oid string + + switch vendor { + case "Cisco_NX": + oid = "1.3.6.1.4.1.9.9.305.1.1.1.0" + case "Cisco", "Cisco_IOS_7200", "Cisco_old": + oid = "1.3.6.1.4.1.9.9.109.1.1.1.1.7.1" + case "Cisco_IOS_XE", "Cisco_IOS_XR": + oid = "1.3.6.1.4.1.9.9.109.1.1.1.1.7" + method = "getnext" + case "Cisco_ASA": + oid = "1.3.6.1.4.1.9.9.109.1.1.1.1.7" + return getCiscoASAcpu(ip, community, oid, timeout, retry) + case "Cisco_ASA_OLD": + oid = "1.3.6.1.4.1.9.9.109.1.1.1.1.4" + return getCiscoASAcpu(ip, community, oid, timeout, retry) + case "Huawei", "Huawei_V5.70", "Huawei_V5.130": + oid = "1.3.6.1.4.1.2011.5.25.31.1.1.1.1.5" + return getH3CHWcpumem(ip, community, oid, timeout, retry) + case "Huawei_V3.10": + oid = "1.3.6.1.4.1.2011.6.1.1.1.3" + return getH3CHWcpumem(ip, community, oid, timeout, retry) + case "Huawei_ME60": + oid = "1.3.6.1.4.1.2011.6.3.4.1.2" + return getHuawei_ME60cpu(ip, community, oid, timeout, retry) + case "H3C", "H3C_V5", "H3C_V7": + oid = "1.3.6.1.4.1.25506.2.6.1.1.1.1.6" + return getH3CHWcpumem(ip, community, oid, timeout, retry) + case "H3C_S9500": + oid = 
"1.3.6.1.4.1.2011.10.2.6.1.1.1.1.6" + return getH3CHWcpumem(ip, community, oid, timeout, retry) + case "Juniper": + oid = "1.3.6.1.4.1.2636.3.1.13.1.8" + return getH3CHWcpumem(ip, community, oid, timeout, retry) + case "Ruijie": + oid = "1.3.6.1.4.1.4881.1.1.10.2.36.1.1.2" + return getRuijiecpumem(ip, community, oid, timeout, retry) + case "Dell": + oid = "1.3.6.1.4.1.674.10895.5000.2.6132.1.1.1.1.4.11" + return getDellCpu(ip, community, oid, timeout, retry) + default: + err = errors.New(ip + "Switch Vendor is not defined") + return 0, err + } + + var snmpPDUs []gosnmp.SnmpPDU + for i := 0; i < retry; i++ { + snmpPDUs, err = RunSnmp(ip, community, oid, method, timeout) + if len(snmpPDUs) > 0 { + break + } + time.Sleep(100 * time.Millisecond) + } + + if err == nil { + for _, pdu := range snmpPDUs { + return pdu.Value.(int), err + } + } + + return 0, err +} + +func getCiscoASAcpu(ip, community, oid string, timeout, retry int) (value int, err error) { + CPU_Value_SUM, CPU_Count, err := snmp_walk_sum(ip, community, oid, timeout, retry) + if err == nil { + if CPU_Count > 0 { + return int(CPU_Value_SUM / CPU_Count), err + } + } + return 0, err +} + +func getH3CHWcpumem(ip, community, oid string, timeout, retry int) (value int, err error) { + defer func() { + if r := recover(); r != nil { + log.Println(ip+" Recovered in CPUtilization", r) + } + }() + method := "getnext" + oidnext := oid + var snmpPDUs []gosnmp.SnmpPDU + + for { + for i := 0; i < retry; i++ { + snmpPDUs, err = RunSnmp(ip, community, oidnext, method, timeout) + if len(snmpPDUs) > 0 { + break + } + time.Sleep(100 * time.Millisecond) + } + oidnext = snmpPDUs[0].Name + if strings.Contains(oidnext, oid) { + if snmpPDUs[0].Value.(int) != 0 { + value = snmpPDUs[0].Value.(int) + break + } + } else { + break + } + + } + + return value, err +} + +func getRuijiecpumem(ip, community, oid string, timeout, retry int) (value int, err error) { + + defer func() { + if r := recover(); r != nil { + log.Println(ip+" Recovered 
in CPUtilization", r) + } + }() + method := "getnext" + + var snmpPDUs []gosnmp.SnmpPDU + + for i := 0; i < retry; i++ { + snmpPDUs, err = RunSnmp(ip, community, oid, method, timeout) + if len(snmpPDUs) > 0 { + break + } + time.Sleep(100 * time.Millisecond) + } + + return snmpPDUs[0].Value.(int), err +} + +func getHuawei_ME60cpu(ip, community, oid string, timeout, retry int) (value int, err error) { + CPU_Value_SUM, CPU_Count, err := snmp_walk_sum(ip, community, oid, timeout, retry) + if err == nil { + if CPU_Count > 0 { + return int(CPU_Value_SUM / CPU_Count), err + } + } + + return 0, err +} + +func getDellCpu(ip, community, oid string, timeout, retry int) (value int, err error) { + + defer func() { + if r := recover(); r != nil { + log.Println(ip+" Recovered in CPUtilization", r) + } + }() + method := "getnext" + + var snmpPDUs []gosnmp.SnmpPDU + + for i := 0; i < retry; i++ { + snmpPDUs, err = RunSnmp(ip, community, oid, method, timeout) + if len(snmpPDUs) > 0 { + break + } + time.Sleep(100 * time.Millisecond) + } + + return snmpPDUs[0].Value.(int), err +} + +func snmp_walk_sum(ip, community, oid string, timeout, retry int) (value_sum int, value_count int, err error) { + defer func() { + if r := recover(); r != nil { + log.Println(ip+" Recovered in CPUtilization", r) + } + }() + var snmpPDUs []gosnmp.SnmpPDU + method := "walk" + for i := 0; i < retry; i++ { + snmpPDUs, err = RunSnmp(ip, community, oid, method, timeout) + if len(snmpPDUs) > 0 { + break + } + time.Sleep(100 * time.Millisecond) + } + var Values []int + if err == nil { + for _, pdu := range snmpPDUs { + Values = append(Values, pdu.Value.(int)) + } + } + var Value_SUM int + Value_SUM = 0 + for _, value := range Values { + Value_SUM = Value_SUM + value + } + return Value_SUM, len(Values), err +} diff --git a/vendor/github.com/gaochao1/sw/descrstat.go b/vendor/github.com/gaochao1/sw/descrstat.go new file mode 100644 index 00000000..6ac09b97 --- /dev/null +++ 
b/vendor/github.com/gaochao1/sw/descrstat.go @@ -0,0 +1,119 @@ +package sw + +import ( + "log" + "strconv" + "strings" +) + +func SysDescr(ip, community string, timeout int) (string, error) { + oid := "1.3.6.1.2.1.1.1.0" + method := "get" + + snmpPDUs, err := RunSnmp(ip, community, oid, method, timeout) + defer func() { + if r := recover(); r != nil { + log.Println(ip+" Recovered in sysDescr", r) + } + }() + if err == nil { + for _, pdu := range snmpPDUs { + return pdu.Value.(string), err + } + } + + return "", err +} + +func SysVendor(ip, community string, timeout int) (string, error) { + sysDescr, err := SysDescr(ip, community, timeout) + sysDescrLower := strings.ToLower(sysDescr) + + if strings.Contains(sysDescrLower, "cisco nx-os") { + return "Cisco_NX", err + } + + if strings.Contains(sysDescr, "Cisco Internetwork Operating System Software") { + return "Cisco_old", err + } + + if strings.Contains(sysDescrLower, "cisco ios") { + if strings.Contains(sysDescr, "IOS-XE Software") { + return "Cisco_IOS_XE", err + } else if strings.Contains(sysDescr, "Cisco IOS XR") { + return "Cisco_IOS_XR", err + } else { + return "Cisco", err + } + } + + if strings.Contains(sysDescrLower, "cisco adaptive security appliance") { + version_number, err := strconv.ParseFloat(getVersionNumber(sysDescr), 32) + if err == nil && version_number < 9.2 { + return "Cisco_ASA_OLD", err + } + return "Cisco_ASA", err + } + if strings.Contains(sysDescrLower, "h3c") { + if strings.Contains(sysDescr, "Software Version 5") { + return "H3C_V5", err + } + + if strings.Contains(sysDescr, "Software Version 7") { + return "H3C_V7", err + } + + if strings.Contains(sysDescr, "Version S9500") { + return "H3C_S9500", err + } + + return "H3C", err + } + + if strings.Contains(sysDescrLower, "huawei") { + if strings.Contains(sysDescr, "MultiserviceEngine 60") { + return "Huawei_ME60", err + } + if strings.Contains(sysDescr, "Version 5.70") { + return "Huawei_V5.70", err + } + if strings.Contains(sysDescr, 
"Version 5.130") { + return "Huawei_V5.130", err + } + if strings.Contains(sysDescr, "Version 3.10") { + return "Huawei_V3.10", err + } + return "Huawei", err + } + + if strings.Contains(sysDescrLower, "ruijie") { + return "Ruijie", err + } + + if strings.Contains(sysDescrLower, "juniper networks") { + return "Juniper", err + } + + if strings.Contains(sysDescrLower, "dell networking") { + return "Dell", err + } + + if strings.Contains(sysDescrLower, "linux") { + return "Linux", err + } + + return "", err +} + +func getVersionNumber(sysdescr string) string { + version_number := "" + s := strings.Fields(sysdescr) + for index, value := range s { + if strings.ToLower(value) == "version" { + version_number = s[index+1] + } + } + version_number = strings.Replace(version_number, "(", "", -1) + version_number = strings.Replace(version_number, ")", "", -1) + return version_number +} diff --git a/vendor/github.com/gaochao1/sw/fastping.go b/vendor/github.com/gaochao1/sw/fastping.go new file mode 100644 index 00000000..0bd0e21e --- /dev/null +++ b/vendor/github.com/gaochao1/sw/fastping.go @@ -0,0 +1,32 @@ +package sw + +import ( + "github.com/freedomkk-qfeng/go-fastping" + "net" + "time" +) + +func fastPingRtt(ip string, timeout int) (float64, error) { + var rt float64 + rt = -1 + p := fastping.NewPinger() + ra, err := net.ResolveIPAddr("ip4:icmp", ip) + if err != nil { + return -1, err + } + p.AddIPAddr(ra) + p.OnRecv = func(addr *net.IPAddr, rtt time.Duration) { + rt = float64(rtt.Nanoseconds()) / 1000000.0 + //fmt.Printf("IP Addr: %s receive, RTT: %v\n", addr.String(), rtt) + } + p.OnIdle = func() { + //fmt.Println("finish") + } + p.MaxRTT = time.Millisecond * time.Duration(timeout) + err = p.Run() + if err != nil { + return -1, err + } + + return rt, err +} diff --git a/vendor/github.com/gaochao1/sw/goping.go b/vendor/github.com/gaochao1/sw/goping.go new file mode 100644 index 00000000..201c1197 --- /dev/null +++ b/vendor/github.com/gaochao1/sw/goping.go @@ -0,0 +1,189 @@ 
+// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// taken from http://golang.org/src/pkg/net/ipraw_test.go +// taken from https://github.com/paulstuart/ping/blob/master/ping.go +// change timeout from time.Second to time.Millisecond. + +package sw + +import ( + "bytes" + "errors" + "fmt" + "net" + "os" + "strconv" + "time" +) + +const ( + icmpv4EchoRequest = 8 + icmpv4EchoReply = 0 + icmpv6EchoRequest = 128 + icmpv6EchoReply = 129 +) + +type icmpMessage struct { + Type int // type + Code int // code + Checksum int // checksum + Body icmpMessageBody // body +} + +type icmpMessageBody interface { + Len() int + Marshal() ([]byte, error) +} + +// Marshal returns the binary enconding of the ICMP echo request or +// reply message m. +func (m *icmpMessage) Marshal() ([]byte, error) { + b := []byte{byte(m.Type), byte(m.Code), 0, 0} + if m.Body != nil && m.Body.Len() != 0 { + mb, err := m.Body.Marshal() + if err != nil { + return nil, err + } + b = append(b, mb...) + } + switch m.Type { + case icmpv6EchoRequest, icmpv6EchoReply: + return b, nil + } + csumcv := len(b) - 1 // checksum coverage + s := uint32(0) + for i := 0; i < csumcv; i += 2 { + s += uint32(b[i+1])<<8 | uint32(b[i]) + } + if csumcv&1 == 0 { + s += uint32(b[csumcv]) + } + s = s>>16 + s&0xffff + s = s + s>>16 + // Place checksum back in header; using ^= avoids the + // assumption the checksum bytes are zero. + b[2] ^= byte(^s & 0xff) + b[3] ^= byte(^s >> 8) + return b, nil +} + +// parseICMPMessage parses b as an ICMP message. 
+func parseICMPMessage(b []byte) (*icmpMessage, error) { + msglen := len(b) + if msglen < 4 { + return nil, errors.New("message too short") + } + m := &icmpMessage{Type: int(b[0]), Code: int(b[1]), Checksum: int(b[2])<<8 | int(b[3])} + if msglen > 4 { + var err error + switch m.Type { + case icmpv4EchoRequest, icmpv4EchoReply, icmpv6EchoRequest, icmpv6EchoReply: + m.Body, err = parseICMPEcho(b[4:]) + if err != nil { + return nil, err + } + } + } + return m, nil +} + +// imcpEcho represenets an ICMP echo request or reply message body. +type icmpEcho struct { + ID int // identifier + Seq int // sequence number + Data []byte // data +} + +func (p *icmpEcho) Len() int { + if p == nil { + return 0 + } + return 4 + len(p.Data) +} + +// Marshal returns the binary enconding of the ICMP echo request or +// reply message body p. +func (p *icmpEcho) Marshal() ([]byte, error) { + b := make([]byte, 4+len(p.Data)) + b[0], b[1] = byte(p.ID>>8), byte(p.ID&0xff) + b[2], b[3] = byte(p.Seq>>8), byte(p.Seq&0xff) + copy(b[4:], p.Data) + return b, nil +} + +// parseICMPEcho parses b as an ICMP echo request or reply message body. 
+func parseICMPEcho(b []byte) (*icmpEcho, error) { + bodylen := len(b) + p := &icmpEcho{ID: int(b[0])<<8 | int(b[1]), Seq: int(b[2])<<8 | int(b[3])} + if bodylen > 4 { + p.Data = make([]byte, bodylen-4) + copy(p.Data, b[4:]) + } + return p, nil +} + +func goPingRtt(address string, timeout int) (float64, error) { + now := time.Now() + + err := Pinger(address, timeout) + + end := time.Now() + d := end.Sub(now) + + rttStr := fmt.Sprintf("%.3f", float64(d.Nanoseconds())/1000000.0) + rtt, _ := strconv.ParseFloat(rttStr, 64) + + return rtt, err +} + +func Pinger(address string, timeout int) error { + c, err := net.Dial("ip4:icmp", address) + if err != nil { + return err + } + c.SetDeadline(time.Now().Add(time.Duration(timeout) * time.Millisecond)) + defer c.Close() + + typ := icmpv4EchoRequest + xid, xseq := os.Getpid()&0xffff, 1 + wb, err := (&icmpMessage{ + Type: typ, Code: 0, + Body: &icmpEcho{ + ID: xid, Seq: xseq, + Data: bytes.Repeat([]byte("Go Ping!"), 3), + }, + }).Marshal() + if err != nil { + return err + } + if _, err = c.Write(wb); err != nil { + return err + } + var m *icmpMessage + rb := make([]byte, 40+len(wb)) + for { + if _, err = c.Read(rb); err != nil { + return err + } + rb = ipv4Payload(rb) + if m, err = parseICMPMessage(rb); err != nil { + return err + } + switch m.Type { + case icmpv4EchoRequest, icmpv6EchoRequest: + continue + } + break + } + return nil +} + +func ipv4Payload(b []byte) []byte { + if len(b) < 20 { + return b + } + hdrlen := int(b[0]&0x0f) << 2 + return b[hdrlen:] +} diff --git a/vendor/github.com/gaochao1/sw/ifstat.go b/vendor/github.com/gaochao1/sw/ifstat.go new file mode 100644 index 00000000..7c93b9de --- /dev/null +++ b/vendor/github.com/gaochao1/sw/ifstat.go @@ -0,0 +1,468 @@ +package sw + +import ( + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/gaochao1/gosnmp" +) + +const ( + ifNameOid = "1.3.6.1.2.1.31.1.1.1.1" + ifNameOidPrefix = ".1.3.6.1.2.1.31.1.1.1.1." 
+ ifHCInOid = "1.3.6.1.2.1.31.1.1.1.6" + ifHCInOidPrefix = ".1.3.6.1.2.1.31.1.1.1.6." + ifHCOutOid = "1.3.6.1.2.1.31.1.1.1.10" + ifHCInPktsOid = "1.3.6.1.2.1.31.1.1.1.7" + ifHCInPktsOidPrefix = ".1.3.6.1.2.1.31.1.1.1.7." + ifHCOutPktsOid = "1.3.6.1.2.1.31.1.1.1.11" + ifOperStatusOid = "1.3.6.1.2.1.2.2.1.8" + ifOperStatusOidPrefix = ".1.3.6.1.2.1.2.2.1.8." + ifHCInBroadcastPktsOid = "1.3.6.1.2.1.31.1.1.1.9" + ifHCInBroadcastPktsOidPrefix = ".1.3.6.1.2.1.31.1.1.1.9." + ifHCOutBroadcastPktsOid = "1.3.6.1.2.1.31.1.1.1.13" + // multicastpkt + ifHCInMulticastPktsOid = "1.3.6.1.2.1.31.1.1.1.8" + ifHCInMulticastPktsOidPrefix = ".1.3.6.1.2.1.31.1.1.1.8." + ifHCOutMulticastPktsOid = "1.3.6.1.2.1.31.1.1.1.12" + // speed 配置 + ifSpeedOid = "1.3.6.1.2.1.31.1.1.1.15" + ifSpeedOidPrefix = ".1.3.6.1.2.1.31.1.1.1.15." + + // Discards配置 + ifInDiscardsOid = "1.3.6.1.2.1.2.2.1.13" + ifInDiscardsOidPrefix = ".1.3.6.1.2.1.2.2.1.13." + ifOutDiscardsOid = "1.3.6.1.2.1.2.2.1.19" + + // Errors配置 + ifInErrorsOid = "1.3.6.1.2.1.2.2.1.14" + ifInErrorsOidPrefix = ".1.3.6.1.2.1.2.2.1.14." + ifOutErrorsOid = "1.3.6.1.2.1.2.2.1.20" + ifOutErrorsOidPrefix = ".1.3.6.1.2.1.2.2.1.20." + + //ifInUnknownProtos 由于未知或不支持的网络协议而丢弃的输入报文的数量 + ifInUnknownProtosOid = "1.3.6.1.2.1.2.2.1.15" + ifInUnknownProtosPrefix = ".1.3.6.1.2.1.2.2.1.15." + + //ifOutQLen 接口上输出报文队列长度 + ifOutQLenOid = "1.3.6.1.2.1.2.2.1.21" + ifOutQLenPrefix = ".1.3.6.1.2.1.2.2.1.21." 
+) + +type IfStats struct { + IfName string + IfIndex int + IfHCInOctets uint64 + IfHCOutOctets uint64 + IfHCInUcastPkts uint64 + IfHCOutUcastPkts uint64 + IfHCInBroadcastPkts uint64 + IfHCOutBroadcastPkts uint64 + IfHCInMulticastPkts uint64 + IfHCOutMulticastPkts uint64 + IfSpeed int + IfInDiscards int + IfOutDiscards int + IfInErrors int + IfOutErrors int + IfInUnknownProtos int + IfOutQLen int + IfOperStatus int + TS int64 +} + +func (this *IfStats) String() string { + return fmt.Sprintf("", this.IfName, this.IfIndex, this.IfHCInOctets, this.IfHCOutOctets) +} + +func ListIfStats(ip, community string, timeout int, ignoreIface []string, retry int, limitConn int, ignorePkt bool, ignoreOperStatus bool, ignoreBroadcastPkt bool, ignoreMulticastPkt bool, ignoreDiscards bool, ignoreErrors bool, ignoreUnknownProtos bool, ignoreOutQLen bool) ([]IfStats, error) { + var ifStatsList []IfStats + var limitCh chan bool + if limitConn > 0 { + limitCh = make(chan bool, limitConn) + } else { + limitCh = make(chan bool, 1) + } + + defer func() { + if r := recover(); r != nil { + log.Println(ip+" Recovered in ListIfStats", r) + } + }() + + chIfInList := make(chan []gosnmp.SnmpPDU) + chIfOutList := make(chan []gosnmp.SnmpPDU) + + chIfNameList := make(chan []gosnmp.SnmpPDU) + chIfSpeedList := make(chan []gosnmp.SnmpPDU) + + limitCh <- true + go ListIfHCInOctets(ip, community, timeout, chIfInList, retry, limitCh) + time.Sleep(5 * time.Millisecond) + limitCh <- true + go ListIfHCOutOctets(ip, community, timeout, chIfOutList, retry, limitCh) + time.Sleep(5 * time.Millisecond) + limitCh <- true + go ListIfName(ip, community, timeout, chIfNameList, retry, limitCh) + time.Sleep(5 * time.Millisecond) + limitCh <- true + go ListIfSpeed(ip, community, timeout, chIfSpeedList, retry, limitCh) + time.Sleep(5 * time.Millisecond) + + // OperStatus + var ifStatusList []gosnmp.SnmpPDU + chIfStatusList := make(chan []gosnmp.SnmpPDU) + if ignoreOperStatus == false { + limitCh <- true + go 
ListIfOperStatus(ip, community, timeout, chIfStatusList, retry, limitCh) + time.Sleep(5 * time.Millisecond) + } + + chIfInPktList := make(chan []gosnmp.SnmpPDU) + chIfOutPktList := make(chan []gosnmp.SnmpPDU) + + var ifInPktList, ifOutPktList []gosnmp.SnmpPDU + + if ignorePkt == false { + limitCh <- true + go ListIfHCInUcastPkts(ip, community, timeout, chIfInPktList, retry, limitCh) + time.Sleep(5 * time.Millisecond) + limitCh <- true + go ListIfHCOutUcastPkts(ip, community, timeout, chIfOutPktList, retry, limitCh) + time.Sleep(5 * time.Millisecond) + + } + + chIfInBroadcastPktList := make(chan []gosnmp.SnmpPDU) + chIfOutBroadcastPktList := make(chan []gosnmp.SnmpPDU) + + var ifInBroadcastPktList, ifOutBroadcastPktList []gosnmp.SnmpPDU + + if ignoreBroadcastPkt == false { + limitCh <- true + go ListIfHCInBroadcastPkts(ip, community, timeout, chIfInBroadcastPktList, retry, limitCh) + time.Sleep(5 * time.Millisecond) + limitCh <- true + go ListIfHCOutBroadcastPkts(ip, community, timeout, chIfOutBroadcastPktList, retry, limitCh) + time.Sleep(5 * time.Millisecond) + + } + + chIfInMulticastPktList := make(chan []gosnmp.SnmpPDU) + chIfOutMulticastPktList := make(chan []gosnmp.SnmpPDU) + + var ifInMulticastPktList, ifOutMulticastPktList []gosnmp.SnmpPDU + + if ignoreMulticastPkt == false { + limitCh <- true + go ListIfHCInMulticastPkts(ip, community, timeout, chIfInMulticastPktList, retry, limitCh) + time.Sleep(5 * time.Millisecond) + limitCh <- true + go ListIfHCOutMulticastPkts(ip, community, timeout, chIfOutMulticastPktList, retry, limitCh) + time.Sleep(5 * time.Millisecond) + + } + + //Discards + chIfInDiscardsList := make(chan []gosnmp.SnmpPDU) + chIfOutDiscardsList := make(chan []gosnmp.SnmpPDU) + + var ifInDiscardsList, ifOutDiscardsList []gosnmp.SnmpPDU + + if ignoreDiscards == false { + limitCh <- true + go ListIfInDiscards(ip, community, timeout, chIfInDiscardsList, retry, limitCh) + time.Sleep(5 * time.Millisecond) + limitCh <- true + go ListIfOutDiscards(ip, 
community, timeout, chIfOutDiscardsList, retry, limitCh) + time.Sleep(5 * time.Millisecond) + + } + + //Errors + chIfInErrorsList := make(chan []gosnmp.SnmpPDU) + chIfOutErrorsList := make(chan []gosnmp.SnmpPDU) + + var ifInErrorsList, ifOutErrorsList []gosnmp.SnmpPDU + + if ignoreErrors == false { + limitCh <- true + go ListIfInErrors(ip, community, timeout, chIfInErrorsList, retry, limitCh) + time.Sleep(5 * time.Millisecond) + limitCh <- true + go ListIfOutErrors(ip, community, timeout, chIfOutErrorsList, retry, limitCh) + time.Sleep(5 * time.Millisecond) + + } + + //UnknownProtos + chIfInUnknownProtosList := make(chan []gosnmp.SnmpPDU) + + var ifInUnknownProtosList []gosnmp.SnmpPDU + + if ignoreUnknownProtos == false { + limitCh <- true + go ListIfInUnknownProtos(ip, community, timeout, chIfInUnknownProtosList, retry, limitCh) + time.Sleep(5 * time.Millisecond) + + } + //QLen + chIfOutQLenList := make(chan []gosnmp.SnmpPDU) + + var ifOutQLenList []gosnmp.SnmpPDU + + if ignoreOutQLen == false { + limitCh <- true + go ListIfOutQLen(ip, community, timeout, chIfOutQLenList, retry, limitCh) + time.Sleep(5 * time.Millisecond) + + } + ifInList := <-chIfInList + ifOutList := <-chIfOutList + ifNameList := <-chIfNameList + ifSpeedList := <-chIfSpeedList + if ignoreOperStatus == false { + ifStatusList = <-chIfStatusList + } + if ignorePkt == false { + ifInPktList = <-chIfInPktList + ifOutPktList = <-chIfOutPktList + } + if ignoreBroadcastPkt == false { + ifInBroadcastPktList = <-chIfInBroadcastPktList + ifOutBroadcastPktList = <-chIfOutBroadcastPktList + } + if ignoreMulticastPkt == false { + ifInMulticastPktList = <-chIfInMulticastPktList + ifOutMulticastPktList = <-chIfOutMulticastPktList + } + if ignoreDiscards == false { + ifInDiscardsList = <-chIfInDiscardsList + ifOutDiscardsList = <-chIfOutDiscardsList + } + if ignoreErrors == false { + ifInErrorsList = <-chIfInErrorsList + ifOutErrorsList = <-chIfOutErrorsList + } + if ignoreUnknownProtos == false { + 
ifInUnknownProtosList = <-chIfInUnknownProtosList + } + if ignoreOutQLen == false { + ifOutQLenList = <-chIfOutQLenList + } + + if len(ifNameList) > 0 && len(ifInList) > 0 && len(ifOutList) > 0 { + now := time.Now().Unix() + + for _, ifNamePDU := range ifNameList { + + ifName := ifNamePDU.Value.(string) + + check := true + if len(ignoreIface) > 0 { + for _, ignore := range ignoreIface { + if strings.Contains(ifName, ignore) { + check = false + break + } + } + } + + if check { + var ifStats IfStats + + ifIndexStr := strings.Replace(ifNamePDU.Name, ifNameOidPrefix, "", 1) + + ifStats.IfIndex, _ = strconv.Atoi(ifIndexStr) + + for ti, ifHCInOctetsPDU := range ifInList { + if strings.Replace(ifHCInOctetsPDU.Name, ifHCInOidPrefix, "", 1) == ifIndexStr { + ifStats.IfHCInOctets = ifInList[ti].Value.(uint64) + ifStats.IfHCOutOctets = ifOutList[ti].Value.(uint64) + break + } + } + if ignorePkt == false { + for ti, ifHCInPktsPDU := range ifInPktList { + if strings.Replace(ifHCInPktsPDU.Name, ifHCInPktsOidPrefix, "", 1) == ifIndexStr { + ifStats.IfHCInUcastPkts = ifInPktList[ti].Value.(uint64) + ifStats.IfHCOutUcastPkts = ifOutPktList[ti].Value.(uint64) + break + } + } + } + if ignoreBroadcastPkt == false { + for ti, ifHCInBroadcastPktPDU := range ifInBroadcastPktList { + if strings.Replace(ifHCInBroadcastPktPDU.Name, ifHCInBroadcastPktsOidPrefix, "", 1) == ifIndexStr { + ifStats.IfHCInBroadcastPkts = ifInBroadcastPktList[ti].Value.(uint64) + ifStats.IfHCOutBroadcastPkts = ifOutBroadcastPktList[ti].Value.(uint64) + break + } + } + } + if ignoreMulticastPkt == false { + for ti, ifHCInMulticastPktPDU := range ifInMulticastPktList { + if strings.Replace(ifHCInMulticastPktPDU.Name, ifHCInMulticastPktsOidPrefix, "", 1) == ifIndexStr { + ifStats.IfHCInMulticastPkts = ifInMulticastPktList[ti].Value.(uint64) + ifStats.IfHCOutMulticastPkts = ifOutMulticastPktList[ti].Value.(uint64) + break + } + } + } + + if ignoreDiscards == false { + for ti, ifInDiscardsPDU := range ifInDiscardsList 
{ + if strings.Replace(ifInDiscardsPDU.Name, ifInDiscardsOidPrefix, "", 1) == ifIndexStr { + ifStats.IfInDiscards = ifInDiscardsList[ti].Value.(int) + ifStats.IfOutDiscards = ifOutDiscardsList[ti].Value.(int) + break + } + } + } + + if ignoreErrors == false { + for ti, ifInErrorsPDU := range ifInErrorsList { + if strings.Replace(ifInErrorsPDU.Name, ifInErrorsOidPrefix, "", 1) == ifIndexStr { + ifStats.IfInErrors = ifInErrorsList[ti].Value.(int) + break + } + } + for ti, ifOutErrorsPDU := range ifOutErrorsList { + if strings.Replace(ifOutErrorsPDU.Name, ifOutErrorsOidPrefix, "", 1) == ifIndexStr { + ifStats.IfOutErrors = ifOutErrorsList[ti].Value.(int) + break + } + } + } + + if ignoreOperStatus == false { + for ti, ifOperStatusPDU := range ifStatusList { + if strings.Replace(ifOperStatusPDU.Name, ifOperStatusOidPrefix, "", 1) == ifIndexStr { + ifStats.IfOperStatus = ifStatusList[ti].Value.(int) + break + } + } + } + + if ignoreUnknownProtos == false { + for ti, ifInUnknownProtosPDU := range ifInUnknownProtosList { + if strings.Replace(ifInUnknownProtosPDU.Name, ifInUnknownProtosPrefix, "", 1) == ifIndexStr { + ifStats.IfInUnknownProtos = ifInUnknownProtosList[ti].Value.(int) + break + } + } + } + + if ignoreOutQLen == false { + for ti, ifOutQLenPDU := range ifOutQLenList { + if strings.Replace(ifOutQLenPDU.Name, ifOutQLenPrefix, "", 1) == ifIndexStr { + ifStats.IfOutQLen = ifOutQLenList[ti].Value.(int) + break + } + } + } + + for ti, ifSpeedPDU := range ifSpeedList { + if strings.Replace(ifSpeedPDU.Name, ifSpeedOidPrefix, "", 1) == ifIndexStr { + ifStats.IfSpeed = 1000 * 1000 * ifSpeedList[ti].Value.(int) + break + } + } + + ifStats.TS = now + ifStats.IfName = ifName + ifStatsList = append(ifStatsList, ifStats) + } + } + } + + return ifStatsList, nil +} + +func ListIfOperStatus(ip, community string, timeout int, ch chan []gosnmp.SnmpPDU, retry int, limitCh chan bool) { + RunSnmpRetry(ip, community, timeout, ch, retry, limitCh, ifOperStatusOid) +} + +func 
ListIfName(ip, community string, timeout int, ch chan []gosnmp.SnmpPDU, retry int, limitCh chan bool) { + RunSnmpRetry(ip, community, timeout, ch, retry, limitCh, ifNameOid) +} + +func ListIfHCInOctets(ip, community string, timeout int, ch chan []gosnmp.SnmpPDU, retry int, limitCh chan bool) { + RunSnmpRetry(ip, community, timeout, ch, retry, limitCh, ifHCInOid) +} + +func ListIfHCOutOctets(ip, community string, timeout int, ch chan []gosnmp.SnmpPDU, retry int, limitCh chan bool) { + RunSnmpRetry(ip, community, timeout, ch, retry, limitCh, ifHCOutOid) +} + +func ListIfHCInUcastPkts(ip, community string, timeout int, ch chan []gosnmp.SnmpPDU, retry int, limitCh chan bool) { + RunSnmpRetry(ip, community, timeout, ch, retry, limitCh, ifHCInPktsOid) +} + +func ListIfHCInBroadcastPkts(ip, community string, timeout int, ch chan []gosnmp.SnmpPDU, retry int, limitCh chan bool) { + RunSnmpRetry(ip, community, timeout, ch, retry, limitCh, ifHCInBroadcastPktsOid) +} + +func ListIfHCOutBroadcastPkts(ip, community string, timeout int, ch chan []gosnmp.SnmpPDU, retry int, limitCh chan bool) { + RunSnmpRetry(ip, community, timeout, ch, retry, limitCh, ifHCOutBroadcastPktsOid) +} + +func ListIfHCInMulticastPkts(ip, community string, timeout int, ch chan []gosnmp.SnmpPDU, retry int, limitCh chan bool) { + RunSnmpRetry(ip, community, timeout, ch, retry, limitCh, ifHCInMulticastPktsOid) +} + +func ListIfHCOutMulticastPkts(ip, community string, timeout int, ch chan []gosnmp.SnmpPDU, retry int, limitCh chan bool) { + RunSnmpRetry(ip, community, timeout, ch, retry, limitCh, ifHCOutMulticastPktsOid) +} + +func ListIfInDiscards(ip, community string, timeout int, ch chan []gosnmp.SnmpPDU, retry int, limitCh chan bool) { + RunSnmpRetry(ip, community, timeout, ch, retry, limitCh, ifInDiscardsOid) +} + +func ListIfOutDiscards(ip, community string, timeout int, ch chan []gosnmp.SnmpPDU, retry int, limitCh chan bool) { + RunSnmpRetry(ip, community, timeout, ch, retry, limitCh, ifOutDiscardsOid) 
+} + +func ListIfInErrors(ip, community string, timeout int, ch chan []gosnmp.SnmpPDU, retry int, limitCh chan bool) { + RunSnmpRetry(ip, community, timeout, ch, retry, limitCh, ifInErrorsOid) +} + +func ListIfOutErrors(ip, community string, timeout int, ch chan []gosnmp.SnmpPDU, retry int, limitCh chan bool) { + RunSnmpRetry(ip, community, timeout, ch, retry, limitCh, ifOutErrorsOid) +} + +func ListIfHCOutUcastPkts(ip, community string, timeout int, ch chan []gosnmp.SnmpPDU, retry int, limitCh chan bool) { + RunSnmpRetry(ip, community, timeout, ch, retry, limitCh, ifHCOutPktsOid) +} + +func ListIfInUnknownProtos(ip, community string, timeout int, ch chan []gosnmp.SnmpPDU, retry int, limitCh chan bool) { + RunSnmpRetry(ip, community, timeout, ch, retry, limitCh, ifInUnknownProtosOid) +} + +func ListIfOutQLen(ip, community string, timeout int, ch chan []gosnmp.SnmpPDU, retry int, limitCh chan bool) { + RunSnmpRetry(ip, community, timeout, ch, retry, limitCh, ifOutQLenOid) +} + +func ListIfSpeed(ip, community string, timeout int, ch chan []gosnmp.SnmpPDU, retry int, limitCh chan bool) { + RunSnmpRetry(ip, community, timeout, ch, retry, limitCh, ifSpeedOid) +} + +func RunSnmpRetry(ip, community string, timeout int, ch chan []gosnmp.SnmpPDU, retry int, limitCh chan bool, oid string) { + + var snmpPDUs []gosnmp.SnmpPDU + var err error + snmpPDUs, err = RunSnmpwalk(ip, community, oid, retry, timeout) + if err != nil { + log.Println(ip, oid, err) + close(ch) + <-limitCh + return + } + <-limitCh + ch <- snmpPDUs + + return +} diff --git a/vendor/github.com/gaochao1/sw/ifstat_snmpwalk.go b/vendor/github.com/gaochao1/sw/ifstat_snmpwalk.go new file mode 100644 index 00000000..5aa53547 --- /dev/null +++ b/vendor/github.com/gaochao1/sw/ifstat_snmpwalk.go @@ -0,0 +1,334 @@ +package sw + +import ( + "bytes" + "log" + "os/exec" + "strconv" + "strings" + "time" +) + +func ListIfStatsSnmpWalk(ip, community string, timeout int, ignoreIface []string, retry int, ignorePkt bool, 
ignoreOperStatus bool, ignoreBroadcastPkt bool, ignoreMulticastPkt bool, ignoreDiscards bool, ignoreErrors bool, ignoreUnknownProtos bool, ignoreOutQLen bool) ([]IfStats, error) { + + var ifStatsList []IfStats + defer func() { + if r := recover(); r != nil { + log.Println(ip+" Recovered in ListIfStats_SnmpWalk", r) + } + }() + chIfInMap := make(chan map[string]string) + chIfOutMap := make(chan map[string]string) + + chIfNameMap := make(chan map[string]string) + chIfSpeedMap := make(chan map[string]string) + + go WalkIfIn(ip, community, timeout, chIfInMap, retry) + go WalkIfOut(ip, community, timeout, chIfOutMap, retry) + + go WalkIfName(ip, community, timeout, chIfNameMap, retry) + go WalkIfSpeed(ip, community, timeout, chIfSpeedMap, retry) + + ifInMap := <-chIfInMap + ifOutMap := <-chIfOutMap + + ifNameMap := <-chIfNameMap + ifSpeedMap := <-chIfSpeedMap + + var ifStatusMap map[string]string + chIfStatusMap := make(chan map[string]string) + if ignoreOperStatus == false { + go WalkIfOperStatus(ip, community, timeout, chIfStatusMap, retry) + ifStatusMap = <-chIfStatusMap + } + + chIfInPktMap := make(chan map[string]string) + chIfOutPktMap := make(chan map[string]string) + + var ifInPktMap, ifOutPktMap map[string]string + + if ignorePkt == false { + go WalkIfInPkts(ip, community, timeout, chIfInPktMap, retry) + go WalkIfOutPkts(ip, community, timeout, chIfOutPktMap, retry) + ifInPktMap = <-chIfInPktMap + ifOutPktMap = <-chIfOutPktMap + } + + chIfInBroadcastPktMap := make(chan map[string]string) + chIfOutBroadcastPktMap := make(chan map[string]string) + + var ifInBroadcastPktMap, ifOutBroadcastPktMap map[string]string + + if ignoreBroadcastPkt == false { + go WalkIfInBroadcastPkts(ip, community, timeout, chIfInBroadcastPktMap, retry) + go WalkIfOutBroadcastPkts(ip, community, timeout, chIfOutBroadcastPktMap, retry) + ifInBroadcastPktMap = <-chIfInBroadcastPktMap + ifOutBroadcastPktMap = <-chIfOutBroadcastPktMap + } + + chIfInMulticastPktMap := make(chan 
map[string]string) + chIfOutMulticastPktMap := make(chan map[string]string) + + var ifInMulticastPktMap, ifOutMulticastPktMap map[string]string + + if ignoreMulticastPkt == false { + go WalkIfInMulticastPkts(ip, community, timeout, chIfInMulticastPktMap, retry) + go WalkIfOutMulticastPkts(ip, community, timeout, chIfOutMulticastPktMap, retry) + ifInMulticastPktMap = <-chIfInMulticastPktMap + ifOutMulticastPktMap = <-chIfOutMulticastPktMap + } + + chIfInDiscardsMap := make(chan map[string]string) + chIfOutDiscardsMap := make(chan map[string]string) + + var ifInDiscardsMap, ifOutDiscardsMap map[string]string + + if ignoreDiscards == false { + go WalkIfInDiscards(ip, community, timeout, chIfInDiscardsMap, retry) + go WalkIfOutDiscards(ip, community, timeout, chIfOutDiscardsMap, retry) + ifInDiscardsMap = <-chIfInDiscardsMap + ifOutDiscardsMap = <-chIfOutDiscardsMap + } + + chIfInErrorsMap := make(chan map[string]string) + chIfOutErrorsMap := make(chan map[string]string) + + var ifInErrorsMap, ifOutErrorsMap map[string]string + + if ignoreErrors == false { + go WalkIfInErrors(ip, community, timeout, chIfInErrorsMap, retry) + go WalkIfOutErrors(ip, community, timeout, chIfOutErrorsMap, retry) + ifInErrorsMap = <-chIfInErrorsMap + ifOutErrorsMap = <-chIfOutErrorsMap + } + //UnknownProtos + chIfInUnknownProtosMap := make(chan map[string]string) + + var ifInUnknownProtosMap map[string]string + + if ignoreUnknownProtos == false { + go WalkIfInUnknownProtos(ip, community, timeout, chIfInUnknownProtosMap, retry) + ifInUnknownProtosMap = <-chIfInUnknownProtosMap + } + //QLen + chIfOutQLenMap := make(chan map[string]string) + + var ifOutQLenMap map[string]string + + if ignoreOutQLen == false { + go WalkIfOutQLen(ip, community, timeout, chIfOutQLenMap, retry) + ifOutQLenMap = <-chIfOutQLenMap + } + + if len(ifNameMap) > 0 && len(ifInMap) > 0 && len(ifOutMap) > 0 { + + now := time.Now().Unix() + + for ifIndex, ifName := range ifNameMap { + + check := true + if len(ignoreIface) > 
0 { + for _, ignore := range ignoreIface { + if strings.Contains(ifName, ignore) { + check = false + break + } + } + } + + if check { + var ifStats IfStats + var ifstatus_string string + ifStats.IfIndex, _ = strconv.Atoi(ifIndex) + ifStats.IfHCInOctets, _ = strconv.ParseUint(ifInMap[ifIndex], 10, 64) + ifStats.IfHCOutOctets, _ = strconv.ParseUint(ifOutMap[ifIndex], 10, 64) + + if ignorePkt == false { + ifStats.IfHCInUcastPkts, _ = strconv.ParseUint(ifInPktMap[ifIndex], 10, 64) + ifStats.IfHCOutUcastPkts, _ = strconv.ParseUint(ifOutPktMap[ifIndex], 10, 64) + } + if ignoreBroadcastPkt == false { + ifStats.IfHCInBroadcastPkts, _ = strconv.ParseUint(ifInBroadcastPktMap[ifIndex], 10, 64) + ifStats.IfHCOutBroadcastPkts, _ = strconv.ParseUint(ifOutBroadcastPktMap[ifIndex], 10, 64) + } + if ignoreMulticastPkt == false { + ifStats.IfHCInMulticastPkts, _ = strconv.ParseUint(ifInMulticastPktMap[ifIndex], 10, 64) + ifStats.IfHCOutMulticastPkts, _ = strconv.ParseUint(ifOutMulticastPktMap[ifIndex], 10, 64) + } + if ignoreDiscards == false { + ifStats.IfInDiscards, _ = strconv.Atoi(ifInDiscardsMap[ifIndex]) + ifStats.IfOutDiscards, _ = strconv.Atoi(ifOutDiscardsMap[ifIndex]) + } + if ignoreErrors == false { + ifStats.IfInErrors, _ = strconv.Atoi(ifInErrorsMap[ifIndex]) + ifStats.IfOutErrors, _ = strconv.Atoi(ifOutErrorsMap[ifIndex]) + } + if ignoreUnknownProtos == false { + ifStats.IfInUnknownProtos, _ = strconv.Atoi(ifInUnknownProtosMap[ifIndex]) + } + if ignoreOutQLen == false { + ifStats.IfOutQLen, _ = strconv.Atoi(ifOutQLenMap[ifIndex]) + } + + ifStats.IfSpeed, _ = strconv.Atoi(ifSpeedMap[ifIndex]) + ifStats.IfSpeed = 1000 * 1000 * ifStats.IfSpeed + + if ignoreOperStatus == false { + ifstatus_string = ifStatusMap[ifIndex] + ifstatus_string = strings.TrimSpace(ifstatus_string) + ifstatus := ifstatus_string[(len(ifstatus_string) - 2):(len(ifstatus_string) - 1)] + ifStats.IfOperStatus, _ = strconv.Atoi(ifstatus) + } + ifStats.TS = now + + ifName = strings.Replace(ifName, `"`, 
"", -1) + ifStats.IfName = ifName + + ifStatsList = append(ifStatsList, ifStats) + } + } + } + + return ifStatsList, nil +} + +func WalkIfOperStatus(ip, community string, timeout int, ch chan map[string]string, retry int) { + WalkIf(ip, ifOperStatusOid, community, timeout, retry, ch) +} + +func WalkIfName(ip, community string, timeout int, ch chan map[string]string, retry int) { + WalkIf(ip, ifNameOid, community, timeout, retry, ch) +} + +func WalkIfIn(ip, community string, timeout int, ch chan map[string]string, retry int) { + WalkIf(ip, ifHCInOid, community, timeout, retry, ch) +} + +func WalkIfOut(ip, community string, timeout int, ch chan map[string]string, retry int) { + WalkIf(ip, ifHCOutOid, community, timeout, retry, ch) +} + +func WalkIfInPkts(ip, community string, timeout int, ch chan map[string]string, retry int) { + WalkIf(ip, ifHCInPktsOid, community, timeout, retry, ch) +} + +func WalkIfOutPkts(ip, community string, timeout int, ch chan map[string]string, retry int) { + WalkIf(ip, ifHCOutPktsOid, community, timeout, retry, ch) +} + +func WalkIfInBroadcastPkts(ip, community string, timeout int, ch chan map[string]string, retry int) { + WalkIf(ip, ifHCInBroadcastPktsOid, community, timeout, retry, ch) +} + +func WalkIfOutBroadcastPkts(ip, community string, timeout int, ch chan map[string]string, retry int) { + WalkIf(ip, ifHCOutBroadcastPktsOid, community, timeout, retry, ch) +} + +func WalkIfInMulticastPkts(ip, community string, timeout int, ch chan map[string]string, retry int) { + WalkIf(ip, ifHCInMulticastPktsOid, community, timeout, retry, ch) +} + +func WalkIfOutMulticastPkts(ip, community string, timeout int, ch chan map[string]string, retry int) { + WalkIf(ip, ifHCOutMulticastPktsOid, community, timeout, retry, ch) +} + +func WalkIfInDiscards(ip, community string, timeout int, ch chan map[string]string, retry int) { + WalkIf(ip, ifInDiscardsOid, community, timeout, retry, ch) +} + +func WalkIfOutDiscards(ip, community string, timeout int, ch 
chan map[string]string, retry int) { + WalkIf(ip, ifOutDiscardsOid, community, timeout, retry, ch) +} + +func WalkIfInErrors(ip, community string, timeout int, ch chan map[string]string, retry int) { + WalkIf(ip, ifInErrorsOid, community, timeout, retry, ch) +} + +func WalkIfOutErrors(ip, community string, timeout int, ch chan map[string]string, retry int) { + WalkIf(ip, ifOutErrorsOid, community, timeout, retry, ch) +} + +func WalkIfInUnknownProtos(ip, community string, timeout int, ch chan map[string]string, retry int) { + WalkIf(ip, ifInUnknownProtosOid, community, timeout, retry, ch) +} + +func WalkIfOutQLen(ip, community string, timeout int, ch chan map[string]string, retry int) { + WalkIf(ip, ifOutQLenOid, community, timeout, retry, ch) +} + +func WalkIfSpeed(ip, community string, timeout int, ch chan map[string]string, retry int) { + WalkIf(ip, ifSpeedOid, community, timeout, retry, ch) +} + +func WalkIf(ip, oid, community string, timeout, retry int, ch chan map[string]string) { + result := make(map[string]string) + + for i := 0; i < retry; i++ { + out, err := CmdTimeout(timeout, "snmpwalk", "-v", "2c", "-c", community, ip, oid) + + var list []string + if strings.Contains(out, "IF-MIB") { + list = strings.Split(out, "IF-MIB") + } else { + list = strings.Split(out, "iso") + } + + for _, v := range list { + + defer func() { + if r := recover(); r != nil { + log.Println("Recovered in WalkIf", r) + } + }() + + if len(v) > 0 && strings.Contains(v, "=") { + vt := strings.Split(v, "=") + + var ifIndex, ifValue string + if strings.Contains(vt[0], ".") { + leftList := strings.Split(vt[0], ".") + ifIndex = leftList[len(leftList)-1] + ifIndex = strings.TrimSpace(ifIndex) + } + + if strings.Contains(vt[1], ":") { + ifValue = strings.Split(vt[1], ":")[1] + ifValue = strings.TrimSpace(ifValue) + } + + result[ifIndex] = ifValue + } + } + + if len(result) > 0 { + ch <- result + return + } + if err != nil && i == (retry-1) { + log.Println(ip, oid, err) + } + time.Sleep(100 * 
time.Millisecond) + } + + ch <- result + return +} + +func CmdTimeout(timeout int, name string, arg ...string) (string, error) { + cmd := exec.Command(name, arg...) + + var out bytes.Buffer + cmd.Stdout = &out + + cmd.Start() + timer := time.AfterFunc(time.Duration(timeout)*time.Millisecond, func() { + err := cmd.Process.Kill() + if err != nil { + log.Println("failed to kill: ", err) + } + }) + err := cmd.Wait() + timer.Stop() + + return out.String(), err +} diff --git a/vendor/github.com/gaochao1/sw/memstat.go b/vendor/github.com/gaochao1/sw/memstat.go new file mode 100644 index 00000000..ba9cd122 --- /dev/null +++ b/vendor/github.com/gaochao1/sw/memstat.go @@ -0,0 +1,220 @@ +package sw + +import ( + "errors" + "log" + "strconv" + "time" + + "github.com/gaochao1/gosnmp" +) + +func MemUtilization(ip, community string, timeout, retry int) (int, error) { + vendor, err := SysVendor(ip, community, timeout) + method := "get" + var oid string + + defer func() { + if r := recover(); r != nil { + log.Println(ip+" Recovered in MemUtilization", r) + } + }() + + switch vendor { + case "Cisco_NX": + oid = "1.3.6.1.4.1.9.9.305.1.1.2.0" + case "Cisco", "Cisco_IOS_XE", "Cisco_old": + memUsedOid := "1.3.6.1.4.1.9.9.48.1.1.1.5.1" + snmpMemUsed, _ := RunSnmp(ip, community, memUsedOid, method, timeout) + + memFreeOid := "1.3.6.1.4.1.9.9.48.1.1.1.6.1" + snmpMemFree, _ := RunSnmp(ip, community, memFreeOid, method, timeout) + + if len(snmpMemFree) == 0 || len(snmpMemUsed) == 0 { + err := errors.New(ip + " No Such Object available on this agent at this OID") + return 0, err + } else { + if snmpMemUsed[0].Value == nil || snmpMemFree[0].Value == nil { + err := errors.New(ip + " mem value return nil") + return 0, err + } + memUsed := snmpMemUsed[0].Value.(int) + memFree := snmpMemFree[0].Value.(int) + if memUsed+memFree != 0 { + memUtili := float64(memUsed) / float64(memUsed+memFree) + return int(memUtili * 100), nil + } + } + case "Cisco_IOS_XR": + return getCisco_IOS_XR_Mem(ip, community, 
timeout, retry) + case "Cisco_ASA", "Cisco_ASA_OLD": + return getCisco_ASA_Mem(ip, community, timeout, retry) + case "Huawei", "Huawei_V5.70", "Huawei_V5.130": + oid = "1.3.6.1.4.1.2011.5.25.31.1.1.1.1.7" + return getH3CHWcpumem(ip, community, oid, timeout, retry) + case "Huawei_V3.10": + return getOldHuawei_Mem(ip, community, timeout, retry) + case "Huawei_ME60": + return getHuawei_Me60_Mem(ip, community, timeout, retry) + case "H3C", "H3C_V5", "H3C_V7": + oid = "1.3.6.1.4.1.25506.2.6.1.1.1.1.8" + return getH3CHWcpumem(ip, community, oid, timeout, retry) + case "H3C_S9500": + oid = "1.3.6.1.4.1.2011.10.2.6.1.1.1.1.8" + return getH3CHWcpumem(ip, community, oid, timeout, retry) + case "Juniper": + oid = "1.3.6.1.4.1.2636.3.1.13.1.11" + return getH3CHWcpumem(ip, community, oid, timeout, retry) + case "Ruijie": + oid = "1.3.6.1.4.1.4881.1.1.10.2.35.1.1.1.3" + return getRuijiecpumem(ip, community, oid, timeout, retry) + case "Dell": + return GetDellMem(ip, community, timeout, retry) + default: + err = errors.New(ip + "Switch Vendor is not defined") + return 0, err + } + + var snmpPDUs []gosnmp.SnmpPDU + for i := 0; i < retry; i++ { + snmpPDUs, err = RunSnmp(ip, community, oid, method, timeout) + if len(snmpPDUs) > 0 { + break + } + time.Sleep(100 * time.Millisecond) + } + + if err == nil { + for _, pdu := range snmpPDUs { + return pdu.Value.(int), err + } + } + + return 0, err +} +func getCisco_IOS_XR_Mem(ip, community string, timeout, retry int) (int, error) { + defer func() { + if r := recover(); r != nil { + log.Println(ip+" Recovered in MemUtilization", r) + } + }() + cpuindex := "1.3.6.1.4.1.9.9.109.1.1.1.1.2" + method := "getnext" + var snmpPDUs []gosnmp.SnmpPDU + var err error + var index string + for i := 0; i < retry; i++ { + snmpPDUs, err = RunSnmp(ip, community, cpuindex, method, timeout) + if len(snmpPDUs) > 0 { + break + } + time.Sleep(100 * time.Millisecond) + } + index = strconv.Itoa(snmpPDUs[0].Value.(int)) + method = "get" + memUsedOid := 
"1.3.6.1.4.1.9.9.221.1.1.1.1.18." + index + ".1" + snmpMemUsed, _ := RunSnmp(ip, community, memUsedOid, method, timeout) + memFreeOid := "1.3.6.1.4.1.9.9.221.1.1.1.1.20." + index + ".1" + snmpMemFree, _ := RunSnmp(ip, community, memFreeOid, method, timeout) + if len(snmpMemFree) == 0 || len(snmpMemUsed) == 0 { + err := errors.New(ip + " No Such Object available on this agent at this OID") + return 0, err + } else { + if snmpMemUsed[0].Value == nil || snmpMemFree[0].Value == nil { + err := errors.New(ip + " mem value return nil") + return 0, err + } + memUsed := snmpMemUsed[0].Value.(uint64) + memFree := snmpMemFree[0].Value.(uint64) + if memUsed+memFree != 0 { + memUtili := float64(memUsed) / float64(memUsed+memFree) + return int(memUtili * 100), nil + } + } + return 0, err +} + +func getOldHuawei_Mem(ip, community string, timeout, retry int) (int, error) { + defer func() { + if r := recover(); r != nil { + log.Println(ip+" Recovered in MemUtilization", r) + } + }() + method := "walk" + memTotalOid := "1.3.6.1.4.1.2011.6.1.2.1.1.2" + snmpMemTotal, err := RunSnmp(ip, community, memTotalOid, method, timeout) + + memFreeOid := "1.3.6.1.4.1.2011.6.1.2.1.1.3" + snmpMemFree, err := RunSnmp(ip, community, memFreeOid, method, timeout) + if len(snmpMemFree) == 0 || len(snmpMemTotal) == 0 { + err := errors.New(ip + " No Such Object available on this agent at this OID") + return 0, err + } else { + memTotal := snmpMemTotal[0].Value.(int) + memFree := snmpMemFree[0].Value.(int) + if memTotal != 0 { + memUtili := float64(memTotal-memFree) / float64(memTotal) + return int(memUtili * 100), err + } + } + return 0, err +} + +func getCisco_ASA_Mem(ip, community string, timeout, retry int) (int, error) { + defer func() { + if r := recover(); r != nil { + log.Println(ip+" Recovered in MemUtilization", r) + } + }() + method := "getnext" + memUsedOid := "1.3.6.1.4.1.9.9.221.1.1.1.1.18" + snmpMemUsed, err := RunSnmp(ip, community, memUsedOid, method, timeout) + time.Sleep(100 * 
time.Millisecond) + memFreeOid := "1.3.6.1.4.1.9.9.221.1.1.1.1.20" + snmpMemFree, err := RunSnmp(ip, community, memFreeOid, method, timeout) + if len(snmpMemFree) == 0 || len(snmpMemUsed) == 0 { + err := errors.New(ip + " No Such Object available on this agent at this OID") + return 0, err + } else { + if snmpMemUsed[0].Value == nil || snmpMemFree[0].Value == nil { + err := errors.New(ip + " mem value return nil") + return 0, err + } + memUsed := snmpMemUsed[0].Value.(uint64) + memFree := snmpMemFree[0].Value.(uint64) + if memUsed+memFree != 0 { + memUtili := float64(memUsed) / float64(memUsed+memFree) + return int(memUtili * 100), nil + } + } + return 0, err +} + +func getHuawei_Me60_Mem(ip, community string, timeout, retry int) (int, error) { + memTotalOid := "1.3.6.1.4.1.2011.6.3.5.1.1.2" + + memTotal, _, err := snmp_walk_sum(ip, community, memTotalOid, timeout, retry) + + memFreeOid := "1.3.6.1.4.1.2011.6.3.5.1.1.3" + memFree, _, err := snmp_walk_sum(ip, community, memFreeOid, timeout, retry) + if memTotal != 0 && memFree != 0 { + memUtili := float64(memTotal-memFree) / float64(memTotal) + return int(memUtili * 100), nil + } + return 0, err +} + +func GetDellMem(ip, community string, timeout, retry int) (int, error) { + method := "getnext" + memTotalOid := "1.3.6.1.4.1.674.10895.5000.2.6132.1.1.1.1.4.2" + memTotal, err := RunSnmp(ip, community, memTotalOid, method, timeout) + memFreeOid := "1.3.6.1.4.1.674.10895.5000.2.6132.1.1.1.1.4.1" + memFree, err := RunSnmp(ip, community, memFreeOid, method, timeout) + if &memTotal[0] != nil && &memFree[0] != nil { + memfree := memFree[0].Value.(int) + memtotal := memTotal[0].Value.(int) + memUtili := float64(memtotal-memfree) / float64(memtotal) + return int(memUtili * 100), nil + } + return 0, err +} diff --git a/vendor/github.com/gaochao1/sw/modelstat.go b/vendor/github.com/gaochao1/sw/modelstat.go new file mode 100644 index 00000000..32fa5f75 --- /dev/null +++ b/vendor/github.com/gaochao1/sw/modelstat.go @@ -0,0 +1,46 
@@ +package sw + +import ( + "log" + "regexp" +) + +func SysModel(ip, community string, timeout int) (string, error) { + vendor, err := SysVendor(ip, community, timeout) + method := "get" + var oid string + + defer func() { + if r := recover(); r != nil { + log.Println("Recovered in sw.modelstat.go SysModel", r) + } + }() + + switch vendor { + case "Cisco_NX": + oid = "1.3.6.1.2.1.47.1.1.1.1.13.10" + case "Cisco": + oid = "1.3.6.1.2.1.47.1.1.1.1.13.1001" + case "Huawei", "H3C", "H3C_V5", "H3C_V7": + re := regexp.MustCompile(`\w+-\w+-\w+\S+`) + sysDescr, err := SysDescr(ip, community, timeout) + if err != nil { + return "", err + } else { + return re.FindAllString(sysDescr, 1)[0], nil + } + default: + return "", err + } + + snmpPDUs, err := RunSnmp(ip, community, oid, method, timeout) + + if err == nil { + for _, pdu := range snmpPDUs { + return pdu.Value.(string), err + } + } + + return "", err + +} diff --git a/vendor/github.com/gaochao1/sw/namestat.go b/vendor/github.com/gaochao1/sw/namestat.go new file mode 100644 index 00000000..0d93e019 --- /dev/null +++ b/vendor/github.com/gaochao1/sw/namestat.go @@ -0,0 +1,24 @@ +package sw + +import ( + "log" +) + +func SysName(ip, community string, timeout int) (string, error) { + oid := "1.3.6.1.2.1.1.5.0" + method := "get" + defer func() { + if r := recover(); r != nil { + log.Println(ip+" Recovered in SysName", r) + } + }() + snmpPDUs, err := RunSnmp(ip, community, oid, method, timeout) + + if err == nil { + for _, pdu := range snmpPDUs { + return pdu.Value.(string), err + } + } + + return "", err +} diff --git a/vendor/github.com/gaochao1/sw/parseip.go b/vendor/github.com/gaochao1/sw/parseip.go new file mode 100644 index 00000000..4194cbe5 --- /dev/null +++ b/vendor/github.com/gaochao1/sw/parseip.go @@ -0,0 +1,139 @@ +package sw + +import ( + "encoding/binary" + "net" + "strconv" + "strings" +) + +func ParseIp(ip string) []string { + var availableIPs []string + // if ip is "1.1.1.1/",trim / + ip = strings.TrimRight(ip, 
"/") + if strings.Contains(ip, "/") == true { + if strings.Contains(ip, "/32") == true { + aip := strings.Replace(ip, "/32", "", -1) + availableIPs = append(availableIPs, aip) + } else { + availableIPs = GetAvailableIP(ip) + } + } else if strings.Contains(ip, "-") == true { + ipRange := strings.SplitN(ip, "-", 2) + availableIPs = GetAvailableIPRange(ipRange[0], ipRange[1]) + } else { + availableIPs = append(availableIPs, ip) + } + return availableIPs +} + +func GetAvailableIPRange(ipStart, ipEnd string) []string { + var availableIPs []string + + firstIP := net.ParseIP(ipStart) + endIP := net.ParseIP(ipEnd) + if firstIP.To4() == nil || endIP.To4() == nil { + return availableIPs + } + firstIPNum := ipToInt(firstIP.To4()) + EndIPNum := ipToInt(endIP.To4()) + pos := int32(1) + + newNum := firstIPNum + + for newNum <= EndIPNum { + availableIPs = append(availableIPs, intToIP(newNum).String()) + newNum = newNum + pos + } + return availableIPs +} + +func GetAvailableIP(ipAndMask string) []string { + var availableIPs []string + + ipAndMask = strings.TrimSpace(ipAndMask) + ipAndMask = IPAddressToCIDR(ipAndMask) + _, ipnet, _ := net.ParseCIDR(ipAndMask) + + firstIP, _ := networkRange(ipnet) + ipNum := ipToInt(firstIP) + size := networkSize(ipnet.Mask) + pos := int32(1) + max := size - 2 // -1 for the broadcast address, -1 for the gateway address + + var newNum int32 + for attempt := int32(0); attempt < max; attempt++ { + newNum = ipNum + pos + pos = pos%max + 1 + availableIPs = append(availableIPs, intToIP(newNum).String()) + } + return availableIPs +} + +func IPAddressToCIDR(ipAdress string) string { + if strings.Contains(ipAdress, "/") == true { + ipAndMask := strings.Split(ipAdress, "/") + ip := ipAndMask[0] + mask := ipAndMask[1] + if strings.Contains(mask, ".") == true { + mask = IPMaskStringToCIDR(mask) + } + return ip + "/" + mask + } else { + return ipAdress + } +} + +func IPMaskStringToCIDR(netmask string) string { + netmaskList := strings.Split(netmask, ".") + var 
mint []int + for _, v := range netmaskList { + strv, _ := strconv.Atoi(v) + mint = append(mint, strv) + } + myIPMask := net.IPv4Mask(byte(mint[0]), byte(mint[1]), byte(mint[2]), byte(mint[3])) + ones, _ := myIPMask.Size() + return strconv.Itoa(ones) +} + +func IPMaskCIDRToString(one string) string { + oneInt, _ := strconv.Atoi(one) + mIPmask := net.CIDRMask(oneInt, 32) + var maskstring []string + for _, v := range mIPmask { + maskstring = append(maskstring, strconv.Itoa(int(v))) + } + return strings.Join(maskstring, ".") +} + +// Calculates the first and last IP addresses in an IPNet +func networkRange(network *net.IPNet) (net.IP, net.IP) { + netIP := network.IP.To4() + firstIP := netIP.Mask(network.Mask) + lastIP := net.IPv4(0, 0, 0, 0).To4() + for i := 0; i < len(lastIP); i++ { + lastIP[i] = netIP[i] | ^network.Mask[i] + } + return firstIP, lastIP +} + +// Given a netmask, calculates the number of available hosts +func networkSize(mask net.IPMask) int32 { + m := net.IPv4Mask(0, 0, 0, 0) + for i := 0; i < net.IPv4len; i++ { + m[i] = ^mask[i] + } + return int32(binary.BigEndian.Uint32(m)) + 1 +} + +// Converts a 4 bytes IP into a 32 bit integer +func ipToInt(ip net.IP) int32 { + return int32(binary.BigEndian.Uint32(ip.To4())) +} + +// Converts 32 bit integer into a 4 bytes IP address +func intToIP(n int32) net.IP { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, uint32(n)) + return net.IP(b) +} diff --git a/vendor/github.com/gaochao1/sw/ping.go b/vendor/github.com/gaochao1/sw/ping.go new file mode 100644 index 00000000..5beeaf55 --- /dev/null +++ b/vendor/github.com/gaochao1/sw/ping.go @@ -0,0 +1,20 @@ +package sw + +func PingRtt(ip string, timeout int, fastPingMode bool) (float64, error) { + var rtt float64 + var err error + if fastPingMode == true { + rtt, err = fastPingRtt(ip, timeout) + } else { + rtt, err = goPingRtt(ip, timeout) + } + return rtt, err +} + +func Ping(ip string, timeout int, fastPingMode bool) bool { + rtt, _ := PingRtt(ip, timeout, 
fastPingMode) + if rtt == -1 { + return false + } + return true +} diff --git a/vendor/github.com/gaochao1/sw/pingstat.go b/vendor/github.com/gaochao1/sw/pingstat.go new file mode 100644 index 00000000..be6c4429 --- /dev/null +++ b/vendor/github.com/gaochao1/sw/pingstat.go @@ -0,0 +1,59 @@ +package sw + +import ( + "bufio" + "bytes" + "github.com/toolkits/file" + "github.com/toolkits/sys" + "io" + "strconv" + "strings" +) + +func PingStatSummary(ip string, count, timeout int) (m map[string]string, err error) { + m = make(map[string]string) + var bs []byte + bs, err = sys.CmdOutBytes("ping", "-c", strconv.Itoa(count), "-W", strconv.Itoa(timeout), ip) + if err != nil { + return m, err + } + + reader := bufio.NewReader(bytes.NewBuffer(bs)) + + // ignore the first line + line, e := file.ReadLine(reader) + if e != nil { + return m, e + } + + for { + line, err = file.ReadLine(reader) + if err == io.EOF { + err = nil + break + } else if err != nil { + return m, err + } + + lineStr := string(line) + if strings.Contains(lineStr, "packet loss") { + arr := strings.Split(lineStr, ", ") + for _, val := range arr { + fields := strings.Fields(val) + if fields[1] == "packet" { + m["pkloss"] = fields[0] + } + } + } + + if strings.Contains(lineStr, "min/avg/max") { + fields := strings.Fields(lineStr) + result := strings.Split(fields[3], "/") + m["min"] = result[0] + m["avg"] = result[1] + m["max"] = result[2] + } + } + + return m, e +} diff --git a/vendor/github.com/gaochao1/sw/runsnmp.go b/vendor/github.com/gaochao1/sw/runsnmp.go new file mode 100644 index 00000000..23da7d79 --- /dev/null +++ b/vendor/github.com/gaochao1/sw/runsnmp.go @@ -0,0 +1,88 @@ +package sw + +import ( + "strings" + "time" + + "github.com/gaochao1/gosnmp" +) + +func RunSnmp(ip, community, oid, method string, timeout int) (snmpPDUs []gosnmp.SnmpPDU, err error) { + cur_gosnmp, err := gosnmp.NewGoSNMP(ip, community, gosnmp.Version2c, int64(timeout)) + + if err != nil { + return nil, err + } else { + 
cur_gosnmp.SetTimeout(int64(timeout)) + snmpPDUs, err := ParseSnmpMethod(oid, method, cur_gosnmp) + if err != nil { + return nil, err + } else { + return snmpPDUs, err + } + } + + return +} + +func ParseSnmpMethod(oid, method string, cur_gosnmp *gosnmp.GoSNMP) (snmpPDUs []gosnmp.SnmpPDU, err error) { + var snmpPacket *gosnmp.SnmpPacket + + switch method { + case "get": + snmpPacket, err = cur_gosnmp.Get(oid) + if err != nil { + return nil, err + } else { + snmpPDUs = snmpPacket.Variables + return snmpPDUs, err + } + case "getnext": + snmpPacket, err = cur_gosnmp.GetNext(oid) + if err != nil { + return nil, err + } else { + snmpPDUs = snmpPacket.Variables + return snmpPDUs, err + } + default: + snmpPDUs, err = cur_gosnmp.Walk(oid) + return snmpPDUs, err + } + + return +} + +func snmpPDUNameToIfIndex(snmpPDUName string) string { + oidSplit := strings.Split(snmpPDUName, ".") + curIfIndex := oidSplit[len(oidSplit)-1] + return curIfIndex +} + +func RunSnmpwalk(ip, community, oid string, retry int, timeout int) ([]gosnmp.SnmpPDU, error) { + method := "getnext" + oidnext := oid + var snmpPDUs = []gosnmp.SnmpPDU{} + var snmpPDU []gosnmp.SnmpPDU + var err error + + for { + for i := 0; i < retry; i++ { + snmpPDU, err = RunSnmp(ip, community, oidnext, method, timeout) + if len(snmpPDU) > 0 { + break + } + time.Sleep(100 * time.Millisecond) + } + if err != nil { + break + } + oidnext = snmpPDU[0].Name + if strings.Contains(oidnext, oid) { + snmpPDUs = append(snmpPDUs, snmpPDU[0]) + } else { + break + } + } + return snmpPDUs, err +} diff --git a/vendor/github.com/gaochao1/sw/uptimestat.go b/vendor/github.com/gaochao1/sw/uptimestat.go new file mode 100644 index 00000000..c211680e --- /dev/null +++ b/vendor/github.com/gaochao1/sw/uptimestat.go @@ -0,0 +1,50 @@ +package sw + +import ( + "fmt" + "log" + "math" + "strconv" + "strings" + "time" +) + +func SysUpTime(ip, community string, timeout int) (string, error) { + oid := "1.3.6.1.2.1.1.3.0" + method := "get" + defer func() { + 
if r := recover(); r != nil { + log.Println(ip+" Recovered in Uptime", r) + } + }() + snmpPDUs, err := RunSnmp(ip, community, oid, method, timeout) + + if err == nil { + for _, pdu := range snmpPDUs { + durationStr := parseTime(pdu.Value.(int)) + return durationStr, err + } + } + + return "", err +} + +func parseTime(d int) string { + timestr := strconv.Itoa(d / 100) + duration, _ := time.ParseDuration(timestr + "s") + + totalHour := duration.Hours() + day := int(totalHour / 24) + + modTime := math.Mod(totalHour, 24) + modTimeStr := strconv.FormatFloat(modTime, 'f', 3, 64) + modDuration, _ := time.ParseDuration(modTimeStr + "h") + + modDurationStr := modDuration.String() + if strings.Contains(modDurationStr, ".") { + modDurationStr = strings.Split(modDurationStr, ".")[0] + "s" + } + + return fmt.Sprintf("%dday %s", day, modDurationStr) + +} diff --git a/vendor/github.com/go-ping/ping/.editorconfig b/vendor/github.com/go-ping/ping/.editorconfig new file mode 100644 index 00000000..57abfdc2 --- /dev/null +++ b/vendor/github.com/go-ping/ping/.editorconfig @@ -0,0 +1,16 @@ +# https://editorconfig.org + +root = true + +[*] +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true +charset = utf-8 +indent_style = space + +[Makefile] +indent_style = tab + +[*.go] +indent_style = tab diff --git a/vendor/github.com/go-ping/ping/.gitignore b/vendor/github.com/go-ping/ping/.gitignore new file mode 100644 index 00000000..f527a0e0 --- /dev/null +++ b/vendor/github.com/go-ping/ping/.gitignore @@ -0,0 +1,2 @@ +/ping +/dist diff --git a/vendor/github.com/go-ping/ping/.golangci.yml b/vendor/github.com/go-ping/ping/.golangci.yml new file mode 100644 index 00000000..eb311f81 --- /dev/null +++ b/vendor/github.com/go-ping/ping/.golangci.yml @@ -0,0 +1,6 @@ +--- +issues: + exclude-rules: + - path: _test.go + linters: + - errcheck diff --git a/vendor/github.com/go-ping/ping/.goreleaser.yml b/vendor/github.com/go-ping/ping/.goreleaser.yml new file mode 100644 index 
00000000..3c5cc0ac --- /dev/null +++ b/vendor/github.com/go-ping/ping/.goreleaser.yml @@ -0,0 +1,46 @@ +project_name: ping +before: + hooks: + - go mod download +builds: +- binary: ping + dir: cmd/ping + goarch: + - amd64 + - arm + - arm64 + goarm: + - 6 + - 7 + goos: + - darwin + - freebsd + - linux + - windows +archives: +- files: + - LICENSE + - README.md + format_overrides: + - goos: windows + format: zip + wrap_in_directory: true +# TODO: Decide if we want packages (name conflcits with /bin/ping?) +# nfpms: +# homepage: https://github.com/go-ping/ping +# maintainer: 'Go Ping Maintainers ' +# description: Ping written in Go. +# license: MIT +# formats: +# - deb +# - rpm +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-{{ .ShortCommit }}" +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' diff --git a/vendor/github.com/influxdata/wlog/LICENSE b/vendor/github.com/go-ping/ping/LICENSE similarity index 95% rename from vendor/github.com/influxdata/wlog/LICENSE rename to vendor/github.com/go-ping/ping/LICENSE index f39920b0..5584bb00 100644 --- a/vendor/github.com/influxdata/wlog/LICENSE +++ b/vendor/github.com/go-ping/ping/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2016 InfluxData +Copyright (c) 2016 Cameron Sparr and contributors. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/go-ping/ping/README.md b/vendor/github.com/go-ping/ping/README.md new file mode 100644 index 00000000..7fbdd4df --- /dev/null +++ b/vendor/github.com/go-ping/ping/README.md @@ -0,0 +1,130 @@ +# go-ping +[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-ping/ping)](https://pkg.go.dev/github.com/go-ping/ping) +[![Circle CI](https://circleci.com/gh/go-ping/ping.svg?style=svg)](https://circleci.com/gh/go-ping/ping) + +A simple but powerful ICMP echo (ping) library for Go, inspired by +[go-fastping](https://github.com/tatsushid/go-fastping). + +Here is a very simple example that sends and receives three packets: + +```go +pinger, err := ping.NewPinger("www.google.com") +if err != nil { + panic(err) +} +pinger.Count = 3 +err = pinger.Run() // Blocks until finished. +if err != nil { + panic(err) +} +stats := pinger.Statistics() // get send/receive/rtt stats +``` + +Here is an example that emulates the traditional UNIX ping command: + +```go +pinger, err := ping.NewPinger("www.google.com") +if err != nil { + panic(err) +} + +// Listen for Ctrl-C. 
+c := make(chan os.Signal, 1) +signal.Notify(c, os.Interrupt) +go func() { + for _ = range c { + pinger.Stop() + } +}() + +pinger.OnRecv = func(pkt *ping.Packet) { + fmt.Printf("%d bytes from %s: icmp_seq=%d time=%v\n", + pkt.Nbytes, pkt.IPAddr, pkt.Seq, pkt.Rtt) +} + +pinger.OnFinish = func(stats *ping.Statistics) { + fmt.Printf("\n--- %s ping statistics ---\n", stats.Addr) + fmt.Printf("%d packets transmitted, %d packets received, %v%% packet loss\n", + stats.PacketsSent, stats.PacketsRecv, stats.PacketLoss) + fmt.Printf("round-trip min/avg/max/stddev = %v/%v/%v/%v\n", + stats.MinRtt, stats.AvgRtt, stats.MaxRtt, stats.StdDevRtt) +} + +fmt.Printf("PING %s (%s):\n", pinger.Addr(), pinger.IPAddr()) +err = pinger.Run() +if err != nil { + panic(err) +} +``` + +It sends ICMP Echo Request packet(s) and waits for an Echo Reply in +response. If it receives a response, it calls the `OnRecv` callback. +When it's finished, it calls the `OnFinish` callback. + +For a full ping example, see +[cmd/ping/ping.go](https://github.com/go-ping/ping/blob/master/cmd/ping/ping.go). + +## Installation + +``` +go get -u github.com/go-ping/ping +``` + +To install the native Go ping executable: + +```bash +go get -u github.com/go-ping/ping/... +$GOPATH/bin/ping +``` + +## Supported Operating Systems + +### Linux +This library attempts to send an "unprivileged" ping via UDP. On Linux, +this must be enabled with the following sysctl command: + +``` +sudo sysctl -w net.ipv4.ping_group_range="0 2147483647" +``` + +If you do not wish to do this, you can call `pinger.SetPrivileged(true)` +in your code and then use setcap on your binary to allow it to bind to +raw sockets (or just run it as root): + +``` +setcap cap_net_raw=+ep /path/to/your/compiled/binary +``` + +See [this blog](https://sturmflut.github.io/linux/ubuntu/2015/01/17/unprivileged-icmp-sockets-on-linux/) +and the Go [x/net/icmp](https://godoc.org/golang.org/x/net/icmp) package +for more details. 
+ +### Windows + +You must use `pinger.SetPrivileged(true)`, otherwise you will receive +the following error: + +``` +socket: The requested protocol has not been configured into the system, or no implementation for it exists. +``` + +Despite the method name, this should work without the need to elevate +privileges and has been tested on Windows 10. Please note that accessing +packet TTL values is not supported due to limitations in the Go +x/net/ipv4 and x/net/ipv6 packages. + +### Plan 9 from Bell Labs + +There is no support for Plan 9. This is because the entire `x/net/ipv4` +and `x/net/ipv6` packages are not implemented by the Go programming +language. + +## Maintainers and Getting Help: + +This repo was originally in the personal account of +[sparrc](https://github.com/sparrc), but is now maintained by the +[go-ping organization](https://github.com/go-ping). + +For support and help, you usually find us in the #go-ping channel of +Gophers Slack. See https://invite.slack.golangbridge.org/ for an invite +to the Gophers Slack org. 
diff --git a/vendor/github.com/go-ping/ping/go.mod b/vendor/github.com/go-ping/ping/go.mod new file mode 100644 index 00000000..44ad2f7f --- /dev/null +++ b/vendor/github.com/go-ping/ping/go.mod @@ -0,0 +1,5 @@ +module github.com/go-ping/ping + +go 1.14 + +require golang.org/x/net v0.0.0-20200904194848-62affa334b73 diff --git a/vendor/github.com/go-ping/ping/go.sum b/vendor/github.com/go-ping/ping/go.sum new file mode 100644 index 00000000..6bc5e041 --- /dev/null +++ b/vendor/github.com/go-ping/ping/go.sum @@ -0,0 +1,10 @@ +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/go-ping/ping/ping.go b/vendor/github.com/go-ping/ping/ping.go new file mode 100644 index 00000000..0ed08490 --- /dev/null +++ b/vendor/github.com/go-ping/ping/ping.go @@ -0,0 +1,669 @@ +// Package ping is a simple but powerful ICMP echo (ping) library. 
+// +// Here is a very simple example that sends and receives three packets: +// +// pinger, err := ping.NewPinger("www.google.com") +// if err != nil { +// panic(err) +// } +// pinger.Count = 3 +// err = pinger.Run() // blocks until finished +// if err != nil { +// panic(err) +// } +// stats := pinger.Statistics() // get send/receive/rtt stats +// +// Here is an example that emulates the traditional UNIX ping command: +// +// pinger, err := ping.NewPinger("www.google.com") +// if err != nil { +// panic(err) +// } +// // Listen for Ctrl-C. +// c := make(chan os.Signal, 1) +// signal.Notify(c, os.Interrupt) +// go func() { +// for _ = range c { +// pinger.Stop() +// } +// }() +// pinger.OnRecv = func(pkt *ping.Packet) { +// fmt.Printf("%d bytes from %s: icmp_seq=%d time=%v\n", +// pkt.Nbytes, pkt.IPAddr, pkt.Seq, pkt.Rtt) +// } +// pinger.OnFinish = func(stats *ping.Statistics) { +// fmt.Printf("\n--- %s ping statistics ---\n", stats.Addr) +// fmt.Printf("%d packets transmitted, %d packets received, %v%% packet loss\n", +// stats.PacketsSent, stats.PacketsRecv, stats.PacketLoss) +// fmt.Printf("round-trip min/avg/max/stddev = %v/%v/%v/%v\n", +// stats.MinRtt, stats.AvgRtt, stats.MaxRtt, stats.StdDevRtt) +// } +// fmt.Printf("PING %s (%s):\n", pinger.Addr(), pinger.IPAddr()) +// err = pinger.Run() +// if err != nil { +// panic(err) +// } +// +// It sends ICMP Echo Request packet(s) and waits for an Echo Reply in response. +// If it receives a response, it calls the OnRecv callback. When it's finished, +// it calls the OnFinish callback. +// +// For a full ping example, see "cmd/ping/ping.go". 
+// +package ping + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "math" + "math/rand" + "net" + "runtime" + "sync" + "syscall" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +const ( + timeSliceLength = 8 + trackerLength = 8 + protocolICMP = 1 + protocolIPv6ICMP = 58 +) + +var ( + ipv4Proto = map[string]string{"icmp": "ip4:icmp", "udp": "udp4"} + ipv6Proto = map[string]string{"icmp": "ip6:ipv6-icmp", "udp": "udp6"} +) + +// New returns a new Pinger struct pointer. +func New(addr string) *Pinger { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + return &Pinger{ + Count: -1, + Interval: time.Second, + RecordRtts: true, + Size: timeSliceLength, + Timeout: time.Second * 100000, + Tracker: r.Int63n(math.MaxInt64), + + addr: addr, + done: make(chan bool), + id: r.Intn(math.MaxInt16), + ipaddr: nil, + ipv4: false, + network: "ip", + protocol: "udp", + } +} + +// NewPinger returns a new Pinger and resolves the address. +func NewPinger(addr string) (*Pinger, error) { + p := New(addr) + return p, p.Resolve() +} + +// Pinger represents a packet sender/receiver. +type Pinger struct { + // Interval is the wait time between each packet send. Default is 1s. + Interval time.Duration + + // Timeout specifies a timeout before ping exits, regardless of how many + // packets have been received. + Timeout time.Duration + + // Count tells pinger to stop after sending (and receiving) Count echo + // packets. If this option is not specified, pinger will operate until + // interrupted. + Count int + + // Debug runs in debug mode + Debug bool + + // Number of packets sent + PacketsSent int + + // Number of packets received + PacketsRecv int + + // If true, keep a record of rtts of all received packets. + // Set to false to avoid memory bloat for long running pings. 
+ RecordRtts bool + + // rtts is all of the Rtts + rtts []time.Duration + + // OnSend is called when Pinger sends a packet + OnSend func(*Packet) + + // OnRecv is called when Pinger receives and processes a packet + OnRecv func(*Packet) + + // OnFinish is called when Pinger exits + OnFinish func(*Statistics) + + // Size of packet being sent + Size int + + // Tracker: Used to uniquely identify packet when non-priviledged + Tracker int64 + + // Source is the source IP address + Source string + + // stop chan bool + done chan bool + + ipaddr *net.IPAddr + addr string + + ipv4 bool + id int + sequence int + // network is one of "ip", "ip4", or "ip6". + network string + // protocol is "icmp" or "udp". + protocol string +} + +type packet struct { + bytes []byte + nbytes int + ttl int +} + +// Packet represents a received and processed ICMP echo packet. +type Packet struct { + // Rtt is the round-trip time it took to ping. + Rtt time.Duration + + // IPAddr is the address of the host being pinged. + IPAddr *net.IPAddr + + // Addr is the string address of the host being pinged. + Addr string + + // NBytes is the number of bytes in the message. + Nbytes int + + // Seq is the ICMP sequence number. + Seq int + + // TTL is the Time To Live on the packet. + Ttl int +} + +// Statistics represent the stats of a currently running or finished +// pinger operation. +type Statistics struct { + // PacketsRecv is the number of packets received. + PacketsRecv int + + // PacketsSent is the number of packets sent. + PacketsSent int + + // PacketLoss is the percentage of packets lost. + PacketLoss float64 + + // IPAddr is the address of the host being pinged. + IPAddr *net.IPAddr + + // Addr is the string address of the host being pinged. + Addr string + + // Rtts is all of the round-trip times sent via this pinger. + Rtts []time.Duration + + // MinRtt is the minimum round-trip time sent via this pinger. 
+ MinRtt time.Duration + + // MaxRtt is the maximum round-trip time sent via this pinger. + MaxRtt time.Duration + + // AvgRtt is the average round-trip time sent via this pinger. + AvgRtt time.Duration + + // StdDevRtt is the standard deviation of the round-trip times sent via + // this pinger. + StdDevRtt time.Duration +} + +// SetIPAddr sets the ip address of the target host. +func (p *Pinger) SetIPAddr(ipaddr *net.IPAddr) { + p.ipv4 = isIPv4(ipaddr.IP) + + p.ipaddr = ipaddr + p.addr = ipaddr.String() +} + +// IPAddr returns the ip address of the target host. +func (p *Pinger) IPAddr() *net.IPAddr { + return p.ipaddr +} + +// Resolve does the DNS lookup for the Pinger address and sets IP protocol. +func (p *Pinger) Resolve() error { + if len(p.addr) == 0 { + return errors.New("addr cannot be empty") + } + ipaddr, err := net.ResolveIPAddr(p.network, p.addr) + if err != nil { + return err + } + + p.ipv4 = isIPv4(ipaddr.IP) + + p.ipaddr = ipaddr + + return nil +} + +// SetAddr resolves and sets the ip address of the target host, addr can be a +// DNS name like "www.google.com" or IP like "127.0.0.1". +func (p *Pinger) SetAddr(addr string) error { + oldAddr := p.addr + p.addr = addr + err := p.Resolve() + if err != nil { + p.addr = oldAddr + return err + } + return nil +} + +// Addr returns the string ip address of the target host. +func (p *Pinger) Addr() string { + return p.addr +} + +// SetNetwork allows configuration of DNS resolution. +// * "ip" will automatically select IPv4 or IPv6. +// * "ip4" will select IPv4. +// * "ip6" will select IPv6. +func (p *Pinger) SetNetwork(n string) { + switch n { + case "ip4": + p.network = "ip4" + case "ip6": + p.network = "ip6" + default: + p.network = "ip" + } +} + +// SetPrivileged sets the type of ping pinger will send. +// false means pinger will send an "unprivileged" UDP ping. +// true means pinger will send a "privileged" raw ICMP ping. +// NOTE: setting to true requires that it be run with super-user privileges. 
+func (p *Pinger) SetPrivileged(privileged bool) { + if privileged { + p.protocol = "icmp" + } else { + p.protocol = "udp" + } +} + +// Privileged returns whether pinger is running in privileged mode. +func (p *Pinger) Privileged() bool { + return p.protocol == "icmp" +} + +// Run runs the pinger. This is a blocking function that will exit when it's +// done. If Count or Interval are not specified, it will run continuously until +// it is interrupted. +func (p *Pinger) Run() error { + var conn *icmp.PacketConn + var err error + if p.ipaddr == nil { + err = p.Resolve() + } + if err != nil { + return err + } + if p.ipv4 { + if conn, err = p.listen(ipv4Proto[p.protocol]); err != nil { + return err + } + if err = conn.IPv4PacketConn().SetControlMessage(ipv4.FlagTTL, true); runtime.GOOS != "windows" && err != nil { + return err + } + } else { + if conn, err = p.listen(ipv6Proto[p.protocol]); err != nil { + return err + } + if err = conn.IPv6PacketConn().SetControlMessage(ipv6.FlagHopLimit, true); runtime.GOOS != "windows" && err != nil { + return err + } + } + defer conn.Close() + defer p.finish() + + var wg sync.WaitGroup + recv := make(chan *packet, 5) + defer close(recv) + wg.Add(1) + //nolint:errcheck + go p.recvICMP(conn, recv, &wg) + + err = p.sendICMP(conn) + if err != nil { + return err + } + + timeout := time.NewTicker(p.Timeout) + defer timeout.Stop() + interval := time.NewTicker(p.Interval) + defer interval.Stop() + + for { + select { + case <-p.done: + wg.Wait() + return nil + case <-timeout.C: + close(p.done) + wg.Wait() + return nil + case <-interval.C: + if p.Count > 0 && p.PacketsSent >= p.Count { + continue + } + err = p.sendICMP(conn) + if err != nil { + // FIXME: this logs as FATAL but continues + fmt.Println("FATAL: ", err.Error()) + } + case r := <-recv: + err := p.processPacket(r) + if err != nil { + // FIXME: this logs as FATAL but continues + fmt.Println("FATAL: ", err.Error()) + } + } + if p.Count > 0 && p.PacketsRecv >= p.Count { + 
close(p.done) + wg.Wait() + return nil + } + } +} + +func (p *Pinger) Stop() { + close(p.done) +} + +func (p *Pinger) finish() { + handler := p.OnFinish + if handler != nil { + s := p.Statistics() + handler(s) + } +} + +// Statistics returns the statistics of the pinger. This can be run while the +// pinger is running or after it is finished. OnFinish calls this function to +// get it's finished statistics. +func (p *Pinger) Statistics() *Statistics { + loss := float64(p.PacketsSent-p.PacketsRecv) / float64(p.PacketsSent) * 100 + var min, max, total time.Duration + if len(p.rtts) > 0 { + min = p.rtts[0] + max = p.rtts[0] + } + for _, rtt := range p.rtts { + if rtt < min { + min = rtt + } + if rtt > max { + max = rtt + } + total += rtt + } + s := Statistics{ + PacketsSent: p.PacketsSent, + PacketsRecv: p.PacketsRecv, + PacketLoss: loss, + Rtts: p.rtts, + Addr: p.addr, + IPAddr: p.ipaddr, + MaxRtt: max, + MinRtt: min, + } + if len(p.rtts) > 0 { + s.AvgRtt = total / time.Duration(len(p.rtts)) + var sumsquares time.Duration + for _, rtt := range p.rtts { + sumsquares += (rtt - s.AvgRtt) * (rtt - s.AvgRtt) + } + s.StdDevRtt = time.Duration(math.Sqrt( + float64(sumsquares / time.Duration(len(p.rtts))))) + } + return &s +} + +func (p *Pinger) recvICMP( + conn *icmp.PacketConn, + recv chan<- *packet, + wg *sync.WaitGroup, +) error { + defer wg.Done() + for { + select { + case <-p.done: + return nil + default: + bytes := make([]byte, 512) + if err := conn.SetReadDeadline(time.Now().Add(time.Millisecond * 100)); err != nil { + return err + } + var n, ttl int + var err error + if p.ipv4 { + var cm *ipv4.ControlMessage + n, cm, _, err = conn.IPv4PacketConn().ReadFrom(bytes) + if cm != nil { + ttl = cm.TTL + } + } else { + var cm *ipv6.ControlMessage + n, cm, _, err = conn.IPv6PacketConn().ReadFrom(bytes) + if cm != nil { + ttl = cm.HopLimit + } + } + if err != nil { + if neterr, ok := err.(*net.OpError); ok { + if neterr.Timeout() { + // Read timeout + continue + } else { + 
close(p.done) + return err + } + } + } + + select { + case <-p.done: + return nil + case recv <- &packet{bytes: bytes, nbytes: n, ttl: ttl}: + } + } + } +} + +func (p *Pinger) processPacket(recv *packet) error { + receivedAt := time.Now() + var proto int + if p.ipv4 { + proto = protocolICMP + } else { + proto = protocolIPv6ICMP + } + + var m *icmp.Message + var err error + if m, err = icmp.ParseMessage(proto, recv.bytes); err != nil { + return fmt.Errorf("error parsing icmp message: %s", err.Error()) + } + + if m.Type != ipv4.ICMPTypeEchoReply && m.Type != ipv6.ICMPTypeEchoReply { + // Not an echo reply, ignore it + return nil + } + + outPkt := &Packet{ + Nbytes: recv.nbytes, + IPAddr: p.ipaddr, + Addr: p.addr, + Ttl: recv.ttl, + } + + switch pkt := m.Body.(type) { + case *icmp.Echo: + // If we are priviledged, we can match icmp.ID + if p.protocol == "icmp" { + // Check if reply from same ID + if pkt.ID != p.id { + return nil + } + } + + if len(pkt.Data) < timeSliceLength+trackerLength { + return fmt.Errorf("insufficient data received; got: %d %v", + len(pkt.Data), pkt.Data) + } + + tracker := bytesToInt(pkt.Data[timeSliceLength:]) + timestamp := bytesToTime(pkt.Data[:timeSliceLength]) + + if tracker != p.Tracker { + return nil + } + + outPkt.Rtt = receivedAt.Sub(timestamp) + outPkt.Seq = pkt.Seq + p.PacketsRecv++ + default: + // Very bad, not sure how this can happen + return fmt.Errorf("invalid ICMP echo reply; type: '%T', '%v'", pkt, pkt) + } + + if p.RecordRtts { + p.rtts = append(p.rtts, outPkt.Rtt) + } + handler := p.OnRecv + if handler != nil { + handler(outPkt) + } + + return nil +} + +func (p *Pinger) sendICMP(conn *icmp.PacketConn) error { + var typ icmp.Type + if p.ipv4 { + typ = ipv4.ICMPTypeEcho + } else { + typ = ipv6.ICMPTypeEchoRequest + } + + var dst net.Addr = p.ipaddr + if p.protocol == "udp" { + dst = &net.UDPAddr{IP: p.ipaddr.IP, Zone: p.ipaddr.Zone} + } + + t := append(timeToBytes(time.Now()), intToBytes(p.Tracker)...) 
+ if remainSize := p.Size - timeSliceLength - trackerLength; remainSize > 0 { + t = append(t, bytes.Repeat([]byte{1}, remainSize)...) + } + + body := &icmp.Echo{ + ID: p.id, + Seq: p.sequence, + Data: t, + } + + msg := &icmp.Message{ + Type: typ, + Code: 0, + Body: body, + } + + msgBytes, err := msg.Marshal(nil) + if err != nil { + return err + } + + for { + if _, err := conn.WriteTo(msgBytes, dst); err != nil { + if neterr, ok := err.(*net.OpError); ok { + if neterr.Err == syscall.ENOBUFS { + continue + } + } + } + handler := p.OnSend + if handler != nil { + outPkt := &Packet{ + Nbytes: len(msgBytes), + IPAddr: p.ipaddr, + Addr: p.addr, + Seq: p.sequence, + } + handler(outPkt) + } + + p.PacketsSent++ + p.sequence++ + break + } + + return nil +} + +func (p *Pinger) listen(netProto string) (*icmp.PacketConn, error) { + conn, err := icmp.ListenPacket(netProto, p.Source) + if err != nil { + close(p.done) + return nil, err + } + return conn, nil +} + +func bytesToTime(b []byte) time.Time { + var nsec int64 + for i := uint8(0); i < 8; i++ { + nsec += int64(b[i]) << ((7 - i) * 8) + } + return time.Unix(nsec/1000000000, nsec%1000000000) +} + +func isIPv4(ip net.IP) bool { + return len(ip.To4()) == net.IPv4len +} + +func timeToBytes(t time.Time) []byte { + nsec := t.UnixNano() + b := make([]byte, 8) + for i := uint8(0); i < 8; i++ { + b[i] = byte((nsec >> ((7 - i) * 8)) & 0xff) + } + return b +} + +func bytesToInt(b []byte) int64 { + return int64(binary.BigEndian.Uint64(b)) +} + +func intToBytes(tracker int64) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(tracker)) + return b +} diff --git a/vendor/github.com/gorilla/mux/AUTHORS b/vendor/github.com/gorilla/mux/AUTHORS deleted file mode 100644 index b722392e..00000000 --- a/vendor/github.com/gorilla/mux/AUTHORS +++ /dev/null @@ -1,8 +0,0 @@ -# This is the official list of gorilla/mux authors for copyright purposes. -# -# Please keep the list sorted. 
- -Google LLC (https://opensource.google.com/) -Kamil Kisielk -Matt Silverlock -Rodrigo Moraes (https://github.com/moraes) diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md deleted file mode 100644 index 92e422ee..00000000 --- a/vendor/github.com/gorilla/mux/README.md +++ /dev/null @@ -1,718 +0,0 @@ -# gorilla/mux - -[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) -[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux) -[![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux) -[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) - -![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png) - -https://www.gorillatoolkit.org/pkg/mux - -Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to -their respective handler. - -The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: - -* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`. -* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers. -* URL hosts, paths and query values can have variables with an optional regular expression. -* Registered URLs can be built, or "reversed", which helps maintaining references to resources. -* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. 
As a bonus, this optimizes request matching. - ---- - -* [Install](#install) -* [Examples](#examples) -* [Matching Routes](#matching-routes) -* [Static Files](#static-files) -* [Registered URLs](#registered-urls) -* [Walking Routes](#walking-routes) -* [Graceful Shutdown](#graceful-shutdown) -* [Middleware](#middleware) -* [Handling CORS Requests](#handling-cors-requests) -* [Testing Handlers](#testing-handlers) -* [Full Example](#full-example) - ---- - -## Install - -With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain: - -```sh -go get -u github.com/gorilla/mux -``` - -## Examples - -Let's start registering a couple of URL paths and handlers: - -```go -func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) -} -``` - -Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters. - -Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example: - -```go -r := mux.NewRouter() -r.HandleFunc("/products/{key}", ProductHandler) -r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) -r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) -``` - -The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`: - -```go -func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, "Category: %v\n", vars["category"]) -} -``` - -And this is all you need to know about the basic usage. More advanced options are explained below. 
- -### Matching Routes - -Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables: - -```go -r := mux.NewRouter() -// Only matches if domain is "www.example.com". -r.Host("www.example.com") -// Matches a dynamic subdomain. -r.Host("{subdomain:[a-z]+}.example.com") -``` - -There are several other matchers that can be added. To match path prefixes: - -```go -r.PathPrefix("/products/") -``` - -...or HTTP methods: - -```go -r.Methods("GET", "POST") -``` - -...or URL schemes: - -```go -r.Schemes("https") -``` - -...or header values: - -```go -r.Headers("X-Requested-With", "XMLHttpRequest") -``` - -...or query values: - -```go -r.Queries("key", "value") -``` - -...or to use a custom matcher function: - -```go -r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 -}) -``` - -...and finally, it is possible to combine several matchers in a single route: - -```go -r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). - Methods("GET"). - Schemes("http") -``` - -Routes are tested in the order they were added to the router. If two routes match, the first one wins: - -```go -r := mux.NewRouter() -r.HandleFunc("/specific", specificHandler) -r.PathPrefix("/").Handler(catchAllHandler) -``` - -Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting". - -For example, let's say we have several URLs that should only match when the host is `www.example.com`. 
Create a route for that host and get a "subrouter" from it: - -```go -r := mux.NewRouter() -s := r.Host("www.example.com").Subrouter() -``` - -Then register routes in the subrouter: - -```go -s.HandleFunc("/products/", ProductsHandler) -s.HandleFunc("/products/{key}", ProductHandler) -s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) -``` - -The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register its paths relatively to a given subrouter. - -There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths: - -```go -r := mux.NewRouter() -s := r.PathPrefix("/products").Subrouter() -// "/products/" -s.HandleFunc("/", ProductsHandler) -// "/products/{key}/" -s.HandleFunc("/{key}/", ProductHandler) -// "/products/{key}/details" -s.HandleFunc("/{key}/details", ProductDetailsHandler) -``` - - -### Static Files - -Note that the path provided to `PathPrefix()` represents a "wildcard": calling -`PathPrefix("/static/").Handler(...)` means that the handler will be passed any -request that matches "/static/\*". This makes it easy to serve static files with mux: - -```go -func main() { - var dir string - - flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir") - flag.Parse() - r := mux.NewRouter() - - // This will serve files under http://localhost:8000/static/ - r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) - - srv := &http.Server{ - Handler: r, - Addr: "127.0.0.1:8000", - // Good practice: enforce timeouts for servers you create! 
- WriteTimeout: 15 * time.Second, - ReadTimeout: 15 * time.Second, - } - - log.Fatal(srv.ListenAndServe()) -} -``` - -### Registered URLs - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example: - -```go -r := mux.NewRouter() -r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") -``` - -To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do: - -```go -url, err := r.Get("article").URL("category", "technology", "id", "42") -``` - -...and the result will be a `url.URL` with the following path: - -``` -"/articles/technology/42" -``` - -This also works for host and query value variables: - -```go -r := mux.NewRouter() -r.Host("{subdomain}.example.com"). - Path("/articles/{category}/{id:[0-9]+}"). - Queries("filter", "{filter}"). - HandlerFunc(ArticleHandler). - Name("article") - -// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla" -url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42", - "filter", "gorilla") -``` - -All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. - -Regex support also exists for matching Headers within a route. For example, we could do: - -```go -r.HeadersRegexp("Content-Type", "application/(text|json)") -``` - -...and the route will match both requests with a Content-Type of `application/json` as well as `application/text` - -There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. 
For the previous route, we would do: - -```go -// "http://news.example.com/" -host, err := r.Get("article").URLHost("subdomain", "news") - -// "/articles/technology/42" -path, err := r.Get("article").URLPath("category", "technology", "id", "42") -``` - -And if you use subrouters, host and path defined separately can be built as well: - -```go -r := mux.NewRouter() -s := r.Host("{subdomain}.example.com").Subrouter() -s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - -// "http://news.example.com/articles/technology/42" -url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") -``` - -### Walking Routes - -The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example, -the following prints all of the registered routes: - -```go -package main - -import ( - "fmt" - "net/http" - "strings" - - "github.com/gorilla/mux" -) - -func handler(w http.ResponseWriter, r *http.Request) { - return -} - -func main() { - r := mux.NewRouter() - r.HandleFunc("/", handler) - r.HandleFunc("/products", handler).Methods("POST") - r.HandleFunc("/articles", handler).Methods("GET") - r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT") - r.HandleFunc("/authors", handler).Queries("surname", "{surname}") - err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { - pathTemplate, err := route.GetPathTemplate() - if err == nil { - fmt.Println("ROUTE:", pathTemplate) - } - pathRegexp, err := route.GetPathRegexp() - if err == nil { - fmt.Println("Path regexp:", pathRegexp) - } - queriesTemplates, err := route.GetQueriesTemplates() - if err == nil { - fmt.Println("Queries templates:", strings.Join(queriesTemplates, ",")) - } - queriesRegexps, err := route.GetQueriesRegexp() - if err == nil { - fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ",")) - } - methods, err := route.GetMethods() - if err == nil { - 
fmt.Println("Methods:", strings.Join(methods, ",")) - } - fmt.Println() - return nil - }) - - if err != nil { - fmt.Println(err) - } - - http.Handle("/", r) -} -``` - -### Graceful Shutdown - -Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`: - -```go -package main - -import ( - "context" - "flag" - "log" - "net/http" - "os" - "os/signal" - "time" - - "github.com/gorilla/mux" -) - -func main() { - var wait time.Duration - flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully wait for existing connections to finish - e.g. 15s or 1m") - flag.Parse() - - r := mux.NewRouter() - // Add your routes as needed - - srv := &http.Server{ - Addr: "0.0.0.0:8080", - // Good practice to set timeouts to avoid Slowloris attacks. - WriteTimeout: time.Second * 15, - ReadTimeout: time.Second * 15, - IdleTimeout: time.Second * 60, - Handler: r, // Pass our instance of gorilla/mux in. - } - - // Run our server in a goroutine so that it doesn't block. - go func() { - if err := srv.ListenAndServe(); err != nil { - log.Println(err) - } - }() - - c := make(chan os.Signal, 1) - // We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C) - // SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught. - signal.Notify(c, os.Interrupt) - - // Block until we receive our signal. - <-c - - // Create a deadline to wait for. - ctx, cancel := context.WithTimeout(context.Background(), wait) - defer cancel() - // Doesn't block if no connections, but will otherwise wait - // until the timeout deadline. - srv.Shutdown(ctx) - // Optionally, you could run srv.Shutdown in a goroutine and block on - // <-ctx.Done() if your application should wait for other services - // to finalize based on context cancellation. 
- log.Println("shutting down") - os.Exit(0) -} -``` - -### Middleware - -Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters. -Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking. - -Mux middlewares are defined using the de facto standard type: - -```go -type MiddlewareFunc func(http.Handler) http.Handler -``` - -Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc. This takes advantage of closures being able access variables from the context where they are created, while retaining the signature enforced by the receivers. - -A very basic middleware which logs the URI of the request being handled could be written as: - -```go -func loggingMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Do stuff here - log.Println(r.RequestURI) - // Call the next handler, which can be another middleware in the chain, or the final handler. 
- next.ServeHTTP(w, r) - }) -} -``` - -Middlewares can be added to a router using `Router.Use()`: - -```go -r := mux.NewRouter() -r.HandleFunc("/", handler) -r.Use(loggingMiddleware) -``` - -A more complex authentication middleware, which maps session token to users, could be written as: - -```go -// Define our struct -type authenticationMiddleware struct { - tokenUsers map[string]string -} - -// Initialize it somewhere -func (amw *authenticationMiddleware) Populate() { - amw.tokenUsers["00000000"] = "user0" - amw.tokenUsers["aaaaaaaa"] = "userA" - amw.tokenUsers["05f717e5"] = "randomUser" - amw.tokenUsers["deadbeef"] = "user0" -} - -// Middleware function, which will be called for each request -func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - token := r.Header.Get("X-Session-Token") - - if user, found := amw.tokenUsers[token]; found { - // We found the token in our map - log.Printf("Authenticated user %s\n", user) - // Pass down the request to the next middleware (or final handler) - next.ServeHTTP(w, r) - } else { - // Write an error and stop the handler chain - http.Error(w, "Forbidden", http.StatusForbidden) - } - }) -} -``` - -```go -r := mux.NewRouter() -r.HandleFunc("/", handler) - -amw := authenticationMiddleware{} -amw.Populate() - -r.Use(amw.Middleware) -``` - -Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it. - -### Handling CORS Requests - -[CORSMethodMiddleware](https://godoc.org/github.com/gorilla/mux#CORSMethodMiddleware) intends to make it easier to strictly set the `Access-Control-Allow-Methods` response header. 
- -* You will still need to use your own CORS handler to set the other CORS headers such as `Access-Control-Allow-Origin` -* The middleware will set the `Access-Control-Allow-Methods` header to all the method matchers (e.g. `r.Methods(http.MethodGet, http.MethodPut, http.MethodOptions)` -> `Access-Control-Allow-Methods: GET,PUT,OPTIONS`) on a route -* If you do not specify any methods, then: -> _Important_: there must be an `OPTIONS` method matcher for the middleware to set the headers. - -Here is an example of using `CORSMethodMiddleware` along with a custom `OPTIONS` handler to set all the required CORS headers: - -```go -package main - -import ( - "net/http" - "github.com/gorilla/mux" -) - -func main() { - r := mux.NewRouter() - - // IMPORTANT: you must specify an OPTIONS method matcher for the middleware to set CORS headers - r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodOptions) - r.Use(mux.CORSMethodMiddleware(r)) - - http.ListenAndServe(":8080", r) -} - -func fooHandler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") - if r.Method == http.MethodOptions { - return - } - - w.Write([]byte("foo")) -} -``` - -And an request to `/foo` using something like: - -```bash -curl localhost:8080/foo -v -``` - -Would look like: - -```bash -* Trying ::1... -* TCP_NODELAY set -* Connected to localhost (::1) port 8080 (#0) -> GET /foo HTTP/1.1 -> Host: localhost:8080 -> User-Agent: curl/7.59.0 -> Accept: */* -> -< HTTP/1.1 200 OK -< Access-Control-Allow-Methods: GET,PUT,PATCH,OPTIONS -< Access-Control-Allow-Origin: * -< Date: Fri, 28 Jun 2019 20:13:30 GMT -< Content-Length: 3 -< Content-Type: text/plain; charset=utf-8 -< -* Connection #0 to host localhost left intact -foo -``` - -### Testing Handlers - -Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. 
Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_. - -First, our simple HTTP handler: - -```go -// endpoints.go -package main - -func HealthCheckHandler(w http.ResponseWriter, r *http.Request) { - // A very simple health check. - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - - // In the future we could report back on the status of our DB, or our cache - // (e.g. Redis) by performing a simple PING, and include them in the response. - io.WriteString(w, `{"alive": true}`) -} - -func main() { - r := mux.NewRouter() - r.HandleFunc("/health", HealthCheckHandler) - - log.Fatal(http.ListenAndServe("localhost:8080", r)) -} -``` - -Our test code: - -```go -// endpoints_test.go -package main - -import ( - "net/http" - "net/http/httptest" - "testing" -) - -func TestHealthCheckHandler(t *testing.T) { - // Create a request to pass to our handler. We don't have any query parameters for now, so we'll - // pass 'nil' as the third parameter. - req, err := http.NewRequest("GET", "/health", nil) - if err != nil { - t.Fatal(err) - } - - // We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response. - rr := httptest.NewRecorder() - handler := http.HandlerFunc(HealthCheckHandler) - - // Our handlers satisfy http.Handler, so we can call their ServeHTTP method - // directly and pass in our Request and ResponseRecorder. - handler.ServeHTTP(rr, req) - - // Check the status code is what we expect. - if status := rr.Code; status != http.StatusOK { - t.Errorf("handler returned wrong status code: got %v want %v", - status, http.StatusOK) - } - - // Check the response body is what we expect. - expected := `{"alive": true}` - if rr.Body.String() != expected { - t.Errorf("handler returned unexpected body: got %v want %v", - rr.Body.String(), expected) - } -} -``` - -In the case that our routes have [variables](#examples), we can pass those in the request. 
We could write -[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple -possible route variables as needed. - -```go -// endpoints.go -func main() { - r := mux.NewRouter() - // A route with a route variable: - r.HandleFunc("/metrics/{type}", MetricsHandler) - - log.Fatal(http.ListenAndServe("localhost:8080", r)) -} -``` - -Our test file, with a table-driven test of `routeVariables`: - -```go -// endpoints_test.go -func TestMetricsHandler(t *testing.T) { - tt := []struct{ - routeVariable string - shouldPass bool - }{ - {"goroutines", true}, - {"heap", true}, - {"counters", true}, - {"queries", true}, - {"adhadaeqm3k", false}, - } - - for _, tc := range tt { - path := fmt.Sprintf("/metrics/%s", tc.routeVariable) - req, err := http.NewRequest("GET", path, nil) - if err != nil { - t.Fatal(err) - } - - rr := httptest.NewRecorder() - - // Need to create a router that we can pass the request through so that the vars will be added to the context - router := mux.NewRouter() - router.HandleFunc("/metrics/{type}", MetricsHandler) - router.ServeHTTP(rr, req) - - // In this case, our MetricsHandler returns a non-200 response - // for a route variable it doesn't know about. - if rr.Code == http.StatusOK && !tc.shouldPass { - t.Errorf("handler should have failed on routeVariable %s: got %v want %v", - tc.routeVariable, rr.Code, http.StatusOK) - } - } -} -``` - -## Full Example - -Here's a complete, runnable example of a small `mux` based server: - -```go -package main - -import ( - "net/http" - "log" - "github.com/gorilla/mux" -) - -func YourHandler(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("Gorilla!\n")) -} - -func main() { - r := mux.NewRouter() - // Routes consist of a path and a handler function. - r.HandleFunc("/", YourHandler) - - // Bind to a port and pass our router in - log.Fatal(http.ListenAndServe(":8000", r)) -} -``` - -## License - -BSD licensed. See the LICENSE file for details. 
diff --git a/vendor/github.com/gorilla/mux/context.go b/vendor/github.com/gorilla/mux/context.go deleted file mode 100644 index 665940a2..00000000 --- a/vendor/github.com/gorilla/mux/context.go +++ /dev/null @@ -1,18 +0,0 @@ -package mux - -import ( - "context" - "net/http" -) - -func contextGet(r *http.Request, key interface{}) interface{} { - return r.Context().Value(key) -} - -func contextSet(r *http.Request, key, val interface{}) *http.Request { - if val == nil { - return r - } - - return r.WithContext(context.WithValue(r.Context(), key, val)) -} diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go deleted file mode 100644 index bd5a38b5..00000000 --- a/vendor/github.com/gorilla/mux/doc.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package mux implements a request router and dispatcher. - -The name mux stands for "HTTP request multiplexer". Like the standard -http.ServeMux, mux.Router matches incoming requests against a list of -registered routes and calls a handler for the route that matches the URL -or other conditions. The main features are: - - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - * URL hosts, paths and query values can have variables with an optional - regular expression. - * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. 
- -Let's start registering a couple of URL paths and handlers: - - func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) - } - -Here we register three routes mapping URL paths to handlers. This is -equivalent to how http.HandleFunc() works: if an incoming request URL matches -one of the paths, the corresponding handler is called passing -(http.ResponseWriter, *http.Request) as parameters. - -Paths can have variables. They are defined using the format {name} or -{name:pattern}. If a regular expression pattern is not defined, the matched -variable will be anything until the next slash. For example: - - r := mux.NewRouter() - r.HandleFunc("/products/{key}", ProductHandler) - r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -Groups can be used inside patterns, as long as they are non-capturing (?:re). For example: - - r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler) - -The names are used to create a map of route variables which can be retrieved -calling mux.Vars(): - - vars := mux.Vars(request) - category := vars["category"] - -Note that if any capturing groups are present, mux will panic() during parsing. To prevent -this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to -"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably -when capturing groups were present. - -And this is all you need to know about the basic usage. More advanced options -are explained below. - -Routes can also be restricted to a domain or subdomain. Just define a host -pattern to be matched. They can also have variables: - - r := mux.NewRouter() - // Only matches if domain is "www.example.com". - r.Host("www.example.com") - // Matches a dynamic subdomain. 
- r.Host("{subdomain:[a-z]+}.domain.com") - -There are several other matchers that can be added. To match path prefixes: - - r.PathPrefix("/products/") - -...or HTTP methods: - - r.Methods("GET", "POST") - -...or URL schemes: - - r.Schemes("https") - -...or header values: - - r.Headers("X-Requested-With", "XMLHttpRequest") - -...or query values: - - r.Queries("key", "value") - -...or to use a custom matcher function: - - r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 - }) - -...and finally, it is possible to combine several matchers in a single route: - - r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). - Methods("GET"). - Schemes("http") - -Setting the same matching conditions again and again can be boring, so we have -a way to group several routes that share the same requirements. -We call it "subrouting". - -For example, let's say we have several URLs that should only match when the -host is "www.example.com". Create a route for that host and get a "subrouter" -from it: - - r := mux.NewRouter() - s := r.Host("www.example.com").Subrouter() - -Then register routes in the subrouter: - - s.HandleFunc("/products/", ProductsHandler) - s.HandleFunc("/products/{key}", ProductHandler) - s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) - -The three URL paths we registered above will only be tested if the domain is -"www.example.com", because the subrouter is tested first. This is not -only convenient, but also optimizes request matching. You can create -subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define -subrouters in a central place and then parts of the app can register its -paths relatively to a given subrouter. - -There's one more thing about subroutes. 
When a subrouter has a path prefix, -the inner routes use it as base for their paths: - - r := mux.NewRouter() - s := r.PathPrefix("/products").Subrouter() - // "/products/" - s.HandleFunc("/", ProductsHandler) - // "/products/{key}/" - s.HandleFunc("/{key}/", ProductHandler) - // "/products/{key}/details" - s.HandleFunc("/{key}/details", ProductDetailsHandler) - -Note that the path provided to PathPrefix() represents a "wildcard": calling -PathPrefix("/static/").Handler(...) means that the handler will be passed any -request that matches "/static/*". This makes it easy to serve static files with mux: - - func main() { - var dir string - - flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir") - flag.Parse() - r := mux.NewRouter() - - // This will serve files under http://localhost:8000/static/ - r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) - - srv := &http.Server{ - Handler: r, - Addr: "127.0.0.1:8000", - // Good practice: enforce timeouts for servers you create! - WriteTimeout: 15 * time.Second, - ReadTimeout: 15 * time.Second, - } - - log.Fatal(srv.ListenAndServe()) - } - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, -or "reversed". We define a name calling Name() on a route. For example: - - r := mux.NewRouter() - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") - -To build a URL, get the route and call the URL() method, passing a sequence of -key/value pairs for the route variables. For the previous route, we would do: - - url, err := r.Get("article").URL("category", "technology", "id", "42") - -...and the result will be a url.URL with the following path: - - "/articles/technology/42" - -This also works for host and query value variables: - - r := mux.NewRouter() - r.Host("{subdomain}.domain.com"). - Path("/articles/{category}/{id:[0-9]+}"). 
- Queries("filter", "{filter}"). - HandlerFunc(ArticleHandler). - Name("article") - - // url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42", - "filter", "gorilla") - -All variables defined in the route are required, and their values must -conform to the corresponding patterns. These requirements guarantee that a -generated URL will always match a registered route -- the only exception is -for explicitly defined "build-only" routes which never match. - -Regex support also exists for matching Headers within a route. For example, we could do: - - r.HeadersRegexp("Content-Type", "application/(text|json)") - -...and the route will match both requests with a Content-Type of `application/json` as well as -`application/text` - -There's also a way to build only the URL host or path for a route: -use the methods URLHost() or URLPath() instead. For the previous route, -we would do: - - // "http://news.domain.com/" - host, err := r.Get("article").URLHost("subdomain", "news") - - // "/articles/technology/42" - path, err := r.Get("article").URLPath("category", "technology", "id", "42") - -And if you use subrouters, host and path defined separately can be built -as well: - - r := mux.NewRouter() - s := r.Host("{subdomain}.domain.com").Subrouter() - s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") - -Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. 
Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking. - - type MiddlewareFunc func(http.Handler) http.Handler - -Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created). - -A very basic middleware which logs the URI of the request being handled could be written as: - - func simpleMw(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Do stuff here - log.Println(r.RequestURI) - // Call the next handler, which can be another middleware in the chain, or the final handler. - next.ServeHTTP(w, r) - }) - } - -Middlewares can be added to a router using `Router.Use()`: - - r := mux.NewRouter() - r.HandleFunc("/", handler) - r.Use(simpleMw) - -A more complex authentication middleware, which maps session token to users, could be written as: - - // Define our struct - type authenticationMiddleware struct { - tokenUsers map[string]string - } - - // Initialize it somewhere - func (amw *authenticationMiddleware) Populate() { - amw.tokenUsers["00000000"] = "user0" - amw.tokenUsers["aaaaaaaa"] = "userA" - amw.tokenUsers["05f717e5"] = "randomUser" - amw.tokenUsers["deadbeef"] = "user0" - } - - // Middleware function, which will be called for each request - func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - token := r.Header.Get("X-Session-Token") - - if user, found := amw.tokenUsers[token]; found { - // We found the token in our map - log.Printf("Authenticated user %s\n", user) - next.ServeHTTP(w, r) - } else { - http.Error(w, "Forbidden", http.StatusForbidden) - } - }) - } - - r := mux.NewRouter() - r.HandleFunc("/", handler) - - amw := 
authenticationMiddleware{tokenUsers: make(map[string]string)} - amw.Populate() - - r.Use(amw.Middleware) - -Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. - -*/ -package mux diff --git a/vendor/github.com/gorilla/mux/go.mod b/vendor/github.com/gorilla/mux/go.mod deleted file mode 100644 index cfc8ede5..00000000 --- a/vendor/github.com/gorilla/mux/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/gorilla/mux diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go deleted file mode 100644 index cf2b26dc..00000000 --- a/vendor/github.com/gorilla/mux/middleware.go +++ /dev/null @@ -1,79 +0,0 @@ -package mux - -import ( - "net/http" - "strings" -) - -// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler. -// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed -// to it, and then calls the handler passed as parameter to the MiddlewareFunc. -type MiddlewareFunc func(http.Handler) http.Handler - -// middleware interface is anything which implements a MiddlewareFunc named Middleware. -type middleware interface { - Middleware(handler http.Handler) http.Handler -} - -// Middleware allows MiddlewareFunc to implement the middleware interface. -func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler { - return mw(handler) -} - -// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. -func (r *Router) Use(mwf ...MiddlewareFunc) { - for _, fn := range mwf { - r.middlewares = append(r.middlewares, fn) - } -} - -// useInterface appends a middleware to the chain. 
Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. -func (r *Router) useInterface(mw middleware) { - r.middlewares = append(r.middlewares, mw) -} - -// CORSMethodMiddleware automatically sets the Access-Control-Allow-Methods response header -// on requests for routes that have an OPTIONS method matcher to all the method matchers on -// the route. Routes that do not explicitly handle OPTIONS requests will not be processed -// by the middleware. See examples for usage. -func CORSMethodMiddleware(r *Router) MiddlewareFunc { - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - allMethods, err := getAllMethodsForRoute(r, req) - if err == nil { - for _, v := range allMethods { - if v == http.MethodOptions { - w.Header().Set("Access-Control-Allow-Methods", strings.Join(allMethods, ",")) - } - } - } - - next.ServeHTTP(w, req) - }) - } -} - -// getAllMethodsForRoute returns all the methods from method matchers matching a given -// request. -func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) { - var allMethods []string - - err := r.Walk(func(route *Route, _ *Router, _ []*Route) error { - for _, m := range route.matchers { - if _, ok := m.(*routeRegexp); ok { - if m.Match(req, &RouteMatch{}) { - methods, err := route.GetMethods() - if err != nil { - return err - } - - allMethods = append(allMethods, methods...) - } - break - } - } - return nil - }) - - return allMethods, err -} diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go deleted file mode 100644 index a2cd193e..00000000 --- a/vendor/github.com/gorilla/mux/mux.go +++ /dev/null @@ -1,607 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package mux - -import ( - "errors" - "fmt" - "net/http" - "path" - "regexp" -) - -var ( - // ErrMethodMismatch is returned when the method in the request does not match - // the method defined against the route. - ErrMethodMismatch = errors.New("method is not allowed") - // ErrNotFound is returned when no route match is found. - ErrNotFound = errors.New("no matching route was found") -) - -// NewRouter returns a new router instance. -func NewRouter() *Router { - return &Router{namedRoutes: make(map[string]*Route)} -} - -// Router registers routes to be matched and dispatches a handler. -// -// It implements the http.Handler interface, so it can be registered to serve -// requests: -// -// var router = mux.NewRouter() -// -// func main() { -// http.Handle("/", router) -// } -// -// Or, for Google App Engine, register it in a init() function: -// -// func init() { -// http.Handle("/", router) -// } -// -// This will send all incoming requests to the router. -type Router struct { - // Configurable Handler to be used when no route matches. - NotFoundHandler http.Handler - - // Configurable Handler to be used when the request method does not match the route. - MethodNotAllowedHandler http.Handler - - // Routes to be matched, in order. - routes []*Route - - // Routes by name for URL building. - namedRoutes map[string]*Route - - // If true, do not clear the request context after handling the request. - // - // Deprecated: No effect when go1.7+ is used, since the context is stored - // on the request itself. 
- KeepContext bool - - // Slice of middlewares to be called after a match is found - middlewares []middleware - - // configuration shared with `Route` - routeConf -} - -// common route configuration shared between `Router` and `Route` -type routeConf struct { - // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to" - useEncodedPath bool - - // If true, when the path pattern is "/path/", accessing "/path" will - // redirect to the former and vice versa. - strictSlash bool - - // If true, when the path pattern is "/path//to", accessing "/path//to" - // will not redirect - skipClean bool - - // Manager for the variables from host and path. - regexp routeRegexpGroup - - // List of matchers. - matchers []matcher - - // The scheme used when building URLs. - buildScheme string - - buildVarsFunc BuildVarsFunc -} - -// returns an effective deep copy of `routeConf` -func copyRouteConf(r routeConf) routeConf { - c := r - - if r.regexp.path != nil { - c.regexp.path = copyRouteRegexp(r.regexp.path) - } - - if r.regexp.host != nil { - c.regexp.host = copyRouteRegexp(r.regexp.host) - } - - c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries)) - for _, q := range r.regexp.queries { - c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q)) - } - - c.matchers = make([]matcher, 0, len(r.matchers)) - for _, m := range r.matchers { - c.matchers = append(c.matchers, m) - } - - return c -} - -func copyRouteRegexp(r *routeRegexp) *routeRegexp { - c := *r - return &c -} - -// Match attempts to match the given request against the router's registered routes. -// -// If the request matches a route of this router or one of its subrouters the Route, -// Handler, and Vars fields of the the match argument are filled and this function -// returns true. -// -// If the request does not match any of this router's or its subrouters' routes -// then this function returns false. 
If available, a reason for the match failure -// will be filled in the match argument's MatchErr field. If the match failure type -// (eg: not found) has a registered handler, the handler is assigned to the Handler -// field of the match argument. -func (r *Router) Match(req *http.Request, match *RouteMatch) bool { - for _, route := range r.routes { - if route.Match(req, match) { - // Build middleware chain if no error was found - if match.MatchErr == nil { - for i := len(r.middlewares) - 1; i >= 0; i-- { - match.Handler = r.middlewares[i].Middleware(match.Handler) - } - } - return true - } - } - - if match.MatchErr == ErrMethodMismatch { - if r.MethodNotAllowedHandler != nil { - match.Handler = r.MethodNotAllowedHandler - return true - } - - return false - } - - // Closest match for a router (includes sub-routers) - if r.NotFoundHandler != nil { - match.Handler = r.NotFoundHandler - match.MatchErr = ErrNotFound - return true - } - - match.MatchErr = ErrNotFound - return false -} - -// ServeHTTP dispatches the handler registered in the matched route. -// -// When there is a match, the route variables can be retrieved calling -// mux.Vars(request). -func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if !r.skipClean { - path := req.URL.Path - if r.useEncodedPath { - path = req.URL.EscapedPath() - } - // Clean path to canonical form and redirect. - if p := cleanPath(path); p != path { - - // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. - // This matches with fix in go 1.2 r.c. 4 for same problem. 
Go Issue: - // http://code.google.com/p/go/issues/detail?id=5252 - url := *req.URL - url.Path = p - p = url.String() - - w.Header().Set("Location", p) - w.WriteHeader(http.StatusMovedPermanently) - return - } - } - var match RouteMatch - var handler http.Handler - if r.Match(req, &match) { - handler = match.Handler - req = setVars(req, match.Vars) - req = setCurrentRoute(req, match.Route) - } - - if handler == nil && match.MatchErr == ErrMethodMismatch { - handler = methodNotAllowedHandler() - } - - if handler == nil { - handler = http.NotFoundHandler() - } - - handler.ServeHTTP(w, req) -} - -// Get returns a route registered with the given name. -func (r *Router) Get(name string) *Route { - return r.namedRoutes[name] -} - -// GetRoute returns a route registered with the given name. This method -// was renamed to Get() and remains here for backwards compatibility. -func (r *Router) GetRoute(name string) *Route { - return r.namedRoutes[name] -} - -// StrictSlash defines the trailing slash behavior for new routes. The initial -// value is false. -// -// When true, if the route path is "/path/", accessing "/path" will perform a redirect -// to the former and vice versa. In other words, your application will always -// see the path as specified in the route. -// -// When false, if the route path is "/path", accessing "/path/" will not match -// this route and vice versa. -// -// The re-direct is a HTTP 301 (Moved Permanently). Note that when this is set for -// routes with a non-idempotent method (e.g. POST, PUT), the subsequent re-directed -// request will be made as a GET by most clients. Use middleware or client settings -// to modify this behaviour as needed. -// -// Special case: when a route sets a path prefix using the PathPrefix() method, -// strict slash is ignored for that route because the redirect behavior can't -// be determined from a prefix alone. However, any subrouters created from that -// route inherit the original StrictSlash setting. 
-func (r *Router) StrictSlash(value bool) *Router { - r.strictSlash = value - return r -} - -// SkipClean defines the path cleaning behaviour for new routes. The initial -// value is false. Users should be careful about which routes are not cleaned -// -// When true, if the route path is "/path//to", it will remain with the double -// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/ -// -// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will -// become /fetch/http/xkcd.com/534 -func (r *Router) SkipClean(value bool) *Router { - r.skipClean = value - return r -} - -// UseEncodedPath tells the router to match the encoded original path -// to the routes. -// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to". -// -// If not called, the router will match the unencoded path to the routes. -// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to" -func (r *Router) UseEncodedPath() *Router { - r.useEncodedPath = true - return r -} - -// ---------------------------------------------------------------------------- -// Route factories -// ---------------------------------------------------------------------------- - -// NewRoute registers an empty route. -func (r *Router) NewRoute() *Route { - // initialize a route with a copy of the parent router's configuration - route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} - r.routes = append(r.routes, route) - return route -} - -// Name registers a new route with a name. -// See Route.Name(). -func (r *Router) Name(name string) *Route { - return r.NewRoute().Name(name) -} - -// Handle registers a new route with a matcher for the URL path. -// See Route.Path() and Route.Handler(). -func (r *Router) Handle(path string, handler http.Handler) *Route { - return r.NewRoute().Path(path).Handler(handler) -} - -// HandleFunc registers a new route with a matcher for the URL path. -// See Route.Path() and Route.HandlerFunc(). 
-func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, - *http.Request)) *Route { - return r.NewRoute().Path(path).HandlerFunc(f) -} - -// Headers registers a new route with a matcher for request header values. -// See Route.Headers(). -func (r *Router) Headers(pairs ...string) *Route { - return r.NewRoute().Headers(pairs...) -} - -// Host registers a new route with a matcher for the URL host. -// See Route.Host(). -func (r *Router) Host(tpl string) *Route { - return r.NewRoute().Host(tpl) -} - -// MatcherFunc registers a new route with a custom matcher function. -// See Route.MatcherFunc(). -func (r *Router) MatcherFunc(f MatcherFunc) *Route { - return r.NewRoute().MatcherFunc(f) -} - -// Methods registers a new route with a matcher for HTTP methods. -// See Route.Methods(). -func (r *Router) Methods(methods ...string) *Route { - return r.NewRoute().Methods(methods...) -} - -// Path registers a new route with a matcher for the URL path. -// See Route.Path(). -func (r *Router) Path(tpl string) *Route { - return r.NewRoute().Path(tpl) -} - -// PathPrefix registers a new route with a matcher for the URL path prefix. -// See Route.PathPrefix(). -func (r *Router) PathPrefix(tpl string) *Route { - return r.NewRoute().PathPrefix(tpl) -} - -// Queries registers a new route with a matcher for URL query values. -// See Route.Queries(). -func (r *Router) Queries(pairs ...string) *Route { - return r.NewRoute().Queries(pairs...) -} - -// Schemes registers a new route with a matcher for URL schemes. -// See Route.Schemes(). -func (r *Router) Schemes(schemes ...string) *Route { - return r.NewRoute().Schemes(schemes...) -} - -// BuildVarsFunc registers a new route with a custom function for modifying -// route variables before building a URL. -func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { - return r.NewRoute().BuildVarsFunc(f) -} - -// Walk walks the router and all its sub-routers, calling walkFn for each route -// in the tree. 
The routes are walked in the order they were added. Sub-routers -// are explored depth-first. -func (r *Router) Walk(walkFn WalkFunc) error { - return r.walk(walkFn, []*Route{}) -} - -// SkipRouter is used as a return value from WalkFuncs to indicate that the -// router that walk is about to descend down to should be skipped. -var SkipRouter = errors.New("skip this router") - -// WalkFunc is the type of the function called for each route visited by Walk. -// At every invocation, it is given the current route, and the current router, -// and a list of ancestor routes that lead to the current route. -type WalkFunc func(route *Route, router *Router, ancestors []*Route) error - -func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { - for _, t := range r.routes { - err := walkFn(t, r, ancestors) - if err == SkipRouter { - continue - } - if err != nil { - return err - } - for _, sr := range t.matchers { - if h, ok := sr.(*Router); ok { - ancestors = append(ancestors, t) - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - ancestors = ancestors[:len(ancestors)-1] - } - } - if h, ok := t.handler.(*Router); ok { - ancestors = append(ancestors, t) - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - ancestors = ancestors[:len(ancestors)-1] - } - } - return nil -} - -// ---------------------------------------------------------------------------- -// Context -// ---------------------------------------------------------------------------- - -// RouteMatch stores information about a matched route. -type RouteMatch struct { - Route *Route - Handler http.Handler - Vars map[string]string - - // MatchErr is set to appropriate matching error - // It is set to ErrMethodMismatch if there is a mismatch in - // the request method and route method - MatchErr error -} - -type contextKey int - -const ( - varsKey contextKey = iota - routeKey -) - -// Vars returns the route variables for the current request, if any. 
-func Vars(r *http.Request) map[string]string { - if rv := contextGet(r, varsKey); rv != nil { - return rv.(map[string]string) - } - return nil -} - -// CurrentRoute returns the matched route for the current request, if any. -// This only works when called inside the handler of the matched route -// because the matched route is stored in the request context which is cleared -// after the handler returns, unless the KeepContext option is set on the -// Router. -func CurrentRoute(r *http.Request) *Route { - if rv := contextGet(r, routeKey); rv != nil { - return rv.(*Route) - } - return nil -} - -func setVars(r *http.Request, val interface{}) *http.Request { - return contextSet(r, varsKey, val) -} - -func setCurrentRoute(r *http.Request, val interface{}) *http.Request { - return contextSet(r, routeKey, val) -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -// cleanPath returns the canonical path for p, eliminating . and .. elements. -// Borrowed from the net/http package. -func cleanPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root; - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - - return np -} - -// uniqueVars returns an error if two slices contain duplicated strings. -func uniqueVars(s1, s2 []string) error { - for _, v1 := range s1 { - for _, v2 := range s2 { - if v1 == v2 { - return fmt.Errorf("mux: duplicated route variable %q", v2) - } - } - } - return nil -} - -// checkPairs returns the count of strings passed in, and an error if -// the count is not an even number. 
-func checkPairs(pairs ...string) (int, error) { - length := len(pairs) - if length%2 != 0 { - return length, fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - } - return length, nil -} - -// mapFromPairsToString converts variadic string parameters to a -// string to string map. -func mapFromPairsToString(pairs ...string) (map[string]string, error) { - length, err := checkPairs(pairs...) - if err != nil { - return nil, err - } - m := make(map[string]string, length/2) - for i := 0; i < length; i += 2 { - m[pairs[i]] = pairs[i+1] - } - return m, nil -} - -// mapFromPairsToRegex converts variadic string parameters to a -// string to regex map. -func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { - length, err := checkPairs(pairs...) - if err != nil { - return nil, err - } - m := make(map[string]*regexp.Regexp, length/2) - for i := 0; i < length; i += 2 { - regex, err := regexp.Compile(pairs[i+1]) - if err != nil { - return nil, err - } - m[pairs[i]] = regex - } - return m, nil -} - -// matchInArray returns true if the given string value is in the array. -func matchInArray(arr []string, value string) bool { - for _, v := range arr { - if v == value { - return true - } - } - return false -} - -// matchMapWithString returns true if the given key/value pairs exist in a given map. -func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != "" { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. 
- valueExists := false - for _, value := range values { - if v == value { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} - -// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against -// the given regex -func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != nil { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. - valueExists := false - for _, value := range values { - if v.MatchString(value) { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} - -// methodNotAllowed replies to the request with an HTTP status code 405. -func methodNotAllowed(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusMethodNotAllowed) -} - -// methodNotAllowedHandler returns a simple request handler -// that replies to each request with a status code 405. -func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) } diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go deleted file mode 100644 index ac1abcd4..00000000 --- a/vendor/github.com/gorilla/mux/regexp.go +++ /dev/null @@ -1,345 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package mux - -import ( - "bytes" - "fmt" - "net/http" - "net/url" - "regexp" - "strconv" - "strings" -) - -type routeRegexpOptions struct { - strictSlash bool - useEncodedPath bool -} - -type regexpType int - -const ( - regexpTypePath regexpType = 0 - regexpTypeHost regexpType = 1 - regexpTypePrefix regexpType = 2 - regexpTypeQuery regexpType = 3 -) - -// newRouteRegexp parses a route template and returns a routeRegexp, -// used to match a host, a path or a query string. -// -// It will extract named variables, assemble a regexp to be matched, create -// a "reverse" template to build URLs and compile regexps to validate variable -// values used in URL building. -// -// Previously we accepted only Python-like identifiers for variable -// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that -// name and pattern can't be empty, and names can't contain a colon. -func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) { - // Check if it is well-formed. - idxs, errBraces := braceIndices(tpl) - if errBraces != nil { - return nil, errBraces - } - // Backup the original. - template := tpl - // Now let's parse it. - defaultPattern := "[^/]+" - if typ == regexpTypeQuery { - defaultPattern = ".*" - } else if typ == regexpTypeHost { - defaultPattern = "[^.]+" - } - // Only match strict slash if not matching - if typ != regexpTypePath { - options.strictSlash = false - } - // Set a flag for strictSlash. - endSlash := false - if options.strictSlash && strings.HasSuffix(tpl, "/") { - tpl = tpl[:len(tpl)-1] - endSlash = true - } - varsN := make([]string, len(idxs)/2) - varsR := make([]*regexp.Regexp, len(idxs)/2) - pattern := bytes.NewBufferString("") - pattern.WriteByte('^') - reverse := bytes.NewBufferString("") - var end int - var err error - for i := 0; i < len(idxs); i += 2 { - // Set all values we are interested in. 
- raw := tpl[end:idxs[i]] - end = idxs[i+1] - parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) - name := parts[0] - patt := defaultPattern - if len(parts) == 2 { - patt = parts[1] - } - // Name or pattern can't be empty. - if name == "" || patt == "" { - return nil, fmt.Errorf("mux: missing name or pattern in %q", - tpl[idxs[i]:end]) - } - // Build the regexp pattern. - fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt) - - // Build the reverse template. - fmt.Fprintf(reverse, "%s%%s", raw) - - // Append variable name and compiled pattern. - varsN[i/2] = name - varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) - if err != nil { - return nil, err - } - } - // Add the remaining. - raw := tpl[end:] - pattern.WriteString(regexp.QuoteMeta(raw)) - if options.strictSlash { - pattern.WriteString("[/]?") - } - if typ == regexpTypeQuery { - // Add the default pattern if the query value is empty - if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { - pattern.WriteString(defaultPattern) - } - } - if typ != regexpTypePrefix { - pattern.WriteByte('$') - } - - var wildcardHostPort bool - if typ == regexpTypeHost { - if !strings.Contains(pattern.String(), ":") { - wildcardHostPort = true - } - } - reverse.WriteString(raw) - if endSlash { - reverse.WriteByte('/') - } - // Compile full regexp. - reg, errCompile := regexp.Compile(pattern.String()) - if errCompile != nil { - return nil, errCompile - } - - // Check for capturing groups which used to work in older versions - if reg.NumSubexp() != len(idxs)/2 { - panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) + - "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)") - } - - // Done! 
- return &routeRegexp{ - template: template, - regexpType: typ, - options: options, - regexp: reg, - reverse: reverse.String(), - varsN: varsN, - varsR: varsR, - wildcardHostPort: wildcardHostPort, - }, nil -} - -// routeRegexp stores a regexp to match a host or path and information to -// collect and validate route variables. -type routeRegexp struct { - // The unmodified template. - template string - // The type of match - regexpType regexpType - // Options for matching - options routeRegexpOptions - // Expanded regexp. - regexp *regexp.Regexp - // Reverse template. - reverse string - // Variable names. - varsN []string - // Variable regexps (validators). - varsR []*regexp.Regexp - // Wildcard host-port (no strict port match in hostname) - wildcardHostPort bool -} - -// Match matches the regexp against the URL host or path. -func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { - if r.regexpType == regexpTypeHost { - host := getHost(req) - if r.wildcardHostPort { - // Don't be strict on the port match - if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - } - return r.regexp.MatchString(host) - } else { - if r.regexpType == regexpTypeQuery { - return r.matchQueryString(req) - } - path := req.URL.Path - if r.options.useEncodedPath { - path = req.URL.EscapedPath() - } - return r.regexp.MatchString(path) - } -} - -// url builds a URL part using the given values. -func (r *routeRegexp) url(values map[string]string) (string, error) { - urlValues := make([]interface{}, len(r.varsN)) - for k, v := range r.varsN { - value, ok := values[v] - if !ok { - return "", fmt.Errorf("mux: missing route variable %q", v) - } - if r.regexpType == regexpTypeQuery { - value = url.QueryEscape(value) - } - urlValues[k] = value - } - rv := fmt.Sprintf(r.reverse, urlValues...) - if !r.regexp.MatchString(rv) { - // The URL is checked against the full regexp, instead of checking - // individual variables. 
This is faster but to provide a good error - // message, we check individual regexps if the URL doesn't match. - for k, v := range r.varsN { - if !r.varsR[k].MatchString(values[v]) { - return "", fmt.Errorf( - "mux: variable %q doesn't match, expected %q", values[v], - r.varsR[k].String()) - } - } - } - return rv, nil -} - -// getURLQuery returns a single query parameter from a request URL. -// For a URL with foo=bar&baz=ding, we return only the relevant key -// value pair for the routeRegexp. -func (r *routeRegexp) getURLQuery(req *http.Request) string { - if r.regexpType != regexpTypeQuery { - return "" - } - templateKey := strings.SplitN(r.template, "=", 2)[0] - for key, vals := range req.URL.Query() { - if key == templateKey && len(vals) > 0 { - return key + "=" + vals[0] - } - } - return "" -} - -func (r *routeRegexp) matchQueryString(req *http.Request) bool { - return r.regexp.MatchString(r.getURLQuery(req)) -} - -// braceIndices returns the first level curly brace indices from a string. -// It returns an error in case of unbalanced braces. -func braceIndices(s string) ([]int, error) { - var level, idx int - var idxs []int - for i := 0; i < len(s); i++ { - switch s[i] { - case '{': - if level++; level == 1 { - idx = i - } - case '}': - if level--; level == 0 { - idxs = append(idxs, idx, i+1) - } else if level < 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - } - } - if level != 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - return idxs, nil -} - -// varGroupName builds a capturing group name for the indexed variable. -func varGroupName(idx int) string { - return "v" + strconv.Itoa(idx) -} - -// ---------------------------------------------------------------------------- -// routeRegexpGroup -// ---------------------------------------------------------------------------- - -// routeRegexpGroup groups the route matchers that carry variables. 
-type routeRegexpGroup struct { - host *routeRegexp - path *routeRegexp - queries []*routeRegexp -} - -// setMatch extracts the variables from the URL once a route matches. -func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { - // Store host variables. - if v.host != nil { - host := getHost(req) - matches := v.host.regexp.FindStringSubmatchIndex(host) - if len(matches) > 0 { - extractVars(host, matches, v.host.varsN, m.Vars) - } - } - path := req.URL.Path - if r.useEncodedPath { - path = req.URL.EscapedPath() - } - // Store path variables. - if v.path != nil { - matches := v.path.regexp.FindStringSubmatchIndex(path) - if len(matches) > 0 { - extractVars(path, matches, v.path.varsN, m.Vars) - // Check if we should redirect. - if v.path.options.strictSlash { - p1 := strings.HasSuffix(path, "/") - p2 := strings.HasSuffix(v.path.template, "/") - if p1 != p2 { - u, _ := url.Parse(req.URL.String()) - if p1 { - u.Path = u.Path[:len(u.Path)-1] - } else { - u.Path += "/" - } - m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently) - } - } - } - } - // Store query string variables. - for _, q := range v.queries { - queryURL := q.getURLQuery(req) - matches := q.regexp.FindStringSubmatchIndex(queryURL) - if len(matches) > 0 { - extractVars(queryURL, matches, q.varsN, m.Vars) - } - } -} - -// getHost tries its best to return the request host. -// According to section 14.23 of RFC 2616 the Host header -// can include the port number if the default value of 80 is not used. 
-func getHost(r *http.Request) string { - if r.URL.IsAbs() { - return r.URL.Host - } - return r.Host -} - -func extractVars(input string, matches []int, names []string, output map[string]string) { - for i, name := range names { - output[name] = input[matches[2*i+2]:matches[2*i+3]] - } -} diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go deleted file mode 100644 index 8479c68c..00000000 --- a/vendor/github.com/gorilla/mux/route.go +++ /dev/null @@ -1,710 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "regexp" - "strings" -) - -// Route stores information to match a request and build URLs. -type Route struct { - // Request handler for the route. - handler http.Handler - // If true, this route never matches: it is only used to build URLs. - buildOnly bool - // The name used to build URLs. - name string - // Error resulted from building a route. - err error - - // "global" reference to all named routes - namedRoutes map[string]*Route - - // config possibly passed in from `Router` - routeConf -} - -// SkipClean reports whether path cleaning is enabled for this route via -// Router.SkipClean. -func (r *Route) SkipClean() bool { - return r.skipClean -} - -// Match matches the route against the request. -func (r *Route) Match(req *http.Request, match *RouteMatch) bool { - if r.buildOnly || r.err != nil { - return false - } - - var matchErr error - - // Match everything. - for _, m := range r.matchers { - if matched := m.Match(req, match); !matched { - if _, ok := m.(methodMatcher); ok { - matchErr = ErrMethodMismatch - continue - } - - // Ignore ErrNotFound errors. These errors arise from match call - // to Subrouters. - // - // This prevents subsequent matching subrouters from failing to - // run middleware. 
If not ignored, the middleware would see a - // non-nil MatchErr and be skipped, even when there was a - // matching route. - if match.MatchErr == ErrNotFound { - match.MatchErr = nil - } - - matchErr = nil - return false - } - } - - if matchErr != nil { - match.MatchErr = matchErr - return false - } - - if match.MatchErr == ErrMethodMismatch { - // We found a route which matches request method, clear MatchErr - match.MatchErr = nil - // Then override the mis-matched handler - match.Handler = r.handler - } - - // Yay, we have a match. Let's collect some info about it. - if match.Route == nil { - match.Route = r - } - if match.Handler == nil { - match.Handler = r.handler - } - if match.Vars == nil { - match.Vars = make(map[string]string) - } - - // Set variables. - r.regexp.setMatch(req, match, r) - return true -} - -// ---------------------------------------------------------------------------- -// Route attributes -// ---------------------------------------------------------------------------- - -// GetError returns an error resulted from building the route, if any. -func (r *Route) GetError() error { - return r.err -} - -// BuildOnly sets the route to never match: it is only used to build URLs. -func (r *Route) BuildOnly() *Route { - r.buildOnly = true - return r -} - -// Handler -------------------------------------------------------------------- - -// Handler sets a handler for the route. -func (r *Route) Handler(handler http.Handler) *Route { - if r.err == nil { - r.handler = handler - } - return r -} - -// HandlerFunc sets a handler function for the route. -func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { - return r.Handler(http.HandlerFunc(f)) -} - -// GetHandler returns the handler for the route, if any. -func (r *Route) GetHandler() http.Handler { - return r.handler -} - -// Name ----------------------------------------------------------------------- - -// Name sets the name for the route, used to build URLs. 
-// It is an error to call Name more than once on a route. -func (r *Route) Name(name string) *Route { - if r.name != "" { - r.err = fmt.Errorf("mux: route already has name %q, can't set %q", - r.name, name) - } - if r.err == nil { - r.name = name - r.namedRoutes[name] = r - } - return r -} - -// GetName returns the name for the route, if any. -func (r *Route) GetName() string { - return r.name -} - -// ---------------------------------------------------------------------------- -// Matchers -// ---------------------------------------------------------------------------- - -// matcher types try to match a request. -type matcher interface { - Match(*http.Request, *RouteMatch) bool -} - -// addMatcher adds a matcher to the route. -func (r *Route) addMatcher(m matcher) *Route { - if r.err == nil { - r.matchers = append(r.matchers, m) - } - return r -} - -// addRegexpMatcher adds a host or path matcher and builder to a route. -func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error { - if r.err != nil { - return r.err - } - if typ == regexpTypePath || typ == regexpTypePrefix { - if len(tpl) > 0 && tpl[0] != '/' { - return fmt.Errorf("mux: path must start with a slash, got %q", tpl) - } - if r.regexp.path != nil { - tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl - } - } - rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{ - strictSlash: r.strictSlash, - useEncodedPath: r.useEncodedPath, - }) - if err != nil { - return err - } - for _, q := range r.regexp.queries { - if err = uniqueVars(rr.varsN, q.varsN); err != nil { - return err - } - } - if typ == regexpTypeHost { - if r.regexp.path != nil { - if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { - return err - } - } - r.regexp.host = rr - } else { - if r.regexp.host != nil { - if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { - return err - } - } - if typ == regexpTypeQuery { - r.regexp.queries = append(r.regexp.queries, rr) - } else { - r.regexp.path = rr - } - 
} - r.addMatcher(rr) - return nil -} - -// Headers -------------------------------------------------------------------- - -// headerMatcher matches the request against header values. -type headerMatcher map[string]string - -func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMapWithString(m, r.Header, true) -} - -// Headers adds a matcher for request header values. -// It accepts a sequence of key/value pairs to be matched. For example: -// -// r := mux.NewRouter() -// r.Headers("Content-Type", "application/json", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will only match if both request header values match. -// If the value is an empty string, it will match any value if the key is set. -func (r *Route) Headers(pairs ...string) *Route { - if r.err == nil { - var headers map[string]string - headers, r.err = mapFromPairsToString(pairs...) - return r.addMatcher(headerMatcher(headers)) - } - return r -} - -// headerRegexMatcher matches the request against the route given a regex for the header -type headerRegexMatcher map[string]*regexp.Regexp - -func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMapWithRegex(m, r.Header, true) -} - -// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex -// support. For example: -// -// r := mux.NewRouter() -// r.HeadersRegexp("Content-Type", "application/(text|json)", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will only match if both the request header matches both regular expressions. -// If the value is an empty string, it will match any value if the key is set. -// Use the start and end of string anchors (^ and $) to match an exact value. -func (r *Route) HeadersRegexp(pairs ...string) *Route { - if r.err == nil { - var headers map[string]*regexp.Regexp - headers, r.err = mapFromPairsToRegex(pairs...) 
- return r.addMatcher(headerRegexMatcher(headers)) - } - return r -} - -// Host ----------------------------------------------------------------------- - -// Host adds a matcher for the URL host. -// It accepts a template with zero or more URL variables enclosed by {}. -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next dot. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter() -// r.Host("www.example.com") -// r.Host("{subdomain}.domain.com") -// r.Host("{subdomain:[a-z]+}.domain.com") -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Host(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, regexpTypeHost) - return r -} - -// MatcherFunc ---------------------------------------------------------------- - -// MatcherFunc is the function signature used by custom matchers. -type MatcherFunc func(*http.Request, *RouteMatch) bool - -// Match returns the match for a given request. -func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { - return m(r, match) -} - -// MatcherFunc adds a custom function to be used as request matcher. -func (r *Route) MatcherFunc(f MatcherFunc) *Route { - return r.addMatcher(f) -} - -// Methods -------------------------------------------------------------------- - -// methodMatcher matches the request against HTTP methods. -type methodMatcher []string - -func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.Method) -} - -// Methods adds a matcher for HTTP methods. -// It accepts a sequence of one or more methods to be matched, e.g.: -// "GET", "POST", "PUT". 
-func (r *Route) Methods(methods ...string) *Route { - for k, v := range methods { - methods[k] = strings.ToUpper(v) - } - return r.addMatcher(methodMatcher(methods)) -} - -// Path ----------------------------------------------------------------------- - -// Path adds a matcher for the URL path. -// It accepts a template with zero or more URL variables enclosed by {}. The -// template must start with a "/". -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter() -// r.Path("/products/").Handler(ProductsHandler) -// r.Path("/products/{key}").Handler(ProductsHandler) -// r.Path("/articles/{category}/{id:[0-9]+}"). -// Handler(ArticleHandler) -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Path(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, regexpTypePath) - return r -} - -// PathPrefix ----------------------------------------------------------------- - -// PathPrefix adds a matcher for the URL path prefix. This matches if the given -// template is a prefix of the full URL path. See Route.Path() for details on -// the tpl argument. -// -// Note that it does not treat slashes specially ("/foobar/" will be matched by -// the prefix "/foo") so you may want to use a trailing slash here. -// -// Also note that the setting of Router.StrictSlash() has no effect on routes -// with a PathPrefix matcher. -func (r *Route) PathPrefix(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, regexpTypePrefix) - return r -} - -// Query ---------------------------------------------------------------------- - -// Queries adds a matcher for URL query values. -// It accepts a sequence of key/value pairs. Values may define variables. 
-// For example: -// -// r := mux.NewRouter() -// r.Queries("foo", "bar", "id", "{id:[0-9]+}") -// -// The above route will only match if the URL contains the defined queries -// values, e.g.: ?foo=bar&id=42. -// -// If the value is an empty string, it will match any value if the key is set. -// -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. -func (r *Route) Queries(pairs ...string) *Route { - length := len(pairs) - if length%2 != 0 { - r.err = fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - return nil - } - for i := 0; i < length; i += 2 { - if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil { - return r - } - } - - return r -} - -// Schemes -------------------------------------------------------------------- - -// schemeMatcher matches the request against URL schemes. -type schemeMatcher []string - -func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.URL.Scheme) -} - -// Schemes adds a matcher for URL schemes. -// It accepts a sequence of schemes to be matched, e.g.: "http", "https". -func (r *Route) Schemes(schemes ...string) *Route { - for k, v := range schemes { - schemes[k] = strings.ToLower(v) - } - if len(schemes) > 0 { - r.buildScheme = schemes[0] - } - return r.addMatcher(schemeMatcher(schemes)) -} - -// BuildVarsFunc -------------------------------------------------------------- - -// BuildVarsFunc is the function signature used by custom build variable -// functions (which can modify route variables before a route's URL is built). -type BuildVarsFunc func(map[string]string) map[string]string - -// BuildVarsFunc adds a custom function to be used to modify build variables -// before a route's URL is built. 
-func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { - if r.buildVarsFunc != nil { - // compose the old and new functions - old := r.buildVarsFunc - r.buildVarsFunc = func(m map[string]string) map[string]string { - return f(old(m)) - } - } else { - r.buildVarsFunc = f - } - return r -} - -// Subrouter ------------------------------------------------------------------ - -// Subrouter creates a subrouter for the route. -// -// It will test the inner routes only if the parent route matched. For example: -// -// r := mux.NewRouter() -// s := r.Host("www.example.com").Subrouter() -// s.HandleFunc("/products/", ProductsHandler) -// s.HandleFunc("/products/{key}", ProductHandler) -// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) -// -// Here, the routes registered in the subrouter won't be tested if the host -// doesn't match. -func (r *Route) Subrouter() *Router { - // initialize a subrouter with a copy of the parent route's configuration - router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} - r.addMatcher(router) - return router -} - -// ---------------------------------------------------------------------------- -// URL building -// ---------------------------------------------------------------------------- - -// URL builds a URL for the route. -// -// It accepts a sequence of key/value pairs for the route variables. For -// example, given this route: -// -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") -// -// ...a URL for it can be built using: -// -// url, err := r.Get("article").URL("category", "technology", "id", "42") -// -// ...which will return an url.URL with the following path: -// -// "/articles/technology/42" -// -// This also works for host variables: -// -// r := mux.NewRouter() -// r.Host("{subdomain}.domain.com"). -// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). 
-// Name("article") -// -// // url.String() will be "http://news.domain.com/articles/technology/42" -// url, err := r.Get("article").URL("subdomain", "news", -// "category", "technology", -// "id", "42") -// -// All variables defined in the route are required, and their values must -// conform to the corresponding patterns. -func (r *Route) URL(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - var scheme, host, path string - queries := make([]string, 0, len(r.regexp.queries)) - if r.regexp.host != nil { - if host, err = r.regexp.host.url(values); err != nil { - return nil, err - } - scheme = "http" - if r.buildScheme != "" { - scheme = r.buildScheme - } - } - if r.regexp.path != nil { - if path, err = r.regexp.path.url(values); err != nil { - return nil, err - } - } - for _, q := range r.regexp.queries { - var query string - if query, err = q.url(values); err != nil { - return nil, err - } - queries = append(queries, query) - } - return &url.URL{ - Scheme: scheme, - Host: host, - Path: path, - RawQuery: strings.Join(queries, "&"), - }, nil -} - -// URLHost builds the host part of the URL for a route. See Route.URL(). -// -// The route must have a host defined. -func (r *Route) URLHost(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.host == nil { - return nil, errors.New("mux: route doesn't have a host") - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - host, err := r.regexp.host.url(values) - if err != nil { - return nil, err - } - u := &url.URL{ - Scheme: "http", - Host: host, - } - if r.buildScheme != "" { - u.Scheme = r.buildScheme - } - return u, nil -} - -// URLPath builds the path part of the URL for a route. See Route.URL(). -// -// The route must have a path defined. 
-func (r *Route) URLPath(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.path == nil { - return nil, errors.New("mux: route doesn't have a path") - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - path, err := r.regexp.path.url(values) - if err != nil { - return nil, err - } - return &url.URL{ - Path: path, - }, nil -} - -// GetPathTemplate returns the template used to build the -// route match. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define a path. -func (r *Route) GetPathTemplate() (string, error) { - if r.err != nil { - return "", r.err - } - if r.regexp.path == nil { - return "", errors.New("mux: route doesn't have a path") - } - return r.regexp.path.template, nil -} - -// GetPathRegexp returns the expanded regular expression used to match route path. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define a path. -func (r *Route) GetPathRegexp() (string, error) { - if r.err != nil { - return "", r.err - } - if r.regexp.path == nil { - return "", errors.New("mux: route does not have a path") - } - return r.regexp.path.regexp.String(), nil -} - -// GetQueriesRegexp returns the expanded regular expressions used to match the -// route queries. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not have queries. 
-func (r *Route) GetQueriesRegexp() ([]string, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.queries == nil { - return nil, errors.New("mux: route doesn't have queries") - } - var queries []string - for _, query := range r.regexp.queries { - queries = append(queries, query.regexp.String()) - } - return queries, nil -} - -// GetQueriesTemplates returns the templates used to build the -// query matching. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define queries. -func (r *Route) GetQueriesTemplates() ([]string, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.queries == nil { - return nil, errors.New("mux: route doesn't have queries") - } - var queries []string - for _, query := range r.regexp.queries { - queries = append(queries, query.template) - } - return queries, nil -} - -// GetMethods returns the methods the route matches against -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if route does not have methods. -func (r *Route) GetMethods() ([]string, error) { - if r.err != nil { - return nil, r.err - } - for _, m := range r.matchers { - if methods, ok := m.(methodMatcher); ok { - return []string(methods), nil - } - } - return nil, errors.New("mux: route doesn't have methods") -} - -// GetHostTemplate returns the template used to build the -// route match. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define a host. 
-func (r *Route) GetHostTemplate() (string, error) { - if r.err != nil { - return "", r.err - } - if r.regexp.host == nil { - return "", errors.New("mux: route doesn't have a host") - } - return r.regexp.host.template, nil -} - -// prepareVars converts the route variable pairs into a map. If the route has a -// BuildVarsFunc, it is invoked. -func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { - m, err := mapFromPairsToString(pairs...) - if err != nil { - return nil, err - } - return r.buildVars(m), nil -} - -func (r *Route) buildVars(m map[string]string) map[string]string { - if r.buildVarsFunc != nil { - m = r.buildVarsFunc(m) - } - return m -} diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go deleted file mode 100644 index 32ecffde..00000000 --- a/vendor/github.com/gorilla/mux/test_helpers.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import "net/http" - -// SetURLVars sets the URL variables for the given request, to be accessed via -// mux.Vars for testing route behaviour. Arguments are not modified, a shallow -// copy is returned. -// -// This API should only be used for testing purposes; it provides a way to -// inject variables into the request context. Alternatively, URL variables -// can be set by making a route that captures the required variables, -// starting a server and sending the request to that server. 
-func SetURLVars(r *http.Request, val map[string]string) *http.Request { - return setVars(r, val) -} diff --git a/vendor/github.com/gosnmp/gosnmp/.gitignore b/vendor/github.com/gosnmp/gosnmp/.gitignore deleted file mode 100644 index 909e8acb..00000000 --- a/vendor/github.com/gosnmp/gosnmp/.gitignore +++ /dev/null @@ -1,75 +0,0 @@ -# Created by https://www.gitignore.io/api/go,osx,vim - -### Go ### -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -### OSX ### -*.DS_Store -.AppleDouble -.LSOverride - -# Icon must end with two \r -Icon - -# Thumbnails -._* - -# Files that might appear in the root of a volume -.DocumentRevisions-V100 -.fseventsd -.Spotlight-V100 -.TemporaryItems -.Trashes -.VolumeIcon.icns -.com.apple.timemachine.donotpresent - -# Directories potentially created on remote AFP share -.AppleDB -.AppleDesktop -Network Trash Folder -Temporary Items -.apdisk - -### Vim ### -# swap -[._]*.s[a-v][a-z] -[._]*.sw[a-p] -[._]s[a-v][a-z] -[._]sw[a-p] -# session -Session.vim -# temporary -.netrwhist -*~ -# auto-generated tag files -tags - -# End of https://www.gitignore.io/api/go,osx,vim - -# gogland -.idea/ - -# git rebase files -*.orig - -# test coverage outputs -coverage.json -gosnmp.html - -# profiling outputs -cpu.out -mem.out -gosnmp.test diff --git a/vendor/github.com/gosnmp/gosnmp/.golangci.yml b/vendor/github.com/gosnmp/gosnmp/.golangci.yml deleted file mode 100644 index d1b233b2..00000000 --- a/vendor/github.com/gosnmp/gosnmp/.golangci.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -run: - timeout: 5m - -linters: - disable-all: true - enable: - - bodyclose - - deadcode - - dogsled - - dupl - - errcheck - - gochecknoglobals - - goconst - - gocritic - - goimports - - golint - - goprintffuncname - - gosec - - gosimple - - govet 
- - ineffassign - - interfacer - - misspell - - nakedret - - staticcheck - - structcheck - - stylecheck - - typecheck - - unconvert - - unparam - - unused - - varcheck - - nolintlint - - scopelint - - whitespace - -linters-settings: - gofmt: - simplify: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 20 - govet: - check-shadowing: true - enable-all: true - - # TODO the following linters - # - gocognit - # - gocyclo - # - goerr113 - # - gomnd - # - lll - # - maligned - # - nestif - # - prealloc diff --git a/vendor/github.com/gosnmp/gosnmp/AUTHORS.md b/vendor/github.com/gosnmp/gosnmp/AUTHORS.md deleted file mode 100644 index ff6379ef..00000000 --- a/vendor/github.com/gosnmp/gosnmp/AUTHORS.md +++ /dev/null @@ -1,88 +0,0 @@ -# GoSNMP authors - -`git log --pretty=format:"* %an %ae" df49b4fc0b10ed2cab253cecc8c3d86b72cec41d..HEAD | sort -f | uniq >> AUTHORS.md` - -`TODO: something clever with sed, etc to autogenerate this` - -* 10074432 liu.xuefeng1@zte.com.cn -* Andreas Louca andreas@louca.org -* Andrew Filonov aef@bks.tv -* Andris Raugulis moo@arthepsy.eu -* Balogh Ákos akos@rubin.hu -* Benjamin benjamin.guy.thomas@gmail.com -* Benjamin Thomas benjamin.guy.thomas@gmail.com -* Ben Kochie superq@gmail.com -* benthor github@benthor.name -* Brian Brazil brian.brazil@robustperception.io -* Bryan Hill bryan.d.hill@gmail.com -* Bryan Hill bryan.hill@ontario.ca -* Chris chris.dance@papercut.com -* codedance dance.chris@gmail.com -* Daniel Swarbrick daniel.swarbrick@gmail.com -* davidbj david_bj@126.com -* David Riley fraveydank@gmail.com -* Douglas Heriot git@douglasheriot.com -* dramirez dramirez@rackspace.com -* Dr Josef Karthauser joe@truespeed.com -* Eamon Bauman eamon@eamonbauman.com -* Eduardo Ferro Aldama eduardo.ferro.aldama@gmail.com -* Eduardo Ferro eduardo.ferro.aldama@gmail.com -* Eli Yukelzon reflog@gmail.com -* Felix Maurer felix@felix-maurer.de -* frozenbubbleboy github@wildtongue.net -* geofduf 46729592+geofduf@users.noreply.github.com -* Guillem 
Jover gjover@sipwise.com -* HD Moore x@hdm.io -* Igor Novgorodov igor@novg.net -* Ivan Radakovic iradakovic13@gmail.com -* jacob dubinsky dubinskyjm@gmail.com -* Jacob Dubinsky dubinskyjm@gmail.com -* Jaime Gil de Sagredo Luna jgil@alea-soluciones.com -* Jan Kodera koderja2@fit.cvut.cz -* Jared Housh j.housh@f5.com -* jclc jclc@protonmail.com -* Joe Cracchiolo jjc@simplybits.com -* Jon Auer jda@coldshore.com -* Jon Auer jda@tapodi.net -* Joshua Green joshua.green@mail.com -* JP Kekkonen karatepekka@gmail.com -* kauppine 24810630+kauppine@users.noreply.github.com -* Kauppine 24810630+kauppine@users.noreply.github.com -* Kian Ostvar kiano@jurumani.com -* krkini16 krkini16@users.noreply.github.com -* lilinzhe slayercat.registiononly@gmail.com -* lilinzhe slayercat.subscription@gmail.com -* Marc Arndt marcarndt@Marcs-MacBook-Pro.local -* Marc Arndt marc@marcarndt.com -* Martin Lindhe martinlindhe@users.noreply.github.com -* Marty Schoch marty.schoch@gmail.com -* Mattias Folke mattias.folke@gmail.com -* Mattias Folke mattias.folke@tre.se -* Mehdi Pourfar mehdipourfar@gmail.com -* meifakun runner.mei@gmail.com -* Michał Derkacz michal@Lnet.pl -* Michel Blanc mb@mbnet.fr -* Miroslav Genov mgenov@gmail.com -* Nathan Owens nathan_owens@cable.comcast.com -* Nathan Owens virtuallynathan@gmail.com -* NewHooker yaocanwu@gmail.com -* nikandfor nikandfor@gmail.com -* Patrick Hemmer patrick.hemmer@gmail.com -* Patryk Najda ptrknjd@gmail.com -* Peter Vypov peter.vypov@gmail.com -* Rene Fragoso ctrlrsf@gmail.com -* rjammalamadaka rajanikanth.jammalamadaka@mandiant.com -* Ross Wilson ross.wilson@iomart.com -* Sonia Hamilton sonia@snowfrog.net -* StefanHauth 63204425+StefanHauth@users.noreply.github.com -* Stefan Hauth stefan.hauth@dynatrace.com -* Tara taramerin@gmail.com -* The Binary binary4bytes@gmail.com -* Tim Rots 8184932+TimRots@users.noreply.github.com -* Tim Rots tim.rots@protonmail.ch -* toni-moreno toni.moreno@gmail.com -* Vallimamod Abdullah vma@users.noreply.github.com 
-* WangShouLin wang.shoulin1@zte.com.cn -* Whitham D. Reeve II thetawaves@gmail.com -* Whitham D. Reeve II wreeve@gci.com -* x1unix ascii@live.ru diff --git a/vendor/github.com/gosnmp/gosnmp/CHANGELOG.md b/vendor/github.com/gosnmp/gosnmp/CHANGELOG.md deleted file mode 100644 index c35fb470..00000000 --- a/vendor/github.com/gosnmp/gosnmp/CHANGELOG.md +++ /dev/null @@ -1,92 +0,0 @@ -## unreleased - -* [CHANGE] -* [FEATURE] -* [ENHANCEMENT] -* [BUGFIX] - -## v1.29.0 - -NOTE: This release returns the OctetString []byte behavior for v1.26.0 and earlier. - -* [CHANGE] Return OctetString as []byte #264 - -## v1.28.0 - -This release updates the Go import path from `github.com/soniah/gosnmp` -to `github.com/gosnmp/gosnmp`. - -* [CHANGE] Update project path #257 -* [ENHANCEMENT] Improve SNMPv3 trap support #253 - -## v1.27.0 - -* fix a race condition - logger -* INFORM responses -* linting - -## v1.26.0 - -* more SNMPv3 -* various bug fixes -* linting - -## v1.25.0 - -* SNMPv3 new hash functions for SNMPV3 USM RFC7860 -* SNMPv3 tests for SNMPv3 traps -* go versions 1.12 1.13 - -## v1.24.0 - -* doco, fix AUTHORS, fix copyright -* decode more packet types -* TCP trap listening - -## v1.23.1 - -* add support for contexts -* fix panic conditions by checking for out-of-bounds reads - -## v1.23.0 - -* BREAKING CHANGE: The mocks have been moved to `github.com/gosnmp/gosnmp/mocks`. - If you use them, you will need to adjust your imports. 
-* bug fix: issue 170: No results when performing a walk starting on a leaf OID -* bug fix: issue 210: Set function fails if value is an Integer -* doco: loggingEnabled, MIB parser -* linting - -## v1.22.0 - -* travis now failing build when goimports needs running -* gometalinter -* shell script for running local tests -* SNMPv3 - avoid crash when missing SecurityParameters -* add support for Walk and Get over TCP - RFC 3430 -* SNMPv3 - allow input of private key instead of passphrase - -## v1.21.0 - -* add netsnmp functionality "not check returned OIDs are increasing" - -## v1.20.0 - -* convert all tags to correct semantic versioning, and remove old tags -* SNMPv1 trap IDs should be marshalInt32() not single byte -* use packetSecParams not sp secretKey in v3 isAuthentic() -* fix IPAddress marshalling in Set() - -## v1.19.0 - -* bug fix: handle uninitialized v3 SecurityParameters in SnmpDecodePacket() -* SNMPError, Asn1BER - stringers; types on constants - -## v1.18.0 - -* bug fix: use format flags - logPrintf() not logPrint() -* bug fix: parseObjectIdentifier() now returns []byte{0} rather than error - when it receive zero length input -* use gomock -* start using go modules -* start a changelog diff --git a/vendor/github.com/gosnmp/gosnmp/Dockerfile b/vendor/github.com/gosnmp/gosnmp/Dockerfile deleted file mode 100644 index a77690e3..00000000 --- a/vendor/github.com/gosnmp/gosnmp/Dockerfile +++ /dev/null @@ -1,39 +0,0 @@ -FROM golang:1.14.4-alpine3.12 - -# Install deps -RUN apk add --no-cache \ - bash \ - curl \ - gcc \ - libc-dev \ - make \ - python3 \ - py3-pip - -# add new user -RUN addgroup -g 1001 \ - -S gosnmp; \ - adduser -u 1001 -D -S \ - -s /bin/bash \ - -h /home/gosnmp \ - -G gosnmp gosnmp - -RUN pip install snmpsim - -# Copy local branch into container -USER gosnmp -WORKDIR /go/src/github.com/gosnmp/gosnmp -COPY --chown=gosnmp . . 
- -RUN go get github.com/stretchr/testify/assert && \ - make tools && \ - make lint - -ENV GOSNMP_TARGET=127.0.0.1 -ENV GOSNMP_PORT=1024 -ENV GOSNMP_TARGET_IPV4=127.0.0.1 -ENV GOSNMP_PORT_IPV4=1024 -ENV GOSNMP_TARGET_IPV6='::1' -ENV GOSNMP_PORT_IPV6=1024 - -ENTRYPOINT ["/go/src/github.com/gosnmp/gosnmp/build_tests.sh"] diff --git a/vendor/github.com/gosnmp/gosnmp/README.md b/vendor/github.com/gosnmp/gosnmp/README.md deleted file mode 100644 index 5d727d3f..00000000 --- a/vendor/github.com/gosnmp/gosnmp/README.md +++ /dev/null @@ -1,272 +0,0 @@ -gosnmp -====== -[![Mentioned in Awesome Go](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/avelino/awesome-go#networking) - -![Build Status](https://circleci.com/gh/gosnmp/gosnmp.svg?style=shield) -[![PkgGoDev](https://pkg.go.dev/badge/github.com/gosnmp/gosnmp)](https://pkg.go.dev/github.com/gosnmp/gosnmp) - -GoSNMP is an SNMP client library fully written in Go. It provides Get, -GetNext, GetBulk, Walk, BulkWalk, Set and Traps. It supports IPv4 and -IPv6, using __SNMPv2c__ or __SNMPv3__. Builds are tested against -linux/amd64 and linux/386. - -# About - -**gosnmp** was started by [Andreas Louca](https://github.com/alouca), then -completely rewritten by [Sonia Hamilton](https://github.com/soniah) -(2012-2020), and now ownership has been transferred to the community at -[gosnmp/gosnmp](https://github.com/gosnmp/gosnmp). - -For support and help, join us in the #snmp channel of -[Gophers Slack](https://invite.slack.golangbridge.org/). - -# Overview - -GoSNMP has the following SNMP functions: - -* **Get** (single or multiple OIDs) -* **GetNext** -* **GetBulk** -* **Walk** - retrieves a subtree of values using GETNEXT. -* **BulkWalk** - retrieves a subtree of values using GETBULK. -* **Set** - supports Integers and OctetStrings. -* **SendTrap** - send SNMP TRAPs. -* **Listen** - act as an NMS for receiving TRAPs. 
- -GoSNMP has the following **helper** functions: - -* **ToBigInt** - treat returned values as `*big.Int` -* **Partition** - facilitates dividing up large slices of OIDs - -**gosnmp/gosnmp** has completely diverged from **alouca/gosnmp**, your code -will require modification in these (and other) locations: - -* the **Get** function has a different method signature -* the **NewGoSNMP** function has been removed, use **Connect** instead - (see Usage below). `Connect` uses the `GoSNMP` struct; - `gosnmp.Default` is provided for you to build on. -* GoSNMP no longer relies on **alouca/gologger** - you can use your - logger if it conforms to the `gosnmp.Logger` interface; otherwise - debugging will be discarded (/dev/null). - -```go -type Logger interface { - Print(v ...interface{}) - Printf(format string, v ...interface{}) -} -``` - -# Installation - -```shell -go get github.com/gosnmp/gosnmp -``` - -# Documentation - -http://godoc.org/github.com/gosnmp/gosnmp - -# Usage - -Here is `examples/example.go`, demonstrating how to use GoSNMP: - -```go -// Default is a pointer to a GoSNMP struct that contains sensible defaults -// eg port 161, community public, etc -g.Default.Target = "192.168.1.10" -err := g.Default.Connect() -if err != nil { - log.Fatalf("Connect() err: %v", err) -} -defer g.Default.Conn.Close() - -oids := []string{"1.3.6.1.2.1.1.4.0", "1.3.6.1.2.1.1.7.0"} -result, err2 := g.Default.Get(oids) // Get() accepts up to g.MAX_OIDS -if err2 != nil { - log.Fatalf("Get() err: %v", err2) -} - -for i, variable := range result.Variables { - fmt.Printf("%d: oid: %s ", i, variable.Name) - - // the Value of each variable returned by Get() implements - // interface{}. You could do a type switch... - switch variable.Type { - case g.OctetString: - bytes := variable.Value.([]byte) - fmt.Printf("string: %s\n", string(bytes)) - default: - // ... or often you're just interested in numeric values. 
- // ToBigInt() will return the Value as a BigInt, for plugging - // into your calculations. - fmt.Printf("number: %d\n", g.ToBigInt(variable.Value)) - } -} -``` - -Running this example gives the following output (from my printer): - -```shell -% go run example.go -0: oid: 1.3.6.1.2.1.1.4.0 string: Administrator -1: oid: 1.3.6.1.2.1.1.7.0 number: 104 -``` - -* `examples/example2.go` is similar to `example.go`, however it uses a - custom `&GoSNMP` rather than `g.Default` -* `examples/walkexample.go` demonstrates using `BulkWalk` -* `examples/example3.go` demonstrates `SNMPv3` -* `examples/trapserver.go` demonstrates writing an SNMP v2c trap server - -# MIB Parser - -I don't have any plans to write a mib parser. Others have suggested -https://github.com/sleepinggenius2/gosmi - -# Contributions - -Contributions are welcome, especially ones that have packet captures (see -below). - -If you've never contributed to a Go project before, here is an example workflow. - -1. [fork this repo on the GitHub webpage](https://github.com/gosnmp/gosnmp/fork) -1. `go get github.com/gosnmp/gosnmp` -1. `cd $GOPATH/src/github.com/gosnmp/gosnmp` -1. `git remote rename origin upstream` -1. `git remote add origin git@github.com:/gosnmp.git` -1. `git checkout -b development` -1. `git push -u origin development` (setup where you push to, check it works) - -# Packet Captures - -Create your packet captures in the following way: - -Expected output, obtained via an **snmp** command. For example: - -```shell -% snmpget -On -v2c -c public 203.50.251.17 1.3.6.1.2.1.1.7.0 \ - 1.3.6.1.2.1.2.2.1.2.6 1.3.6.1.2.1.2.2.1.5.3 -.1.3.6.1.2.1.1.7.0 = INTEGER: 78 -.1.3.6.1.2.1.2.2.1.2.6 = STRING: GigabitEthernet0 -.1.3.6.1.2.1.2.2.1.5.3 = Gauge32: 4294967295 -``` - -A packet capture, obtained while running the snmpget. For example: - -```shell -sudo tcpdump -s 0 -i eth0 -w foo.pcap host 203.50.251.17 and port 161 -``` - -# Bugs - -Rane's document [SNMP: Simple? 
Network Management -Protocol](https://www.ranecommercial.com/legacy/note161.html) was useful when learning the SNMP -protocol. - -Please create an [issue](https://github.com/gosnmp/gosnmp/issues) on -Github with packet captures (upload capture to Google Drive, Dropbox, or -similar) containing samples of missing BER types, or of any other bugs -you find. If possible, please include 2 or 3 examples of the -missing/faulty BER type. - -The following BER types have been implemented: - -* 0x00 UnknownType -* 0x01 Boolean -* 0x02 Integer -* 0x03 BitString -* 0x04 OctetString -* 0x05 Null -* 0x06 ObjectIdentifier -* 0x07 ObjectDescription -* 0x40 IPAddress (IPv4 & IPv6) -* 0x41 Counter32 -* 0x42 Gauge32 -* 0x43 TimeTicks -* 0x44 Opaque (Float & Double) -* 0x45 NsapAddress -* 0x46 Counter64 -* 0x47 Uinteger32 -* 0x78 OpaqueFloat -* 0x79 OpaqueDouble -* 0x80 NoSuchObject -* 0x81 NoSuchInstance -* 0x82 EndOfMibView - -# Running the Tests - -Local testing in Docker -```shell -docker build -t gosnmp/gosnmp:latest . -docker run -it gosnmp/gosnmp:latest -``` - -or - -```shell -export GOSNMP_TARGET=1.2.3.4 -export GOSNMP_PORT=161 -export GOSNMP_TARGET_IPV4=1.2.3.4 -export GOSNMP_PORT_IPV4=161 -export GOSNMP_TARGET_IPV6='0:0:0:0:0:ffff:102:304' -export GOSNMP_PORT_IPV6=161 -go test -v -tags all # for example -go test -v -tags helper # for example -``` - -Tests are grouped as follows: - -* Unit tests (validating data packing and marshalling): - * `marshal_test.go` - * `misc_test.go` -* Public API consistency tests: - * `gosnmp_api_test.go` -* End-to-end integration tests: - * `generic_e2e_test.go` - -The generic end-to-end integration test `generic_e2e_test.go` should -work against any SNMP MIB-2 compliant host (e.g. a router, NAS box, printer). - -Mocks were generated using: - -`mockgen -source=interface.go -destination=mocks/gosnmp_mock.go -package=mocks` - -However they're currently removed, as they were breaking linting. 
- -To profile cpu usage: - -```shell -go test -cpuprofile cpu.out -go test -c -go tool pprof gosnmp.test cpu.out -``` - -To profile memory usage: - -```shell -go test -memprofile mem.out -go test -c -go tool pprof gosnmp.test mem.out -``` - -To check test coverage: - -```shell -go get github.com/axw/gocov/gocov -go get github.com/matm/gocov-html -gocov test github.com/gosnmp/gosnmp | gocov-html > gosnmp.html && firefox gosnmp.html & -``` - - -# License - -Parts of the code are taken from the Golang project (specifically some -functions for unmarshaling BER responses), which are under the same terms -and conditions as the Go language. The rest of the code is under a BSD -license. - -See the LICENSE file for more details. - -The remaining code is Copyright 2012-2020 the GoSNMP Authors - see -AUTHORS.md for a list of authors. diff --git a/vendor/github.com/gosnmp/gosnmp/asn1ber_string.go b/vendor/github.com/gosnmp/gosnmp/asn1ber_string.go deleted file mode 100644 index 72e31c46..00000000 --- a/vendor/github.com/gosnmp/gosnmp/asn1ber_string.go +++ /dev/null @@ -1,37 +0,0 @@ -// Code generated by "stringer -type Asn1BER"; DO NOT EDIT. 
- -package gosnmp - -import "strconv" - -const ( - _Asn1BER_name_0 = "EndOfContentsBooleanIntegerBitStringOctetStringNullObjectIdentifierObjectDescription" - _Asn1BER_name_1 = "IPAddressCounter32Gauge32TimeTicksOpaqueNsapAddressCounter64Uinteger32" - _Asn1BER_name_2 = "OpaqueFloatOpaqueDouble" - _Asn1BER_name_3 = "NoSuchObjectNoSuchInstanceEndOfMibView" -) - -var ( - _Asn1BER_index_0 = [...]uint8{0, 13, 20, 27, 36, 47, 51, 67, 84} - _Asn1BER_index_1 = [...]uint8{0, 9, 18, 25, 34, 40, 51, 60, 70} - _Asn1BER_index_2 = [...]uint8{0, 11, 23} - _Asn1BER_index_3 = [...]uint8{0, 12, 26, 38} -) - -func (i Asn1BER) String() string { - switch { - case 0 <= i && i <= 7: - return _Asn1BER_name_0[_Asn1BER_index_0[i]:_Asn1BER_index_0[i+1]] - case 64 <= i && i <= 71: - i -= 64 - return _Asn1BER_name_1[_Asn1BER_index_1[i]:_Asn1BER_index_1[i+1]] - case 120 <= i && i <= 121: - i -= 120 - return _Asn1BER_name_2[_Asn1BER_index_2[i]:_Asn1BER_index_2[i+1]] - case 128 <= i && i <= 130: - i -= 128 - return _Asn1BER_name_3[_Asn1BER_index_3[i]:_Asn1BER_index_3[i+1]] - default: - return "Asn1BER(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/gosnmp/gosnmp/build_tests.sh b/vendor/github.com/gosnmp/gosnmp/build_tests.sh deleted file mode 100644 index b6657370..00000000 --- a/vendor/github.com/gosnmp/gosnmp/build_tests.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash - -snmpsimd.py --logging-method=null --agent-udpv4-endpoint=127.0.0.1:1024 & -go test -v -tags helper -go test -v -tags marshal -go test -v -tags misc -go test -v -tags api -go test -v -tags end2end -go test -v -tags trap -go test -v -tags all -race diff --git a/vendor/github.com/gosnmp/gosnmp/go.mod b/vendor/github.com/gosnmp/gosnmp/go.mod deleted file mode 100644 index ee2fc608..00000000 --- a/vendor/github.com/gosnmp/gosnmp/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/gosnmp/gosnmp - -go 1.13 - -require ( - github.com/golang/mock v1.4.4 - github.com/stretchr/testify v1.6.1 -) 
diff --git a/vendor/github.com/gosnmp/gosnmp/go.sum b/vendor/github.com/gosnmp/gosnmp/go.sum deleted file mode 100644 index e7f8f518..00000000 --- a/vendor/github.com/gosnmp/gosnmp/go.sum +++ /dev/null @@ -1,20 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c 
h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/gosnmp/gosnmp/goimports2 b/vendor/github.com/gosnmp/gosnmp/goimports2 deleted file mode 100644 index fab78234..00000000 --- a/vendor/github.com/gosnmp/gosnmp/goimports2 +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -# remove all blank lines in go 'imports' statements, -# then sort with goimports - -if [ $# != 1 ] ; then - echo "usage: $0 " - exit 1 -fi - -EXE="sed" -if [[ "$OSTYPE" == "darwin"* ]]; then - EXE="ssed" -fi -$EXE -i ' - /^import/,/)/ { - /^$/ d - } -' $1 -goimports -w $1 -gofmt -s -w $1 diff --git a/vendor/github.com/gosnmp/gosnmp/goimports2_all b/vendor/github.com/gosnmp/gosnmp/goimports2_all deleted file mode 100644 index 5522b4fa..00000000 --- a/vendor/github.com/gosnmp/gosnmp/goimports2_all +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -# run goimports2 script across all go files, excluding the following directories: -# - mocks - -find . -type d -name mocks -prune -o -type f -name '*.go' -exec ./goimports2 '{}' ';' diff --git a/vendor/github.com/gosnmp/gosnmp/gosnmp.go b/vendor/github.com/gosnmp/gosnmp/gosnmp.go deleted file mode 100644 index e9dca753..00000000 --- a/vendor/github.com/gosnmp/gosnmp/gosnmp.go +++ /dev/null @@ -1,621 +0,0 @@ -// Copyright 2012 The GoSNMP Authors. All rights reserved. Use of this -// source code is governed by a BSD-style license that can be found in the -// LICENSE file. - -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gosnmp - -import ( - "context" - "crypto/rand" - "fmt" - "io/ioutil" - "log" - "math" - "math/big" - "net" - "strconv" - "sync" - "sync/atomic" - "time" -) - -const ( - // MaxOids is the maximum number of OIDs permitted in a single call, - // otherwise error. 
MaxOids too high can cause remote devices to fail - // strangely. 60 seems to be a common value that works, but you will want - // to change this in the GoSNMP struct - MaxOids = 60 - - // Base OID for MIB-2 defined SNMP variables - baseOid = ".1.3.6.1.2.1" - - // Java SNMP uses 50, snmp-net uses 10 - defaultMaxRepetitions = 50 - - // "udp" is used regularly, prevent 'goconst' complaints - udp = "udp" -) - -// GoSNMP represents GoSNMP library state -type GoSNMP struct { - mu sync.Mutex - - // Conn is net connection to use, typically established using GoSNMP.Connect() - Conn net.Conn - - // Target is an ipv4 address - Target string - - // Port is a port - Port uint16 - - // Transport is the transport protocol to use ("udp" or "tcp"); if unset "udp" will be used. - Transport string - - // Community is an SNMP Community string - Community string - - // Version is an SNMP Version - Version SnmpVersion - - // Context allows for overall deadlines and cancellation - Context context.Context - - // Timeout is the timeout for one SNMP request/response - Timeout time.Duration - - // Set the number of retries to attempt - Retries int - - // Double timeout in each retry - ExponentialTimeout bool - - // Logger is the GoSNMP.Logger to use for debugging. If nil, debugging - // output will be discarded (/dev/null). For verbose logging to stdout: - // x.Logger = log.New(os.Stdout, "", 0) - Logger Logger - - // loggingEnabled is set if the Logger isn't nil, otherwise any logging calls - // are ignored via shortcircuit - loggingEnabled bool - - // MaxOids is the maximum number of oids allowed in a Get() - // (default: MaxOids) - MaxOids int - - // MaxRepetitions sets the GETBULK max-repetitions used by BulkWalk* - // Unless MaxRepetitions is specified it will use defaultMaxRepetitions (50) - // This may cause issues with some devices, if so set MaxRepetitions lower. 
- // See comments in https://github.com/gosnmp/gosnmp/issues/100 - MaxRepetitions uint8 - - // NonRepeaters sets the GETBULK max-repeaters used by BulkWalk* - // (default: 0 as per RFC 1905) - NonRepeaters int - - // netsnmp has '-C APPOPTS - set various application specific behaviours' - // - // - 'c: do not check returned OIDs are increasing' - use AppOpts = map[string]interface{"c":true} with - // Walk() or BulkWalk(). The library user needs to implement their own policy for terminating walks. - // - 'p,i,I,t,E' -> pull requests welcome - AppOpts map[string]interface{} - - // Internal - used to sync requests to responses - requestID uint32 - random uint32 - - rxBuf *[rxBufSize]byte // has to be pointer due to https://github.com/golang/go/issues/11728 - - // MsgFlags is an SNMPV3 MsgFlags - MsgFlags SnmpV3MsgFlags - - // SecurityModel is an SNMPV3 Security Model - SecurityModel SnmpV3SecurityModel - - // SecurityParameters is an SNMPV3 Security Model parameters struct - SecurityParameters SnmpV3SecurityParameters - - // ContextEngineID is SNMPV3 ContextEngineID in ScopedPDU - ContextEngineID string - - // ContextName is SNMPV3 ContextName in ScopedPDU - ContextName string - - // Internal - used to sync requests to responses - snmpv3 - msgID uint32 -} - -// Default connection settings -//nolint:gochecknoglobals -var Default = &GoSNMP{ - Port: 161, - Transport: udp, - Community: "public", - Version: Version2c, - Timeout: time.Duration(2) * time.Second, - Retries: 3, - ExponentialTimeout: true, - MaxOids: MaxOids, -} - -// SnmpPDU will be used when doing SNMP Set's -type SnmpPDU struct { - // Name is an oid in string format eg ".1.3.6.1.4.9.27" - Name string - - // The type of the value eg Integer - Type Asn1BER - - // The value to be set by the SNMP set, or the value when - // sending a trap - Value interface{} -} - -// AsnExtensionID mask to identify types > 30 in subsequent byte -const AsnExtensionID = 0x1F - -//go:generate stringer -type Asn1BER - -// Asn1BER is 
the type of the SNMP PDU -type Asn1BER byte - -// Asn1BER's - http://www.ietf.org/rfc/rfc1442.txt -const ( - EndOfContents Asn1BER = 0x00 - UnknownType Asn1BER = 0x00 - Boolean Asn1BER = 0x01 - Integer Asn1BER = 0x02 - BitString Asn1BER = 0x03 - OctetString Asn1BER = 0x04 - Null Asn1BER = 0x05 - ObjectIdentifier Asn1BER = 0x06 - ObjectDescription Asn1BER = 0x07 - IPAddress Asn1BER = 0x40 - Counter32 Asn1BER = 0x41 - Gauge32 Asn1BER = 0x42 - TimeTicks Asn1BER = 0x43 - Opaque Asn1BER = 0x44 - NsapAddress Asn1BER = 0x45 - Counter64 Asn1BER = 0x46 - Uinteger32 Asn1BER = 0x47 - OpaqueFloat Asn1BER = 0x78 - OpaqueDouble Asn1BER = 0x79 - NoSuchObject Asn1BER = 0x80 - NoSuchInstance Asn1BER = 0x81 - EndOfMibView Asn1BER = 0x82 -) - -//go:generate stringer -type SNMPError - -// SNMPError is the type for standard SNMP errors. -type SNMPError uint8 - -// SNMP Errors -const ( - NoError SNMPError = iota // No error occurred. This code is also used in all request PDUs, since they have no error status to report. - TooBig // The size of the Response-PDU would be too large to transport. - NoSuchName // The name of a requested object was not found. - BadValue // A value in the request didn't match the structure that the recipient of the request had for the object. For example, an object in the request was specified with an incorrect length or type. - ReadOnly // An attempt was made to set a variable that has an Access value indicating that it is read-only. - GenErr // An error occurred other than one indicated by a more specific error code in this table. - NoAccess // Access was denied to the object for security reasons. - WrongType // The object type in a variable binding is incorrect for the object. - WrongLength // A variable binding specifies a length incorrect for the object. - WrongEncoding // A variable binding specifies an encoding incorrect for the object. - WrongValue // The value given in a variable binding is not possible for the object. 
- NoCreation // A specified variable does not exist and cannot be created. - InconsistentValue // A variable binding specifies a value that could be held by the variable but cannot be assigned to it at this time. - ResourceUnavailable // An attempt to set a variable required a resource that is not available. - CommitFailed // An attempt to set a particular variable failed. - UndoFailed // An attempt to set a particular variable as part of a group of variables failed, and the attempt to then undo the setting of other variables was not successful. - AuthorizationError // A problem occurred in authorization. - NotWritable // The variable cannot be written or created. - InconsistentName // The name in a variable binding specifies a variable that does not exist. -) - -// -// Public Functions (main interface) -// - -// Connect creates and opens a socket. Because UDP is a connectionless -// protocol, you won't know if the remote host is responding until you send -// packets. Neither will you know if the host is regularly disappearing and reappearing. -// -// For historical reasons (ie this is part of the public API), the method won't -// be renamed to Dial(). 
-func (x *GoSNMP) Connect() error { - return x.connect("") -} - -// ConnectIPv4 forces an IPv4-only connection -func (x *GoSNMP) ConnectIPv4() error { - return x.connect("4") -} - -// ConnectIPv6 forces an IPv6-only connection -func (x *GoSNMP) ConnectIPv6() error { - return x.connect("6") -} - -// connect to address addr on the given network -// -// https://golang.org/pkg/net/#Dial gives acceptable network values as: -// "tcp", "tcp4" (IPv4-only), "tcp6" (IPv6-only), "udp", "udp4" (IPv4-only),"udp6" (IPv6-only), "ip", -// "ip4" (IPv4-only), "ip6" (IPv6-only), "unix", "unixgram" and "unixpacket" -func (x *GoSNMP) connect(networkSuffix string) error { - err := x.validateParameters() - if err != nil { - return err - } - - x.Transport += networkSuffix - if err = x.netConnect(); err != nil { - return fmt.Errorf("error establishing connection to host: %s", err.Error()) - } - - if x.random == 0 { - n, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt32)) // returns a uniform random value in [0, 2147483647]. - if err != nil { - return fmt.Errorf("error occurred while generating random: %s", err.Error()) - } - x.random = uint32(n.Uint64()) - } - // http://tools.ietf.org/html/rfc3412#section-6 - msgID only uses the first 31 bits - // msgID INTEGER (0..2147483647) - x.msgID = x.random - - // RequestID is Integer32 from SNMPV2-SMI and uses all 32 bits - x.requestID = x.random - - x.rxBuf = new([rxBufSize]byte) - - return nil -} - -// Performs the real socket opening network operation. 
This can be used to do a -// reconnect (needed for TCP) -func (x *GoSNMP) netConnect() error { - var err error - addr := net.JoinHostPort(x.Target, strconv.Itoa(int(x.Port))) - dialer := net.Dialer{Timeout: x.Timeout} - x.Conn, err = dialer.DialContext(x.Context, x.Transport, addr) - return err -} - -func (x *GoSNMP) validateParameters() error { - if x.Logger == nil { - x.mu.Lock() - defer x.mu.Unlock() - x.Logger = log.New(ioutil.Discard, "", 0) - } else { - x.loggingEnabled = true - } - - if x.Transport == "" { - x.Transport = udp - } - - if x.MaxOids == 0 { - x.MaxOids = MaxOids - } else if x.MaxOids < 0 { - return fmt.Errorf("field MaxOids cannot be less than 0") - } - - if x.Version == Version3 { - x.MsgFlags |= Reportable // tell the snmp server that a report PDU MUST be sent - - err := x.validateParametersV3() - if err != nil { - return err - } - err = x.SecurityParameters.init(x.Logger) - if err != nil { - return err - } - } - - if x.Context == nil { - x.Context = context.Background() - } - return nil -} - -func (x *GoSNMP) mkSnmpPacket(pdutype PDUType, pdus []SnmpPDU, nonRepeaters uint8, maxRepetitions uint8) *SnmpPacket { - var newSecParams SnmpV3SecurityParameters - if x.SecurityParameters != nil { - newSecParams = x.SecurityParameters.Copy() - } - return &SnmpPacket{ - Version: x.Version, - Community: x.Community, - MsgFlags: x.MsgFlags, - SecurityModel: x.SecurityModel, - SecurityParameters: newSecParams, - ContextEngineID: x.ContextEngineID, - ContextName: x.ContextName, - Error: 0, - ErrorIndex: 0, - PDUType: pdutype, - NonRepeaters: nonRepeaters, - MaxRepetitions: maxRepetitions, - Variables: pdus, - } -} - -// Get sends an SNMP GET request -func (x *GoSNMP) Get(oids []string) (result *SnmpPacket, err error) { - oidCount := len(oids) - if oidCount > x.MaxOids { - return nil, fmt.Errorf("oid count (%d) is greater than MaxOids (%d)", - oidCount, x.MaxOids) - } - // convert oids slice to pdu slice - var pdus []SnmpPDU - for _, oid := range oids { - 
pdus = append(pdus, SnmpPDU{oid, Null, nil}) - } - // build up SnmpPacket - packetOut := x.mkSnmpPacket(GetRequest, pdus, 0, 0) - return x.send(packetOut, true) -} - -// Set sends an SNMP SET request -func (x *GoSNMP) Set(pdus []SnmpPDU) (result *SnmpPacket, err error) { - var packetOut *SnmpPacket - switch pdus[0].Type { - // TODO test Gauge32 - case Integer, OctetString, Gauge32, IPAddress: - packetOut = x.mkSnmpPacket(SetRequest, pdus, 0, 0) - default: - return nil, fmt.Errorf("ERR:gosnmp currently only supports SNMP SETs for Integers, IPAddress and OctetStrings") - } - return x.send(packetOut, true) -} - -// GetNext sends an SNMP GETNEXT request -func (x *GoSNMP) GetNext(oids []string) (result *SnmpPacket, err error) { - oidCount := len(oids) - if oidCount > x.MaxOids { - return nil, fmt.Errorf("oid count (%d) is greater than MaxOids (%d)", - oidCount, x.MaxOids) - } - - // convert oids slice to pdu slice - var pdus []SnmpPDU - for _, oid := range oids { - pdus = append(pdus, SnmpPDU{oid, Null, nil}) - } - - // Marshal and send the packet - packetOut := x.mkSnmpPacket(GetNextRequest, pdus, 0, 0) - - return x.send(packetOut, true) -} - -// GetBulk sends an SNMP GETBULK request -// -// For maxRepetitions greater than 255, use BulkWalk() or BulkWalkAll() -func (x *GoSNMP) GetBulk(oids []string, nonRepeaters uint8, maxRepetitions uint8) (result *SnmpPacket, err error) { - oidCount := len(oids) - if oidCount > x.MaxOids { - return nil, fmt.Errorf("oid count (%d) is greater than MaxOids (%d)", - oidCount, x.MaxOids) - } - - // convert oids slice to pdu slice - var pdus []SnmpPDU - for _, oid := range oids { - pdus = append(pdus, SnmpPDU{oid, Null, nil}) - } - - // Marshal and send the packet - packetOut := x.mkSnmpPacket(GetBulkRequest, pdus, nonRepeaters, maxRepetitions) - return x.send(packetOut, true) -} - -// SnmpEncodePacket exposes SNMP packet generation to external callers. 
-// This is useful for generating traffic for use over separate transport -// stacks and creating traffic samples for test purposes. -func (x *GoSNMP) SnmpEncodePacket(pdutype PDUType, pdus []SnmpPDU, nonRepeaters uint8, maxRepetitions uint8) ([]byte, error) { - err := x.validateParameters() - if err != nil { - return []byte{}, err - } - - pkt := x.mkSnmpPacket(pdutype, pdus, nonRepeaters, maxRepetitions) - - // Request ID is an atomic counter (started at a random value) - reqID := atomic.AddUint32(&(x.requestID), 1) // TODO: fix overflows - pkt.RequestID = reqID - - if x.Version == Version3 { - msgID := atomic.AddUint32(&(x.msgID), 1) // TODO: fix overflows - pkt.MsgID = msgID - - err = x.initPacket(pkt) - if err != nil { - return []byte{}, err - } - } - - var out []byte - out, err = pkt.marshalMsg() - if err != nil { - return []byte{}, err - } - - return out, nil -} - -// SnmpDecodePacket exposes SNMP packet parsing to external callers. -// This is useful for processing traffic from other sources and -// building test harnesses. 
-func (x *GoSNMP) SnmpDecodePacket(resp []byte) (*SnmpPacket, error) { - var err error - - result := &SnmpPacket{} - - err = x.validateParameters() - if err != nil { - return result, err - } - - result.Logger = x.Logger - if x.SecurityParameters != nil { - result.SecurityParameters = x.SecurityParameters.Copy() - } - - var cursor int - cursor, err = x.unmarshalHeader(resp, result) - if err != nil { - err = fmt.Errorf("unable to decode packet header: %s", err.Error()) - return result, err - } - - if result.Version == Version3 { - resp, cursor, err = x.decryptPacket(resp, cursor, result) - if err != nil { - return result, err - } - } - - err = x.unmarshalPayload(resp, cursor, result) - if err != nil { - err = fmt.Errorf("unable to decode packet body: %s", err.Error()) - return result, err - } - - // if result == nil { - // err = fmt.Errorf("Unable to decode packet: no variables") - // return result, err - // } - return result, nil -} - -// SetRequestID sets the base ID value for future requests -func (x *GoSNMP) SetRequestID(reqID uint32) { - x.requestID = reqID -} - -// SetMsgID sets the base ID value for future messages -func (x *GoSNMP) SetMsgID(msgID uint32) { - x.msgID = msgID & 0x7fffffff -} - -// -// SNMP Walk functions - Analogous to net-snmp's snmpwalk commands -// - -// WalkFunc is the type of the function called for each data unit visited -// by the Walk function. If an error is returned processing stops. -type WalkFunc func(dataUnit SnmpPDU) error - -// BulkWalk retrieves a subtree of values using GETBULK. As the tree is -// walked walkFn is called for each new value. The function immediately returns -// an error if either there is an underlaying SNMP error (e.g. GetBulk fails), -// or if walkFn returns an error. 
-func (x *GoSNMP) BulkWalk(rootOid string, walkFn WalkFunc) error { - return x.walk(GetBulkRequest, rootOid, walkFn) -} - -// BulkWalkAll is similar to BulkWalk but returns a filled array of all values -// rather than using a callback function to stream results. Caution: if you -// have set x.AppOpts to 'c', BulkWalkAll may loop indefinitely and cause an -// Out Of Memory - use BulkWalk instead. -func (x *GoSNMP) BulkWalkAll(rootOid string) (results []SnmpPDU, err error) { - return x.walkAll(GetBulkRequest, rootOid) -} - -// Walk retrieves a subtree of values using GETNEXT - a request is made for each -// value, unlike BulkWalk which does this operation in batches. As the tree is -// walked walkFn is called for each new value. The function immediately returns -// an error if either there is an underlaying SNMP error (e.g. GetNext fails), -// or if walkFn returns an error. -func (x *GoSNMP) Walk(rootOid string, walkFn WalkFunc) error { - return x.walk(GetNextRequest, rootOid, walkFn) -} - -// WalkAll is similar to Walk but returns a filled array of all values rather -// than using a callback function to stream results. Caution: if you have set -// x.AppOpts to 'c', WalkAll may loop indefinitely and cause an Out Of Memory - -// use Walk instead. -func (x *GoSNMP) WalkAll(rootOid string) (results []SnmpPDU, err error) { - return x.walkAll(GetNextRequest, rootOid) -} - -// -// Public Functions (helpers) - in alphabetical order -// - -// Partition - returns true when dividing a slice into -// partitionSize lengths, including last partition which may be smaller -// than partitionSize. This is useful when you have a large array of OIDs -// to run Get() on. See the tests for example usage. 
-// -// For example for a slice of 8 items to be broken into partitions of -// length 3, Partition returns true for the currentPosition having -// the following values: -// -// 0 1 2 3 4 5 6 7 -// T T T -// -func Partition(currentPosition, partitionSize, sliceLength int) bool { - if currentPosition < 0 || currentPosition >= sliceLength { - return false - } - if partitionSize == 1 { // redundant, but an obvious optimisation - return true - } - if currentPosition%partitionSize == partitionSize-1 { - return true - } - if currentPosition == sliceLength-1 { - return true - } - return false -} - -// ToBigInt converts SnmpPDU.Value to big.Int, or returns a zero big.Int for -// non int-like types (eg strings). -// -// This is a convenience function to make working with SnmpPDU's easier - it -// reduces the need for type assertions. A big.Int is convenient, as SNMP can -// return int32, uint32, and uint64. -func ToBigInt(value interface{}) *big.Int { - var val int64 - switch value := value.(type) { // shadow - case int: - val = int64(value) - case int8: - val = int64(value) - case int16: - val = int64(value) - case int32: - val = int64(value) - case int64: - val = value - case uint: - val = int64(value) - case uint8: - val = int64(value) - case uint16: - val = int64(value) - case uint32: - val = int64(value) - case uint64: - return (uint64ToBigInt(value)) - case string: - // for testing and other apps - numbers may appear as strings - var err error - if val, err = strconv.ParseInt(value, 10, 64); err != nil { - return new(big.Int) - } - default: - return new(big.Int) - } - return big.NewInt(val) -} diff --git a/vendor/github.com/gosnmp/gosnmp/helper.go b/vendor/github.com/gosnmp/gosnmp/helper.go deleted file mode 100644 index 7aa51af1..00000000 --- a/vendor/github.com/gosnmp/gosnmp/helper.go +++ /dev/null @@ -1,810 +0,0 @@ -// Copyright 2012 The GoSNMP Authors. All rights reserved. 
Use of this -// source code is governed by a BSD-style license that can be found in the -// LICENSE file. - -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gosnmp - -import ( - // "bytes" - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "math" - "math/big" - "net" - "os" - "strconv" - "strings" -) - -// variable struct is used by decodeValue(), which is used for debugging -type variable struct { - Name []int - Type Asn1BER - Value interface{} -} - -// -- helper functions (mostly) in alphabetical order -------------------------- - -// Check makes checking errors easy, so they actually get a minimal check -func (x *GoSNMP) Check(err error) { - if err != nil { - x.Logger.Printf("Check: %v\n", err) - os.Exit(1) - } -} - -// Check makes checking errors easy, so they actually get a minimal check -func (packet *SnmpPacket) Check(err error) { - if err != nil { - packet.Logger.Printf("Check: %v\n", err) - os.Exit(1) - } -} - -// Check makes checking errors easy, so they actually get a minimal check -func Check(err error) { - if err != nil { - log.Fatalf("Check: %v\n", err) - } -} - -func (x *GoSNMP) decodeValue(data []byte, msg string) (*variable, error) { - retVal := &variable{} - - if len(msg) > 0 { - x.logPrintf("decodeValue: msg: %s", msg) - } - - if len(data) == 0 { - return nil, fmt.Errorf("err: zero byte buffer") - } - - // values matching this mask have the type in subsequent byte - if data[0]&AsnExtensionID == AsnExtensionID { - if len(data) < 2 { - return nil, fmt.Errorf("bytes: % x err: truncated (data %d length %d)", data, len(data), 2) - } - data = data[1:] - } - switch Asn1BER(data[0]) { - case Integer: - // 0x02. 
signed - x.logPrint("decodeValue: type is Integer") - length, cursor := parseLength(data) - if length > len(data) { - return nil, fmt.Errorf("bytes: % x err: truncated (data %d length %d)", data, len(data), length) - } - - var ret int - var err2 error - if ret, err2 = parseInt(data[cursor:length]); err2 != nil { - x.logPrintf("%v:", err2) - return nil, fmt.Errorf("bytes: % x err: %v", data, err2) - } - retVal.Type = Integer - retVal.Value = ret - case OctetString: - // 0x04 - x.logPrint("decodeValue: type is OctetString") - length, cursor := parseLength(data) - if length > len(data) { - return nil, fmt.Errorf("bytes: % x err: truncated (data %d length %d)", data, len(data), length) - } - - retVal.Type = OctetString - retVal.Value = data[cursor:length] - case Null: - // 0x05 - x.logPrint("decodeValue: type is Null") - retVal.Type = Null - retVal.Value = nil - case ObjectIdentifier: - // 0x06 - x.logPrint("decodeValue: type is ObjectIdentifier") - rawOid, _, err2 := parseRawField(x.Logger, data, "OID") - if err2 != nil { - return nil, fmt.Errorf("error parsing OID Value: %s", err2.Error()) - } - var oid []int - var ok bool - if oid, ok = rawOid.([]int); !ok { - return nil, fmt.Errorf("unable to type assert rawOid |%v| to []int", rawOid) - } - retVal.Type = ObjectIdentifier - retVal.Value = oidToString(oid) - case IPAddress: - // 0x40 - x.logPrint("decodeValue: type is IPAddress") - retVal.Type = IPAddress - if len(data) < 2 { - return nil, fmt.Errorf("not enough data for ipv4 address: %x", data) - } - - switch data[1] { - case 0: // real life, buggy devices returning bad data - retVal.Value = nil - return retVal, nil - case 4: // IPv4 - if len(data) < 6 { - return nil, fmt.Errorf("not enough data for ipv4 address: %x", data) - } - retVal.Value = net.IPv4(data[2], data[3], data[4], data[5]).String() - case 16: // IPv6 - if len(data) < 18 { - return nil, fmt.Errorf("not enough data for ipv6 address: %x", data) - } - d := make(net.IP, 16) - copy(d, data[2:17]) - 
retVal.Value = d.String() - default: - return nil, fmt.Errorf("got ipaddress len %d, expected 4 or 16", data[1]) - } - case Counter32: - // 0x41. unsigned - x.logPrint("decodeValue: type is Counter32") - length, cursor := parseLength(data) - if length > len(data) { - return nil, fmt.Errorf("not enough data for Counter32 %x (data %d length %d)", data, len(data), length) - } - - ret, err2 := parseUint(data[cursor:length]) - if err2 != nil { - x.logPrintf("decodeValue: err is %v", err2) - break - } - retVal.Type = Counter32 - retVal.Value = ret - case Gauge32: - // 0x42. unsigned - x.logPrint("decodeValue: type is Gauge32") - length, cursor := parseLength(data) - if length > len(data) { - return nil, fmt.Errorf("not enough data for Gauge32 %x (data %d length %d)", data, len(data), length) - } - - ret, err2 := parseUint(data[cursor:length]) - if err2 != nil { - x.logPrintf("decodeValue: err is %v", err2) - break - } - retVal.Type = Gauge32 - retVal.Value = ret - case TimeTicks: - // 0x43 - x.logPrint("decodeValue: type is TimeTicks") - length, cursor := parseLength(data) - if length > len(data) { - return nil, fmt.Errorf("not enough data for TimeTicks %x (data %d length %d)", data, len(data), length) - } - - ret, err2 := parseUint32(data[cursor:length]) - if err2 != nil { - x.logPrintf("decodeValue: err is %v", err2) - break - } - retVal.Type = TimeTicks - retVal.Value = ret - case Opaque: - // 0x44 - x.logPrint("decodeValue: type is Opaque") - length, cursor := parseLength(data) - if length > len(data) { - return nil, fmt.Errorf("not enough data for Opaque %x (data %d length %d)", data, len(data), length) - } - - opaqueData := data[cursor:length] - // recursively decode opaque data - return x.decodeValue(opaqueData, msg) - case Counter64: - // 0x46 - x.logPrint("decodeValue: type is Counter64") - length, cursor := parseLength(data) - if length > len(data) { - return nil, fmt.Errorf("not enough data for Counter64 %x (data %d length %d)", data, len(data), length) - } - 
- ret, err2 := parseUint64(data[cursor:length]) - if err2 != nil { - x.logPrintf("decodeValue: err is %v", err2) - break - } - retVal.Type = Counter64 - retVal.Value = ret - case OpaqueFloat: - // 0x78 - x.logPrint("decodeValue: type is OpaqueFloat") - length, cursor := parseLength(data) - if length > len(data) { - return nil, fmt.Errorf("not enough data for OpaqueFloat %x (data %d length %d)", data, len(data), length) - } - - var err error - retVal.Type = OpaqueFloat - retVal.Value, err = parseFloat32(data[cursor:length]) - if err != nil { - return nil, err - } - case OpaqueDouble: - // 0x79 - x.logPrint("decodeValue: type is OpaqueDouble") - length, cursor := parseLength(data) - if length > len(data) { - return nil, fmt.Errorf("not enough data for OpaqueDouble %x (data %d length %d)", data, len(data), length) - } - - var err error - retVal.Type = OpaqueDouble - retVal.Value, err = parseFloat64(data[cursor:length]) - if err != nil { - return nil, err - } - case NoSuchObject: - // 0x80 - x.logPrint("decodeValue: type is NoSuchObject") - retVal.Type = NoSuchObject - retVal.Value = nil - case NoSuchInstance: - // 0x81 - x.logPrint("decodeValue: type is NoSuchInstance") - retVal.Type = NoSuchInstance - retVal.Value = nil - case EndOfMibView: - // 0x82 - x.logPrint("decodeValue: type is EndOfMibView") - retVal.Type = EndOfMibView - retVal.Value = nil - default: - x.logPrintf("decodeValue: type %x isn't implemented", data[0]) - retVal.Type = UnknownType - retVal.Value = nil - } - x.logPrintf("decodeValue: value is %#v", retVal.Value) - return retVal, nil -} - -func marshalUvarInt(x uint32) []byte { - buf := make([]byte, 4) - binary.BigEndian.PutUint32(buf, x) - i := 0 - for ; i < 3; i++ { - if buf[i] != 0 { - break - } - } - buf = buf[i:] - // if the highest bit in buf is set and x is not negative - prepend a byte to make it positive - if len(buf) > 0 && buf[0]&0x80 > 0 { - buf = append([]byte{0}, buf...) 
- } - return buf -} - -func marshalBase128Int(out io.ByteWriter, n int64) (err error) { - if n == 0 { - err = out.WriteByte(0) - return - } - - l := 0 - for i := n; i > 0; i >>= 7 { - l++ - } - - for i := l - 1; i >= 0; i-- { - o := byte(n >> uint(i*7)) - o &= 0x7f - if i != 0 { - o |= 0x80 - } - err = out.WriteByte(o) - if err != nil { - return - } - } - - return nil -} - -/* - snmp Integer32 and INTEGER: - -2^31 and 2^31-1 inclusive (-2147483648 to 2147483647 decimal) - (FYI https://groups.google.com/forum/#!topic/comp.protocols.snmp/1xaAMzCe_hE) - - versus: - - snmp Counter32, Gauge32, TimeTicks, Unsigned32: (below) - non-negative integer, maximum value of 2^32-1 (4294967295 decimal) -*/ - -// marshalInt32 builds a byte representation of a signed 32 bit int in BigEndian form -// ie -2^31 and 2^31-1 inclusive (-2147483648 to 2147483647 decimal) -func marshalInt32(value int) (rs []byte, err error) { - rs = make([]byte, 4) - if 0 <= value && value <= 2147483647 { - binary.BigEndian.PutUint32(rs, uint32(value)) - if value < 0x80 { - return rs[3:], nil - } - if value < 0x8000 { - return rs[2:], nil - } - if value < 0x800000 { - return rs[1:], nil - } - return rs, nil - } - if -2147483648 <= value && value < 0 { - value = ^value - binary.BigEndian.PutUint32(rs, uint32(value)) - for k, v := range rs { - rs[k] = ^v - } - return rs, nil - } - return nil, fmt.Errorf("unable to marshal %d", value) -} - -func marshalUint64(v interface{}) ([]byte, error) { - bs := make([]byte, 8) - source := v.(uint64) - binary.BigEndian.PutUint64(bs, source) // will panic on failure - // truncate leading zeros. Cleaner technique? - return bytes.TrimLeft(bs, "\x00"), nil -} - -// Counter32, Gauge32, TimeTicks, Unsigned32 -func marshalUint32(v interface{}) ([]byte, error) { - bs := make([]byte, 4) - - var source uint32 - switch val := v.(type) { - case uint32: - source = val - case uint: - source = uint32(val) - // We could do others here, but coercing from anything else is dangerous. 
- // Even uint could be 64 bits, though in practice nothing we work with here - // is. - default: - return nil, fmt.Errorf("unable to marshal %T to uint32", v) - } - - binary.BigEndian.PutUint32(bs, source) // will panic on failure - // truncate leading zeros. Cleaner technique? - if source < 0x80 { - return bs[3:], nil - } - if source < 0x8000 { - return bs[2:], nil - } - if source < 0x800000 { - return bs[1:], nil - } - return bs, nil -} - -func marshalFloat32(v interface{}) ([]byte, error) { - source := v.(float32) - i32 := math.Float32bits(source) - return marshalUint32(i32) -} - -func marshalFloat64(v interface{}) ([]byte, error) { - source := v.(float64) - i64 := math.Float64bits(source) - return marshalUint64(i64) -} - -// marshalLength builds a byte representation of length -// -// http://luca.ntop.org/Teaching/Appunti/asn1.html -// -// Length octets. There are two forms: short (for lengths between 0 and 127), -// and long definite (for lengths between 0 and 2^1008 -1). -// -// * Short form. One octet. Bit 8 has value "0" and bits 7-1 give the length. -// * Long form. Two to 127 octets. Bit 8 of first octet has value "1" and bits -// 7-1 give the number of additional length octets. Second and following -// octets give the length, base 256, most significant digit first. -func marshalLength(length int) ([]byte, error) { - // more convenient to pass length as int than uint64. 
Therefore check < 0 - if length < 0 { - return nil, fmt.Errorf("length must be greater than zero") - } else if length < 127 { - return []byte{byte(length)}, nil - } - - buf := new(bytes.Buffer) - err := binary.Write(buf, binary.BigEndian, uint64(length)) - if err != nil { - return nil, err - } - bufBytes := buf.Bytes() - - // strip leading zeros - for idx, octect := range bufBytes { - if octect != 00 { - bufBytes = bufBytes[idx:] - break - } - } - - header := []byte{byte(128 | len(bufBytes))} - return append(header, bufBytes...), nil -} - -func marshalObjectIdentifier(oid []int) (ret []byte, err error) { - out := new(bytes.Buffer) - if len(oid) < 2 || oid[0] > 6 || oid[1] >= 40 { - return nil, errors.New("invalid object identifier") - } - - err = out.WriteByte(byte(oid[0]*40 + oid[1])) - if err != nil { - return - } - for i := 2; i < len(oid); i++ { - err = marshalBase128Int(out, int64(oid[i])) - if err != nil { - return - } - } - - ret = out.Bytes() - return -} - -func marshalOID(oid string) ([]byte, error) { - var err error - - // Encode the oid - oid = strings.Trim(oid, ".") - oidParts := strings.Split(oid, ".") - oidBytes := make([]int, len(oidParts)) - - // Convert the string OID to an array of integers - for i := 0; i < len(oidParts); i++ { - oidBytes[i], err = strconv.Atoi(oidParts[i]) - if err != nil { - return nil, fmt.Errorf("unable to parse OID: %s", err.Error()) - } - } - - mOid, err := marshalObjectIdentifier(oidBytes) - - if err != nil { - return nil, fmt.Errorf("unable to marshal OID: %s", err.Error()) - } - - return mOid, err -} - -func oidToString(oid []int) (ret string) { - oidAsString := make([]string, len(oid)+1) - - // used for appending of the first dot - oidAsString[0] = "" - for i := range oid { - oidAsString[i+1] = strconv.Itoa(oid[i]) - } - - return strings.Join(oidAsString, ".") -} - -// TODO no tests -func ipv4toBytes(ip net.IP) []byte { - return []byte(ip)[12:] -} - -// parseBase128Int parses a base-128 encoded int from the given offset 
in the -// given byte slice. It returns the value and the new offset. -func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) { - offset = initOffset - for shifted := 0; offset < len(bytes); shifted++ { - if shifted > 4 { - err = fmt.Errorf("structural error: base 128 integer too large") - return - } - ret <<= 7 - b := bytes[offset] - ret |= int(b & 0x7f) - offset++ - if b&0x80 == 0 { - return - } - } - err = fmt.Errorf("syntax error: truncated base 128 integer") - return -} - -// parseInt64 treats the given bytes as a big-endian, signed integer and -// returns the result. -func parseInt64(bytes []byte) (ret int64, err error) { - if len(bytes) > 8 { - // We'll overflow an int64 in this case. - err = errors.New("integer too large") - return - } - for bytesRead := 0; bytesRead < len(bytes); bytesRead++ { - ret <<= 8 - ret |= int64(bytes[bytesRead]) - } - - // Shift up and down in order to sign extend the result. - ret <<= 64 - uint8(len(bytes))*8 - ret >>= 64 - uint8(len(bytes))*8 - return -} - -// parseInt treats the given bytes as a big-endian, signed integer and returns -// the result. -func parseInt(bytes []byte) (int, error) { - ret64, err := parseInt64(bytes) - if err != nil { - return 0, err - } - if ret64 != int64(int(ret64)) { - return 0, errors.New("integer too large") - } - return int(ret64), nil -} - -// parseLength parses and calculates an snmp packet length -// -// http://luca.ntop.org/Teaching/Appunti/asn1.html -// -// Length octets. There are two forms: short (for lengths between 0 and 127), -// and long definite (for lengths between 0 and 2^1008 -1). -// -// * Short form. One octet. Bit 8 has value "0" and bits 7-1 give the length. -// * Long form. Two to 127 octets. Bit 8 of first octet has value "1" and bits -// 7-1 give the number of additional length octets. Second and following -// octets give the length, base 256, most significant digit first. 
-func parseLength(bytes []byte) (length int, cursor int) { - switch { - case len(bytes) <= 2: - // handle null octet strings ie "0x04 0x00" - cursor = len(bytes) - length = len(bytes) - case int(bytes[1]) <= 127: - length = int(bytes[1]) - length += 2 - cursor += 2 - default: - numOctets := int(bytes[1]) & 127 - for i := 0; i < numOctets; i++ { - length <<= 8 - length += int(bytes[2+i]) - } - length += 2 + numOctets - cursor += 2 + numOctets - } - return length, cursor -} - -// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and -// returns it. An object identifier is a sequence of variable length integers -// that are assigned in a hierarchy. -func parseObjectIdentifier(bytes []byte) (s []int, err error) { - if len(bytes) == 0 { - return []int{0}, nil - } - - // In the worst case, we get two elements from the first byte (which is - // encoded differently) and then every varint is a single byte long. - s = make([]int, len(bytes)+1) - - // The first byte is 40*value1 + value2: - s[0] = int(bytes[0]) / 40 - s[1] = int(bytes[0]) % 40 - i := 2 - for offset := 1; offset < len(bytes); i++ { - var v int - v, offset, err = parseBase128Int(bytes, offset) - if err != nil { - return - } - s[i] = v - } - s = s[0:i] - return -} - -func parseRawField(logger Logger, data []byte, msg string) (interface{}, int, error) { - if len(data) == 0 { - return nil, 0, fmt.Errorf("empty data passed to parseRawField") - } - logger.Printf("parseRawField: %s", msg) - switch Asn1BER(data[0]) { - case Integer: - length, cursor := parseLength(data) - if length > len(data) { - return nil, 0, fmt.Errorf("not enough data for Integer (%d vs %d): %x", length, len(data), data) - } - i, err := parseInt(data[cursor:length]) - if err != nil { - return nil, 0, fmt.Errorf("unable to parse raw INTEGER: %x err: %v", data, err) - } - return i, length, nil - case OctetString: - length, cursor := parseLength(data) - if length > len(data) { - return nil, 0, fmt.Errorf("not enough data for 
OctetString (%d vs %d): %x", length, len(data), data) - } - return string(data[cursor:length]), length, nil - case ObjectIdentifier: - length, cursor := parseLength(data) - if length > len(data) { - return nil, 0, fmt.Errorf("not enough data for OID (%d vs %d): %x", length, len(data), data) - } - oid, err := parseObjectIdentifier(data[cursor:length]) - return oid, length, err - case IPAddress: - length, _ := parseLength(data) - if len(data) < 2 { - return nil, 0, fmt.Errorf("not enough data for ipv4 address: %x", data) - } - - switch data[1] { - case 0: // real life, buggy devices returning bad data - return nil, length, nil - case 4: // IPv4 - if len(data) < 6 { - return nil, 0, fmt.Errorf("not enough data for ipv4 address: %x", data) - } - return net.IPv4(data[2], data[3], data[4], data[5]).String(), length, nil - default: - return nil, 0, fmt.Errorf("got ipaddress len %d, expected 4", data[1]) - } - case TimeTicks: - length, cursor := parseLength(data) - if length > len(data) { - return nil, 0, fmt.Errorf("not enough data for TimeTicks (%d vs %d): %x", length, len(data), data) - } - ret, err := parseUint(data[cursor:length]) - if err != nil { - return nil, 0, fmt.Errorf("error in parseUint: %s", err) - } - return ret, length, nil - } - - return nil, 0, fmt.Errorf("unknown field type: %x", data[0]) -} - -// parseUint64 treats the given bytes as a big-endian, unsigned integer and returns -// the result. -func parseUint64(bytes []byte) (ret uint64, err error) { - if len(bytes) > 9 || (len(bytes) > 8 && bytes[0] != 0x0) { - // We'll overflow a uint64 in this case. - err = errors.New("integer too large") - return - } - for bytesRead := 0; bytesRead < len(bytes); bytesRead++ { - ret <<= 8 - ret |= uint64(bytes[bytesRead]) - } - return -} - -// parseUint32 treats the given bytes as a big-endian, signed integer and returns -// the result. 
-func parseUint32(bytes []byte) (uint32, error) { - ret, err := parseUint(bytes) - if err != nil { - return 0, err - } - return uint32(ret), nil -} - -// parseUint treats the given bytes as a big-endian, signed integer and returns -// the result. -func parseUint(bytes []byte) (uint, error) { - ret64, err := parseUint64(bytes) - if err != nil { - return 0, err - } - if ret64 != uint64(uint(ret64)) { - return 0, errors.New("integer too large") - } - return uint(ret64), nil -} - -func parseFloat32(bytes []byte) (ret float32, err error) { - if len(bytes) > 4 { - // We'll overflow a uint64 in this case. - err = errors.New("float too large") - return - } - ret = math.Float32frombits(binary.BigEndian.Uint32(bytes)) - return -} - -func parseFloat64(bytes []byte) (ret float64, err error) { - if len(bytes) > 8 { - // We'll overflow a uint64 in this case. - err = errors.New("float too large") - return - } - ret = math.Float64frombits(binary.BigEndian.Uint64(bytes)) - return -} - -// Issue 4389: math/big: add SetUint64 and Uint64 functions to *Int -// -// uint64ToBigInt copied from: http://github.com/cznic/mathutil/blob/master/mathutil.go#L341 -// -// replace with Uint64ToBigInt or equivalent when using Go 1.1 - -//nolint:gochecknoglobals -var uint64ToBigIntDelta big.Int - -func init() { - uint64ToBigIntDelta.SetBit(&uint64ToBigIntDelta, 63, 1) -} - -func uint64ToBigInt(n uint64) *big.Int { - if n <= math.MaxInt64 { - return big.NewInt(int64(n)) - } - - y := big.NewInt(int64(n - uint64(math.MaxInt64) - 1)) - return y.Add(y, &uint64ToBigIntDelta) -} - -// -- Bit String --------------------------------------------------------------- - -// BitStringValue is the structure to use when you want an ASN.1 BIT STRING type. A -// bit string is padded up to the nearest byte in memory and the number of -// valid bits is recorded. Padding bits will be zero. -type BitStringValue struct { - Bytes []byte // bits packed into bytes. - BitLength int // length in bits. 
-} - -// At returns the bit at the given index. If the index is out of range it -// returns false. -func (b BitStringValue) At(i int) int { - if i < 0 || i >= b.BitLength { - return 0 - } - x := i / 8 - y := 7 - uint(i%8) - return int(b.Bytes[x]>>y) & 1 -} - -// RightAlign returns a slice where the padding bits are at the beginning. The -// slice may share memory with the BitString. -func (b BitStringValue) RightAlign() []byte { - shift := uint(8 - (b.BitLength % 8)) - if shift == 8 || len(b.Bytes) == 0 { - return b.Bytes - } - - a := make([]byte, len(b.Bytes)) - a[0] = b.Bytes[0] >> shift - for i := 1; i < len(b.Bytes); i++ { - a[i] = b.Bytes[i-1] << (8 - shift) - a[i] |= b.Bytes[i] >> shift - } - - return a -} - -// -- SnmpVersion -------------------------------------------------------------- - -func (s SnmpVersion) String() string { - if s == Version1 { - return "1" - } else if s == Version2c { - return "2c" - } - return "3" -} diff --git a/vendor/github.com/gosnmp/gosnmp/interface.go b/vendor/github.com/gosnmp/gosnmp/interface.go deleted file mode 100644 index 68250e91..00000000 --- a/vendor/github.com/gosnmp/gosnmp/interface.go +++ /dev/null @@ -1,340 +0,0 @@ -// Copyright 2012 The GoSNMP Authors. All rights reserved. Use of this -// source code is governed by a BSD-style license that can be found in the -// LICENSE file. - -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gosnmp - -import ( - "time" -) - -//go:generate mockgen --destination gosnmp_mock.go --package=gosnmp --source interface.go - -// Handler is a GoSNMP interface -// -// Handler is provided to assist with testing using mocks -type Handler interface { - // Connect creates and opens a socket. Because UDP is a connectionless - // protocol, you won't know if the remote host is responding until you send - // packets. 
And if the host is regularly disappearing and reappearing, you won't - // know if you've only done a Connect(). - // - // For historical reasons (ie this is part of the public API), the method won't - // be renamed. - Connect() error - - // ConnectIPv4 connects using IPv4 - ConnectIPv4() error - - // ConnectIPv6 connects using IPv6 - ConnectIPv6() error - - // Get sends an SNMP GET request - Get(oids []string) (result *SnmpPacket, err error) - - // GetBulk sends an SNMP GETBULK request - // - // For maxRepetitions greater than 255, use BulkWalk() or BulkWalkAll() - GetBulk(oids []string, nonRepeaters uint8, maxRepetitions uint8) (result *SnmpPacket, err error) - - // GetNext sends an SNMP GETNEXT request - GetNext(oids []string) (result *SnmpPacket, err error) - - // Walk retrieves a subtree of values using GETNEXT - a request is made for each - // value, unlike BulkWalk which does this operation in batches. As the tree is - // walked walkFn is called for each new value. The function immediately returns - // an error if either there is an underlaying SNMP error (e.g. GetNext fails), - // or if walkFn returns an error. - Walk(rootOid string, walkFn WalkFunc) error - - // WalkAll is similar to Walk but returns a filled array of all values rather - // than using a callback function to stream results. - WalkAll(rootOid string) (results []SnmpPDU, err error) - - // BulkWalk retrieves a subtree of values using GETBULK. As the tree is - // walked walkFn is called for each new value. The function immediately returns - // an error if either there is an underlaying SNMP error (e.g. GetBulk fails), - // or if walkFn returns an error. - BulkWalk(rootOid string, walkFn WalkFunc) error - - // BulkWalkAll is similar to BulkWalk but returns a filled array of all values - // rather than using a callback function to stream results. 
- BulkWalkAll(rootOid string) (results []SnmpPDU, err error) - - // SendTrap sends a SNMP Trap (v2c/v3 only) - // - // pdus[0] can a pdu of Type TimeTicks (with the desired uint32 epoch - // time). Otherwise a TimeTicks pdu will be prepended, with time set to - // now. This mirrors the behaviour of the Net-SNMP command-line tools. - // - // SendTrap doesn't wait for a return packet from the NMS (Network - // Management Station). - // - // See also Listen() and examples for creating an NMS. - SendTrap(trap SnmpTrap) (result *SnmpPacket, err error) - - // UnmarshalTrap unpacks the SNMP Trap. - UnmarshalTrap(trap []byte, useResponseSecurityParameters bool) (result *SnmpPacket) - - // Set sends an SNMP SET request - Set(pdus []SnmpPDU) (result *SnmpPacket, err error) - - // Check makes checking errors easy, so they actually get a minimal check - Check(err error) - - // Close closes the connection - Close() error - - // Target gets the Target - Target() string - - // SetTarget sets the Target - SetTarget(target string) - - // Port gets the Port - Port() uint16 - - // SetPort sets the Port - SetPort(port uint16) - - // Community gets the Community - Community() string - - // SetCommunity sets the Community - SetCommunity(community string) - - // Version gets the Version - Version() SnmpVersion - - // SetVersion sets the Version - SetVersion(version SnmpVersion) - - // Timeout gets the Timeout - Timeout() time.Duration - - // SetTimeout sets the Timeout - SetTimeout(timeout time.Duration) - - // Retries gets the Retries - Retries() int - - // SetRetries sets the Retries - SetRetries(retries int) - - // GetExponentialTimeout gets the ExponentialTimeout - GetExponentialTimeout() bool - - // SetExponentialTimeout sets the ExponentialTimeout - SetExponentialTimeout(value bool) - - // Logger gets the Logger - Logger() Logger - - // SetLogger sets the Logger - SetLogger(logger Logger) - - // MaxOids gets the MaxOids - MaxOids() int - - // SetMaxOids sets the MaxOids - 
SetMaxOids(maxOids int) - - // MaxRepetitions gets the maxRepetitions - MaxRepetitions() uint8 - - // SetMaxRepetitions sets the maxRepetitions - SetMaxRepetitions(maxRepetitions uint8) - - // NonRepeaters gets the nonRepeaters - NonRepeaters() int - - // SetNonRepeaters sets the nonRepeaters - SetNonRepeaters(nonRepeaters int) - - // MsgFlags gets the MsgFlags - MsgFlags() SnmpV3MsgFlags - - // SetMsgFlags sets the MsgFlags - SetMsgFlags(msgFlags SnmpV3MsgFlags) - - // SecurityModel gets the SecurityModel - SecurityModel() SnmpV3SecurityModel - - // SetSecurityModel sets the SecurityModel - SetSecurityModel(securityModel SnmpV3SecurityModel) - - // SecurityParameters gets the SecurityParameters - SecurityParameters() SnmpV3SecurityParameters - - // SetSecurityParameters sets the SecurityParameters - SetSecurityParameters(securityParameters SnmpV3SecurityParameters) - - // ContextEngineID gets the ContextEngineID - ContextEngineID() string - - // SetContextEngineID sets the ContextEngineID - SetContextEngineID(contextEngineID string) - - // ContextName gets the ContextName - ContextName() string - - // SetContextName sets the ContextName - SetContextName(contextName string) -} - -// snmpHandler is a wrapper around gosnmp -type snmpHandler struct { - GoSNMP -} - -// NewHandler creates a new Handler using gosnmp -func NewHandler() Handler { - return &snmpHandler{ - GoSNMP{ - Port: Default.Port, - Community: Default.Community, - Version: Default.Version, - Timeout: Default.Timeout, - Retries: Default.Retries, - MaxOids: Default.MaxOids, - }, - } -} - -func (x *snmpHandler) Target() string { - // not x.Target because it would reference function Target - return x.GoSNMP.Target -} - -func (x *snmpHandler) SetTarget(target string) { - x.GoSNMP.Target = target -} - -func (x *snmpHandler) Port() uint16 { - return x.GoSNMP.Port -} - -func (x *snmpHandler) SetPort(port uint16) { - x.GoSNMP.Port = port -} - -func (x *snmpHandler) Community() string { - return 
x.GoSNMP.Community -} - -func (x *snmpHandler) SetCommunity(community string) { - x.GoSNMP.Community = community -} - -func (x *snmpHandler) Version() SnmpVersion { - return x.GoSNMP.Version -} - -func (x *snmpHandler) SetVersion(version SnmpVersion) { - x.GoSNMP.Version = version -} - -func (x *snmpHandler) Timeout() time.Duration { - return x.GoSNMP.Timeout -} - -func (x *snmpHandler) SetTimeout(timeout time.Duration) { - x.GoSNMP.Timeout = timeout -} - -func (x *snmpHandler) Retries() int { - return x.GoSNMP.Retries -} - -func (x *snmpHandler) SetRetries(retries int) { - x.GoSNMP.Retries = retries -} - -func (x *snmpHandler) GetExponentialTimeout() bool { - return x.GoSNMP.ExponentialTimeout -} - -func (x *snmpHandler) SetExponentialTimeout(value bool) { - x.GoSNMP.ExponentialTimeout = value -} - -func (x *snmpHandler) Logger() Logger { - return x.GoSNMP.Logger -} - -func (x *snmpHandler) SetLogger(logger Logger) { - x.GoSNMP.Logger = logger -} - -func (x *snmpHandler) MaxOids() int { - return x.GoSNMP.MaxOids -} - -func (x *snmpHandler) SetMaxOids(maxOids int) { - x.GoSNMP.MaxOids = maxOids -} - -func (x *snmpHandler) MaxRepetitions() uint8 { - return x.GoSNMP.MaxRepetitions -} - -func (x *snmpHandler) SetMaxRepetitions(maxRepetitions uint8) { - x.GoSNMP.MaxRepetitions = maxRepetitions -} - -func (x *snmpHandler) NonRepeaters() int { - return x.GoSNMP.NonRepeaters -} - -func (x *snmpHandler) SetNonRepeaters(nonRepeaters int) { - x.GoSNMP.NonRepeaters = nonRepeaters -} - -func (x *snmpHandler) MsgFlags() SnmpV3MsgFlags { - return x.GoSNMP.MsgFlags -} - -func (x *snmpHandler) SetMsgFlags(msgFlags SnmpV3MsgFlags) { - x.GoSNMP.MsgFlags = msgFlags -} - -func (x *snmpHandler) SecurityModel() SnmpV3SecurityModel { - return x.GoSNMP.SecurityModel -} - -func (x *snmpHandler) SetSecurityModel(securityModel SnmpV3SecurityModel) { - x.GoSNMP.SecurityModel = securityModel -} - -func (x *snmpHandler) SecurityParameters() SnmpV3SecurityParameters { - return 
x.GoSNMP.SecurityParameters -} - -func (x *snmpHandler) SetSecurityParameters(securityParameters SnmpV3SecurityParameters) { - x.GoSNMP.SecurityParameters = securityParameters -} - -func (x *snmpHandler) ContextEngineID() string { - return x.GoSNMP.ContextEngineID -} - -func (x *snmpHandler) SetContextEngineID(contextEngineID string) { - x.GoSNMP.ContextEngineID = contextEngineID -} - -func (x *snmpHandler) ContextName() string { - return x.GoSNMP.ContextName -} - -func (x *snmpHandler) SetContextName(contextName string) { - x.GoSNMP.ContextName = contextName -} - -func (x *snmpHandler) Close() error { - // not x.Conn for consistency - return x.GoSNMP.Conn.Close() -} diff --git a/vendor/github.com/gosnmp/gosnmp/local_tests.sh b/vendor/github.com/gosnmp/gosnmp/local_tests.sh deleted file mode 100644 index 8726d4d5..00000000 --- a/vendor/github.com/gosnmp/gosnmp/local_tests.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -go test -v -tags helper -go test -v -tags marshal -go test -v -tags misc -go test -v -tags api -go test -v -tags trap diff --git a/vendor/github.com/gosnmp/gosnmp/marshal.go b/vendor/github.com/gosnmp/gosnmp/marshal.go deleted file mode 100644 index 402a54c3..00000000 --- a/vendor/github.com/gosnmp/gosnmp/marshal.go +++ /dev/null @@ -1,1164 +0,0 @@ -// Copyright 2012 The GoSNMP Authors. All rights reserved. Use of this -// source code is governed by a BSD-style license that can be found in the -// LICENSE file. - -package gosnmp - -import ( - "bytes" - "context" - "encoding/asn1" - "encoding/binary" - "fmt" - "io" - "net" - "runtime" - "strings" - "sync/atomic" - "time" -) - -// -// Remaining globals and definitions located here. -// See http://www.rane.com/note161.html for a succint description of the SNMP -// protocol. 
-// - -// SnmpVersion 1, 2c and 3 implemented -type SnmpVersion uint8 - -// SnmpVersion 1, 2c and 3 implemented -const ( - Version1 SnmpVersion = 0x0 - Version2c SnmpVersion = 0x1 - Version3 SnmpVersion = 0x3 -) - -// SnmpPacket struct represents the entire SNMP Message or Sequence at the -// application layer. -type SnmpPacket struct { - Version SnmpVersion - MsgFlags SnmpV3MsgFlags - SecurityModel SnmpV3SecurityModel - SecurityParameters SnmpV3SecurityParameters // interface - ContextEngineID string - ContextName string - Community string - PDUType PDUType - MsgID uint32 - RequestID uint32 - MsgMaxSize uint32 - Error SNMPError - ErrorIndex uint8 - NonRepeaters uint8 - MaxRepetitions uint8 - Variables []SnmpPDU - Logger Logger // interface - - // v1 traps have a very different format from v2c and v3 traps. - // - // These fields are set via the SnmpTrap parameter to SendTrap(). - SnmpTrap -} - -// SnmpTrap is used to define a SNMP trap, and is passed into SendTrap -type SnmpTrap struct { - Variables []SnmpPDU - - // If true, the trap is an InformRequest, not a trap. This has no effect on - // v1 traps, as Inform is not part of the v1 protocol. - IsInform bool - - // These fields are required for SNMPV1 Trap Headers - Enterprise string - AgentAddress string - GenericTrap int - SpecificTrap int - Timestamp uint -} - -// VarBind struct represents an SNMP Varbind. -type VarBind struct { - Name asn1.ObjectIdentifier - Value asn1.RawValue -} - -// PDUType describes which SNMP Protocol Data Unit is being sent. 
-type PDUType byte - -// The currently supported PDUType's -const ( - Sequence PDUType = 0x30 - GetRequest PDUType = 0xa0 - GetNextRequest PDUType = 0xa1 - GetResponse PDUType = 0xa2 - SetRequest PDUType = 0xa3 - Trap PDUType = 0xa4 // v1 - GetBulkRequest PDUType = 0xa5 - InformRequest PDUType = 0xa6 - SNMPv2Trap PDUType = 0xa7 // v2c, v3 - Report PDUType = 0xa8 -) - -const rxBufSize = 65535 // max size of IPv4 & IPv6 packet - -// Logger is an interface used for debugging. Both Print and -// Printf have the same interfaces as Package Log in the std library. The -// Logger interface is small to give you flexibility in how you do -// your debugging. -// -// For verbose logging to stdout: -// -// gosnmp_logger = log.New(os.Stdout, "", 0) -type Logger interface { - Print(v ...interface{}) - Printf(format string, v ...interface{}) -} - -func (x *GoSNMP) logPrint(v ...interface{}) { - if x.loggingEnabled { - x.Logger.Print(v...) - } -} - -func (x *GoSNMP) logPrintf(format string, v ...interface{}) { - if x.loggingEnabled { - x.Logger.Printf(format, v...) - } -} - -// send/receive one snmp request -func (x *GoSNMP) sendOneRequest(packetOut *SnmpPacket, - wait bool) (result *SnmpPacket, err error) { - allReqIDs := make([]uint32, 0, x.Retries+1) - // allMsgIDs := make([]uint32, 0, x.Retries+1) // unused - - timeout := x.Timeout - withContextDeadline := false - for retries := 0; ; retries++ { - if retries > 0 { - x.logPrintf("Retry number %d. 
Last error was: %v", retries, err) - if withContextDeadline && strings.Contains(err.Error(), "timeout") { - err = context.DeadlineExceeded - break - } - if retries > x.Retries { - if strings.Contains(err.Error(), "timeout") { - err = fmt.Errorf("request timeout (after %d retries)", retries-1) - } - break - } - if x.ExponentialTimeout { - // https://www.webnms.com/snmp/help/snmpapi/snmpv3/v1/timeout.html - timeout *= 2 - } - withContextDeadline = false - } - err = nil - - if x.Context.Err() != nil { - return nil, x.Context.Err() - } - - reqDeadline := time.Now().Add(timeout) - if contextDeadline, ok := x.Context.Deadline(); ok { - if contextDeadline.Before(reqDeadline) { - reqDeadline = contextDeadline - withContextDeadline = true - } - } - - err = x.Conn.SetDeadline(reqDeadline) - if err != nil { - return nil, err - } - - // Request ID is an atomic counter (started at a random value) - reqID := atomic.AddUint32(&(x.requestID), 1) // TODO: fix overflows - allReqIDs = append(allReqIDs, reqID) - - packetOut.RequestID = reqID - - if x.Version == Version3 { - msgID := atomic.AddUint32(&(x.msgID), 1) // TODO: fix overflows - // allMsgIDs = append(allMsgIDs, msgID) // unused - - packetOut.MsgID = msgID - - err = x.initPacket(packetOut) - if err != nil { - break - } - } - if x.loggingEnabled && x.Version == Version3 { - packetOut.SecurityParameters.Log() - } - - var outBuf []byte - outBuf, err = packetOut.marshalMsg() - if err != nil { - // Don't retry - not going to get any better! - err = fmt.Errorf("marshal: %v", err) - break - } - - x.logPrintf("SENDING PACKET: %#+v", *packetOut) - _, err = x.Conn.Write(outBuf) - if err != nil { - continue - } - - // all sends wait for the return packet, except for SNMPv2Trap - if !wait { - return &SnmpPacket{}, nil - } - - waitingResponse: - for { - x.logPrint("WAITING RESPONSE...") - // Receive response and try receiving again on any decoding error. - // Let the deadline abort us if we don't receive a valid response. 
- - var resp []byte - resp, err = x.receive() - if err == io.EOF && strings.HasPrefix(x.Transport, "tcp") { - // EOF on TCP: reconnect and retry. Do not count - // as retry as socket was broken - x.logPrintf("ERROR: EOF. Performing reconnect") - err = x.netConnect() - if err != nil { - return nil, err - } - retries-- - break - } else if err != nil { - // receive error. retrying won't help. abort - break - } - x.logPrintf("GET RESPONSE OK: %+v", resp) - result = new(SnmpPacket) - result.Logger = x.Logger - - result.MsgFlags = packetOut.MsgFlags - if packetOut.SecurityParameters != nil { - result.SecurityParameters = packetOut.SecurityParameters.Copy() - } - - var cursor int - cursor, err = x.unmarshalHeader(resp, result) - if err != nil { - x.logPrintf("ERROR on unmarshall header: %s", err) - continue - } - - if x.Version == Version3 { - useResponseSecurityParameters := false - if usp, ok := x.SecurityParameters.(*UsmSecurityParameters); ok { - if usp.AuthoritativeEngineID == "" { - useResponseSecurityParameters = true - } - } - err = x.testAuthentication(resp, result, useResponseSecurityParameters) - if err != nil { - x.logPrintf("ERROR on Test Authentication on v3: %s", err) - break - } - resp, cursor, _ = x.decryptPacket(resp, cursor, result) - } - - err = x.unmarshalPayload(resp, cursor, result) - if err != nil { - x.logPrintf("ERROR on UnmarshalPayload on v3: %s", err) - continue - } - if len(result.Variables) < 1 { - x.logPrintf("ERROR on UnmarshalPayload on v3: %s", err) - continue - } - - // Detect usmStats report PDUs and go out of this function with all data - // (usmStatsNotInTimeWindows [1.3.6.1.6.3.15.1.1.2.0] will be handled by the calling - // function, and retransmitted. 
All others need to be handled by user code) - if result.Version == Version3 && len(result.Variables) == 1 && result.PDUType == Report { - switch result.Variables[0].Name { - case ".1.3.6.1.6.3.15.1.1.1.0", ".1.3.6.1.6.3.15.1.1.2.0", - ".1.3.6.1.6.3.15.1.1.3.0", ".1.3.6.1.6.3.15.1.1.4.0", - ".1.3.6.1.6.3.15.1.1.5.0", ".1.3.6.1.6.3.15.1.1.6.0": - break waitingResponse - } - } - - validID := false - for _, id := range allReqIDs { - if id == result.RequestID { - validID = true - } - } - if result.RequestID == 0 { - validID = true - } - if !validID { - x.logPrint("ERROR out of order") - continue - } - - break - } - if err != nil { - continue - } - - // Success! - return result, nil - } - - // Return last error - return nil, err -} - -// generic "sender" that negotiate any version of snmp request -// -// all sends wait for the return packet, except for SNMPv2Trap -func (x *GoSNMP) send(packetOut *SnmpPacket, wait bool) (result *SnmpPacket, err error) { - defer func() { - if e := recover(); e != nil { - var buf = make([]byte, 8192) - runtime.Stack(buf, true) - - err = fmt.Errorf("recover: %v\nStack:%v", e, string(buf)) - } - }() - - if x.Conn == nil { - return nil, fmt.Errorf("&GoSNMP.Conn is missing. 
Provide a connection or use Connect()") - } - - if x.Retries < 0 { - x.Retries = 0 - } - x.logPrint("SEND INIT") - if packetOut.Version == Version3 { - x.logPrint("SEND INIT NEGOTIATE SECURITY PARAMS") - if err = x.negotiateInitialSecurityParameters(packetOut); err != nil { - return &SnmpPacket{}, err - } - x.logPrint("SEND END NEGOTIATE SECURITY PARAMS") - } - - // perform request - result, err = x.sendOneRequest(packetOut, wait) - if err != nil { - x.logPrintf("SEND Error on the first Request Error: %s", err) - return result, err - } - - if result.Version == Version3 { - x.logPrintf("SEND STORE SECURITY PARAMS from result: %+v", result) - err = x.storeSecurityParameters(result) - - // detect out-of-time-window error and retransmit with updated auth engine parameters - if len(result.Variables) == 1 && result.Variables[0].Name == ".1.3.6.1.6.3.15.1.1.2.0" { - x.logPrint("WARNING detected out-of-time-window ERROR") - err = x.updatePktSecurityParameters(packetOut) - if err != nil { - x.logPrintf("ERROR updatePktSecurityParameters error: %s", err) - return nil, err - } - result, err = x.sendOneRequest(packetOut, wait) - } - } - - // detect unknown engine id error and retransmit with updated engine id - if len(result.Variables) == 1 && result.Variables[0].Name == ".1.3.6.1.6.3.15.1.1.4.0" { - x.logPrint("WARNING detected unknown enginer id ERROR") //nolint:misspell - err = x.updatePktSecurityParameters(packetOut) - if err != nil { - x.logPrintf("ERROR updatePktSecurityParameters error: %s", err) - return nil, err - } - result, err = x.sendOneRequest(packetOut, wait) - } - return result, err -} -func (packet *SnmpPacket) logPrintf(format string, v ...interface{}) { - if packet.Logger != nil { - packet.Logger.Printf(format, v...) 
- } -} - -// -- Marshalling Logic -------------------------------------------------------- - -// MarshalMsg marshalls a snmp packet, ready for sending across the wire -func (packet *SnmpPacket) MarshalMsg() ([]byte, error) { - return packet.marshalMsg() -} - -// marshal an SNMP message -func (packet *SnmpPacket) marshalMsg() ([]byte, error) { - var err error - buf := new(bytes.Buffer) - - // version - buf.Write([]byte{2, 1, byte(packet.Version)}) - - if packet.Version == Version3 { - buf, err = packet.marshalV3(buf) - if err != nil { - return nil, err - } - } else { - // community - buf.Write([]byte{4, uint8(len(packet.Community))}) - buf.WriteString(packet.Community) - // pdu - pdu, err2 := packet.marshalPDU() - if err2 != nil { - return nil, err2 - } - buf.Write(pdu) - } - - // build up resulting msg - sequence, length then the tail (buf) - msg := new(bytes.Buffer) - msg.WriteByte(byte(Sequence)) - - bufLengthBytes, err2 := marshalLength(buf.Len()) - if err2 != nil { - return nil, err2 - } - msg.Write(bufLengthBytes) - _, err = buf.WriteTo(msg) - if err != nil { - return nil, err - } - - authenticatedMessage, err := packet.authenticate(msg.Bytes()) - if err != nil { - return nil, err - } - - return authenticatedMessage, nil -} - -func (packet *SnmpPacket) marshalSNMPV1TrapHeader() ([]byte, error) { - buf := new(bytes.Buffer) - - // marshal OID - oidBytes, err := marshalOID(packet.Enterprise) - if err != nil { - return nil, fmt.Errorf("unable to marshal OID: %s", err.Error()) - } - buf.Write([]byte{byte(ObjectIdentifier), byte(len(oidBytes))}) - buf.Write(oidBytes) - - // marshal AgentAddress (ip address) - ip := net.ParseIP(packet.AgentAddress) - ipAddressBytes := ipv4toBytes(ip) - buf.Write([]byte{byte(IPAddress), byte(len(ipAddressBytes))}) - buf.Write(ipAddressBytes) - - // marshal GenericTrap. Could just cast GenericTrap to a single byte as IDs greater than 6 are unknown, - // but do it properly. See issue 182. 
- var genericTrapBytes []byte - genericTrapBytes, err = marshalInt32(packet.GenericTrap) - if err != nil { - return nil, fmt.Errorf("unable to marshal SNMPv1 GenericTrap: %s", err.Error()) - } - buf.Write([]byte{byte(Integer), byte(len(genericTrapBytes))}) - buf.Write(genericTrapBytes) - - // marshal SpecificTrap - var specificTrapBytes []byte - specificTrapBytes, err = marshalInt32(packet.SpecificTrap) - if err != nil { - return nil, fmt.Errorf("unable to marshal SNMPv1 SpecificTrap: %s", err.Error()) - } - buf.Write([]byte{byte(Integer), byte(len(specificTrapBytes))}) - buf.Write(specificTrapBytes) - - // marshal timeTicks - timeTickBytes, e := marshalUint32(uint32(packet.Timestamp)) - if e != nil { - return nil, fmt.Errorf("unable to Timestamp: %s", e.Error()) - } - buf.Write([]byte{byte(TimeTicks), byte(len(timeTickBytes))}) - buf.Write(timeTickBytes) - - return buf.Bytes(), nil -} - -// marshal a PDU -func (packet *SnmpPacket) marshalPDU() ([]byte, error) { - buf := new(bytes.Buffer) - - switch packet.PDUType { - case GetBulkRequest: - // requestid - buf.Write([]byte{2, 4}) - err := binary.Write(buf, binary.BigEndian, packet.RequestID) - if err != nil { - return nil, err - } - - // non repeaters - buf.Write([]byte{2, 1, packet.NonRepeaters}) - - // max repetitions - buf.Write([]byte{2, 1, packet.MaxRepetitions}) - - case Trap: - // write SNMP V1 Trap Header fields - snmpV1TrapHeader, err := packet.marshalSNMPV1TrapHeader() - if err != nil { - return nil, err - } - - buf.Write(snmpV1TrapHeader) - - default: - // requestid - buf.Write([]byte{2, 4}) - err := binary.Write(buf, binary.BigEndian, packet.RequestID) - - if err != nil { - return nil, fmt.Errorf("unable to marshal OID: %s", err.Error()) - } - - // error - buf.Write([]byte{2, 1, byte(packet.Error)}) - - // error index - buf.Write([]byte{2, 1, packet.ErrorIndex}) - } - - // varbind list - vbl, err := packet.marshalVBL() - if err != nil { - return nil, err - } - buf.Write(vbl) - - // build up resulting pdu 
- request type, length, then the tail (buf) - pdu := new(bytes.Buffer) - pdu.WriteByte(byte(packet.PDUType)) - - bufLengthBytes, err2 := marshalLength(buf.Len()) - if err2 != nil { - return nil, err2 - } - pdu.Write(bufLengthBytes) - - _, err = buf.WriteTo(pdu) - if err != nil { - return nil, err - } - return pdu.Bytes(), nil -} - -// marshal a varbind list -func (packet *SnmpPacket) marshalVBL() ([]byte, error) { - vblBuf := new(bytes.Buffer) - for _, pdu := range packet.Variables { - pdu := pdu - vb, err := marshalVarbind(&pdu) - if err != nil { - return nil, err - } - vblBuf.Write(vb) - } - - vblBytes := vblBuf.Bytes() - vblLengthBytes, err := marshalLength(len(vblBytes)) - if err != nil { - return nil, err - } - - // FIX does bytes.Buffer give better performance than byte slices? - result := []byte{byte(Sequence)} - result = append(result, vblLengthBytes...) - result = append(result, vblBytes...) - return result, nil -} - -// marshal a varbind -func marshalVarbind(pdu *SnmpPDU) ([]byte, error) { - oid, err := marshalOID(pdu.Name) - if err != nil { - return nil, err - } - pduBuf := new(bytes.Buffer) - tmpBuf := new(bytes.Buffer) - - // Marshal the PDU type into the appropriate BER - switch pdu.Type { - case Null: - ltmp, err2 := marshalLength(len(oid)) - if err2 != nil { - return nil, err2 - } - tmpBuf.Write([]byte{byte(ObjectIdentifier)}) - tmpBuf.Write(ltmp) - tmpBuf.Write(oid) - tmpBuf.Write([]byte{byte(Null), byte(EndOfContents)}) - - ltmp, err2 = marshalLength(tmpBuf.Len()) - if err2 != nil { - return nil, err2 - } - pduBuf.Write([]byte{byte(Sequence)}) - pduBuf.Write(ltmp) - _, err2 = tmpBuf.WriteTo(pduBuf) - if err2 != nil { - return nil, err2 - } - - case Integer: - // Oid - tmpBuf.Write([]byte{byte(ObjectIdentifier), byte(len(oid))}) - tmpBuf.Write(oid) - - // Number - var intBytes []byte - switch value := pdu.Value.(type) { - case byte: - intBytes = []byte{byte(pdu.Value.(int))} - case int: - if intBytes, err = marshalInt32(value); err != nil { - 
return nil, fmt.Errorf("error mashalling PDU Integer: %w", err) - } - default: - return nil, fmt.Errorf("unable to marshal PDU Integer; not byte or int") - } - tmpBuf.Write([]byte{byte(Integer), byte(len(intBytes))}) - tmpBuf.Write(intBytes) - - // Sequence, length of oid + integer, then oid/integer data - pduBuf.WriteByte(byte(Sequence)) - pduBuf.WriteByte(byte(len(oid) + len(intBytes) + 4)) - pduBuf.Write(tmpBuf.Bytes()) - - case Counter32, Gauge32, TimeTicks, Uinteger32: - // Oid - tmpBuf.Write([]byte{byte(ObjectIdentifier), byte(len(oid))}) - tmpBuf.Write(oid) - - // Number - var intBytes []byte - switch value := pdu.Value.(type) { - case uint32: - if intBytes, err = marshalUint32(value); err != nil { - return nil, fmt.Errorf("error marshalling PDU Uinteger32 type from uint32: %w", err) - } - case uint: - if intBytes, err = marshalUint32(uint32(value)); err != nil { - return nil, fmt.Errorf("error marshalling PDU Uinteger32 type from uint: %w", err) - } - default: - return nil, fmt.Errorf("unable to marshal pdu.Type %v; unknown pdu.Value %v[type=%T]", pdu.Type, pdu.Value, pdu.Value) - } - tmpBuf.Write([]byte{byte(pdu.Type), byte(len(intBytes))}) - tmpBuf.Write(intBytes) - - // Sequence, length of oid + integer, then oid/integer data - pduBuf.WriteByte(byte(Sequence)) - pduBuf.WriteByte(byte(len(oid) + len(intBytes) + 4)) - pduBuf.Write(tmpBuf.Bytes()) - - case OctetString, BitString: - // Oid - tmpBuf.Write([]byte{byte(ObjectIdentifier), byte(len(oid))}) - tmpBuf.Write(oid) - - // OctetString - var octetStringBytes []byte - switch value := pdu.Value.(type) { - case []byte: - octetStringBytes = value - case string: - octetStringBytes = []byte(value) - default: - return nil, fmt.Errorf("unable to marshal PDU OctetString; not []byte or string") - } - - var length []byte - length, err = marshalLength(len(octetStringBytes)) - if err != nil { - return nil, fmt.Errorf("unable to marshal PDU length: %w", err) - } - tmpBuf.WriteByte(byte(pdu.Type)) - 
tmpBuf.Write(length) - tmpBuf.Write(octetStringBytes) - - tmpBytes := tmpBuf.Bytes() - - length, err = marshalLength(len(tmpBytes)) - if err != nil { - return nil, fmt.Errorf("unable to marshal PDU data length: %w", err) - } - // Sequence, length of oid + octetstring, then oid/octetstring data - pduBuf.WriteByte(byte(Sequence)) - - pduBuf.Write(length) - pduBuf.Write(tmpBytes) - - case ObjectIdentifier: - // Oid - tmpBuf.Write([]byte{byte(ObjectIdentifier), byte(len(oid))}) - tmpBuf.Write(oid) - value := pdu.Value.(string) - oidBytes, err := marshalOID(value) - if err != nil { - return nil, fmt.Errorf("error marshalling ObjectIdentifier: %w", err) - } - - // Oid data - var length []byte - length, err = marshalLength(len(oidBytes)) - if err != nil { - return nil, fmt.Errorf("error marshalling ObjectIdentifier length: %w", err) - } - tmpBuf.WriteByte(byte(pdu.Type)) - tmpBuf.Write(length) - tmpBuf.Write(oidBytes) - - tmpBytes := tmpBuf.Bytes() - length, err = marshalLength(len(tmpBytes)) - if err != nil { - return nil, fmt.Errorf("error marshalling ObjectIdentifier data length: %w", err) - } - // Sequence, length of oid + oid, then oid/oid data - pduBuf.WriteByte(byte(Sequence)) - pduBuf.Write(length) - pduBuf.Write(tmpBytes) - - case IPAddress: - // Oid - tmpBuf.Write([]byte{byte(ObjectIdentifier), byte(len(oid))}) - tmpBuf.Write(oid) - // OctetString - var ipAddressBytes []byte - switch value := pdu.Value.(type) { - case []byte: - ipAddressBytes = value - case string: - ip := net.ParseIP(value) - ipAddressBytes = ipv4toBytes(ip) - default: - return nil, fmt.Errorf("unable to marshal PDU IPAddress; not []byte or string") - } - tmpBuf.Write([]byte{byte(IPAddress), byte(len(ipAddressBytes))}) - tmpBuf.Write(ipAddressBytes) - // Sequence, length of oid + octetstring, then oid/octetstring data - pduBuf.WriteByte(byte(Sequence)) - pduBuf.WriteByte(byte(len(oid) + len(ipAddressBytes) + 4)) - pduBuf.Write(tmpBuf.Bytes()) - case Counter64, OpaqueFloat, OpaqueDouble: - 
converters := map[Asn1BER]func(interface{}) ([]byte, error){ - Counter64: marshalUint64, - OpaqueFloat: marshalFloat32, - OpaqueDouble: marshalFloat64, - } - tmpBuf.Write([]byte{byte(ObjectIdentifier), byte(len(oid))}) - tmpBuf.Write(oid) - tmpBuf.WriteByte(byte(pdu.Type)) - intBytes, err := converters[pdu.Type](pdu.Value) - if err != nil { - return nil, fmt.Errorf("error converting PDU value type %v to %v: %w", pdu.Value, pdu.Type, err) - } - - tmpBuf.WriteByte(byte(len(intBytes))) - tmpBuf.Write(intBytes) - tmpBytes := tmpBuf.Bytes() - length, err := marshalLength(len(tmpBytes)) - if err != nil { - return nil, fmt.Errorf("error marshalling Float type length: %w", err) - } - // Sequence, length of oid + oid, then oid/oid data - pduBuf.WriteByte(byte(Sequence)) - pduBuf.Write(length) - pduBuf.Write(tmpBytes) - case NoSuchInstance, NoSuchObject, EndOfMibView: - tmpBuf.Write([]byte{byte(ObjectIdentifier), byte(len(oid))}) - tmpBuf.Write(oid) - tmpBuf.WriteByte(byte(pdu.Type)) - tmpBuf.WriteByte(byte(EndOfContents)) - tmpBytes := tmpBuf.Bytes() - length, err := marshalLength(len(tmpBytes)) - if err != nil { - return nil, fmt.Errorf("error marshalling Null type data length: %w", err) - } - // Sequence, length of oid + oid, then oid/oid data - pduBuf.WriteByte(byte(Sequence)) - pduBuf.Write(length) - pduBuf.Write(tmpBytes) - default: - return nil, fmt.Errorf("unable to marshal PDU: unknown BER type %q", pdu.Type) - } - - return pduBuf.Bytes(), nil -} - -// -- Unmarshalling Logic ------------------------------------------------------ - -func (x *GoSNMP) unmarshalHeader(packet []byte, response *SnmpPacket) (int, error) { - if len(packet) < 2 { - return 0, fmt.Errorf("cannot unmarshal empty packet") - } - if response == nil { - return 0, fmt.Errorf("cannot unmarshal response into nil packet reference") - } - - response.Variables = make([]SnmpPDU, 0, 5) - - // Start parsing the packet - cursor := 0 - - // First bytes should be 0x30 - if PDUType(packet[0]) != Sequence { - 
return 0, fmt.Errorf("invalid packet header") - } - - length, cursor := parseLength(packet) - if len(packet) != length { - return 0, fmt.Errorf("error verifying packet sanity: Got %d Expected: %d", len(packet), length) - } - x.logPrintf("Packet sanity verified, we got all the bytes (%d)", length) - - // Parse SNMP Version - rawVersion, count, err := parseRawField(x.Logger, packet[cursor:], "version") - if err != nil { - return 0, fmt.Errorf("error parsing SNMP packet version: %s", err.Error()) - } - - cursor += count - if cursor > len(packet) { - return 0, fmt.Errorf("error parsing SNMP packet, packet length %d cursor %d", len(packet), cursor) - } - - if version, ok := rawVersion.(int); ok { - response.Version = SnmpVersion(version) - x.logPrintf("Parsed version %d", version) - } - - if response.Version == Version3 { - oldcursor := cursor - cursor, err = x.unmarshalV3Header(packet, cursor, response) - if err != nil { - return 0, err - } - x.logPrintf("UnmarshalV3Header done. [with SecurityParameters]. Header Size %d. Last 4 Bytes=[%v]", cursor-oldcursor, packet[cursor-4:cursor]) - } else { - // Parse community - rawCommunity, count, err := parseRawField(x.Logger, packet[cursor:], "community") - if err != nil { - return 0, fmt.Errorf("error parsing community string: %s", err.Error()) - } - cursor += count - if cursor > len(packet) { - return 0, fmt.Errorf("error parsing SNMP packet, packet length %d cursor %d", len(packet), cursor) - } - - if community, ok := rawCommunity.(string); ok { - response.Community = community - x.logPrintf("Parsed community %s", community) - } - } - return cursor, nil -} - -func (x *GoSNMP) unmarshalPayload(packet []byte, cursor int, response *SnmpPacket) error { - var err error - // Parse SNMP packet type - requestType := PDUType(packet[cursor]) - x.logPrintf("UnmarshalPayload Meet PDUType %#x. 
Offset %v", requestType, cursor) - switch requestType { - // known, supported types - case GetResponse, GetNextRequest, GetBulkRequest, Report, SNMPv2Trap, GetRequest, SetRequest, InformRequest: - response.PDUType = requestType - err = x.unmarshalResponse(packet[cursor:], response) - if err != nil { - return fmt.Errorf("error in unmarshalResponse: %s", err.Error()) - } - // If it's an InformRequest, mark the trap. - response.IsInform = (requestType == InformRequest) - case Trap: - response.PDUType = requestType - err = x.unmarshalTrapV1(packet[cursor:], response) - if err != nil { - return fmt.Errorf("error in unmarshalTrapV1: %s", err.Error()) - } - default: - x.logPrintf("UnmarshalPayload Meet Unknown PDUType %#x. Offset %v", requestType, cursor) - return fmt.Errorf("unknown PDUType %#x", requestType) - } - return nil -} - -func (x *GoSNMP) unmarshalResponse(packet []byte, response *SnmpPacket) error { - cursor := 0 - - getResponseLength, cursor := parseLength(packet) - if len(packet) != getResponseLength { - return fmt.Errorf("error verifying Response sanity: Got %d Expected: %d", len(packet), getResponseLength) - } - x.logPrintf("getResponseLength: %d", getResponseLength) - - // Parse Request-ID - rawRequestID, count, err := parseRawField(x.Logger, packet[cursor:], "request id") - if err != nil { - return fmt.Errorf("error parsing SNMP packet request ID: %s", err.Error()) - } - cursor += count - if cursor > len(packet) { - return fmt.Errorf("error parsing SNMP packet, packet length %d cursor %d", len(packet), cursor) - } - - if requestid, ok := rawRequestID.(int); ok { - response.RequestID = uint32(requestid) - x.logPrintf("requestID: %d", response.RequestID) - } - - if response.PDUType == GetBulkRequest { - // Parse Non Repeaters - rawNonRepeaters, count, err := parseRawField(x.Logger, packet[cursor:], "non repeaters") - if err != nil { - return fmt.Errorf("error parsing SNMP packet non repeaters: %s", err.Error()) - } - cursor += count - if cursor > 
len(packet) { - return fmt.Errorf("error parsing SNMP packet, packet length %d cursor %d", len(packet), cursor) - } - - if nonRepeaters, ok := rawNonRepeaters.(int); ok { - response.NonRepeaters = uint8(nonRepeaters) - } - - // Parse Max Repetitions - rawMaxRepetitions, count, err := parseRawField(x.Logger, packet[cursor:], "max repetitions") - if err != nil { - return fmt.Errorf("error parsing SNMP packet max repetitions: %s", err.Error()) - } - cursor += count - if cursor > len(packet) { - return fmt.Errorf("error parsing SNMP packet, packet length %d cursor %d", len(packet), cursor) - } - - if maxRepetitions, ok := rawMaxRepetitions.(int); ok { - response.MaxRepetitions = uint8(maxRepetitions) - } - } else { - // Parse Error-Status - rawError, count, err := parseRawField(x.Logger, packet[cursor:], "error-status") - if err != nil { - return fmt.Errorf("error parsing SNMP packet error: %s", err.Error()) - } - cursor += count - if cursor > len(packet) { - return fmt.Errorf("error parsing SNMP packet, packet length %d cursor %d", len(packet), cursor) - } - - if errorStatus, ok := rawError.(int); ok { - response.Error = SNMPError(errorStatus) - x.logPrintf("errorStatus: %d", uint8(errorStatus)) - } - - // Parse Error-Index - rawErrorIndex, count, err := parseRawField(x.Logger, packet[cursor:], "error index") - if err != nil { - return fmt.Errorf("error parsing SNMP packet error index: %s", err.Error()) - } - cursor += count - if cursor > len(packet) { - return fmt.Errorf("error parsing SNMP packet, packet length %d cursor %d", len(packet), cursor) - } - - if errorindex, ok := rawErrorIndex.(int); ok { - response.ErrorIndex = uint8(errorindex) - x.logPrintf("error-index: %d", uint8(errorindex)) - } - } - - return x.unmarshalVBL(packet[cursor:], response) -} - -func (x *GoSNMP) unmarshalTrapV1(packet []byte, response *SnmpPacket) error { - cursor := 0 - - getResponseLength, cursor := parseLength(packet) - if len(packet) != getResponseLength { - return fmt.Errorf("error 
verifying Response sanity: Got %d Expected: %d", len(packet), getResponseLength) - } - x.logPrintf("getResponseLength: %d", getResponseLength) - - // Parse Enterprise - rawEnterprise, count, err := parseRawField(x.Logger, packet[cursor:], "enterprise") - if err != nil { - return fmt.Errorf("error parsing SNMP packet error: %s", err.Error()) - } - cursor += count - if cursor > len(packet) { - return fmt.Errorf("error parsing SNMP packet, packet length %d cursor %d", len(packet), cursor) - } - - if Enterprise, ok := rawEnterprise.([]int); ok { - response.Enterprise = oidToString(Enterprise) - x.logPrintf("Enterprise: %+v", Enterprise) - } - - // Parse AgentAddress - rawAgentAddress, count, err := parseRawField(x.Logger, packet[cursor:], "agent-address") - if err != nil { - return fmt.Errorf("error parsing SNMP packet error: %s", err.Error()) - } - cursor += count - if cursor > len(packet) { - return fmt.Errorf("error parsing SNMP packet, packet length %d cursor %d", len(packet), cursor) - } - - if AgentAddress, ok := rawAgentAddress.(string); ok { - response.AgentAddress = AgentAddress - x.logPrintf("AgentAddress: %s", AgentAddress) - } - - // Parse GenericTrap - rawGenericTrap, count, err := parseRawField(x.Logger, packet[cursor:], "generic-trap") - if err != nil { - return fmt.Errorf("error parsing SNMP packet error: %s", err.Error()) - } - cursor += count - if cursor > len(packet) { - return fmt.Errorf("error parsing SNMP packet, packet length %d cursor %d", len(packet), cursor) - } - - if GenericTrap, ok := rawGenericTrap.(int); ok { - response.GenericTrap = GenericTrap - x.logPrintf("GenericTrap: %d", GenericTrap) - } - - // Parse SpecificTrap - rawSpecificTrap, count, err := parseRawField(x.Logger, packet[cursor:], "specific-trap") - if err != nil { - return fmt.Errorf("error parsing SNMP packet error: %s", err.Error()) - } - cursor += count - if cursor > len(packet) { - return fmt.Errorf("error parsing SNMP packet, packet length %d cursor %d", len(packet), 
cursor) - } - - if SpecificTrap, ok := rawSpecificTrap.(int); ok { - response.SpecificTrap = SpecificTrap - x.logPrintf("SpecificTrap: %d", SpecificTrap) - } - - // Parse TimeStamp - rawTimestamp, count, err := parseRawField(x.Logger, packet[cursor:], "time-stamp") - if err != nil { - return fmt.Errorf("error parsing SNMP packet error: %s", err.Error()) - } - cursor += count - if cursor > len(packet) { - return fmt.Errorf("error parsing SNMP packet, packet length %d cursor %d", len(packet), cursor) - } - - if Timestamp, ok := rawTimestamp.(uint); ok { - response.Timestamp = Timestamp - x.logPrintf("Timestamp: %d", Timestamp) - } - - return x.unmarshalVBL(packet[cursor:], response) -} - -// unmarshal a Varbind list -func (x *GoSNMP) unmarshalVBL(packet []byte, response *SnmpPacket) error { - var cursor, cursorInc int - var vblLength int - - if len(packet) == 0 || cursor > len(packet) { - return fmt.Errorf("truncated packet when unmarshalling a VBL, got length %d cursor %d", len(packet), cursor) - } - - if packet[cursor] != 0x30 { - return fmt.Errorf("expected a sequence when unmarshalling a VBL, got %x", packet[cursor]) - } - - vblLength, cursor = parseLength(packet) - if vblLength == 0 || vblLength > len(packet) { - return fmt.Errorf("truncated packet when unmarshalling a VBL, packet length %d cursor %d", len(packet), cursor) - } - - if len(packet) != vblLength { - return fmt.Errorf("error verifying: packet length %d vbl length %d", len(packet), vblLength) - } - x.logPrintf("vblLength: %d", vblLength) - - // check for an empty response - if vblLength == 2 && packet[1] == 0x00 { - return nil - } - - // Loop & parse Varbinds - for cursor < vblLength { - if packet[cursor] != 0x30 { - return fmt.Errorf("expected a sequence when unmarshalling a VB, got %x", packet[cursor]) - } - - _, cursorInc = parseLength(packet[cursor:]) - cursor += cursorInc - if cursor > len(packet) { - return fmt.Errorf("error parsing OID Value: packet %d cursor %d", len(packet), cursor) - } - - 
// Parse OID - rawOid, oidLength, err := parseRawField(x.Logger, packet[cursor:], "OID") - if err != nil { - return fmt.Errorf("error parsing OID Value: %s", err.Error()) - } - - cursor += oidLength - if cursor > len(packet) { - return fmt.Errorf("error parsing OID Value: truncated, packet length %d cursor %d", len(packet), cursor) - } - - var oid []int - var ok bool - if oid, ok = rawOid.([]int); !ok { - return fmt.Errorf("unable to type assert rawOid |%v| to []int", rawOid) - } - oidStr := oidToString(oid) - x.logPrintf("OID: %s", oidStr) - - // Parse Value - v, err := x.decodeValue(packet[cursor:], "value") - if err != nil { - return fmt.Errorf("error decoding value: %v", err) - } - - valueLength, _ := parseLength(packet[cursor:]) - cursor += valueLength - if cursor > len(packet) { - return fmt.Errorf("error decoding OID Value: truncated, packet length %d cursor %d", len(packet), cursor) - } - - response.Variables = append(response.Variables, SnmpPDU{oidStr, v.Type, v.Value}) - } - return nil -} - -// receive response from network and read into a byte array -func (x *GoSNMP) receive() ([]byte, error) { - n, err := x.Conn.Read(x.rxBuf[:]) - if err == io.EOF { - return nil, err - } else if err != nil { - return nil, fmt.Errorf("error reading from socket: %s", err.Error()) - } - - if n == rxBufSize { - // This should never happen unless we're using something like a unix domain socket. 
- return nil, fmt.Errorf("response buffer too small") - } - - resp := make([]byte, n) - copy(resp, x.rxBuf[:n]) - return resp, nil -} diff --git a/vendor/github.com/gosnmp/gosnmp/snmp_users.sh b/vendor/github.com/gosnmp/gosnmp/snmp_users.sh deleted file mode 100644 index e96f9377..00000000 --- a/vendor/github.com/gosnmp/gosnmp/snmp_users.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash - -cat << EOF >> /etc/snmp/snmpd.conf -createUser noAuthNoPrivUser -createUser authMD5OnlyUser MD5 testingpass0123456789 -createUser authSHAOnlyUser SHA testingpass9876543210 -createUser authSHA224OnlyUser SHA224 testingpass5123456 -createUser authSHA256OnlyUser SHA256 testingpass5223456 -createUser authSHA384OnlyUser SHA384 testingpass5323456 -createUser authSHA512OnlyUser SHA512 testingpass5423456 - -createUser authMD5PrivDESUser MD5 testingpass9876543210 DES -createUser authSHAPrivDESUser SHA testingpassabc6543210 DES -createUser authSHA224PrivDESUser SHA224 testingpass6123456 DES -createUser authSHA256PrivDESUser SHA256 testingpass6223456 DES -createUser authSHA384PrivDESUser SHA384 testingpass6323456 DES -createUser authSHA512PrivDESUser SHA512 testingpass6423456 DES - -createUser authMD5PrivAESUser MD5 AEStestingpass9876543210 AES -createUser authSHAPrivAESUser SHA AEStestingpassabc6543210 AES -createUser authSHA224PrivAESUser SHA224 testingpass7123456 AES -createUser authSHA256PrivAESUser SHA256 testingpass7223456 AES -createUser authSHA384PrivAESUser SHA384 testingpass7323456 AES -createUser authSHA512PrivAESUser SHA512 testingpass7423456 AES - -rouser noAuthNoPrivUser noauth -rouser authMD5OnlyUser auth -rouser authSHAOnlyUser auth -rouser authSHA224OnlyUser auth -rouser authSHA256OnlyUser auth -rouser authSHA384OnlyUser auth -rouser authSHA512OnlyUser auth - -rouser authMD5PrivDESUser authPriv -rouser authSHAPrivDESUser authPriv -rouser authSHA224PrivDESUser authPriv -rouser authSHA256PrivDESUser authPriv -rouser authSHA384PrivDESUser authPriv -rouser authSHA512PrivDESUser 
authPriv - -rouser authMD5PrivAESUser authPriv -rouser authSHAPrivAESUser authPriv -rouser authSHA224PrivAESUser authPriv -rouser authSHA256PrivAESUser authPriv -rouser authSHA384PrivAESUser authPriv -rouser authSHA512PrivAESUser authPriv -EOF - -# enable ipv6 TODO restart fails - need to enable ipv6 on interface; spin up a Linux instance to check this -# sed -i -e '/agentAddress/ s/^/#/' -e '/agentAddress/ s/^##//' /etc/snmp/snmpd.conf diff --git a/vendor/github.com/gosnmp/gosnmp/snmperror_string.go b/vendor/github.com/gosnmp/gosnmp/snmperror_string.go deleted file mode 100644 index 0b191522..00000000 --- a/vendor/github.com/gosnmp/gosnmp/snmperror_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type SNMPError"; DO NOT EDIT. - -package gosnmp - -import "strconv" - -const _SNMPError_name = "NoErrorTooBigNoSuchNameBadValueReadOnlyGenErrNoAccessWrongTypeWrongLengthWrongEncodingWrongValueNoCreationInconsistentValueResourceUnavailableCommitFailedUndoFailedAuthorizationErrorNotWritableInconsistentName" - -var _SNMPError_index = [...]uint8{0, 7, 13, 23, 31, 39, 45, 53, 62, 73, 86, 96, 106, 123, 142, 154, 164, 182, 193, 209} - -func (i SNMPError) String() string { - if i >= SNMPError(len(_SNMPError_index)-1) { - return "SNMPError(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _SNMPError_name[_SNMPError_index[i]:_SNMPError_index[i+1]] -} diff --git a/vendor/github.com/gosnmp/gosnmp/snmpv3authprotocol_string.go b/vendor/github.com/gosnmp/gosnmp/snmpv3authprotocol_string.go deleted file mode 100644 index 457553ef..00000000 --- a/vendor/github.com/gosnmp/gosnmp/snmpv3authprotocol_string.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by "stringer -type=SnmpV3AuthProtocol"; DO NOT EDIT. - -package gosnmp - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[NoAuth-1] - _ = x[MD5-2] - _ = x[SHA-3] - _ = x[SHA224-4] - _ = x[SHA256-5] - _ = x[SHA384-6] - _ = x[SHA512-7] -} - -const _SnmpV3AuthProtocol_name = "NoAuthMD5SHASHA224SHA256SHA384SHA512" - -var _SnmpV3AuthProtocol_index = [...]uint8{0, 6, 9, 12, 18, 24, 30, 36} - -func (authProtocol SnmpV3AuthProtocol) String() string { - authProtocol -= 1 - if authProtocol >= SnmpV3AuthProtocol(len(_SnmpV3AuthProtocol_index)-1) { - return "SnmpV3AuthProtocol(" + strconv.FormatInt(int64(authProtocol+1), 10) + ")" - } - return _SnmpV3AuthProtocol_name[_SnmpV3AuthProtocol_index[authProtocol]:_SnmpV3AuthProtocol_index[authProtocol+1]] -} diff --git a/vendor/github.com/gosnmp/gosnmp/snmpv3privprotocol_string.go b/vendor/github.com/gosnmp/gosnmp/snmpv3privprotocol_string.go deleted file mode 100644 index 4e34a92a..00000000 --- a/vendor/github.com/gosnmp/gosnmp/snmpv3privprotocol_string.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by "stringer -type=SnmpV3PrivProtocol"; DO NOT EDIT. - -package gosnmp - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[NoPriv-1] - _ = x[DES-2] - _ = x[AES-3] - _ = x[AES192-4] - _ = x[AES256-5] - _ = x[AES192C-6] - _ = x[AES256C-7] -} - -const _SnmpV3PrivProtocol_name = "NoPrivDESAESAES192AES256AES192CAES256C" - -var _SnmpV3PrivProtocol_index = [...]uint8{0, 6, 9, 12, 18, 24, 31, 38} - -func (i SnmpV3PrivProtocol) String() string { - i -= 1 - if i >= SnmpV3PrivProtocol(len(_SnmpV3PrivProtocol_index)-1) { - return "SnmpV3PrivProtocol(" + strconv.FormatInt(int64(i+1), 10) + ")" - } - return _SnmpV3PrivProtocol_name[_SnmpV3PrivProtocol_index[i]:_SnmpV3PrivProtocol_index[i+1]] -} diff --git a/vendor/github.com/gosnmp/gosnmp/trap.go b/vendor/github.com/gosnmp/gosnmp/trap.go deleted file mode 100644 index a65e0bdf..00000000 --- a/vendor/github.com/gosnmp/gosnmp/trap.go +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright 2012 The GoSNMP Authors. All rights reserved. Use of this -// source code is governed by a BSD-style license that can be found in the -// LICENSE file. - -package gosnmp - -import ( - "fmt" - "net" - "strings" - "sync" - "sync/atomic" - "time" -) - -// -// Sending Traps ie GoSNMP acting as an Agent -// - -// SendTrap sends a SNMP Trap (v2c/v3 only) -// -// pdus[0] can a pdu of Type TimeTicks (with the desired uint32 epoch -// time). Otherwise a TimeTicks pdu will be prepended, with time set to -// now. This mirrors the behaviour of the Net-SNMP command-line tools. -// -// SendTrap doesn't wait for a return packet from the NMS (Network -// Management Station). -// -// See also Listen() and examples for creating an NMS. 
-// -// NOTE: the trap code is currently unreliable when working with snmpv3 - pull requests welcome -func (x *GoSNMP) SendTrap(trap SnmpTrap) (result *SnmpPacket, err error) { - var pdutype PDUType - - if len(trap.Variables) == 0 { - return nil, fmt.Errorf("function SendTrap requires at least 1 PDU") - } - - if trap.Variables[0].Type == TimeTicks { - // check is uint32 - if _, ok := trap.Variables[0].Value.(uint32); !ok { - return nil, fmt.Errorf("function SendTrap TimeTick must be uint32") - } - } - - switch x.Version { - case Version2c, Version3: - // Default to a v2 trap. - pdutype = SNMPv2Trap - - // If it's an inform, do that instead. - if trap.IsInform { - pdutype = InformRequest - } - - if trap.Variables[0].Type != TimeTicks { - now := uint32(time.Now().Unix()) - timetickPDU := SnmpPDU{"1.3.6.1.2.1.1.3.0", TimeTicks, now} - // prepend timetickPDU - trap.Variables = append([]SnmpPDU{timetickPDU}, trap.Variables...) - } - - case Version1: - pdutype = Trap - if len(trap.Enterprise) == 0 { - return nil, fmt.Errorf("function SendTrap for SNMPV1 requires an Enterprise OID") - } - if len(trap.AgentAddress) == 0 { - return nil, fmt.Errorf("function SendTrap for SNMPV1 requires an Agent Address") - } - - default: - err = fmt.Errorf("function SendTrap doesn't support %s", x.Version) - return nil, err - } - - packetOut := x.mkSnmpPacket(pdutype, trap.Variables, 0, 0) - if x.Version == Version1 { - packetOut.Enterprise = trap.Enterprise - packetOut.AgentAddress = trap.AgentAddress - packetOut.GenericTrap = trap.GenericTrap - packetOut.SpecificTrap = trap.SpecificTrap - packetOut.Timestamp = trap.Timestamp - } - - // all sends wait for the return packet, except for SNMPv2Trap - // -> wait is only for informs - return x.send(packetOut, trap.IsInform) -} - -// -// Receiving Traps ie GoSNMP acting as an NMS (Network Management -// Station). 
-// -// GoSNMP.unmarshal() currently only handles SNMPv2Trap -// - -// A TrapListener defines parameters for running a SNMP Trap receiver. -// nil values will be replaced by default values. -type TrapListener struct { - sync.Mutex - - // Params is a reference to the TrapListener's "parent" GoSNMP instance. - Params *GoSNMP - - // OnNewTrap handles incoming Trap and Inform PDUs. - OnNewTrap TrapHandlerFunc - - // These unexported fields are for letting test cases - // know we are ready. - conn *net.UDPConn - proto string - - finish int32 // Atomic flag; set to 1 when closing connection - done chan bool - listening chan bool -} - -// TrapHandlerFunc is a callback function type which receives SNMP Trap and -// Inform packets when they are received. If this callback is null, Trap and -// Inform PDUs will not be received (Inform responses will still be sent, -// however). This callback should not modify the contents of the SnmpPacket -// nor the UDPAddr passed to it, and it should copy out any values it wishes to -// use instead of retaining references in order to avoid memory fragmentation. -// -// The general effect of received Trap and Inform packets do not differ for the -// receiver, and the response is handled by the caller of the handler, so there -// is no need for the application to handle Informs any different than Traps. -// Nonetheless, the packet's Type field can be examined to determine what type -// of event this is for e.g. statistics gathering functions, etc. -type TrapHandlerFunc func(s *SnmpPacket, u *net.UDPAddr) - -// NewTrapListener returns an initialized TrapListener. -// -// NOTE: the trap code is currently unreliable when working with snmpv3 - pull requests welcome -func NewTrapListener() *TrapListener { - tl := &TrapListener{ - finish: 0, - done: make(chan bool), - // Buffered because one doesn't have to block on it. 
- listening: make(chan bool, 1), - } - - return tl -} - -// Listening returns a sentinel channel on which one can block -// until the listener is ready to receive requests. -// -// NOTE: the trap code is currently unreliable when working with snmpv3 - pull requests welcome -func (t *TrapListener) Listening() <-chan bool { - t.Lock() - defer t.Unlock() - return t.listening -} - -// Close terminates the listening on TrapListener socket -// -// NOTE: the trap code is currently unreliable when working with snmpv3 - pull requests welcome -func (t *TrapListener) Close() { - // Prevent concurrent calls to Close - if atomic.CompareAndSwapInt32(&t.finish, 0, 1) { - // TODO there's bugs here - if t.conn == nil { - return - } - if t.conn.LocalAddr().Network() == udp { - t.conn.Close() - } - <-t.done - } -} - -func (t *TrapListener) listenUDP(addr string) error { - // udp - - udpAddr, err := net.ResolveUDPAddr(t.proto, addr) - if err != nil { - return err - } - t.conn, err = net.ListenUDP(udp, udpAddr) - if err != nil { - return err - } - - defer t.conn.Close() - - // Mark that we are listening now. - t.listening <- true - - for { - switch { - case atomic.LoadInt32(&t.finish) == 1: - t.done <- true - return nil - - default: - var buf [4096]byte - rlen, remote, err := t.conn.ReadFromUDP(buf[:]) - if err != nil { - if atomic.LoadInt32(&t.finish) == 1 { - // err most likely comes from reading from a closed connection - continue - } - t.Params.logPrintf("TrapListener: error in read %s\n", err) - continue - } - - msg := buf[:rlen] - traps := t.Params.UnmarshalTrap(msg, false) - - if traps != nil { - // Here we assume that t.OnNewTrap will not alter the contents - // of the PDU (per documentation, because Go does not have - // compile-time const checking). We don't pass a copy because - // the SnmpPacket type is somewhat large, but we could without - // violating any implicit or explicit spec. 
- t.OnNewTrap(traps, remote) - - // If it was an Inform request, we need to send a response. - if traps.PDUType == InformRequest { //nolint:whitespace - - // Reuse the packet, since we're supposed to send it back - // with the exact same variables unless there's an error. - // Change the PDUType to the response, though. - traps.PDUType = GetResponse - - // If the response can be sent, the error-status is - // supposed to be set to noError and the error-index set to - // zero. - traps.Error = NoError - traps.ErrorIndex = 0 - - // TODO: Check that the message marshalled is not too large - // for the originator to accept and if so, send a tooBig - // error PDU per RFC3416 section 4.2.7. This maximum size, - // however, does not have a well-defined mechanism in the - // RFC other than using the path MTU (which is difficult to - // determine), so it's left to future implementations. - ob, err := traps.marshalMsg() - if err != nil { - return fmt.Errorf("error marshaling INFORM response: %v", err) - } - - // Send the return packet back. - count, err := t.conn.WriteTo(ob, remote) - if err != nil { - return fmt.Errorf("error sending INFORM response: %v", err) - } - - // This isn't fatal, but should be logged. - if count != len(ob) { - t.Params.logPrintf("Failed to send all bytes of INFORM response!\n") - } - } - } - } - } -} - -func (t *TrapListener) handleTCPRequest(conn net.Conn) { - // Make a buffer to hold incoming data. - buf := make([]byte, 4096) - // Read the incoming connection into the buffer. - reqLen, err := conn.Read(buf) - if err != nil { - t.Params.logPrintf("TrapListener: error in read %s\n", err) - return - } - - msg := buf[:reqLen] - traps := t.Params.UnmarshalTrap(msg, false) - - if traps != nil { - // TODO: lying for backward compatibility reason - create UDP Address ... not nice - r, _ := net.ResolveUDPAddr("", conn.RemoteAddr().String()) - t.OnNewTrap(traps, r) - } - // Close the connection when you're done with it. 
- conn.Close() -} - -func (t *TrapListener) listenTCP(addr string) error { - tcpAddr, err := net.ResolveTCPAddr(t.proto, addr) - if err != nil { - return err - } - - l, err := net.ListenTCP("tcp", tcpAddr) - if err != nil { - return err - } - - defer l.Close() - - // Mark that we are listening now. - t.listening <- true - - for { - switch { - case atomic.LoadInt32(&t.finish) == 1: - t.done <- true - return nil - default: - - // Listen for an incoming connection. - conn, err := l.Accept() - fmt.Printf("ACCEPT: %s", conn) - if err != nil { - fmt.Println("error accepting: ", err.Error()) - return err - } - // Handle connections in a new goroutine. - go t.handleTCPRequest(conn) - } - } -} - -// Listen listens on the UDP address addr and calls the OnNewTrap -// function specified in *TrapListener for every trap received. -// -// NOTE: the trap code is currently unreliable when working with snmpv3 - pull requests welcome -func (t *TrapListener) Listen(addr string) error { - if t.Params == nil { - t.Params = Default - } - - // TODO TODO returning an error cause the following to hang/break - // TestSendTrapBasic - // TestSendTrapWithoutWaitingOnListen - // TestSendV1Trap - _ = t.Params.validateParameters() - - if t.OnNewTrap == nil { - t.OnNewTrap = t.debugTrapHandler - } - - splitted := strings.SplitN(addr, "://", 2) - t.proto = udp - if len(splitted) > 1 { - t.proto = splitted[0] - addr = splitted[1] - } - - if t.proto == "tcp" { - return t.listenTCP(addr) - } else if t.proto == udp { - return t.listenUDP(addr) - } - - return fmt.Errorf("not implemented network protocol: %s [use: tcp/udp]", t.proto) -} - -// Default trap handler -func (t *TrapListener) debugTrapHandler(s *SnmpPacket, u *net.UDPAddr) { - t.Params.logPrintf("got trapdata from %+v: %+v\n", u, s) -} - -// UnmarshalTrap unpacks the SNMP Trap. 
-// -// NOTE: the trap code is currently unreliable when working with snmpv3 - pull requests welcome -func (x *GoSNMP) UnmarshalTrap(trap []byte, useResponseSecurityParameters bool) (result *SnmpPacket) { - result = new(SnmpPacket) - - if x.SecurityParameters != nil { - err := x.SecurityParameters.initSecurityKeys() - if err != nil { - return nil - } - result.SecurityParameters = x.SecurityParameters.Copy() - } - - cursor, err := x.unmarshalHeader(trap, result) - if err != nil { - x.logPrintf("UnmarshalTrap: %s\n", err) - return nil - } - - if result.Version == Version3 { - if result.SecurityModel == UserSecurityModel { - err = x.testAuthentication(trap, result, useResponseSecurityParameters) - if err != nil { - x.logPrintf("UnmarshalTrap v3 auth: %s\n", err) - return nil - } - } - - trap, cursor, err = x.decryptPacket(trap, cursor, result) - if err != nil { - x.logPrintf("UnmarshalTrap v3 decrypt: %s\n", err) - return nil - } - } - err = x.unmarshalPayload(trap, cursor, result) - if err != nil { - x.logPrintf("UnmarshalTrap: %s\n", err) - return nil - } - return result -} diff --git a/vendor/github.com/gosnmp/gosnmp/trap.md b/vendor/github.com/gosnmp/gosnmp/trap.md deleted file mode 100644 index e4c33f88..00000000 --- a/vendor/github.com/gosnmp/gosnmp/trap.md +++ /dev/null @@ -1,100 +0,0 @@ -# setup for working on traps - -``` -$ sudo aptitude -y install snmp-mibs-downloader snmp snmpd snmp-mibs-downloader -``` - -In the file `/etc/snmp/snmp.conf` -``` -mibs +ALL -``` - -In the file `/etc/snmp/snmpd.conf` - -``` -comment out: - agentAddress udp:127.0.0.1:161 - -uncomment: - agentAddress udp:161,udp6:[::1]:161 - -comment out: - rocommunity public default -V systemonly - -uncomment: - rocommunity public 10.0.0.0/16 - -comment out: - trapsink localhost public - -uncomment: - trap2sink localhost public -``` - -Create the file `~/.snmp/snmp.conf` with the contents: - -``` -# ~ expansion fails -persistentDir /home/sonia/.snmp_persist -``` - -``` -$ sudo 
/etc/init.d/snmpd restart -``` - -# test - -``` -snmptrap -v 2c -c public 192.168.1.10 '' SNMPv2-MIB::system SNMPv2-MIB::sysDescr.0 s "red laptop" SNMPv2-MIB::sysServices.0 i "5" SNMPv2-MIB::sysObjectID o "1.3.6.1.4.1.2.3.4.5" -``` - -# tshark, wireshark - -``` -sudo aptitude -y install wireshark tshark -sudo dpkg-reconfigure wireshark-common # allow captures -sudo usermod -a -G wireshark sonia -sudo setcap cap_net_raw,cap_net_admin=eip /usr/bin/dumpcap -sudo getcap /usr/bin/dumpcap -# still 'Couldn't run /usr/bin/dumpcap in child process', so nuke it -sudo chmod 777 /usr/bin/dumpcap -``` -Logout, login to apply wireshark and tshark permissions - -In a second terminal, run: - -``` -tshark -i eth0 -f "port 161" -w trap.pcap -``` - -# snmptrap and MIBs - -``` -The TYPE is a single character, one of: - i INTEGER INTEGER - u UNSIGNED - c COUNTER32 - s STRING DisplayString - x HEX STRING - d DECIMAL STRING - n NULLOBJ - o OBJID OBJECT IDENTIFIER - t TIMETICKS - a IPADDRESS - b BITS -``` - -# finding MIBs - -Look in the file `/usr/share/mibs/ietf/SNMPv2-MIB`. Here are some -example lines: - -``` -line:77 sysDescr -line:88 sysObjectID -line:146 sysServices -``` - -For a gui MIB browser: - -https://l3net.wordpress.com/2013/05/12/installing-net-snmp-mibs-on-ubuntu-and-debian/ diff --git a/vendor/github.com/gosnmp/gosnmp/v3.go b/vendor/github.com/gosnmp/gosnmp/v3.go deleted file mode 100644 index 28264b64..00000000 --- a/vendor/github.com/gosnmp/gosnmp/v3.go +++ /dev/null @@ -1,486 +0,0 @@ -// Copyright 2012 The GoSNMP Authors. All rights reserved. Use of this -// source code is governed by a BSD-style license that can be found in the -// LICENSE file. - -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package gosnmp - -import ( - "bytes" - "encoding/binary" - "fmt" - "runtime" -) - -// SnmpV3MsgFlags contains various message flags to describe Authentication, Privacy, and whether a report PDU must be sent. -type SnmpV3MsgFlags uint8 - -// Possible values of SnmpV3MsgFlags -const ( - NoAuthNoPriv SnmpV3MsgFlags = 0x0 // No authentication, and no privacy - AuthNoPriv SnmpV3MsgFlags = 0x1 // Authentication and no privacy - AuthPriv SnmpV3MsgFlags = 0x3 // Authentication and privacy - Reportable SnmpV3MsgFlags = 0x4 // Report PDU must be sent. -) - -// SnmpV3SecurityModel describes the security model used by a SnmpV3 connection -type SnmpV3SecurityModel uint8 - -// UserSecurityModel is the only SnmpV3SecurityModel currently implemented. -const ( - UserSecurityModel SnmpV3SecurityModel = 3 -) - -// SnmpV3SecurityParameters is a generic interface type to contain various implementations of SnmpV3SecurityParameters -type SnmpV3SecurityParameters interface { - Log() - Copy() SnmpV3SecurityParameters - Description() string - validate(flags SnmpV3MsgFlags) error - init(log Logger) error - initPacket(packet *SnmpPacket) error - discoveryRequired() *SnmpPacket - getDefaultContextEngineID() string - setSecurityParameters(in SnmpV3SecurityParameters) error - marshal(flags SnmpV3MsgFlags) ([]byte, error) - unmarshal(flags SnmpV3MsgFlags, packet []byte, cursor int) (int, error) - authenticate(packet []byte) error - isAuthentic(packetBytes []byte, packet *SnmpPacket) (bool, error) - encryptPacket(scopedPdu []byte) ([]byte, error) - decryptPacket(packet []byte, cursor int) ([]byte, error) - initSecurityKeys() error -} - -func (x *GoSNMP) validateParametersV3() error { - // update following code if you implement a new security model - if x.SecurityModel != UserSecurityModel { - return fmt.Errorf("the SNMPV3 User Security Model is the only SNMPV3 security model currently implemented") - } - if x.SecurityParameters == nil { - return fmt.Errorf("SNMPV3 SecurityParameters must be 
set") - } - - return x.SecurityParameters.validate(x.MsgFlags) -} - -// authenticate the marshalled result of a snmp version 3 packet -func (packet *SnmpPacket) authenticate(msg []byte) ([]byte, error) { - defer func() { - if e := recover(); e != nil { - var buf = make([]byte, 8192) - runtime.Stack(buf, true) - fmt.Printf("[v3::authenticate]recover: %v. Stack=%v\n", e, string(buf)) - } - }() - if packet.Version != Version3 { - return msg, nil - } - if packet.MsgFlags&AuthNoPriv > 0 { - err := packet.SecurityParameters.authenticate(msg) - if err != nil { - return nil, err - } - } - - return msg, nil -} - -func (x *GoSNMP) testAuthentication(packet []byte, result *SnmpPacket, useResponseSecurityParameters bool) error { - if x.Version != Version3 { - return fmt.Errorf("testAuthentication called with non Version3 connection") - } - msgFlags := x.MsgFlags - if useResponseSecurityParameters { - msgFlags = result.MsgFlags - } - - if msgFlags&AuthNoPriv > 0 { - var authentic bool - var err error - if useResponseSecurityParameters { - authentic, err = result.SecurityParameters.isAuthentic(packet, result) - } else { - authentic, err = x.SecurityParameters.isAuthentic(packet, result) - } - if err != nil { - return err - } - if !authentic { - return fmt.Errorf("incoming packet is not authentic, discarding") - } - } - - return nil -} - -func (x *GoSNMP) initPacket(packetOut *SnmpPacket) error { - if x.MsgFlags&AuthPriv > AuthNoPriv { - return x.SecurityParameters.initPacket(packetOut) - } - - return nil -} - -// http://tools.ietf.org/html/rfc2574#section-2.2.3 This code does not -// check if the last message received was more than 150 seconds ago The -// snmpds that this code was tested on emit an 'out of time window' -// error with the new time and this code will retransmit when that is -// received. 
-func (x *GoSNMP) negotiateInitialSecurityParameters(packetOut *SnmpPacket) error { - if x.Version != Version3 || packetOut.Version != Version3 { - return fmt.Errorf("negotiateInitialSecurityParameters called with non Version3 connection or packet") - } - - if x.SecurityModel != packetOut.SecurityModel { - return fmt.Errorf("connection security model does not match security model defined in packet") - } - - if discoveryPacket := packetOut.SecurityParameters.discoveryRequired(); discoveryPacket != nil { - discoveryPacket.ContextName = x.ContextName - result, err := x.sendOneRequest(discoveryPacket, true) - - if err != nil { - return err - } - - err = x.storeSecurityParameters(result) - if err != nil { - return err - } - - err = x.updatePktSecurityParameters(packetOut) - if err != nil { - return err - } - } else { - err := packetOut.SecurityParameters.initSecurityKeys() - if err == nil { - return err - } - } - - return nil -} - -// save the connection security parameters after a request/response -func (x *GoSNMP) storeSecurityParameters(result *SnmpPacket) error { - if x.Version != Version3 || result.Version != Version3 { - return fmt.Errorf("storeParameters called with non Version3 connection or packet") - } - - if x.SecurityModel != result.SecurityModel { - return fmt.Errorf("connection security model does not match security model extracted from packet") - } - - if x.ContextEngineID == "" { - x.ContextEngineID = result.SecurityParameters.getDefaultContextEngineID() - } - - return x.SecurityParameters.setSecurityParameters(result.SecurityParameters) -} - -// update packet security parameters to match connection security parameters -func (x *GoSNMP) updatePktSecurityParameters(packetOut *SnmpPacket) error { - if x.Version != Version3 || packetOut.Version != Version3 { - return fmt.Errorf("updatePktSecurityParameters called with non Version3 connection or packet") - } - - if x.SecurityModel != packetOut.SecurityModel { - return fmt.Errorf("connection security model 
does not match security model extracted from packet") - } - - err := packetOut.SecurityParameters.setSecurityParameters(x.SecurityParameters) - if err != nil { - return err - } - - if packetOut.ContextEngineID == "" { - packetOut.ContextEngineID = x.ContextEngineID - } - - return nil -} - -func (packet *SnmpPacket) marshalV3(buf *bytes.Buffer) (*bytes.Buffer, error) { //nolint:interfacer - emptyBuffer := new(bytes.Buffer) // used when returning errors - - header, err := packet.marshalV3Header() - if err != nil { - return emptyBuffer, err - } - buf.Write([]byte{byte(Sequence), byte(len(header))}) - packet.logPrintf("Marshal V3 Header len=%d. Eaten Last 4 Bytes=%v", len(header), header[len(header)-4:]) - buf.Write(header) - - var securityParameters []byte - securityParameters, err = packet.SecurityParameters.marshal(packet.MsgFlags) - if err != nil { - return emptyBuffer, err - } - packet.logPrintf("Marshal V3 SecurityParameters len=%d. Eaten Last 4 Bytes=%v", - len(securityParameters), securityParameters[len(securityParameters)-4:]) - - buf.Write([]byte{byte(OctetString)}) - secParamLen, err := marshalLength(len(securityParameters)) - if err != nil { - return emptyBuffer, err - } - buf.Write(secParamLen) - buf.Write(securityParameters) - - scopedPdu, err := packet.marshalV3ScopedPDU() - if err != nil { - return emptyBuffer, err - } - buf.Write(scopedPdu) - return buf, nil -} - -// marshal a snmp version 3 packet header -func (packet *SnmpPacket) marshalV3Header() ([]byte, error) { - buf := new(bytes.Buffer) - - // msg id - buf.Write([]byte{byte(Integer), 4}) - err := binary.Write(buf, binary.BigEndian, packet.MsgID) - if err != nil { - return nil, err - } - oldLen := 0 - packet.logPrintf("MarshalV3Header msgID len=%v", buf.Len()-oldLen) - oldLen = buf.Len() - // maximum response msg size - var maxBufSize uint32 = rxBufSize - if packet.MsgMaxSize != 0 { - maxBufSize = packet.MsgMaxSize - } - maxmsgsize := marshalUvarInt(maxBufSize) - buf.Write([]byte{byte(Integer), 
byte(len(maxmsgsize))}) - buf.Write(maxmsgsize) - packet.logPrintf("MarshalV3Header maxmsgsize len=%v", buf.Len()-oldLen) - oldLen = buf.Len() - - // msg flags - buf.Write([]byte{byte(OctetString), 1, byte(packet.MsgFlags)}) - - packet.logPrintf("MarshalV3Header msg flags len=%v", buf.Len()-oldLen) - oldLen = buf.Len() - - // msg security model - buf.Write([]byte{byte(Integer), 1, byte(packet.SecurityModel)}) - - packet.logPrintf("MarshalV3Header msg security model len=%v", buf.Len()-oldLen) - - return buf.Bytes(), nil -} - -// marshal and encrypt (if necessary) a snmp version 3 Scoped PDU -func (packet *SnmpPacket) marshalV3ScopedPDU() ([]byte, error) { - var b []byte - - scopedPdu, err := packet.prepareV3ScopedPDU() - if err != nil { - return nil, err - } - pduLen, err := marshalLength(len(scopedPdu)) - if err != nil { - return nil, err - } - b = append([]byte{byte(Sequence)}, pduLen...) - scopedPdu = append(b, scopedPdu...) - if packet.MsgFlags&AuthPriv > AuthNoPriv { - scopedPdu, err = packet.SecurityParameters.encryptPacket(scopedPdu) - if err != nil { - return nil, err - } - } - - return scopedPdu, nil -} - -// prepare the plain text of a snmp version 3 Scoped PDU -func (packet *SnmpPacket) prepareV3ScopedPDU() ([]byte, error) { - var buf bytes.Buffer - - // ContextEngineID - idlen, err := marshalLength(len(packet.ContextEngineID)) - if err != nil { - return nil, err - } - buf.Write(append([]byte{byte(OctetString)}, idlen...)) - buf.WriteString(packet.ContextEngineID) - - // ContextName - namelen, err := marshalLength(len(packet.ContextName)) - if err != nil { - return nil, err - } - buf.Write(append([]byte{byte(OctetString)}, namelen...)) - buf.WriteString(packet.ContextName) - - data, err := packet.marshalPDU() - if err != nil { - return nil, err - } - buf.Write(data) - return buf.Bytes(), nil -} - -func (x *GoSNMP) unmarshalV3Header(packet []byte, - cursor int, - response *SnmpPacket) (int, error) { - if PDUType(packet[cursor]) != Sequence { - return 0, 
fmt.Errorf("invalid SNMPV3 Header") - } - - _, cursorTmp := parseLength(packet[cursor:]) - cursor += cursorTmp - if cursor > len(packet) { - return 0, fmt.Errorf("error parsing SNMPV3 message ID: truncted packet") - } - - rawMsgID, count, err := parseRawField(x.Logger, packet[cursor:], "msgID") - if err != nil { - return 0, fmt.Errorf("error parsing SNMPV3 message ID: %s", err.Error()) - } - cursor += count - if cursor > len(packet) { - return 0, fmt.Errorf("error parsing SNMPV3 message ID: truncted packet") - } - - if MsgID, ok := rawMsgID.(int); ok { - response.MsgID = uint32(MsgID) - x.logPrintf("Parsed message ID %d", MsgID) - } - - rawMsgMaxSize, count, err := parseRawField(x.Logger, packet[cursor:], "msgMaxSize") - if err != nil { - return 0, fmt.Errorf("error parsing SNMPV3 msgMaxSize: %s", err.Error()) - } - cursor += count - if cursor > len(packet) { - return 0, fmt.Errorf("error parsing SNMPV3 message ID: truncted packet") - } - - if MsgMaxSize, ok := rawMsgMaxSize.(int); ok { - response.MsgMaxSize = uint32(MsgMaxSize) - x.logPrintf("Parsed message max size %d", MsgMaxSize) - } - - rawMsgFlags, count, err := parseRawField(x.Logger, packet[cursor:], "msgFlags") - if err != nil { - return 0, fmt.Errorf("error parsing SNMPV3 msgFlags: %s", err.Error()) - } - cursor += count - if cursor > len(packet) { - return 0, fmt.Errorf("error parsing SNMPV3 message ID: truncted packet") - } - - if MsgFlags, ok := rawMsgFlags.(string); ok { - response.MsgFlags = SnmpV3MsgFlags(MsgFlags[0]) - x.logPrintf("parsed msg flags %s", MsgFlags) - } - - rawSecModel, count, err := parseRawField(x.Logger, packet[cursor:], "msgSecurityModel") - if err != nil { - return 0, fmt.Errorf("error parsing SNMPV3 msgSecModel: %s", err.Error()) - } - cursor += count - if cursor > len(packet) { - return 0, fmt.Errorf("error parsing SNMPV3 message ID: truncted packet") - } - - if SecModel, ok := rawSecModel.(int); ok { - response.SecurityModel = SnmpV3SecurityModel(SecModel) - 
x.logPrintf("Parsed security model %d", SecModel) - } - - if PDUType(packet[cursor]) != PDUType(OctetString) { - return 0, fmt.Errorf("invalid SNMPV3 Security Parameters") - } - _, cursorTmp = parseLength(packet[cursor:]) - cursor += cursorTmp - if cursor > len(packet) { - return 0, fmt.Errorf("error parsing SNMPV3 message ID: truncted packet") - } - if response.SecurityParameters == nil { - response.SecurityParameters = &UsmSecurityParameters{Logger: x.Logger} - } - - cursor, err = response.SecurityParameters.unmarshal(response.MsgFlags, packet, cursor) - if err != nil { - return 0, err - } - x.logPrintf("Parsed Security Parameters. now offset=%v,", cursor) - - return cursor, nil -} - -func (x *GoSNMP) decryptPacket(packet []byte, cursor int, response *SnmpPacket) ([]byte, int, error) { - var err error - var decrypted = false - - if cursor > len(packet) { - return nil, 0, fmt.Errorf("error parsing SNMPV3: truncated packet") - } - - switch PDUType(packet[cursor]) { - case PDUType(OctetString): - // pdu is encrypted - packet, err = response.SecurityParameters.decryptPacket(packet, cursor) - if err != nil { - return nil, 0, err - } - decrypted = true - fallthrough - case Sequence: - // pdu is plaintext or has been decrypted - tlength, cursorTmp := parseLength(packet[cursor:]) - if decrypted { - // truncate padding that might have been included with - // the encrypted PDU - if cursor+tlength > len(packet) { - return nil, 0, fmt.Errorf("error parsing SNMPV3: truncated packet") - } - packet = packet[:cursor+tlength] - } - cursor += cursorTmp - if cursor > len(packet) { - return nil, 0, fmt.Errorf("error parsing SNMPV3: truncated packet") - } - - rawContextEngineID, count, err := parseRawField(x.Logger, packet[cursor:], "contextEngineID") - if err != nil { - return nil, 0, fmt.Errorf("error parsing SNMPV3 contextEngineID: %s", err.Error()) - } - cursor += count - if cursor > len(packet) { - return nil, 0, fmt.Errorf("error parsing SNMPV3: truncated packet") - } - - if 
contextEngineID, ok := rawContextEngineID.(string); ok { - response.ContextEngineID = contextEngineID - x.logPrintf("Parsed contextEngineID %s", contextEngineID) - } - rawContextName, count, err := parseRawField(x.Logger, packet[cursor:], "contextName") - if err != nil { - return nil, 0, fmt.Errorf("error parsing SNMPV3 contextName: %s", err.Error()) - } - cursor += count - if cursor > len(packet) { - return nil, 0, fmt.Errorf("error parsing SNMPV3: truncated packet") - } - - if contextName, ok := rawContextName.(string); ok { - response.ContextName = contextName - x.logPrintf("Parsed contextName %s", contextName) - } - - default: - return nil, 0, fmt.Errorf("error parsing SNMPV3 scoped PDU") - } - return packet, cursor, nil -} diff --git a/vendor/github.com/gosnmp/gosnmp/v3_testing_credentials.go b/vendor/github.com/gosnmp/gosnmp/v3_testing_credentials.go deleted file mode 100644 index 234d9108..00000000 --- a/vendor/github.com/gosnmp/gosnmp/v3_testing_credentials.go +++ /dev/null @@ -1,162 +0,0 @@ -package gosnmp - -import "testing" - -// GO SNMP credentials table -//nolint:gochecknoglobals,unused -var authenticationCredentials = map[string][]string{ - NoAuth.String() + NoPriv.String(): {"noAuthNoPrivUser", "", ""}, - - MD5.String() + NoPriv.String(): {"authMD5OnlyUser", "testingpass0123456789", ""}, - MD5.String() + DES.String(): {"authMD5PrivDESUser", "testingpass9876543210", "testingpass9876543210"}, - MD5.String() + AES.String(): {"authMD5PrivAESUser", "AEStestingpass9876543210", "AEStestingpass9876543210"}, - // MD5.String() + AES192.String(): { "authMD5PrivAES192BlmtUser", "authkey1", "privkey1" }, - // MD5.String() + AES192C.String(): { "authMD5PrivAES192User", "authkey1", "privkey1" }, - // MD5.String() + AES256.String(): { "authMD5PrivAES256BlmtUser", "authkey1", "privkey1" }, - // MD5.String() + AES256C.String(): { "authMD5PrivAES256User", "authkey1", "privkey1" }, - - SHA.String() + NoPriv.String(): {"authSHAOnlyUser", "testingpass9876543210", ""}, - 
SHA.String() + DES.String(): {"authSHAPrivDESUser", "testingpassabc6543210", "testingpassabc6543210"}, - SHA.String() + AES.String(): {"authSHAPrivAESUser", "AEStestingpassabc6543210", "AEStestingpassabc6543210"}, - // SHA.String() + AES192.String(): { "authSHAPrivAES192BlmtUser", "authkey1", "privkey1" }, - // SHA.String() + AES192C.String(): { "authSHAPrivAES192User", "authkey1", "privkey1" }, - // SHA.String() + AES256.String(): { "authSHAPrivAES256BlmtUser", "authkey1", "privkey1" }, - // SHA.String() + AES256C.String(): { "authSHAPrivAES256User", "authkey1", "privkey1" }, - - SHA224.String() + NoPriv.String(): {"authSHA224OnlyUser", "testingpass5123456", ""}, - SHA224.String() + DES.String(): {"authSHA224PrivDESUser", "testingpass6123456", "testingpass6123456"}, - SHA224.String() + AES.String(): {"authSHA224PrivAESUser", "testingpass7123456", "testingpass7123456"}, - // SHA224.String() + AES192.String(): { "authSHA224PrivAES192BlmtUser", "authkey1", "privkey1" }, - // SHA224.String() + AES192C.String(): { "authSHA224PrivAES192User", "authkey1", "privkey1" }, - // SHA224.String() + AES256.String(): { "authSHA224PrivAES256BlmtUser", "authkey1", "privkey1" }, - // SHA224.String() + AES256C.String(): { "authSHA224PrivAES256User", "authkey1", "privkey1" }, - - SHA256.String() + NoPriv.String(): {"authSHA256OnlyUser", "testingpass5223456", ""}, - SHA256.String() + DES.String(): {"authSHA256PrivDESUser", "testingpass6223456", "testingpass6223456"}, - SHA256.String() + AES.String(): {"authSHA256PrivAESUser", "testingpass7223456", "testingpass7223456"}, - // SHA256.String() + AES192.String(): { "authSHA256PrivAES192BlmtUser", "authkey1", "privkey1" }, - // SHA256.String() + AES192C.String(): { "authSHA256PrivAES192User", "authkey1", "privkey1" }, - // SHA256.String() + AES256.String(): { "authSHA256PrivAES256BlmtUser", "authkey1", "privkey1" }, - // SHA256.String() + AES256C.String(): { "authSHA256PrivAES256User", "authkey1", "privkey1" }, - - SHA384.String() + 
NoPriv.String(): {"authSHA384OnlyUser", "testingpass5323456", ""}, - SHA384.String() + DES.String(): {"authSHA384PrivDESUser", "testingpass6323456", "testingpass6323456"}, - SHA384.String() + AES.String(): {"authSHA384PrivAESUser", "testingpass7323456", "testingpass7323456"}, - // SHA384.String() + AES192.String(): { "authSHA384PrivAES192BlmtUser", "authkey1", "privkey1" }, - // SHA384.String() + AES192C.String(): { "authSHA384PrivAES192User", "authkey1", "privkey1" }, - // SHA384.String() + AES256.String(): { "authSHA384PrivAES256BlmtUser", "authkey1", "privkey1" }, - // SHA384.String() + AES256C.String(): { "authSHA384PrivAES256User", "authkey1", "privkey1" }, - - SHA512.String() + NoPriv.String(): {"authSHA512OnlyUser", "testingpass5423456", ""}, - SHA512.String() + DES.String(): {"authSHA512PrivDESUser", "testingpass6423456", "testingpass6423456"}, - SHA512.String() + AES.String(): {"authSHA512PrivAESUser", "testingpass7423456", "testingpass7423456"}, - // SHA512.String() + AES192.String(): { "authSHA512PrivAES192BlmtUser", "authkey1", "privkey1" }, - // SHA512.String() + AES192C.String(): { "authSHA512PrivAES192User", "authkey1", "privkey1" }, - // SHA512.String() + AES256.String(): { "authSHA512PrivAES256BlmtUser", "authkey1", "privkey1" }, - // SHA512.String() + AES256C.String(): { "authSHA512PrivAES256User", "authkey1", "privkey1" }, -} - -// Credentials table for public demo.snmplabs.org -//nolint:unused,gochecknoglobals -var authenticationCredentialsSnmpLabs = map[string][]string{ - NoAuth.String() + NoPriv.String(): {"usr-none-none", "", ""}, - - MD5.String() + NoPriv.String(): {"usr-md5-none", "authkey1", ""}, - MD5.String() + DES.String(): {"usr-md5-des", "authkey1", "privkey1"}, - MD5.String() + AES.String(): {"usr-md5-aes", "authkey1", "privkey1"}, - MD5.String() + AES192.String(): {"usr-md5-aes192-blmt", "authkey1", "privkey1"}, - MD5.String() + AES192C.String(): {"usr-md5-aes192", "authkey1", "privkey1"}, - MD5.String() + AES256.String(): 
{"usr-md5-aes256-blmt", "authkey1", "privkey1"}, - MD5.String() + AES256C.String(): {"usr-md5-aes256", "authkey1", "privkey1"}, - - SHA.String() + NoPriv.String(): {"usr-sha-none", "authkey1", ""}, - SHA.String() + DES.String(): {"usr-sha-des", "authkey1", "privkey1"}, - SHA.String() + AES.String(): {"usr-sha-aes", "authkey1", "privkey1"}, - SHA.String() + AES192.String(): {"usr-sha-aes192-blmt", "authkey1", "privkey1"}, - SHA.String() + AES192C.String(): {"usr-sha-aes192", "authkey1", "privkey1"}, - SHA.String() + AES256.String(): {"usr-sha-aes256-blmt", "authkey1", "privkey1"}, - SHA.String() + AES256C.String(): {"usr-sha-aes256", "authkey1", "privkey1"}, - - SHA224.String() + NoPriv.String(): {"usr-sha224-none", "authkey1", ""}, - SHA224.String() + DES.String(): {"usr-sha224-des", "authkey1", "privkey1"}, - SHA224.String() + AES.String(): {"usr-sha224-aes", "authkey1", "privkey1"}, - SHA224.String() + AES192.String(): {"usr-sha224-aes192-blmt", "authkey1", "privkey1"}, - SHA224.String() + AES192C.String(): {"usr-sha224-aes192", "authkey1", "privkey1"}, - SHA224.String() + AES256.String(): {"usr-sha224-aes256-blmt", "authkey1", "privkey1"}, - SHA224.String() + AES256C.String(): {"usr-sha224-aes256", "authkey1", "privkey1"}, - - SHA256.String() + NoPriv.String(): {"usr-sha256-none", "authkey1", ""}, - SHA256.String() + DES.String(): {"usr-sha256-des", "authkey1", "privkey1"}, - SHA256.String() + AES.String(): {"usr-sha256-aes", "authkey1", "privkey1"}, - SHA256.String() + AES192.String(): {"usr-sha256-aes192-blmt", "authkey1", "privkey1"}, - SHA256.String() + AES192C.String(): {"usr-sha256-aes192", "authkey1", "privkey1"}, - SHA256.String() + AES256.String(): {"usr-sha256-aes256-blmt", "authkey1", "privkey1"}, - SHA256.String() + AES256C.String(): {"usr-sha256-aes256", "authkey1", "privkey1"}, - - SHA384.String() + NoPriv.String(): {"usr-sha384-none", "authkey1", ""}, - SHA384.String() + DES.String(): {"usr-sha384-des", "authkey1", "privkey1"}, - SHA384.String() + 
AES.String(): {"usr-sha384-aes", "authkey1", "privkey1"}, - SHA384.String() + AES192.String(): {"usr-sha384-aes192-blmt", "authkey1", "privkey1"}, - SHA384.String() + AES192C.String(): {"usr-sha384-aes192", "authkey1", "privkey1"}, - SHA384.String() + AES256.String(): {"usr-sha384-aes256-blmt", "authkey1", "privkey1"}, - SHA384.String() + AES256C.String(): {"usr-sha384-aes256", "authkey1", "privkey1"}, - - SHA512.String() + NoPriv.String(): {"usr-sha512-none", "authkey1", ""}, - SHA512.String() + DES.String(): {"usr-sha512-des", "authkey1", "privkey1"}, - SHA512.String() + AES.String(): {"usr-sha512-aes", "authkey1", "privkey1"}, - SHA512.String() + AES192.String(): {"usr-sha512-aes192-blmt", "authkey1", "privkey1"}, - SHA512.String() + AES192C.String(): {"usr-sha512-aes192", "authkey1", "privkey1"}, - SHA512.String() + AES256.String(): {"usr-sha512-aes256-blmt", "authkey1", "privkey1"}, - SHA512.String() + AES256C.String(): {"usr-sha512-aes256", "authkey1", "privkey1"}, -} - -//nolint:unused,gochecknoglobals -var useSnmpLabsCredentials = false - -// TODO get above credentials into snmpsimd, so *all* tests can be run. 
Combine with settings in `snmp_users.sh` - -const cIdxUserName = 0 -const cIdxAuthKey = 1 -const cIdxPrivKey = 2 - -//nolint -func isUsingSnmpLabs() bool { - return useSnmpLabsCredentials -} - -// conveniently enable demo.snmplabs.com for a one test -//nolint -func useSnmpLabs(use bool) { - useSnmpLabsCredentials = use -} - -//nolint -func getCredentials(t *testing.T, authProtocol SnmpV3AuthProtocol, privProtocol SnmpV3PrivProtocol) []string { - var credentials []string - if useSnmpLabsCredentials { - credentials = authenticationCredentialsSnmpLabs[authProtocol.String()+privProtocol.String()] - } else { - credentials = authenticationCredentials[authProtocol.String()+privProtocol.String()] - } - - if credentials == nil { - t.Skipf("No user credentials found for %s/%s", authProtocol.String(), privProtocol.String()) - return []string{"unknown", "unknown", "unkown"} - } - return credentials -} - -//nolint -func getUserName(t *testing.T, authProtocol SnmpV3AuthProtocol, privProtocol SnmpV3PrivProtocol) string { - return getCredentials(t, authProtocol, privProtocol)[cIdxUserName] -} - -//nolint:unused,deadcode -func getAuthKey(t *testing.T, authProtocol SnmpV3AuthProtocol, privProtocol SnmpV3PrivProtocol) string { - return getCredentials(t, authProtocol, privProtocol)[cIdxAuthKey] -} - -//nolint:unused,deadcode -func getPrivKey(t *testing.T, authProtocol SnmpV3AuthProtocol, privProtocol SnmpV3PrivProtocol) string { - return getCredentials(t, authProtocol, privProtocol)[cIdxPrivKey] -} diff --git a/vendor/github.com/gosnmp/gosnmp/v3_usm.go b/vendor/github.com/gosnmp/gosnmp/v3_usm.go deleted file mode 100644 index 9855ac8b..00000000 --- a/vendor/github.com/gosnmp/gosnmp/v3_usm.go +++ /dev/null @@ -1,916 +0,0 @@ -// Copyright 2012 The GoSNMP Authors. All rights reserved. Use of this -// source code is governed by a BSD-style license that can be found in the -// LICENSE file. - -// Copyright 2009 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gosnmp - -import ( - "bytes" - "crypto" - "crypto/aes" - "crypto/cipher" - "crypto/des" //nolint:gosec - "crypto/hmac" - _ "crypto/md5" //nolint:gosec - crand "crypto/rand" - _ "crypto/sha1" //nolint:gosec - _ "crypto/sha256" - _ "crypto/sha512" - "encoding/binary" - "encoding/hex" - "errors" - "fmt" - "hash" - "strings" - "sync" - "sync/atomic" -) - -// SnmpV3AuthProtocol describes the authentication protocol in use by an authenticated SnmpV3 connection. -type SnmpV3AuthProtocol uint8 - -// NoAuth, MD5, and SHA are implemented -const ( - NoAuth SnmpV3AuthProtocol = 1 - MD5 SnmpV3AuthProtocol = 2 - SHA SnmpV3AuthProtocol = 3 - SHA224 SnmpV3AuthProtocol = 4 - SHA256 SnmpV3AuthProtocol = 5 - SHA384 SnmpV3AuthProtocol = 6 - SHA512 SnmpV3AuthProtocol = 7 -) - -//go:generate stringer -type=SnmpV3AuthProtocol - -// HashType maps the AuthProtocol's hash type to an actual crypto.Hash object. 
-func (authProtocol SnmpV3AuthProtocol) HashType() crypto.Hash { - switch authProtocol { - default: - return crypto.MD5 - case SHA: - return crypto.SHA1 - case SHA224: - return crypto.SHA224 - case SHA256: - return crypto.SHA256 - case SHA384: - return crypto.SHA384 - case SHA512: - return crypto.SHA512 - } -} - -//nolint:gochecknoglobals -var macVarbinds = [][]byte{ - {}, - {byte(OctetString), 0}, - {byte(OctetString), 12, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0}, - {byte(OctetString), 12, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0}, - {byte(OctetString), 16, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0}, - {byte(OctetString), 24, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0}, - {byte(OctetString), 32, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0}, - {byte(OctetString), 48, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0}} - -// SnmpV3PrivProtocol is the privacy protocol in use by an private SnmpV3 connection. -type SnmpV3PrivProtocol uint8 - -// NoPriv, DES implemented, AES planned -// Changed: AES192, AES256, AES192C, AES256C added -const ( - NoPriv SnmpV3PrivProtocol = 1 - DES SnmpV3PrivProtocol = 2 - AES SnmpV3PrivProtocol = 3 - AES192 SnmpV3PrivProtocol = 4 // Blumenthal-AES192 - AES256 SnmpV3PrivProtocol = 5 // Blumenthal-AES256 - AES192C SnmpV3PrivProtocol = 6 // Reeder-AES192 - AES256C SnmpV3PrivProtocol = 7 // Reeder-AES256 -) - -//go:generate stringer -type=SnmpV3PrivProtocol - -// UsmSecurityParameters is an implementation of SnmpV3SecurityParameters for the UserSecurityModel -type UsmSecurityParameters struct { - mu sync.Mutex - // localAESSalt must be 64bit aligned to use with atomic operations. 
- localAESSalt uint64 - localDESSalt uint32 - - AuthoritativeEngineID string - AuthoritativeEngineBoots uint32 - AuthoritativeEngineTime uint32 - UserName string - AuthenticationParameters string - PrivacyParameters []byte - - AuthenticationProtocol SnmpV3AuthProtocol - PrivacyProtocol SnmpV3PrivProtocol - - AuthenticationPassphrase string - PrivacyPassphrase string - - SecretKey []byte - PrivacyKey []byte - - Logger Logger -} - -// Log logs security paramater information to the provided GoSNMP Logger -func (sp *UsmSecurityParameters) Description() string { - var sb strings.Builder - sb.WriteString("user=") - sb.WriteString(sp.UserName) - - sb.WriteString(",engine=(") - sb.WriteString(hex.EncodeToString([]byte(sp.AuthoritativeEngineID))) - // sb.WriteString(sp.AuthoritativeEngineID) - sb.WriteString(")") - - switch sp.AuthenticationProtocol { - case NoAuth: - sb.WriteString(",auth=noauth") - case MD5: - sb.WriteString(",auth=md5") - case SHA: - sb.WriteString(",auth=sha") - case SHA224: - sb.WriteString(",auth=sha224") - case SHA256: - sb.WriteString(",auth=sha256") - case SHA384: - sb.WriteString(",auth=sha384") - case SHA512: - sb.WriteString(",auth=sha512") - } - sb.WriteString(",authPass=") - sb.WriteString(sp.AuthenticationPassphrase) - - switch sp.PrivacyProtocol { - case NoPriv: - sb.WriteString(",priv=NoPriv") - case DES: - sb.WriteString(",priv=DES") - case AES: - sb.WriteString(",priv=AES") - case AES192: - sb.WriteString(",priv=AES192") - case AES256: - sb.WriteString(",priv=AES256") - case AES192C: - sb.WriteString(",priv=AES192C") - case AES256C: - sb.WriteString(",priv=AES256C") - } - sb.WriteString(",privPass=") - sb.WriteString(sp.PrivacyPassphrase) - - return sb.String() -} - -// Log logs security paramater information to the provided GoSNMP Logger -func (sp *UsmSecurityParameters) Log() { - sp.mu.Lock() - defer sp.mu.Unlock() - sp.Logger.Printf("SECURITY PARAMETERS:%+v", sp) -} - -// Copy method for UsmSecurityParameters used to copy a 
SnmpV3SecurityParameters without knowing it's implementation -func (sp *UsmSecurityParameters) Copy() SnmpV3SecurityParameters { - sp.mu.Lock() - defer sp.mu.Unlock() - return &UsmSecurityParameters{AuthoritativeEngineID: sp.AuthoritativeEngineID, - AuthoritativeEngineBoots: sp.AuthoritativeEngineBoots, - AuthoritativeEngineTime: sp.AuthoritativeEngineTime, - UserName: sp.UserName, - AuthenticationParameters: sp.AuthenticationParameters, - PrivacyParameters: sp.PrivacyParameters, - AuthenticationProtocol: sp.AuthenticationProtocol, - PrivacyProtocol: sp.PrivacyProtocol, - AuthenticationPassphrase: sp.AuthenticationPassphrase, - PrivacyPassphrase: sp.PrivacyPassphrase, - SecretKey: sp.SecretKey, - PrivacyKey: sp.PrivacyKey, - localDESSalt: sp.localDESSalt, - localAESSalt: sp.localAESSalt, - Logger: sp.Logger, - } -} - -func (sp *UsmSecurityParameters) getDefaultContextEngineID() string { - return sp.AuthoritativeEngineID -} -func (sp *UsmSecurityParameters) initSecurityKeys() error { - sp.mu.Lock() - defer sp.mu.Unlock() - - return sp.initSecurityKeysNoLock() -} - -func (sp *UsmSecurityParameters) initSecurityKeysNoLock() error { - var err error - - if sp.AuthenticationProtocol > NoAuth && len(sp.SecretKey) == 0 { - sp.SecretKey, err = genlocalkey(sp.AuthenticationProtocol, - sp.AuthenticationPassphrase, - sp.AuthoritativeEngineID) - if err != nil { - return err - } - } - if sp.PrivacyProtocol > NoPriv && len(sp.PrivacyKey) == 0 { - switch sp.PrivacyProtocol { - // Changed: The Output of SHA1 is a 20 octets array, therefore for AES128 (16 octets) either key extension algorithm can be used. - case AES, AES192, AES256, AES192C, AES256C: - // Use abstract AES key localization algorithms. 
- sp.PrivacyKey, err = genlocalPrivKey(sp.PrivacyProtocol, sp.AuthenticationProtocol, - sp.PrivacyPassphrase, - sp.AuthoritativeEngineID) - if err != nil { - return err - } - default: - sp.PrivacyKey, err = genlocalkey(sp.AuthenticationProtocol, - sp.PrivacyPassphrase, - sp.AuthoritativeEngineID) - if err != nil { - return err - } - } - } - return nil -} - -func (sp *UsmSecurityParameters) setSecurityParameters(in SnmpV3SecurityParameters) error { - var insp *UsmSecurityParameters - var err error - - sp.mu.Lock() - defer sp.mu.Unlock() - - if insp, err = castUsmSecParams(in); err != nil { - return err - } - - if sp.AuthoritativeEngineID != insp.AuthoritativeEngineID { - sp.AuthoritativeEngineID = insp.AuthoritativeEngineID - sp.SecretKey = nil - sp.PrivacyKey = nil - - err = sp.initSecurityKeysNoLock() - if err != nil { - return err - } - } - sp.AuthoritativeEngineBoots = insp.AuthoritativeEngineBoots - sp.AuthoritativeEngineTime = insp.AuthoritativeEngineTime - - return nil -} - -func (sp *UsmSecurityParameters) validate(flags SnmpV3MsgFlags) error { - securityLevel := flags & AuthPriv // isolate flags that determine security level - - switch securityLevel { - case AuthPriv: - if sp.PrivacyProtocol <= NoPriv { - return fmt.Errorf("securityParameters.PrivacyProtocol is required") - } - fallthrough - case AuthNoPriv: - if sp.AuthenticationProtocol <= NoAuth { - return fmt.Errorf("securityParameters.AuthenticationProtocol is required") - } - fallthrough - case NoAuthNoPriv: - if sp.UserName == "" { - return fmt.Errorf("securityParameters.UserName is required") - } - default: - return fmt.Errorf("validate: MsgFlags must be populated with an appropriate security level") - } - - if sp.PrivacyProtocol > NoPriv && len(sp.PrivacyKey) == 0 { - if sp.PrivacyPassphrase == "" { - return fmt.Errorf("securityParameters.PrivacyPassphrase is required when a privacy protocol is specified") - } - } - - if sp.AuthenticationProtocol > NoAuth && len(sp.SecretKey) == 0 { - if 
sp.AuthenticationPassphrase == "" { - return fmt.Errorf("securityParameters.AuthenticationPassphrase is required when an authentication protocol is specified") - } - } - - return nil -} - -func (sp *UsmSecurityParameters) init(log Logger) error { - var err error - - sp.Logger = log - - switch sp.PrivacyProtocol { - case AES, AES192, AES256, AES192C, AES256C: - salt := make([]byte, 8) - _, err = crand.Read(salt) - if err != nil { - return fmt.Errorf("error creating a cryptographically secure salt: %s", err.Error()) - } - sp.localAESSalt = binary.BigEndian.Uint64(salt) - case DES: - salt := make([]byte, 4) - _, err = crand.Read(salt) - if err != nil { - return fmt.Errorf("error creating a cryptographically secure salt: %s", err.Error()) - } - sp.localDESSalt = binary.BigEndian.Uint32(salt) - } - - return nil -} - -func castUsmSecParams(secParams SnmpV3SecurityParameters) (*UsmSecurityParameters, error) { - s, ok := secParams.(*UsmSecurityParameters) - if !ok || s == nil { - return nil, fmt.Errorf("param SnmpV3SecurityParameters is not of type *UsmSecurityParameters") - } - return s, nil -} - -var ( - passwordKeyHashCache = make(map[string][]byte) //nolint:gochecknoglobals - passwordKeyHashMutex sync.RWMutex //nolint:gochecknoglobals -) - -func hashPassword(hash hash.Hash, password string) ([]byte, error) { - if len(password) == 0 { - return []byte{}, errors.New("hashPassword: password is empty") - } - var pi int // password index - for i := 0; i < 1048576; i += 64 { - var chunk []byte - for e := 0; e < 64; e++ { - chunk = append(chunk, password[pi%len(password)]) - pi++ - } - if _, err := hash.Write(chunk); err != nil { - return []byte{}, err - } - } - hashed := hash.Sum(nil) - return hashed, nil -} - -// Common passwordToKey algorithm, "caches" the result to avoid extra computation each reuse -func cachedPasswordToKey(hash hash.Hash, cacheKey string, password string) ([]byte, error) { - passwordKeyHashMutex.RLock() - value := passwordKeyHashCache[cacheKey] - 
passwordKeyHashMutex.RUnlock() - - if value != nil { - return value, nil - } - - hashed, err := hashPassword(hash, password) - if err != nil { - return nil, err - } - - passwordKeyHashMutex.Lock() - passwordKeyHashCache[cacheKey] = hashed - passwordKeyHashMutex.Unlock() - - return hashed, nil -} - -func hMAC(hash crypto.Hash, cacheKey string, password string, engineID string) ([]byte, error) { - hashed, err := cachedPasswordToKey(hash.New(), cacheKey, password) - if err != nil { - return []byte{}, nil - } - - local := hash.New() - _, err = local.Write(hashed) - if err != nil { - return []byte{}, err - } - - _, err = local.Write([]byte(engineID)) - if err != nil { - return []byte{}, err - } - - _, err = local.Write(hashed) - if err != nil { - return []byte{}, err - } - - final := local.Sum(nil) - return final, nil -} - -func cacheKey(authProtocol SnmpV3AuthProtocol, passphrase string) string { - var cacheKey = make([]byte, 1+len(passphrase)) - cacheKey = append(cacheKey, 'h'+byte(authProtocol)) - cacheKey = append(cacheKey, []byte(passphrase)...) - return string(cacheKey) -} - -// Extending the localized privacy key according to Reeder Key extension algorithm: -// https://tools.ietf.org/html/draft-reeder-snmpv3-usm-3dese -// Many vendors, including Cisco, use the 3DES key extension algorithm to extend the privacy keys that are too short when using AES,AES192 and AES256. -// Previously implemented in net-snmp and pysnmp libraries. 
-// Tested for AES128 and AES256 -func extendKeyReeder(authProtocol SnmpV3AuthProtocol, password string, engineID string) ([]byte, error) { - var key []byte - var err error - - key, err = hMAC(authProtocol.HashType(), cacheKey(authProtocol, password), password, engineID) - - if err != nil { - return nil, err - } - - newkey, err := hMAC(authProtocol.HashType(), cacheKey(authProtocol, string(key)), string(key), engineID) - - return append(key, newkey...), err -} - -// Extending the localized privacy key according to Blumenthal key extension algorithm: -// https://tools.ietf.org/html/draft-blumenthal-aes-usm-04#page-7 -// Not many vendors use this algorithm. -// Previously implemented in the net-snmp and pysnmp libraries. -// Not tested -func extendKeyBlumenthal(authProtocol SnmpV3AuthProtocol, password string, engineID string) ([]byte, error) { - var key []byte - var err error - - key, err = hMAC(authProtocol.HashType(), cacheKey(authProtocol, ""), password, engineID) - - if err != nil { - return nil, err - } - - newkey := authProtocol.HashType().New() - _, _ = newkey.Write(key) - return append(key, newkey.Sum(nil)...), err -} - -// Changed: New function to calculate the Privacy Key for abstract AES -func genlocalPrivKey(privProtocol SnmpV3PrivProtocol, authProtocol SnmpV3AuthProtocol, password string, engineID string) ([]byte, error) { - var keylen int - var localPrivKey []byte - var err error - - switch privProtocol { - case AES, DES: - keylen = 16 - case AES192, AES192C: - keylen = 24 - case AES256, AES256C: - keylen = 32 - } - - switch privProtocol { - case AES, AES192C, AES256C: - localPrivKey, err = extendKeyReeder(authProtocol, password, engineID) - - case AES192, AES256: - localPrivKey, err = extendKeyBlumenthal(authProtocol, password, engineID) - - default: - localPrivKey, err = genlocalkey(authProtocol, password, engineID) - } - - if err != nil { - return nil, err - } - - if len(localPrivKey) < keylen { - return []byte{}, fmt.Errorf("genlocalPrivKey: 
privProtocol: %v len(localPrivKey): %d, keylen: %d", - privProtocol, len(localPrivKey), keylen) - } - - return localPrivKey[:keylen], nil -} - -func genlocalkey(authProtocol SnmpV3AuthProtocol, passphrase string, engineID string) ([]byte, error) { - var secretKey []byte - var err error - - secretKey, err = hMAC(authProtocol.HashType(), cacheKey(authProtocol, passphrase), passphrase, engineID) - - if err != nil { - return []byte{}, err - } - - return secretKey, nil -} - -// http://tools.ietf.org/html/rfc2574#section-8.1.1.1 -// localDESSalt needs to be incremented on every packet. -func (sp *UsmSecurityParameters) usmAllocateNewSalt() interface{} { - sp.mu.Lock() - defer sp.mu.Unlock() - var newSalt interface{} - - switch sp.PrivacyProtocol { - case AES, AES192, AES256, AES192C, AES256C: - newSalt = atomic.AddUint64(&(sp.localAESSalt), 1) - default: - newSalt = atomic.AddUint32(&(sp.localDESSalt), 1) - } - return newSalt -} - -func (sp *UsmSecurityParameters) usmSetSalt(newSalt interface{}) error { - sp.mu.Lock() - defer sp.mu.Unlock() - switch sp.PrivacyProtocol { - case AES, AES192, AES256, AES192C, AES256C: - aesSalt, ok := newSalt.(uint64) - if !ok { - return fmt.Errorf("salt provided to usmSetSalt is not the correct type for the AES privacy protocol") - } - var salt = make([]byte, 8) - binary.BigEndian.PutUint64(salt, aesSalt) - sp.PrivacyParameters = salt - default: - desSalt, ok := newSalt.(uint32) - if !ok { - return fmt.Errorf("salt provided to usmSetSalt is not the correct type for the DES privacy protocol") - } - var salt = make([]byte, 8) - binary.BigEndian.PutUint32(salt, sp.AuthoritativeEngineBoots) - binary.BigEndian.PutUint32(salt[4:], desSalt) - sp.PrivacyParameters = salt - } - return nil -} - -func (sp *UsmSecurityParameters) initPacket(packet *SnmpPacket) error { - // http://tools.ietf.org/html/rfc2574#section-8.1.1.1 - // localDESSalt needs to be incremented on every packet. 
- newSalt := sp.usmAllocateNewSalt() - if packet.MsgFlags&AuthPriv > AuthNoPriv { - s, err := castUsmSecParams(packet.SecurityParameters) - if err != nil { - return err - } - return s.usmSetSalt(newSalt) - } - return nil -} - -func (sp *UsmSecurityParameters) discoveryRequired() *SnmpPacket { - if sp.AuthoritativeEngineID == "" { - var emptyPdus []SnmpPDU - - // send blank packet to discover authoriative engine ID/boots/time - blankPacket := &SnmpPacket{ - Version: Version3, - MsgFlags: Reportable | NoAuthNoPriv, - SecurityModel: UserSecurityModel, - SecurityParameters: &UsmSecurityParameters{Logger: sp.Logger}, - PDUType: GetRequest, - Logger: sp.Logger, - Variables: emptyPdus, - } - - return blankPacket - } - return nil -} - -func (sp *UsmSecurityParameters) calcPacketDigest(packet []byte) []byte { - var mac hash.Hash - - switch sp.AuthenticationProtocol { - default: - mac = hmac.New(crypto.MD5.New, sp.SecretKey) - case SHA: - mac = hmac.New(crypto.SHA1.New, sp.SecretKey) - case SHA224: - mac = hmac.New(crypto.SHA224.New, sp.SecretKey) - case SHA256: - mac = hmac.New(crypto.SHA256.New, sp.SecretKey) - case SHA384: - mac = hmac.New(crypto.SHA384.New, sp.SecretKey) - case SHA512: - mac = hmac.New(crypto.SHA512.New, sp.SecretKey) - } - - _, _ = mac.Write(packet) - msgDigest := mac.Sum(nil) - return msgDigest -} - -func (sp *UsmSecurityParameters) authenticate(packet []byte) error { - msgDigest := sp.calcPacketDigest(packet) - idx := bytes.Index(packet, macVarbinds[sp.AuthenticationProtocol]) - - if idx < 0 { - return fmt.Errorf("unable to locate the position in packet to write authentication key") - } - - copy(packet[idx+2:idx+len(macVarbinds[sp.AuthenticationProtocol])], msgDigest) - return nil -} - -// determine whether a message is authentic -func (sp *UsmSecurityParameters) isAuthentic(packetBytes []byte, packet *SnmpPacket) (bool, error) { - var packetSecParams *UsmSecurityParameters - var err error - - if packetSecParams, err = 
castUsmSecParams(packet.SecurityParameters); err != nil { - return false, err - } - // TODO: investigate call chain to determine if this is really the best spot for this - msgDigest := sp.calcPacketDigest(packetBytes) - - for k, v := range []byte(packetSecParams.AuthenticationParameters) { - if msgDigest[k] != v { - return false, nil - } - } - return true, nil -} - -func (sp *UsmSecurityParameters) encryptPacket(scopedPdu []byte) ([]byte, error) { - var b []byte - - switch sp.PrivacyProtocol { - case AES, AES192, AES256, AES192C, AES256C: - var iv [16]byte - binary.BigEndian.PutUint32(iv[:], sp.AuthoritativeEngineBoots) - binary.BigEndian.PutUint32(iv[4:], sp.AuthoritativeEngineTime) - copy(iv[8:], sp.PrivacyParameters) - // aes.NewCipher(sp.PrivacyKey[:16]) changed to aes.NewCipher(sp.PrivacyKey) - block, err := aes.NewCipher(sp.PrivacyKey) - if err != nil { - return nil, err - } - stream := cipher.NewCFBEncrypter(block, iv[:]) - ciphertext := make([]byte, len(scopedPdu)) - stream.XORKeyStream(ciphertext, scopedPdu) - pduLen, err := marshalLength(len(ciphertext)) - if err != nil { - return nil, err - } - b = append([]byte{byte(OctetString)}, pduLen...) - scopedPdu = append(b, ciphertext...) //nolint:gocritic - default: - preiv := sp.PrivacyKey[8:] - var iv [8]byte - for i := 0; i < len(iv); i++ { - iv[i] = preiv[i] ^ sp.PrivacyParameters[i] - } - block, err := des.NewCipher(sp.PrivacyKey[:8]) //nolint:gosec - if err != nil { - return nil, err - } - mode := cipher.NewCBCEncrypter(block, iv[:]) - - pad := make([]byte, des.BlockSize-len(scopedPdu)%des.BlockSize) - scopedPdu = append(scopedPdu, pad...) - - ciphertext := make([]byte, len(scopedPdu)) - mode.CryptBlocks(ciphertext, scopedPdu) - pduLen, err := marshalLength(len(ciphertext)) - if err != nil { - return nil, err - } - b = append([]byte{byte(OctetString)}, pduLen...) - scopedPdu = append(b, ciphertext...) 
//nolint:gocritic - } - - return scopedPdu, nil -} - -func (sp *UsmSecurityParameters) decryptPacket(packet []byte, cursor int) ([]byte, error) { - _, cursorTmp := parseLength(packet[cursor:]) - cursorTmp += cursor - if cursorTmp > len(packet) { - return nil, fmt.Errorf("error decrypting ScopedPDU: truncated packet") - } - - switch sp.PrivacyProtocol { - case AES, AES192, AES256, AES192C, AES256C: - var iv [16]byte - binary.BigEndian.PutUint32(iv[:], sp.AuthoritativeEngineBoots) - binary.BigEndian.PutUint32(iv[4:], sp.AuthoritativeEngineTime) - copy(iv[8:], sp.PrivacyParameters) - - block, err := aes.NewCipher(sp.PrivacyKey) - if err != nil { - return nil, err - } - stream := cipher.NewCFBDecrypter(block, iv[:]) - plaintext := make([]byte, len(packet[cursorTmp:])) - stream.XORKeyStream(plaintext, packet[cursorTmp:]) - copy(packet[cursor:], plaintext) - packet = packet[:cursor+len(plaintext)] - default: - if len(packet[cursorTmp:])%des.BlockSize != 0 { - return nil, fmt.Errorf("error decrypting ScopedPDU: not multiple of des block size") - } - preiv := sp.PrivacyKey[8:] - var iv [8]byte - for i := 0; i < len(iv); i++ { - iv[i] = preiv[i] ^ sp.PrivacyParameters[i] - } - block, err := des.NewCipher(sp.PrivacyKey[:8]) //nolint:gosec - if err != nil { - return nil, err - } - mode := cipher.NewCBCDecrypter(block, iv[:]) - - plaintext := make([]byte, len(packet[cursorTmp:])) - mode.CryptBlocks(plaintext, packet[cursorTmp:]) - copy(packet[cursor:], plaintext) - // truncate packet to remove extra space caused by the - // octetstring/length header that was just replaced - packet = packet[:cursor+len(plaintext)] - } - return packet, nil -} - -// marshal a snmp version 3 security parameters field for the User Security Model -func (sp *UsmSecurityParameters) marshal(flags SnmpV3MsgFlags) ([]byte, error) { - var buf bytes.Buffer - var err error - - // msgAuthoritativeEngineID - buf.Write([]byte{byte(OctetString), byte(len(sp.AuthoritativeEngineID))}) - 
buf.WriteString(sp.AuthoritativeEngineID) - - // msgAuthoritativeEngineBoots - msgAuthoritativeEngineBoots := marshalUvarInt(sp.AuthoritativeEngineBoots) - buf.Write([]byte{byte(Integer), byte(len(msgAuthoritativeEngineBoots))}) - buf.Write(msgAuthoritativeEngineBoots) - - // msgAuthoritativeEngineTime - msgAuthoritativeEngineTime := marshalUvarInt(sp.AuthoritativeEngineTime) - buf.Write([]byte{byte(Integer), byte(len(msgAuthoritativeEngineTime))}) - buf.Write(msgAuthoritativeEngineTime) - - // msgUserName - buf.Write([]byte{byte(OctetString), byte(len(sp.UserName))}) - buf.WriteString(sp.UserName) - - // msgAuthenticationParameters - if flags&AuthNoPriv > 0 { - buf.Write(macVarbinds[sp.AuthenticationProtocol]) - } else { - buf.Write([]byte{byte(OctetString), 0}) - } - // msgPrivacyParameters - if flags&AuthPriv > AuthNoPriv { - privlen, err2 := marshalLength(len(sp.PrivacyParameters)) - if err2 != nil { - return nil, err2 - } - buf.Write([]byte{byte(OctetString)}) - buf.Write(privlen) - buf.Write(sp.PrivacyParameters) - } else { - buf.Write([]byte{byte(OctetString), 0}) - } - - // wrap security parameters in a sequence - paramLen, err := marshalLength(buf.Len()) - if err != nil { - return nil, err - } - tmpseq := append([]byte{byte(Sequence)}, paramLen...) - tmpseq = append(tmpseq, buf.Bytes()...) 
- - return tmpseq, nil -} - -func (sp *UsmSecurityParameters) unmarshal(flags SnmpV3MsgFlags, packet []byte, cursor int) (int, error) { - var err error - - if PDUType(packet[cursor]) != Sequence { - return 0, fmt.Errorf("error parsing SNMPV3 User Security Model parameters") - } - _, cursorTmp := parseLength(packet[cursor:]) - cursor += cursorTmp - if cursorTmp > len(packet) { - return 0, fmt.Errorf("error parsing SNMPV3 User Security Model parameters: truncated packet") - } - - rawMsgAuthoritativeEngineID, count, err := parseRawField(sp.Logger, packet[cursor:], "msgAuthoritativeEngineID") - if err != nil { - return 0, fmt.Errorf("error parsing SNMPV3 User Security Model msgAuthoritativeEngineID: %s", err.Error()) - } - cursor += count - if AuthoritativeEngineID, ok := rawMsgAuthoritativeEngineID.(string); ok { - if sp.AuthoritativeEngineID != AuthoritativeEngineID { - sp.AuthoritativeEngineID = AuthoritativeEngineID - sp.SecretKey = nil - sp.PrivacyKey = nil - - sp.Logger.Printf("Parsed authoritativeEngineID %0x", []byte(AuthoritativeEngineID)) - err = sp.initSecurityKeysNoLock() - if err != nil { - return 0, err - } - } - } - - rawMsgAuthoritativeEngineBoots, count, err := parseRawField(sp.Logger, packet[cursor:], "msgAuthoritativeEngineBoots") - if err != nil { - return 0, fmt.Errorf("error parsing SNMPV3 User Security Model msgAuthoritativeEngineBoots: %s", err.Error()) - } - cursor += count - if AuthoritativeEngineBoots, ok := rawMsgAuthoritativeEngineBoots.(int); ok { - sp.AuthoritativeEngineBoots = uint32(AuthoritativeEngineBoots) - sp.Logger.Printf("Parsed authoritativeEngineBoots %d", AuthoritativeEngineBoots) - } - - rawMsgAuthoritativeEngineTime, count, err := parseRawField(sp.Logger, packet[cursor:], "msgAuthoritativeEngineTime") - if err != nil { - return 0, fmt.Errorf("error parsing SNMPV3 User Security Model msgAuthoritativeEngineTime: %s", err.Error()) - } - cursor += count - if AuthoritativeEngineTime, ok := rawMsgAuthoritativeEngineTime.(int); ok { 
- sp.AuthoritativeEngineTime = uint32(AuthoritativeEngineTime) - sp.Logger.Printf("Parsed authoritativeEngineTime %d", AuthoritativeEngineTime) - } - - rawMsgUserName, count, err := parseRawField(sp.Logger, packet[cursor:], "msgUserName") - if err != nil { - return 0, fmt.Errorf("error parsing SNMPV3 User Security Model msgUserName: %s", err.Error()) - } - cursor += count - if msgUserName, ok := rawMsgUserName.(string); ok { - sp.UserName = msgUserName - sp.Logger.Printf("Parsed userName %s", msgUserName) - } - - rawMsgAuthParameters, count, err := parseRawField(sp.Logger, packet[cursor:], "msgAuthenticationParameters") - if err != nil { - return 0, fmt.Errorf("error parsing SNMPV3 User Security Model msgAuthenticationParameters: %s", err.Error()) - } - if msgAuthenticationParameters, ok := rawMsgAuthParameters.(string); ok { - sp.AuthenticationParameters = msgAuthenticationParameters - sp.Logger.Printf("Parsed authenticationParameters %s", msgAuthenticationParameters) - } - // blank msgAuthenticationParameters to prepare for authentication check later - if flags&AuthNoPriv > 0 { - copy(packet[cursor+2:cursor+len(macVarbinds[sp.AuthenticationProtocol])], macVarbinds[sp.AuthenticationProtocol][2:]) - } - cursor += count - - rawMsgPrivacyParameters, count, err := parseRawField(sp.Logger, packet[cursor:], "msgPrivacyParameters") - if err != nil { - return 0, fmt.Errorf("error parsing SNMPV3 User Security Model msgPrivacyParameters: %s", err.Error()) - } - cursor += count - if msgPrivacyParameters, ok := rawMsgPrivacyParameters.(string); ok { - sp.PrivacyParameters = []byte(msgPrivacyParameters) - sp.Logger.Printf("Parsed privacyParameters %s", msgPrivacyParameters) - } - - return cursor, nil -} diff --git a/vendor/github.com/gosnmp/gosnmp/walk.go b/vendor/github.com/gosnmp/gosnmp/walk.go deleted file mode 100644 index fcab53c9..00000000 --- a/vendor/github.com/gosnmp/gosnmp/walk.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2012 The GoSNMP Authors. 
All rights reserved. Use of this -// source code is governed by a BSD-style license that can be found in the -// LICENSE file. - -package gosnmp - -import ( - "fmt" - "strings" -) - -func (x *GoSNMP) walk(getRequestType PDUType, rootOid string, walkFn WalkFunc) error { - if rootOid == "" || rootOid == "." { - rootOid = baseOid - } - - if !strings.HasPrefix(rootOid, ".") { - rootOid = string(".") + rootOid - } - - oid := rootOid - requests := 0 - maxReps := x.MaxRepetitions - if maxReps == 0 { - maxReps = defaultMaxRepetitions - } - - // AppOpt 'c: do not check returned OIDs are increasing' - checkIncreasing := true - if x.AppOpts != nil { - if _, ok := x.AppOpts["c"]; ok { - if getRequestType == GetBulkRequest || getRequestType == GetNextRequest { - checkIncreasing = false - } - } - } - -RequestLoop: - for { - requests++ - - var response *SnmpPacket - var err error - - switch getRequestType { - case GetBulkRequest: - response, err = x.GetBulk([]string{oid}, uint8(x.NonRepeaters), maxReps) - case GetNextRequest: - response, err = x.GetNext([]string{oid}) - case GetRequest: - response, err = x.Get([]string{oid}) - default: - response, err = nil, fmt.Errorf("unsupported request type: %d", getRequestType) - } - - if err != nil { - return err - } - if len(response.Variables) == 0 { - break RequestLoop - } - - if response.Error == NoSuchName { - x.Logger.Print("Walk terminated with NoSuchName") - break RequestLoop - } - - for i, pdu := range response.Variables { - if pdu.Type == EndOfMibView || pdu.Type == NoSuchObject || pdu.Type == NoSuchInstance { - x.Logger.Printf("BulkWalk terminated with type 0x%x", pdu.Type) - break RequestLoop - } - if !strings.HasPrefix(pdu.Name, rootOid+".") { - // Not in the requested root range. 
- // if this is the first request, and the first variable in that request - // and this condition is triggered - the first result is out of range - // need to perform a regular get request - // this request has been too narrowly defined to be found with a getNext - // Issue #78 #93 - if requests == 1 && i == 0 { - getRequestType = GetRequest - continue RequestLoop - } else if pdu.Name == rootOid && pdu.Type != NoSuchInstance { - // Call walk function if the pdu instance is found - // considering that the rootOid is a leafOid - if err := walkFn(pdu); err != nil { - return err - } - } - break RequestLoop - } - - if checkIncreasing && pdu.Name == oid { - return fmt.Errorf("OID not increasing: %s", pdu.Name) - } - - // Report our pdu - if err := walkFn(pdu); err != nil { - return err - } - } - // Save last oid for next request - oid = response.Variables[len(response.Variables)-1].Name - } - x.Logger.Printf("BulkWalk completed in %d requests", requests) - return nil -} - -func (x *GoSNMP) walkAll(getRequestType PDUType, rootOid string) (results []SnmpPDU, err error) { - err = x.walk(getRequestType, rootOid, func(dataUnit SnmpPDU) error { - results = append(results, dataUnit) - return nil - }) - return results, err -} diff --git a/vendor/github.com/influxdata/telegraf/internal/snmp/config.go b/vendor/github.com/influxdata/telegraf/internal/snmp/config.go deleted file mode 100644 index e616e757..00000000 --- a/vendor/github.com/influxdata/telegraf/internal/snmp/config.go +++ /dev/null @@ -1,34 +0,0 @@ -package snmp - -import ( - "github.com/influxdata/telegraf/internal" -) - -type ClientConfig struct { - // Timeout to wait for a response. 
- Timeout internal.Duration `toml:"timeout"` - Retries int `toml:"retries"` - // Values: 1, 2, 3 - Version uint8 `toml:"version"` - - // Parameters for Version 1 & 2 - Community string `toml:"community"` - - // Parameters for Version 2 & 3 - MaxRepetitions uint8 `toml:"max_repetitions"` - - // Parameters for Version 3 - ContextName string `toml:"context_name"` - // Values: "noAuthNoPriv", "authNoPriv", "authPriv" - SecLevel string `toml:"sec_level"` - SecName string `toml:"sec_name"` - // Values: "MD5", "SHA", "". Default: "" - AuthProtocol string `toml:"auth_protocol"` - AuthPassword string `toml:"auth_password"` - // Values: "DES", "AES", "". Default: "" - PrivProtocol string `toml:"priv_protocol"` - PrivPassword string `toml:"priv_password"` - EngineID string `toml:"-"` - EngineBoots uint32 `toml:"-"` - EngineTime uint32 `toml:"-"` -} diff --git a/vendor/github.com/influxdata/telegraf/internal/snmp/wrapper.go b/vendor/github.com/influxdata/telegraf/internal/snmp/wrapper.go deleted file mode 100644 index 06552850..00000000 --- a/vendor/github.com/influxdata/telegraf/internal/snmp/wrapper.go +++ /dev/null @@ -1,188 +0,0 @@ -package snmp - -import ( - "fmt" - "net/url" - "strconv" - "strings" - - "github.com/gosnmp/gosnmp" -) - -// GosnmpWrapper wraps a *gosnmp.GoSNMP object so we can use it as a snmpConnection. -type GosnmpWrapper struct { - *gosnmp.GoSNMP -} - -// Host returns the value of GoSNMP.Target. -func (gsw GosnmpWrapper) Host() string { - return gsw.Target -} - -// Walk wraps GoSNMP.Walk() or GoSNMP.BulkWalk(), depending on whether the -// connection is using SNMPv1 or newer. -// Also, if any error is encountered, it will just once reconnect and try again. -func (gsw GosnmpWrapper) Walk(oid string, fn gosnmp.WalkFunc) error { - var err error - // On error, retry once. - // Unfortunately we can't distinguish between an error returned by gosnmp, and one returned by the walk function. 
- for i := 0; i < 2; i++ { - if gsw.Version == gosnmp.Version1 { - err = gsw.GoSNMP.Walk(oid, fn) - } else { - err = gsw.GoSNMP.BulkWalk(oid, fn) - } - if err == nil { - return nil - } - if err := gsw.GoSNMP.Connect(); err != nil { - return fmt.Errorf("reconnecting: %w", err) - } - } - return err -} - -// Get wraps GoSNMP.GET(). -// If any error is encountered, it will just once reconnect and try again. -func (gsw GosnmpWrapper) Get(oids []string) (*gosnmp.SnmpPacket, error) { - var err error - var pkt *gosnmp.SnmpPacket - for i := 0; i < 2; i++ { - pkt, err = gsw.GoSNMP.Get(oids) - if err == nil { - return pkt, nil - } - if err := gsw.GoSNMP.Connect(); err != nil { - return nil, fmt.Errorf("reconnecting: %w", err) - } - } - return nil, err -} - -func NewWrapper(s ClientConfig) (GosnmpWrapper, error) { - gs := GosnmpWrapper{&gosnmp.GoSNMP{}} - - gs.Timeout = s.Timeout.Duration - - gs.Retries = s.Retries - - switch s.Version { - case 3: - gs.Version = gosnmp.Version3 - case 2, 0: - gs.Version = gosnmp.Version2c - case 1: - gs.Version = gosnmp.Version1 - default: - return GosnmpWrapper{}, fmt.Errorf("invalid version") - } - - if s.Version < 3 { - if s.Community == "" { - gs.Community = "public" - } else { - gs.Community = s.Community - } - } - - gs.MaxRepetitions = s.MaxRepetitions - - if s.Version == 3 { - gs.ContextName = s.ContextName - - sp := &gosnmp.UsmSecurityParameters{} - gs.SecurityParameters = sp - gs.SecurityModel = gosnmp.UserSecurityModel - - switch strings.ToLower(s.SecLevel) { - case "noauthnopriv", "": - gs.MsgFlags = gosnmp.NoAuthNoPriv - case "authnopriv": - gs.MsgFlags = gosnmp.AuthNoPriv - case "authpriv": - gs.MsgFlags = gosnmp.AuthPriv - default: - return GosnmpWrapper{}, fmt.Errorf("invalid secLevel") - } - - sp.UserName = s.SecName - - switch strings.ToLower(s.AuthProtocol) { - case "md5": - sp.AuthenticationProtocol = gosnmp.MD5 - case "sha": - sp.AuthenticationProtocol = gosnmp.SHA - case "": - sp.AuthenticationProtocol = gosnmp.NoAuth - 
default: - return GosnmpWrapper{}, fmt.Errorf("invalid authProtocol") - } - - sp.AuthenticationPassphrase = s.AuthPassword - - switch strings.ToLower(s.PrivProtocol) { - case "des": - sp.PrivacyProtocol = gosnmp.DES - case "aes": - sp.PrivacyProtocol = gosnmp.AES - case "aes192": - sp.PrivacyProtocol = gosnmp.AES192 - case "aes192c": - sp.PrivacyProtocol = gosnmp.AES192C - case "aes256": - sp.PrivacyProtocol = gosnmp.AES256 - case "aes256c": - sp.PrivacyProtocol = gosnmp.AES256C - case "": - sp.PrivacyProtocol = gosnmp.NoPriv - default: - return GosnmpWrapper{}, fmt.Errorf("invalid privProtocol") - } - - sp.PrivacyPassphrase = s.PrivPassword - - sp.AuthoritativeEngineID = s.EngineID - - sp.AuthoritativeEngineBoots = s.EngineBoots - - sp.AuthoritativeEngineTime = s.EngineTime - } - return gs, nil -} - -// SetAgent takes a url (scheme://host:port) and sets the wrapped -// GoSNMP struct's corresponding fields. This shouldn't be called -// after using the wrapped GoSNMP struct, for example after -// connecting. 
-func (gs *GosnmpWrapper) SetAgent(agent string) error { - if !strings.Contains(agent, "://") { - agent = "udp://" + agent - } - - u, err := url.Parse(agent) - if err != nil { - return err - } - - switch u.Scheme { - case "tcp": - gs.Transport = "tcp" - case "", "udp": - gs.Transport = "udp" - default: - return fmt.Errorf("unsupported scheme: %v", u.Scheme) - } - - gs.Target = u.Hostname() - - portStr := u.Port() - if portStr == "" { - portStr = "161" - } - port, err := strconv.ParseUint(portStr, 10, 16) - if err != nil { - return fmt.Errorf("parsing port: %w", err) - } - gs.Port = uint16(port) - return nil -} diff --git a/vendor/github.com/influxdata/telegraf/plugins/inputs/dns_query/README.md b/vendor/github.com/influxdata/telegraf/plugins/inputs/dns_query/README.md new file mode 100644 index 00000000..dc8ddd90 --- /dev/null +++ b/vendor/github.com/influxdata/telegraf/plugins/inputs/dns_query/README.md @@ -0,0 +1,73 @@ +# DNS Query Input Plugin + +The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wikipedia.org/wiki/Dig_\(command\)) + +### Configuration: +```toml +# Query given DNS server and gives statistics +[[inputs.dns_query]] + ## servers to query + servers = ["8.8.8.8"] + + ## Network is the network protocol name. + # network = "udp" + + ## Domains or subdomains to query. + # domains = ["."] + + ## Query record type. + ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. + # record_type = "A" + + ## Dns server port. + # port = 53 + + ## Query timeout in seconds. 
+ # timeout = 2 +``` + +### Metrics: + +- dns_query + - tags: + - server + - domain + - record_type + - result + - rcode + - fields: + - query_time_ms (float) + - result_code (int, success = 0, timeout = 1, error = 2) + - rcode_value (int) + + +### Rcode Descriptions +|rcode_value|rcode|Description| +|---|-----------|-----------------------------------| +|0 | NoError | No Error | +|1 | FormErr | Format Error | +|2 | ServFail | Server Failure | +|3 | NXDomain | Non-Existent Domain | +|4 | NotImp | Not Implemented | +|5 | Refused | Query Refused | +|6 | YXDomain | Name Exists when it should not | +|7 | YXRRSet | RR Set Exists when it should not | +|8 | NXRRSet | RR Set that should exist does not | +|9 | NotAuth | Server Not Authoritative for zone | +|10 | NotZone | Name not contained in zone | +|16 | BADSIG | TSIG Signature Failure | +|16 | BADVERS | Bad OPT Version | +|17 | BADKEY | Key not recognized | +|18 | BADTIME | Signature out of time window | +|19 | BADMODE | Bad TKEY Mode | +|20 | BADNAME | Duplicate key name | +|21 | BADALG | Algorithm not supported | +|22 | BADTRUNC | Bad Truncation | +|23 | BADCOOKIE | Bad/missing Server Cookie | + + +### Example Output: + +``` +dns_query,domain=google.com,rcode=NOERROR,record_type=A,result=success,server=127.0.0.1 rcode_value=0i,result_code=0i,query_time_ms=0.13746 1550020750001000000 +``` diff --git a/vendor/github.com/influxdata/telegraf/plugins/inputs/dns_query/dns_query.go b/vendor/github.com/influxdata/telegraf/plugins/inputs/dns_query/dns_query.go new file mode 100644 index 00000000..c5657277 --- /dev/null +++ b/vendor/github.com/influxdata/telegraf/plugins/inputs/dns_query/dns_query.go @@ -0,0 +1,215 @@ +package dns_query + +import ( + "fmt" + "net" + "strconv" + "sync" + "time" + + "github.com/miekg/dns" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type ResultType uint64 + +const ( + Success ResultType = 0 + Timeout = 1 + Error = 2 +) + +type DnsQuery struct { + // 
Domains or subdomains to query + Domains []string + + // Network protocol name + Network string + + // Server to query + Servers []string + + // Record type + RecordType string `toml:"record_type"` + + // DNS server port number + Port int + + // Dns query timeout in seconds. 0 means no timeout + Timeout int +} + +var sampleConfig = ` + ## servers to query + servers = ["8.8.8.8"] + + ## Network is the network protocol name. + # network = "udp" + + ## Domains or subdomains to query. + # domains = ["."] + + ## Query record type. + ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. + # record_type = "A" + + ## Dns server port. + # port = 53 + + ## Query timeout in seconds. + # timeout = 2 +` + +func (d *DnsQuery) SampleConfig() string { + return sampleConfig +} + +func (d *DnsQuery) Description() string { + return "Query given DNS server and gives statistics" +} +func (d *DnsQuery) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + d.setDefaultValues() + + for _, domain := range d.Domains { + for _, server := range d.Servers { + wg.Add(1) + go func(domain, server string) { + fields := make(map[string]interface{}, 2) + tags := map[string]string{ + "server": server, + "domain": domain, + "record_type": d.RecordType, + } + + dnsQueryTime, rcode, err := d.getDnsQueryTime(domain, server) + if rcode >= 0 { + tags["rcode"] = dns.RcodeToString[rcode] + fields["rcode_value"] = rcode + } + if err == nil { + setResult(Success, fields, tags) + fields["query_time_ms"] = dnsQueryTime + } else if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() { + setResult(Timeout, fields, tags) + } else if err != nil { + setResult(Error, fields, tags) + acc.AddError(err) + } + + acc.AddFields("dns_query", fields, tags) + + wg.Done() + }(domain, server) + } + } + + wg.Wait() + return nil +} + +func (d *DnsQuery) setDefaultValues() { + if d.Network == "" { + d.Network = "udp" + } + + if len(d.RecordType) == 0 { + d.RecordType = "NS" + } + + if len(d.Domains) 
== 0 { + d.Domains = []string{"."} + d.RecordType = "NS" + } + + if d.Port == 0 { + d.Port = 53 + } + + if d.Timeout == 0 { + d.Timeout = 2 + } +} + +func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, int, error) { + dnsQueryTime := float64(0) + + c := new(dns.Client) + c.ReadTimeout = time.Duration(d.Timeout) * time.Second + c.Net = d.Network + + m := new(dns.Msg) + recordType, err := d.parseRecordType() + if err != nil { + return dnsQueryTime, -1, err + } + m.SetQuestion(dns.Fqdn(domain), recordType) + m.RecursionDesired = true + + r, rtt, err := c.Exchange(m, net.JoinHostPort(server, strconv.Itoa(d.Port))) + if err != nil { + return dnsQueryTime, -1, err + } + if r.Rcode != dns.RcodeSuccess { + return dnsQueryTime, r.Rcode, fmt.Errorf("Invalid answer (%s) from %s after %s query for %s", dns.RcodeToString[r.Rcode], server, d.RecordType, domain) + } + dnsQueryTime = float64(rtt.Nanoseconds()) / 1e6 + return dnsQueryTime, r.Rcode, nil +} + +func (d *DnsQuery) parseRecordType() (uint16, error) { + var recordType uint16 + var err error + + switch d.RecordType { + case "A": + recordType = dns.TypeA + case "AAAA": + recordType = dns.TypeAAAA + case "ANY": + recordType = dns.TypeANY + case "CNAME": + recordType = dns.TypeCNAME + case "MX": + recordType = dns.TypeMX + case "NS": + recordType = dns.TypeNS + case "PTR": + recordType = dns.TypePTR + case "SOA": + recordType = dns.TypeSOA + case "SPF": + recordType = dns.TypeSPF + case "SRV": + recordType = dns.TypeSRV + case "TXT": + recordType = dns.TypeTXT + default: + err = fmt.Errorf("Record type %s not recognized", d.RecordType) + } + + return recordType, err +} + +func setResult(result ResultType, fields map[string]interface{}, tags map[string]string) { + var tag string + switch result { + case Success: + tag = "success" + case Timeout: + tag = "timeout" + case Error: + tag = "error" + } + + tags["result"] = tag + fields["result_code"] = uint64(result) +} + +func init() { + 
inputs.Add("dns_query", func() telegraf.Input { + return &DnsQuery{} + }) +} diff --git a/vendor/github.com/influxdata/telegraf/plugins/inputs/http_response/README.md b/vendor/github.com/influxdata/telegraf/plugins/inputs/http_response/README.md new file mode 100644 index 00000000..81b512e8 --- /dev/null +++ b/vendor/github.com/influxdata/telegraf/plugins/inputs/http_response/README.md @@ -0,0 +1,120 @@ +# HTTP Response Input Plugin + +This input plugin checks HTTP/HTTPS connections. + +### Configuration: + +```toml +# HTTP/HTTPS request given an address a method and a timeout +[[inputs.http_response]] + ## address is Deprecated in 1.12, use 'urls' + + ## List of urls to query. + # urls = ["http://localhost"] + + ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set) + # http_proxy = "http://localhost:8888" + + ## Set response_timeout (default 5 seconds) + # response_timeout = "5s" + + ## HTTP Request Method + # method = "GET" + + ## Whether to follow redirects from the server (defaults to false) + # follow_redirects = false + + ## Optional file with Bearer token + ## file content is added as an Authorization header + # bearer_token = "/path/to/file" + + ## Optional HTTP Basic Auth Credentials + # username = "username" + # password = "pa$$word" + + ## Optional HTTP Request Body + # body = ''' + # {'fake':'data'} + # ''' + + ## Optional name of the field that will contain the body of the response. + ## By default it is set to an empty String indicating that the body's content won't be added + # response_body_field = '' + + ## Maximum allowed HTTP response body size in bytes. + ## 0 means to use the default of 32MiB. 
+ ## If the response body size exceeds this limit a "body_read_error" will be raised + # response_body_max_size = "32MiB" + + ## Optional substring or regex match in body of the response (case sensitive) + # response_string_match = "\"service_status\": \"up\"" + # response_string_match = "ok" + # response_string_match = "\".*_status\".?:.?\"up\"" + + ## Expected response status code. + ## The status code of the response is compared to this value. If they match, the field + ## "response_status_code_match" will be 1, otherwise it will be 0. If the + ## expected status code is 0, the check is disabled and the field won't be added. + # response_status_code = 0 + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + ## Use the given name as the SNI server name on each URL + # tls_server_name = "" + + ## HTTP Request Headers (all values must be strings) + # [inputs.http_response.headers] + # Host = "github.com" + + ## Optional setting to map response http headers into tags + ## If the http header is not present on the request, no corresponding tag will be added + ## If multiple instances of the http header are present, only the first value will be used + # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} + + ## Interface to use when dialing an address + # interface = "eth0" +``` + +### Metrics: + +- http_response + - tags: + - server (target URL) + - method (request method) + - status_code (response status code) + - result ([see below](#result--result_code)) + - fields: + - response_time (float, seconds) + - content_length (int, response body length) + - response_string_match (int, 0 = mismatch / body read error, 1 = match) + - response_status_code_match (int, 0 = mismatch, 1 = match) + - http_response_code (int, response status code) + - result_type (string, deprecated in 1.6: use `result` tag and 
`result_code` field) + - result_code (int, [see below](#result--result_code)) + +#### `result` / `result_code` + +Upon finishing polling the target server, the plugin registers the result of the operation in the `result` tag, and adds a numeric field called `result_code` corresponding with that tag value. + +This tag is used to expose network and plugin errors. HTTP errors are considered a successful connection. + +|Tag value |Corresponding field value|Description| +-------------------------------|-------------------------|-----------| +|success | 0 |The HTTP request completed, even if the HTTP code represents an error| +|response_string_mismatch | 1 |The option `response_string_match` was used, and the body of the response didn't match the regex. HTTP errors with content in their body (like 4xx, 5xx) will trigger this error| +|body_read_error | 2 |The option `response_string_match` was used, but the plugin wasn't able to read the body of the response. Responses with empty bodies (like 3xx, HEAD, etc) will trigger this error. Or the option `response_body_field` was used and the content of the response body was not a valid utf-8. 
Or the size of the body of the response exceeded the `response_body_max_size` | +|connection_failed | 3 |Catch all for any network error not specifically handled by the plugin| +|timeout | 4 |The plugin timed out while awaiting the HTTP connection to complete| +|dns_error | 5 |There was a DNS error while attempting to connect to the host| +|response_status_code_mismatch | 6 |The option `response_status_code_match` was used, and the status code of the response didn't match the value.| + + +### Example Output: + +``` +http_response,method=GET,result=success,server=http://github.com,status_code=200 content_length=87878i,http_response_code=200i,response_time=0.937655534,result_code=0i,result_type="success" 1565839598000000000 +``` diff --git a/vendor/github.com/influxdata/telegraf/plugins/inputs/http_response/http_response.go b/vendor/github.com/influxdata/telegraf/plugins/inputs/http_response/http_response.go new file mode 100644 index 00000000..01ce8140 --- /dev/null +++ b/vendor/github.com/influxdata/telegraf/plugins/inputs/http_response/http_response.go @@ -0,0 +1,479 @@ +package http_response + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const ( + // defaultResponseBodyMaxSize is the default maximum response body size, in bytes. + // if the response body is over this size, we will raise a body_read_error. 
+ defaultResponseBodyMaxSize = 32 * 1024 * 1024 +) + +// HTTPResponse struct +type HTTPResponse struct { + Address string // deprecated in 1.12 + URLs []string `toml:"urls"` + HTTPProxy string `toml:"http_proxy"` + Body string + Method string + ResponseTimeout internal.Duration + HTTPHeaderTags map[string]string `toml:"http_header_tags"` + Headers map[string]string + FollowRedirects bool + // Absolute path to file with Bearer token + BearerToken string `toml:"bearer_token"` + ResponseBodyField string `toml:"response_body_field"` + ResponseBodyMaxSize internal.Size `toml:"response_body_max_size"` + ResponseStringMatch string + ResponseStatusCode int + Interface string + // HTTP Basic Auth Credentials + Username string `toml:"username"` + Password string `toml:"password"` + tls.ClientConfig + + Log telegraf.Logger + + compiledStringMatch *regexp.Regexp + client httpClient +} + +type httpClient interface { + Do(req *http.Request) (*http.Response, error) +} + +// Description returns the plugin Description +func (h *HTTPResponse) Description() string { + return "HTTP/HTTPS request given an address a method and a timeout" +} + +var sampleConfig = ` + ## Deprecated in 1.12, use 'urls' + ## Server address (default http://localhost) + # address = "http://localhost" + + ## List of urls to query. 
+ # urls = ["http://localhost"] + + ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set) + # http_proxy = "http://localhost:8888" + + ## Set response_timeout (default 5 seconds) + # response_timeout = "5s" + + ## HTTP Request Method + # method = "GET" + + ## Whether to follow redirects from the server (defaults to false) + # follow_redirects = false + + ## Optional file with Bearer token + ## file content is added as an Authorization header + # bearer_token = "/path/to/file" + + ## Optional HTTP Basic Auth Credentials + # username = "username" + # password = "pa$$word" + + ## Optional HTTP Request Body + # body = ''' + # {'fake':'data'} + # ''' + + ## Optional name of the field that will contain the body of the response. + ## By default it is set to an empty String indicating that the body's content won't be added + # response_body_field = '' + + ## Maximum allowed HTTP response body size in bytes. + ## 0 means to use the default of 32MiB. + ## If the response body size exceeds this limit a "body_read_error" will be raised + # response_body_max_size = "32MiB" + + ## Optional substring or regex match in body of the response (case sensitive) + # response_string_match = "\"service_status\": \"up\"" + # response_string_match = "ok" + # response_string_match = "\".*_status\".?:.?\"up\"" + + ## Expected response status code. + ## The status code of the response is compared to this value. If they match, the field + ## "response_status_code_match" will be 1, otherwise it will be 0. If the + ## expected status code is 0, the check is disabled and the field won't be added. 
+ # response_status_code = 0 + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## HTTP Request Headers (all values must be strings) + # [inputs.http_response.headers] + # Host = "github.com" + + ## Optional setting to map response http headers into tags + ## If the http header is not present on the request, no corresponding tag will be added + ## If multiple instances of the http header are present, only the first value will be used + # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} + + ## Interface to use when dialing an address + # interface = "eth0" +` + +// SampleConfig returns the plugin SampleConfig +func (h *HTTPResponse) SampleConfig() string { + return sampleConfig +} + +// ErrRedirectAttempted indicates that a redirect occurred +var ErrRedirectAttempted = errors.New("redirect") + +// Set the proxy. A configured proxy overwrites the system wide proxy. 
+func getProxyFunc(http_proxy string) func(*http.Request) (*url.URL, error) { + if http_proxy == "" { + return http.ProxyFromEnvironment + } + proxyURL, err := url.Parse(http_proxy) + if err != nil { + return func(_ *http.Request) (*url.URL, error) { + return nil, errors.New("bad proxy: " + err.Error()) + } + } + return func(r *http.Request) (*url.URL, error) { + return proxyURL, nil + } +} + +// createHttpClient creates an http client which will timeout at the specified +// timeout period and can follow redirects if specified +func (h *HTTPResponse) createHttpClient() (*http.Client, error) { + tlsCfg, err := h.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + dialer := &net.Dialer{} + + if h.Interface != "" { + dialer.LocalAddr, err = localAddress(h.Interface) + if err != nil { + return nil, err + } + } + + client := &http.Client{ + Transport: &http.Transport{ + Proxy: getProxyFunc(h.HTTPProxy), + DialContext: dialer.DialContext, + DisableKeepAlives: true, + TLSClientConfig: tlsCfg, + }, + Timeout: h.ResponseTimeout.Duration, + } + + if h.FollowRedirects == false { + client.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + } + } + return client, nil +} + +func localAddress(interfaceName string) (net.Addr, error) { + i, err := net.InterfaceByName(interfaceName) + if err != nil { + return nil, err + } + + addrs, err := i.Addrs() + if err != nil { + return nil, err + } + + for _, addr := range addrs { + if naddr, ok := addr.(*net.IPNet); ok { + // leaving port set to zero to let kernel pick + return &net.TCPAddr{IP: naddr.IP}, nil + } + } + + return nil, fmt.Errorf("cannot create local address for interface %q", interfaceName) +} + +func setResult(result_string string, fields map[string]interface{}, tags map[string]string) { + result_codes := map[string]int{ + "success": 0, + "response_string_mismatch": 1, + "body_read_error": 2, + "connection_failed": 3, + "timeout": 4, + "dns_error": 5, + 
"response_status_code_mismatch": 6, + } + + tags["result"] = result_string + fields["result_type"] = result_string + fields["result_code"] = result_codes[result_string] +} + +func setError(err error, fields map[string]interface{}, tags map[string]string) error { + if timeoutError, ok := err.(net.Error); ok && timeoutError.Timeout() { + setResult("timeout", fields, tags) + return timeoutError + } + + urlErr, isUrlErr := err.(*url.Error) + if !isUrlErr { + return nil + } + + opErr, isNetErr := (urlErr.Err).(*net.OpError) + if isNetErr { + switch e := (opErr.Err).(type) { + case (*net.DNSError): + setResult("dns_error", fields, tags) + return e + case (*net.ParseError): + // Parse error has to do with parsing of IP addresses, so we + // group it with address errors + setResult("address_error", fields, tags) + return e + } + } + + return nil +} + +// HTTPGather gathers all fields and returns any errors it encounters +func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string]string, error) { + // Prepare fields and tags + fields := make(map[string]interface{}) + tags := map[string]string{"server": u, "method": h.Method} + + var body io.Reader + if h.Body != "" { + body = strings.NewReader(h.Body) + } + request, err := http.NewRequest(h.Method, u, body) + if err != nil { + return nil, nil, err + } + + if h.BearerToken != "" { + token, err := ioutil.ReadFile(h.BearerToken) + if err != nil { + return nil, nil, err + } + bearer := "Bearer " + strings.Trim(string(token), "\n") + request.Header.Add("Authorization", bearer) + } + + for key, val := range h.Headers { + request.Header.Add(key, val) + if key == "Host" { + request.Host = val + } + } + + if h.Username != "" || h.Password != "" { + request.SetBasicAuth(h.Username, h.Password) + } + + // Start Timer + start := time.Now() + resp, err := h.client.Do(request) + response_time := time.Since(start).Seconds() + + // If an error in returned, it means we are dealing with a network error, as + // HTTP 
error codes do not generate errors in the net/http library + if err != nil { + // Log error + h.Log.Debugf("Network error while polling %s: %s", u, err.Error()) + + // Get error details + netErr := setError(err, fields, tags) + + // If recognize the returned error, get out + if netErr != nil { + return fields, tags, nil + } + + // Any error not recognized by `set_error` is considered a "connection_failed" + setResult("connection_failed", fields, tags) + return fields, tags, nil + } + + if _, ok := fields["response_time"]; !ok { + fields["response_time"] = response_time + } + + // This function closes the response body, as + // required by the net/http library + defer resp.Body.Close() + + // Add the response headers + for headerName, tag := range h.HTTPHeaderTags { + headerValues, foundHeader := resp.Header[headerName] + if foundHeader && len(headerValues) > 0 { + tags[tag] = headerValues[0] + } + } + + // Set log the HTTP response code + tags["status_code"] = strconv.Itoa(resp.StatusCode) + fields["http_response_code"] = resp.StatusCode + + if h.ResponseBodyMaxSize.Size == 0 { + h.ResponseBodyMaxSize.Size = defaultResponseBodyMaxSize + } + bodyBytes, err := ioutil.ReadAll(io.LimitReader(resp.Body, h.ResponseBodyMaxSize.Size+1)) + // Check first if the response body size exceeds the limit. + if err == nil && int64(len(bodyBytes)) > h.ResponseBodyMaxSize.Size { + h.setBodyReadError("The body of the HTTP Response is too large", bodyBytes, fields, tags) + return fields, tags, nil + } else if err != nil { + h.setBodyReadError(fmt.Sprintf("Failed to read body of HTTP Response : %s", err.Error()), bodyBytes, fields, tags) + return fields, tags, nil + } + + // Add the body of the response if expected + if len(h.ResponseBodyField) > 0 { + // Check that the content of response contains only valid utf-8 characters. 
+ if !utf8.Valid(bodyBytes) { + h.setBodyReadError("The body of the HTTP Response is not a valid utf-8 string", bodyBytes, fields, tags) + return fields, tags, nil + } + fields[h.ResponseBodyField] = string(bodyBytes) + } + fields["content_length"] = len(bodyBytes) + + var success = true + + // Check the response for a regex + if h.ResponseStringMatch != "" { + if h.compiledStringMatch.Match(bodyBytes) { + fields["response_string_match"] = 1 + } else { + success = false + setResult("response_string_mismatch", fields, tags) + fields["response_string_match"] = 0 + } + } + + // Check the response status code + if h.ResponseStatusCode > 0 { + if resp.StatusCode == h.ResponseStatusCode { + fields["response_status_code_match"] = 1 + } else { + success = false + setResult("response_status_code_mismatch", fields, tags) + fields["response_status_code_match"] = 0 + } + } + + if success { + setResult("success", fields, tags) + } + + return fields, tags, nil +} + +// Set result in case of a body read error +func (h *HTTPResponse) setBodyReadError(error_msg string, bodyBytes []byte, fields map[string]interface{}, tags map[string]string) { + h.Log.Debugf(error_msg) + setResult("body_read_error", fields, tags) + fields["content_length"] = len(bodyBytes) + if h.ResponseStringMatch != "" { + fields["response_string_match"] = 0 + } +} + +// Gather gets all metric fields and tags and returns any errors it encounters +func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error { + // Compile the body regex if it exist + if h.compiledStringMatch == nil { + var err error + h.compiledStringMatch, err = regexp.Compile(h.ResponseStringMatch) + if err != nil { + return fmt.Errorf("Failed to compile regular expression %s : %s", h.ResponseStringMatch, err) + } + } + + // Set default values + if h.ResponseTimeout.Duration < time.Second { + h.ResponseTimeout.Duration = time.Second * 5 + } + // Check send and expected string + if h.Method == "" { + h.Method = "GET" + } + + if len(h.URLs) == 0 
{ + if h.Address == "" { + h.URLs = []string{"http://localhost"} + } else { + h.Log.Warn("'address' deprecated in telegraf 1.12, please use 'urls'") + h.URLs = []string{h.Address} + } + } + + if h.client == nil { + client, err := h.createHttpClient() + if err != nil { + return err + } + h.client = client + } + + for _, u := range h.URLs { + addr, err := url.Parse(u) + if err != nil { + acc.AddError(err) + continue + } + + if addr.Scheme != "http" && addr.Scheme != "https" { + acc.AddError(errors.New("Only http and https are supported")) + continue + } + + // Prepare data + var fields map[string]interface{} + var tags map[string]string + + // Gather data + fields, tags, err = h.httpGather(u) + if err != nil { + acc.AddError(err) + continue + } + + // Add metrics + acc.AddFields("http_response", fields, tags) + } + + return nil +} + +func init() { + inputs.Add("http_response", func() telegraf.Input { + return &HTTPResponse{} + }) +} diff --git a/vendor/github.com/influxdata/telegraf/plugins/inputs/net_response/README.md b/vendor/github.com/influxdata/telegraf/plugins/inputs/net_response/README.md new file mode 100644 index 00000000..2c492408 --- /dev/null +++ b/vendor/github.com/influxdata/telegraf/plugins/inputs/net_response/README.md @@ -0,0 +1,56 @@ +# Network Response Input Plugin + +The input plugin test UDP/TCP connections response time and can optional +verify text in the response. + +### Configuration: + +```toml +# Collect response time of a TCP or UDP connection +[[inputs.net_response]] + ## Protocol, must be "tcp" or "udp" + ## NOTE: because the "udp" protocol does not respond to requests, it requires + ## a send/expect string pair (see below). + protocol = "tcp" + ## Server address (default localhost) + address = "localhost:80" + + ## Set timeout + # timeout = "1s" + + ## Set read timeout (only used if expecting a response) + # read_timeout = "1s" + + ## The following options are required for UDP checks. For TCP, they are + ## optional. 
The plugin will send the given string to the server and then + ## expect to receive the given 'expect' string back. + ## string sent to the server + # send = "ssh" + ## expected string in answer + # expect = "ssh" + + ## Uncomment to remove deprecated fields; recommended for new deploys + # fielddrop = ["result_type", "string_found"] +``` + +### Metrics: + +- net_response + - tags: + - server + - port + - protocol + - result + - fields: + - response_time (float, seconds) + - result_code (int, success = 0, timeout = 1, connection_failed = 2, read_failed = 3, string_mismatch = 4) + - result_type (string) **DEPRECATED in 1.7; use result tag** + - string_found (boolean) **DEPRECATED in 1.4; use result tag** + +### Example Output: + +``` +net_response,port=8086,protocol=tcp,result=success,server=localhost response_time=0.000092948,result_code=0i,result_type="success" 1525820185000000000 +net_response,port=8080,protocol=tcp,result=connection_failed,server=localhost result_code=2i,result_type="connection_failed" 1525820088000000000 +net_response,port=8080,protocol=udp,result=read_failed,server=localhost result_code=3i,result_type="read_failed",string_found=false 1525820088000000000 +``` diff --git a/vendor/github.com/influxdata/telegraf/plugins/inputs/net_response/net_response.go b/vendor/github.com/influxdata/telegraf/plugins/inputs/net_response/net_response.go new file mode 100644 index 00000000..023b4405 --- /dev/null +++ b/vendor/github.com/influxdata/telegraf/plugins/inputs/net_response/net_response.go @@ -0,0 +1,271 @@ +package net_response + +import ( + "bufio" + "errors" + "net" + "net/textproto" + "regexp" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type ResultType uint64 + +const ( + Success ResultType = 0 + Timeout = 1 + ConnectionFailed = 2 + ReadFailed = 3 + StringMismatch = 4 +) + +// NetResponse struct +type NetResponse struct { + Address string + Timeout 
internal.Duration + ReadTimeout internal.Duration + Send string + Expect string + Protocol string +} + +var description = "Collect response time of a TCP or UDP connection" + +// Description will return a short string to explain what the plugin does. +func (*NetResponse) Description() string { + return description +} + +var sampleConfig = ` + ## Protocol, must be "tcp" or "udp" + ## NOTE: because the "udp" protocol does not respond to requests, it requires + ## a send/expect string pair (see below). + protocol = "tcp" + ## Server address (default localhost) + address = "localhost:80" + + ## Set timeout + # timeout = "1s" + + ## Set read timeout (only used if expecting a response) + # read_timeout = "1s" + + ## The following options are required for UDP checks. For TCP, they are + ## optional. The plugin will send the given string to the server and then + ## expect to receive the given 'expect' string back. + ## string sent to the server + # send = "ssh" + ## expected string in answer + # expect = "ssh" + + ## Uncomment to remove deprecated fields + # fielddrop = ["result_type", "string_found"] +` + +// SampleConfig will return a complete configuration example with details about each field. +func (*NetResponse) SampleConfig() string { + return sampleConfig +} + +// TCPGather will execute if there are TCP tests defined in the configuration. 
+// It will return a map[string]interface{} for fields and a map[string]string for tags +func (n *NetResponse) TCPGather() (tags map[string]string, fields map[string]interface{}) { + // Prepare returns + tags = make(map[string]string) + fields = make(map[string]interface{}) + // Start Timer + start := time.Now() + // Connecting + conn, err := net.DialTimeout("tcp", n.Address, n.Timeout.Duration) + // Stop timer + responseTime := time.Since(start).Seconds() + // Handle error + if err != nil { + if e, ok := err.(net.Error); ok && e.Timeout() { + setResult(Timeout, fields, tags, n.Expect) + } else { + setResult(ConnectionFailed, fields, tags, n.Expect) + } + return tags, fields + } + defer conn.Close() + // Send string if needed + if n.Send != "" { + msg := []byte(n.Send) + conn.Write(msg) + // Stop timer + responseTime = time.Since(start).Seconds() + } + // Read string if needed + if n.Expect != "" { + // Set read timeout + conn.SetReadDeadline(time.Now().Add(n.ReadTimeout.Duration)) + // Prepare reader + reader := bufio.NewReader(conn) + tp := textproto.NewReader(reader) + // Read + data, err := tp.ReadLine() + // Stop timer + responseTime = time.Since(start).Seconds() + // Handle error + if err != nil { + setResult(ReadFailed, fields, tags, n.Expect) + } else { + // Looking for string in answer + RegEx := regexp.MustCompile(`.*` + n.Expect + `.*`) + find := RegEx.FindString(string(data)) + if find != "" { + setResult(Success, fields, tags, n.Expect) + } else { + setResult(StringMismatch, fields, tags, n.Expect) + } + } + } else { + setResult(Success, fields, tags, n.Expect) + } + fields["response_time"] = responseTime + return tags, fields +} + +// UDPGather will execute if there are UDP tests defined in the configuration. 
+// It will return a map[string]interface{} for fields and a map[string]string for tags +func (n *NetResponse) UDPGather() (tags map[string]string, fields map[string]interface{}) { + // Prepare returns + tags = make(map[string]string) + fields = make(map[string]interface{}) + // Start Timer + start := time.Now() + // Resolving + udpAddr, err := net.ResolveUDPAddr("udp", n.Address) + // Handle error + if err != nil { + setResult(ConnectionFailed, fields, tags, n.Expect) + return tags, fields + } + // Connecting + conn, err := net.DialUDP("udp", nil, udpAddr) + // Handle error + if err != nil { + setResult(ConnectionFailed, fields, tags, n.Expect) + return tags, fields + } + defer conn.Close() + // Send string + msg := []byte(n.Send) + conn.Write(msg) + // Read string + // Set read timeout + conn.SetReadDeadline(time.Now().Add(n.ReadTimeout.Duration)) + // Read + buf := make([]byte, 1024) + _, _, err = conn.ReadFromUDP(buf) + // Stop timer + responseTime := time.Since(start).Seconds() + // Handle error + if err != nil { + setResult(ReadFailed, fields, tags, n.Expect) + return tags, fields + } + + // Looking for string in answer + RegEx := regexp.MustCompile(`.*` + n.Expect + `.*`) + find := RegEx.FindString(string(buf)) + if find != "" { + setResult(Success, fields, tags, n.Expect) + } else { + setResult(StringMismatch, fields, tags, n.Expect) + } + + fields["response_time"] = responseTime + + return tags, fields +} + +// Gather is called by telegraf when the plugin is executed on its interval. +// It will call either UDPGather or TCPGather based on the configuration and +// also fill an Accumulator that is supplied. 
+func (n *NetResponse) Gather(acc telegraf.Accumulator) error { + // Set default values + if n.Timeout.Duration == 0 { + n.Timeout.Duration = time.Second + } + if n.ReadTimeout.Duration == 0 { + n.ReadTimeout.Duration = time.Second + } + // Check send and expected string + if n.Protocol == "udp" && n.Send == "" { + return errors.New("Send string cannot be empty") + } + if n.Protocol == "udp" && n.Expect == "" { + return errors.New("Expected string cannot be empty") + } + // Prepare host and port + host, port, err := net.SplitHostPort(n.Address) + if err != nil { + return err + } + if host == "" { + n.Address = "localhost:" + port + } + if port == "" { + return errors.New("Bad port") + } + // Prepare data + tags := map[string]string{"server": host, "port": port} + var fields map[string]interface{} + var returnTags map[string]string + // Gather data + if n.Protocol == "tcp" { + returnTags, fields = n.TCPGather() + tags["protocol"] = "tcp" + } else if n.Protocol == "udp" { + returnTags, fields = n.UDPGather() + tags["protocol"] = "udp" + } else { + return errors.New("Bad protocol") + } + // Merge the tags + for k, v := range returnTags { + tags[k] = v + } + // Add metrics + acc.AddFields("net_response", fields, tags) + return nil +} + +func setResult(result ResultType, fields map[string]interface{}, tags map[string]string, expect string) { + var tag string + switch result { + case Success: + tag = "success" + case Timeout: + tag = "timeout" + case ConnectionFailed: + tag = "connection_failed" + case ReadFailed: + tag = "read_failed" + case StringMismatch: + tag = "string_mismatch" + } + + tags["result"] = tag + fields["result_code"] = uint64(result) + + // deprecated in 1.7; use result tag + fields["result_type"] = tag + + // deprecated in 1.4; use result tag + if expect != "" { + fields["string_found"] = result == Success + } +} + +func init() { + inputs.Add("net_response", func() telegraf.Input { + return &NetResponse{} + }) +} diff --git 
a/vendor/github.com/influxdata/telegraf/plugins/inputs/snmp/README.md b/vendor/github.com/influxdata/telegraf/plugins/inputs/snmp/README.md deleted file mode 100644 index c4aa3367..00000000 --- a/vendor/github.com/influxdata/telegraf/plugins/inputs/snmp/README.md +++ /dev/null @@ -1,238 +0,0 @@ -# SNMP Input Plugin - -The `snmp` input plugin uses polling to gather metrics from SNMP agents. -Support for gathering individual OIDs as well as complete SNMP tables is -included. - -### Prerequisites - -This plugin uses the `snmptable` and `snmptranslate` programs from the -[net-snmp][] project. These tools will need to be installed into the `PATH` in -order to be located. Other utilities from the net-snmp project may be useful -for troubleshooting, but are not directly used by the plugin. - -These programs will load available MIBs on the system. Typically the default -directory for MIBs is `/usr/share/snmp/mibs`, but if your MIBs are in a -different location you may need to make the paths known to net-snmp. The -location of these files can be configured in the `snmp.conf` or via the -`MIBDIRS` environment variable. See [`man 1 snmpcmd`][man snmpcmd] for more -information. - -### Configuration -```toml -[[inputs.snmp]] - ## Agent addresses to retrieve values from. - ## example: agents = ["udp://127.0.0.1:161"] - ## agents = ["tcp://127.0.0.1:161"] - agents = ["udp://127.0.0.1:161"] - - ## Timeout for each request. - # timeout = "5s" - - ## SNMP version; can be 1, 2, or 3. - # version = 2 - - ## SNMP community string. - # community = "public" - - ## Agent host tag - # agent_host_tag = "agent_host" - - ## Number of retries to attempt. - # retries = 3 - - ## The GETBULK max-repetitions parameter. - # max_repetitions = 10 - - ## SNMPv3 authentication and encryption options. - ## - ## Security Name. - # sec_name = "myuser" - ## Authentication protocol; one of "MD5", "SHA", or "". - # auth_protocol = "MD5" - ## Authentication password. 
- # auth_password = "pass" - ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". - # sec_level = "authNoPriv" - ## Context Name. - # context_name = "" - ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C", or "". - ### Protocols "AES192", "AES192", "AES256", and "AES256C" require the underlying net-snmp tools - ### to be compiled with --enable-blumenthal-aes (http://www.net-snmp.org/docs/INSTALL.html) - # priv_protocol = "" - ## Privacy password used for encrypted messages. - # priv_password = "" - - ## Add fields and tables defining the variables you wish to collect. This - ## example collects the system uptime and interface variables. Reference the - ## full plugin documentation for configuration details. - [[inputs.snmp.field]] - oid = "RFC1213-MIB::sysUpTime.0" - name = "uptime" - - [[inputs.snmp.field]] - oid = "RFC1213-MIB::sysName.0" - name = "source" - is_tag = true - - [[inputs.snmp.table]] - oid = "IF-MIB::ifTable" - name = "interface" - inherit_tags = ["source"] - - [[inputs.snmp.table.field]] - oid = "IF-MIB::ifDescr" - name = "ifDescr" - is_tag = true -``` - -#### Configure SNMP Requests - -This plugin provides two methods for configuring the SNMP requests: `fields` -and `tables`. Use the `field` option to gather single ad-hoc variables. -To collect SNMP tables, use the `table` option. - -##### Field - -Use a `field` to collect a variable by OID. Requests specified with this -option operate similar to the `snmpget` utility. - -```toml -[[inputs.snmp]] - # ... snip ... - - [[inputs.snmp.field]] - ## Object identifier of the variable as a numeric or textual OID. - oid = "RFC1213-MIB::sysName.0" - - ## Name of the field or tag to create. If not specified, it defaults to - ## the value of 'oid'. If 'oid' is numeric, an attempt to translate the - ## numeric OID into a textual OID will be made. 
- # name = "" - - ## If true the variable will be added as a tag, otherwise a field will be - ## created. - # is_tag = false - - ## Apply one of the following conversions to the variable value: - ## float(X): Convert the input value into a float and divides by the - ## Xth power of 10. Effectively just moves the decimal left - ## X places. For example a value of `123` with `float(2)` - ## will result in `1.23`. - ## float: Convert the value into a float with no adjustment. Same - ## as `float(0)`. - ## int: Convert the value into an integer. - ## hwaddr: Convert the value to a MAC address. - ## ipaddr: Convert the value to an IP address. - ## hextoint:X:Y Convert a hex string value to integer. Where X is the Endian - ## and Y the bit size. For example: hextoint:LittleEndian:uint64 - ## or hextoint:BigEndian:uint32. Valid options for the Endian are: - ## BigEndian and LittleEndian. For the bit size: uint16, uint32 - ## and uint64. - ## - # conversion = "" -``` - -##### Table - -Use a `table` to configure the collection of a SNMP table. SNMP requests -formed with this option operate similarly way to the `snmptable` command. - -Control the handling of specific table columns using a nested `field`. These -nested fields are specified similarly to a top-level `field`. - -By default all columns of the SNMP table will be collected - it is not required -to add a nested field for each column, only those which you wish to modify. To -*only* collect certain columns, omit the `oid` from the `table` section and only -include `oid` settings in `field` sections. For more complex include/exclude -cases for columns use [metric filtering][]. - -One [metric][] is created for each row of the SNMP table. - -```toml -[[inputs.snmp]] - # ... snip ... - - [[inputs.snmp.table]] - ## Object identifier of the SNMP table as a numeric or textual OID. - oid = "IF-MIB::ifTable" - - ## Name of the field or tag to create. If not specified, it defaults to - ## the value of 'oid'. 
If 'oid' is numeric an attempt to translate the - ## numeric OID into a textual OID will be made. - # name = "" - - ## Which tags to inherit from the top-level config and to use in the output - ## of this table's measurement. - ## example: inherit_tags = ["source"] - # inherit_tags = [] - - ## Add an 'index' tag with the table row number. Use this if the table has - ## no indexes or if you are excluding them. This option is normally not - ## required as any index columns are automatically added as tags. - # index_as_tag = false - - [[inputs.snmp.table.field]] - ## OID to get. May be a numeric or textual module-qualified OID. - oid = "IF-MIB::ifDescr" - - ## Name of the field or tag to create. If not specified, it defaults to - ## the value of 'oid'. If 'oid' is numeric an attempt to translate the - ## numeric OID into a textual OID will be made. - # name = "" - - ## Output this field as a tag. - # is_tag = false - - ## The OID sub-identifier to strip off so that the index can be matched - ## against other fields in the table. - # oid_index_suffix = "" - - ## Specifies the length of the index after the supplied table OID (in OID - ## path segments). Truncates the index after this point to remove non-fixed - ## value or length index suffixes. - # oid_index_length = 0 - - ## Specifies if the value of given field should be snmptranslated - ## by default no field values are translated - # translate = true -``` - -### Troubleshooting - -Check that a numeric field can be translated to a textual field: -``` -$ snmptranslate .1.3.6.1.2.1.1.3.0 -DISMAN-EVENT-MIB::sysUpTimeInstance -``` - -Request a top-level field: -``` -$ snmpget -v2c -c public 127.0.0.1 sysUpTime.0 -``` - -Request a table: -``` -$ snmptable -v2c -c public 127.0.0.1 ifTable -``` - -To collect a packet capture, run this command in the background while running -Telegraf or one of the above commands. 
Adjust the interface, host and port as -needed: -``` -$ sudo tcpdump -s 0 -i eth0 -w telegraf-snmp.pcap host 127.0.0.1 and port 161 -``` - -### Example Output - -``` -snmp,agent_host=127.0.0.1,source=loaner uptime=11331974i 1575509815000000000 -interface,agent_host=127.0.0.1,ifDescr=wlan0,ifIndex=3,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=0i,ifInOctets=3436617431i,ifInUcastPkts=2717778i,ifInUnknownProtos=0i,ifLastChange=0i,ifMtu=1500i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=581368041i,ifOutQLen=0i,ifOutUcastPkts=1354338i,ifPhysAddress="c8:5b:76:c9:e6:8c",ifSpecific=".0.0",ifSpeed=0i,ifType=6i 1575509815000000000 -interface,agent_host=127.0.0.1,ifDescr=eth0,ifIndex=2,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=21i,ifInOctets=3852386380i,ifInUcastPkts=3634004i,ifInUnknownProtos=0i,ifLastChange=9088763i,ifMtu=1500i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=434865441i,ifOutQLen=0i,ifOutUcastPkts=2110394i,ifPhysAddress="c8:5b:76:c9:e6:8c",ifSpecific=".0.0",ifSpeed=1000000000i,ifType=6i 1575509815000000000 -interface,agent_host=127.0.0.1,ifDescr=lo,ifIndex=1,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=0i,ifInOctets=51555569i,ifInUcastPkts=339097i,ifInUnknownProtos=0i,ifLastChange=0i,ifMtu=65536i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=51555569i,ifOutQLen=0i,ifOutUcastPkts=339097i,ifSpecific=".0.0",ifSpeed=10000000i,ifType=24i 1575509815000000000 -``` - -[net-snmp]: http://www.net-snmp.org/ -[man snmpcmd]: http://net-snmp.sourceforge.net/docs/man/snmpcmd.html#lbAK -[metric filtering]: /docs/CONFIGURATION.md#metric-filtering -[metric]: /docs/METRICS.md diff --git a/vendor/github.com/influxdata/telegraf/plugins/inputs/snmp/snmp.go b/vendor/github.com/influxdata/telegraf/plugins/inputs/snmp/snmp.go deleted file mode 100644 index 9aac89b8..00000000 
--- a/vendor/github.com/influxdata/telegraf/plugins/inputs/snmp/snmp.go +++ /dev/null @@ -1,951 +0,0 @@ -package snmp - -import ( - "bufio" - "bytes" - "encoding/binary" - "fmt" - "log" - "math" - "net" - "os/exec" - "strconv" - "strings" - "sync" - "time" - - "github.com/gosnmp/gosnmp" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/internal/snmp" - "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/wlog" -) - -const description = `Retrieves SNMP values from remote agents` -const sampleConfig = ` - ## Agent addresses to retrieve values from. - ## example: agents = ["udp://127.0.0.1:161"] - ## agents = ["tcp://127.0.0.1:161"] - agents = ["udp://127.0.0.1:161"] - - ## Timeout for each request. - # timeout = "5s" - - ## SNMP version; can be 1, 2, or 3. - # version = 2 - - ## Agent host tag; the tag used to reference the source host - # agent_host_tag = "agent_host" - - ## SNMP community string. - # community = "public" - - ## Number of retries to attempt. - # retries = 3 - - ## The GETBULK max-repetitions parameter. - # max_repetitions = 10 - - ## SNMPv3 authentication and encryption options. - ## - ## Security Name. - # sec_name = "myuser" - ## Authentication protocol; one of "MD5", "SHA", or "". - # auth_protocol = "MD5" - ## Authentication password. - # auth_password = "pass" - ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". - # sec_level = "authNoPriv" - ## Context Name. - # context_name = "" - ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". - # priv_protocol = "" - ## Privacy password used for encrypted messages. - # priv_password = "" - - ## Add fields and tables defining the variables you wish to collect. This - ## example collects the system uptime and interface variables. Reference the - ## full plugin documentation for configuration details. -` - -// execCommand is so tests can mock out exec.Command usage. 
-var execCommand = exec.Command - -// execCmd executes the specified command, returning the STDOUT content. -// If command exits with error status, the output is captured into the returned error. -func execCmd(arg0 string, args ...string) ([]byte, error) { - if wlog.LogLevel() == wlog.DEBUG { - quoted := make([]string, 0, len(args)) - for _, arg := range args { - quoted = append(quoted, fmt.Sprintf("%q", arg)) - } - log.Printf("D! [inputs.snmp] executing %q %s", arg0, strings.Join(quoted, " ")) - } - - out, err := execCommand(arg0, args...).Output() - if err != nil { - if err, ok := err.(*exec.ExitError); ok { - return nil, fmt.Errorf("%s: %w", bytes.TrimRight(err.Stderr, "\r\n"), err) - } - return nil, err - } - return out, nil -} - -// Snmp holds the configuration for the plugin. -type Snmp struct { - // The SNMP agent to query. Format is [SCHEME://]ADDR[:PORT] (e.g. - // udp://1.2.3.4:161). If the scheme is not specified then "udp" is used. - Agents []string `toml:"agents"` - - // The tag used to name the agent host - AgentHostTag string `toml:"agent_host_tag"` - - snmp.ClientConfig - - Tables []Table `toml:"table"` - - // Name & Fields are the elements of a Table. - // Telegraf chokes if we try to embed a Table. So instead we have to embed the - // fields of a Table, and construct a Table during runtime. 
- Name string // deprecated in 1.14; use name_override - Fields []Field `toml:"field"` - - connectionCache []snmpConnection - initialized bool -} - -func (s *Snmp) init() error { - if s.initialized { - return nil - } - - s.connectionCache = make([]snmpConnection, len(s.Agents)) - - for i := range s.Tables { - if err := s.Tables[i].Init(); err != nil { - return fmt.Errorf("initializing table %s: %w", s.Tables[i].Name, err) - } - } - - for i := range s.Fields { - if err := s.Fields[i].init(); err != nil { - return fmt.Errorf("initializing field %s: %w", s.Fields[i].Name, err) - } - } - - if len(s.AgentHostTag) == 0 { - s.AgentHostTag = "agent_host" - } - - s.initialized = true - return nil -} - -// Table holds the configuration for a SNMP table. -type Table struct { - // Name will be the name of the measurement. - Name string - - // Which tags to inherit from the top-level config. - InheritTags []string - - // Adds each row's table index as a tag. - IndexAsTag bool - - // Fields is the tags and values to look up. - Fields []Field `toml:"field"` - - // OID for automatic field population. - // If provided, init() will populate Fields with all the table columns of the - // given OID. - Oid string - - initialized bool -} - -// Init() builds & initializes the nested fields. -func (t *Table) Init() error { - if t.initialized { - return nil - } - - if err := t.initBuild(); err != nil { - return err - } - - // initialize all the nested fields - for i := range t.Fields { - if err := t.Fields[i].init(); err != nil { - return fmt.Errorf("initializing field %s: %w", t.Fields[i].Name, err) - } - } - - t.initialized = true - return nil -} - -// initBuild initializes the table if it has an OID configured. If so, the -// net-snmp tools will be used to look up the OID and auto-populate the table's -// fields. 
-func (t *Table) initBuild() error { - if t.Oid == "" { - return nil - } - - _, _, oidText, fields, err := snmpTable(t.Oid) - if err != nil { - return err - } - - if t.Name == "" { - t.Name = oidText - } - - knownOIDs := map[string]bool{} - for _, f := range t.Fields { - knownOIDs[f.Oid] = true - } - for _, f := range fields { - if !knownOIDs[f.Oid] { - t.Fields = append(t.Fields, f) - } - } - - return nil -} - -// Field holds the configuration for a Field to look up. -type Field struct { - // Name will be the name of the field. - Name string - // OID is prefix for this field. The plugin will perform a walk through all - // OIDs with this as their parent. For each value found, the plugin will strip - // off the OID prefix, and use the remainder as the index. For multiple fields - // to show up in the same row, they must share the same index. - Oid string - // OidIndexSuffix is the trailing sub-identifier on a table record OID that will be stripped off to get the record's index. - OidIndexSuffix string - // OidIndexLength specifies the length of the index in OID path segments. It can be used to remove sub-identifiers that vary in content or length. - OidIndexLength int - // IsTag controls whether this OID is output as a tag or a value. - IsTag bool - // Conversion controls any type conversion that is done on the value. - // "float"/"float(0)" will convert the value into a float. - // "float(X)" will convert the value into a float, and then move the decimal before Xth right-most digit. - // "int" will conver the value into an integer. - // "hwaddr" will convert a 6-byte string to a MAC address. - // "ipaddr" will convert the value to an IPv4 or IPv6 address. - Conversion string - // Translate tells if the value of the field should be snmptranslated - Translate bool - - initialized bool -} - -// init() converts OID names to numbers, and sets the .Name attribute if unset. 
-func (f *Field) init() error { - if f.initialized { - return nil - } - - _, oidNum, oidText, conversion, err := SnmpTranslate(f.Oid) - if err != nil { - return fmt.Errorf("translating: %w", err) - } - f.Oid = oidNum - if f.Name == "" { - f.Name = oidText - } - if f.Conversion == "" { - f.Conversion = conversion - } - - //TODO use textual convention conversion from the MIB - - f.initialized = true - return nil -} - -// RTable is the resulting table built from a Table. -type RTable struct { - // Name is the name of the field, copied from Table.Name. - Name string - // Time is the time the table was built. - Time time.Time - // Rows are the rows that were found, one row for each table OID index found. - Rows []RTableRow -} - -// RTableRow is the resulting row containing all the OID values which shared -// the same index. -type RTableRow struct { - // Tags are all the Field values which had IsTag=true. - Tags map[string]string - // Fields are all the Field values which had IsTag=false. - Fields map[string]interface{} -} - -type walkError struct { - msg string - err error -} - -func (e *walkError) Error() string { - return e.msg -} - -func (e *walkError) Unwrap() error { - return e.err -} - -func init() { - inputs.Add("snmp", func() telegraf.Input { - return &Snmp{ - Name: "snmp", - ClientConfig: snmp.ClientConfig{ - Retries: 3, - MaxRepetitions: 10, - Timeout: internal.Duration{Duration: 5 * time.Second}, - Version: 2, - Community: "public", - }, - } - }) -} - -// SampleConfig returns the default configuration of the input. -func (s *Snmp) SampleConfig() string { - return sampleConfig -} - -// Description returns a one-sentence description on the input. -func (s *Snmp) Description() string { - return description -} - -// Gather retrieves all the configured fields and tables. -// Any error encountered does not halt the process. The errors are accumulated -// and returned at the end. 
-func (s *Snmp) Gather(acc telegraf.Accumulator) error { - if err := s.init(); err != nil { - return err - } - - var wg sync.WaitGroup - for i, agent := range s.Agents { - wg.Add(1) - go func(i int, agent string) { - defer wg.Done() - gs, err := s.getConnection(i) - if err != nil { - acc.AddError(fmt.Errorf("agent %s: %w", agent, err)) - return - } - - // First is the top-level fields. We treat the fields as table prefixes with an empty index. - t := Table{ - Name: s.Name, - Fields: s.Fields, - } - topTags := map[string]string{} - if err := s.gatherTable(acc, gs, t, topTags, false); err != nil { - acc.AddError(fmt.Errorf("agent %s: %w", agent, err)) - } - - // Now is the real tables. - for _, t := range s.Tables { - if err := s.gatherTable(acc, gs, t, topTags, true); err != nil { - acc.AddError(fmt.Errorf("agent %s: gathering table %s: %w", agent, t.Name, err)) - } - } - }(i, agent) - } - wg.Wait() - - return nil -} - -func (s *Snmp) gatherTable(acc telegraf.Accumulator, gs snmpConnection, t Table, topTags map[string]string, walk bool) error { - rt, err := t.Build(gs, walk) - if err != nil { - return err - } - - for _, tr := range rt.Rows { - if !walk { - // top-level table. Add tags to topTags. - for k, v := range tr.Tags { - topTags[k] = v - } - } else { - // real table. Inherit any specified tags. - for _, k := range t.InheritTags { - if v, ok := topTags[k]; ok { - tr.Tags[k] = v - } - } - } - if _, ok := tr.Tags[s.AgentHostTag]; !ok { - tr.Tags[s.AgentHostTag] = gs.Host() - } - acc.AddFields(rt.Name, tr.Fields, tr.Tags, rt.Time) - } - - return nil -} - -// Build retrieves all the fields specified in the table and constructs the RTable. -func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { - rows := map[string]RTableRow{} - - tagCount := 0 - for _, f := range t.Fields { - if f.IsTag { - tagCount++ - } - - if len(f.Oid) == 0 { - return nil, fmt.Errorf("cannot have empty OID on field %s", f.Name) - } - var oid string - if f.Oid[0] == '.' 
{ - oid = f.Oid - } else { - // make sure OID has "." because the BulkWalkAll results do, and the prefix needs to match - oid = "." + f.Oid - } - - // ifv contains a mapping of table OID index to field value - ifv := map[string]interface{}{} - - if !walk { - // This is used when fetching non-table fields. Fields configured a the top - // scope of the plugin. - // We fetch the fields directly, and add them to ifv as if the index were an - // empty string. This results in all the non-table fields sharing the same - // index, and being added on the same row. - if pkt, err := gs.Get([]string{oid}); err != nil { - return nil, fmt.Errorf("performing get on field %s: %w", f.Name, err) - } else if pkt != nil && len(pkt.Variables) > 0 && pkt.Variables[0].Type != gosnmp.NoSuchObject && pkt.Variables[0].Type != gosnmp.NoSuchInstance { - ent := pkt.Variables[0] - fv, err := fieldConvert(f.Conversion, ent.Value) - if err != nil { - return nil, fmt.Errorf("converting %q (OID %s) for field %s: %w", ent.Value, ent.Name, f.Name, err) - } - ifv[""] = fv - } - } else { - err := gs.Walk(oid, func(ent gosnmp.SnmpPDU) error { - if len(ent.Name) <= len(oid) || ent.Name[:len(oid)+1] != oid+"." { - return &walkError{} // break the walk - } - - idx := ent.Name[len(oid):] - if f.OidIndexSuffix != "" { - if !strings.HasSuffix(idx, f.OidIndexSuffix) { - // this entry doesn't match our OidIndexSuffix. skip it - return nil - } - idx = idx[:len(idx)-len(f.OidIndexSuffix)] - } - if f.OidIndexLength != 0 { - i := f.OidIndexLength + 1 // leading separator - idx = strings.Map(func(r rune) rune { - if r == '.' 
{ - i -= 1 - } - if i < 1 { - return -1 - } - return r - }, idx) - } - - // snmptranslate table field value here - if f.Translate { - if entOid, ok := ent.Value.(string); ok { - _, _, oidText, _, err := SnmpTranslate(entOid) - if err == nil { - // If no error translating, the original value for ent.Value should be replaced - ent.Value = oidText - } - } - } - - fv, err := fieldConvert(f.Conversion, ent.Value) - if err != nil { - return &walkError{ - msg: fmt.Sprintf("converting %q (OID %s) for field %s", ent.Value, ent.Name, f.Name), - err: err, - } - } - ifv[idx] = fv - return nil - }) - if err != nil { - // Our callback always wraps errors in a walkError. - // If this error isn't a walkError, we know it's not - // from the callback - if _, ok := err.(*walkError); !ok { - return nil, fmt.Errorf("performing bulk walk for field %s: %w", f.Name, err) - } - } - } - - for idx, v := range ifv { - rtr, ok := rows[idx] - if !ok { - rtr = RTableRow{} - rtr.Tags = map[string]string{} - rtr.Fields = map[string]interface{}{} - rows[idx] = rtr - } - if t.IndexAsTag && idx != "" { - if idx[0] == '.' { - idx = idx[1:] - } - rtr.Tags["index"] = idx - } - // don't add an empty string - if vs, ok := v.(string); !ok || vs != "" { - if f.IsTag { - if ok { - rtr.Tags[f.Name] = vs - } else { - rtr.Tags[f.Name] = fmt.Sprintf("%v", v) - } - } else { - rtr.Fields[f.Name] = v - } - } - } - } - - rt := RTable{ - Name: t.Name, - Time: time.Now(), //TODO record time at start - Rows: make([]RTableRow, 0, len(rows)), - } - for _, r := range rows { - rt.Rows = append(rt.Rows, r) - } - return &rt, nil -} - -// snmpConnection is an interface which wraps a *gosnmp.GoSNMP object. -// We interact through an interface so we can mock it out in tests. 
-type snmpConnection interface { - Host() string - //BulkWalkAll(string) ([]gosnmp.SnmpPDU, error) - Walk(string, gosnmp.WalkFunc) error - Get(oids []string) (*gosnmp.SnmpPacket, error) -} - -// getConnection creates a snmpConnection (*gosnmp.GoSNMP) object and caches the -// result using `agentIndex` as the cache key. This is done to allow multiple -// connections to a single address. It is an error to use a connection in -// more than one goroutine. -func (s *Snmp) getConnection(idx int) (snmpConnection, error) { - if gs := s.connectionCache[idx]; gs != nil { - return gs, nil - } - - agent := s.Agents[idx] - - var err error - var gs snmp.GosnmpWrapper - gs, err = snmp.NewWrapper(s.ClientConfig) - if err != nil { - return nil, err - } - gs.SetAgent(agent) - if err != nil { - return nil, err - } - - s.connectionCache[idx] = gs - - if err := gs.Connect(); err != nil { - return nil, fmt.Errorf("setting up connection: %w", err) - } - - return gs, nil -} - -// fieldConvert converts from any type according to the conv specification -func fieldConvert(conv string, v interface{}) (interface{}, error) { - if conv == "" { - if bs, ok := v.([]byte); ok { - return string(bs), nil - } - return v, nil - } - - var d int - if _, err := fmt.Sscanf(conv, "float(%d)", &d); err == nil || conv == "float" { - switch vt := v.(type) { - case float32: - v = float64(vt) / math.Pow10(d) - case float64: - v = float64(vt) / math.Pow10(d) - case int: - v = float64(vt) / math.Pow10(d) - case int8: - v = float64(vt) / math.Pow10(d) - case int16: - v = float64(vt) / math.Pow10(d) - case int32: - v = float64(vt) / math.Pow10(d) - case int64: - v = float64(vt) / math.Pow10(d) - case uint: - v = float64(vt) / math.Pow10(d) - case uint8: - v = float64(vt) / math.Pow10(d) - case uint16: - v = float64(vt) / math.Pow10(d) - case uint32: - v = float64(vt) / math.Pow10(d) - case uint64: - v = float64(vt) / math.Pow10(d) - case []byte: - vf, _ := strconv.ParseFloat(string(vt), 64) - v = vf / math.Pow10(d) 
- case string: - vf, _ := strconv.ParseFloat(vt, 64) - v = vf / math.Pow10(d) - } - return v, nil - } - - if conv == "int" { - switch vt := v.(type) { - case float32: - v = int64(vt) - case float64: - v = int64(vt) - case int: - v = int64(vt) - case int8: - v = int64(vt) - case int16: - v = int64(vt) - case int32: - v = int64(vt) - case int64: - v = int64(vt) - case uint: - v = int64(vt) - case uint8: - v = int64(vt) - case uint16: - v = int64(vt) - case uint32: - v = int64(vt) - case uint64: - v = int64(vt) - case []byte: - v, _ = strconv.ParseInt(string(vt), 10, 64) - case string: - v, _ = strconv.ParseInt(vt, 10, 64) - } - return v, nil - } - - if conv == "hwaddr" { - switch vt := v.(type) { - case string: - v = net.HardwareAddr(vt).String() - case []byte: - v = net.HardwareAddr(vt).String() - default: - return nil, fmt.Errorf("invalid type (%T) for hwaddr conversion", v) - } - return v, nil - } - - split := strings.Split(conv, ":") - if split[0] == "hextoint" && len(split) == 3 { - - endian := split[1] - bit := split[2] - - bv, ok := v.([]byte) - if !ok { - return v, nil - } - - if endian == "LittleEndian" { - switch bit { - case "uint64": - v = binary.LittleEndian.Uint64(bv) - case "uint32": - v = binary.LittleEndian.Uint32(bv) - case "uint16": - v = binary.LittleEndian.Uint16(bv) - default: - return nil, fmt.Errorf("invalid bit value (%s) for hex to int conversion", bit) - } - } else if endian == "BigEndian" { - switch bit { - case "uint64": - v = binary.BigEndian.Uint64(bv) - case "uint32": - v = binary.BigEndian.Uint32(bv) - case "uint16": - v = binary.BigEndian.Uint16(bv) - default: - return nil, fmt.Errorf("invalid bit value (%s) for hex to int conversion", bit) - } - } else { - return nil, fmt.Errorf("invalid Endian value (%s) for hex to int conversion", endian) - } - - return v, nil - } - - if conv == "ipaddr" { - var ipbs []byte - - switch vt := v.(type) { - case string: - ipbs = []byte(vt) - case []byte: - ipbs = vt - default: - return nil, 
fmt.Errorf("invalid type (%T) for ipaddr conversion", v) - } - - switch len(ipbs) { - case 4, 16: - v = net.IP(ipbs).String() - default: - return nil, fmt.Errorf("invalid length (%d) for ipaddr conversion", len(ipbs)) - } - - return v, nil - } - - return nil, fmt.Errorf("invalid conversion type '%s'", conv) -} - -type snmpTableCache struct { - mibName string - oidNum string - oidText string - fields []Field - err error -} - -var snmpTableCaches map[string]snmpTableCache -var snmpTableCachesLock sync.Mutex - -// snmpTable resolves the given OID as a table, providing information about the -// table and fields within. -func snmpTable(oid string) (mibName string, oidNum string, oidText string, fields []Field, err error) { - snmpTableCachesLock.Lock() - if snmpTableCaches == nil { - snmpTableCaches = map[string]snmpTableCache{} - } - - var stc snmpTableCache - var ok bool - if stc, ok = snmpTableCaches[oid]; !ok { - stc.mibName, stc.oidNum, stc.oidText, stc.fields, stc.err = snmpTableCall(oid) - snmpTableCaches[oid] = stc - } - - snmpTableCachesLock.Unlock() - return stc.mibName, stc.oidNum, stc.oidText, stc.fields, stc.err -} - -func snmpTableCall(oid string) (mibName string, oidNum string, oidText string, fields []Field, err error) { - mibName, oidNum, oidText, _, err = SnmpTranslate(oid) - if err != nil { - return "", "", "", nil, fmt.Errorf("translating: %w", err) - } - - mibPrefix := mibName + "::" - oidFullName := mibPrefix + oidText - - // first attempt to get the table's tags - tagOids := map[string]struct{}{} - // We have to guess that the "entry" oid is `oid+".1"`. snmptable and snmptranslate don't seem to have a way to provide the info. 
- if out, err := execCmd("snmptranslate", "-Td", oidFullName+".1"); err == nil { - scanner := bufio.NewScanner(bytes.NewBuffer(out)) - for scanner.Scan() { - line := scanner.Text() - - if !strings.HasPrefix(line, " INDEX") { - continue - } - - i := strings.Index(line, "{ ") - if i == -1 { // parse error - continue - } - line = line[i+2:] - i = strings.Index(line, " }") - if i == -1 { // parse error - continue - } - line = line[:i] - for _, col := range strings.Split(line, ", ") { - tagOids[mibPrefix+col] = struct{}{} - } - } - } - - // this won't actually try to run a query. The `-Ch` will just cause it to dump headers. - out, err := execCmd("snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", oidFullName) - if err != nil { - return "", "", "", nil, fmt.Errorf("getting table columns: %w", err) - } - scanner := bufio.NewScanner(bytes.NewBuffer(out)) - scanner.Scan() - cols := scanner.Text() - if len(cols) == 0 { - return "", "", "", nil, fmt.Errorf("could not find any columns in table") - } - for _, col := range strings.Split(cols, " ") { - if len(col) == 0 { - continue - } - _, isTag := tagOids[mibPrefix+col] - fields = append(fields, Field{Name: col, Oid: mibPrefix + col, IsTag: isTag}) - } - - return mibName, oidNum, oidText, fields, err -} - -type snmpTranslateCache struct { - mibName string - oidNum string - oidText string - conversion string - err error -} - -var snmpTranslateCachesLock sync.Mutex -var snmpTranslateCaches map[string]snmpTranslateCache - -// snmpTranslate resolves the given OID. -func SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { - snmpTranslateCachesLock.Lock() - if snmpTranslateCaches == nil { - snmpTranslateCaches = map[string]snmpTranslateCache{} - } - - var stc snmpTranslateCache - var ok bool - if stc, ok = snmpTranslateCaches[oid]; !ok { - // This will result in only one call to snmptranslate running at a time. 
- // We could speed it up by putting a lock in snmpTranslateCache and then - // returning it immediately, and multiple callers would then release the - // snmpTranslateCachesLock and instead wait on the individual - // snmpTranslation.Lock to release. But I don't know that the extra complexity - // is worth it. Especially when it would slam the system pretty hard if lots - // of lookups are being performed. - - stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err = snmpTranslateCall(oid) - snmpTranslateCaches[oid] = stc - } - - snmpTranslateCachesLock.Unlock() - - return stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err -} - -func SnmpTranslateForce(oid string, mibName string, oidNum string, oidText string, conversion string) { - snmpTranslateCachesLock.Lock() - defer snmpTranslateCachesLock.Unlock() - if snmpTranslateCaches == nil { - snmpTranslateCaches = map[string]snmpTranslateCache{} - } - - var stc snmpTranslateCache - stc.mibName = mibName - stc.oidNum = oidNum - stc.oidText = oidText - stc.conversion = conversion - stc.err = nil - snmpTranslateCaches[oid] = stc -} - -func SnmpTranslateClear() { - snmpTranslateCachesLock.Lock() - defer snmpTranslateCachesLock.Unlock() - snmpTranslateCaches = map[string]snmpTranslateCache{} -} - -func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { - var out []byte - if strings.ContainsAny(oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") { - out, err = execCmd("snmptranslate", "-Td", "-Ob", oid) - } else { - out, err = execCmd("snmptranslate", "-Td", "-Ob", "-m", "all", oid) - if err, ok := err.(*exec.Error); ok && err.Err == exec.ErrNotFound { - // Silently discard error if snmptranslate not found and we have a numeric OID. - // Meaning we can get by without the lookup. 
- return "", oid, oid, "", nil - } - } - if err != nil { - return "", "", "", "", err - } - - scanner := bufio.NewScanner(bytes.NewBuffer(out)) - ok := scanner.Scan() - if !ok && scanner.Err() != nil { - return "", "", "", "", fmt.Errorf("getting OID text: %w", scanner.Err()) - } - - oidText = scanner.Text() - - i := strings.Index(oidText, "::") - if i == -1 { - // was not found in MIB. - if bytes.Contains(out, []byte("[TRUNCATED]")) { - return "", oid, oid, "", nil - } - // not truncated, but not fully found. We still need to parse out numeric OID, so keep going - oidText = oid - } else { - mibName = oidText[:i] - oidText = oidText[i+2:] - } - - for scanner.Scan() { - line := scanner.Text() - - if strings.HasPrefix(line, " -- TEXTUAL CONVENTION ") { - tc := strings.TrimPrefix(line, " -- TEXTUAL CONVENTION ") - switch tc { - case "MacAddress", "PhysAddress": - conversion = "hwaddr" - case "InetAddressIPv4", "InetAddressIPv6", "InetAddress", "IPSIpAddress": - conversion = "ipaddr" - } - } else if strings.HasPrefix(line, "::= { ") { - objs := strings.TrimPrefix(line, "::= { ") - objs = strings.TrimSuffix(objs, " }") - - for _, obj := range strings.Split(objs, " ") { - if len(obj) == 0 { - continue - } - if i := strings.Index(obj, "("); i != -1 { - obj = obj[i+1:] - oidNum += "." + obj[:strings.Index(obj, ")")] - } else { - oidNum += "." + obj - } - } - break - } - } - - return mibName, oidNum, oidText, conversion, nil -} diff --git a/vendor/github.com/influxdata/telegraf/plugins/inputs/snmp/snmp_mocks_generate.go b/vendor/github.com/influxdata/telegraf/plugins/inputs/snmp/snmp_mocks_generate.go deleted file mode 100644 index 7227771a..00000000 --- a/vendor/github.com/influxdata/telegraf/plugins/inputs/snmp/snmp_mocks_generate.go +++ /dev/null @@ -1,102 +0,0 @@ -// +build generate - -package main - -import ( - "bufio" - "bytes" - "fmt" - "os" - "os/exec" - "strings" -) - -// This file is a generator used to generate the mocks for the commands used by the tests. 
- -// These are the commands to be mocked. -var mockedCommands = [][]string{ - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.1.1"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.1.2"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", "1.0.0.1.1"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.1"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.1.0"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.5"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.2.3"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.7"}, - {"snmptranslate", "-Td", "-Ob", ".iso.2.3"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".999"}, - {"snmptranslate", "-Td", "-Ob", "TEST::server"}, - {"snmptranslate", "-Td", "-Ob", "TEST::server.0"}, - {"snmptranslate", "-Td", "-Ob", "TEST::testTable"}, - {"snmptranslate", "-Td", "-Ob", "TEST::connections"}, - {"snmptranslate", "-Td", "-Ob", "TEST::latency"}, - {"snmptranslate", "-Td", "-Ob", "TEST::description"}, - {"snmptranslate", "-Td", "-Ob", "TEST::hostname"}, - {"snmptranslate", "-Td", "-Ob", "IF-MIB::ifPhysAddress.1"}, - {"snmptranslate", "-Td", "-Ob", "BRIDGE-MIB::dot1dTpFdbAddress.1"}, - {"snmptranslate", "-Td", "-Ob", "TCP-MIB::tcpConnectionLocalAddress.1"}, - {"snmptranslate", "-Td", "TEST::testTable.1"}, - {"snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", "TEST::testTable"}, -} - -type mockedCommandResult struct { - stdout string - stderr string - exitError bool -} - -func main() { - if err := generate(); err != nil { - fmt.Fprintf(os.Stderr, "error: %s\n", err) - os.Exit(1) - } -} - -func generate() error { - f, err := os.OpenFile("snmp_mocks_test.go", os.O_RDWR, 0644) - if err != nil { - return err - } - br := bufio.NewReader(f) - var i int64 - for l, err := br.ReadString('\n'); err == nil; l, err = br.ReadString('\n') { - i += int64(len(l)) - if l == "// BEGIN GO GENERATE CONTENT\n" { - break - } - } - 
f.Truncate(i) - f.Seek(i, 0) - - fmt.Fprintf(f, "var mockedCommandResults = map[string]mockedCommandResult{\n") - - for _, cmd := range mockedCommands { - ec := exec.Command(cmd[0], cmd[1:]...) - out := bytes.NewBuffer(nil) - err := bytes.NewBuffer(nil) - ec.Stdout = out - ec.Stderr = err - ec.Env = []string{ - "MIBDIRS=+./testdata", - } - - var mcr mockedCommandResult - if err := ec.Run(); err != nil { - if err, ok := err.(*exec.ExitError); !ok { - mcr.exitError = true - } else { - return fmt.Errorf("executing %v: %s", cmd, err) - } - } - mcr.stdout = string(out.Bytes()) - mcr.stderr = string(err.Bytes()) - cmd0 := strings.Join(cmd, "\000") - mcrv := fmt.Sprintf("%#v", mcr)[5:] // trim `main.` prefix - fmt.Fprintf(f, "%#v: %s,\n", cmd0, mcrv) - } - f.Write([]byte("}\n")) - f.Close() - - return exec.Command("gofmt", "-w", "snmp_mocks_test.go").Run() -} diff --git a/vendor/github.com/influxdata/wlog/README.md b/vendor/github.com/influxdata/wlog/README.md deleted file mode 100644 index bd67f05e..00000000 --- a/vendor/github.com/influxdata/wlog/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# wlog -Simple log level based Go logger. -Provides an io.Writer that filters log messages based on a log level prefix. -Valid log levels are: DEBUG, INFO, WARN, ERROR, OFF. -Log messages need to begin with a L! where L is one of D, I, W, or E. - - -## Usage - -Create a *log.Logger via wlog.New: - -```go -package main - -import ( - "log" - "os" - - "github.com/influxdata/wlog" -) - -func main() { - var logger *log.Logger - logger = wlog.New(os.Stderr, "prefix", log.LstdFlags) - logger.Println("I! initialized logger") -} -``` - -Create a *log.Logger explicitly using wlog.Writer: - -```go -package main - -import ( - "log" - "os" - - "github.com/influxdata/wlog" -) - -func main() { - var logger *log.Logger - logger = log.New(wlog.NewWriter(os.Stderr), "prefix", log.LstdFlags) - logger.Println("I! 
initialized logger") -} -``` - -Prefix log messages with a log level char and the `!` delimiter. - -```go -logger.Println("D! this is a debug log") -logger.Println("I! this is an info log") -logger.Println("W! this is a warn log") -logger.Println("E! this is an error log") -``` - - -The log level can be changed via the SetLevel or the SetLevelFromName functions. - - -```go -package main - -import ( - "log" - "os" - - "github.com/influxdata/wlog" -) - -func main() { - var logger *log.Logger - logger = wlog.New(os.Stderr, "prefix", log.LstdFlags) - wlog.SetLevel(wlog.DEBUG) - logger.Println("D! initialized logger") - wlog.SetLevelFromName("INFO") - logger.Println("D! this message will be dropped") - logger.Println("I! this message will be printed") -} -``` - diff --git a/vendor/github.com/influxdata/wlog/writer.go b/vendor/github.com/influxdata/wlog/writer.go deleted file mode 100644 index 49716917..00000000 --- a/vendor/github.com/influxdata/wlog/writer.go +++ /dev/null @@ -1,166 +0,0 @@ -/* - Provides an io.Writer that filters log messages based on a log level. - - Valid log levels are: DEBUG, INFO, WARN, ERROR. - - Log messages need to begin with a L! where L is one of D, I, W, or E. - - Examples: - log.Println("D! this is a debug log") - log.Println("I! this is an info log") - log.Println("W! this is a warn log") - log.Println("E! this is an error log") - - Simply pass a instance of wlog.Writer to log.New or use the helper wlog.New function. - - The log level can be changed via the SetLevel or the SetLevelFromName functions. -*/ -package wlog - -import ( - "fmt" - "io" - "log" - "strings" - "sync" -) - -type Level int - -const ( - _ Level = iota - DEBUG - INFO - WARN - ERROR - OFF -) - -const Delimiter = '!' - -var invalidMSG = []byte("log messages must have 'L!' 
prefix where L is one of 'D', 'I', 'W', 'E'") - -var Levels = map[byte]Level{ - 'D': DEBUG, - 'I': INFO, - 'W': WARN, - 'E': ERROR, -} -var ReverseLevels map[Level]byte - -func init() { - ReverseLevels = make(map[Level]byte, len(Levels)) - for k, l := range Levels { - ReverseLevels[l] = k - } -} - -// The global and only log level. Log levels are not implemented per writer. -var logLevel = INFO - -var mu sync.RWMutex - -// Set the current logging Level. -func SetLevel(l Level) { - mu.Lock() - defer mu.Unlock() - logLevel = l -} - -// Retrieve the current logging Level. -func LogLevel() Level { - mu.RLock() - defer mu.RUnlock() - return logLevel -} - -// name to Level mappings -var StringToLevel = map[string]Level{ - "DEBUG": DEBUG, - "INFO": INFO, - "WARN": WARN, - "ERROR": ERROR, - "OFF": OFF, -} - -// Set the log level via a string name. To set it directly use 'logLevel'. -func SetLevelFromName(level string) error { - l := StringToLevel[strings.ToUpper(level)] - if l > 0 { - SetLevel(l) - } else { - return fmt.Errorf("invalid log level: %q", level) - } - return nil -} - -// Implements io.Writer. Checks first byte of write for log level -// and drops the log if necessary -type Writer struct { - start int - w io.Writer -} - -// Create a new *log.Logger wrapping w in a wlog.Writer -func New(w io.Writer, prefix string, flag int) *log.Logger { - return log.New(NewWriter(w), prefix, flag) -} - -// Create a new wlog.Writer wrapping w. -func NewWriter(w io.Writer) *Writer { - return &Writer{-1, w} -} - -// Implements the io.Writer method. -func (w *Writer) Write(buf []byte) (int, error) { - if len(buf) > 0 { - if w.start == -1 { - // Find start of message index - for i, c := range buf { - if c == Delimiter && i > 0 { - l := buf[i-1] - level := Levels[l] - if level > 0 { - w.start = i - 1 - break - } - } - } - if w.start == -1 { - buf = append(invalidMSG, buf...) 
- return w.w.Write(buf) - } - } - l := Levels[buf[w.start]] - if l >= LogLevel() { - return w.w.Write(buf) - } else if l == 0 { - buf = append(invalidMSG, buf...) - return w.w.Write(buf) - } - } - return 0, nil -} - -// StaticLevelWriter prefixes all log messages -// with a static log level. -type StaticLevelWriter struct { - levelPrefix []byte - w io.Writer -} - -// Create a writer that always append a static log prefix to all messages. -// Usefult for supplying a *log.Logger to a package that doesn't -// prefix log messages itself. -func NewStaticLevelWriter(w io.Writer, level Level) *StaticLevelWriter { - levelPrefix := []byte{ReverseLevels[level], '!', ' '} - return &StaticLevelWriter{ - levelPrefix: levelPrefix, - w: w, - } -} - -func (w *StaticLevelWriter) Write(buf []byte) (int, error) { - buf = append(w.levelPrefix, buf...) - return w.w.Write(buf) -} diff --git a/vendor/github.com/miekg/dns/.codecov.yml b/vendor/github.com/miekg/dns/.codecov.yml new file mode 100644 index 00000000..f91e5c1f --- /dev/null +++ b/vendor/github.com/miekg/dns/.codecov.yml @@ -0,0 +1,8 @@ +coverage: + status: + project: + default: + target: 40% + threshold: null + patch: false + changes: false diff --git a/vendor/github.com/miekg/dns/.gitignore b/vendor/github.com/miekg/dns/.gitignore new file mode 100644 index 00000000..776cd950 --- /dev/null +++ b/vendor/github.com/miekg/dns/.gitignore @@ -0,0 +1,4 @@ +*.6 +tags +test.out +a.out diff --git a/vendor/github.com/miekg/dns/.travis.yml b/vendor/github.com/miekg/dns/.travis.yml new file mode 100644 index 00000000..8eaa0642 --- /dev/null +++ b/vendor/github.com/miekg/dns/.travis.yml @@ -0,0 +1,17 @@ +language: go +sudo: false + +go: + - "1.12.x" + - "1.13.x" + - tip + +env: + - GO111MODULE=on + +script: + - go generate ./... && test `git ls-files --modified | wc -l` = 0 + - go test -race -v -bench=. -coverprofile=coverage.txt -covermode=atomic ./... 
+ +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/miekg/dns/AUTHORS b/vendor/github.com/miekg/dns/AUTHORS new file mode 100644 index 00000000..19656835 --- /dev/null +++ b/vendor/github.com/miekg/dns/AUTHORS @@ -0,0 +1 @@ +Miek Gieben diff --git a/vendor/github.com/miekg/dns/CODEOWNERS b/vendor/github.com/miekg/dns/CODEOWNERS new file mode 100644 index 00000000..e0917031 --- /dev/null +++ b/vendor/github.com/miekg/dns/CODEOWNERS @@ -0,0 +1 @@ +* @miekg @tmthrgd diff --git a/vendor/github.com/miekg/dns/CONTRIBUTORS b/vendor/github.com/miekg/dns/CONTRIBUTORS new file mode 100644 index 00000000..5903779d --- /dev/null +++ b/vendor/github.com/miekg/dns/CONTRIBUTORS @@ -0,0 +1,10 @@ +Alex A. Skinner +Andrew Tunnell-Jones +Ask Bjørn Hansen +Dave Cheney +Dusty Wilson +Marek Majkowski +Peter van Dijk +Omri Bahumi +Alex Sergeyev +James Hartig diff --git a/vendor/github.com/miekg/dns/COPYRIGHT b/vendor/github.com/miekg/dns/COPYRIGHT new file mode 100644 index 00000000..35702b10 --- /dev/null +++ b/vendor/github.com/miekg/dns/COPYRIGHT @@ -0,0 +1,9 @@ +Copyright 2009 The Go Authors. All rights reserved. Use of this source code +is governed by a BSD-style license that can be found in the LICENSE file. +Extensions of the original work are copyright (c) 2011 Miek Gieben + +Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is +governed by a BSD-style license that can be found in the LICENSE file. + +Copyright 2014 CloudFlare. All rights reserved. Use of this source code is +governed by a BSD-style license that can be found in the LICENSE file. 
diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/miekg/dns/LICENSE similarity index 76% rename from vendor/github.com/gorilla/mux/LICENSE rename to vendor/github.com/miekg/dns/LICENSE index 6903df63..55f12ab7 100644 --- a/vendor/github.com/gorilla/mux/LICENSE +++ b/vendor/github.com/miekg/dns/LICENSE @@ -1,16 +1,16 @@ -Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. +Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. @@ -25,3 +25,6 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +As this is fork of the official Go code the same license applies. 
+Extensions of the original work are copyright (c) 2011 Miek Gieben diff --git a/vendor/github.com/miekg/dns/Makefile.fuzz b/vendor/github.com/miekg/dns/Makefile.fuzz new file mode 100644 index 00000000..dc158c4a --- /dev/null +++ b/vendor/github.com/miekg/dns/Makefile.fuzz @@ -0,0 +1,33 @@ +# Makefile for fuzzing +# +# Use go-fuzz and needs the tools installed. +# See https://blog.cloudflare.com/dns-parser-meet-go-fuzzer/ +# +# Installing go-fuzz: +# $ make -f Makefile.fuzz get +# Installs: +# * github.com/dvyukov/go-fuzz/go-fuzz +# * get github.com/dvyukov/go-fuzz/go-fuzz-build + +all: build + +.PHONY: build +build: + go-fuzz-build -tags fuzz github.com/miekg/dns + +.PHONY: build-newrr +build-newrr: + go-fuzz-build -func FuzzNewRR -tags fuzz github.com/miekg/dns + +.PHONY: fuzz +fuzz: + go-fuzz -bin=dns-fuzz.zip -workdir=fuzz + +.PHONY: get +get: + go get github.com/dvyukov/go-fuzz/go-fuzz + go get github.com/dvyukov/go-fuzz/go-fuzz-build + +.PHONY: clean +clean: + rm *-fuzz.zip diff --git a/vendor/github.com/miekg/dns/Makefile.release b/vendor/github.com/miekg/dns/Makefile.release new file mode 100644 index 00000000..8fb748e8 --- /dev/null +++ b/vendor/github.com/miekg/dns/Makefile.release @@ -0,0 +1,52 @@ +# Makefile for releasing. +# +# The release is controlled from version.go. The version found there is +# used to tag the git repo, we're not building any artifects so there is nothing +# to upload to github. 
+# +# * Up the version in version.go +# * Run: make -f Makefile.release release +# * will *commit* your change with 'Release $VERSION' +# * push to github +# + +define GO +//+build ignore + +package main + +import ( + "fmt" + + "github.com/miekg/dns" +) + +func main() { + fmt.Println(dns.Version.String()) +} +endef + +$(file > version_release.go,$(GO)) +VERSION:=$(shell go run version_release.go) +TAG="v$(VERSION)" + +all: + @echo Use the \'release\' target to start a release $(VERSION) + rm -f version_release.go + +.PHONY: release +release: commit push + @echo Released $(VERSION) + rm -f version_release.go + +.PHONY: commit +commit: + @echo Committing release $(VERSION) + git commit -am"Release $(VERSION)" + git tag $(TAG) + +.PHONY: push +push: + @echo Pushing release $(VERSION) to master + git push --tags + git push diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md new file mode 100644 index 00000000..126fe62c --- /dev/null +++ b/vendor/github.com/miekg/dns/README.md @@ -0,0 +1,175 @@ +[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns) +[![Code Coverage](https://img.shields.io/codecov/c/github/miekg/dns/master.svg)](https://codecov.io/github/miekg/dns?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/miekg/dns)](https://goreportcard.com/report/miekg/dns) +[![](https://godoc.org/github.com/miekg/dns?status.svg)](https://godoc.org/github.com/miekg/dns) + +# Alternative (more granular) approach to a DNS library + +> Less is more. + +Complete and usable DNS library. All Resource Records are supported, including the DNSSEC types. +It follows a lean and mean philosophy. If there is stuff you should know as a DNS programmer there +isn't a convenience function for it. Server side and client side programming is supported, i.e. you +can build servers and resolvers with it. 
+ +We try to keep the "master" branch as sane as possible and at the bleeding edge of standards, +avoiding breaking changes wherever reasonable. We support the last two versions of Go. + +# Goals + +* KISS; +* Fast; +* Small API. If it's easy to code in Go, don't make a function for it. + +# Users + +A not-so-up-to-date-list-that-may-be-actually-current: + +* https://github.com/coredns/coredns +* https://cloudflare.com +* https://github.com/abh/geodns +* http://www.statdns.com/ +* http://www.dnsinspect.com/ +* https://github.com/chuangbo/jianbing-dictionary-dns +* http://www.dns-lg.com/ +* https://github.com/fcambus/rrda +* https://github.com/kenshinx/godns +* https://github.com/skynetservices/skydns +* https://github.com/hashicorp/consul +* https://github.com/DevelopersPL/godnsagent +* https://github.com/duedil-ltd/discodns +* https://github.com/StalkR/dns-reverse-proxy +* https://github.com/tianon/rawdns +* https://mesosphere.github.io/mesos-dns/ +* https://pulse.turbobytes.com/ +* https://github.com/fcambus/statzone +* https://github.com/benschw/dns-clb-go +* https://github.com/corny/dnscheck for +* https://namesmith.io +* https://github.com/miekg/unbound +* https://github.com/miekg/exdns +* https://dnslookup.org +* https://github.com/looterz/grimd +* https://github.com/phamhongviet/serf-dns +* https://github.com/mehrdadrad/mylg +* https://github.com/bamarni/dockness +* https://github.com/fffaraz/microdns +* http://kelda.io +* https://github.com/ipdcode/hades +* https://github.com/StackExchange/dnscontrol/ +* https://www.dnsperf.com/ +* https://dnssectest.net/ +* https://dns.apebits.com +* https://github.com/oif/apex +* https://github.com/jedisct1/dnscrypt-proxy +* https://github.com/jedisct1/rpdns +* https://github.com/xor-gate/sshfp +* https://github.com/rs/dnstrace +* https://blitiri.com.ar/p/dnss ([github mirror](https://github.com/albertito/dnss)) +* https://github.com/semihalev/sdns +* https://render.com +* https://github.com/peterzen/goresolver +* 
https://github.com/folbricht/routedns + +Send pull request if you want to be listed here. + +# Features + +* UDP/TCP queries, IPv4 and IPv6 +* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported +* Fast +* Server side programming (mimicking the net/http package) +* Client side programming +* DNSSEC: signing, validating and key generation for DSA, RSA, ECDSA and Ed25519 +* EDNS0, NSID, Cookies +* AXFR/IXFR +* TSIG, SIG(0) +* DNS over TLS (DoT): encrypted connection between client and server over TCP +* DNS name compression + +Have fun! + +Miek Gieben - 2010-2012 - +DNS Authors 2012- + +# Building + +This library uses Go modules and uses semantic versioning. Building is done with the `go` tool, so +the following should work: + + go get github.com/miekg/dns + go build github.com/miekg/dns + +## Examples + +A short "how to use the API" is at the beginning of doc.go (this also will show when you call `godoc +github.com/miekg/dns`). + +Example programs can be found in the `github.com/miekg/exdns` repository. 
+ +## Supported RFCs + +*all of them* + +* 103{4,5} - DNS standard +* 1348 - NSAP record (removed the record) +* 1982 - Serial Arithmetic +* 1876 - LOC record +* 1995 - IXFR +* 1996 - DNS notify +* 2136 - DNS Update (dynamic updates) +* 2181 - RRset definition - there is no RRset type though, just []RR +* 2537 - RSAMD5 DNS keys +* 2065 - DNSSEC (updated in later RFCs) +* 2671 - EDNS record +* 2782 - SRV record +* 2845 - TSIG record +* 2915 - NAPTR record +* 2929 - DNS IANA Considerations +* 3110 - RSASHA1 DNS keys +* 3123 - APL record +* 3225 - DO bit (DNSSEC OK) +* 340{1,2,3} - NAPTR record +* 3445 - Limiting the scope of (DNS)KEY +* 3597 - Unknown RRs +* 403{3,4,5} - DNSSEC + validation functions +* 4255 - SSHFP record +* 4343 - Case insensitivity +* 4408 - SPF record +* 4509 - SHA256 Hash in DS +* 4592 - Wildcards in the DNS +* 4635 - HMAC SHA TSIG +* 4701 - DHCID +* 4892 - id.server +* 5001 - NSID +* 5155 - NSEC3 record +* 5205 - HIP record +* 5702 - SHA2 in the DNS +* 5936 - AXFR +* 5966 - TCP implementation recommendations +* 6605 - ECDSA +* 6725 - IANA Registry Update +* 6742 - ILNP DNS +* 6840 - Clarifications and Implementation Notes for DNS Security +* 6844 - CAA record +* 6891 - EDNS0 update +* 6895 - DNS IANA considerations +* 6944 - DNSSEC DNSKEY Algorithm Status +* 6975 - Algorithm Understanding in DNSSEC +* 7043 - EUI48/EUI64 records +* 7314 - DNS (EDNS) EXPIRE Option +* 7477 - CSYNC RR +* 7828 - edns-tcp-keepalive EDNS0 Option +* 7553 - URI record +* 7858 - DNS over TLS: Initiation and Performance Considerations +* 7871 - EDNS0 Client Subnet +* 7873 - Domain Name System (DNS) Cookies +* 8080 - EdDSA for DNSSEC +* 8499 - DNS Terminology + +## Loosely Based Upon + +* ldns - +* NSD - +* Net::DNS - +* GRONG - diff --git a/vendor/github.com/miekg/dns/acceptfunc.go b/vendor/github.com/miekg/dns/acceptfunc.go new file mode 100644 index 00000000..825617fe --- /dev/null +++ b/vendor/github.com/miekg/dns/acceptfunc.go @@ -0,0 +1,61 @@ +package dns + +// 
MsgAcceptFunc is used early in the server code to accept or reject a message with RcodeFormatError. +// It returns a MsgAcceptAction to indicate what should happen with the message. +type MsgAcceptFunc func(dh Header) MsgAcceptAction + +// DefaultMsgAcceptFunc checks the request and will reject if: +// +// * isn't a request (don't respond in that case) +// +// * opcode isn't OpcodeQuery or OpcodeNotify +// +// * Zero bit isn't zero +// +// * has more than 1 question in the question section +// +// * has more than 1 RR in the Answer section +// +// * has more than 0 RRs in the Authority section +// +// * has more than 2 RRs in the Additional section +// +var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc + +// MsgAcceptAction represents the action to be taken. +type MsgAcceptAction int + +const ( + MsgAccept MsgAcceptAction = iota // Accept the message + MsgReject // Reject the message with a RcodeFormatError + MsgIgnore // Ignore the error and send nothing back. + MsgRejectNotImplemented // Reject the message with a RcodeNotImplemented +) + +func defaultMsgAcceptFunc(dh Header) MsgAcceptAction { + if isResponse := dh.Bits&_QR != 0; isResponse { + return MsgIgnore + } + + // Don't allow dynamic updates, because then the sections can contain a whole bunch of RRs. + opcode := int(dh.Bits>>11) & 0xF + if opcode != OpcodeQuery && opcode != OpcodeNotify { + return MsgRejectNotImplemented + } + + if dh.Qdcount != 1 { + return MsgReject + } + // NOTIFY requests can have a SOA in the ANSWER section. See RFC 1996 Section 3.7 and 3.11. + if dh.Ancount > 1 { + return MsgReject + } + // IXFR request could have one SOA RR in the NS section. See RFC 1995, section 3. 
+ if dh.Nscount > 1 { + return MsgReject + } + if dh.Arcount > 2 { + return MsgReject + } + return MsgAccept +} diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go new file mode 100644 index 00000000..db2761d4 --- /dev/null +++ b/vendor/github.com/miekg/dns/client.go @@ -0,0 +1,415 @@ +package dns + +// A client implementation. + +import ( + "context" + "crypto/tls" + "encoding/binary" + "fmt" + "io" + "net" + "strings" + "time" +) + +const ( + dnsTimeout time.Duration = 2 * time.Second + tcpIdleTimeout time.Duration = 8 * time.Second +) + +// A Conn represents a connection to a DNS server. +type Conn struct { + net.Conn // a net.Conn holding the connection + UDPSize uint16 // minimum receive buffer for UDP messages + TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) + tsigRequestMAC string +} + +// A Client defines parameters for a DNS client. +type Client struct { + Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP) + UDPSize uint16 // minimum receive buffer for UDP messages + TLSConfig *tls.Config // TLS connection configuration + Dialer *net.Dialer // a net.Dialer used to set local address, timeouts and more + // Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout, + // WriteTimeout when non-zero. 
Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and + // Client.Dialer) or context.Context.Deadline (see the deprecated ExchangeContext) + Timeout time.Duration + DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero + ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero + WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero + TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) + SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass + group singleflight +} + +// Exchange performs a synchronous UDP query. It sends the message m to the address +// contained in a and waits for a reply. Exchange does not retry a failed query, nor +// will it fall back to TCP in case of truncation. +// See client.Exchange for more information on setting larger buffer sizes. +func Exchange(m *Msg, a string) (r *Msg, err error) { + client := Client{Net: "udp"} + r, _, err = client.Exchange(m, a) + return r, err +} + +func (c *Client) dialTimeout() time.Duration { + if c.Timeout != 0 { + return c.Timeout + } + if c.DialTimeout != 0 { + return c.DialTimeout + } + return dnsTimeout +} + +func (c *Client) readTimeout() time.Duration { + if c.ReadTimeout != 0 { + return c.ReadTimeout + } + return dnsTimeout +} + +func (c *Client) writeTimeout() time.Duration { + if c.WriteTimeout != 0 { + return c.WriteTimeout + } + return dnsTimeout +} + +// Dial connects to the address on the named network. 
+func (c *Client) Dial(address string) (conn *Conn, err error) { + // create a new dialer with the appropriate timeout + var d net.Dialer + if c.Dialer == nil { + d = net.Dialer{Timeout: c.getTimeoutForRequest(c.dialTimeout())} + } else { + d = *c.Dialer + } + + network := c.Net + if network == "" { + network = "udp" + } + + useTLS := strings.HasPrefix(network, "tcp") && strings.HasSuffix(network, "-tls") + + conn = new(Conn) + if useTLS { + network = strings.TrimSuffix(network, "-tls") + + conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig) + } else { + conn.Conn, err = d.Dial(network, address) + } + if err != nil { + return nil, err + } + + return conn, nil +} + +// Exchange performs a synchronous query. It sends the message m to the address +// contained in a and waits for a reply. Basic use pattern with a *dns.Client: +// +// c := new(dns.Client) +// in, rtt, err := c.Exchange(message, "127.0.0.1:53") +// +// Exchange does not retry a failed query, nor will it fall back to TCP in +// case of truncation. +// It is up to the caller to create a message that allows for larger responses to be +// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger +// buffer, see SetEdns0. 
Messages without an OPT RR will fallback to the historic limit +// of 512 bytes +// To specify a local address or a timeout, the caller has to set the `Client.Dialer` +// attribute appropriately +func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) { + if !c.SingleInflight { + return c.exchange(m, address) + } + + q := m.Question[0] + key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass) + r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) { + return c.exchange(m, address) + }) + if r != nil && shared { + r = r.Copy() + } + + return r, rtt, err +} + +func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { + var co *Conn + + co, err = c.Dial(a) + + if err != nil { + return nil, 0, err + } + defer co.Close() + + opt := m.IsEdns0() + // If EDNS0 is used use that for size. + if opt != nil && opt.UDPSize() >= MinMsgSize { + co.UDPSize = opt.UDPSize() + } + // Otherwise use the client's configured UDP size. + if opt == nil && c.UDPSize >= MinMsgSize { + co.UDPSize = c.UDPSize + } + + co.TsigSecret = c.TsigSecret + t := time.Now() + // write with the appropriate write timeout + co.SetWriteDeadline(t.Add(c.getTimeoutForRequest(c.writeTimeout()))) + if err = co.WriteMsg(m); err != nil { + return nil, 0, err + } + + co.SetReadDeadline(time.Now().Add(c.getTimeoutForRequest(c.readTimeout()))) + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + rtt = time.Since(t) + return r, rtt, err +} + +// ReadMsg reads a message from the connection co. +// If the received message contains a TSIG record the transaction signature +// is verified. This method always tries to return the message, however if an +// error is returned there are no guarantees that the returned message is a +// valid representation of the packet read. 
+func (co *Conn) ReadMsg() (*Msg, error) { + p, err := co.ReadMsgHeader(nil) + if err != nil { + return nil, err + } + + m := new(Msg) + if err := m.Unpack(p); err != nil { + // If an error was returned, we still want to allow the user to use + // the message, but naively they can just check err if they don't want + // to use an erroneous message + return m, err + } + if t := m.IsTsig(); t != nil { + if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { + return m, ErrSecret + } + // Need to work on the original message p, as that was used to calculate the tsig. + err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) + } + return m, err +} + +// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil). +// Returns message as a byte slice to be parsed with Msg.Unpack later on. +// Note that error handling on the message body is not possible as only the header is parsed. +func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) { + var ( + p []byte + n int + err error + ) + + if _, ok := co.Conn.(net.PacketConn); ok { + if co.UDPSize > MinMsgSize { + p = make([]byte, co.UDPSize) + } else { + p = make([]byte, MinMsgSize) + } + n, err = co.Read(p) + } else { + var length uint16 + if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil { + return nil, err + } + + p = make([]byte, length) + n, err = io.ReadFull(co.Conn, p) + } + + if err != nil { + return nil, err + } else if n < headerSize { + return nil, ErrShortRead + } + + p = p[:n] + if hdr != nil { + dh, _, err := unpackMsgHdr(p, 0) + if err != nil { + return nil, err + } + *hdr = dh + } + return p, err +} + +// Read implements the net.Conn read method. 
+func (co *Conn) Read(p []byte) (n int, err error) { + if co.Conn == nil { + return 0, ErrConnEmpty + } + + if _, ok := co.Conn.(net.PacketConn); ok { + // UDP connection + return co.Conn.Read(p) + } + + var length uint16 + if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil { + return 0, err + } + if int(length) > len(p) { + return 0, io.ErrShortBuffer + } + + return io.ReadFull(co.Conn, p[:length]) +} + +// WriteMsg sends a message through the connection co. +// If the message m contains a TSIG record the transaction +// signature is calculated. +func (co *Conn) WriteMsg(m *Msg) (err error) { + var out []byte + if t := m.IsTsig(); t != nil { + mac := "" + if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { + return ErrSecret + } + out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) + // Set for the next read, although only used in zone transfers + co.tsigRequestMAC = mac + } else { + out, err = m.Pack() + } + if err != nil { + return err + } + _, err = co.Write(out) + return err +} + +// Write implements the net.Conn Write method. 
+func (co *Conn) Write(p []byte) (int, error) { + if len(p) > MaxMsgSize { + return 0, &Error{err: "message too large"} + } + + if _, ok := co.Conn.(net.PacketConn); ok { + return co.Conn.Write(p) + } + + l := make([]byte, 2) + binary.BigEndian.PutUint16(l, uint16(len(p))) + + n, err := (&net.Buffers{l, p}).WriteTo(co.Conn) + return int(n), err +} + +// Return the appropriate timeout for a specific request +func (c *Client) getTimeoutForRequest(timeout time.Duration) time.Duration { + var requestTimeout time.Duration + if c.Timeout != 0 { + requestTimeout = c.Timeout + } else { + requestTimeout = timeout + } + // net.Dialer.Timeout has priority if smaller than the timeouts computed so + // far + if c.Dialer != nil && c.Dialer.Timeout != 0 { + if c.Dialer.Timeout < requestTimeout { + requestTimeout = c.Dialer.Timeout + } + } + return requestTimeout +} + +// Dial connects to the address on the named network. +func Dial(network, address string) (conn *Conn, err error) { + conn = new(Conn) + conn.Conn, err = net.Dial(network, address) + if err != nil { + return nil, err + } + return conn, nil +} + +// ExchangeContext performs a synchronous UDP query, like Exchange. It +// additionally obeys deadlines from the passed Context. +func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) { + client := Client{Net: "udp"} + r, _, err = client.ExchangeContext(ctx, m, a) + // ignorint rtt to leave the original ExchangeContext API unchanged, but + // this function will go away + return r, err +} + +// ExchangeConn performs a synchronous query. It sends the message m via the connection +// c and waits for a reply. The connection c is not closed by ExchangeConn. 
+// Deprecated: This function is going away, but can easily be mimicked: +// +// co := &dns.Conn{Conn: c} // c is your net.Conn +// co.WriteMsg(m) +// in, _ := co.ReadMsg() +// co.Close() +// +func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) { + println("dns: ExchangeConn: this function is deprecated") + co := new(Conn) + co.Conn = c + if err = co.WriteMsg(m); err != nil { + return nil, err + } + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + return r, err +} + +// DialTimeout acts like Dial but takes a timeout. +func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) { + client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}} + return client.Dial(address) +} + +// DialWithTLS connects to the address on the named network with TLS. +func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) { + if !strings.HasSuffix(network, "-tls") { + network += "-tls" + } + client := Client{Net: network, TLSConfig: tlsConfig} + return client.Dial(address) +} + +// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout. +func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) { + if !strings.HasSuffix(network, "-tls") { + network += "-tls" + } + client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig} + return client.Dial(address) +} + +// ExchangeContext acts like Exchange, but honors the deadline on the provided +// context, if present. If there is both a context deadline and a configured +// timeout on the client, the earliest of the two takes effect. 
+func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) { + var timeout time.Duration + if deadline, ok := ctx.Deadline(); !ok { + timeout = 0 + } else { + timeout = time.Until(deadline) + } + // not passing the context to the underlying calls, as the API does not support + // context. For timeouts you should set up Client.Dialer and call Client.Exchange. + // TODO(tmthrgd,miekg): this is a race condition. + c.Dialer = &net.Dialer{Timeout: timeout} + return c.Exchange(m, a) +} diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go new file mode 100644 index 00000000..e11b630d --- /dev/null +++ b/vendor/github.com/miekg/dns/clientconfig.go @@ -0,0 +1,135 @@ +package dns + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" +) + +// ClientConfig wraps the contents of the /etc/resolv.conf file. +type ClientConfig struct { + Servers []string // servers to use + Search []string // suffixes to append to local name + Port string // what port to use + Ndots int // number of dots in name to trigger absolute lookup + Timeout int // seconds before giving up on packet + Attempts int // lost packets before giving up on server, not used in the package dns +} + +// ClientConfigFromFile parses a resolv.conf(5) like file and returns +// a *ClientConfig. 
+func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) { + file, err := os.Open(resolvconf) + if err != nil { + return nil, err + } + defer file.Close() + return ClientConfigFromReader(file) +} + +// ClientConfigFromReader works like ClientConfigFromFile but takes an io.Reader as argument +func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) { + c := new(ClientConfig) + scanner := bufio.NewScanner(resolvconf) + c.Servers = make([]string, 0) + c.Search = make([]string, 0) + c.Port = "53" + c.Ndots = 1 + c.Timeout = 5 + c.Attempts = 2 + + for scanner.Scan() { + if err := scanner.Err(); err != nil { + return nil, err + } + line := scanner.Text() + f := strings.Fields(line) + if len(f) < 1 { + continue + } + switch f[0] { + case "nameserver": // add one name server + if len(f) > 1 { + // One more check: make sure server name is + // just an IP address. Otherwise we need DNS + // to look it up. + name := f[1] + c.Servers = append(c.Servers, name) + } + + case "domain": // set search path to just this domain + if len(f) > 1 { + c.Search = make([]string, 1) + c.Search[0] = f[1] + } else { + c.Search = make([]string, 0) + } + + case "search": // set search path to given servers + c.Search = append([]string(nil), f[1:]...) + + case "options": // magic options + for _, s := range f[1:] { + switch { + case len(s) >= 6 && s[:6] == "ndots:": + n, _ := strconv.Atoi(s[6:]) + if n < 0 { + n = 0 + } else if n > 15 { + n = 15 + } + c.Ndots = n + case len(s) >= 8 && s[:8] == "timeout:": + n, _ := strconv.Atoi(s[8:]) + if n < 1 { + n = 1 + } + c.Timeout = n + case len(s) >= 9 && s[:9] == "attempts:": + n, _ := strconv.Atoi(s[9:]) + if n < 1 { + n = 1 + } + c.Attempts = n + case s == "rotate": + /* not imp */ + } + } + } + } + return c, nil +} + +// NameList returns all of the names that should be queried based on the +// config. It is based off of go's net/dns name building, but it does not +// check the length of the resulting names. 
+func (c *ClientConfig) NameList(name string) []string { + // if this domain is already fully qualified, no append needed. + if IsFqdn(name) { + return []string{name} + } + + // Check to see if the name has more labels than Ndots. Do this before making + // the domain fully qualified. + hasNdots := CountLabel(name) > c.Ndots + // Make the domain fully qualified. + name = Fqdn(name) + + // Make a list of names based off search. + names := []string{} + + // If name has enough dots, try that first. + if hasNdots { + names = append(names, name) + } + for _, s := range c.Search { + names = append(names, Fqdn(name+s)) + } + // If we didn't have enough dots, try after suffixes. + if !hasNdots { + names = append(names, name) + } + return names +} diff --git a/vendor/github.com/miekg/dns/dane.go b/vendor/github.com/miekg/dns/dane.go new file mode 100644 index 00000000..8c4a14ef --- /dev/null +++ b/vendor/github.com/miekg/dns/dane.go @@ -0,0 +1,43 @@ +package dns + +import ( + "crypto/sha256" + "crypto/sha512" + "crypto/x509" + "encoding/hex" + "errors" +) + +// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records. 
+func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) { + switch matchingType { + case 0: + switch selector { + case 0: + return hex.EncodeToString(cert.Raw), nil + case 1: + return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil + } + case 1: + h := sha256.New() + switch selector { + case 0: + h.Write(cert.Raw) + return hex.EncodeToString(h.Sum(nil)), nil + case 1: + h.Write(cert.RawSubjectPublicKeyInfo) + return hex.EncodeToString(h.Sum(nil)), nil + } + case 2: + h := sha512.New() + switch selector { + case 0: + h.Write(cert.Raw) + return hex.EncodeToString(h.Sum(nil)), nil + case 1: + h.Write(cert.RawSubjectPublicKeyInfo) + return hex.EncodeToString(h.Sum(nil)), nil + } + } + return "", errors.New("dns: bad MatchingType or Selector") +} diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go new file mode 100644 index 00000000..b059f6fc --- /dev/null +++ b/vendor/github.com/miekg/dns/defaults.go @@ -0,0 +1,378 @@ +package dns + +import ( + "errors" + "net" + "strconv" + "strings" +) + +const hexDigit = "0123456789abcdef" + +// Everything is assumed in ClassINET. + +// SetReply creates a reply message from a request message. +func (dns *Msg) SetReply(request *Msg) *Msg { + dns.Id = request.Id + dns.Response = true + dns.Opcode = request.Opcode + if dns.Opcode == OpcodeQuery { + dns.RecursionDesired = request.RecursionDesired // Copy rd bit + dns.CheckingDisabled = request.CheckingDisabled // Copy cd bit + } + dns.Rcode = RcodeSuccess + if len(request.Question) > 0 { + dns.Question = make([]Question, 1) + dns.Question[0] = request.Question[0] + } + return dns +} + +// SetQuestion creates a question message, it sets the Question +// section, generates an Id and sets the RecursionDesired (RD) +// bit to true. 
+func (dns *Msg) SetQuestion(z string, t uint16) *Msg { + dns.Id = Id() + dns.RecursionDesired = true + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, t, ClassINET} + return dns +} + +// SetNotify creates a notify message, it sets the Question +// section, generates an Id and sets the Authoritative (AA) +// bit to true. +func (dns *Msg) SetNotify(z string) *Msg { + dns.Opcode = OpcodeNotify + dns.Authoritative = true + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeSOA, ClassINET} + return dns +} + +// SetRcode creates an error message suitable for the request. +func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg { + dns.SetReply(request) + dns.Rcode = rcode + return dns +} + +// SetRcodeFormatError creates a message with FormError set. +func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg { + dns.Rcode = RcodeFormatError + dns.Opcode = OpcodeQuery + dns.Response = true + dns.Authoritative = false + dns.Id = request.Id + return dns +} + +// SetUpdate makes the message a dynamic update message. It +// sets the ZONE section to: z, TypeSOA, ClassINET. +func (dns *Msg) SetUpdate(z string) *Msg { + dns.Id = Id() + dns.Response = false + dns.Opcode = OpcodeUpdate + dns.Compress = false // BIND9 cannot handle compression + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeSOA, ClassINET} + return dns +} + +// SetIxfr creates message for requesting an IXFR. +func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg { + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Ns = make([]RR, 1) + s := new(SOA) + s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0} + s.Serial = serial + s.Ns = ns + s.Mbox = mbox + dns.Question[0] = Question{z, TypeIXFR, ClassINET} + dns.Ns[0] = s + return dns +} + +// SetAxfr creates message for requesting an AXFR. 
+func (dns *Msg) SetAxfr(z string) *Msg { + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeAXFR, ClassINET} + return dns +} + +// SetTsig appends a TSIG RR to the message. +// This is only a skeleton TSIG RR that is added as the last RR in the +// additional section. The Tsig is calculated when the message is being send. +func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg { + t := new(TSIG) + t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0} + t.Algorithm = algo + t.Fudge = fudge + t.TimeSigned = uint64(timesigned) + t.OrigId = dns.Id + dns.Extra = append(dns.Extra, t) + return dns +} + +// SetEdns0 appends a EDNS0 OPT RR to the message. +// TSIG should always the last RR in a message. +func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg { + e := new(OPT) + e.Hdr.Name = "." + e.Hdr.Rrtype = TypeOPT + e.SetUDPSize(udpsize) + if do { + e.SetDo() + } + dns.Extra = append(dns.Extra, e) + return dns +} + +// IsTsig checks if the message has a TSIG record as the last record +// in the additional section. It returns the TSIG record found or nil. +func (dns *Msg) IsTsig() *TSIG { + if len(dns.Extra) > 0 { + if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG { + return dns.Extra[len(dns.Extra)-1].(*TSIG) + } + } + return nil +} + +// IsEdns0 checks if the message has a EDNS0 (OPT) record, any EDNS0 +// record in the additional section will do. It returns the OPT record +// found or nil. +func (dns *Msg) IsEdns0() *OPT { + // RFC 6891, Section 6.1.1 allows the OPT record to appear + // anywhere in the additional record section, but it's usually at + // the end so start there. + for i := len(dns.Extra) - 1; i >= 0; i-- { + if dns.Extra[i].Header().Rrtype == TypeOPT { + return dns.Extra[i].(*OPT) + } + } + return nil +} + +// popEdns0 is like IsEdns0, but it removes the record from the message. 
+func (dns *Msg) popEdns0() *OPT { + // RFC 6891, Section 6.1.1 allows the OPT record to appear + // anywhere in the additional record section, but it's usually at + // the end so start there. + for i := len(dns.Extra) - 1; i >= 0; i-- { + if dns.Extra[i].Header().Rrtype == TypeOPT { + opt := dns.Extra[i].(*OPT) + dns.Extra = append(dns.Extra[:i], dns.Extra[i+1:]...) + return opt + } + } + return nil +} + +// IsDomainName checks if s is a valid domain name, it returns the number of +// labels and true, when a domain name is valid. Note that non fully qualified +// domain name is considered valid, in this case the last label is counted in +// the number of labels. When false is returned the number of labels is not +// defined. Also note that this function is extremely liberal; almost any +// string is a valid domain name as the DNS is 8 bit protocol. It checks if each +// label fits in 63 characters and that the entire name will fit into the 255 +// octet wire format limit. +func IsDomainName(s string) (labels int, ok bool) { + // XXX: The logic in this function was copied from packDomainName and + // should be kept in sync with that function. + + const lenmsg = 256 + + if len(s) == 0 { // Ok, for instance when dealing with update RR without any rdata. + return 0, false + } + + s = Fqdn(s) + + // Each dot ends a segment of the name. Except for escaped dots (\.), which + // are normal dots. 
+ + var ( + off int + begin int + wasDot bool + ) + for i := 0; i < len(s); i++ { + switch s[i] { + case '\\': + if off+1 > lenmsg { + return labels, false + } + + // check for \DDD + if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) { + i += 3 + begin += 3 + } else { + i++ + begin++ + } + + wasDot = false + case '.': + if wasDot { + // two dots back to back is not legal + return labels, false + } + wasDot = true + + labelLen := i - begin + if labelLen >= 1<<6 { // top two bits of length must be clear + return labels, false + } + + // off can already (we're in a loop) be bigger than lenmsg + // this happens when a name isn't fully qualified + off += 1 + labelLen + if off > lenmsg { + return labels, false + } + + labels++ + begin = i + 1 + default: + wasDot = false + } + } + + return labels, true +} + +// IsSubDomain checks if child is indeed a child of the parent. If child and parent +// are the same domain true is returned as well. +func IsSubDomain(parent, child string) bool { + // Entire child is contained in parent + return CompareDomainName(parent, child) == CountLabel(parent) +} + +// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet. +// The checking is performed on the binary payload. +func IsMsg(buf []byte) error { + // Header + if len(buf) < headerSize { + return errors.New("dns: bad message header") + } + // Header: Opcode + // TODO(miek): more checks here, e.g. check all header bits. + return nil +} + +// IsFqdn checks if a domain name is fully qualified. +func IsFqdn(s string) bool { + s2 := strings.TrimSuffix(s, ".") + if s == s2 { + return false + } + + i := strings.LastIndexFunc(s2, func(r rune) bool { + return r != '\\' + }) + + // Test whether we have an even number of escape sequences before + // the dot or none. + return (len(s2)-i)%2 != 0 +} + +// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181. +// This means the RRs need to have the same type, name, and class. 
Returns true +// if the RR set is valid, otherwise false. +func IsRRset(rrset []RR) bool { + if len(rrset) == 0 { + return false + } + if len(rrset) == 1 { + return true + } + rrHeader := rrset[0].Header() + rrType := rrHeader.Rrtype + rrClass := rrHeader.Class + rrName := rrHeader.Name + + for _, rr := range rrset[1:] { + curRRHeader := rr.Header() + if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName { + // Mismatch between the records, so this is not a valid rrset for + //signing/verifying + return false + } + } + + return true +} + +// Fqdn return the fully qualified domain name from s. +// If s is already fully qualified, it behaves as the identity function. +func Fqdn(s string) string { + if IsFqdn(s) { + return s + } + return s + "." +} + +// Copied from the official Go code. + +// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP +// address suitable for reverse DNS (PTR) record lookups or an error if it fails +// to parse the IP address. +func ReverseAddr(addr string) (arpa string, err error) { + ip := net.ParseIP(addr) + if ip == nil { + return "", &Error{err: "unrecognized address: " + addr} + } + if v4 := ip.To4(); v4 != nil { + buf := make([]byte, 0, net.IPv4len*4+len("in-addr.arpa.")) + // Add it, in reverse, to the buffer + for i := len(v4) - 1; i >= 0; i-- { + buf = strconv.AppendInt(buf, int64(v4[i]), 10) + buf = append(buf, '.') + } + // Append "in-addr.arpa." and return (buf already has the final .) + buf = append(buf, "in-addr.arpa."...) + return string(buf), nil + } + // Must be IPv6 + buf := make([]byte, 0, net.IPv6len*4+len("ip6.arpa.")) + // Add it, in reverse, to the buffer + for i := len(ip) - 1; i >= 0; i-- { + v := ip[i] + buf = append(buf, hexDigit[v&0xF]) + buf = append(buf, '.') + buf = append(buf, hexDigit[v>>4]) + buf = append(buf, '.') + } + // Append "ip6.arpa." and return (buf already has the final .) + buf = append(buf, "ip6.arpa."...) 
+ return string(buf), nil +} + +// String returns the string representation for the type t. +func (t Type) String() string { + if t1, ok := TypeToString[uint16(t)]; ok { + return t1 + } + return "TYPE" + strconv.Itoa(int(t)) +} + +// String returns the string representation for the class c. +func (c Class) String() string { + if s, ok := ClassToString[uint16(c)]; ok { + // Only emit mnemonics when they are unambiguous, specically ANY is in both. + if _, ok := StringToType[s]; !ok { + return s + } + } + return "CLASS" + strconv.Itoa(int(c)) +} + +// String returns the string representation for the name n. +func (n Name) String() string { + return sprintName(string(n)) +} diff --git a/vendor/github.com/miekg/dns/dns.go b/vendor/github.com/miekg/dns/dns.go new file mode 100644 index 00000000..ad83a27e --- /dev/null +++ b/vendor/github.com/miekg/dns/dns.go @@ -0,0 +1,134 @@ +package dns + +import "strconv" + +const ( + year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits. + defaultTtl = 3600 // Default internal TTL. + + // DefaultMsgSize is the standard default for messages larger than 512 bytes. + DefaultMsgSize = 4096 + // MinMsgSize is the minimal size of a DNS packet. + MinMsgSize = 512 + // MaxMsgSize is the largest possible DNS packet. + MaxMsgSize = 65535 +) + +// Error represents a DNS error. +type Error struct{ err string } + +func (e *Error) Error() string { + if e == nil { + return "dns: " + } + return "dns: " + e.err +} + +// An RR represents a resource record. +type RR interface { + // Header returns the header of an resource record. The header contains + // everything up to the rdata. + Header() *RR_Header + // String returns the text representation of the resource record. + String() string + + // copy returns a copy of the RR + copy() RR + + // len returns the length (in octets) of the compressed or uncompressed RR in wire format. 
+ // + // If compression is nil, the uncompressed size will be returned, otherwise the compressed + // size will be returned and domain names will be added to the map for future compression. + len(off int, compression map[string]struct{}) int + + // pack packs the records RDATA into wire format. The header will + // already have been packed into msg. + pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) + + // unpack unpacks an RR from wire format. + // + // This will only be called on a new and empty RR type with only the header populated. It + // will only be called if the record's RDATA is non-empty. + unpack(msg []byte, off int) (off1 int, err error) + + // parse parses an RR from zone file format. + // + // This will only be called on a new and empty RR type with only the header populated. + parse(c *zlexer, origin string) *ParseError + + // isDuplicate returns whether the two RRs are duplicates. + isDuplicate(r2 RR) bool +} + +// RR_Header is the header all DNS resource records share. +type RR_Header struct { + Name string `dns:"cdomain-name"` + Rrtype uint16 + Class uint16 + Ttl uint32 + Rdlength uint16 // Length of data after header. +} + +// Header returns itself. This is here to make RR_Header implements the RR interface. +func (h *RR_Header) Header() *RR_Header { return h } + +// Just to implement the RR interface. 
+func (h *RR_Header) copy() RR { return nil } + +func (h *RR_Header) String() string { + var s string + + if h.Rrtype == TypeOPT { + s = ";" + // and maybe other things + } + + s += sprintName(h.Name) + "\t" + s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" + s += Class(h.Class).String() + "\t" + s += Type(h.Rrtype).String() + "\t" + return s +} + +func (h *RR_Header) len(off int, compression map[string]struct{}) int { + l := domainNameLen(h.Name, off, compression, true) + l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2) + return l +} + +func (h *RR_Header) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + // RR_Header has no RDATA to pack. + return off, nil +} + +func (h *RR_Header) unpack(msg []byte, off int) (int, error) { + panic("dns: internal error: unpack should never be called on RR_Header") +} + +func (h *RR_Header) parse(c *zlexer, origin string) *ParseError { + panic("dns: internal error: parse should never be called on RR_Header") +} + +// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597. 
+func (rr *RFC3597) ToRFC3597(r RR) error { + buf := make([]byte, Len(r)*2) + headerEnd, off, err := packRR(r, buf, 0, compressionMap{}, false) + if err != nil { + return err + } + buf = buf[:off] + + *rr = RFC3597{Hdr: *r.Header()} + rr.Hdr.Rdlength = uint16(off - headerEnd) + + if noRdata(rr.Hdr) { + return nil + } + + _, err = rr.unpack(buf, headerEnd) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go new file mode 100644 index 00000000..12a693f9 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec.go @@ -0,0 +1,794 @@ +package dns + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + _ "crypto/md5" + "crypto/rand" + "crypto/rsa" + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "encoding/asn1" + "encoding/binary" + "encoding/hex" + "math/big" + "sort" + "strings" + "time" + + "golang.org/x/crypto/ed25519" +) + +// DNSSEC encryption algorithm codes. +const ( + _ uint8 = iota + RSAMD5 + DH + DSA + _ // Skip 4, RFC 6725, section 2.1 + RSASHA1 + DSANSEC3SHA1 + RSASHA1NSEC3SHA1 + RSASHA256 + _ // Skip 9, RFC 6725, section 2.1 + RSASHA512 + _ // Skip 11, RFC 6725, section 2.1 + ECCGOST + ECDSAP256SHA256 + ECDSAP384SHA384 + ED25519 + ED448 + INDIRECT uint8 = 252 + PRIVATEDNS uint8 = 253 // Private (experimental keys) + PRIVATEOID uint8 = 254 +) + +// AlgorithmToString is a map of algorithm IDs to algorithm names. 
+var AlgorithmToString = map[uint8]string{ + RSAMD5: "RSAMD5", + DH: "DH", + DSA: "DSA", + RSASHA1: "RSASHA1", + DSANSEC3SHA1: "DSA-NSEC3-SHA1", + RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1", + RSASHA256: "RSASHA256", + RSASHA512: "RSASHA512", + ECCGOST: "ECC-GOST", + ECDSAP256SHA256: "ECDSAP256SHA256", + ECDSAP384SHA384: "ECDSAP384SHA384", + ED25519: "ED25519", + ED448: "ED448", + INDIRECT: "INDIRECT", + PRIVATEDNS: "PRIVATEDNS", + PRIVATEOID: "PRIVATEOID", +} + +// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's. +var AlgorithmToHash = map[uint8]crypto.Hash{ + RSAMD5: crypto.MD5, // Deprecated in RFC 6725 + DSA: crypto.SHA1, + RSASHA1: crypto.SHA1, + RSASHA1NSEC3SHA1: crypto.SHA1, + RSASHA256: crypto.SHA256, + ECDSAP256SHA256: crypto.SHA256, + ECDSAP384SHA384: crypto.SHA384, + RSASHA512: crypto.SHA512, + ED25519: crypto.Hash(0), +} + +// DNSSEC hashing algorithm codes. +const ( + _ uint8 = iota + SHA1 // RFC 4034 + SHA256 // RFC 4509 + GOST94 // RFC 5933 + SHA384 // Experimental + SHA512 // Experimental +) + +// HashToString is a map of hash IDs to names. +var HashToString = map[uint8]string{ + SHA1: "SHA1", + SHA256: "SHA256", + GOST94: "GOST94", + SHA384: "SHA384", + SHA512: "SHA512", +} + +// DNSKEY flag values. +const ( + SEP = 1 + REVOKE = 1 << 7 + ZONE = 1 << 8 +) + +// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing. +type rrsigWireFmt struct { + TypeCovered uint16 + Algorithm uint8 + Labels uint8 + OrigTtl uint32 + Expiration uint32 + Inception uint32 + KeyTag uint16 + SignerName string `dns:"domain-name"` + /* No Signature */ +} + +// Used for converting DNSKEY's rdata to wirefmt. +type dnskeyWireFmt struct { + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` + /* Nothing is left out */ +} + +func divRoundUp(a, b int) int { + return (a + b - 1) / b +} + +// KeyTag calculates the keytag (or key-id) of the DNSKEY. 
+func (k *DNSKEY) KeyTag() uint16 { + if k == nil { + return 0 + } + var keytag int + switch k.Algorithm { + case RSAMD5: + // Look at the bottom two bytes of the modules, which the last + // item in the pubkey. + // This algorithm has been deprecated, but keep this key-tag calculation. + modulus, _ := fromBase64([]byte(k.PublicKey)) + if len(modulus) > 1 { + x := binary.BigEndian.Uint16(modulus[len(modulus)-2:]) + keytag = int(x) + } + default: + keywire := new(dnskeyWireFmt) + keywire.Flags = k.Flags + keywire.Protocol = k.Protocol + keywire.Algorithm = k.Algorithm + keywire.PublicKey = k.PublicKey + wire := make([]byte, DefaultMsgSize) + n, err := packKeyWire(keywire, wire) + if err != nil { + return 0 + } + wire = wire[:n] + for i, v := range wire { + if i&1 != 0 { + keytag += int(v) // must be larger than uint32 + } else { + keytag += int(v) << 8 + } + } + keytag += keytag >> 16 & 0xFFFF + keytag &= 0xFFFF + } + return uint16(keytag) +} + +// ToDS converts a DNSKEY record to a DS record. +func (k *DNSKEY) ToDS(h uint8) *DS { + if k == nil { + return nil + } + ds := new(DS) + ds.Hdr.Name = k.Hdr.Name + ds.Hdr.Class = k.Hdr.Class + ds.Hdr.Rrtype = TypeDS + ds.Hdr.Ttl = k.Hdr.Ttl + ds.Algorithm = k.Algorithm + ds.DigestType = h + ds.KeyTag = k.KeyTag() + + keywire := new(dnskeyWireFmt) + keywire.Flags = k.Flags + keywire.Protocol = k.Protocol + keywire.Algorithm = k.Algorithm + keywire.PublicKey = k.PublicKey + wire := make([]byte, DefaultMsgSize) + n, err := packKeyWire(keywire, wire) + if err != nil { + return nil + } + wire = wire[:n] + + owner := make([]byte, 255) + off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false) + if err1 != nil { + return nil + } + owner = owner[:off] + // RFC4034: + // digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA); + // "|" denotes concatenation + // DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key. 
+ + var hash crypto.Hash + switch h { + case SHA1: + hash = crypto.SHA1 + case SHA256: + hash = crypto.SHA256 + case SHA384: + hash = crypto.SHA384 + case SHA512: + hash = crypto.SHA512 + default: + return nil + } + + s := hash.New() + s.Write(owner) + s.Write(wire) + ds.Digest = hex.EncodeToString(s.Sum(nil)) + return ds +} + +// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record. +func (k *DNSKEY) ToCDNSKEY() *CDNSKEY { + c := &CDNSKEY{DNSKEY: *k} + c.Hdr = k.Hdr + c.Hdr.Rrtype = TypeCDNSKEY + return c +} + +// ToCDS converts a DS record to a CDS record. +func (d *DS) ToCDS() *CDS { + c := &CDS{DS: *d} + c.Hdr = d.Hdr + c.Hdr.Rrtype = TypeCDS + return c +} + +// Sign signs an RRSet. The signature needs to be filled in with the values: +// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied +// from the RRset. Sign returns a non-nill error when the signing went OK. +// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non +// zero, it is used as-is, otherwise the TTL of the RRset is used as the +// OrigTTL. 
+func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { + if k == nil { + return ErrPrivKey + } + // s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return ErrKey + } + + h0 := rrset[0].Header() + rr.Hdr.Rrtype = TypeRRSIG + rr.Hdr.Name = h0.Name + rr.Hdr.Class = h0.Class + if rr.OrigTtl == 0 { // If set don't override + rr.OrigTtl = h0.Ttl + } + rr.TypeCovered = h0.Rrtype + rr.Labels = uint8(CountLabel(h0.Name)) + + if strings.HasPrefix(h0.Name, "*") { + rr.Labels-- // wildcard, remove from label count + } + + sigwire := new(rrsigWireFmt) + sigwire.TypeCovered = rr.TypeCovered + sigwire.Algorithm = rr.Algorithm + sigwire.Labels = rr.Labels + sigwire.OrigTtl = rr.OrigTtl + sigwire.Expiration = rr.Expiration + sigwire.Inception = rr.Inception + sigwire.KeyTag = rr.KeyTag + // For signing, lowercase this name + sigwire.SignerName = strings.ToLower(rr.SignerName) + + // Create the desired binary blob + signdata := make([]byte, DefaultMsgSize) + n, err := packSigWire(sigwire, signdata) + if err != nil { + return err + } + signdata = signdata[:n] + wire, err := rawSignatureData(rrset, rr) + if err != nil { + return err + } + + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return ErrAlg + } + + switch rr.Algorithm { + case ED25519: + // ed25519 signs the raw message and performs hashing internally. + // All other supported signature schemes operate over the pre-hashed + // message, and thus ed25519 must be handled separately here. + // + // The raw message is passed directly into sign and crypto.Hash(0) is + // used to signal to the crypto.Signer that the data has not been hashed. + signature, err := sign(k, append(signdata, wire...), crypto.Hash(0), rr.Algorithm) + if err != nil { + return err + } + + rr.Signature = toBase64(signature) + case RSAMD5, DSA, DSANSEC3SHA1: + // See RFC 6944. 
+ return ErrAlg + default: + h := hash.New() + h.Write(signdata) + h.Write(wire) + + signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm) + if err != nil { + return err + } + + rr.Signature = toBase64(signature) + } + + return nil +} + +func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) { + signature, err := k.Sign(rand.Reader, hashed, hash) + if err != nil { + return nil, err + } + + switch alg { + case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512: + return signature, nil + + case ECDSAP256SHA256, ECDSAP384SHA384: + ecdsaSignature := &struct { + R, S *big.Int + }{} + if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil { + return nil, err + } + + var intlen int + switch alg { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + + signature := intToBytes(ecdsaSignature.R, intlen) + signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...) + return signature, nil + + // There is no defined interface for what a DSA backed crypto.Signer returns + case DSA, DSANSEC3SHA1: + // t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8) + // signature := []byte{byte(t)} + // signature = append(signature, intToBytes(r1, 20)...) + // signature = append(signature, intToBytes(s1, 20)...) + // rr.Signature = signature + + case ED25519: + return signature, nil + } + + return nil, ErrAlg +} + +// Verify validates an RRSet with the signature and key. This is only the +// cryptographic test, the signature validity period must be checked separately. +// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work. 
+func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { + // First the easy checks + if !IsRRset(rrset) { + return ErrRRset + } + if rr.KeyTag != k.KeyTag() { + return ErrKey + } + if rr.Hdr.Class != k.Hdr.Class { + return ErrKey + } + if rr.Algorithm != k.Algorithm { + return ErrKey + } + if !strings.EqualFold(rr.SignerName, k.Hdr.Name) { + return ErrKey + } + if k.Protocol != 3 { + return ErrKey + } + + // IsRRset checked that we have at least one RR and that the RRs in + // the set have consistent type, class, and name. Also check that type and + // class matches the RRSIG record. + if h0 := rrset[0].Header(); h0.Class != rr.Hdr.Class || h0.Rrtype != rr.TypeCovered { + return ErrRRset + } + + // RFC 4035 5.3.2. Reconstructing the Signed Data + // Copy the sig, except the rrsig data + sigwire := new(rrsigWireFmt) + sigwire.TypeCovered = rr.TypeCovered + sigwire.Algorithm = rr.Algorithm + sigwire.Labels = rr.Labels + sigwire.OrigTtl = rr.OrigTtl + sigwire.Expiration = rr.Expiration + sigwire.Inception = rr.Inception + sigwire.KeyTag = rr.KeyTag + sigwire.SignerName = strings.ToLower(rr.SignerName) + // Create the desired binary blob + signeddata := make([]byte, DefaultMsgSize) + n, err := packSigWire(sigwire, signeddata) + if err != nil { + return err + } + signeddata = signeddata[:n] + wire, err := rawSignatureData(rrset, rr) + if err != nil { + return err + } + + sigbuf := rr.sigBuf() // Get the binary signature data + if rr.Algorithm == PRIVATEDNS { // PRIVATEOID + // TODO(miek) + // remove the domain name and assume its ours? + } + + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return ErrAlg + } + + switch rr.Algorithm { + case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5: + // TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere?? 
+ pubkey := k.publicKeyRSA() // Get the key + if pubkey == nil { + return ErrKey + } + + h := hash.New() + h.Write(signeddata) + h.Write(wire) + return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf) + + case ECDSAP256SHA256, ECDSAP384SHA384: + pubkey := k.publicKeyECDSA() + if pubkey == nil { + return ErrKey + } + + // Split sigbuf into the r and s coordinates + r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2]) + s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:]) + + h := hash.New() + h.Write(signeddata) + h.Write(wire) + if ecdsa.Verify(pubkey, h.Sum(nil), r, s) { + return nil + } + return ErrSig + + case ED25519: + pubkey := k.publicKeyED25519() + if pubkey == nil { + return ErrKey + } + + if ed25519.Verify(pubkey, append(signeddata, wire...), sigbuf) { + return nil + } + return ErrSig + + default: + return ErrAlg + } +} + +// ValidityPeriod uses RFC1982 serial arithmetic to calculate +// if a signature period is valid. If t is the zero time, the +// current time is taken other t is. Returns true if the signature +// is valid at the given time, otherwise returns false. +func (rr *RRSIG) ValidityPeriod(t time.Time) bool { + var utc int64 + if t.IsZero() { + utc = time.Now().UTC().Unix() + } else { + utc = t.UTC().Unix() + } + modi := (int64(rr.Inception) - utc) / year68 + mode := (int64(rr.Expiration) - utc) / year68 + ti := int64(rr.Inception) + modi*year68 + te := int64(rr.Expiration) + mode*year68 + return ti <= utc && utc <= te +} + +// Return the signatures base64 encodedig sigdata as a byte slice. +func (rr *RRSIG) sigBuf() []byte { + sigbuf, err := fromBase64([]byte(rr.Signature)) + if err != nil { + return nil + } + return sigbuf +} + +// publicKeyRSA returns the RSA public key from a DNSKEY record. 
+func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + + if len(keybuf) < 1+1+64 { + // Exponent must be at least 1 byte and modulus at least 64 + return nil + } + + // RFC 2537/3110, section 2. RSA Public KEY Resource Records + // Length is in the 0th byte, unless its zero, then it + // it in bytes 1 and 2 and its a 16 bit number + explen := uint16(keybuf[0]) + keyoff := 1 + if explen == 0 { + explen = uint16(keybuf[1])<<8 | uint16(keybuf[2]) + keyoff = 3 + } + + if explen > 4 || explen == 0 || keybuf[keyoff] == 0 { + // Exponent larger than supported by the crypto package, + // empty, or contains prohibited leading zero. + return nil + } + + modoff := keyoff + int(explen) + modlen := len(keybuf) - modoff + if modlen < 64 || modlen > 512 || keybuf[modoff] == 0 { + // Modulus is too small, large, or contains prohibited leading zero. + return nil + } + + pubkey := new(rsa.PublicKey) + + var expo uint64 + // The exponent of length explen is between keyoff and modoff. + for _, v := range keybuf[keyoff:modoff] { + expo <<= 8 + expo |= uint64(v) + } + if expo > 1<<31-1 { + // Larger exponent than supported by the crypto package. + return nil + } + + pubkey.E = int(expo) + pubkey.N = new(big.Int).SetBytes(keybuf[modoff:]) + return pubkey +} + +// publicKeyECDSA returns the Curve public key from the DNSKEY record. 
+func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + pubkey := new(ecdsa.PublicKey) + switch k.Algorithm { + case ECDSAP256SHA256: + pubkey.Curve = elliptic.P256() + if len(keybuf) != 64 { + // wrongly encoded key + return nil + } + case ECDSAP384SHA384: + pubkey.Curve = elliptic.P384() + if len(keybuf) != 96 { + // Wrongly encoded key + return nil + } + } + pubkey.X = new(big.Int).SetBytes(keybuf[:len(keybuf)/2]) + pubkey.Y = new(big.Int).SetBytes(keybuf[len(keybuf)/2:]) + return pubkey +} + +func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + if len(keybuf) < 22 { + return nil + } + t, keybuf := int(keybuf[0]), keybuf[1:] + size := 64 + t*8 + q, keybuf := keybuf[:20], keybuf[20:] + if len(keybuf) != 3*size { + return nil + } + p, keybuf := keybuf[:size], keybuf[size:] + g, y := keybuf[:size], keybuf[size:] + pubkey := new(dsa.PublicKey) + pubkey.Parameters.Q = new(big.Int).SetBytes(q) + pubkey.Parameters.P = new(big.Int).SetBytes(p) + pubkey.Parameters.G = new(big.Int).SetBytes(g) + pubkey.Y = new(big.Int).SetBytes(y) + return pubkey +} + +func (k *DNSKEY) publicKeyED25519() ed25519.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + if len(keybuf) != ed25519.PublicKeySize { + return nil + } + return keybuf +} + +type wireSlice [][]byte + +func (p wireSlice) Len() int { return len(p) } +func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p wireSlice) Less(i, j int) bool { + _, ioff, _ := UnpackDomainName(p[i], 0) + _, joff, _ := UnpackDomainName(p[j], 0) + return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0 +} + +// Return the raw signature data. 
+func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) { + wires := make(wireSlice, len(rrset)) + for i, r := range rrset { + r1 := r.copy() + h := r1.Header() + h.Ttl = s.OrigTtl + labels := SplitDomainName(h.Name) + // 6.2. Canonical RR Form. (4) - wildcards + if len(labels) > int(s.Labels) { + // Wildcard + h.Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "." + } + // RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase + h.Name = strings.ToLower(h.Name) + // 6.2. Canonical RR Form. (3) - domain rdata to lowercase. + // NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR, + // HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX, + // SRV, DNAME, A6 + // + // RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC): + // Section 6.2 of [RFC4034] also erroneously lists HINFO as a record + // that needs conversion to lowercase, and twice at that. Since HINFO + // records contain no domain names, they are not subject to case + // conversion. 
+ switch x := r1.(type) { + case *NS: + x.Ns = strings.ToLower(x.Ns) + case *MD: + x.Md = strings.ToLower(x.Md) + case *MF: + x.Mf = strings.ToLower(x.Mf) + case *CNAME: + x.Target = strings.ToLower(x.Target) + case *SOA: + x.Ns = strings.ToLower(x.Ns) + x.Mbox = strings.ToLower(x.Mbox) + case *MB: + x.Mb = strings.ToLower(x.Mb) + case *MG: + x.Mg = strings.ToLower(x.Mg) + case *MR: + x.Mr = strings.ToLower(x.Mr) + case *PTR: + x.Ptr = strings.ToLower(x.Ptr) + case *MINFO: + x.Rmail = strings.ToLower(x.Rmail) + x.Email = strings.ToLower(x.Email) + case *MX: + x.Mx = strings.ToLower(x.Mx) + case *RP: + x.Mbox = strings.ToLower(x.Mbox) + x.Txt = strings.ToLower(x.Txt) + case *AFSDB: + x.Hostname = strings.ToLower(x.Hostname) + case *RT: + x.Host = strings.ToLower(x.Host) + case *SIG: + x.SignerName = strings.ToLower(x.SignerName) + case *PX: + x.Map822 = strings.ToLower(x.Map822) + x.Mapx400 = strings.ToLower(x.Mapx400) + case *NAPTR: + x.Replacement = strings.ToLower(x.Replacement) + case *KX: + x.Exchanger = strings.ToLower(x.Exchanger) + case *SRV: + x.Target = strings.ToLower(x.Target) + case *DNAME: + x.Target = strings.ToLower(x.Target) + } + // 6.2. Canonical RR Form. (5) - origTTL + wire := make([]byte, Len(r1)+1) // +1 to be safe(r) + off, err1 := PackRR(r1, wire, 0, nil, false) + if err1 != nil { + return nil, err1 + } + wire = wire[:off] + wires[i] = wire + } + sort.Sort(wires) + for i, wire := range wires { + if i > 0 && bytes.Equal(wire, wires[i-1]) { + continue + } + buf = append(buf, wire...) 
+ } + return buf, nil +} + +func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) { + // copied from zmsg.go RRSIG packing + off, err := packUint16(sw.TypeCovered, msg, 0) + if err != nil { + return off, err + } + off, err = packUint8(sw.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(sw.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(sw.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(sw.SignerName, msg, off, nil, false) + if err != nil { + return off, err + } + return off, nil +} + +func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) { + // copied from zmsg.go DNSKEY packing + off, err := packUint16(dw.Flags, msg, 0) + if err != nil { + return off, err + } + off, err = packUint8(dw.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(dw.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(dw.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/miekg/dns/dnssec_keygen.go new file mode 100644 index 00000000..60737e5b --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_keygen.go @@ -0,0 +1,140 @@ +package dns + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "math/big" + + "golang.org/x/crypto/ed25519" +) + +// Generate generates a DNSKEY of the given bit size. +// The public part is put inside the DNSKEY record. +// The Algorithm in the key must be set as this will define +// what kind of DNSKEY will be generated. 
+// The ECDSA algorithms imply a fixed keysize, in that case +// bits should be set to the size of the algorithm. +func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) { + switch k.Algorithm { + case RSAMD5, DSA, DSANSEC3SHA1: + return nil, ErrAlg + case RSASHA1, RSASHA256, RSASHA1NSEC3SHA1: + if bits < 512 || bits > 4096 { + return nil, ErrKeySize + } + case RSASHA512: + if bits < 1024 || bits > 4096 { + return nil, ErrKeySize + } + case ECDSAP256SHA256: + if bits != 256 { + return nil, ErrKeySize + } + case ECDSAP384SHA384: + if bits != 384 { + return nil, ErrKeySize + } + case ED25519: + if bits != 256 { + return nil, ErrKeySize + } + } + + switch k.Algorithm { + case RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1: + priv, err := rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N) + return priv, nil + case ECDSAP256SHA256, ECDSAP384SHA384: + var c elliptic.Curve + switch k.Algorithm { + case ECDSAP256SHA256: + c = elliptic.P256() + case ECDSAP384SHA384: + c = elliptic.P384() + } + priv, err := ecdsa.GenerateKey(c, rand.Reader) + if err != nil { + return nil, err + } + k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y) + return priv, nil + case ED25519: + pub, priv, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + return nil, err + } + k.setPublicKeyED25519(pub) + return priv, nil + default: + return nil, ErrAlg + } +} + +// Set the public key (the value E and N) +func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool { + if _E == 0 || _N == nil { + return false + } + buf := exponentToBuf(_E) + buf = append(buf, _N.Bytes()...) 
+ k.PublicKey = toBase64(buf) + return true +} + +// Set the public key for Elliptic Curves +func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool { + if _X == nil || _Y == nil { + return false + } + var intlen int + switch k.Algorithm { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen)) + return true +} + +// Set the public key for Ed25519 +func (k *DNSKEY) setPublicKeyED25519(_K ed25519.PublicKey) bool { + if _K == nil { + return false + } + k.PublicKey = toBase64(_K) + return true +} + +// Set the public key (the values E and N) for RSA +// RFC 3110: Section 2. RSA Public KEY Resource Records +func exponentToBuf(_E int) []byte { + var buf []byte + i := big.NewInt(int64(_E)).Bytes() + if len(i) < 256 { + buf = make([]byte, 1, 1+len(i)) + buf[0] = uint8(len(i)) + } else { + buf = make([]byte, 3, 3+len(i)) + buf[0] = 0 + buf[1] = uint8(len(i) >> 8) + buf[2] = uint8(len(i)) + } + buf = append(buf, i...) + return buf +} + +// Set the public key for X and Y for Curve. The two +// values are just concatenated. +func curveToBuf(_X, _Y *big.Int, intlen int) []byte { + buf := intToBytes(_X, intlen) + buf = append(buf, intToBytes(_Y, intlen)...) + return buf +} diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go new file mode 100644 index 00000000..0e6f3201 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_keyscan.go @@ -0,0 +1,322 @@ +package dns + +import ( + "bufio" + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "io" + "math/big" + "strconv" + "strings" + + "golang.org/x/crypto/ed25519" +) + +// NewPrivateKey returns a PrivateKey by parsing the string s. +// s should be in the same form of the BIND private key files. 
+func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) { + if s == "" || s[len(s)-1] != '\n' { // We need a closing newline + return k.ReadPrivateKey(strings.NewReader(s+"\n"), "") + } + return k.ReadPrivateKey(strings.NewReader(s), "") +} + +// ReadPrivateKey reads a private key from the io.Reader q. The string file is +// only used in error reporting. +// The public key must be known, because some cryptographic algorithms embed +// the public inside the privatekey. +func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) { + m, err := parseKey(q, file) + if m == nil { + return nil, err + } + if _, ok := m["private-key-format"]; !ok { + return nil, ErrPrivKey + } + if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" { + return nil, ErrPrivKey + } + // TODO(mg): check if the pubkey matches the private key + algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8) + if err != nil { + return nil, ErrPrivKey + } + switch uint8(algo) { + case RSAMD5, DSA, DSANSEC3SHA1: + return nil, ErrAlg + case RSASHA1: + fallthrough + case RSASHA1NSEC3SHA1: + fallthrough + case RSASHA256: + fallthrough + case RSASHA512: + priv, err := readPrivateKeyRSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyRSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + case ECCGOST: + return nil, ErrPrivKey + case ECDSAP256SHA256: + fallthrough + case ECDSAP384SHA384: + priv, err := readPrivateKeyECDSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyECDSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + case ED25519: + return readPrivateKeyED25519(m) + default: + return nil, ErrPrivKey + } +} + +// Read a private key (file) string and create a public key. Return the private key. 
+func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) { + p := new(rsa.PrivateKey) + p.Primes = []*big.Int{nil, nil} + for k, v := range m { + switch k { + case "modulus", "publicexponent", "privateexponent", "prime1", "prime2": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + switch k { + case "modulus": + p.PublicKey.N = new(big.Int).SetBytes(v1) + case "publicexponent": + i := new(big.Int).SetBytes(v1) + p.PublicKey.E = int(i.Int64()) // int64 should be large enough + case "privateexponent": + p.D = new(big.Int).SetBytes(v1) + case "prime1": + p.Primes[0] = new(big.Int).SetBytes(v1) + case "prime2": + p.Primes[1] = new(big.Int).SetBytes(v1) + } + case "exponent1", "exponent2", "coefficient": + // not used in Go (yet) + case "created", "publish", "activate": + // not used in Go (yet) + } + } + return p, nil +} + +func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) { + p := new(ecdsa.PrivateKey) + p.D = new(big.Int) + // TODO: validate that the required flags are present + for k, v := range m { + switch k { + case "privatekey": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + p.D.SetBytes(v1) + case "created", "publish", "activate": + /* not used in Go (yet) */ + } + } + return p, nil +} + +func readPrivateKeyED25519(m map[string]string) (ed25519.PrivateKey, error) { + var p ed25519.PrivateKey + // TODO: validate that the required flags are present + for k, v := range m { + switch k { + case "privatekey": + p1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + if len(p1) != ed25519.SeedSize { + return nil, ErrPrivKey + } + p = ed25519.NewKeyFromSeed(p1) + case "created", "publish", "activate": + /* not used in Go (yet) */ + } + } + return p, nil +} + +// parseKey reads a private key from r. It returns a map[string]string, +// with the key-value pairs, or an error when the file is not correct. 
+func parseKey(r io.Reader, file string) (map[string]string, error) { + m := make(map[string]string) + var k string + + c := newKLexer(r) + + for l, ok := c.Next(); ok; l, ok = c.Next() { + // It should alternate + switch l.value { + case zKey: + k = l.token + case zValue: + if k == "" { + return nil, &ParseError{file, "no private key seen", l} + } + + m[strings.ToLower(k)] = l.token + k = "" + } + } + + // Surface any read errors from r. + if err := c.Err(); err != nil { + return nil, &ParseError{file: file, err: err.Error()} + } + + return m, nil +} + +type klexer struct { + br io.ByteReader + + readErr error + + line int + column int + + key bool + + eol bool // end-of-line +} + +func newKLexer(r io.Reader) *klexer { + br, ok := r.(io.ByteReader) + if !ok { + br = bufio.NewReaderSize(r, 1024) + } + + return &klexer{ + br: br, + + line: 1, + + key: true, + } +} + +func (kl *klexer) Err() error { + if kl.readErr == io.EOF { + return nil + } + + return kl.readErr +} + +// readByte returns the next byte from the input +func (kl *klexer) readByte() (byte, bool) { + if kl.readErr != nil { + return 0, false + } + + c, err := kl.br.ReadByte() + if err != nil { + kl.readErr = err + return 0, false + } + + // delay the newline handling until the next token is delivered, + // fixes off-by-one errors when reporting a parse error. 
+ if kl.eol { + kl.line++ + kl.column = 0 + kl.eol = false + } + + if c == '\n' { + kl.eol = true + } else { + kl.column++ + } + + return c, true +} + +func (kl *klexer) Next() (lex, bool) { + var ( + l lex + + str strings.Builder + + commt bool + ) + + for x, ok := kl.readByte(); ok; x, ok = kl.readByte() { + l.line, l.column = kl.line, kl.column + + switch x { + case ':': + if commt || !kl.key { + break + } + + kl.key = false + + // Next token is a space, eat it + kl.readByte() + + l.value = zKey + l.token = str.String() + return l, true + case ';': + commt = true + case '\n': + if commt { + // Reset a comment + commt = false + } + + if kl.key && str.Len() == 0 { + // ignore empty lines + break + } + + kl.key = true + + l.value = zValue + l.token = str.String() + return l, true + default: + if commt { + break + } + + str.WriteByte(x) + } + } + + if kl.readErr != nil && kl.readErr != io.EOF { + // Don't return any tokens after a read error occurs. + return lex{value: zEOF}, false + } + + if str.Len() > 0 { + // Send remainder + l.value = zValue + l.token = str.String() + return l, true + } + + return lex{value: zEOF}, false +} diff --git a/vendor/github.com/miekg/dns/dnssec_privkey.go b/vendor/github.com/miekg/dns/dnssec_privkey.go new file mode 100644 index 00000000..4493c9d5 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_privkey.go @@ -0,0 +1,94 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "math/big" + "strconv" + + "golang.org/x/crypto/ed25519" +) + +const format = "Private-key-format: v1.3\n" + +var bigIntOne = big.NewInt(1) + +// PrivateKeyString converts a PrivateKey to a string. This string has the same +// format as the private-key-file of BIND9 (Private-key-format: v1.3). 
+// It needs some info from the key (the algorithm), so its a method of the DNSKEY +// It supports rsa.PrivateKey, ecdsa.PrivateKey and dsa.PrivateKey +func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string { + algorithm := strconv.Itoa(int(r.Algorithm)) + algorithm += " (" + AlgorithmToString[r.Algorithm] + ")" + + switch p := p.(type) { + case *rsa.PrivateKey: + modulus := toBase64(p.PublicKey.N.Bytes()) + e := big.NewInt(int64(p.PublicKey.E)) + publicExponent := toBase64(e.Bytes()) + privateExponent := toBase64(p.D.Bytes()) + prime1 := toBase64(p.Primes[0].Bytes()) + prime2 := toBase64(p.Primes[1].Bytes()) + // Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm + // and from: http://code.google.com/p/go/issues/detail?id=987 + p1 := new(big.Int).Sub(p.Primes[0], bigIntOne) + q1 := new(big.Int).Sub(p.Primes[1], bigIntOne) + exp1 := new(big.Int).Mod(p.D, p1) + exp2 := new(big.Int).Mod(p.D, q1) + coeff := new(big.Int).ModInverse(p.Primes[1], p.Primes[0]) + + exponent1 := toBase64(exp1.Bytes()) + exponent2 := toBase64(exp2.Bytes()) + coefficient := toBase64(coeff.Bytes()) + + return format + + "Algorithm: " + algorithm + "\n" + + "Modulus: " + modulus + "\n" + + "PublicExponent: " + publicExponent + "\n" + + "PrivateExponent: " + privateExponent + "\n" + + "Prime1: " + prime1 + "\n" + + "Prime2: " + prime2 + "\n" + + "Exponent1: " + exponent1 + "\n" + + "Exponent2: " + exponent2 + "\n" + + "Coefficient: " + coefficient + "\n" + + case *ecdsa.PrivateKey: + var intlen int + switch r.Algorithm { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + private := toBase64(intToBytes(p.D, intlen)) + return format + + "Algorithm: " + algorithm + "\n" + + "PrivateKey: " + private + "\n" + + case *dsa.PrivateKey: + T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8) + prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8)) + subprime := 
toBase64(intToBytes(p.PublicKey.Parameters.Q, 20)) + base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8)) + priv := toBase64(intToBytes(p.X, 20)) + pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8)) + return format + + "Algorithm: " + algorithm + "\n" + + "Prime(p): " + prime + "\n" + + "Subprime(q): " + subprime + "\n" + + "Base(g): " + base + "\n" + + "Private_value(x): " + priv + "\n" + + "Public_value(y): " + pub + "\n" + + case ed25519.PrivateKey: + private := toBase64(p.Seed()) + return format + + "Algorithm: " + algorithm + "\n" + + "PrivateKey: " + private + "\n" + + default: + return "" + } +} diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go new file mode 100644 index 00000000..3318b77e --- /dev/null +++ b/vendor/github.com/miekg/dns/doc.go @@ -0,0 +1,268 @@ +/* +Package dns implements a full featured interface to the Domain Name System. +Both server- and client-side programming is supported. The package allows +complete control over what is sent out to the DNS. The API follows the +less-is-more principle, by presenting a small, clean interface. + +It supports (asynchronous) querying/replying, incoming/outgoing zone transfers, +TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing. + +Note that domain names MUST be fully qualified before sending them, unqualified +names in a message will result in a packing failure. + +Resource records are native types. They are not stored in wire format. Basic +usage pattern for creating a new resource record: + + r := new(dns.MX) + r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600} + r.Preference = 10 + r.Mx = "mx.miek.nl." + +Or directly from a string: + + mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.") + +Or when the default origin (.) 
and TTL (3600) and class (IN) suit you: + + mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl") + +Or even: + + mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek") + +In the DNS messages are exchanged, these messages contain resource records +(sets). Use pattern for creating a message: + + m := new(dns.Msg) + m.SetQuestion("miek.nl.", dns.TypeMX) + +Or when not certain if the domain name is fully qualified: + + m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX) + +The message m is now a message with the question section set to ask the MX +records for the miek.nl. zone. + +The following is slightly more verbose, but more flexible: + + m1 := new(dns.Msg) + m1.Id = dns.Id() + m1.RecursionDesired = true + m1.Question = make([]dns.Question, 1) + m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET} + +After creating a message it can be sent. Basic use pattern for synchronous +querying the DNS at a server configured on 127.0.0.1 and port 53: + + c := new(dns.Client) + in, rtt, err := c.Exchange(m1, "127.0.0.1:53") + +Suppressing multiple outstanding queries (with the same question, type and +class) is as easy as setting: + + c.SingleInflight = true + +More advanced options are available using a net.Dialer and the corresponding API. +For example it is possible to set a timeout, or to specify a source IP address +and port to use for the connection: + + c := new(dns.Client) + laddr := net.UDPAddr{ + IP: net.ParseIP("[::1]"), + Port: 12345, + Zone: "", + } + c.Dialer := &net.Dialer{ + Timeout: 200 * time.Millisecond, + LocalAddr: &laddr, + } + in, rtt, err := c.Exchange(m1, "8.8.8.8:53") + +If these "advanced" features are not needed, a simple UDP query can be sent, +with: + + in, err := dns.Exchange(m1, "127.0.0.1:53") + +When this functions returns you will get DNS message. A DNS message consists +out of four sections. +The question section: in.Question, the answer section: in.Answer, +the authority section: in.Ns and the additional section: in.Extra. 
+ +Each of these sections (except the Question section) contain a []RR. Basic +use pattern for accessing the rdata of a TXT RR as the first RR in +the Answer section: + + if t, ok := in.Answer[0].(*dns.TXT); ok { + // do something with t.Txt + } + +Domain Name and TXT Character String Representations + +Both domain names and TXT character strings are converted to presentation form +both when unpacked and when converted to strings. + +For TXT character strings, tabs, carriage returns and line feeds will be +converted to \t, \r and \n respectively. Back slashes and quotations marks will +be escaped. Bytes below 32 and above 127 will be converted to \DDD form. + +For domain names, in addition to the above rules brackets, periods, spaces, +semicolons and the at symbol are escaped. + +DNSSEC + +DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It uses +public key cryptography to sign resource records. The public keys are stored in +DNSKEY records and the signatures in RRSIG records. + +Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) +bit to a request. + + m := new(dns.Msg) + m.SetEdns0(4096, true) + +Signature generation, signature verification and key generation are all supported. + +DYNAMIC UPDATES + +Dynamic updates reuses the DNS message format, but renames three of the +sections. Question is Zone, Answer is Prerequisite, Authority is Update, only +the Additional is not renamed. See RFC 2136 for the gory details. + +You can set a rather complex set of rules for the existence of absence of +certain resource records or names in a zone to specify if resource records +should be added or removed. The table from RFC 2136 supplemented with the Go +DNS function shows which functions exist to specify the prerequisites. 
+ + 3.2.4 - Table Of Metavalues Used In Prerequisite Section + + CLASS TYPE RDATA Meaning Function + -------------------------------------------------------------- + ANY ANY empty Name is in use dns.NameUsed + ANY rrset empty RRset exists (value indep) dns.RRsetUsed + NONE ANY empty Name is not in use dns.NameNotUsed + NONE rrset empty RRset does not exist dns.RRsetNotUsed + zone rrset rr RRset exists (value dep) dns.Used + +The prerequisite section can also be left empty. If you have decided on the +prerequisites you can tell what RRs should be added or deleted. The next table +shows the options you have and what functions to call. + + 3.4.2.6 - Table Of Metavalues Used In Update Section + + CLASS TYPE RDATA Meaning Function + --------------------------------------------------------------- + ANY ANY empty Delete all RRsets from name dns.RemoveName + ANY rrset empty Delete an RRset dns.RemoveRRset + NONE rrset rr Delete an RR from RRset dns.Remove + zone rrset rr Add to an RRset dns.Insert + +TRANSACTION SIGNATURE + +An TSIG or transaction signature adds a HMAC TSIG record to each message sent. +The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512. + +Basic use pattern when querying with a TSIG name "axfr." (note that these key names +must be fully qualified - as they are domain names) and the base64 secret +"so6ZGir4GPAqINNh9U5c3A==": + +If an incoming message contains a TSIG record it MUST be the last record in +the additional section (RFC2845 3.2). This means that you should make the +call to SetTsig last, right before executing the query. If you make any +changes to the RRset after calling SetTsig() the signature will be incorrect. + + c := new(dns.Client) + c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + m := new(dns.Msg) + m.SetQuestion("miek.nl.", dns.TypeMX) + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + ... 
+ // When sending the TSIG RR is calculated and filled in before sending + +When requesting an zone transfer (almost all TSIG usage is when requesting zone +transfers), with TSIG, this is the basic use pattern. In this example we +request an AXFR for miek.nl. with TSIG key named "axfr." and secret +"so6ZGir4GPAqINNh9U5c3A==" and using the server 176.58.119.54: + + t := new(dns.Transfer) + m := new(dns.Msg) + t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + m.SetAxfr("miek.nl.") + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + c, err := t.In(m, "176.58.119.54:53") + for r := range c { ... } + +You can now read the records from the transfer as they come in. Each envelope +is checked with TSIG. If something is not correct an error is returned. + +Basic use pattern validating and replying to a message that has TSIG set. + + server := &dns.Server{Addr: ":53", Net: "udp"} + server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + go server.ListenAndServe() + dns.HandleFunc(".", handleRequest) + + func handleRequest(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + if r.IsTsig() != nil { + if w.TsigStatus() == nil { + // *Msg r has an TSIG record and it was validated + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + } else { + // *Msg r has an TSIG records and it was not valided + } + } + w.WriteMsg(m) + } + +PRIVATE RRS + +RFC 6895 sets aside a range of type codes for private use. This range is 65,280 +- 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these +can be used, before requesting an official type code from IANA. + +See https://miek.nl/2014/september/21/idn-and-private-rr-in-go-dns/ for more +information. + +EDNS0 + +EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated by +RFC 6891. It defines an new RR type, the OPT RR, which is then completely +abused. 
+ +Basic use pattern for creating an (empty) OPT RR: + + o := new(dns.OPT) + o.Hdr.Name = "." // MUST be the root zone, per definition. + o.Hdr.Rrtype = dns.TypeOPT + +The rdata of an OPT RR consists out of a slice of EDNS0 (RFC 6891) interfaces. +Currently only a few have been standardized: EDNS0_NSID (RFC 5001) and +EDNS0_SUBNET (RFC 7871). Note that these options may be combined in an OPT RR. +Basic use pattern for a server to check if (and which) options are set: + + // o is a dns.OPT + for _, s := range o.Option { + switch e := s.(type) { + case *dns.EDNS0_NSID: + // do stuff with e.Nsid + case *dns.EDNS0_SUBNET: + // access e.Family, e.Address, etc. + } + } + +SIG(0) + +From RFC 2931: + + SIG(0) provides protection for DNS transactions and requests .... + ... protection for glue records, DNS requests, protection for message headers + on requests and responses, and protection of the overall integrity of a response. + +It works like TSIG, except that SIG(0) uses public key cryptography, instead of +the shared secret approach in TSIG. Supported algorithms: DSA, ECDSAP256SHA256, +ECDSAP384SHA384, RSASHA1, RSASHA256 and RSASHA512. + +Signing subsequent messages in multi-message sessions is not implemented. +*/ +package dns diff --git a/vendor/github.com/miekg/dns/duplicate.go b/vendor/github.com/miekg/dns/duplicate.go new file mode 100644 index 00000000..49e6940b --- /dev/null +++ b/vendor/github.com/miekg/dns/duplicate.go @@ -0,0 +1,38 @@ +package dns + +//go:generate go run duplicate_generate.go + +// IsDuplicate checks of r1 and r2 are duplicates of each other, excluding the TTL. +// So this means the header data is equal *and* the RDATA is the same. Return true +// is so, otherwise false. +// It's a protocol violation to have identical RRs in a message. +func IsDuplicate(r1, r2 RR) bool { + // Check whether the record header is identical. + if !r1.Header().isDuplicate(r2.Header()) { + return false + } + + // Check whether the RDATA is identical. 
+ return r1.isDuplicate(r2) +} + +func (r1 *RR_Header) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*RR_Header) + if !ok { + return false + } + if r1.Class != r2.Class { + return false + } + if r1.Rrtype != r2.Rrtype { + return false + } + if !isDuplicateName(r1.Name, r2.Name) { + return false + } + // ignore TTL + return true +} + +// isDuplicateName checks if the domain names s1 and s2 are equal. +func isDuplicateName(s1, s2 string) bool { return equal(s1, s2) } diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go new file mode 100644 index 00000000..04808d57 --- /dev/null +++ b/vendor/github.com/miekg/dns/edns.go @@ -0,0 +1,675 @@ +package dns + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "net" + "strconv" +) + +// EDNS0 Option codes. +const ( + EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 + EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt + EDNS0NSID = 0x3 // nsid (See RFC 5001) + EDNS0DAU = 0x5 // DNSSEC Algorithm Understood + EDNS0DHU = 0x6 // DS Hash Understood + EDNS0N3U = 0x7 // NSEC3 Hash Understood + EDNS0SUBNET = 0x8 // client-subnet (See RFC 7871) + EDNS0EXPIRE = 0x9 // EDNS0 expire + EDNS0COOKIE = 0xa // EDNS0 Cookie + EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (See RFC 7828) + EDNS0PADDING = 0xc // EDNS0 padding (See RFC 7830) + EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891) + EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891) + _DO = 1 << 15 // DNSSEC OK +) + +// OPT is the EDNS0 RR appended to messages to convey extra (meta) information. +// See RFC 6891. 
+type OPT struct { + Hdr RR_Header + Option []EDNS0 `dns:"opt"` +} + +func (rr *OPT) String() string { + s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; " + if rr.Do() { + s += "flags: do; " + } else { + s += "flags: ; " + } + s += "udp: " + strconv.Itoa(int(rr.UDPSize())) + + for _, o := range rr.Option { + switch o.(type) { + case *EDNS0_NSID: + s += "\n; NSID: " + o.String() + h, e := o.pack() + var r string + if e == nil { + for _, c := range h { + r += "(" + string(c) + ")" + } + s += " " + r + } + case *EDNS0_SUBNET: + s += "\n; SUBNET: " + o.String() + case *EDNS0_COOKIE: + s += "\n; COOKIE: " + o.String() + case *EDNS0_UL: + s += "\n; UPDATE LEASE: " + o.String() + case *EDNS0_LLQ: + s += "\n; LONG LIVED QUERIES: " + o.String() + case *EDNS0_DAU: + s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String() + case *EDNS0_DHU: + s += "\n; DS HASH UNDERSTOOD: " + o.String() + case *EDNS0_N3U: + s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String() + case *EDNS0_LOCAL: + s += "\n; LOCAL OPT: " + o.String() + case *EDNS0_PADDING: + s += "\n; PADDING: " + o.String() + } + } + return s +} + +func (rr *OPT) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + for _, o := range rr.Option { + l += 4 // Account for 2-byte option code and 2-byte option length. + lo, _ := o.pack() + l += len(lo) + } + return l +} + +func (rr *OPT) parse(c *zlexer, origin string) *ParseError { + panic("dns: internal error: parse should never be called on OPT") +} + +func (r1 *OPT) isDuplicate(r2 RR) bool { return false } + +// return the old value -> delete SetVersion? + +// Version returns the EDNS version used. Only zero is defined. +func (rr *OPT) Version() uint8 { + return uint8(rr.Hdr.Ttl & 0x00FF0000 >> 16) +} + +// SetVersion sets the version of EDNS. This is usually zero. 
+func (rr *OPT) SetVersion(v uint8) { + rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | uint32(v)<<16 +} + +// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL). +func (rr *OPT) ExtendedRcode() int { + return int(rr.Hdr.Ttl&0xFF000000>>24) << 4 +} + +// SetExtendedRcode sets the EDNS extended RCODE field. +// +// If the RCODE is not an extended RCODE, will reset the extended RCODE field to 0. +func (rr *OPT) SetExtendedRcode(v uint16) { + rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | uint32(v>>4)<<24 +} + +// UDPSize returns the UDP buffer size. +func (rr *OPT) UDPSize() uint16 { + return rr.Hdr.Class +} + +// SetUDPSize sets the UDP buffer size. +func (rr *OPT) SetUDPSize(size uint16) { + rr.Hdr.Class = size +} + +// Do returns the value of the DO (DNSSEC OK) bit. +func (rr *OPT) Do() bool { + return rr.Hdr.Ttl&_DO == _DO +} + +// SetDo sets the DO (DNSSEC OK) bit. +// If we pass an argument, set the DO bit to that value. +// It is possible to pass 2 or more arguments. Any arguments after the 1st is silently ignored. +func (rr *OPT) SetDo(do ...bool) { + if len(do) == 1 { + if do[0] { + rr.Hdr.Ttl |= _DO + } else { + rr.Hdr.Ttl &^= _DO + } + } else { + rr.Hdr.Ttl |= _DO + } +} + +// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it. +type EDNS0 interface { + // Option returns the option code for the option. + Option() uint16 + // pack returns the bytes of the option data. + pack() ([]byte, error) + // unpack sets the data as found in the buffer. Is also sets + // the length of the slice as the length of the option data. + unpack([]byte) error + // String returns the string representation of the option. + String() string + // copy returns a deep-copy of the option. + copy() EDNS0 +} + +// EDNS0_NSID option is used to retrieve a nameserver +// identifier. When sending a request Nsid must be set to the empty string +// The identifier is an opaque string encoded as hex. 
+// Basic use pattern for creating an nsid option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_NSID) +// e.Code = dns.EDNS0NSID +// e.Nsid = "AA" +// o.Option = append(o.Option, e) +type EDNS0_NSID struct { + Code uint16 // Always EDNS0NSID + Nsid string // This string needs to be hex encoded +} + +func (e *EDNS0_NSID) pack() ([]byte, error) { + h, err := hex.DecodeString(e.Nsid) + if err != nil { + return nil, err + } + return h, nil +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } // Option returns the option code. +func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil } +func (e *EDNS0_NSID) String() string { return e.Nsid } +func (e *EDNS0_NSID) copy() EDNS0 { return &EDNS0_NSID{e.Code, e.Nsid} } + +// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver +// an idea of where the client lives. See RFC 7871. It can then give back a different +// answer depending on the location or network topology. +// Basic use pattern for creating an subnet option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_SUBNET) +// e.Code = dns.EDNS0SUBNET +// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6 +// e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6 +// e.SourceScope = 0 +// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4 +// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6 +// o.Option = append(o.Option, e) +// +// This code will parse all the available bits when unpacking (up to optlen). +// When packing it will apply SourceNetmask. If you need more advanced logic, +// patches welcome and good luck. +type EDNS0_SUBNET struct { + Code uint16 // Always EDNS0SUBNET + Family uint16 // 1 for IP, 2 for IP6 + SourceNetmask uint8 + SourceScope uint8 + Address net.IP +} + +// Option implements the EDNS0 interface. 
+func (e *EDNS0_SUBNET) Option() uint16 { return EDNS0SUBNET } + +func (e *EDNS0_SUBNET) pack() ([]byte, error) { + b := make([]byte, 4) + binary.BigEndian.PutUint16(b[0:], e.Family) + b[2] = e.SourceNetmask + b[3] = e.SourceScope + switch e.Family { + case 0: + // "dig" sets AddressFamily to 0 if SourceNetmask is also 0 + // We might don't need to complain either + if e.SourceNetmask != 0 { + return nil, errors.New("dns: bad address family") + } + case 1: + if e.SourceNetmask > net.IPv4len*8 { + return nil, errors.New("dns: bad netmask") + } + if len(e.Address.To4()) != net.IPv4len { + return nil, errors.New("dns: bad address") + } + ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8)) + needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up + b = append(b, ip[:needLength]...) + case 2: + if e.SourceNetmask > net.IPv6len*8 { + return nil, errors.New("dns: bad netmask") + } + if len(e.Address) != net.IPv6len { + return nil, errors.New("dns: bad address") + } + ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8)) + needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up + b = append(b, ip[:needLength]...) 
+ default: + return nil, errors.New("dns: bad address family") + } + return b, nil +} + +func (e *EDNS0_SUBNET) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Family = binary.BigEndian.Uint16(b) + e.SourceNetmask = b[2] + e.SourceScope = b[3] + switch e.Family { + case 0: + // "dig" sets AddressFamily to 0 if SourceNetmask is also 0 + // It's okay to accept such a packet + if e.SourceNetmask != 0 { + return errors.New("dns: bad address family") + } + e.Address = net.IPv4(0, 0, 0, 0) + case 1: + if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 { + return errors.New("dns: bad netmask") + } + addr := make(net.IP, net.IPv4len) + copy(addr, b[4:]) + e.Address = addr.To16() + case 2: + if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 { + return errors.New("dns: bad netmask") + } + addr := make(net.IP, net.IPv6len) + copy(addr, b[4:]) + e.Address = addr + default: + return errors.New("dns: bad address family") + } + return nil +} + +func (e *EDNS0_SUBNET) String() (s string) { + if e.Address == nil { + s = "" + } else if e.Address.To4() != nil { + s = e.Address.String() + } else { + s = "[" + e.Address.String() + "]" + } + s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope)) + return +} + +func (e *EDNS0_SUBNET) copy() EDNS0 { + return &EDNS0_SUBNET{ + e.Code, + e.Family, + e.SourceNetmask, + e.SourceScope, + e.Address, + } +} + +// The EDNS0_COOKIE option is used to add a DNS Cookie to a message. +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_COOKIE) +// e.Code = dns.EDNS0COOKIE +// e.Cookie = "24a5ac.." +// o.Option = append(o.Option, e) +// +// The Cookie field consists out of a client cookie (RFC 7873 Section 4), that is +// always 8 bytes. It may then optionally be followed by the server cookie. The server +// cookie is of variable length, 8 to a maximum of 32 bytes. 
In other words: +// +// cCookie := o.Cookie[:16] +// sCookie := o.Cookie[16:] +// +// There is no guarantee that the Cookie string has a specific length. +type EDNS0_COOKIE struct { + Code uint16 // Always EDNS0COOKIE + Cookie string // Hex-encoded cookie data +} + +func (e *EDNS0_COOKIE) pack() ([]byte, error) { + h, err := hex.DecodeString(e.Cookie) + if err != nil { + return nil, err + } + return h, nil +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE } +func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil } +func (e *EDNS0_COOKIE) String() string { return e.Cookie } +func (e *EDNS0_COOKIE) copy() EDNS0 { return &EDNS0_COOKIE{e.Code, e.Cookie} } + +// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set +// an expiration on an update RR. This is helpful for clients that cannot clean +// up after themselves. This is a draft RFC and more information can be found at +// https://tools.ietf.org/html/draft-sekar-dns-ul-02 +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_UL) +// e.Code = dns.EDNS0UL +// e.Lease = 120 // in seconds +// o.Option = append(o.Option, e) +type EDNS0_UL struct { + Code uint16 // Always EDNS0UL + Lease uint32 + KeyLease uint32 +} + +// Option implements the EDNS0 interface. 
+func (e *EDNS0_UL) Option() uint16 { return EDNS0UL } +func (e *EDNS0_UL) String() string { return fmt.Sprintf("%d %d", e.Lease, e.KeyLease) } +func (e *EDNS0_UL) copy() EDNS0 { return &EDNS0_UL{e.Code, e.Lease, e.KeyLease} } + +// Copied: http://golang.org/src/pkg/net/dnsmsg.go +func (e *EDNS0_UL) pack() ([]byte, error) { + var b []byte + if e.KeyLease == 0 { + b = make([]byte, 4) + } else { + b = make([]byte, 8) + binary.BigEndian.PutUint32(b[4:], e.KeyLease) + } + binary.BigEndian.PutUint32(b, e.Lease) + return b, nil +} + +func (e *EDNS0_UL) unpack(b []byte) error { + switch len(b) { + case 4: + e.KeyLease = 0 + case 8: + e.KeyLease = binary.BigEndian.Uint32(b[4:]) + default: + return ErrBuf + } + e.Lease = binary.BigEndian.Uint32(b) + return nil +} + +// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 +// Implemented for completeness, as the EDNS0 type code is assigned. +type EDNS0_LLQ struct { + Code uint16 // Always EDNS0LLQ + Version uint16 + Opcode uint16 + Error uint16 + Id uint64 + LeaseLife uint32 +} + +// Option implements the EDNS0 interface. 
+func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ } + +func (e *EDNS0_LLQ) pack() ([]byte, error) { + b := make([]byte, 18) + binary.BigEndian.PutUint16(b[0:], e.Version) + binary.BigEndian.PutUint16(b[2:], e.Opcode) + binary.BigEndian.PutUint16(b[4:], e.Error) + binary.BigEndian.PutUint64(b[6:], e.Id) + binary.BigEndian.PutUint32(b[14:], e.LeaseLife) + return b, nil +} + +func (e *EDNS0_LLQ) unpack(b []byte) error { + if len(b) < 18 { + return ErrBuf + } + e.Version = binary.BigEndian.Uint16(b[0:]) + e.Opcode = binary.BigEndian.Uint16(b[2:]) + e.Error = binary.BigEndian.Uint16(b[4:]) + e.Id = binary.BigEndian.Uint64(b[6:]) + e.LeaseLife = binary.BigEndian.Uint32(b[14:]) + return nil +} + +func (e *EDNS0_LLQ) String() string { + s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) + + " " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(e.Id, 10) + + " " + strconv.FormatUint(uint64(e.LeaseLife), 10) + return s +} +func (e *EDNS0_LLQ) copy() EDNS0 { + return &EDNS0_LLQ{e.Code, e.Version, e.Opcode, e.Error, e.Id, e.LeaseLife} +} + +// EDNS0_DUA implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975. +type EDNS0_DAU struct { + Code uint16 // Always EDNS0DAU + AlgCode []uint8 +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU } +func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_DAU) String() string { + s := "" + for _, alg := range e.AlgCode { + if a, ok := AlgorithmToString[alg]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(alg)) + } + } + return s +} +func (e *EDNS0_DAU) copy() EDNS0 { return &EDNS0_DAU{e.Code, e.AlgCode} } + +// EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975. 
+type EDNS0_DHU struct { + Code uint16 // Always EDNS0DHU + AlgCode []uint8 +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU } +func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_DHU) String() string { + s := "" + for _, alg := range e.AlgCode { + if a, ok := HashToString[alg]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(alg)) + } + } + return s +} +func (e *EDNS0_DHU) copy() EDNS0 { return &EDNS0_DHU{e.Code, e.AlgCode} } + +// EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975. +type EDNS0_N3U struct { + Code uint16 // Always EDNS0N3U + AlgCode []uint8 +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U } +func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_N3U) String() string { + // Re-use the hash map + s := "" + for _, alg := range e.AlgCode { + if a, ok := HashToString[alg]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(alg)) + } + } + return s +} +func (e *EDNS0_N3U) copy() EDNS0 { return &EDNS0_N3U{e.Code, e.AlgCode} } + +// EDNS0_EXPIRE implementes the EDNS0 option as described in RFC 7314. +type EDNS0_EXPIRE struct { + Code uint16 // Always EDNS0EXPIRE + Expire uint32 +} + +// Option implements the EDNS0 interface. 
+func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE } +func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) } +func (e *EDNS0_EXPIRE) copy() EDNS0 { return &EDNS0_EXPIRE{e.Code, e.Expire} } + +func (e *EDNS0_EXPIRE) pack() ([]byte, error) { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, e.Expire) + return b, nil +} + +func (e *EDNS0_EXPIRE) unpack(b []byte) error { + if len(b) == 0 { + // zero-length EXPIRE query, see RFC 7314 Section 2 + return nil + } + if len(b) < 4 { + return ErrBuf + } + e.Expire = binary.BigEndian.Uint32(b) + return nil +} + +// The EDNS0_LOCAL option is used for local/experimental purposes. The option +// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND] +// (RFC6891), although any unassigned code can actually be used. The content of +// the option is made available in Data, unaltered. +// Basic use pattern for creating a local option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_LOCAL) +// e.Code = dns.EDNS0LOCALSTART +// e.Data = []byte{72, 82, 74} +// o.Option = append(o.Option, e) +type EDNS0_LOCAL struct { + Code uint16 + Data []byte +} + +// Option implements the EDNS0 interface. 
+func (e *EDNS0_LOCAL) Option() uint16 { return e.Code } +func (e *EDNS0_LOCAL) String() string { + return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data) +} +func (e *EDNS0_LOCAL) copy() EDNS0 { + b := make([]byte, len(e.Data)) + copy(b, e.Data) + return &EDNS0_LOCAL{e.Code, b} +} + +func (e *EDNS0_LOCAL) pack() ([]byte, error) { + b := make([]byte, len(e.Data)) + copied := copy(b, e.Data) + if copied != len(e.Data) { + return nil, ErrBuf + } + return b, nil +} + +func (e *EDNS0_LOCAL) unpack(b []byte) error { + e.Data = make([]byte, len(b)) + copied := copy(e.Data, b) + if copied != len(b) { + return ErrBuf + } + return nil +} + +// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep +// the TCP connection alive. See RFC 7828. +type EDNS0_TCP_KEEPALIVE struct { + Code uint16 // Always EDNSTCPKEEPALIVE + Length uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present; + Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order. +} + +// Option implements the EDNS0 interface. 
+func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { return EDNS0TCPKEEPALIVE } + +func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) { + if e.Timeout != 0 && e.Length != 2 { + return nil, errors.New("dns: timeout specified but length is not 2") + } + if e.Timeout == 0 && e.Length != 0 { + return nil, errors.New("dns: timeout not specified but length is not 0") + } + b := make([]byte, 4+e.Length) + binary.BigEndian.PutUint16(b[0:], e.Code) + binary.BigEndian.PutUint16(b[2:], e.Length) + if e.Length == 2 { + binary.BigEndian.PutUint16(b[4:], e.Timeout) + } + return b, nil +} + +func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Length = binary.BigEndian.Uint16(b[2:4]) + if e.Length != 0 && e.Length != 2 { + return errors.New("dns: length mismatch, want 0/2 but got " + strconv.FormatUint(uint64(e.Length), 10)) + } + if e.Length == 2 { + if len(b) < 6 { + return ErrBuf + } + e.Timeout = binary.BigEndian.Uint16(b[4:6]) + } + return nil +} + +func (e *EDNS0_TCP_KEEPALIVE) String() (s string) { + s = "use tcp keep-alive" + if e.Length == 0 { + s += ", timeout omitted" + } else { + s += fmt.Sprintf(", timeout %dms", e.Timeout*100) + } + return +} +func (e *EDNS0_TCP_KEEPALIVE) copy() EDNS0 { return &EDNS0_TCP_KEEPALIVE{e.Code, e.Length, e.Timeout} } + +// EDNS0_PADDING option is used to add padding to a request/response. The default +// value of padding SHOULD be 0x0 but other values MAY be used, for instance if +// compression is applied before encryption which may break signatures. +type EDNS0_PADDING struct { + Padding []byte +} + +// Option implements the EDNS0 interface. 
+func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING } +func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil } +func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil } +func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) } +func (e *EDNS0_PADDING) copy() EDNS0 { + b := make([]byte, len(e.Padding)) + copy(b, e.Padding) + return &EDNS0_PADDING{b} +} diff --git a/vendor/github.com/miekg/dns/format.go b/vendor/github.com/miekg/dns/format.go new file mode 100644 index 00000000..0ec79f2f --- /dev/null +++ b/vendor/github.com/miekg/dns/format.go @@ -0,0 +1,93 @@ +package dns + +import ( + "net" + "reflect" + "strconv" +) + +// NumField returns the number of rdata fields r has. +func NumField(r RR) int { + return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header +} + +// Field returns the rdata field i as a string. Fields are indexed starting from 1. +// RR types that holds slice data, for instance the NSEC type bitmap will return a single +// string where the types are concatenated using a space. +// Accessing non existing fields will cause a panic. 
+func Field(r RR, i int) string { + if i == 0 { + return "" + } + d := reflect.ValueOf(r).Elem().Field(i) + switch d.Kind() { + case reflect.String: + return d.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(d.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return strconv.FormatUint(d.Uint(), 10) + case reflect.Slice: + switch reflect.ValueOf(r).Elem().Type().Field(i).Tag { + case `dns:"a"`: + // TODO(miek): Hmm store this as 16 bytes + if d.Len() < net.IPv4len { + return "" + } + if d.Len() < net.IPv6len { + return net.IPv4(byte(d.Index(0).Uint()), + byte(d.Index(1).Uint()), + byte(d.Index(2).Uint()), + byte(d.Index(3).Uint())).String() + } + return net.IPv4(byte(d.Index(12).Uint()), + byte(d.Index(13).Uint()), + byte(d.Index(14).Uint()), + byte(d.Index(15).Uint())).String() + case `dns:"aaaa"`: + if d.Len() < net.IPv6len { + return "" + } + return net.IP{ + byte(d.Index(0).Uint()), + byte(d.Index(1).Uint()), + byte(d.Index(2).Uint()), + byte(d.Index(3).Uint()), + byte(d.Index(4).Uint()), + byte(d.Index(5).Uint()), + byte(d.Index(6).Uint()), + byte(d.Index(7).Uint()), + byte(d.Index(8).Uint()), + byte(d.Index(9).Uint()), + byte(d.Index(10).Uint()), + byte(d.Index(11).Uint()), + byte(d.Index(12).Uint()), + byte(d.Index(13).Uint()), + byte(d.Index(14).Uint()), + byte(d.Index(15).Uint()), + }.String() + case `dns:"nsec"`: + if d.Len() == 0 { + return "" + } + s := Type(d.Index(0).Uint()).String() + for i := 1; i < d.Len(); i++ { + s += " " + Type(d.Index(i).Uint()).String() + } + return s + default: + // if it does not have a tag its a string slice + fallthrough + case `dns:"txt"`: + if d.Len() == 0 { + return "" + } + s := d.Index(0).String() + for i := 1; i < d.Len(); i++ { + s += " " + d.Index(i).String() + } + return s + } + } + return "" +} diff --git a/vendor/github.com/miekg/dns/fuzz.go b/vendor/github.com/miekg/dns/fuzz.go new file mode 100644 
index 00000000..57410acd --- /dev/null +++ b/vendor/github.com/miekg/dns/fuzz.go @@ -0,0 +1,32 @@ +// +build fuzz + +package dns + +import "strings" + +func Fuzz(data []byte) int { + msg := new(Msg) + + if err := msg.Unpack(data); err != nil { + return 0 + } + if _, err := msg.Pack(); err != nil { + return 0 + } + + return 1 +} + +func FuzzNewRR(data []byte) int { + str := string(data) + // Do not fuzz lines that include the $INCLUDE keyword and hint the fuzzer + // at avoiding them. + // See GH#1025 for context. + if strings.Contains(strings.ToUpper(str), "$INCLUDE") { + return -1 + } + if _, err := NewRR(str); err != nil { + return 0 + } + return 1 +} diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go new file mode 100644 index 00000000..f7e91a23 --- /dev/null +++ b/vendor/github.com/miekg/dns/generate.go @@ -0,0 +1,247 @@ +package dns + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" +) + +// Parse the $GENERATE statement as used in BIND9 zones. +// See http://www.zytrax.com/books/dns/ch8/generate.html for instance. +// We are called after '$GENERATE '. After which we expect: +// * the range (12-24/2) +// * lhs (ownername) +// * [[ttl][class]] +// * type +// * rhs (rdata) +// But we are lazy here, only the range is parsed *all* occurrences +// of $ after that are interpreted. 
+func (zp *ZoneParser) generate(l lex) (RR, bool) { + token := l.token + step := 1 + if i := strings.IndexByte(token, '/'); i >= 0 { + if i+1 == len(token) { + return zp.setParseError("bad step in $GENERATE range", l) + } + + s, err := strconv.Atoi(token[i+1:]) + if err != nil || s <= 0 { + return zp.setParseError("bad step in $GENERATE range", l) + } + + step = s + token = token[:i] + } + + sx := strings.SplitN(token, "-", 2) + if len(sx) != 2 { + return zp.setParseError("bad start-stop in $GENERATE range", l) + } + + start, err := strconv.Atoi(sx[0]) + if err != nil { + return zp.setParseError("bad start in $GENERATE range", l) + } + + end, err := strconv.Atoi(sx[1]) + if err != nil { + return zp.setParseError("bad stop in $GENERATE range", l) + } + if end < 0 || start < 0 || end < start || (end-start)/step > 65535 { + return zp.setParseError("bad range in $GENERATE range", l) + } + + // _BLANK + l, ok := zp.c.Next() + if !ok || l.value != zBlank { + return zp.setParseError("garbage after $GENERATE range", l) + } + + // Create a complete new string, which we then parse again. + var s string + for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() { + if l.err { + return zp.setParseError("bad data in $GENERATE directive", l) + } + if l.value == zNewline { + break + } + + s += l.token + } + + r := &generateReader{ + s: s, + + cur: start, + start: start, + end: end, + step: step, + + file: zp.file, + lex: &l, + } + zp.sub = NewZoneParser(r, zp.origin, zp.file) + zp.sub.includeDepth, zp.sub.includeAllowed = zp.includeDepth, zp.includeAllowed + zp.sub.generateDisallowed = true + zp.sub.SetDefaultTTL(defaultTtl) + return zp.subNext() +} + +type generateReader struct { + s string + si int + + cur int + start int + end int + step int + + mod bytes.Buffer + + escape bool + + eof bool + + file string + lex *lex +} + +func (r *generateReader) parseError(msg string, end int) *ParseError { + r.eof = true // Make errors sticky. 
+ + l := *r.lex + l.token = r.s[r.si-1 : end] + l.column += r.si // l.column starts one zBLANK before r.s + + return &ParseError{r.file, msg, l} +} + +func (r *generateReader) Read(p []byte) (int, error) { + // NewZLexer, through NewZoneParser, should use ReadByte and + // not end up here. + + panic("not implemented") +} + +func (r *generateReader) ReadByte() (byte, error) { + if r.eof { + return 0, io.EOF + } + if r.mod.Len() > 0 { + return r.mod.ReadByte() + } + + if r.si >= len(r.s) { + r.si = 0 + r.cur += r.step + + r.eof = r.cur > r.end || r.cur < 0 + return '\n', nil + } + + si := r.si + r.si++ + + switch r.s[si] { + case '\\': + if r.escape { + r.escape = false + return '\\', nil + } + + r.escape = true + return r.ReadByte() + case '$': + if r.escape { + r.escape = false + return '$', nil + } + + mod := "%d" + + if si >= len(r.s)-1 { + // End of the string + fmt.Fprintf(&r.mod, mod, r.cur) + return r.mod.ReadByte() + } + + if r.s[si+1] == '$' { + r.si++ + return '$', nil + } + + var offset int + + // Search for { and } + if r.s[si+1] == '{' { + // Modifier block + sep := strings.Index(r.s[si+2:], "}") + if sep < 0 { + return 0, r.parseError("bad modifier in $GENERATE", len(r.s)) + } + + var errMsg string + mod, offset, errMsg = modToPrintf(r.s[si+2 : si+2+sep]) + if errMsg != "" { + return 0, r.parseError(errMsg, si+3+sep) + } + if r.start+offset < 0 || r.end+offset > 1<<31-1 { + return 0, r.parseError("bad offset in $GENERATE", si+3+sep) + } + + r.si += 2 + sep // Jump to it + } + + fmt.Fprintf(&r.mod, mod, r.cur+offset) + return r.mod.ReadByte() + default: + if r.escape { // Pretty useless here + r.escape = false + return r.ReadByte() + } + + return r.s[si], nil + } +} + +// Convert a $GENERATE modifier 0,0,d to something Printf can deal with. +func modToPrintf(s string) (string, int, string) { + // Modifier is { offset [ ,width [ ,base ] ] } - provide default + // values for optional width and type, if necessary. 
+ var offStr, widthStr, base string + switch xs := strings.Split(s, ","); len(xs) { + case 1: + offStr, widthStr, base = xs[0], "0", "d" + case 2: + offStr, widthStr, base = xs[0], xs[1], "d" + case 3: + offStr, widthStr, base = xs[0], xs[1], xs[2] + default: + return "", 0, "bad modifier in $GENERATE" + } + + switch base { + case "o", "d", "x", "X": + default: + return "", 0, "bad base in $GENERATE" + } + + offset, err := strconv.Atoi(offStr) + if err != nil { + return "", 0, "bad offset in $GENERATE" + } + + width, err := strconv.Atoi(widthStr) + if err != nil || width < 0 || width > 255 { + return "", 0, "bad width in $GENERATE" + } + + if width == 0 { + return "%" + base, offset, "" + } + + return "%0" + widthStr + base, offset, "" +} diff --git a/vendor/github.com/miekg/dns/go.mod b/vendor/github.com/miekg/dns/go.mod new file mode 100644 index 00000000..6003d057 --- /dev/null +++ b/vendor/github.com/miekg/dns/go.mod @@ -0,0 +1,11 @@ +module github.com/miekg/dns + +go 1.12 + +require ( + golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 + golang.org/x/net v0.0.0-20190923162816-aa69164e4478 + golang.org/x/sync v0.0.0-20190423024810-112230192c58 + golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe + golang.org/x/tools v0.0.0-20191216052735-49a3e744a425 // indirect +) diff --git a/vendor/github.com/miekg/dns/go.sum b/vendor/github.com/miekg/dns/go.sum new file mode 100644 index 00000000..96bda3a9 --- /dev/null +++ b/vendor/github.com/miekg/dns/go.sum @@ -0,0 +1,39 @@ +golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4 h1:Vk3wNqEZwyGyei9yq5ekj7frek2u7HUfffJ1/opblzc= +golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472 h1:Gv7RPwsi3eZ2Fgewe3CBsuOebPwO27PoXzRpJPsvSSM= +golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3 h1:dgd4x4kJt7G4k4m93AYLzM8Ni6h2qLTfh9n9vXJT3/0= +golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180928133829-e4b3c5e90611 h1:O33LKL7WyJgjN9CvxfTIomjIClbd/Kq86/iipowHQU0= +golang.org/x/sys 
v0.0.0-20180928133829-e4b3c5e90611/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425 h1:VvQyQJN0tSuecqgcIxMWnnfG5kSmgy9KZR9sW3W5QeA= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go new file mode 100644 index 00000000..10d82471 --- /dev/null +++ b/vendor/github.com/miekg/dns/labels.go @@ -0,0 +1,212 @@ +package dns + +// Holds a bunch of helper functions for dealing with labels. 
+ +// SplitDomainName splits a name string into it's labels. +// www.miek.nl. returns []string{"www", "miek", "nl"} +// .www.miek.nl. returns []string{"", "www", "miek", "nl"}, +// The root label (.) returns nil. Note that using +// strings.Split(s) will work in most cases, but does not handle +// escaped dots (\.) for instance. +// s must be a syntactically valid domain name, see IsDomainName. +func SplitDomainName(s string) (labels []string) { + if len(s) == 0 { + return nil + } + fqdnEnd := 0 // offset of the final '.' or the length of the name + idx := Split(s) + begin := 0 + if IsFqdn(s) { + fqdnEnd = len(s) - 1 + } else { + fqdnEnd = len(s) + } + + switch len(idx) { + case 0: + return nil + case 1: + // no-op + default: + for _, end := range idx[1:] { + labels = append(labels, s[begin:end-1]) + begin = end + } + } + + return append(labels, s[begin:fqdnEnd]) +} + +// CompareDomainName compares the names s1 and s2 and +// returns how many labels they have in common starting from the *right*. +// The comparison stops at the first inequality. The names are downcased +// before the comparison. +// +// www.miek.nl. and miek.nl. have two labels in common: miek and nl +// www.miek.nl. and www.bla.nl. have one label in common: nl +// +// s1 and s2 must be syntactically valid domain names. +func CompareDomainName(s1, s2 string) (n int) { + // the first check: root label + if s1 == "." || s2 == "." { + return 0 + } + + l1 := Split(s1) + l2 := Split(s2) + + j1 := len(l1) - 1 // end + i1 := len(l1) - 2 // start + j2 := len(l2) - 1 + i2 := len(l2) - 2 + // the second check can be done here: last/only label + // before we fall through into the for-loop below + if equal(s1[l1[j1]:], s2[l2[j2]:]) { + n++ + } else { + return + } + for { + if i1 < 0 || i2 < 0 { + break + } + if equal(s1[l1[i1]:l1[j1]], s2[l2[i2]:l2[j2]]) { + n++ + } else { + break + } + j1-- + i1-- + j2-- + i2-- + } + return +} + +// CountLabel counts the the number of labels in the string s. 
+// s must be a syntactically valid domain name. +func CountLabel(s string) (labels int) { + if s == "." { + return + } + off := 0 + end := false + for { + off, end = NextLabel(s, off) + labels++ + if end { + return + } + } +} + +// Split splits a name s into its label indexes. +// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}. +// The root name (.) returns nil. Also see SplitDomainName. +// s must be a syntactically valid domain name. +func Split(s string) []int { + if s == "." { + return nil + } + idx := make([]int, 1, 3) + off := 0 + end := false + + for { + off, end = NextLabel(s, off) + if end { + return idx + } + idx = append(idx, off) + } +} + +// NextLabel returns the index of the start of the next label in the +// string s starting at offset. +// The bool end is true when the end of the string has been reached. +// Also see PrevLabel. +func NextLabel(s string, offset int) (i int, end bool) { + if s == "" { + return 0, true + } + for i = offset; i < len(s)-1; i++ { + if s[i] != '.' { + continue + } + j := i - 1 + for j >= 0 && s[j] == '\\' { + j-- + } + + if (j-i)%2 == 0 { + continue + } + + return i + 1, false + } + return i + 1, true +} + +// PrevLabel returns the index of the label when starting from the right and +// jumping n labels to the left. +// The bool start is true when the start of the string has been overshot. +// Also see NextLabel. +func PrevLabel(s string, n int) (i int, start bool) { + if s == "" { + return 0, true + } + if n == 0 { + return len(s), false + } + + l := len(s) - 1 + if s[l] == '.' { + l-- + } + + for ; l >= 0 && n > 0; l-- { + if s[l] != '.' { + continue + } + j := l - 1 + for j >= 0 && s[j] == '\\' { + j-- + } + + if (j-l)%2 == 0 { + continue + } + + n-- + if n == 0 { + return l + 1, false + } + } + + return 0, n > 1 +} + +// equal compares a and b while ignoring case. It returns true when equal otherwise false. +func equal(a, b string) bool { + // might be lifted into API function. 
+ la := len(a) + lb := len(b) + if la != lb { + return false + } + + for i := la - 1; i >= 0; i-- { + ai := a[i] + bi := b[i] + if ai >= 'A' && ai <= 'Z' { + ai |= 'a' - 'A' + } + if bi >= 'A' && bi <= 'Z' { + bi |= 'a' - 'A' + } + if ai != bi { + return false + } + } + return true +} diff --git a/vendor/github.com/miekg/dns/listen_go111.go b/vendor/github.com/miekg/dns/listen_go111.go new file mode 100644 index 00000000..fad195cf --- /dev/null +++ b/vendor/github.com/miekg/dns/listen_go111.go @@ -0,0 +1,44 @@ +// +build go1.11 +// +build aix darwin dragonfly freebsd linux netbsd openbsd + +package dns + +import ( + "context" + "net" + "syscall" + + "golang.org/x/sys/unix" +) + +const supportsReusePort = true + +func reuseportControl(network, address string, c syscall.RawConn) error { + var opErr error + err := c.Control(func(fd uintptr) { + opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1) + }) + if err != nil { + return err + } + + return opErr +} + +func listenTCP(network, addr string, reuseport bool) (net.Listener, error) { + var lc net.ListenConfig + if reuseport { + lc.Control = reuseportControl + } + + return lc.Listen(context.Background(), network, addr) +} + +func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) { + var lc net.ListenConfig + if reuseport { + lc.Control = reuseportControl + } + + return lc.ListenPacket(context.Background(), network, addr) +} diff --git a/vendor/github.com/miekg/dns/listen_go_not111.go b/vendor/github.com/miekg/dns/listen_go_not111.go new file mode 100644 index 00000000..b9201417 --- /dev/null +++ b/vendor/github.com/miekg/dns/listen_go_not111.go @@ -0,0 +1,23 @@ +// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd + +package dns + +import "net" + +const supportsReusePort = false + +func listenTCP(network, addr string, reuseport bool) (net.Listener, error) { + if reuseport { + // TODO(tmthrgd): return an error? 
+ } + + return net.Listen(network, addr) +} + +func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) { + if reuseport { + // TODO(tmthrgd): return an error? + } + + return net.ListenPacket(network, addr) +} diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go new file mode 100644 index 00000000..29381300 --- /dev/null +++ b/vendor/github.com/miekg/dns/msg.go @@ -0,0 +1,1196 @@ +// DNS packet assembly, see RFC 1035. Converting from - Unpack() - +// and to - Pack() - wire format. +// All the packers and unpackers take a (msg []byte, off int) +// and return (off1 int, ok bool). If they return ok==false, they +// also return off1==len(msg), so that the next unpacker will +// also fail. This lets us avoid checks of ok until the end of a +// packing sequence. + +package dns + +//go:generate go run msg_generate.go + +import ( + "crypto/rand" + "encoding/binary" + "fmt" + "math/big" + "strconv" + "strings" +) + +const ( + maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer + maxDomainNameWireOctets = 255 // See RFC 1035 section 2.3.4 + + // This is the maximum number of compression pointers that should occur in a + // semantically valid message. Each label in a domain name must be at least one + // octet and is separated by a period. The root label won't be represented by a + // compression pointer to a compression pointer, hence the -2 to exclude the + // smallest valid root label. + // + // It is possible to construct a valid message that has more compression pointers + // than this, and still doesn't loop, by pointing to a previous pointer. This is + // not something a well written implementation should ever do, so we leave them + // to trip the maximum compression pointer check. + maxCompressionPointers = (maxDomainNameWireOctets+1)/2 - 2 + + // This is the maximum length of a domain name in presentation format. 
The + // maximum wire length of a domain name is 255 octets (see above), with the + // maximum label length being 63. The wire format requires one extra byte over + // the presentation format, reducing the number of octets by 1. Each label in + // the name will be separated by a single period, with each octet in the label + // expanding to at most 4 bytes (\DDD). If all other labels are of the maximum + // length, then the final label can only be 61 octets long to not exceed the + // maximum allowed wire length. + maxDomainNamePresentationLength = 61*4 + 1 + 63*4 + 1 + 63*4 + 1 + 63*4 + 1 +) + +// Errors defined in this package. +var ( + ErrAlg error = &Error{err: "bad algorithm"} // ErrAlg indicates an error with the (DNSSEC) algorithm. + ErrAuth error = &Error{err: "bad authentication"} // ErrAuth indicates an error in the TSIG authentication. + ErrBuf error = &Error{err: "buffer size too small"} // ErrBuf indicates that the buffer used is too small for the message. + ErrConnEmpty error = &Error{err: "conn has no connection"} // ErrConnEmpty indicates a connection is being used before it is initialized. + ErrExtendedRcode error = &Error{err: "bad extended rcode"} // ErrExtendedRcode ... + ErrFqdn error = &Error{err: "domain must be fully qualified"} // ErrFqdn indicates that a domain name does not have a closing dot. + ErrId error = &Error{err: "id mismatch"} // ErrId indicates there is a mismatch with the message's ID. + ErrKeyAlg error = &Error{err: "bad key algorithm"} // ErrKeyAlg indicates that the algorithm in the key is not valid. 
+ ErrKey error = &Error{err: "bad key"} + ErrKeySize error = &Error{err: "bad key size"} + ErrLongDomain error = &Error{err: fmt.Sprintf("domain name exceeded %d wire-format octets", maxDomainNameWireOctets)} + ErrNoSig error = &Error{err: "no signature found"} + ErrPrivKey error = &Error{err: "bad private key"} + ErrRcode error = &Error{err: "bad rcode"} + ErrRdata error = &Error{err: "bad rdata"} + ErrRRset error = &Error{err: "bad rrset"} + ErrSecret error = &Error{err: "no secrets defined"} + ErrShortRead error = &Error{err: "short read"} + ErrSig error = &Error{err: "bad signature"} // ErrSig indicates that a signature can not be cryptographically validated. + ErrSoa error = &Error{err: "no SOA"} // ErrSOA indicates that no SOA RR was seen when doing zone transfers. + ErrTime error = &Error{err: "bad time"} // ErrTime indicates a timing error in TSIG authentication. +) + +// Id by default returns a 16-bit random number to be used as a message id. The +// number is drawn from a cryptographically secure random number generator. +// This being a variable the function can be reassigned to a custom function. +// For instance, to make it return a static value for testing: +// +// dns.Id = func() uint16 { return 3 } +var Id = id + +// id returns a 16 bits random number to be used as a +// message id. The random provided should be good enough. +func id() uint16 { + var output uint16 + err := binary.Read(rand.Reader, binary.BigEndian, &output) + if err != nil { + panic("dns: reading random id failed: " + err.Error()) + } + return output +} + +// MsgHdr is a a manually-unpacked version of (id, bits). +type MsgHdr struct { + Id uint16 + Response bool + Opcode int + Authoritative bool + Truncated bool + RecursionDesired bool + RecursionAvailable bool + Zero bool + AuthenticatedData bool + CheckingDisabled bool + Rcode int +} + +// Msg contains the layout of a DNS message. 
+type Msg struct { + MsgHdr + Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format. + Question []Question // Holds the RR(s) of the question section. + Answer []RR // Holds the RR(s) of the answer section. + Ns []RR // Holds the RR(s) of the authority section. + Extra []RR // Holds the RR(s) of the additional section. +} + +// ClassToString is a maps Classes to strings for each CLASS wire type. +var ClassToString = map[uint16]string{ + ClassINET: "IN", + ClassCSNET: "CS", + ClassCHAOS: "CH", + ClassHESIOD: "HS", + ClassNONE: "NONE", + ClassANY: "ANY", +} + +// OpcodeToString maps Opcodes to strings. +var OpcodeToString = map[int]string{ + OpcodeQuery: "QUERY", + OpcodeIQuery: "IQUERY", + OpcodeStatus: "STATUS", + OpcodeNotify: "NOTIFY", + OpcodeUpdate: "UPDATE", +} + +// RcodeToString maps Rcodes to strings. +var RcodeToString = map[int]string{ + RcodeSuccess: "NOERROR", + RcodeFormatError: "FORMERR", + RcodeServerFailure: "SERVFAIL", + RcodeNameError: "NXDOMAIN", + RcodeNotImplemented: "NOTIMP", + RcodeRefused: "REFUSED", + RcodeYXDomain: "YXDOMAIN", // See RFC 2136 + RcodeYXRrset: "YXRRSET", + RcodeNXRrset: "NXRRSET", + RcodeNotAuth: "NOTAUTH", + RcodeNotZone: "NOTZONE", + RcodeBadSig: "BADSIG", // Also known as RcodeBadVers, see RFC 6891 + // RcodeBadVers: "BADVERS", + RcodeBadKey: "BADKEY", + RcodeBadTime: "BADTIME", + RcodeBadMode: "BADMODE", + RcodeBadName: "BADNAME", + RcodeBadAlg: "BADALG", + RcodeBadTrunc: "BADTRUNC", + RcodeBadCookie: "BADCOOKIE", +} + +// compressionMap is used to allow a more efficient compression map +// to be used for internal packDomainName calls without changing the +// signature or functionality of public API. +// +// In particular, map[string]uint16 uses 25% less per-entry memory +// than does map[string]int. 
+type compressionMap struct { + ext map[string]int // external callers + int map[string]uint16 // internal callers +} + +func (m compressionMap) valid() bool { + return m.int != nil || m.ext != nil +} + +func (m compressionMap) insert(s string, pos int) { + if m.ext != nil { + m.ext[s] = pos + } else { + m.int[s] = uint16(pos) + } +} + +func (m compressionMap) find(s string) (int, bool) { + if m.ext != nil { + pos, ok := m.ext[s] + return pos, ok + } + + pos, ok := m.int[s] + return int(pos), ok +} + +// Domain names are a sequence of counted strings +// split at the dots. They end with a zero-length string. + +// PackDomainName packs a domain name s into msg[off:]. +// If compression is wanted compress must be true and the compression +// map needs to hold a mapping between domain names and offsets +// pointing into msg. +func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + return packDomainName(s, msg, off, compressionMap{ext: compression}, compress) +} + +func packDomainName(s string, msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + // XXX: A logical copy of this function exists in IsDomainName and + // should be kept in sync with this function. + + ls := len(s) + if ls == 0 { // Ok, for instance when dealing with update RR without any rdata. + return off, nil + } + + // If not fully qualified, error out. + if !IsFqdn(s) { + return len(msg), ErrFqdn + } + + // Each dot ends a segment of the name. + // We trade each dot byte for a length byte. + // Except for escaped dots (\.), which are normal dots. + // There is also a trailing zero. + + // Compression + pointer := -1 + + // Emit sequence of counted strings, chopping at dots. 
+ var ( + begin int + compBegin int + compOff int + bs []byte + wasDot bool + ) +loop: + for i := 0; i < ls; i++ { + var c byte + if bs == nil { + c = s[i] + } else { + c = bs[i] + } + + switch c { + case '\\': + if off+1 > len(msg) { + return len(msg), ErrBuf + } + + if bs == nil { + bs = []byte(s) + } + + // check for \DDD + if i+3 < ls && isDigit(bs[i+1]) && isDigit(bs[i+2]) && isDigit(bs[i+3]) { + bs[i] = dddToByte(bs[i+1:]) + copy(bs[i+1:ls-3], bs[i+4:]) + ls -= 3 + compOff += 3 + } else { + copy(bs[i:ls-1], bs[i+1:]) + ls-- + compOff++ + } + + wasDot = false + case '.': + if wasDot { + // two dots back to back is not legal + return len(msg), ErrRdata + } + wasDot = true + + labelLen := i - begin + if labelLen >= 1<<6 { // top two bits of length must be clear + return len(msg), ErrRdata + } + + // off can already (we're in a loop) be bigger than len(msg) + // this happens when a name isn't fully qualified + if off+1+labelLen > len(msg) { + return len(msg), ErrBuf + } + + // Don't try to compress '.' + // We should only compress when compress is true, but we should also still pick + // up names that can be used for *future* compression(s). + if compression.valid() && !isRootLabel(s, bs, begin, ls) { + if p, ok := compression.find(s[compBegin:]); ok { + // The first hit is the longest matching dname + // keep the pointer offset we get back and store + // the offset of the current name, because that's + // where we need to insert the pointer later + + // If compress is true, we're allowed to compress this dname + if compress { + pointer = p // Where to point to + break loop + } + } else if off < maxCompressionOffset { + // Only offsets smaller than maxCompressionOffset can be used. + compression.insert(s[compBegin:], off) + } + } + + // The following is covered by the length check above. 
+ msg[off] = byte(labelLen) + + if bs == nil { + copy(msg[off+1:], s[begin:i]) + } else { + copy(msg[off+1:], bs[begin:i]) + } + off += 1 + labelLen + + begin = i + 1 + compBegin = begin + compOff + default: + wasDot = false + } + } + + // Root label is special + if isRootLabel(s, bs, 0, ls) { + return off, nil + } + + // If we did compression and we find something add the pointer here + if pointer != -1 { + // We have two bytes (14 bits) to put the pointer in + binary.BigEndian.PutUint16(msg[off:], uint16(pointer^0xC000)) + return off + 2, nil + } + + if off < len(msg) { + msg[off] = 0 + } + + return off + 1, nil +} + +// isRootLabel returns whether s or bs, from off to end, is the root +// label ".". +// +// If bs is nil, s will be checked, otherwise bs will be checked. +func isRootLabel(s string, bs []byte, off, end int) bool { + if bs == nil { + return s[off:end] == "." + } + + return end-off == 1 && bs[off] == '.' +} + +// Unpack a domain name. +// In addition to the simple sequences of counted strings above, +// domain names are allowed to refer to strings elsewhere in the +// packet, to avoid repeating common suffixes when returning +// many entries in a single domain. The pointers are marked +// by a length byte with the top two bits set. Ignoring those +// two bits, that byte and the next give a 14 bit offset from msg[0] +// where we should pick up the trail. +// Note that if we jump elsewhere in the packet, +// we return off1 == the offset after the first pointer we found, +// which is where the next record will start. +// In theory, the pointers are only allowed to jump backward. +// We let them jump anywhere and stop jumping after a while. + +// UnpackDomainName unpacks a domain name into a string. It returns +// the name, the new offset into msg and any error that occurred. +// +// When an error is encountered, the unpacked name will be discarded +// and len(msg) will be returned as the offset. 
+func UnpackDomainName(msg []byte, off int) (string, int, error) { + s := make([]byte, 0, maxDomainNamePresentationLength) + off1 := 0 + lenmsg := len(msg) + budget := maxDomainNameWireOctets + ptr := 0 // number of pointers followed +Loop: + for { + if off >= lenmsg { + return "", lenmsg, ErrBuf + } + c := int(msg[off]) + off++ + switch c & 0xC0 { + case 0x00: + if c == 0x00 { + // end of name + break Loop + } + // literal string + if off+c > lenmsg { + return "", lenmsg, ErrBuf + } + budget -= c + 1 // +1 for the label separator + if budget <= 0 { + return "", lenmsg, ErrLongDomain + } + for _, b := range msg[off : off+c] { + switch b { + case '.', '(', ')', ';', ' ', '@': + fallthrough + case '"', '\\': + s = append(s, '\\', b) + default: + if b < ' ' || b > '~' { // unprintable, use \DDD + s = append(s, escapeByte(b)...) + } else { + s = append(s, b) + } + } + } + s = append(s, '.') + off += c + case 0xC0: + // pointer to somewhere else in msg. + // remember location after first ptr, + // since that's how many bytes we consumed. + // also, don't follow too many pointers -- + // maybe there's a loop. 
+ if off >= lenmsg { + return "", lenmsg, ErrBuf + } + c1 := msg[off] + off++ + if ptr == 0 { + off1 = off + } + if ptr++; ptr > maxCompressionPointers { + return "", lenmsg, &Error{err: "too many compression pointers"} + } + // pointer should guarantee that it advances and points forwards at least + // but the condition on previous three lines guarantees that it's + // at least loop-free + off = (c^0xC0)<<8 | int(c1) + default: + // 0x80 and 0x40 are reserved + return "", lenmsg, ErrRdata + } + } + if ptr == 0 { + off1 = off + } + if len(s) == 0 { + return ".", off1, nil + } + return string(s), off1, nil +} + +func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { + if len(txt) == 0 { + if offset >= len(msg) { + return offset, ErrBuf + } + msg[offset] = 0 + return offset, nil + } + var err error + for _, s := range txt { + if len(s) > len(tmp) { + return offset, ErrBuf + } + offset, err = packTxtString(s, msg, offset, tmp) + if err != nil { + return offset, err + } + } + return offset, nil +} + +func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) { + lenByteOffset := offset + if offset >= len(msg) || len(s) > len(tmp) { + return offset, ErrBuf + } + offset++ + bs := tmp[:len(s)] + copy(bs, s) + for i := 0; i < len(bs); i++ { + if len(msg) <= offset { + return offset, ErrBuf + } + if bs[i] == '\\' { + i++ + if i == len(bs) { + break + } + // check for \DDD + if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + msg[offset] = dddToByte(bs[i:]) + i += 2 + } else { + msg[offset] = bs[i] + } + } else { + msg[offset] = bs[i] + } + offset++ + } + l := offset - lenByteOffset - 1 + if l > 255 { + return offset, &Error{err: "string exceeded 255 bytes in txt"} + } + msg[lenByteOffset] = byte(l) + return offset, nil +} + +func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) { + if offset >= len(msg) || len(s) > len(tmp) { + return offset, ErrBuf + } + bs := tmp[:len(s)] + 
copy(bs, s) + for i := 0; i < len(bs); i++ { + if len(msg) <= offset { + return offset, ErrBuf + } + if bs[i] == '\\' { + i++ + if i == len(bs) { + break + } + // check for \DDD + if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + msg[offset] = dddToByte(bs[i:]) + i += 2 + } else { + msg[offset] = bs[i] + } + } else { + msg[offset] = bs[i] + } + offset++ + } + return offset, nil +} + +func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) { + off = off0 + var s string + for off < len(msg) && err == nil { + s, off, err = unpackString(msg, off) + if err == nil { + ss = append(ss, s) + } + } + return +} + +// Helpers for dealing with escaped bytes +func isDigit(b byte) bool { return b >= '0' && b <= '9' } + +func dddToByte(s []byte) byte { + _ = s[2] // bounds check hint to compiler; see golang.org/issue/14808 + return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) +} + +func dddStringToByte(s string) byte { + _ = s[2] // bounds check hint to compiler; see golang.org/issue/14808 + return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) +} + +// Helper function for packing and unpacking +func intToBytes(i *big.Int, length int) []byte { + buf := i.Bytes() + if len(buf) < length { + b := make([]byte, length) + copy(b[length-len(buf):], buf) + return b + } + return buf +} + +// PackRR packs a resource record rr into msg[off:]. +// See PackDomainName for documentation about the compression. +func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + headerEnd, off1, err := packRR(rr, msg, off, compressionMap{ext: compression}, compress) + if err == nil { + // packRR no longer sets the Rdlength field on the rr, but + // callers might be expecting it so we set it here. 
+ rr.Header().Rdlength = uint16(off1 - headerEnd) + } + return off1, err +} + +func packRR(rr RR, msg []byte, off int, compression compressionMap, compress bool) (headerEnd int, off1 int, err error) { + if rr == nil { + return len(msg), len(msg), &Error{err: "nil rr"} + } + + headerEnd, err = rr.Header().packHeader(msg, off, compression, compress) + if err != nil { + return headerEnd, len(msg), err + } + + off1, err = rr.pack(msg, headerEnd, compression, compress) + if err != nil { + return headerEnd, len(msg), err + } + + rdlength := off1 - headerEnd + if int(uint16(rdlength)) != rdlength { // overflow + return headerEnd, len(msg), ErrRdata + } + + // The RDLENGTH field is the last field in the header and we set it here. + binary.BigEndian.PutUint16(msg[headerEnd-2:], uint16(rdlength)) + return headerEnd, off1, nil +} + +// UnpackRR unpacks msg[off:] into an RR. +func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) { + h, off, msg, err := unpackHeader(msg, off) + if err != nil { + return nil, len(msg), err + } + + return UnpackRRWithHeader(h, msg, off) +} + +// UnpackRRWithHeader unpacks the record type specific payload given an existing +// RR_Header. +func UnpackRRWithHeader(h RR_Header, msg []byte, off int) (rr RR, off1 int, err error) { + if newFn, ok := TypeToRR[h.Rrtype]; ok { + rr = newFn() + *rr.Header() = h + } else { + rr = &RFC3597{Hdr: h} + } + + if noRdata(h) { + return rr, off, nil + } + + end := off + int(h.Rdlength) + + off, err = rr.unpack(msg, off) + if err != nil { + return nil, end, err + } + if off != end { + return &h, end, &Error{err: "bad rdlength"} + } + + return rr, off, nil +} + +// unpackRRslice unpacks msg[off:] into an []RR. 
+// If we cannot unpack the whole array, then it will return nil +func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) { + var r RR + // Don't pre-allocate, l may be under attacker control + var dst []RR + for i := 0; i < l; i++ { + off1 := off + r, off, err = UnpackRR(msg, off) + if err != nil { + off = len(msg) + break + } + // If offset does not increase anymore, l is a lie + if off1 == off { + l = i + break + } + dst = append(dst, r) + } + if err != nil && off == len(msg) { + dst = nil + } + return dst, off, err +} + +// Convert a MsgHdr to a string, with dig-like headers: +// +//;; opcode: QUERY, status: NOERROR, id: 48404 +// +//;; flags: qr aa rd ra; +func (h *MsgHdr) String() string { + if h == nil { + return " MsgHdr" + } + + s := ";; opcode: " + OpcodeToString[h.Opcode] + s += ", status: " + RcodeToString[h.Rcode] + s += ", id: " + strconv.Itoa(int(h.Id)) + "\n" + + s += ";; flags:" + if h.Response { + s += " qr" + } + if h.Authoritative { + s += " aa" + } + if h.Truncated { + s += " tc" + } + if h.RecursionDesired { + s += " rd" + } + if h.RecursionAvailable { + s += " ra" + } + if h.Zero { // Hmm + s += " z" + } + if h.AuthenticatedData { + s += " ad" + } + if h.CheckingDisabled { + s += " cd" + } + + s += ";" + return s +} + +// Pack packs a Msg: it is converted to to wire format. +// If the dns.Compress is true the message will be in compressed wire format. +func (dns *Msg) Pack() (msg []byte, err error) { + return dns.PackBuffer(nil) +} + +// PackBuffer packs a Msg, using the given buffer buf. If buf is too small a new buffer is allocated. +func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) { + // If this message can't be compressed, avoid filling the + // compression map and creating garbage. + if dns.Compress && dns.isCompressible() { + compression := make(map[string]uint16) // Compression pointer mappings. 
+ return dns.packBufferWithCompressionMap(buf, compressionMap{int: compression}, true) + } + + return dns.packBufferWithCompressionMap(buf, compressionMap{}, false) +} + +// packBufferWithCompressionMap packs a Msg, using the given buffer buf. +func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression compressionMap, compress bool) (msg []byte, err error) { + if dns.Rcode < 0 || dns.Rcode > 0xFFF { + return nil, ErrRcode + } + + // Set extended rcode unconditionally if we have an opt, this will allow + // reseting the extended rcode bits if they need to. + if opt := dns.IsEdns0(); opt != nil { + opt.SetExtendedRcode(uint16(dns.Rcode)) + } else if dns.Rcode > 0xF { + // If Rcode is an extended one and opt is nil, error out. + return nil, ErrExtendedRcode + } + + // Convert convenient Msg into wire-like Header. + var dh Header + dh.Id = dns.Id + dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode&0xF) + if dns.Response { + dh.Bits |= _QR + } + if dns.Authoritative { + dh.Bits |= _AA + } + if dns.Truncated { + dh.Bits |= _TC + } + if dns.RecursionDesired { + dh.Bits |= _RD + } + if dns.RecursionAvailable { + dh.Bits |= _RA + } + if dns.Zero { + dh.Bits |= _Z + } + if dns.AuthenticatedData { + dh.Bits |= _AD + } + if dns.CheckingDisabled { + dh.Bits |= _CD + } + + dh.Qdcount = uint16(len(dns.Question)) + dh.Ancount = uint16(len(dns.Answer)) + dh.Nscount = uint16(len(dns.Ns)) + dh.Arcount = uint16(len(dns.Extra)) + + // We need the uncompressed length here, because we first pack it and then compress it. + msg = buf + uncompressedLen := msgLenWithCompressionMap(dns, nil) + if packLen := uncompressedLen + 1; len(msg) < packLen { + msg = make([]byte, packLen) + } + + // Pack it in: header and then the pieces. 
+ off := 0 + off, err = dh.pack(msg, off, compression, compress) + if err != nil { + return nil, err + } + for _, r := range dns.Question { + off, err = r.pack(msg, off, compression, compress) + if err != nil { + return nil, err + } + } + for _, r := range dns.Answer { + _, off, err = packRR(r, msg, off, compression, compress) + if err != nil { + return nil, err + } + } + for _, r := range dns.Ns { + _, off, err = packRR(r, msg, off, compression, compress) + if err != nil { + return nil, err + } + } + for _, r := range dns.Extra { + _, off, err = packRR(r, msg, off, compression, compress) + if err != nil { + return nil, err + } + } + return msg[:off], nil +} + +func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) { + // If we are at the end of the message we should return *just* the + // header. This can still be useful to the caller. 9.9.9.9 sends these + // when responding with REFUSED for instance. + if off == len(msg) { + // reset sections before returning + dns.Question, dns.Answer, dns.Ns, dns.Extra = nil, nil, nil, nil + return nil + } + + // Qdcount, Ancount, Nscount, Arcount can't be trusted, as they are + // attacker controlled. This means we can't use them to pre-allocate + // slices. + dns.Question = nil + for i := 0; i < int(dh.Qdcount); i++ { + off1 := off + var q Question + q, off, err = unpackQuestion(msg, off) + if err != nil { + return err + } + if off1 == off { // Offset does not increase anymore, dh.Qdcount is a lie! 
+ dh.Qdcount = uint16(i) + break + } + dns.Question = append(dns.Question, q) + } + + dns.Answer, off, err = unpackRRslice(int(dh.Ancount), msg, off) + // The header counts might have been wrong so we need to update it + dh.Ancount = uint16(len(dns.Answer)) + if err == nil { + dns.Ns, off, err = unpackRRslice(int(dh.Nscount), msg, off) + } + // The header counts might have been wrong so we need to update it + dh.Nscount = uint16(len(dns.Ns)) + if err == nil { + dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off) + } + // The header counts might have been wrong so we need to update it + dh.Arcount = uint16(len(dns.Extra)) + + // Set extended Rcode + if opt := dns.IsEdns0(); opt != nil { + dns.Rcode |= opt.ExtendedRcode() + } + + if off != len(msg) { + // TODO(miek) make this an error? + // use PackOpt to let people tell how detailed the error reporting should be? + // println("dns: extra bytes in dns packet", off, "<", len(msg)) + } + return err + +} + +// Unpack unpacks a binary message to a Msg structure. +func (dns *Msg) Unpack(msg []byte) (err error) { + dh, off, err := unpackMsgHdr(msg, 0) + if err != nil { + return err + } + + dns.setHdr(dh) + return dns.unpack(dh, msg, off) +} + +// Convert a complete message to a string with dig-like output. 
+func (dns *Msg) String() string { + if dns == nil { + return " MsgHdr" + } + s := dns.MsgHdr.String() + " " + s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", " + s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", " + s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", " + s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" + if len(dns.Question) > 0 { + s += "\n;; QUESTION SECTION:\n" + for _, r := range dns.Question { + s += r.String() + "\n" + } + } + if len(dns.Answer) > 0 { + s += "\n;; ANSWER SECTION:\n" + for _, r := range dns.Answer { + if r != nil { + s += r.String() + "\n" + } + } + } + if len(dns.Ns) > 0 { + s += "\n;; AUTHORITY SECTION:\n" + for _, r := range dns.Ns { + if r != nil { + s += r.String() + "\n" + } + } + } + if len(dns.Extra) > 0 { + s += "\n;; ADDITIONAL SECTION:\n" + for _, r := range dns.Extra { + if r != nil { + s += r.String() + "\n" + } + } + } + return s +} + +// isCompressible returns whether the msg may be compressible. +func (dns *Msg) isCompressible() bool { + // If we only have one question, there is nothing we can ever compress. + return len(dns.Question) > 1 || len(dns.Answer) > 0 || + len(dns.Ns) > 0 || len(dns.Extra) > 0 +} + +// Len returns the message length when in (un)compressed wire format. +// If dns.Compress is true compression it is taken into account. Len() +// is provided to be a faster way to get the size of the resulting packet, +// than packing it, measuring the size and discarding the buffer. +func (dns *Msg) Len() int { + // If this message can't be compressed, avoid filling the + // compression map and creating garbage. 
+ if dns.Compress && dns.isCompressible() { + compression := make(map[string]struct{}) + return msgLenWithCompressionMap(dns, compression) + } + + return msgLenWithCompressionMap(dns, nil) +} + +func msgLenWithCompressionMap(dns *Msg, compression map[string]struct{}) int { + l := headerSize + + for _, r := range dns.Question { + l += r.len(l, compression) + } + for _, r := range dns.Answer { + if r != nil { + l += r.len(l, compression) + } + } + for _, r := range dns.Ns { + if r != nil { + l += r.len(l, compression) + } + } + for _, r := range dns.Extra { + if r != nil { + l += r.len(l, compression) + } + } + + return l +} + +func domainNameLen(s string, off int, compression map[string]struct{}, compress bool) int { + if s == "" || s == "." { + return 1 + } + + escaped := strings.Contains(s, "\\") + + if compression != nil && (compress || off < maxCompressionOffset) { + // compressionLenSearch will insert the entry into the compression + // map if it doesn't contain it. + if l, ok := compressionLenSearch(compression, s, off); ok && compress { + if escaped { + return escapedNameLen(s[:l]) + 2 + } + + return l + 2 + } + } + + if escaped { + return escapedNameLen(s) + 1 + } + + return len(s) + 1 +} + +func escapedNameLen(s string) int { + nameLen := len(s) + for i := 0; i < len(s); i++ { + if s[i] != '\\' { + continue + } + + if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) { + nameLen -= 3 + i += 3 + } else { + nameLen-- + i++ + } + } + + return nameLen +} + +func compressionLenSearch(c map[string]struct{}, s string, msgOff int) (int, bool) { + for off, end := 0, false; !end; off, end = NextLabel(s, off) { + if _, ok := c[s[off:]]; ok { + return off, true + } + + if msgOff+off < maxCompressionOffset { + c[s[off:]] = struct{}{} + } + } + + return 0, false +} + +// Copy returns a new RR which is a deep-copy of r. +func Copy(r RR) RR { return r.copy() } + +// Len returns the length (in octets) of the uncompressed RR in wire format. 
+func Len(r RR) int { return r.len(0, nil) } + +// Copy returns a new *Msg which is a deep-copy of dns. +func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) } + +// CopyTo copies the contents to the provided message using a deep-copy and returns the copy. +func (dns *Msg) CopyTo(r1 *Msg) *Msg { + r1.MsgHdr = dns.MsgHdr + r1.Compress = dns.Compress + + if len(dns.Question) > 0 { + r1.Question = make([]Question, len(dns.Question)) + copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy + } + + rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra)) + r1.Answer, rrArr = rrArr[:0:len(dns.Answer)], rrArr[len(dns.Answer):] + r1.Ns, rrArr = rrArr[:0:len(dns.Ns)], rrArr[len(dns.Ns):] + r1.Extra = rrArr[:0:len(dns.Extra)] + + for _, r := range dns.Answer { + r1.Answer = append(r1.Answer, r.copy()) + } + + for _, r := range dns.Ns { + r1.Ns = append(r1.Ns, r.copy()) + } + + for _, r := range dns.Extra { + r1.Extra = append(r1.Extra, r.copy()) + } + + return r1 +} + +func (q *Question) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) { + off, err := packDomainName(q.Name, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint16(q.Qtype, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(q.Qclass, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func unpackQuestion(msg []byte, off int) (Question, int, error) { + var ( + q Question + err error + ) + q.Name, off, err = UnpackDomainName(msg, off) + if err != nil { + return q, off, err + } + if off == len(msg) { + return q, off, nil + } + q.Qtype, off, err = unpackUint16(msg, off) + if err != nil { + return q, off, err + } + if off == len(msg) { + return q, off, nil + } + q.Qclass, off, err = unpackUint16(msg, off) + if off == len(msg) { + return q, off, nil + } + return q, off, err +} + +func (dh *Header) pack(msg []byte, off int, compression 
compressionMap, compress bool) (int, error) { + off, err := packUint16(dh.Id, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Bits, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Qdcount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Ancount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Nscount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Arcount, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func unpackMsgHdr(msg []byte, off int) (Header, int, error) { + var ( + dh Header + err error + ) + dh.Id, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Bits, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Qdcount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Ancount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Nscount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Arcount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + return dh, off, nil +} + +// setHdr set the header in the dns using the binary data in dh. +func (dns *Msg) setHdr(dh Header) { + dns.Id = dh.Id + dns.Response = dh.Bits&_QR != 0 + dns.Opcode = int(dh.Bits>>11) & 0xF + dns.Authoritative = dh.Bits&_AA != 0 + dns.Truncated = dh.Bits&_TC != 0 + dns.RecursionDesired = dh.Bits&_RD != 0 + dns.RecursionAvailable = dh.Bits&_RA != 0 + dns.Zero = dh.Bits&_Z != 0 // _Z covers the zero bit, which should be zero; not sure why we set it to the opposite. 
+ dns.AuthenticatedData = dh.Bits&_AD != 0 + dns.CheckingDisabled = dh.Bits&_CD != 0 + dns.Rcode = int(dh.Bits & 0xF) +} diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go new file mode 100644 index 00000000..98fadc31 --- /dev/null +++ b/vendor/github.com/miekg/dns/msg_helpers.go @@ -0,0 +1,810 @@ +package dns + +import ( + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "net" + "strings" +) + +// helper functions called from the generated zmsg.go + +// These function are named after the tag to help pack/unpack, if there is no tag it is the name +// of the type they pack/unpack (string, int, etc). We prefix all with unpackData or packData, so packDataA or +// packDataDomainName. + +func unpackDataA(msg []byte, off int) (net.IP, int, error) { + if off+net.IPv4len > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking a"} + } + a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...) + off += net.IPv4len + return a, off, nil +} + +func packDataA(a net.IP, msg []byte, off int) (int, error) { + switch len(a) { + case net.IPv4len, net.IPv6len: + // It must be a slice of 4, even if it is 16, we encode only the first 4 + if off+net.IPv4len > len(msg) { + return len(msg), &Error{err: "overflow packing a"} + } + + copy(msg[off:], a.To4()) + off += net.IPv4len + case 0: + // Allowed, for dynamic updates. + default: + return len(msg), &Error{err: "overflow packing a"} + } + return off, nil +} + +func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) { + if off+net.IPv6len > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking aaaa"} + } + aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...) 
+ off += net.IPv6len + return aaaa, off, nil +} + +func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) { + switch len(aaaa) { + case net.IPv6len: + if off+net.IPv6len > len(msg) { + return len(msg), &Error{err: "overflow packing aaaa"} + } + + copy(msg[off:], aaaa) + off += net.IPv6len + case 0: + // Allowed, dynamic updates. + default: + return len(msg), &Error{err: "overflow packing aaaa"} + } + return off, nil +} + +// unpackHeader unpacks an RR header, returning the offset to the end of the header and a +// re-sliced msg according to the expected length of the RR. +func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) { + hdr := RR_Header{} + if off == len(msg) { + return hdr, off, msg, nil + } + + hdr.Name, off, err = UnpackDomainName(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Rrtype, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Class, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Ttl, off, err = unpackUint32(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Rdlength, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength) + return hdr, off, msg, err +} + +// packHeader packs an RR header, returning the offset to the end of the header. +// See PackDomainName for documentation about the compression. 
+func (hdr RR_Header) packHeader(msg []byte, off int, compression compressionMap, compress bool) (int, error) { + if off == len(msg) { + return off, nil + } + + off, err := packDomainName(hdr.Name, msg, off, compression, compress) + if err != nil { + return len(msg), err + } + off, err = packUint16(hdr.Rrtype, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint16(hdr.Class, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint32(hdr.Ttl, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint16(0, msg, off) // The RDLENGTH field will be set later in packRR. + if err != nil { + return len(msg), err + } + return off, nil +} + +// helper helper functions. + +// truncateMsgFromRdLength truncates msg to match the expected length of the RR. +// Returns an error if msg is smaller than the expected size. +func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) { + lenrd := off + int(rdlength) + if lenrd > len(msg) { + return msg, &Error{err: "overflowing header size"} + } + return msg[:lenrd], nil +} + +var base32HexNoPadEncoding = base32.HexEncoding.WithPadding(base32.NoPadding) + +func fromBase32(s []byte) (buf []byte, err error) { + for i, b := range s { + if b >= 'a' && b <= 'z' { + s[i] = b - 32 + } + } + buflen := base32HexNoPadEncoding.DecodedLen(len(s)) + buf = make([]byte, buflen) + n, err := base32HexNoPadEncoding.Decode(buf, s) + buf = buf[:n] + return +} + +func toBase32(b []byte) string { + return base32HexNoPadEncoding.EncodeToString(b) +} + +func fromBase64(s []byte) (buf []byte, err error) { + buflen := base64.StdEncoding.DecodedLen(len(s)) + buf = make([]byte, buflen) + n, err := base64.StdEncoding.Decode(buf, s) + buf = buf[:n] + return +} + +func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) } + +// dynamicUpdate returns true if the Rdlength is zero. 
+func noRdata(h RR_Header) bool { return h.Rdlength == 0 } + +func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) { + if off+1 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint8"} + } + return msg[off], off + 1, nil +} + +func packUint8(i uint8, msg []byte, off int) (off1 int, err error) { + if off+1 > len(msg) { + return len(msg), &Error{err: "overflow packing uint8"} + } + msg[off] = i + return off + 1, nil +} + +func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) { + if off+2 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint16"} + } + return binary.BigEndian.Uint16(msg[off:]), off + 2, nil +} + +func packUint16(i uint16, msg []byte, off int) (off1 int, err error) { + if off+2 > len(msg) { + return len(msg), &Error{err: "overflow packing uint16"} + } + binary.BigEndian.PutUint16(msg[off:], i) + return off + 2, nil +} + +func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) { + if off+4 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint32"} + } + return binary.BigEndian.Uint32(msg[off:]), off + 4, nil +} + +func packUint32(i uint32, msg []byte, off int) (off1 int, err error) { + if off+4 > len(msg) { + return len(msg), &Error{err: "overflow packing uint32"} + } + binary.BigEndian.PutUint32(msg[off:], i) + return off + 4, nil +} + +func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) { + if off+6 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"} + } + // Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes) + i = uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 | + uint64(msg[off+4])<<8 | uint64(msg[off+5]) + off += 6 + return i, off, nil +} + +func packUint48(i uint64, msg []byte, off int) (off1 int, err error) { + if off+6 > len(msg) { + return len(msg), &Error{err: "overflow packing uint64 as uint48"} + } + 
msg[off] = byte(i >> 40) + msg[off+1] = byte(i >> 32) + msg[off+2] = byte(i >> 24) + msg[off+3] = byte(i >> 16) + msg[off+4] = byte(i >> 8) + msg[off+5] = byte(i) + off += 6 + return off, nil +} + +func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) { + if off+8 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint64"} + } + return binary.BigEndian.Uint64(msg[off:]), off + 8, nil +} + +func packUint64(i uint64, msg []byte, off int) (off1 int, err error) { + if off+8 > len(msg) { + return len(msg), &Error{err: "overflow packing uint64"} + } + binary.BigEndian.PutUint64(msg[off:], i) + off += 8 + return off, nil +} + +func unpackString(msg []byte, off int) (string, int, error) { + if off+1 > len(msg) { + return "", off, &Error{err: "overflow unpacking txt"} + } + l := int(msg[off]) + off++ + if off+l > len(msg) { + return "", off, &Error{err: "overflow unpacking txt"} + } + var s strings.Builder + consumed := 0 + for i, b := range msg[off : off+l] { + switch { + case b == '"' || b == '\\': + if consumed == 0 { + s.Grow(l * 2) + } + s.Write(msg[off+consumed : off+i]) + s.WriteByte('\\') + s.WriteByte(b) + consumed = i + 1 + case b < ' ' || b > '~': // unprintable + if consumed == 0 { + s.Grow(l * 2) + } + s.Write(msg[off+consumed : off+i]) + s.WriteString(escapeByte(b)) + consumed = i + 1 + } + } + if consumed == 0 { // no escaping needed + return string(msg[off : off+l]), off + l, nil + } + s.Write(msg[off+consumed : off+l]) + return s.String(), off + l, nil +} + +func packString(s string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) + off, err := packTxtString(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackStringBase32(msg []byte, off, end int) (string, int, error) { + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking base32"} + } + s := toBase32(msg[off:end]) + return s, end, nil +} + +func packStringBase32(s string, msg 
[]byte, off int) (int, error) { + b32, err := fromBase32([]byte(s)) + if err != nil { + return len(msg), err + } + if off+len(b32) > len(msg) { + return len(msg), &Error{err: "overflow packing base32"} + } + copy(msg[off:off+len(b32)], b32) + off += len(b32) + return off, nil +} + +func unpackStringBase64(msg []byte, off, end int) (string, int, error) { + // Rest of the RR is base64 encoded value, so we don't need an explicit length + // to be set. Thus far all RR's that have base64 encoded fields have those as their + // last one. What we do need is the end of the RR! + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking base64"} + } + s := toBase64(msg[off:end]) + return s, end, nil +} + +func packStringBase64(s string, msg []byte, off int) (int, error) { + b64, err := fromBase64([]byte(s)) + if err != nil { + return len(msg), err + } + if off+len(b64) > len(msg) { + return len(msg), &Error{err: "overflow packing base64"} + } + copy(msg[off:off+len(b64)], b64) + off += len(b64) + return off, nil +} + +func unpackStringHex(msg []byte, off, end int) (string, int, error) { + // Rest of the RR is hex encoded value, so we don't need an explicit length + // to be set. NSEC and TSIG have hex fields with a length field. + // What we do need is the end of the RR! 
+ if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking hex"} + } + + s := hex.EncodeToString(msg[off:end]) + return s, end, nil +} + +func packStringHex(s string, msg []byte, off int) (int, error) { + h, err := hex.DecodeString(s) + if err != nil { + return len(msg), err + } + if off+len(h) > len(msg) { + return len(msg), &Error{err: "overflow packing hex"} + } + copy(msg[off:off+len(h)], h) + off += len(h) + return off, nil +} + +func unpackStringAny(msg []byte, off, end int) (string, int, error) { + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking anything"} + } + return string(msg[off:end]), end, nil +} + +func packStringAny(s string, msg []byte, off int) (int, error) { + if off+len(s) > len(msg) { + return len(msg), &Error{err: "overflow packing anything"} + } + copy(msg[off:off+len(s)], s) + off += len(s) + return off, nil +} + +func unpackStringTxt(msg []byte, off int) ([]string, int, error) { + txt, off, err := unpackTxt(msg, off) + if err != nil { + return nil, len(msg), err + } + return txt, off, nil +} + +func packStringTxt(s []string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many. 
+ off, err := packTxt(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) { + var edns []EDNS0 +Option: + var code uint16 + if off+4 > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking opt"} + } + code = binary.BigEndian.Uint16(msg[off:]) + off += 2 + optlen := binary.BigEndian.Uint16(msg[off:]) + off += 2 + if off+int(optlen) > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking opt"} + } + switch code { + case EDNS0NSID: + e := new(EDNS0_NSID) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0SUBNET: + e := new(EDNS0_SUBNET) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0COOKIE: + e := new(EDNS0_COOKIE) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0EXPIRE: + e := new(EDNS0_EXPIRE) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0UL: + e := new(EDNS0_UL) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0LLQ: + e := new(EDNS0_LLQ) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0DAU: + e := new(EDNS0_DAU) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0DHU: + e := new(EDNS0_DHU) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) 
+ case EDNS0N3U: + e := new(EDNS0_N3U) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0PADDING: + e := new(EDNS0_PADDING) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + default: + e := new(EDNS0_LOCAL) + e.Code = code + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + } + + if off < len(msg) { + goto Option + } + + return edns, off, nil +} + +func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) { + for _, el := range options { + b, err := el.pack() + if err != nil || off+4 > len(msg) { + return len(msg), &Error{err: "overflow packing opt"} + } + binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code + binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length + off += 4 + if off+len(b) > len(msg) { + copy(msg[off:], b) + off = len(msg) + continue + } + // Actual data + copy(msg[off:off+len(b)], b) + off += len(b) + } + return off, nil +} + +func unpackStringOctet(msg []byte, off int) (string, int, error) { + s := string(msg[off:]) + return s, len(msg), nil +} + +func packStringOctet(s string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) + off, err := packOctetString(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) { + var nsec []uint16 + length, window, lastwindow := 0, 0, -1 + for off < len(msg) { + if off+2 > len(msg) { + return nsec, len(msg), &Error{err: "overflow unpacking nsecx"} + } + window = int(msg[off]) + length = int(msg[off+1]) + off += 2 + if window <= lastwindow { + // RFC 4034: Blocks are present in the NSEC RR RDATA in + // increasing numerical order. 
+ return nsec, len(msg), &Error{err: "out of order NSEC block"} + } + if length == 0 { + // RFC 4034: Blocks with no types present MUST NOT be included. + return nsec, len(msg), &Error{err: "empty NSEC block"} + } + if length > 32 { + return nsec, len(msg), &Error{err: "NSEC block too long"} + } + if off+length > len(msg) { + return nsec, len(msg), &Error{err: "overflowing NSEC block"} + } + + // Walk the bytes in the window and extract the type bits + for j, b := range msg[off : off+length] { + // Check the bits one by one, and set the type + if b&0x80 == 0x80 { + nsec = append(nsec, uint16(window*256+j*8+0)) + } + if b&0x40 == 0x40 { + nsec = append(nsec, uint16(window*256+j*8+1)) + } + if b&0x20 == 0x20 { + nsec = append(nsec, uint16(window*256+j*8+2)) + } + if b&0x10 == 0x10 { + nsec = append(nsec, uint16(window*256+j*8+3)) + } + if b&0x8 == 0x8 { + nsec = append(nsec, uint16(window*256+j*8+4)) + } + if b&0x4 == 0x4 { + nsec = append(nsec, uint16(window*256+j*8+5)) + } + if b&0x2 == 0x2 { + nsec = append(nsec, uint16(window*256+j*8+6)) + } + if b&0x1 == 0x1 { + nsec = append(nsec, uint16(window*256+j*8+7)) + } + } + off += length + lastwindow = window + } + return nsec, off, nil +} + +// typeBitMapLen is a helper function which computes the "maximum" length of +// a the NSEC Type BitMap field. +func typeBitMapLen(bitmap []uint16) int { + var l int + var lastwindow, lastlength uint16 + for _, t := range bitmap { + window := t / 256 + length := (t-window*256)/8 + 1 + if window > lastwindow && lastlength != 0 { // New window, jump to the new offset + l += int(lastlength) + 2 + lastlength = 0 + } + if window < lastwindow || length < lastlength { + // packDataNsec would return Error{err: "nsec bits out of order"} here, but + // when computing the length, we want do be liberal. 
+ continue + } + lastwindow, lastlength = window, length + } + l += int(lastlength) + 2 + return l +} + +func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) { + if len(bitmap) == 0 { + return off, nil + } + var lastwindow, lastlength uint16 + for _, t := range bitmap { + window := t / 256 + length := (t-window*256)/8 + 1 + if window > lastwindow && lastlength != 0 { // New window, jump to the new offset + off += int(lastlength) + 2 + lastlength = 0 + } + if window < lastwindow || length < lastlength { + return len(msg), &Error{err: "nsec bits out of order"} + } + if off+2+int(length) > len(msg) { + return len(msg), &Error{err: "overflow packing nsec"} + } + // Setting the window # + msg[off] = byte(window) + // Setting the octets length + msg[off+1] = byte(length) + // Setting the bit value for the type in the right octet + msg[off+1+int(length)] |= byte(1 << (7 - t%8)) + lastwindow, lastlength = window, length + } + off += int(lastlength) + 2 + return off, nil +} + +func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) { + var ( + servers []string + s string + err error + ) + if end > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking domain names"} + } + for off < end { + s, off, err = UnpackDomainName(msg, off) + if err != nil { + return servers, len(msg), err + } + servers = append(servers, s) + } + return servers, off, nil +} + +func packDataDomainNames(names []string, msg []byte, off int, compression compressionMap, compress bool) (int, error) { + var err error + for _, name := range names { + off, err = packDomainName(name, msg, off, compression, compress) + if err != nil { + return len(msg), err + } + } + return off, nil +} + +func packDataApl(data []APLPrefix, msg []byte, off int) (int, error) { + var err error + for i := range data { + off, err = packDataAplPrefix(&data[i], msg, off) + if err != nil { + return len(msg), err + } + } + return off, nil +} + +func packDataAplPrefix(p *APLPrefix, msg 
[]byte, off int) (int, error) { + if len(p.Network.IP) != len(p.Network.Mask) { + return len(msg), &Error{err: "address and mask lengths don't match"} + } + + var err error + prefix, _ := p.Network.Mask.Size() + addr := p.Network.IP.Mask(p.Network.Mask)[:(prefix+7)/8] + + switch len(p.Network.IP) { + case net.IPv4len: + off, err = packUint16(1, msg, off) + case net.IPv6len: + off, err = packUint16(2, msg, off) + default: + err = &Error{err: "unrecognized address family"} + } + if err != nil { + return len(msg), err + } + + off, err = packUint8(uint8(prefix), msg, off) + if err != nil { + return len(msg), err + } + + var n uint8 + if p.Negation { + n = 0x80 + } + adflen := uint8(len(addr)) & 0x7f + off, err = packUint8(n|adflen, msg, off) + if err != nil { + return len(msg), err + } + + if off+len(addr) > len(msg) { + return len(msg), &Error{err: "overflow packing APL prefix"} + } + off += copy(msg[off:], addr) + + return off, nil +} + +func unpackDataApl(msg []byte, off int) ([]APLPrefix, int, error) { + var result []APLPrefix + for off < len(msg) { + prefix, end, err := unpackDataAplPrefix(msg, off) + if err != nil { + return nil, len(msg), err + } + off = end + result = append(result, prefix) + } + return result, off, nil +} + +func unpackDataAplPrefix(msg []byte, off int) (APLPrefix, int, error) { + family, off, err := unpackUint16(msg, off) + if err != nil { + return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"} + } + prefix, off, err := unpackUint8(msg, off) + if err != nil { + return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"} + } + nlen, off, err := unpackUint8(msg, off) + if err != nil { + return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"} + } + + var ip []byte + switch family { + case 1: + ip = make([]byte, net.IPv4len) + case 2: + ip = make([]byte, net.IPv6len) + default: + return APLPrefix{}, len(msg), &Error{err: "unrecognized APL address family"} + } + if int(prefix) > 8*len(ip) { + 
return APLPrefix{}, len(msg), &Error{err: "APL prefix too long"} + } + + afdlen := int(nlen & 0x7f) + if (int(prefix)+7)/8 != afdlen { + return APLPrefix{}, len(msg), &Error{err: "invalid APL address length"} + } + if off+afdlen > len(msg) { + return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL address"} + } + off += copy(ip, msg[off:off+afdlen]) + if prefix%8 > 0 { + last := ip[afdlen-1] + zero := uint8(0xff) >> (prefix % 8) + if last&zero > 0 { + return APLPrefix{}, len(msg), &Error{err: "extra APL address bits"} + } + } + + return APLPrefix{ + Negation: (nlen & 0x80) != 0, + Network: net.IPNet{ + IP: ip, + Mask: net.CIDRMask(int(prefix), 8*len(ip)), + }, + }, off, nil +} diff --git a/vendor/github.com/miekg/dns/msg_truncate.go b/vendor/github.com/miekg/dns/msg_truncate.go new file mode 100644 index 00000000..89d40757 --- /dev/null +++ b/vendor/github.com/miekg/dns/msg_truncate.go @@ -0,0 +1,111 @@ +package dns + +// Truncate ensures the reply message will fit into the requested buffer +// size by removing records that exceed the requested size. +// +// It will first check if the reply fits without compression and then with +// compression. If it won't fit with compression, Truncate then walks the +// record adding as many records as possible without exceeding the +// requested buffer size. +// +// The TC bit will be set if any records were excluded from the message. +// This indicates to that the client should retry over TCP. +// +// According to RFC 2181, the TC bit should only be set if not all of the +// "required" RRs can be included in the response. Unfortunately, we have +// no way of knowing which RRs are required so we set the TC bit if any RR +// had to be omitted from the response. +// +// The appropriate buffer size can be retrieved from the requests OPT +// record, if present, and is transport specific otherwise. 
dns.MinMsgSize +// should be used for UDP requests without an OPT record, and +// dns.MaxMsgSize for TCP requests without an OPT record. +func (dns *Msg) Truncate(size int) { + if dns.IsTsig() != nil { + // To simplify this implementation, we don't perform + // truncation on responses with a TSIG record. + return + } + + // RFC 6891 mandates that the payload size in an OPT record + // less than 512 bytes must be treated as equal to 512 bytes. + // + // For ease of use, we impose that restriction here. + if size < 512 { + size = 512 + } + + l := msgLenWithCompressionMap(dns, nil) // uncompressed length + if l <= size { + // Don't waste effort compressing this message. + dns.Compress = false + return + } + + dns.Compress = true + + edns0 := dns.popEdns0() + if edns0 != nil { + // Account for the OPT record that gets added at the end, + // by subtracting that length from our budget. + // + // The EDNS(0) OPT record must have the root domain and + // it's length is thus unaffected by compression. + size -= Len(edns0) + } + + compression := make(map[string]struct{}) + + l = headerSize + for _, r := range dns.Question { + l += r.len(l, compression) + } + + var numAnswer int + if l < size { + l, numAnswer = truncateLoop(dns.Answer, size, l, compression) + } + + var numNS int + if l < size { + l, numNS = truncateLoop(dns.Ns, size, l, compression) + } + + var numExtra int + if l < size { + l, numExtra = truncateLoop(dns.Extra, size, l, compression) + } + + // See the function documentation for when we set this. + dns.Truncated = len(dns.Answer) > numAnswer || + len(dns.Ns) > numNS || len(dns.Extra) > numExtra + + dns.Answer = dns.Answer[:numAnswer] + dns.Ns = dns.Ns[:numNS] + dns.Extra = dns.Extra[:numExtra] + + if edns0 != nil { + // Add the OPT record back onto the additional section. 
+ dns.Extra = append(dns.Extra, edns0) + } +} + +func truncateLoop(rrs []RR, size, l int, compression map[string]struct{}) (int, int) { + for i, r := range rrs { + if r == nil { + continue + } + + l += r.len(l, compression) + if l > size { + // Return size, rather than l prior to this record, + // to prevent any further records being added. + return size, i + } + if l == size { + return l, i + 1 + } + } + + return l, len(rrs) +} diff --git a/vendor/github.com/miekg/dns/nsecx.go b/vendor/github.com/miekg/dns/nsecx.go new file mode 100644 index 00000000..8f071a47 --- /dev/null +++ b/vendor/github.com/miekg/dns/nsecx.go @@ -0,0 +1,95 @@ +package dns + +import ( + "crypto/sha1" + "encoding/hex" + "strings" +) + +// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase. +func HashName(label string, ha uint8, iter uint16, salt string) string { + if ha != SHA1 { + return "" + } + + wireSalt := make([]byte, hex.DecodedLen(len(salt))) + n, err := packStringHex(salt, wireSalt, 0) + if err != nil { + return "" + } + wireSalt = wireSalt[:n] + + name := make([]byte, 255) + off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false) + if err != nil { + return "" + } + name = name[:off] + + s := sha1.New() + // k = 0 + s.Write(name) + s.Write(wireSalt) + nsec3 := s.Sum(nil) + + // k > 0 + for k := uint16(0); k < iter; k++ { + s.Reset() + s.Write(nsec3) + s.Write(wireSalt) + nsec3 = s.Sum(nsec3[:0]) + } + + return toBase32(nsec3) +} + +// Cover returns true if a name is covered by the NSEC3 record +func (rr *NSEC3) Cover(name string) bool { + nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt) + owner := strings.ToUpper(rr.Hdr.Name) + labelIndices := Split(owner) + if len(labelIndices) < 2 { + return false + } + ownerHash := owner[:labelIndices[1]-1] + ownerZone := owner[labelIndices[1]:] + if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone + return false + } + + nextHash := 
rr.NextDomain + + // if empty interval found, try cover wildcard hashes so nameHash shouldn't match with ownerHash + if ownerHash == nextHash && nameHash != ownerHash { // empty interval + return true + } + if ownerHash > nextHash { // end of zone + if nameHash > ownerHash { // covered since there is nothing after ownerHash + return true + } + return nameHash < nextHash // if nameHash is before beginning of zone it is covered + } + if nameHash < ownerHash { // nameHash is before ownerHash, not covered + return false + } + return nameHash < nextHash // if nameHash is before nextHash is it covered (between ownerHash and nextHash) +} + +// Match returns true if a name matches the NSEC3 record +func (rr *NSEC3) Match(name string) bool { + nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt) + owner := strings.ToUpper(rr.Hdr.Name) + labelIndices := Split(owner) + if len(labelIndices) < 2 { + return false + } + ownerHash := owner[:labelIndices[1]-1] + ownerZone := owner[labelIndices[1]:] + if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone + return false + } + if ownerHash == nameHash { + return true + } + return false +} diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go new file mode 100644 index 00000000..e28f0663 --- /dev/null +++ b/vendor/github.com/miekg/dns/privaterr.go @@ -0,0 +1,114 @@ +package dns + +import "strings" + +// PrivateRdata is an interface used for implementing "Private Use" RR types, see +// RFC 6895. This allows one to experiment with new RR types, without requesting an +// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove. +type PrivateRdata interface { + // String returns the text presentaton of the Rdata of the Private RR. + String() string + // Parse parses the Rdata of the private RR. + Parse([]string) error + // Pack is used when packing a private RR into a buffer. 
+ Pack([]byte) (int, error) + // Unpack is used when unpacking a private RR from a buffer. + // TODO(miek): diff. signature than Pack, see edns0.go for instance. + Unpack([]byte) (int, error) + // Copy copies the Rdata into the PrivateRdata argument. + Copy(PrivateRdata) error + // Len returns the length in octets of the Rdata. + Len() int +} + +// PrivateRR represents an RR that uses a PrivateRdata user-defined type. +// It mocks normal RRs and implements dns.RR interface. +type PrivateRR struct { + Hdr RR_Header + Data PrivateRdata + + generator func() PrivateRdata // for copy +} + +// Header return the RR header of r. +func (r *PrivateRR) Header() *RR_Header { return &r.Hdr } + +func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() } + +// Private len and copy parts to satisfy RR interface. +func (r *PrivateRR) len(off int, compression map[string]struct{}) int { + l := r.Hdr.len(off, compression) + l += r.Data.Len() + return l +} + +func (r *PrivateRR) copy() RR { + // make new RR like this: + rr := &PrivateRR{r.Hdr, r.generator(), r.generator} + + if err := r.Data.Copy(rr.Data); err != nil { + panic("dns: got value that could not be used to copy Private rdata: " + err.Error()) + } + + return rr +} + +func (r *PrivateRR) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) { + n, err := r.Data.Pack(msg[off:]) + if err != nil { + return len(msg), err + } + off += n + return off, nil +} + +func (r *PrivateRR) unpack(msg []byte, off int) (int, error) { + off1, err := r.Data.Unpack(msg[off:]) + off += off1 + return off, err +} + +func (r *PrivateRR) parse(c *zlexer, origin string) *ParseError { + var l lex + text := make([]string, 0, 2) // could be 0..N elements, median is probably 1 +Fetch: + for { + // TODO(miek): we could also be returning _QUOTE, this might or might not + // be an issue (basically parsing TXT becomes hard) + switch l, _ = c.Next(); l.value { + case zNewline, zEOF: + break Fetch + case zString: 
+ text = append(text, l.token) + } + } + + err := r.Data.Parse(text) + if err != nil { + return &ParseError{"", err.Error(), l} + } + + return nil +} + +func (r1 *PrivateRR) isDuplicate(r2 RR) bool { return false } + +// PrivateHandle registers a private resource record type. It requires +// string and numeric representation of private RR type and generator function as argument. +func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) { + rtypestr = strings.ToUpper(rtypestr) + + TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator(), generator} } + TypeToString[rtype] = rtypestr + StringToType[rtypestr] = rtype +} + +// PrivateHandleRemove removes definitions required to support private RR type. +func PrivateHandleRemove(rtype uint16) { + rtypestr, ok := TypeToString[rtype] + if ok { + delete(TypeToRR, rtype) + delete(TypeToString, rtype) + delete(StringToType, rtypestr) + } +} diff --git a/vendor/github.com/miekg/dns/reverse.go b/vendor/github.com/miekg/dns/reverse.go new file mode 100644 index 00000000..28151af8 --- /dev/null +++ b/vendor/github.com/miekg/dns/reverse.go @@ -0,0 +1,52 @@ +package dns + +// StringToType is the reverse of TypeToString, needed for string parsing. +var StringToType = reverseInt16(TypeToString) + +// StringToClass is the reverse of ClassToString, needed for string parsing. +var StringToClass = reverseInt16(ClassToString) + +// StringToOpcode is a map of opcodes to strings. +var StringToOpcode = reverseInt(OpcodeToString) + +// StringToRcode is a map of rcodes to strings. +var StringToRcode = reverseInt(RcodeToString) + +func init() { + // Preserve previous NOTIMP typo, see github.com/miekg/dns/issues/733. + StringToRcode["NOTIMPL"] = RcodeNotImplemented +} + +// StringToAlgorithm is the reverse of AlgorithmToString. +var StringToAlgorithm = reverseInt8(AlgorithmToString) + +// StringToHash is a map of names to hash IDs. 
var StringToHash = reverseInt8(HashToString)

// StringToCertType is the reverse of CertTypeToString.
var StringToCertType = reverseInt16(CertTypeToString)

// Reverse a map
func reverseInt8(m map[uint8]string) map[string]uint8 {
	n := make(map[string]uint8, len(m))
	for u, s := range m {
		n[s] = u
	}
	return n
}

func reverseInt16(m map[uint16]string) map[string]uint16 {
	n := make(map[string]uint16, len(m))
	for u, s := range m {
		n[s] = u
	}
	return n
}

func reverseInt(m map[int]string) map[string]int {
	n := make(map[string]int, len(m))
	for u, s := range m {
		n[s] = u
	}
	return n
}

// ---- vendor/github.com/miekg/dns/sanitize.go (new file added by this patch) ----

package dns

// Dedup removes identical RRs from rrs. It preserves the original ordering.
// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies
// rrs.
// m is used to store the RRs temporary. If it is nil a new map will be allocated.
func Dedup(rrs []RR, m map[string]RR) []RR {

	if m == nil {
		m = make(map[string]RR)
	}
	// Save the keys, so we don't have to call normalizedString twice.
	keys := make([]*string, 0, len(rrs))

	for _, r := range rrs {
		key := normalizedString(r)
		keys = append(keys, &key)
		if mr, ok := m[key]; ok {
			// Shortest TTL wins.
			rh, mrh := r.Header(), mr.Header()
			if mrh.Ttl > rh.Ttl {
				mrh.Ttl = rh.Ttl
			}
			continue
		}

		m[key] = r
	}
	// If the length of the result map equals the amount of RRs we got,
	// it means they were all different. We can then just return the original rrset.
	if len(m) == len(rrs) {
		return rrs
	}

	// Compact rrs in place, keeping only the first occurrence of each key.
	j := 0
	for i, r := range rrs {
		// If keys[i] lives in the map, we should copy and remove it.
		if _, ok := m[*keys[i]]; ok {
			delete(m, *keys[i])
			rrs[j] = r
			j++
		}

		if len(m) == 0 {
			break
		}
	}

	return rrs[:j]
}

// normalizedString returns a normalized string from r. The TTL
// is removed and the domain name is lowercased. We go from this:
// DomainName<TAB>TTL<TAB>CLASS<TAB>TYPE<TAB>RDATA to:
// lowercasename<TAB>CLASS<TAB>TYPE...
func normalizedString(r RR) string {
	// A string Go DNS makes has: domainname<TAB>TTL...
	b := []byte(r.String())

	// find the first non-escaped tab, then another, so we capture where the TTL lives.
	esc := false
	ttlStart, ttlEnd := 0, 0
	for i := 0; i < len(b) && ttlEnd == 0; i++ {
		switch {
		case b[i] == '\\':
			esc = !esc
		case b[i] == '\t' && !esc:
			if ttlStart == 0 {
				ttlStart = i
				continue
			}
			if ttlEnd == 0 {
				ttlEnd = i
			}
		case b[i] >= 'A' && b[i] <= 'Z' && !esc:
			b[i] += 32 // lowercase in place (only the owner name is reached: the loop stops at the end of the TTL field)
		default:
			esc = false
		}
	}

	// remove TTL.
	copy(b[ttlStart:], b[ttlEnd:])
	cut := ttlEnd - ttlStart
	return string(b[:len(b)-cut])
}

// ---- vendor/github.com/miekg/dns/scan.go (new file added by this patch) ----

package dns

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

const maxTok = 2048 // Largest token we can return.

// The maximum depth of $INCLUDE directives supported by the
// ZoneParser API.
const maxIncludeDepth = 7

// Tokinize a RFC 1035 zone file. The tokenizer will normalize it:
// * Add ownernames if they are left blank;
// * Suppress sequences of spaces;
// * Make each RR fit on one line (_NEWLINE is send as last)
// * Handle comments: ;
// * Handle braces - anywhere.
// Lexer token values and parser states share one iota block: the zXxx
// values below zValue are token kinds emitted by the lexer, the
// zExpectXxx values are states of the ZoneParser state machine.
const (
	// Zonefile
	zEOF = iota
	zString
	zBlank
	zQuote
	zNewline
	zRrtpe
	zOwner
	zClass
	zDirOrigin   // $ORIGIN
	zDirTTL      // $TTL
	zDirInclude  // $INCLUDE
	zDirGenerate // $GENERATE

	// Privatekey file
	zValue
	zKey

	zExpectOwnerDir      // Ownername
	zExpectOwnerBl       // Whitespace after the ownername
	zExpectAny           // Expect rrtype, ttl or class
	zExpectAnyNoClass    // Expect rrtype or ttl
	zExpectAnyNoClassBl  // The whitespace after _EXPECT_ANY_NOCLASS
	zExpectAnyNoTTL      // Expect rrtype or class
	zExpectAnyNoTTLBl    // Whitespace after _EXPECT_ANY_NOTTL
	zExpectRrtype        // Expect rrtype
	zExpectRrtypeBl      // Whitespace BEFORE rrtype
	zExpectRdata         // The first element of the rdata
	zExpectDirTTLBl      // Space after directive $TTL
	zExpectDirTTL        // Directive $TTL
	zExpectDirOriginBl   // Space after directive $ORIGIN
	zExpectDirOrigin     // Directive $ORIGIN
	zExpectDirIncludeBl  // Space after directive $INCLUDE
	zExpectDirInclude    // Directive $INCLUDE
	zExpectDirGenerate   // Directive $GENERATE
	zExpectDirGenerateBl // Space after directive $GENERATE
)

// ParseError is a parsing error. It contains the parse error and the location in the io.Reader
// where the error occurred.
type ParseError struct {
	file string
	err  string
	lex  lex
}

func (e *ParseError) Error() (s string) {
	if e.file != "" {
		s = e.file + ": "
	}
	s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " +
		strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column)
	return
}

type lex struct {
	token  string // text of the token
	err    bool   // when true, token text has lexer error
	value  uint8  // value: zString, _BLANK, etc.
	torc   uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar
	line   int    // line in the file
	column int    // column in the file
}

// Token holds the token that are returned when a zone file is parsed.
type Token struct {
	// The scanned resource record when error is not nil.
	RR
	// When an error occurred, this has the error specifics.
	Error *ParseError
	// A potential comment positioned after the RR and on the same line.
	Comment string
}

// ttlState describes the state necessary to fill in an omitted RR TTL
type ttlState struct {
	ttl           uint32 // ttl is the current default TTL
	isByDirective bool   // isByDirective indicates whether ttl was set by a $TTL directive
}

// NewRR reads the RR contained in the string s. Only the first RR is
// returned. If s contains no records, NewRR will return nil with no
// error.
//
// The class defaults to IN and TTL defaults to 3600. The full zone
// file syntax like $TTL, $ORIGIN, etc. is supported.
//
// All fields of the returned RR are set, except RR.Header().Rdlength
// which is set to 0.
func NewRR(s string) (RR, error) {
	if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline
		return ReadRR(strings.NewReader(s+"\n"), "")
	}
	return ReadRR(strings.NewReader(s), "")
}

// ReadRR reads the RR contained in r.
//
// The string file is used in error reporting and to resolve relative
// $INCLUDE directives.
//
// See NewRR for more documentation.
func ReadRR(r io.Reader, file string) (RR, error) {
	zp := NewZoneParser(r, ".", file)
	zp.SetDefaultTTL(defaultTtl)
	zp.SetIncludeAllowed(true)
	rr, _ := zp.Next()
	return rr, zp.Err()
}

// ParseZone reads a RFC 1035 style zonefile from r. It returns
// Tokens on the returned channel, each consisting of either a
// parsed RR and optional comment or a nil RR and an error. The
// channel is closed by ParseZone when the end of r is reached.
//
// The string file is used in error reporting and to resolve relative
// $INCLUDE directives. The string origin is used as the initial
// origin, as if the file would start with an $ORIGIN directive.
//
// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all
// supported. Note that $GENERATE's range support up to a maximum of
// of 65535 steps.
//
// Basic usage pattern when reading from a string (z) containing the
// zone data:
//
//	for x := range dns.ParseZone(strings.NewReader(z), "", "") {
//		if x.Error != nil {
//			// log.Println(x.Error)
//		} else {
//			// Do something with x.RR
//		}
//	}
//
// Comments specified after an RR (and on the same line!) are
// returned too:
//
//	foo. IN A 10.0.0.1 ; this is a comment
//
// The text "; this is comment" is returned in Token.Comment.
// Comments inside the RR are returned concatenated along with the
// RR. Comments on a line by themselves are discarded.
//
// To prevent memory leaks it is important to always fully drain the
// returned channel. If an error occurs, it will always be the last
// Token sent on the channel.
//
// Deprecated: New users should prefer the ZoneParser API.
func ParseZone(r io.Reader, origin, file string) chan *Token {
	t := make(chan *Token, 10000)
	go parseZone(r, origin, file, t)
	return t
}

// parseZone drives a ZoneParser and forwards each RR (or the terminal
// error) onto t, closing t when done. Runs in its own goroutine.
func parseZone(r io.Reader, origin, file string, t chan *Token) {
	defer close(t)

	zp := NewZoneParser(r, origin, file)
	zp.SetIncludeAllowed(true)

	for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
		t <- &Token{RR: rr, Comment: zp.Comment()}
	}

	if err := zp.Err(); err != nil {
		pe, ok := err.(*ParseError)
		if !ok {
			pe = &ParseError{file: file, err: err.Error()}
		}

		t <- &Token{Error: pe}
	}
}

// ZoneParser is a parser for an RFC 1035 style zonefile.
//
// Each parsed RR in the zone is returned sequentially from Next. An
// optional comment can be retrieved with Comment.
//
// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all
// supported. Although $INCLUDE is disabled by default.
// Note that $GENERATE's range support up to a maximum of 65535 steps.
//
// Basic usage pattern when reading from a string (z) containing the
// zone data:
//
//	zp := NewZoneParser(strings.NewReader(z), "", "")
//
//	for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
//		// Do something with rr
//	}
//
//	if err := zp.Err(); err != nil {
//		// log.Println(err)
//	}
//
// Comments specified after an RR (and on the same line!) are
// returned too:
//
//	foo. IN A 10.0.0.1 ; this is a comment
//
// The text "; this is comment" is returned from Comment. Comments inside
// the RR are returned concatenated along with the RR. Comments on a line
// by themselves are discarded.
type ZoneParser struct {
	c *zlexer

	parseErr *ParseError

	origin string
	file   string

	defttl *ttlState

	h RR_Header

	// sub is used to parse $INCLUDE files and $GENERATE directives.
	// Next, by calling subNext, forwards the resulting RRs from this
	// sub parser to the calling code.
	sub    *ZoneParser
	osFile *os.File

	includeDepth uint8

	includeAllowed     bool
	generateDisallowed bool
}

// NewZoneParser returns an RFC 1035 style zonefile parser that reads
// from r.
//
// The string file is used in error reporting and to resolve relative
// $INCLUDE directives. The string origin is used as the initial
// origin, as if the file would start with an $ORIGIN directive.
func NewZoneParser(r io.Reader, origin, file string) *ZoneParser {
	var pe *ParseError
	if origin != "" {
		origin = Fqdn(origin)
		if _, ok := IsDomainName(origin); !ok {
			pe = &ParseError{file, "bad initial origin name", lex{}}
		}
	}

	return &ZoneParser{
		c: newZLexer(r),

		parseErr: pe,

		origin: origin,
		file:   file,
	}
}

// SetDefaultTTL sets the parsers default TTL to ttl.
func (zp *ZoneParser) SetDefaultTTL(ttl uint32) {
	zp.defttl = &ttlState{ttl, false}
}

// SetIncludeAllowed controls whether $INCLUDE directives are
// allowed. $INCLUDE directives are not supported by default.
//
// The $INCLUDE directive will open and read from a user controlled
// file on the system. Even if the file is not a valid zonefile, the
// contents of the file may be revealed in error messages, such as:
//
//	/etc/passwd: dns: not a TTL: "root:x:0:0:root:/root:/bin/bash" at line: 1:31
//	/etc/shadow: dns: not a TTL: "root:$6$::0:99999:7:::" at line: 1:125
func (zp *ZoneParser) SetIncludeAllowed(v bool) {
	zp.includeAllowed = v
}

// Err returns the first non-EOF error that was encountered by the
// ZoneParser.
func (zp *ZoneParser) Err() error {
	if zp.parseErr != nil {
		return zp.parseErr
	}

	if zp.sub != nil {
		if err := zp.sub.Err(); err != nil {
			return err
		}
	}

	return zp.c.Err()
}

// setParseError records err as the sticky parse error and returns the
// (nil, false) pair Next uses to signal termination.
func (zp *ZoneParser) setParseError(err string, l lex) (RR, bool) {
	zp.parseErr = &ParseError{zp.file, err, l}
	return nil, false
}

// Comment returns an optional text comment that occurred alongside
// the RR.
func (zp *ZoneParser) Comment() string {
	if zp.parseErr != nil {
		return ""
	}

	if zp.sub != nil {
		return zp.sub.Comment()
	}

	return zp.c.Comment()
}

// subNext drains the $INCLUDE/$GENERATE sub-parser, closing its file
// when exhausted, then resumes parsing from the outer parser.
func (zp *ZoneParser) subNext() (RR, bool) {
	if rr, ok := zp.sub.Next(); ok {
		return rr, true
	}

	if zp.sub.osFile != nil {
		zp.sub.osFile.Close()
		zp.sub.osFile = nil
	}

	if zp.sub.Err() != nil {
		// We have errors to surface.
		return nil, false
	}

	zp.sub = nil
	return zp.Next()
}

// Next advances the parser to the next RR in the zonefile and
// returns the (RR, true). It will return (nil, false) when the
// parsing stops, either by reaching the end of the input or an
// error. After Next returns (nil, false), the Err method will return
// any error that occurred during parsing.
func (zp *ZoneParser) Next() (RR, bool) {
	if zp.parseErr != nil {
		return nil, false
	}
	if zp.sub != nil {
		return zp.subNext()
	}

	// 6 possible beginnings of a line (_ is a space):
	//
	//   0. zRRTYPE                              -> all omitted until the rrtype
	//   1. zOwner _ zRrtype                     -> class/ttl omitted
	//   2. zOwner _ zString _ zRrtype           -> class omitted
	//   3. zOwner _ zString _ zClass  _ zRrtype -> ttl/class
	//   4. zOwner _ zClass  _ zRrtype           -> ttl omitted
	//   5. zOwner _ zClass  _ zString _ zRrtype -> class/ttl (reversed)
	//
	// After detecting these, we know the zRrtype so we can jump to functions
	// handling the rdata for each of these types.

	st := zExpectOwnerDir // initial state
	h := &zp.h

	for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() {
		// zlexer spotted an error already
		if l.err {
			return zp.setParseError(l.token, l)
		}

		switch st {
		case zExpectOwnerDir:
			// We can also expect a directive, like $TTL or $ORIGIN
			if zp.defttl != nil {
				h.Ttl = zp.defttl.ttl
			}

			h.Class = ClassINET

			switch l.value {
			case zNewline:
				st = zExpectOwnerDir
			case zOwner:
				name, ok := toAbsoluteName(l.token, zp.origin)
				if !ok {
					return zp.setParseError("bad owner name", l)
				}

				h.Name = name

				st = zExpectOwnerBl
			case zDirTTL:
				st = zExpectDirTTLBl
			case zDirOrigin:
				st = zExpectDirOriginBl
			case zDirInclude:
				st = zExpectDirIncludeBl
			case zDirGenerate:
				st = zExpectDirGenerateBl
			case zRrtpe:
				h.Rrtype = l.torc

				st = zExpectRdata
			case zClass:
				h.Class = l.torc

				st = zExpectAnyNoClassBl
			case zBlank:
				// Discard, can happen when there is nothing on the
				// line except the RR type
			case zString:
				ttl, ok := stringToTTL(l.token)
				if !ok {
					return zp.setParseError("not a TTL", l)
				}

				h.Ttl = ttl

				if zp.defttl == nil || !zp.defttl.isByDirective {
					zp.defttl = &ttlState{ttl, false}
				}

				st = zExpectAnyNoTTLBl
			default:
				return zp.setParseError("syntax error at beginning", l)
			}
		case zExpectDirIncludeBl:
			if l.value != zBlank {
				return zp.setParseError("no blank after $INCLUDE-directive", l)
			}

			st = zExpectDirInclude
		case zExpectDirInclude:
			if l.value != zString {
				return zp.setParseError("expecting $INCLUDE value, not this...", l)
			}

			neworigin := zp.origin // There may be optionally a new origin set after the filename, if not use current one
			switch l, _ := zp.c.Next(); l.value {
			case zBlank:
				l, _ := zp.c.Next()
				if l.value == zString {
					name, ok := toAbsoluteName(l.token, zp.origin)
					if !ok {
						return zp.setParseError("bad origin name", l)
					}

					neworigin = name
				}
			case zNewline, zEOF:
				// Ok
			default:
				return zp.setParseError("garbage after $INCLUDE", l)
			}

			if !zp.includeAllowed {
				return zp.setParseError("$INCLUDE directive not allowed", l)
			}
			if zp.includeDepth >= maxIncludeDepth {
				return zp.setParseError("too deeply nested $INCLUDE", l)
			}

			// Start with the new file
			includePath := l.token
			if !filepath.IsAbs(includePath) {
				includePath = filepath.Join(filepath.Dir(zp.file), includePath)
			}

			r1, e1 := os.Open(includePath)
			if e1 != nil {
				var as string
				if !filepath.IsAbs(l.token) {
					as = fmt.Sprintf(" as `%s'", includePath)
				}

				msg := fmt.Sprintf("failed to open `%s'%s: %v", l.token, as, e1)
				return zp.setParseError(msg, l)
			}

			zp.sub = NewZoneParser(r1, neworigin, includePath)
			zp.sub.defttl, zp.sub.includeDepth, zp.sub.osFile = zp.defttl, zp.includeDepth+1, r1
			zp.sub.SetIncludeAllowed(true)
			return zp.subNext()
		case zExpectDirTTLBl:
			if l.value != zBlank {
				return zp.setParseError("no blank after $TTL-directive", l)
			}

			st = zExpectDirTTL
		case zExpectDirTTL:
			if l.value != zString {
				return zp.setParseError("expecting $TTL value, not this...", l)
			}

			if err := slurpRemainder(zp.c); err != nil {
				return zp.setParseError(err.err, err.lex)
			}

			ttl, ok := stringToTTL(l.token)
			if !ok {
				return zp.setParseError("expecting $TTL value, not this...", l)
			}

			zp.defttl = &ttlState{ttl, true}

			st = zExpectOwnerDir
		case zExpectDirOriginBl:
			if l.value != zBlank {
				return zp.setParseError("no blank after $ORIGIN-directive", l)
			}

			st = zExpectDirOrigin
		case zExpectDirOrigin:
			if l.value != zString {
				return zp.setParseError("expecting $ORIGIN value, not this...", l)
			}

			if err := slurpRemainder(zp.c); err != nil {
				return zp.setParseError(err.err, err.lex)
			}

			name, ok := toAbsoluteName(l.token, zp.origin)
			if !ok {
				return zp.setParseError("bad origin name", l)
			}

			zp.origin = name

			st = zExpectOwnerDir
		case zExpectDirGenerateBl:
			if l.value != zBlank {
				return zp.setParseError("no blank after $GENERATE-directive", l)
			}

			st = zExpectDirGenerate
		case zExpectDirGenerate:
			if zp.generateDisallowed {
				return zp.setParseError("nested $GENERATE directive not allowed", l)
			}
			if l.value != zString {
				return zp.setParseError("expecting $GENERATE value, not this...", l)
			}

			return zp.generate(l)
		case zExpectOwnerBl:
			if l.value != zBlank {
				return zp.setParseError("no blank after owner", l)
			}

			st = zExpectAny
		case zExpectAny:
			switch l.value {
			case zRrtpe:
				if zp.defttl == nil {
					return zp.setParseError("missing TTL with no previous value", l)
				}

				h.Rrtype = l.torc

				st = zExpectRdata
			case zClass:
				h.Class = l.torc

				st = zExpectAnyNoClassBl
			case zString:
				ttl, ok := stringToTTL(l.token)
				if !ok {
					return zp.setParseError("not a TTL", l)
				}

				h.Ttl = ttl

				if zp.defttl == nil || !zp.defttl.isByDirective {
					zp.defttl = &ttlState{ttl, false}
				}

				st = zExpectAnyNoTTLBl
			default:
				return zp.setParseError("expecting RR type, TTL or class, not this...", l)
			}
		case zExpectAnyNoClassBl:
			if l.value != zBlank {
				return zp.setParseError("no blank before class", l)
			}

			st = zExpectAnyNoClass
		case zExpectAnyNoTTLBl:
			if l.value != zBlank {
				return zp.setParseError("no blank before TTL", l)
			}

			st = zExpectAnyNoTTL
		case zExpectAnyNoTTL:
			switch l.value {
			case zClass:
				h.Class = l.torc

				st = zExpectRrtypeBl
			case zRrtpe:
				h.Rrtype = l.torc

				st = zExpectRdata
			default:
				return zp.setParseError("expecting RR type or class, not this...", l)
			}
		case zExpectAnyNoClass:
			switch l.value {
			case zString:
				ttl, ok := stringToTTL(l.token)
				if !ok {
					return zp.setParseError("not a TTL", l)
				}

				h.Ttl = ttl

				if zp.defttl == nil || !zp.defttl.isByDirective {
					zp.defttl = &ttlState{ttl, false}
				}

				st = zExpectRrtypeBl
			case zRrtpe:
				h.Rrtype = l.torc

				st = zExpectRdata
			default:
				return zp.setParseError("expecting RR type or TTL, not this...", l)
			}
		case zExpectRrtypeBl:
			if l.value != zBlank {
				return zp.setParseError("no blank before RR type", l)
			}

			st = zExpectRrtype
		case zExpectRrtype:
			if l.value != zRrtpe {
				return zp.setParseError("unknown RR type", l)
			}

			h.Rrtype = l.torc

			st = zExpectRdata
		case zExpectRdata:
			var rr RR
			if newFn, ok := TypeToRR[h.Rrtype]; ok && canParseAsRR(h.Rrtype) {
				rr = newFn()
				*rr.Header() = *h
			} else {
				rr = &RFC3597{Hdr: *h}
			}

			_, isPrivate := rr.(*PrivateRR)
			if !isPrivate && zp.c.Peek().token == "" {
				// This is a dynamic update rr.

				// TODO(tmthrgd): Previously slurpRemainder was only called
				// for certain RR types, which may have been important.
				if err := slurpRemainder(zp.c); err != nil {
					return zp.setParseError(err.err, err.lex)
				}

				return rr, true
			} else if l.value == zNewline {
				return zp.setParseError("unexpected newline", l)
			}

			if err := rr.parse(zp.c, zp.origin); err != nil {
				// err is a concrete *ParseError without the file field set.
				// The setParseError call below will construct a new
				// *ParseError with file set to zp.file.

				// If err.lex is nil than we have encounter an unknown RR type
				// in that case we substitute our current lex token.
				if err.lex == (lex{}) {
					return zp.setParseError(err.err, l)
				}

				return zp.setParseError(err.err, err.lex)
			}

			return rr, true
		}
	}

	// If we get here, we and the h.Rrtype is still zero, we haven't parsed anything, this
	// is not an error, because an empty zone file is still a zone file.
	return nil, false
}

// canParseAsRR returns true if the record type can be parsed as a
// concrete RR. It blacklists certain record types that must be parsed
// according to RFC 3597 because they lack a presentation format.
func canParseAsRR(rrtype uint16) bool {
	switch rrtype {
	case TypeANY, TypeNULL, TypeOPT, TypeTSIG:
		return false
	default:
		return true
	}
}

// zlexer tokenizes an RFC 1035 zone file byte-by-byte.
type zlexer struct {
	br io.ByteReader

	readErr error

	line   int
	column int

	comBuf  string
	comment string

	l       lex
	cachedL *lex

	brace  int
	quote  bool
	space  bool
	commt  bool
	rrtype bool
	owner  bool

	nextL bool

	eol bool // end-of-line
}

func newZLexer(r io.Reader) *zlexer {
	br, ok := r.(io.ByteReader)
	if !ok {
		br = bufio.NewReaderSize(r, 1024)
	}

	return &zlexer{
		br: br,

		line: 1,

		owner: true,
	}
}

// Err returns the first non-EOF read error; EOF is normal termination.
func (zl *zlexer) Err() error {
	if zl.readErr == io.EOF {
		return nil
	}

	return zl.readErr
}

// readByte returns the next byte from the input
func (zl *zlexer) readByte() (byte, bool) {
	if zl.readErr != nil {
		return 0, false
	}

	c, err := zl.br.ReadByte()
	if err != nil {
		zl.readErr = err
		return 0, false
	}

	// delay the newline handling until the next token is delivered,
	// fixes off-by-one errors when reporting a parse error.
	if zl.eol {
		zl.line++
		zl.column = 0
		zl.eol = false
	}

	if c == '\n' {
		zl.eol = true
	} else {
		zl.column++
	}

	return c, true
}

func (zl *zlexer) Peek() lex {
	if zl.nextL {
		return zl.l
	}

	l, ok := zl.Next()
	if !ok {
		return l
	}

	if zl.nextL {
		// Cache l. Next returns zl.cachedL then zl.l.
		zl.cachedL = &l
	} else {
		// In this case l == zl.l, so we just tell Next to return zl.l.
		zl.nextL = true
	}

	return l
}

// Next returns the next lexed token and true, or (zEOF, false) at end
// of input or after a read error. Parsing errors are sticky.
func (zl *zlexer) Next() (lex, bool) {
	l := &zl.l
	switch {
	case zl.cachedL != nil:
		l, zl.cachedL = zl.cachedL, nil
		return *l, true
	case zl.nextL:
		zl.nextL = false
		return *l, true
	case l.err:
		// Parsing errors should be sticky.
		return lex{value: zEOF}, false
	}

	var (
		str [maxTok]byte // Hold string text
		com [maxTok]byte // Hold comment text

		stri int // Offset in str (0 means empty)
		comi int // Offset in com (0 means empty)

		escape bool
	)

	if zl.comBuf != "" {
		comi = copy(com[:], zl.comBuf)
		zl.comBuf = ""
	}

	zl.comment = ""

	for x, ok := zl.readByte(); ok; x, ok = zl.readByte() {
		l.line, l.column = zl.line, zl.column

		if stri >= len(str) {
			l.token = "token length insufficient for parsing"
			l.err = true
			return *l, true
		}
		if comi >= len(com) {
			l.token = "comment length insufficient for parsing"
			l.err = true
			return *l, true
		}

		switch x {
		case ' ', '\t':
			if escape || zl.quote {
				// Inside quotes or escaped this is legal.
				str[stri] = x
				stri++

				escape = false
				break
			}

			if zl.commt {
				com[comi] = x
				comi++
				break
			}

			var retL lex
			if stri == 0 {
				// Space directly in the beginning, handled in the grammar
			} else if zl.owner {
				// If we have a string and its the first, make it an owner
				l.value = zOwner
				l.token = string(str[:stri])

				// escape $... start with a \ not a $, so this will work
				switch strings.ToUpper(l.token) {
				case "$TTL":
					l.value = zDirTTL
				case "$ORIGIN":
					l.value = zDirOrigin
				case "$INCLUDE":
					l.value = zDirInclude
				case "$GENERATE":
					l.value = zDirGenerate
				}

				retL = *l
			} else {
				l.value = zString
				l.token = string(str[:stri])

				if !zl.rrtype {
					tokenUpper := strings.ToUpper(l.token)
					if t, ok := StringToType[tokenUpper]; ok {
						l.value = zRrtpe
						l.torc = t

						zl.rrtype = true
					} else if strings.HasPrefix(tokenUpper, "TYPE") {
						t, ok := typeToInt(l.token)
						if !ok {
							l.token = "unknown RR type"
							l.err = true
							return *l, true
						}

						l.value = zRrtpe
						l.torc = t

						zl.rrtype = true
					}

					if t, ok := StringToClass[tokenUpper]; ok {
						l.value = zClass
						l.torc = t
					} else if strings.HasPrefix(tokenUpper, "CLASS") {
						t, ok := classToInt(l.token)
						if !ok {
							l.token = "unknown class"
							l.err = true
							return *l, true
						}

						l.value = zClass
						l.torc = t
					}
				}

				retL = *l
			}

			zl.owner = false

			if !zl.space {
				zl.space = true

				l.value = zBlank
				l.token = " "

				if retL == (lex{}) {
					return *l, true
				}

				zl.nextL = true
			}

			if retL != (lex{}) {
				return retL, true
			}
		case ';':
			if escape || zl.quote {
				// Inside quotes or escaped this is legal.
				str[stri] = x
				stri++

				escape = false
				break
			}

			zl.commt = true
			zl.comBuf = ""

			if comi > 1 {
				// A newline was previously seen inside a comment that
				// was inside braces and we delayed adding it until now.
				com[comi] = ' ' // convert newline to space
				comi++
				if comi >= len(com) {
					l.token = "comment length insufficient for parsing"
					l.err = true
					return *l, true
				}
			}

			com[comi] = ';'
			comi++

			if stri > 0 {
				zl.comBuf = string(com[:comi])

				l.value = zString
				l.token = string(str[:stri])
				return *l, true
			}
		case '\r':
			escape = false

			if zl.quote {
				str[stri] = x
				stri++
			}

			// discard if outside of quotes
		case '\n':
			escape = false

			// Escaped newline
			if zl.quote {
				str[stri] = x
				stri++
				break
			}

			if zl.commt {
				// Reset a comment
				zl.commt = false
				zl.rrtype = false

				// If not in a brace this ends the comment AND the RR
				if zl.brace == 0 {
					zl.owner = true

					l.value = zNewline
					l.token = "\n"
					zl.comment = string(com[:comi])
					return *l, true
				}

				zl.comBuf = string(com[:comi])
				break
			}

			if zl.brace == 0 {
				// If there is previous text, we should output it here
				var retL lex
				if stri != 0 {
					l.value = zString
					l.token = string(str[:stri])

					if !zl.rrtype {
						tokenUpper := strings.ToUpper(l.token)
						if t, ok := StringToType[tokenUpper]; ok {
							zl.rrtype = true

							l.value = zRrtpe
							l.torc = t
						}
					}

					retL = *l
				}

				l.value = zNewline
				l.token = "\n"

				zl.comment = zl.comBuf
				zl.comBuf = ""
				zl.rrtype = false
				zl.owner = true

				if retL != (lex{}) {
					zl.nextL = true
					return retL, true
				}

				return *l, true
			}
		case '\\':
			// comments do not get escaped chars, everything is copied
			if zl.commt {
				com[comi] = x
				comi++
				break
			}

			// something already escaped must be in string
			if escape {
				str[stri] = x
				stri++

				escape = false
				break
			}

			// something escaped outside of string gets added to string
			str[stri] = x
			stri++

			escape = true
		case '"':
			if zl.commt {
				com[comi] = x
				comi++
				break
			}

			if escape {
				str[stri] = x
				stri++

				escape = false
				break
			}

			zl.space = false

			// send previous gathered text and the quote
			var retL lex
			if stri != 0 {
				l.value = zString
				l.token = string(str[:stri])

				retL = *l
			}

			// send quote itself as separate token
			l.value = zQuote
			l.token = "\""

			zl.quote = !zl.quote

			if retL != (lex{}) {
				zl.nextL = true
				return retL, true
			}

			return *l, true
		case '(', ')':
			if zl.commt {
				com[comi] = x
				comi++
				break
			}

			if escape || zl.quote {
				// Inside quotes or escaped this is legal.
				str[stri] = x
				stri++

				escape = false
				break
			}

			switch x {
			case ')':
				zl.brace--

				if zl.brace < 0 {
					l.token = "extra closing brace"
					l.err = true
					return *l, true
				}
			case '(':
				zl.brace++
			}
		default:
			escape = false

			if zl.commt {
				com[comi] = x
				comi++
				break
			}

			str[stri] = x
			stri++

			zl.space = false
		}
	}

	if zl.readErr != nil && zl.readErr != io.EOF {
		// Don't return any tokens after a read error occurs.
		return lex{value: zEOF}, false
	}

	var retL lex
	if stri > 0 {
		// Send remainder of str
		l.value = zString
		l.token = string(str[:stri])
		retL = *l

		if comi <= 0 {
			return retL, true
		}
	}

	if comi > 0 {
		// Send remainder of com
		l.value = zNewline
		l.token = "\n"
		zl.comment = string(com[:comi])

		if retL != (lex{}) {
			zl.nextL = true
			return retL, true
		}

		return *l, true
	}

	if zl.brace != 0 {
		l.token = "unbalanced brace"
		l.err = true
		return *l, true
	}

	return lex{value: zEOF}, false
}

// Comment returns the comment gathered for the most recent token, or
// "" after a lexer error.
func (zl *zlexer) Comment() string {
	if zl.l.err {
		return ""
	}

	return zl.comment
}

// Extract the class number from CLASSxx
func classToInt(token string) (uint16, bool) {
	offset := 5
	if len(token) < offset+1 {
		return 0, false
	}
	class, err := strconv.ParseUint(token[offset:], 10, 16)
	if err != nil {
		return 0, false
	}
	return uint16(class), true
}

// Extract the rr number from TYPExxx
func typeToInt(token string) (uint16, bool) {
	offset := 4
	if len(token) < offset+1 {
		return 0, false
	}
	typ, err :=
strconv.ParseUint(token[offset:], 10, 16) + if err != nil { + return 0, false + } + return uint16(typ), true +} + +// stringToTTL parses things like 2w, 2m, etc, and returns the time in seconds. +func stringToTTL(token string) (uint32, bool) { + var s, i uint32 + for _, c := range token { + switch c { + case 's', 'S': + s += i + i = 0 + case 'm', 'M': + s += i * 60 + i = 0 + case 'h', 'H': + s += i * 60 * 60 + i = 0 + case 'd', 'D': + s += i * 60 * 60 * 24 + i = 0 + case 'w', 'W': + s += i * 60 * 60 * 24 * 7 + i = 0 + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i *= 10 + i += uint32(c) - '0' + default: + return 0, false + } + } + return s + i, true +} + +// Parse LOC records' [.][mM] into a +// mantissa exponent format. Token should contain the entire +// string (i.e. no spaces allowed) +func stringToCm(token string) (e, m uint8, ok bool) { + if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' { + token = token[0 : len(token)-1] + } + s := strings.SplitN(token, ".", 2) + var meters, cmeters, val int + var err error + switch len(s) { + case 2: + if cmeters, err = strconv.Atoi(s[1]); err != nil { + return + } + fallthrough + case 1: + if meters, err = strconv.Atoi(s[0]); err != nil { + return + } + case 0: + // huh? 
+ return 0, 0, false + } + ok = true + if meters > 0 { + e = 2 + val = meters + } else { + e = 0 + val = cmeters + } + for val > 10 { + e++ + val /= 10 + } + if e > 9 { + ok = false + } + m = uint8(val) + return +} + +func toAbsoluteName(name, origin string) (absolute string, ok bool) { + // check for an explicit origin reference + if name == "@" { + // require a nonempty origin + if origin == "" { + return "", false + } + return origin, true + } + + // require a valid domain name + _, ok = IsDomainName(name) + if !ok || name == "" { + return "", false + } + + // check if name is already absolute + if IsFqdn(name) { + return name, true + } + + // require a nonempty origin + if origin == "" { + return "", false + } + return appendOrigin(name, origin), true +} + +func appendOrigin(name, origin string) string { + if origin == "." { + return name + origin + } + return name + "." + origin +} + +// LOC record helper function +func locCheckNorth(token string, latitude uint32) (uint32, bool) { + switch token { + case "n", "N": + return LOC_EQUATOR + latitude, true + case "s", "S": + return LOC_EQUATOR - latitude, true + } + return latitude, false +} + +// LOC record helper function +func locCheckEast(token string, longitude uint32) (uint32, bool) { + switch token { + case "e", "E": + return LOC_EQUATOR + longitude, true + case "w", "W": + return LOC_EQUATOR - longitude, true + } + return longitude, false +} + +// "Eat" the rest of the "line" +func slurpRemainder(c *zlexer) *ParseError { + l, _ := c.Next() + switch l.value { + case zBlank: + l, _ = c.Next() + if l.value != zNewline && l.value != zEOF { + return &ParseError{"", "garbage after rdata", l} + } + case zNewline: + case zEOF: + default: + return &ParseError{"", "garbage after rdata", l} + } + return nil +} + +// Parse a 64 bit-like ipv6 address: "0014:4fff:ff20:ee64" +// Used for NID and L64 record. 
+func stringToNodeID(l lex) (uint64, *ParseError) { + if len(l.token) < 19 { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + // There must be three colons at fixes postitions, if not its a parse error + if l.token[4] != ':' && l.token[9] != ':' && l.token[14] != ':' { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19] + u, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + return u, nil +} diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go new file mode 100644 index 00000000..6c37b2e2 --- /dev/null +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -0,0 +1,1764 @@ +package dns + +import ( + "encoding/base64" + "net" + "strconv" + "strings" +) + +// A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces) +// or an error +func endingToString(c *zlexer, errstr string) (string, *ParseError) { + var s string + l, _ := c.Next() // zString + for l.value != zNewline && l.value != zEOF { + if l.err { + return s, &ParseError{"", errstr, l} + } + switch l.value { + case zString: + s += l.token + case zBlank: // Ok + default: + return "", &ParseError{"", errstr, l} + } + l, _ = c.Next() + } + + return s, nil +} + +// A remainder of the rdata with embedded spaces, split on unquoted whitespace +// and return the parsed string slice or an error +func endingToTxtSlice(c *zlexer, errstr string) ([]string, *ParseError) { + // Get the remaining data until we see a zNewline + l, _ := c.Next() + if l.err { + return nil, &ParseError{"", errstr, l} + } + + // Build the slice + s := make([]string, 0) + quote := false + empty := false + for l.value != zNewline && l.value != zEOF { + if l.err { + return nil, &ParseError{"", errstr, l} + } + switch l.value { + case zString: + empty = false + if len(l.token) > 255 { + // split up 
tokens that are larger than 255 into 255-chunks + sx := []string{} + p, i := 0, 255 + for { + if i <= len(l.token) { + sx = append(sx, l.token[p:i]) + } else { + sx = append(sx, l.token[p:]) + break + + } + p, i = p+255, i+255 + } + s = append(s, sx...) + break + } + + s = append(s, l.token) + case zBlank: + if quote { + // zBlank can only be seen in between txt parts. + return nil, &ParseError{"", errstr, l} + } + case zQuote: + if empty && quote { + s = append(s, "") + } + quote = !quote + empty = true + default: + return nil, &ParseError{"", errstr, l} + } + l, _ = c.Next() + } + + if quote { + return nil, &ParseError{"", errstr, l} + } + + return s, nil +} + +func (rr *A) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + rr.A = net.ParseIP(l.token) + // IPv4 addresses cannot include ":". + // We do this rather than use net.IP's To4() because + // To4() treats IPv4-mapped IPv6 addresses as being + // IPv4. + isIPv4 := !strings.Contains(l.token, ":") + if rr.A == nil || !isIPv4 || l.err { + return &ParseError{"", "bad A A", l} + } + return slurpRemainder(c) +} + +func (rr *AAAA) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + rr.AAAA = net.ParseIP(l.token) + // IPv6 addresses must include ":", and IPv4 + // addresses cannot include ":". 
+ isIPv6 := strings.Contains(l.token, ":") + if rr.AAAA == nil || !isIPv6 || l.err { + return &ParseError{"", "bad AAAA AAAA", l} + } + return slurpRemainder(c) +} + +func (rr *NS) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad NS Ns", l} + } + rr.Ns = name + return slurpRemainder(c) +} + +func (rr *PTR) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad PTR Ptr", l} + } + rr.Ptr = name + return slurpRemainder(c) +} + +func (rr *NSAPPTR) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad NSAP-PTR Ptr", l} + } + rr.Ptr = name + return slurpRemainder(c) +} + +func (rr *RP) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + mbox, mboxOk := toAbsoluteName(l.token, o) + if l.err || !mboxOk { + return &ParseError{"", "bad RP Mbox", l} + } + rr.Mbox = mbox + + c.Next() // zBlank + l, _ = c.Next() + rr.Txt = l.token + + txt, txtOk := toAbsoluteName(l.token, o) + if l.err || !txtOk { + return &ParseError{"", "bad RP Txt", l} + } + rr.Txt = txt + + return slurpRemainder(c) +} + +func (rr *MR) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad MR Mr", l} + } + rr.Mr = name + return slurpRemainder(c) +} + +func (rr *MB) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad MB Mb", l} + } + rr.Mb = name + return slurpRemainder(c) +} + +func (rr *MG) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad MG Mg", l} + } + rr.Mg = name + 
return slurpRemainder(c) +} + +func (rr *HINFO) parse(c *zlexer, o string) *ParseError { + chunks, e := endingToTxtSlice(c, "bad HINFO Fields") + if e != nil { + return e + } + + if ln := len(chunks); ln == 0 { + return nil + } else if ln == 1 { + // Can we split it? + if out := strings.Fields(chunks[0]); len(out) > 1 { + chunks = out + } else { + chunks = append(chunks, "") + } + } + + rr.Cpu = chunks[0] + rr.Os = strings.Join(chunks[1:], " ") + + return nil +} + +func (rr *MINFO) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + rmail, rmailOk := toAbsoluteName(l.token, o) + if l.err || !rmailOk { + return &ParseError{"", "bad MINFO Rmail", l} + } + rr.Rmail = rmail + + c.Next() // zBlank + l, _ = c.Next() + rr.Email = l.token + + email, emailOk := toAbsoluteName(l.token, o) + if l.err || !emailOk { + return &ParseError{"", "bad MINFO Email", l} + } + rr.Email = email + + return slurpRemainder(c) +} + +func (rr *MF) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad MF Mf", l} + } + rr.Mf = name + return slurpRemainder(c) +} + +func (rr *MD) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad MD Md", l} + } + rr.Md = name + return slurpRemainder(c) +} + +func (rr *MX) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad MX Pref", l} + } + rr.Preference = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + rr.Mx = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad MX Mx", l} + } + rr.Mx = name + + return slurpRemainder(c) +} + +func (rr *RT) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil { + 
return &ParseError{"", "bad RT Preference", l} + } + rr.Preference = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + rr.Host = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad RT Host", l} + } + rr.Host = name + + return slurpRemainder(c) +} + +func (rr *AFSDB) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad AFSDB Subtype", l} + } + rr.Subtype = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + rr.Hostname = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad AFSDB Hostname", l} + } + rr.Hostname = name + return slurpRemainder(c) +} + +func (rr *X25) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + if l.err { + return &ParseError{"", "bad X25 PSDNAddress", l} + } + rr.PSDNAddress = l.token + return slurpRemainder(c) +} + +func (rr *KX) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad KX Pref", l} + } + rr.Preference = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + rr.Exchanger = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad KX Exchanger", l} + } + rr.Exchanger = name + return slurpRemainder(c) +} + +func (rr *CNAME) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad CNAME Target", l} + } + rr.Target = name + return slurpRemainder(c) +} + +func (rr *DNAME) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad DNAME Target", l} + } + rr.Target = name + return slurpRemainder(c) +} + +func (rr *SOA) 
parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + ns, nsOk := toAbsoluteName(l.token, o) + if l.err || !nsOk { + return &ParseError{"", "bad SOA Ns", l} + } + rr.Ns = ns + + c.Next() // zBlank + l, _ = c.Next() + rr.Mbox = l.token + + mbox, mboxOk := toAbsoluteName(l.token, o) + if l.err || !mboxOk { + return &ParseError{"", "bad SOA Mbox", l} + } + rr.Mbox = mbox + + c.Next() // zBlank + + var ( + v uint32 + ok bool + ) + for i := 0; i < 5; i++ { + l, _ = c.Next() + if l.err { + return &ParseError{"", "bad SOA zone parameter", l} + } + if j, e := strconv.ParseUint(l.token, 10, 32); e != nil { + if i == 0 { + // Serial must be a number + return &ParseError{"", "bad SOA zone parameter", l} + } + // We allow other fields to be unitful duration strings + if v, ok = stringToTTL(l.token); !ok { + return &ParseError{"", "bad SOA zone parameter", l} + + } + } else { + v = uint32(j) + } + switch i { + case 0: + rr.Serial = v + c.Next() // zBlank + case 1: + rr.Refresh = v + c.Next() // zBlank + case 2: + rr.Retry = v + c.Next() // zBlank + case 3: + rr.Expire = v + c.Next() // zBlank + case 4: + rr.Minttl = v + } + } + return slurpRemainder(c) +} + +func (rr *SRV) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad SRV Priority", l} + } + rr.Priority = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad SRV Weight", l} + } + rr.Weight = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad SRV Port", l} + } + rr.Port = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + rr.Target = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad SRV Target", l} + } + rr.Target = name + 
return slurpRemainder(c) +} + +func (rr *NAPTR) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad NAPTR Order", l} + } + rr.Order = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad NAPTR Preference", l} + } + rr.Preference = uint16(i) + + // Flags + c.Next() // zBlank + l, _ = c.Next() // _QUOTE + if l.value != zQuote { + return &ParseError{"", "bad NAPTR Flags", l} + } + l, _ = c.Next() // Either String or Quote + if l.value == zString { + rr.Flags = l.token + l, _ = c.Next() // _QUOTE + if l.value != zQuote { + return &ParseError{"", "bad NAPTR Flags", l} + } + } else if l.value == zQuote { + rr.Flags = "" + } else { + return &ParseError{"", "bad NAPTR Flags", l} + } + + // Service + c.Next() // zBlank + l, _ = c.Next() // _QUOTE + if l.value != zQuote { + return &ParseError{"", "bad NAPTR Service", l} + } + l, _ = c.Next() // Either String or Quote + if l.value == zString { + rr.Service = l.token + l, _ = c.Next() // _QUOTE + if l.value != zQuote { + return &ParseError{"", "bad NAPTR Service", l} + } + } else if l.value == zQuote { + rr.Service = "" + } else { + return &ParseError{"", "bad NAPTR Service", l} + } + + // Regexp + c.Next() // zBlank + l, _ = c.Next() // _QUOTE + if l.value != zQuote { + return &ParseError{"", "bad NAPTR Regexp", l} + } + l, _ = c.Next() // Either String or Quote + if l.value == zString { + rr.Regexp = l.token + l, _ = c.Next() // _QUOTE + if l.value != zQuote { + return &ParseError{"", "bad NAPTR Regexp", l} + } + } else if l.value == zQuote { + rr.Regexp = "" + } else { + return &ParseError{"", "bad NAPTR Regexp", l} + } + + // After quote no space?? 
+ c.Next() // zBlank + l, _ = c.Next() // zString + rr.Replacement = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad NAPTR Replacement", l} + } + rr.Replacement = name + return slurpRemainder(c) +} + +func (rr *TALINK) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + previousName, previousNameOk := toAbsoluteName(l.token, o) + if l.err || !previousNameOk { + return &ParseError{"", "bad TALINK PreviousName", l} + } + rr.PreviousName = previousName + + c.Next() // zBlank + l, _ = c.Next() + rr.NextName = l.token + + nextName, nextNameOk := toAbsoluteName(l.token, o) + if l.err || !nextNameOk { + return &ParseError{"", "bad TALINK NextName", l} + } + rr.NextName = nextName + + return slurpRemainder(c) +} + +func (rr *LOC) parse(c *zlexer, o string) *ParseError { + // Non zero defaults for LOC record, see RFC 1876, Section 3. + rr.HorizPre = 165 // 10000 + rr.VertPre = 162 // 10 + rr.Size = 18 // 1 + ok := false + + // North + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 32) + if e != nil || l.err { + return &ParseError{"", "bad LOC Latitude", l} + } + rr.Latitude = 1000 * 60 * 60 * uint32(i) + + c.Next() // zBlank + // Either number, 'N' or 'S' + l, _ = c.Next() + if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { + goto East + } + i, e = strconv.ParseUint(l.token, 10, 32) + if e != nil || l.err { + return &ParseError{"", "bad LOC Latitude minutes", l} + } + rr.Latitude += 1000 * 60 * uint32(i) + + c.Next() // zBlank + l, _ = c.Next() + if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { + return &ParseError{"", "bad LOC Latitude seconds", l} + } else { + rr.Latitude += uint32(1000 * i) + } + c.Next() // zBlank + // Either number, 'N' or 'S' + l, _ = c.Next() + if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { + goto East + } + // If still alive, flag an error + return &ParseError{"", "bad LOC Latitude North/South", l} + +East: + // East + 
c.Next() // zBlank + l, _ = c.Next() + if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { + return &ParseError{"", "bad LOC Longitude", l} + } else { + rr.Longitude = 1000 * 60 * 60 * uint32(i) + } + c.Next() // zBlank + // Either number, 'E' or 'W' + l, _ = c.Next() + if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { + goto Altitude + } + if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { + return &ParseError{"", "bad LOC Longitude minutes", l} + } else { + rr.Longitude += 1000 * 60 * uint32(i) + } + c.Next() // zBlank + l, _ = c.Next() + if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { + return &ParseError{"", "bad LOC Longitude seconds", l} + } else { + rr.Longitude += uint32(1000 * i) + } + c.Next() // zBlank + // Either number, 'E' or 'W' + l, _ = c.Next() + if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { + goto Altitude + } + // If still alive, flag an error + return &ParseError{"", "bad LOC Longitude East/West", l} + +Altitude: + c.Next() // zBlank + l, _ = c.Next() + if len(l.token) == 0 || l.err { + return &ParseError{"", "bad LOC Altitude", l} + } + if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' { + l.token = l.token[0 : len(l.token)-1] + } + if i, e := strconv.ParseFloat(l.token, 32); e != nil { + return &ParseError{"", "bad LOC Altitude", l} + } else { + rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5) + } + + // And now optionally the other values + l, _ = c.Next() + count := 0 + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zString: + switch count { + case 0: // Size + e, m, ok := stringToCm(l.token) + if !ok { + return &ParseError{"", "bad LOC Size", l} + } + rr.Size = e&0x0f | m<<4&0xf0 + case 1: // HorizPre + e, m, ok := stringToCm(l.token) + if !ok { + return &ParseError{"", "bad LOC HorizPre", l} + } + rr.HorizPre = e&0x0f | m<<4&0xf0 + case 2: // VertPre + e, m, ok := stringToCm(l.token) + if !ok { + return &ParseError{"", 
"bad LOC VertPre", l} + } + rr.VertPre = e&0x0f | m<<4&0xf0 + } + count++ + case zBlank: + // Ok + default: + return &ParseError{"", "bad LOC Size, HorizPre or VertPre", l} + } + l, _ = c.Next() + } + return nil +} + +func (rr *HIP) parse(c *zlexer, o string) *ParseError { + // HitLength is not represented + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad HIP PublicKeyAlgorithm", l} + } + rr.PublicKeyAlgorithm = uint8(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + if len(l.token) == 0 || l.err { + return &ParseError{"", "bad HIP Hit", l} + } + rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6. + rr.HitLength = uint8(len(rr.Hit)) / 2 + + c.Next() // zBlank + l, _ = c.Next() // zString + if len(l.token) == 0 || l.err { + return &ParseError{"", "bad HIP PublicKey", l} + } + rr.PublicKey = l.token // This cannot contain spaces + rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey))) + + // RendezvousServers (if any) + l, _ = c.Next() + var xs []string + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zString: + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad HIP RendezvousServers", l} + } + xs = append(xs, name) + case zBlank: + // Ok + default: + return &ParseError{"", "bad HIP RendezvousServers", l} + } + l, _ = c.Next() + } + + rr.RendezvousServers = xs + return nil +} + +func (rr *CERT) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + if v, ok := StringToCertType[l.token]; ok { + rr.Type = v + } else if i, e := strconv.ParseUint(l.token, 10, 16); e != nil { + return &ParseError{"", "bad CERT Type", l} + } else { + rr.Type = uint16(i) + } + c.Next() // zBlank + l, _ = c.Next() // zString + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad CERT KeyTag", l} + } + rr.KeyTag = uint16(i) + c.Next() // zBlank + l, _ = 
c.Next() // zString + if v, ok := StringToAlgorithm[l.token]; ok { + rr.Algorithm = v + } else if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { + return &ParseError{"", "bad CERT Algorithm", l} + } else { + rr.Algorithm = uint8(i) + } + s, e1 := endingToString(c, "bad CERT Certificate") + if e1 != nil { + return e1 + } + rr.Certificate = s + return nil +} + +func (rr *OPENPGPKEY) parse(c *zlexer, o string) *ParseError { + s, e := endingToString(c, "bad OPENPGPKEY PublicKey") + if e != nil { + return e + } + rr.PublicKey = s + return nil +} + +func (rr *CSYNC) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + j, e := strconv.ParseUint(l.token, 10, 32) + if e != nil { + // Serial must be a number + return &ParseError{"", "bad CSYNC serial", l} + } + rr.Serial = uint32(j) + + c.Next() // zBlank + + l, _ = c.Next() + j, e = strconv.ParseUint(l.token, 10, 16) + if e != nil { + // Serial must be a number + return &ParseError{"", "bad CSYNC flags", l} + } + rr.Flags = uint16(j) + + rr.TypeBitMap = make([]uint16, 0) + var ( + k uint16 + ok bool + ) + l, _ = c.Next() + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zBlank: + // Ok + case zString: + tokenUpper := strings.ToUpper(l.token) + if k, ok = StringToType[tokenUpper]; !ok { + if k, ok = typeToInt(l.token); !ok { + return &ParseError{"", "bad CSYNC TypeBitMap", l} + } + } + rr.TypeBitMap = append(rr.TypeBitMap, k) + default: + return &ParseError{"", "bad CSYNC TypeBitMap", l} + } + l, _ = c.Next() + } + return nil +} + +func (rr *SIG) parse(c *zlexer, o string) *ParseError { + return rr.RRSIG.parse(c, o) +} + +func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + tokenUpper := strings.ToUpper(l.token) + if t, ok := StringToType[tokenUpper]; !ok { + if strings.HasPrefix(tokenUpper, "TYPE") { + t, ok = typeToInt(l.token) + if !ok { + return &ParseError{"", "bad RRSIG Typecovered", l} + } + rr.TypeCovered = t + } else { + return &ParseError{"", "bad 
RRSIG Typecovered", l} + } + } else { + rr.TypeCovered = t + } + + c.Next() // zBlank + l, _ = c.Next() + i, err := strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{"", "bad RRSIG Algorithm", l} + } + rr.Algorithm = uint8(i) + + c.Next() // zBlank + l, _ = c.Next() + i, err = strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{"", "bad RRSIG Labels", l} + } + rr.Labels = uint8(i) + + c.Next() // zBlank + l, _ = c.Next() + i, err = strconv.ParseUint(l.token, 10, 32) + if err != nil || l.err { + return &ParseError{"", "bad RRSIG OrigTtl", l} + } + rr.OrigTtl = uint32(i) + + c.Next() // zBlank + l, _ = c.Next() + if i, err := StringToTime(l.token); err != nil { + // Try to see if all numeric and use it as epoch + if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { + // TODO(miek): error out on > MAX_UINT32, same below + rr.Expiration = uint32(i) + } else { + return &ParseError{"", "bad RRSIG Expiration", l} + } + } else { + rr.Expiration = i + } + + c.Next() // zBlank + l, _ = c.Next() + if i, err := StringToTime(l.token); err != nil { + if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { + rr.Inception = uint32(i) + } else { + return &ParseError{"", "bad RRSIG Inception", l} + } + } else { + rr.Inception = i + } + + c.Next() // zBlank + l, _ = c.Next() + i, err = strconv.ParseUint(l.token, 10, 16) + if err != nil || l.err { + return &ParseError{"", "bad RRSIG KeyTag", l} + } + rr.KeyTag = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() + rr.SignerName = l.token + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad RRSIG SignerName", l} + } + rr.SignerName = name + + s, e := endingToString(c, "bad RRSIG Signature") + if e != nil { + return e + } + rr.Signature = s + + return nil +} + +func (rr *NSEC) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return 
&ParseError{"", "bad NSEC NextDomain", l} + } + rr.NextDomain = name + + rr.TypeBitMap = make([]uint16, 0) + var ( + k uint16 + ok bool + ) + l, _ = c.Next() + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zBlank: + // Ok + case zString: + tokenUpper := strings.ToUpper(l.token) + if k, ok = StringToType[tokenUpper]; !ok { + if k, ok = typeToInt(l.token); !ok { + return &ParseError{"", "bad NSEC TypeBitMap", l} + } + } + rr.TypeBitMap = append(rr.TypeBitMap, k) + default: + return &ParseError{"", "bad NSEC TypeBitMap", l} + } + l, _ = c.Next() + } + return nil +} + +func (rr *NSEC3) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad NSEC3 Hash", l} + } + rr.Hash = uint8(i) + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad NSEC3 Flags", l} + } + rr.Flags = uint8(i) + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad NSEC3 Iterations", l} + } + rr.Iterations = uint16(i) + c.Next() + l, _ = c.Next() + if len(l.token) == 0 || l.err { + return &ParseError{"", "bad NSEC3 Salt", l} + } + if l.token != "-" { + rr.SaltLength = uint8(len(l.token)) / 2 + rr.Salt = l.token + } + + c.Next() + l, _ = c.Next() + if len(l.token) == 0 || l.err { + return &ParseError{"", "bad NSEC3 NextDomain", l} + } + rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits) + rr.NextDomain = l.token + + rr.TypeBitMap = make([]uint16, 0) + var ( + k uint16 + ok bool + ) + l, _ = c.Next() + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zBlank: + // Ok + case zString: + tokenUpper := strings.ToUpper(l.token) + if k, ok = StringToType[tokenUpper]; !ok { + if k, ok = typeToInt(l.token); !ok { + return &ParseError{"", "bad NSEC3 TypeBitMap", l} + } + } + rr.TypeBitMap = 
append(rr.TypeBitMap, k) + default: + return &ParseError{"", "bad NSEC3 TypeBitMap", l} + } + l, _ = c.Next() + } + return nil +} + +func (rr *NSEC3PARAM) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad NSEC3PARAM Hash", l} + } + rr.Hash = uint8(i) + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad NSEC3PARAM Flags", l} + } + rr.Flags = uint8(i) + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad NSEC3PARAM Iterations", l} + } + rr.Iterations = uint16(i) + c.Next() + l, _ = c.Next() + if l.token != "-" { + rr.SaltLength = uint8(len(l.token)) + rr.Salt = l.token + } + return slurpRemainder(c) +} + +func (rr *EUI48) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + if len(l.token) != 17 || l.err { + return &ParseError{"", "bad EUI48 Address", l} + } + addr := make([]byte, 12) + dash := 0 + for i := 0; i < 10; i += 2 { + addr[i] = l.token[i+dash] + addr[i+1] = l.token[i+1+dash] + dash++ + if l.token[i+1+dash] != '-' { + return &ParseError{"", "bad EUI48 Address", l} + } + } + addr[10] = l.token[15] + addr[11] = l.token[16] + + i, e := strconv.ParseUint(string(addr), 16, 48) + if e != nil { + return &ParseError{"", "bad EUI48 Address", l} + } + rr.Address = i + return slurpRemainder(c) +} + +func (rr *EUI64) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + if len(l.token) != 23 || l.err { + return &ParseError{"", "bad EUI64 Address", l} + } + addr := make([]byte, 16) + dash := 0 + for i := 0; i < 14; i += 2 { + addr[i] = l.token[i+dash] + addr[i+1] = l.token[i+1+dash] + dash++ + if l.token[i+1+dash] != '-' { + return &ParseError{"", "bad EUI64 Address", l} + } + } + addr[14] = l.token[21] + addr[15] = l.token[22] + + i, e := strconv.ParseUint(string(addr), 16, 64) + 
if e != nil { + return &ParseError{"", "bad EUI68 Address", l} + } + rr.Address = i + return slurpRemainder(c) +} + +func (rr *SSHFP) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad SSHFP Algorithm", l} + } + rr.Algorithm = uint8(i) + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad SSHFP Type", l} + } + rr.Type = uint8(i) + c.Next() // zBlank + s, e1 := endingToString(c, "bad SSHFP Fingerprint") + if e1 != nil { + return e1 + } + rr.FingerPrint = s + return nil +} + +func (rr *DNSKEY) parseDNSKEY(c *zlexer, o, typ string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad " + typ + " Flags", l} + } + rr.Flags = uint16(i) + c.Next() // zBlank + l, _ = c.Next() // zString + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad " + typ + " Protocol", l} + } + rr.Protocol = uint8(i) + c.Next() // zBlank + l, _ = c.Next() // zString + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad " + typ + " Algorithm", l} + } + rr.Algorithm = uint8(i) + s, e1 := endingToString(c, "bad "+typ+" PublicKey") + if e1 != nil { + return e1 + } + rr.PublicKey = s + return nil +} + +func (rr *DNSKEY) parse(c *zlexer, o string) *ParseError { + return rr.parseDNSKEY(c, o, "DNSKEY") +} + +func (rr *KEY) parse(c *zlexer, o string) *ParseError { + return rr.parseDNSKEY(c, o, "KEY") +} + +func (rr *CDNSKEY) parse(c *zlexer, o string) *ParseError { + return rr.parseDNSKEY(c, o, "CDNSKEY") +} + +func (rr *RKEY) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad RKEY Flags", l} + } + rr.Flags = uint16(i) + c.Next() // zBlank + l, _ 
= c.Next() // zString + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad RKEY Protocol", l} + } + rr.Protocol = uint8(i) + c.Next() // zBlank + l, _ = c.Next() // zString + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad RKEY Algorithm", l} + } + rr.Algorithm = uint8(i) + s, e1 := endingToString(c, "bad RKEY PublicKey") + if e1 != nil { + return e1 + } + rr.PublicKey = s + return nil +} + +func (rr *EID) parse(c *zlexer, o string) *ParseError { + s, e := endingToString(c, "bad EID Endpoint") + if e != nil { + return e + } + rr.Endpoint = s + return nil +} + +func (rr *NIMLOC) parse(c *zlexer, o string) *ParseError { + s, e := endingToString(c, "bad NIMLOC Locator") + if e != nil { + return e + } + rr.Locator = s + return nil +} + +func (rr *GPOS) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + _, e := strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return &ParseError{"", "bad GPOS Longitude", l} + } + rr.Longitude = l.token + c.Next() // zBlank + l, _ = c.Next() + _, e = strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return &ParseError{"", "bad GPOS Latitude", l} + } + rr.Latitude = l.token + c.Next() // zBlank + l, _ = c.Next() + _, e = strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return &ParseError{"", "bad GPOS Altitude", l} + } + rr.Altitude = l.token + return slurpRemainder(c) +} + +func (rr *DS) parseDS(c *zlexer, o, typ string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad " + typ + " KeyTag", l} + } + rr.KeyTag = uint16(i) + c.Next() // zBlank + l, _ = c.Next() + if i, e = strconv.ParseUint(l.token, 10, 8); e != nil { + tokenUpper := strings.ToUpper(l.token) + i, ok := StringToAlgorithm[tokenUpper] + if !ok || l.err { + return &ParseError{"", "bad " + typ + " Algorithm", l} + } + rr.Algorithm = i + } else { + rr.Algorithm = 
uint8(i) + } + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad " + typ + " DigestType", l} + } + rr.DigestType = uint8(i) + s, e1 := endingToString(c, "bad "+typ+" Digest") + if e1 != nil { + return e1 + } + rr.Digest = s + return nil +} + +func (rr *DS) parse(c *zlexer, o string) *ParseError { + return rr.parseDS(c, o, "DS") +} + +func (rr *DLV) parse(c *zlexer, o string) *ParseError { + return rr.parseDS(c, o, "DLV") +} + +func (rr *CDS) parse(c *zlexer, o string) *ParseError { + return rr.parseDS(c, o, "CDS") +} + +func (rr *TA) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad TA KeyTag", l} + } + rr.KeyTag = uint16(i) + c.Next() // zBlank + l, _ = c.Next() + if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { + tokenUpper := strings.ToUpper(l.token) + i, ok := StringToAlgorithm[tokenUpper] + if !ok || l.err { + return &ParseError{"", "bad TA Algorithm", l} + } + rr.Algorithm = i + } else { + rr.Algorithm = uint8(i) + } + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad TA DigestType", l} + } + rr.DigestType = uint8(i) + s, err := endingToString(c, "bad TA Digest") + if err != nil { + return err + } + rr.Digest = s + return nil +} + +func (rr *TLSA) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad TLSA Usage", l} + } + rr.Usage = uint8(i) + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad TLSA Selector", l} + } + rr.Selector = uint8(i) + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad TLSA 
MatchingType", l} + } + rr.MatchingType = uint8(i) + // So this needs be e2 (i.e. different than e), because...??t + s, e2 := endingToString(c, "bad TLSA Certificate") + if e2 != nil { + return e2 + } + rr.Certificate = s + return nil +} + +func (rr *SMIMEA) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad SMIMEA Usage", l} + } + rr.Usage = uint8(i) + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad SMIMEA Selector", l} + } + rr.Selector = uint8(i) + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad SMIMEA MatchingType", l} + } + rr.MatchingType = uint8(i) + // So this needs be e2 (i.e. different than e), because...??t + s, e2 := endingToString(c, "bad SMIMEA Certificate") + if e2 != nil { + return e2 + } + rr.Certificate = s + return nil +} + +func (rr *RFC3597) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + if l.token != "\\#" { + return &ParseError{"", "bad RFC3597 Rdata", l} + } + + c.Next() // zBlank + l, _ = c.Next() + rdlength, e := strconv.Atoi(l.token) + if e != nil || l.err { + return &ParseError{"", "bad RFC3597 Rdata ", l} + } + + s, e1 := endingToString(c, "bad RFC3597 Rdata") + if e1 != nil { + return e1 + } + if rdlength*2 != len(s) { + return &ParseError{"", "bad RFC3597 Rdata", l} + } + rr.Rdata = s + return nil +} + +func (rr *SPF) parse(c *zlexer, o string) *ParseError { + s, e := endingToTxtSlice(c, "bad SPF Txt") + if e != nil { + return e + } + rr.Txt = s + return nil +} + +func (rr *AVC) parse(c *zlexer, o string) *ParseError { + s, e := endingToTxtSlice(c, "bad AVC Txt") + if e != nil { + return e + } + rr.Txt = s + return nil +} + +func (rr *TXT) parse(c *zlexer, o string) *ParseError { + // no zBlank reading here, because all this rdata is TXT + s, 
e := endingToTxtSlice(c, "bad TXT Txt") + if e != nil { + return e + } + rr.Txt = s + return nil +} + +// identical to setTXT +func (rr *NINFO) parse(c *zlexer, o string) *ParseError { + s, e := endingToTxtSlice(c, "bad NINFO ZSData") + if e != nil { + return e + } + rr.ZSData = s + return nil +} + +func (rr *URI) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad URI Priority", l} + } + rr.Priority = uint16(i) + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad URI Weight", l} + } + rr.Weight = uint16(i) + + c.Next() // zBlank + s, err := endingToTxtSlice(c, "bad URI Target") + if err != nil { + return err + } + if len(s) != 1 { + return &ParseError{"", "bad URI Target", l} + } + rr.Target = s[0] + return nil +} + +func (rr *DHCID) parse(c *zlexer, o string) *ParseError { + // awesome record to parse! 
+ s, e := endingToString(c, "bad DHCID Digest") + if e != nil { + return e + } + rr.Digest = s + return nil +} + +func (rr *NID) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad NID Preference", l} + } + rr.Preference = uint16(i) + c.Next() // zBlank + l, _ = c.Next() // zString + u, err := stringToNodeID(l) + if err != nil || l.err { + return err + } + rr.NodeID = u + return slurpRemainder(c) +} + +func (rr *L32) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad L32 Preference", l} + } + rr.Preference = uint16(i) + c.Next() // zBlank + l, _ = c.Next() // zString + rr.Locator32 = net.ParseIP(l.token) + if rr.Locator32 == nil || l.err { + return &ParseError{"", "bad L32 Locator", l} + } + return slurpRemainder(c) +} + +func (rr *LP) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad LP Preference", l} + } + rr.Preference = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + rr.Fqdn = l.token + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad LP Fqdn", l} + } + rr.Fqdn = name + + return slurpRemainder(c) +} + +func (rr *L64) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad L64 Preference", l} + } + rr.Preference = uint16(i) + c.Next() // zBlank + l, _ = c.Next() // zString + u, err := stringToNodeID(l) + if err != nil || l.err { + return err + } + rr.Locator64 = u + return slurpRemainder(c) +} + +func (rr *UID) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 32) + if e != nil || l.err { + return &ParseError{"", "bad UID 
Uid", l} + } + rr.Uid = uint32(i) + return slurpRemainder(c) +} + +func (rr *GID) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 32) + if e != nil || l.err { + return &ParseError{"", "bad GID Gid", l} + } + rr.Gid = uint32(i) + return slurpRemainder(c) +} + +func (rr *UINFO) parse(c *zlexer, o string) *ParseError { + s, e := endingToTxtSlice(c, "bad UINFO Uinfo") + if e != nil { + return e + } + if ln := len(s); ln == 0 { + return nil + } + rr.Uinfo = s[0] // silently discard anything after the first character-string + return nil +} + +func (rr *PX) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad PX Preference", l} + } + rr.Preference = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + rr.Map822 = l.token + map822, map822Ok := toAbsoluteName(l.token, o) + if l.err || !map822Ok { + return &ParseError{"", "bad PX Map822", l} + } + rr.Map822 = map822 + + c.Next() // zBlank + l, _ = c.Next() // zString + rr.Mapx400 = l.token + mapx400, mapx400Ok := toAbsoluteName(l.token, o) + if l.err || !mapx400Ok { + return &ParseError{"", "bad PX Mapx400", l} + } + rr.Mapx400 = mapx400 + + return slurpRemainder(c) +} + +func (rr *CAA) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, err := strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{"", "bad CAA Flag", l} + } + rr.Flag = uint8(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + if l.value != zString { + return &ParseError{"", "bad CAA Tag", l} + } + rr.Tag = l.token + + c.Next() // zBlank + s, e := endingToTxtSlice(c, "bad CAA Value") + if e != nil { + return e + } + if len(s) != 1 { + return &ParseError{"", "bad CAA Value", l} + } + rr.Value = s[0] + return nil +} + +func (rr *TKEY) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + + // Algorithm + if l.value != zString { + return 
&ParseError{"", "bad TKEY algorithm", l} + } + rr.Algorithm = l.token + c.Next() // zBlank + + // Get the key length and key values + l, _ = c.Next() + i, err := strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{"", "bad TKEY key length", l} + } + rr.KeySize = uint16(i) + c.Next() // zBlank + l, _ = c.Next() + if l.value != zString { + return &ParseError{"", "bad TKEY key", l} + } + rr.Key = l.token + c.Next() // zBlank + + // Get the otherdata length and string data + l, _ = c.Next() + i, err = strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{"", "bad TKEY otherdata length", l} + } + rr.OtherLen = uint16(i) + c.Next() // zBlank + l, _ = c.Next() + if l.value != zString { + return &ParseError{"", "bad TKEY otherday", l} + } + rr.OtherData = l.token + + return nil +} + +func (rr *APL) parse(c *zlexer, o string) *ParseError { + var prefixes []APLPrefix + + for { + l, _ := c.Next() + if l.value == zNewline || l.value == zEOF { + break + } + if l.value == zBlank && prefixes != nil { + continue + } + if l.value != zString { + return &ParseError{"", "unexpected APL field", l} + } + + // Expected format: [!]afi:address/prefix + + colon := strings.IndexByte(l.token, ':') + if colon == -1 { + return &ParseError{"", "missing colon in APL field", l} + } + + family, cidr := l.token[:colon], l.token[colon+1:] + + var negation bool + if family != "" && family[0] == '!' 
{ + negation = true + family = family[1:] + } + + afi, err := strconv.ParseUint(family, 10, 16) + if err != nil { + return &ParseError{"", "failed to parse APL family: " + err.Error(), l} + } + var addrLen int + switch afi { + case 1: + addrLen = net.IPv4len + case 2: + addrLen = net.IPv6len + default: + return &ParseError{"", "unrecognized APL family", l} + } + + ip, subnet, err := net.ParseCIDR(cidr) + if err != nil { + return &ParseError{"", "failed to parse APL address: " + err.Error(), l} + } + if !ip.Equal(subnet.IP) { + return &ParseError{"", "extra bits in APL address", l} + } + + if len(subnet.IP) != addrLen { + return &ParseError{"", "address mismatch with the APL family", l} + } + + prefixes = append(prefixes, APLPrefix{ + Negation: negation, + Network: *subnet, + }) + } + + rr.Prefixes = prefixes + return nil +} diff --git a/vendor/github.com/miekg/dns/serve_mux.go b/vendor/github.com/miekg/dns/serve_mux.go new file mode 100644 index 00000000..69deb33e --- /dev/null +++ b/vendor/github.com/miekg/dns/serve_mux.go @@ -0,0 +1,123 @@ +package dns + +import ( + "strings" + "sync" +) + +// ServeMux is an DNS request multiplexer. It matches the zone name of +// each incoming request against a list of registered patterns add calls +// the handler for the pattern that most closely matches the zone name. +// +// ServeMux is DNSSEC aware, meaning that queries for the DS record are +// redirected to the parent zone (if that is also registered), otherwise +// the child gets the query. +// +// ServeMux is also safe for concurrent access from multiple goroutines. +// +// The zero ServeMux is empty and ready for use. +type ServeMux struct { + z map[string]Handler + m sync.RWMutex +} + +// NewServeMux allocates and returns a new ServeMux. +func NewServeMux() *ServeMux { + return new(ServeMux) +} + +// DefaultServeMux is the default ServeMux used by Serve. 
+var DefaultServeMux = NewServeMux() + +func (mux *ServeMux) match(q string, t uint16) Handler { + mux.m.RLock() + defer mux.m.RUnlock() + if mux.z == nil { + return nil + } + + q = strings.ToLower(q) + + var handler Handler + for off, end := 0, false; !end; off, end = NextLabel(q, off) { + if h, ok := mux.z[q[off:]]; ok { + if t != TypeDS { + return h + } + // Continue for DS to see if we have a parent too, if so delegate to the parent + handler = h + } + } + + // Wildcard match, if we have found nothing try the root zone as a last resort. + if h, ok := mux.z["."]; ok { + return h + } + + return handler +} + +// Handle adds a handler to the ServeMux for pattern. +func (mux *ServeMux) Handle(pattern string, handler Handler) { + if pattern == "" { + panic("dns: invalid pattern " + pattern) + } + mux.m.Lock() + if mux.z == nil { + mux.z = make(map[string]Handler) + } + mux.z[Fqdn(pattern)] = handler + mux.m.Unlock() +} + +// HandleFunc adds a handler function to the ServeMux for pattern. +func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) { + mux.Handle(pattern, HandlerFunc(handler)) +} + +// HandleRemove deregisters the handler specific for pattern from the ServeMux. +func (mux *ServeMux) HandleRemove(pattern string) { + if pattern == "" { + panic("dns: invalid pattern " + pattern) + } + mux.m.Lock() + delete(mux.z, Fqdn(pattern)) + mux.m.Unlock() +} + +// ServeDNS dispatches the request to the handler whose pattern most +// closely matches the request message. +// +// ServeDNS is DNSSEC aware, meaning that queries for the DS record +// are redirected to the parent zone (if that is also registered), +// otherwise the child gets the query. 
+// +// If no handler is found, or there is no question, a standard SERVFAIL +// message is returned +func (mux *ServeMux) ServeDNS(w ResponseWriter, req *Msg) { + var h Handler + if len(req.Question) >= 1 { // allow more than one question + h = mux.match(req.Question[0].Name, req.Question[0].Qtype) + } + + if h != nil { + h.ServeDNS(w, req) + } else { + HandleFailed(w, req) + } +} + +// Handle registers the handler with the given pattern +// in the DefaultServeMux. The documentation for +// ServeMux explains how patterns are matched. +func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } + +// HandleRemove deregisters the handle with the given pattern +// in the DefaultServeMux. +func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) } + +// HandleFunc registers the handler function with the given pattern +// in the DefaultServeMux. +func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) { + DefaultServeMux.HandleFunc(pattern, handler) +} diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go new file mode 100644 index 00000000..3cf1a024 --- /dev/null +++ b/vendor/github.com/miekg/dns/server.go @@ -0,0 +1,764 @@ +// DNS server implementation. + +package dns + +import ( + "context" + "crypto/tls" + "encoding/binary" + "errors" + "io" + "net" + "strings" + "sync" + "time" +) + +// Default maximum number of TCP queries before we close the socket. +const maxTCPQueries = 128 + +// aLongTimeAgo is a non-zero time, far in the past, used for +// immediate cancelation of network operations. +var aLongTimeAgo = time.Unix(1, 0) + +// Handler is implemented by any value that implements ServeDNS. +type Handler interface { + ServeDNS(w ResponseWriter, r *Msg) +} + +// The HandlerFunc type is an adapter to allow the use of +// ordinary functions as DNS handlers. If f is a function +// with the appropriate signature, HandlerFunc(f) is a +// Handler object that calls f. 
+type HandlerFunc func(ResponseWriter, *Msg) + +// ServeDNS calls f(w, r). +func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) { + f(w, r) +} + +// A ResponseWriter interface is used by an DNS handler to +// construct an DNS response. +type ResponseWriter interface { + // LocalAddr returns the net.Addr of the server + LocalAddr() net.Addr + // RemoteAddr returns the net.Addr of the client that sent the current request. + RemoteAddr() net.Addr + // WriteMsg writes a reply back to the client. + WriteMsg(*Msg) error + // Write writes a raw buffer back to the client. + Write([]byte) (int, error) + // Close closes the connection. + Close() error + // TsigStatus returns the status of the Tsig. + TsigStatus() error + // TsigTimersOnly sets the tsig timers only boolean. + TsigTimersOnly(bool) + // Hijack lets the caller take over the connection. + // After a call to Hijack(), the DNS package will not do anything with the connection. + Hijack() +} + +// A ConnectionStater interface is used by a DNS Handler to access TLS connection state +// when available. +type ConnectionStater interface { + ConnectionState() *tls.ConnectionState +} + +type response struct { + closed bool // connection has been closed + hijacked bool // connection has been hijacked by handler + tsigTimersOnly bool + tsigStatus error + tsigRequestMAC string + tsigSecret map[string]string // the tsig secrets + udp *net.UDPConn // i/o connection if UDP was used + tcp net.Conn // i/o connection if TCP was used + udpSession *SessionUDP // oob data to get egress interface right + writer Writer // writer to output the raw DNS bits +} + +// HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets. +func HandleFailed(w ResponseWriter, r *Msg) { + m := new(Msg) + m.SetRcode(r, RcodeServerFailure) + // does not matter if this write fails + w.WriteMsg(m) +} + +// ListenAndServe Starts a server on address and network specified Invoke handler +// for incoming queries. 
+func ListenAndServe(addr string, network string, handler Handler) error { + server := &Server{Addr: addr, Net: network, Handler: handler} + return server.ListenAndServe() +} + +// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in +// http://golang.org/pkg/net/http/#ListenAndServeTLS +func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return err + } + + config := tls.Config{ + Certificates: []tls.Certificate{cert}, + } + + server := &Server{ + Addr: addr, + Net: "tcp-tls", + TLSConfig: &config, + Handler: handler, + } + + return server.ListenAndServe() +} + +// ActivateAndServe activates a server with a listener from systemd, +// l and p should not both be non-nil. +// If both l and p are not nil only p will be used. +// Invoke handler for incoming queries. +func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error { + server := &Server{Listener: l, PacketConn: p, Handler: handler} + return server.ActivateAndServe() +} + +// Writer writes raw DNS messages; each call to Write should send an entire message. +type Writer interface { + io.Writer +} + +// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message. +type Reader interface { + // ReadTCP reads a raw message from a TCP connection. Implementations may alter + // connection properties, for example the read-deadline. + ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) + // ReadUDP reads a raw message from a UDP connection. Implementations may alter + // connection properties, for example the read-deadline. + ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) +} + +// defaultReader is an adapter for the Server struct that implements the Reader interface +// using the readTCP and readUDP func of the embedded Server. 
+type defaultReader struct { + *Server +} + +func (dr defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { + return dr.readTCP(conn, timeout) +} + +func (dr defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { + return dr.readUDP(conn, timeout) +} + +// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader. +// Implementations should never return a nil Reader. +type DecorateReader func(Reader) Reader + +// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer. +// Implementations should never return a nil Writer. +type DecorateWriter func(Writer) Writer + +// A Server defines parameters for running an DNS server. +type Server struct { + // Address to listen on, ":dns" if empty. + Addr string + // if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one + Net string + // TCP Listener to use, this is to aid in systemd's socket activation. + Listener net.Listener + // TLS connection configuration + TLSConfig *tls.Config + // UDP "Listener" to use, this is to aid in systemd's socket activation. + PacketConn net.PacketConn + // Handler to invoke, dns.DefaultServeMux if nil. + Handler Handler + // Default buffer size to use to read incoming UDP messages. If not set + // it defaults to MinMsgSize (512 B). + UDPSize int + // The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second. + ReadTimeout time.Duration + // The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second. + WriteTimeout time.Duration + // TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966). + IdleTimeout func() time.Duration + // Secret(s) for Tsig map[]. The zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2). 
+ TsigSecret map[string]string + // If NotifyStartedFunc is set it is called once the server has started listening. + NotifyStartedFunc func() + // DecorateReader is optional, allows customization of the process that reads raw DNS messages. + DecorateReader DecorateReader + // DecorateWriter is optional, allows customization of the process that writes raw DNS messages. + DecorateWriter DecorateWriter + // Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1). + MaxTCPQueries int + // Whether to set the SO_REUSEPORT socket option, allowing multiple listeners to be bound to a single address. + // It is only supported on go1.11+ and when using ListenAndServe. + ReusePort bool + // AcceptMsgFunc will check the incoming message and will reject it early in the process. + // By default DefaultMsgAcceptFunc will be used. + MsgAcceptFunc MsgAcceptFunc + + // Shutdown handling + lock sync.RWMutex + started bool + shutdown chan struct{} + conns map[net.Conn]struct{} + + // A pool for UDP message buffers. + udpPool sync.Pool +} + +func (srv *Server) isStarted() bool { + srv.lock.RLock() + started := srv.started + srv.lock.RUnlock() + return started +} + +func makeUDPBuffer(size int) func() interface{} { + return func() interface{} { + return make([]byte, size) + } +} + +func (srv *Server) init() { + srv.shutdown = make(chan struct{}) + srv.conns = make(map[net.Conn]struct{}) + + if srv.UDPSize == 0 { + srv.UDPSize = MinMsgSize + } + if srv.MsgAcceptFunc == nil { + srv.MsgAcceptFunc = DefaultMsgAcceptFunc + } + if srv.Handler == nil { + srv.Handler = DefaultServeMux + } + + srv.udpPool.New = makeUDPBuffer(srv.UDPSize) +} + +func unlockOnce(l sync.Locker) func() { + var once sync.Once + return func() { once.Do(l.Unlock) } +} + +// ListenAndServe starts a nameserver on the configured address in *Server. 
+func (srv *Server) ListenAndServe() error { + unlock := unlockOnce(&srv.lock) + srv.lock.Lock() + defer unlock() + + if srv.started { + return &Error{err: "server already started"} + } + + addr := srv.Addr + if addr == "" { + addr = ":domain" + } + + srv.init() + + switch srv.Net { + case "tcp", "tcp4", "tcp6": + l, err := listenTCP(srv.Net, addr, srv.ReusePort) + if err != nil { + return err + } + srv.Listener = l + srv.started = true + unlock() + return srv.serveTCP(l) + case "tcp-tls", "tcp4-tls", "tcp6-tls": + if srv.TLSConfig == nil || (len(srv.TLSConfig.Certificates) == 0 && srv.TLSConfig.GetCertificate == nil) { + return errors.New("dns: neither Certificates nor GetCertificate set in Config") + } + network := strings.TrimSuffix(srv.Net, "-tls") + l, err := listenTCP(network, addr, srv.ReusePort) + if err != nil { + return err + } + l = tls.NewListener(l, srv.TLSConfig) + srv.Listener = l + srv.started = true + unlock() + return srv.serveTCP(l) + case "udp", "udp4", "udp6": + l, err := listenUDP(srv.Net, addr, srv.ReusePort) + if err != nil { + return err + } + u := l.(*net.UDPConn) + if e := setUDPSocketOptions(u); e != nil { + return e + } + srv.PacketConn = l + srv.started = true + unlock() + return srv.serveUDP(u) + } + return &Error{err: "bad network"} +} + +// ActivateAndServe starts a nameserver with the PacketConn or Listener +// configured in *Server. Its main use is to start a server from systemd. 
+func (srv *Server) ActivateAndServe() error { + unlock := unlockOnce(&srv.lock) + srv.lock.Lock() + defer unlock() + + if srv.started { + return &Error{err: "server already started"} + } + + srv.init() + + pConn := srv.PacketConn + l := srv.Listener + if pConn != nil { + // Check PacketConn interface's type is valid and value + // is not nil + if t, ok := pConn.(*net.UDPConn); ok && t != nil { + if e := setUDPSocketOptions(t); e != nil { + return e + } + srv.started = true + unlock() + return srv.serveUDP(t) + } + } + if l != nil { + srv.started = true + unlock() + return srv.serveTCP(l) + } + return &Error{err: "bad listeners"} +} + +// Shutdown shuts down a server. After a call to Shutdown, ListenAndServe and +// ActivateAndServe will return. +func (srv *Server) Shutdown() error { + return srv.ShutdownContext(context.Background()) +} + +// ShutdownContext shuts down a server. After a call to ShutdownContext, +// ListenAndServe and ActivateAndServe will return. +// +// A context.Context may be passed to limit how long to wait for connections +// to terminate. +func (srv *Server) ShutdownContext(ctx context.Context) error { + srv.lock.Lock() + if !srv.started { + srv.lock.Unlock() + return &Error{err: "server not started"} + } + + srv.started = false + + if srv.PacketConn != nil { + srv.PacketConn.SetReadDeadline(aLongTimeAgo) // Unblock reads + } + + if srv.Listener != nil { + srv.Listener.Close() + } + + for rw := range srv.conns { + rw.SetReadDeadline(aLongTimeAgo) // Unblock reads + } + + srv.lock.Unlock() + + if testShutdownNotify != nil { + testShutdownNotify.Broadcast() + } + + var ctxErr error + select { + case <-srv.shutdown: + case <-ctx.Done(): + ctxErr = ctx.Err() + } + + if srv.PacketConn != nil { + srv.PacketConn.Close() + } + + return ctxErr +} + +var testShutdownNotify *sync.Cond + +// getReadTimeout is a helper func to use system timeout if server did not intend to change it. 
+func (srv *Server) getReadTimeout() time.Duration { + if srv.ReadTimeout != 0 { + return srv.ReadTimeout + } + return dnsTimeout +} + +// serveTCP starts a TCP listener for the server. +func (srv *Server) serveTCP(l net.Listener) error { + defer l.Close() + + if srv.NotifyStartedFunc != nil { + srv.NotifyStartedFunc() + } + + var wg sync.WaitGroup + defer func() { + wg.Wait() + close(srv.shutdown) + }() + + for srv.isStarted() { + rw, err := l.Accept() + if err != nil { + if !srv.isStarted() { + return nil + } + if neterr, ok := err.(net.Error); ok && neterr.Temporary() { + continue + } + return err + } + srv.lock.Lock() + // Track the connection to allow unblocking reads on shutdown. + srv.conns[rw] = struct{}{} + srv.lock.Unlock() + wg.Add(1) + go srv.serveTCPConn(&wg, rw) + } + + return nil +} + +// serveUDP starts a UDP listener for the server. +func (srv *Server) serveUDP(l *net.UDPConn) error { + defer l.Close() + + if srv.NotifyStartedFunc != nil { + srv.NotifyStartedFunc() + } + + reader := Reader(defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } + + var wg sync.WaitGroup + defer func() { + wg.Wait() + close(srv.shutdown) + }() + + rtimeout := srv.getReadTimeout() + // deadline is not used here + for srv.isStarted() { + m, s, err := reader.ReadUDP(l, rtimeout) + if err != nil { + if !srv.isStarted() { + return nil + } + if netErr, ok := err.(net.Error); ok && netErr.Temporary() { + continue + } + return err + } + if len(m) < headerSize { + if cap(m) == srv.UDPSize { + srv.udpPool.Put(m[:srv.UDPSize]) + } + continue + } + wg.Add(1) + go srv.serveUDPPacket(&wg, m, l, s) + } + + return nil +} + +// Serve a new TCP connection. 
+func (srv *Server) serveTCPConn(wg *sync.WaitGroup, rw net.Conn) { + w := &response{tsigSecret: srv.TsigSecret, tcp: rw} + if srv.DecorateWriter != nil { + w.writer = srv.DecorateWriter(w) + } else { + w.writer = w + } + + reader := Reader(defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } + + idleTimeout := tcpIdleTimeout + if srv.IdleTimeout != nil { + idleTimeout = srv.IdleTimeout() + } + + timeout := srv.getReadTimeout() + + limit := srv.MaxTCPQueries + if limit == 0 { + limit = maxTCPQueries + } + + for q := 0; (q < limit || limit == -1) && srv.isStarted(); q++ { + m, err := reader.ReadTCP(w.tcp, timeout) + if err != nil { + // TODO(tmthrgd): handle error + break + } + srv.serveDNS(m, w) + if w.closed { + break // Close() was called + } + if w.hijacked { + break // client will call Close() themselves + } + // The first read uses the read timeout, the rest use the + // idle timeout. + timeout = idleTimeout + } + + if !w.hijacked { + w.Close() + } + + srv.lock.Lock() + delete(srv.conns, w.tcp) + srv.lock.Unlock() + + wg.Done() +} + +// Serve a new UDP request. +func (srv *Server) serveUDPPacket(wg *sync.WaitGroup, m []byte, u *net.UDPConn, s *SessionUDP) { + w := &response{tsigSecret: srv.TsigSecret, udp: u, udpSession: s} + if srv.DecorateWriter != nil { + w.writer = srv.DecorateWriter(w) + } else { + w.writer = w + } + + srv.serveDNS(m, w) + wg.Done() +} + +func (srv *Server) serveDNS(m []byte, w *response) { + dh, off, err := unpackMsgHdr(m, 0) + if err != nil { + // Let client hang, they are sending crap; any reply can be used to amplify. 
+ return + } + + req := new(Msg) + req.setHdr(dh) + + switch action := srv.MsgAcceptFunc(dh); action { + case MsgAccept: + if req.unpack(dh, m, off) == nil { + break + } + + fallthrough + case MsgReject, MsgRejectNotImplemented: + opcode := req.Opcode + req.SetRcodeFormatError(req) + req.Zero = false + if action == MsgRejectNotImplemented { + req.Opcode = opcode + req.Rcode = RcodeNotImplemented + } + + // Are we allowed to delete any OPT records here? + req.Ns, req.Answer, req.Extra = nil, nil, nil + + w.WriteMsg(req) + fallthrough + case MsgIgnore: + if w.udp != nil && cap(m) == srv.UDPSize { + srv.udpPool.Put(m[:srv.UDPSize]) + } + + return + } + + w.tsigStatus = nil + if w.tsigSecret != nil { + if t := req.IsTsig(); t != nil { + if secret, ok := w.tsigSecret[t.Hdr.Name]; ok { + w.tsigStatus = TsigVerify(m, secret, "", false) + } else { + w.tsigStatus = ErrSecret + } + w.tsigTimersOnly = false + w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC + } + } + + if w.udp != nil && cap(m) == srv.UDPSize { + srv.udpPool.Put(m[:srv.UDPSize]) + } + + srv.Handler.ServeDNS(w, req) // Writes back to the client +} + +func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { + // If we race with ShutdownContext, the read deadline may + // have been set in the distant past to unblock the read + // below. We must not override it, otherwise we may block + // ShutdownContext. + srv.lock.RLock() + if srv.started { + conn.SetReadDeadline(time.Now().Add(timeout)) + } + srv.lock.RUnlock() + + var length uint16 + if err := binary.Read(conn, binary.BigEndian, &length); err != nil { + return nil, err + } + + m := make([]byte, length) + if _, err := io.ReadFull(conn, m); err != nil { + return nil, err + } + + return m, nil +} + +func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { + srv.lock.RLock() + if srv.started { + // See the comment in readTCP above. 
+ conn.SetReadDeadline(time.Now().Add(timeout)) + } + srv.lock.RUnlock() + + m := srv.udpPool.Get().([]byte) + n, s, err := ReadFromSessionUDP(conn, m) + if err != nil { + srv.udpPool.Put(m) + return nil, nil, err + } + m = m[:n] + return m, s, nil +} + +// WriteMsg implements the ResponseWriter.WriteMsg method. +func (w *response) WriteMsg(m *Msg) (err error) { + if w.closed { + return &Error{err: "WriteMsg called after Close"} + } + + var data []byte + if w.tsigSecret != nil { // if no secrets, dont check for the tsig (which is a longer check) + if t := m.IsTsig(); t != nil { + data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly) + if err != nil { + return err + } + _, err = w.writer.Write(data) + return err + } + } + data, err = m.Pack() + if err != nil { + return err + } + _, err = w.writer.Write(data) + return err +} + +// Write implements the ResponseWriter.Write method. +func (w *response) Write(m []byte) (int, error) { + if w.closed { + return 0, &Error{err: "Write called after Close"} + } + + switch { + case w.udp != nil: + return WriteToSessionUDP(w.udp, m, w.udpSession) + case w.tcp != nil: + if len(m) > MaxMsgSize { + return 0, &Error{err: "message too large"} + } + + l := make([]byte, 2) + binary.BigEndian.PutUint16(l, uint16(len(m))) + + n, err := (&net.Buffers{l, m}).WriteTo(w.tcp) + return int(n), err + default: + panic("dns: internal error: udp and tcp both nil") + } +} + +// LocalAddr implements the ResponseWriter.LocalAddr method. +func (w *response) LocalAddr() net.Addr { + switch { + case w.udp != nil: + return w.udp.LocalAddr() + case w.tcp != nil: + return w.tcp.LocalAddr() + default: + panic("dns: internal error: udp and tcp both nil") + } +} + +// RemoteAddr implements the ResponseWriter.RemoteAddr method. 
+func (w *response) RemoteAddr() net.Addr { + switch { + case w.udpSession != nil: + return w.udpSession.RemoteAddr() + case w.tcp != nil: + return w.tcp.RemoteAddr() + default: + panic("dns: internal error: udpSession and tcp both nil") + } +} + +// TsigStatus implements the ResponseWriter.TsigStatus method. +func (w *response) TsigStatus() error { return w.tsigStatus } + +// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method. +func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b } + +// Hijack implements the ResponseWriter.Hijack method. +func (w *response) Hijack() { w.hijacked = true } + +// Close implements the ResponseWriter.Close method +func (w *response) Close() error { + if w.closed { + return &Error{err: "connection already closed"} + } + w.closed = true + + switch { + case w.udp != nil: + // Can't close the udp conn, as that is actually the listener. + return nil + case w.tcp != nil: + return w.tcp.Close() + default: + panic("dns: internal error: udp and tcp both nil") + } +} + +// ConnectionState() implements the ConnectionStater.ConnectionState() interface. +func (w *response) ConnectionState() *tls.ConnectionState { + type tlsConnectionStater interface { + ConnectionState() tls.ConnectionState + } + if v, ok := w.tcp.(tlsConnectionStater); ok { + t := v.ConnectionState() + return &t + } + return nil +} diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go new file mode 100644 index 00000000..55cf1c38 --- /dev/null +++ b/vendor/github.com/miekg/dns/sig0.go @@ -0,0 +1,209 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "encoding/binary" + "math/big" + "strings" + "time" +) + +// Sign signs a dns.Msg. It fills the signature with the appropriate data. +// The SIG record should have the SignerName, KeyTag, Algorithm, Inception +// and Expiration set. 
+func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) { + if k == nil { + return nil, ErrPrivKey + } + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return nil, ErrKey + } + + rr.Hdr = RR_Header{Name: ".", Rrtype: TypeSIG, Class: ClassANY, Ttl: 0} + rr.OrigTtl, rr.TypeCovered, rr.Labels = 0, 0, 0 + + buf := make([]byte, m.Len()+Len(rr)) + mbuf, err := m.PackBuffer(buf) + if err != nil { + return nil, err + } + if &buf[0] != &mbuf[0] { + return nil, ErrBuf + } + off, err := PackRR(rr, buf, len(mbuf), nil, false) + if err != nil { + return nil, err + } + buf = buf[:off:cap(buf)] + + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return nil, ErrAlg + } + + hasher := hash.New() + // Write SIG rdata + hasher.Write(buf[len(mbuf)+1+2+2+4+2:]) + // Write message + hasher.Write(buf[:len(mbuf)]) + + signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm) + if err != nil { + return nil, err + } + + rr.Signature = toBase64(signature) + + buf = append(buf, signature...) + if len(buf) > int(^uint16(0)) { + return nil, ErrBuf + } + // Adjust sig data length + rdoff := len(mbuf) + 1 + 2 + 2 + 4 + rdlen := binary.BigEndian.Uint16(buf[rdoff:]) + rdlen += uint16(len(signature)) + binary.BigEndian.PutUint16(buf[rdoff:], rdlen) + // Adjust additional count + adc := binary.BigEndian.Uint16(buf[10:]) + adc++ + binary.BigEndian.PutUint16(buf[10:], adc) + return buf, nil +} + +// Verify validates the message buf using the key k. +// It's assumed that buf is a valid message from which rr was unpacked. 
+func (rr *SIG) Verify(k *KEY, buf []byte) error { + if k == nil { + return ErrKey + } + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return ErrKey + } + + var hash crypto.Hash + switch rr.Algorithm { + case DSA, RSASHA1: + hash = crypto.SHA1 + case RSASHA256, ECDSAP256SHA256: + hash = crypto.SHA256 + case ECDSAP384SHA384: + hash = crypto.SHA384 + case RSASHA512: + hash = crypto.SHA512 + default: + return ErrAlg + } + hasher := hash.New() + + buflen := len(buf) + qdc := binary.BigEndian.Uint16(buf[4:]) + anc := binary.BigEndian.Uint16(buf[6:]) + auc := binary.BigEndian.Uint16(buf[8:]) + adc := binary.BigEndian.Uint16(buf[10:]) + offset := headerSize + var err error + for i := uint16(0); i < qdc && offset < buflen; i++ { + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip past Type and Class + offset += 2 + 2 + } + for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ { + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip past Type, Class and TTL + offset += 2 + 2 + 4 + if offset+1 >= buflen { + continue + } + rdlen := binary.BigEndian.Uint16(buf[offset:]) + offset += 2 + offset += int(rdlen) + } + if offset >= buflen { + return &Error{err: "overflowing unpacking signed message"} + } + + // offset should be just prior to SIG + bodyend := offset + // owner name SHOULD be root + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip Type, Class, TTL, RDLen + offset += 2 + 2 + 4 + 2 + sigstart := offset + // Skip Type Covered, Algorithm, Labels, Original TTL + offset += 2 + 1 + 1 + 4 + if offset+4+4 >= buflen { + return &Error{err: "overflow unpacking signed message"} + } + expire := binary.BigEndian.Uint32(buf[offset:]) + offset += 4 + incept := binary.BigEndian.Uint32(buf[offset:]) + offset += 4 + now := uint32(time.Now().Unix()) + if now < incept || now > expire { + return ErrTime + } + // Skip key tag + offset += 2 + 
var signername string + signername, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // If key has come from the DNS name compression might + // have mangled the case of the name + if !strings.EqualFold(signername, k.Header().Name) { + return &Error{err: "signer name doesn't match key name"} + } + sigend := offset + hasher.Write(buf[sigstart:sigend]) + hasher.Write(buf[:10]) + hasher.Write([]byte{ + byte((adc - 1) << 8), + byte(adc - 1), + }) + hasher.Write(buf[12:bodyend]) + + hashed := hasher.Sum(nil) + sig := buf[sigend:] + switch k.Algorithm { + case DSA: + pk := k.publicKeyDSA() + sig = sig[1:] + r := new(big.Int).SetBytes(sig[:len(sig)/2]) + s := new(big.Int).SetBytes(sig[len(sig)/2:]) + if pk != nil { + if dsa.Verify(pk, hashed, r, s) { + return nil + } + return ErrSig + } + case RSASHA1, RSASHA256, RSASHA512: + pk := k.publicKeyRSA() + if pk != nil { + return rsa.VerifyPKCS1v15(pk, hash, hashed, sig) + } + case ECDSAP256SHA256, ECDSAP384SHA384: + pk := k.publicKeyECDSA() + r := new(big.Int).SetBytes(sig[:len(sig)/2]) + s := new(big.Int).SetBytes(sig[len(sig)/2:]) + if pk != nil { + if ecdsa.Verify(pk, hashed, r, s) { + return nil + } + return ErrSig + } + } + return ErrKeyAlg +} diff --git a/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/miekg/dns/singleinflight.go new file mode 100644 index 00000000..febcc300 --- /dev/null +++ b/vendor/github.com/miekg/dns/singleinflight.go @@ -0,0 +1,61 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Adapted for dns package usage by Miek Gieben. 
+ +package dns + +import "sync" +import "time" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + val *Msg + rtt time.Duration + err error + dups int +} + +// singleflight represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type singleflight struct { + sync.Mutex // protects m + m map[string]*call // lazily initialized + + dontDeleteForTesting bool // this is only to be used by TestConcurrentExchanges +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) { + g.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.Unlock() + c.wg.Wait() + return c.val, c.rtt, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.Unlock() + + c.val, c.rtt, c.err = fn() + c.wg.Done() + + if !g.dontDeleteForTesting { + g.Lock() + delete(g.m, key) + g.Unlock() + } + + return c.val, c.rtt, c.err, c.dups > 0 +} diff --git a/vendor/github.com/miekg/dns/smimea.go b/vendor/github.com/miekg/dns/smimea.go new file mode 100644 index 00000000..89f09f0d --- /dev/null +++ b/vendor/github.com/miekg/dns/smimea.go @@ -0,0 +1,44 @@ +package dns + +import ( + "crypto/sha256" + "crypto/x509" + "encoding/hex" +) + +// Sign creates a SMIMEA record from an SSL certificate. 
+func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { + r.Hdr.Rrtype = TypeSMIMEA + r.Usage = uint8(usage) + r.Selector = uint8(selector) + r.MatchingType = uint8(matchingType) + + r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) + return err +} + +// Verify verifies a SMIMEA record against an SSL certificate. If it is OK +// a nil error is returned. +func (r *SMIMEA) Verify(cert *x509.Certificate) error { + c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err // Not also ErrSig? + } + if r.Certificate == c { + return nil + } + return ErrSig // ErrSig, really? +} + +// SMIMEAName returns the ownername of a SMIMEA resource record as per the +// format specified in RFC 'draft-ietf-dane-smime-12' Section 2 and 3 +func SMIMEAName(email, domain string) (string, error) { + hasher := sha256.New() + hasher.Write([]byte(email)) + + // RFC Section 3: "The local-part is hashed using the SHA2-256 + // algorithm with the hash truncated to 28 octets and + // represented in its hexadecimal representation to become the + // left-most label in the prepared domain name" + return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain, nil +} diff --git a/vendor/github.com/miekg/dns/tlsa.go b/vendor/github.com/miekg/dns/tlsa.go new file mode 100644 index 00000000..4e07983b --- /dev/null +++ b/vendor/github.com/miekg/dns/tlsa.go @@ -0,0 +1,44 @@ +package dns + +import ( + "crypto/x509" + "net" + "strconv" +) + +// Sign creates a TLSA record from an SSL certificate. +func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { + r.Hdr.Rrtype = TypeTLSA + r.Usage = uint8(usage) + r.Selector = uint8(selector) + r.MatchingType = uint8(matchingType) + + r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) + return err +} + +// Verify verifies a TLSA record against an SSL certificate. 
If it is OK +// a nil error is returned. +func (r *TLSA) Verify(cert *x509.Certificate) error { + c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err // Not also ErrSig? + } + if r.Certificate == c { + return nil + } + return ErrSig // ErrSig, really? +} + +// TLSAName returns the ownername of a TLSA resource record as per the +// rules specified in RFC 6698, Section 3. +func TLSAName(name, service, network string) (string, error) { + if !IsFqdn(name) { + return "", ErrFqdn + } + p, err := net.LookupPort(network, service) + if err != nil { + return "", err + } + return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil +} diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go new file mode 100644 index 00000000..61efa248 --- /dev/null +++ b/vendor/github.com/miekg/dns/tsig.go @@ -0,0 +1,389 @@ +package dns + +import ( + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/binary" + "encoding/hex" + "hash" + "strconv" + "strings" + "time" +) + +// HMAC hashing codes. These are transmitted as domain names. +const ( + HmacMD5 = "hmac-md5.sig-alg.reg.int." + HmacSHA1 = "hmac-sha1." + HmacSHA256 = "hmac-sha256." + HmacSHA512 = "hmac-sha512." +) + +// TSIG is the RR the holds the transaction signature of a message. +// See RFC 2845 and RFC 4635. +type TSIG struct { + Hdr RR_Header + Algorithm string `dns:"domain-name"` + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 + MACSize uint16 + MAC string `dns:"size-hex:MACSize"` + OrigId uint16 + Error uint16 + OtherLen uint16 + OtherData string `dns:"size-hex:OtherLen"` +} + +// TSIG has no official presentation format, but this will suffice. 
+ +func (rr *TSIG) String() string { + s := "\n;; TSIG PSEUDOSECTION:\n; " // add another semi-colon to signify TSIG does not have a presentation format + s += rr.Hdr.String() + + " " + rr.Algorithm + + " " + tsigTimeToString(rr.TimeSigned) + + " " + strconv.Itoa(int(rr.Fudge)) + + " " + strconv.Itoa(int(rr.MACSize)) + + " " + strings.ToUpper(rr.MAC) + + " " + strconv.Itoa(int(rr.OrigId)) + + " " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR + " " + strconv.Itoa(int(rr.OtherLen)) + + " " + rr.OtherData + return s +} + +func (rr *TSIG) parse(c *zlexer, origin string) *ParseError { + panic("dns: internal error: parse should never be called on TSIG") +} + +// The following values must be put in wireformat, so that the MAC can be calculated. +// RFC 2845, section 3.4.2. TSIG Variables. +type tsigWireFmt struct { + // From RR_Header + Name string `dns:"domain-name"` + Class uint16 + Ttl uint32 + // Rdata of the TSIG + Algorithm string `dns:"domain-name"` + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 + // MACSize, MAC and OrigId excluded + Error uint16 + OtherLen uint16 + OtherData string `dns:"size-hex:OtherLen"` +} + +// If we have the MAC use this type to convert it to wiredata. Section 3.4.3. Request MAC +type macWireFmt struct { + MACSize uint16 + MAC string `dns:"size-hex:MACSize"` +} + +// 3.3. Time values used in TSIG calculations +type timerWireFmt struct { + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 +} + +// TsigGenerate fills out the TSIG record attached to the message. +// The message should contain +// a "stub" TSIG RR with the algorithm, key name (owner name of the RR), +// time fudge (defaults to 300 seconds) and the current time +// The TSIG MAC is saved in that Tsig RR. +// When TsigGenerate is called for the first time requestMAC is set to the empty string and +// timersOnly is false. +// If something goes wrong an error is returned, otherwise it is nil. 
+func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) { + if m.IsTsig() == nil { + panic("dns: TSIG not last RR in additional") + } + // If we barf here, the caller is to blame + rawsecret, err := fromBase64([]byte(secret)) + if err != nil { + return nil, "", err + } + + rr := m.Extra[len(m.Extra)-1].(*TSIG) + m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg + mbuf, err := m.Pack() + if err != nil { + return nil, "", err + } + buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly) + + t := new(TSIG) + var h hash.Hash + switch strings.ToLower(rr.Algorithm) { + case HmacMD5: + h = hmac.New(md5.New, rawsecret) + case HmacSHA1: + h = hmac.New(sha1.New, rawsecret) + case HmacSHA256: + h = hmac.New(sha256.New, rawsecret) + case HmacSHA512: + h = hmac.New(sha512.New, rawsecret) + default: + return nil, "", ErrKeyAlg + } + h.Write(buf) + t.MAC = hex.EncodeToString(h.Sum(nil)) + t.MACSize = uint16(len(t.MAC) / 2) // Size is half! + + t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0} + t.Fudge = rr.Fudge + t.TimeSigned = rr.TimeSigned + t.Algorithm = rr.Algorithm + t.OrigId = m.Id + + tbuf := make([]byte, Len(t)) + off, err := PackRR(t, tbuf, 0, nil, false) + if err != nil { + return nil, "", err + } + mbuf = append(mbuf, tbuf[:off]...) + // Update the ArCount directly in the buffer. + binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1)) + + return mbuf, t.MAC, nil +} + +// TsigVerify verifies the TSIG on a message. +// If the signature does not validate err contains the +// error, otherwise it is nil. 
+func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error { + rawsecret, err := fromBase64([]byte(secret)) + if err != nil { + return err + } + // Strip the TSIG from the incoming msg + stripped, tsig, err := stripTsig(msg) + if err != nil { + return err + } + + msgMAC, err := hex.DecodeString(tsig.MAC) + if err != nil { + return err + } + + buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly) + + // Fudge factor works both ways. A message can arrive before it was signed because + // of clock skew. + now := uint64(time.Now().Unix()) + ti := now - tsig.TimeSigned + if now < tsig.TimeSigned { + ti = tsig.TimeSigned - now + } + if uint64(tsig.Fudge) < ti { + return ErrTime + } + + var h hash.Hash + switch strings.ToLower(tsig.Algorithm) { + case HmacMD5: + h = hmac.New(md5.New, rawsecret) + case HmacSHA1: + h = hmac.New(sha1.New, rawsecret) + case HmacSHA256: + h = hmac.New(sha256.New, rawsecret) + case HmacSHA512: + h = hmac.New(sha512.New, rawsecret) + default: + return ErrKeyAlg + } + h.Write(buf) + if !hmac.Equal(h.Sum(nil), msgMAC) { + return ErrSig + } + return nil +} + +// Create a wiredata buffer for the MAC calculation. +func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte { + var buf []byte + if rr.TimeSigned == 0 { + rr.TimeSigned = uint64(time.Now().Unix()) + } + if rr.Fudge == 0 { + rr.Fudge = 300 // Standard (RFC) default. 
+ } + + // Replace message ID in header with original ID from TSIG + binary.BigEndian.PutUint16(msgbuf[0:2], rr.OrigId) + + if requestMAC != "" { + m := new(macWireFmt) + m.MACSize = uint16(len(requestMAC) / 2) + m.MAC = requestMAC + buf = make([]byte, len(requestMAC)) // long enough + n, _ := packMacWire(m, buf) + buf = buf[:n] + } + + tsigvar := make([]byte, DefaultMsgSize) + if timersOnly { + tsig := new(timerWireFmt) + tsig.TimeSigned = rr.TimeSigned + tsig.Fudge = rr.Fudge + n, _ := packTimerWire(tsig, tsigvar) + tsigvar = tsigvar[:n] + } else { + tsig := new(tsigWireFmt) + tsig.Name = strings.ToLower(rr.Hdr.Name) + tsig.Class = ClassANY + tsig.Ttl = rr.Hdr.Ttl + tsig.Algorithm = strings.ToLower(rr.Algorithm) + tsig.TimeSigned = rr.TimeSigned + tsig.Fudge = rr.Fudge + tsig.Error = rr.Error + tsig.OtherLen = rr.OtherLen + tsig.OtherData = rr.OtherData + n, _ := packTsigWire(tsig, tsigvar) + tsigvar = tsigvar[:n] + } + + if requestMAC != "" { + x := append(buf, msgbuf...) + buf = append(x, tsigvar...) + } else { + buf = append(msgbuf, tsigvar...) + } + return buf +} + +// Strip the TSIG from the raw message. +func stripTsig(msg []byte) ([]byte, *TSIG, error) { + // Copied from msg.go's Unpack() Header, but modified. 
+ var ( + dh Header + err error + ) + off, tsigoff := 0, 0 + + if dh, off, err = unpackMsgHdr(msg, off); err != nil { + return nil, nil, err + } + if dh.Arcount == 0 { + return nil, nil, ErrNoSig + } + + // Rcode, see msg.go Unpack() + if int(dh.Bits&0xF) == RcodeNotAuth { + return nil, nil, ErrAuth + } + + for i := 0; i < int(dh.Qdcount); i++ { + _, off, err = unpackQuestion(msg, off) + if err != nil { + return nil, nil, err + } + } + + _, off, err = unpackRRslice(int(dh.Ancount), msg, off) + if err != nil { + return nil, nil, err + } + _, off, err = unpackRRslice(int(dh.Nscount), msg, off) + if err != nil { + return nil, nil, err + } + + rr := new(TSIG) + var extra RR + for i := 0; i < int(dh.Arcount); i++ { + tsigoff = off + extra, off, err = UnpackRR(msg, off) + if err != nil { + return nil, nil, err + } + if extra.Header().Rrtype == TypeTSIG { + rr = extra.(*TSIG) + // Adjust Arcount. + arcount := binary.BigEndian.Uint16(msg[10:]) + binary.BigEndian.PutUint16(msg[10:], arcount-1) + break + } + } + if rr == nil { + return nil, nil, ErrNoSig + } + return msg[:tsigoff], rr, nil +} + +// Translate the TSIG time signed into a date. There is no +// need for RFC1982 calculations as this date is 48 bits. 
+func tsigTimeToString(t uint64) string { + ti := time.Unix(int64(t), 0).UTC() + return ti.Format("20060102150405") +} + +func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) { + // copied from zmsg.go TSIG packing + // RR_Header + off, err := PackDomainName(tw.Name, msg, 0, nil, false) + if err != nil { + return off, err + } + off, err = packUint16(tw.Class, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(tw.Ttl, msg, off) + if err != nil { + return off, err + } + + off, err = PackDomainName(tw.Algorithm, msg, off, nil, false) + if err != nil { + return off, err + } + off, err = packUint48(tw.TimeSigned, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(tw.Fudge, msg, off) + if err != nil { + return off, err + } + + off, err = packUint16(tw.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(tw.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(tw.OtherData, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func packMacWire(mw *macWireFmt, msg []byte) (int, error) { + off, err := packUint16(mw.MACSize, msg, 0) + if err != nil { + return off, err + } + off, err = packStringHex(mw.MAC, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) { + off, err := packUint48(tw.TimeSigned, msg, 0) + if err != nil { + return off, err + } + off, err = packUint16(tw.Fudge, msg, off) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go new file mode 100644 index 00000000..a6048cb1 --- /dev/null +++ b/vendor/github.com/miekg/dns/types.go @@ -0,0 +1,1527 @@ +package dns + +import ( + "bytes" + "fmt" + "net" + "strconv" + "strings" + "time" +) + +type ( + // Type is a DNS type. + Type uint16 + // Class is a DNS class. + Class uint16 + // Name is a DNS domain name. 
+ Name string +) + +// Packet formats + +// Wire constants and supported types. +const ( + // valid RR_Header.Rrtype and Question.qtype + + TypeNone uint16 = 0 + TypeA uint16 = 1 + TypeNS uint16 = 2 + TypeMD uint16 = 3 + TypeMF uint16 = 4 + TypeCNAME uint16 = 5 + TypeSOA uint16 = 6 + TypeMB uint16 = 7 + TypeMG uint16 = 8 + TypeMR uint16 = 9 + TypeNULL uint16 = 10 + TypePTR uint16 = 12 + TypeHINFO uint16 = 13 + TypeMINFO uint16 = 14 + TypeMX uint16 = 15 + TypeTXT uint16 = 16 + TypeRP uint16 = 17 + TypeAFSDB uint16 = 18 + TypeX25 uint16 = 19 + TypeISDN uint16 = 20 + TypeRT uint16 = 21 + TypeNSAPPTR uint16 = 23 + TypeSIG uint16 = 24 + TypeKEY uint16 = 25 + TypePX uint16 = 26 + TypeGPOS uint16 = 27 + TypeAAAA uint16 = 28 + TypeLOC uint16 = 29 + TypeNXT uint16 = 30 + TypeEID uint16 = 31 + TypeNIMLOC uint16 = 32 + TypeSRV uint16 = 33 + TypeATMA uint16 = 34 + TypeNAPTR uint16 = 35 + TypeKX uint16 = 36 + TypeCERT uint16 = 37 + TypeDNAME uint16 = 39 + TypeOPT uint16 = 41 // EDNS + TypeAPL uint16 = 42 + TypeDS uint16 = 43 + TypeSSHFP uint16 = 44 + TypeRRSIG uint16 = 46 + TypeNSEC uint16 = 47 + TypeDNSKEY uint16 = 48 + TypeDHCID uint16 = 49 + TypeNSEC3 uint16 = 50 + TypeNSEC3PARAM uint16 = 51 + TypeTLSA uint16 = 52 + TypeSMIMEA uint16 = 53 + TypeHIP uint16 = 55 + TypeNINFO uint16 = 56 + TypeRKEY uint16 = 57 + TypeTALINK uint16 = 58 + TypeCDS uint16 = 59 + TypeCDNSKEY uint16 = 60 + TypeOPENPGPKEY uint16 = 61 + TypeCSYNC uint16 = 62 + TypeSPF uint16 = 99 + TypeUINFO uint16 = 100 + TypeUID uint16 = 101 + TypeGID uint16 = 102 + TypeUNSPEC uint16 = 103 + TypeNID uint16 = 104 + TypeL32 uint16 = 105 + TypeL64 uint16 = 106 + TypeLP uint16 = 107 + TypeEUI48 uint16 = 108 + TypeEUI64 uint16 = 109 + TypeURI uint16 = 256 + TypeCAA uint16 = 257 + TypeAVC uint16 = 258 + + TypeTKEY uint16 = 249 + TypeTSIG uint16 = 250 + + // valid Question.Qtype only + TypeIXFR uint16 = 251 + TypeAXFR uint16 = 252 + TypeMAILB uint16 = 253 + TypeMAILA uint16 = 254 + TypeANY uint16 = 255 + + TypeTA uint16 = 
32768 + TypeDLV uint16 = 32769 + TypeReserved uint16 = 65535 + + // valid Question.Qclass + ClassINET = 1 + ClassCSNET = 2 + ClassCHAOS = 3 + ClassHESIOD = 4 + ClassNONE = 254 + ClassANY = 255 + + // Message Response Codes, see https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml + RcodeSuccess = 0 // NoError - No Error [DNS] + RcodeFormatError = 1 // FormErr - Format Error [DNS] + RcodeServerFailure = 2 // ServFail - Server Failure [DNS] + RcodeNameError = 3 // NXDomain - Non-Existent Domain [DNS] + RcodeNotImplemented = 4 // NotImp - Not Implemented [DNS] + RcodeRefused = 5 // Refused - Query Refused [DNS] + RcodeYXDomain = 6 // YXDomain - Name Exists when it should not [DNS Update] + RcodeYXRrset = 7 // YXRRSet - RR Set Exists when it should not [DNS Update] + RcodeNXRrset = 8 // NXRRSet - RR Set that should exist does not [DNS Update] + RcodeNotAuth = 9 // NotAuth - Server Not Authoritative for zone [DNS Update] + RcodeNotZone = 10 // NotZone - Name not contained in zone [DNS Update/TSIG] + RcodeBadSig = 16 // BADSIG - TSIG Signature Failure [TSIG] + RcodeBadVers = 16 // BADVERS - Bad OPT Version [EDNS0] + RcodeBadKey = 17 // BADKEY - Key not recognized [TSIG] + RcodeBadTime = 18 // BADTIME - Signature out of time window [TSIG] + RcodeBadMode = 19 // BADMODE - Bad TKEY Mode [TKEY] + RcodeBadName = 20 // BADNAME - Duplicate key name [TKEY] + RcodeBadAlg = 21 // BADALG - Algorithm not supported [TKEY] + RcodeBadTrunc = 22 // BADTRUNC - Bad Truncation [TSIG] + RcodeBadCookie = 23 // BADCOOKIE - Bad/missing Server Cookie [DNS Cookies] + + // Message Opcodes. There is no 3. + OpcodeQuery = 0 + OpcodeIQuery = 1 + OpcodeStatus = 2 + OpcodeNotify = 4 + OpcodeUpdate = 5 +) + +// Header is the wire format for the DNS packet header. 
+type Header struct { + Id uint16 + Bits uint16 + Qdcount, Ancount, Nscount, Arcount uint16 +} + +const ( + headerSize = 12 + + // Header.Bits + _QR = 1 << 15 // query/response (response=1) + _AA = 1 << 10 // authoritative + _TC = 1 << 9 // truncated + _RD = 1 << 8 // recursion desired + _RA = 1 << 7 // recursion available + _Z = 1 << 6 // Z + _AD = 1 << 5 // authticated data + _CD = 1 << 4 // checking disabled +) + +// Various constants used in the LOC RR, See RFC 1887. +const ( + LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2. + LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2. + LOC_HOURS = 60 * 1000 + LOC_DEGREES = 60 * LOC_HOURS + LOC_ALTITUDEBASE = 100000 +) + +// Different Certificate Types, see RFC 4398, Section 2.1 +const ( + CertPKIX = 1 + iota + CertSPKI + CertPGP + CertIPIX + CertISPKI + CertIPGP + CertACPKIX + CertIACPKIX + CertURI = 253 + CertOID = 254 +) + +// CertTypeToString converts the Cert Type to its string representation. +// See RFC 4398 and RFC 6944. +var CertTypeToString = map[uint16]string{ + CertPKIX: "PKIX", + CertSPKI: "SPKI", + CertPGP: "PGP", + CertIPIX: "IPIX", + CertISPKI: "ISPKI", + CertIPGP: "IPGP", + CertACPKIX: "ACPKIX", + CertIACPKIX: "IACPKIX", + CertURI: "URI", + CertOID: "OID", +} + +//go:generate go run types_generate.go + +// Question holds a DNS question. There can be multiple questions in the +// question section of a message. Usually there is just one. +type Question struct { + Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed) + Qtype uint16 + Qclass uint16 +} + +func (q *Question) len(off int, compression map[string]struct{}) int { + l := domainNameLen(q.Name, off, compression, true) + l += 2 + 2 + return l +} + +func (q *Question) String() (s string) { + // prefix with ; (as in dig) + s = ";" + sprintName(q.Name) + "\t" + s += Class(q.Qclass).String() + "\t" + s += " " + Type(q.Qtype).String() + return s +} + +// ANY is a wildcard record. See RFC 1035, Section 3.2.3. 
ANY +// is named "*" there. +type ANY struct { + Hdr RR_Header + // Does not have any rdata +} + +func (rr *ANY) String() string { return rr.Hdr.String() } + +func (rr *ANY) parse(c *zlexer, origin string) *ParseError { + panic("dns: internal error: parse should never be called on ANY") +} + +// NULL RR. See RFC 1035. +type NULL struct { + Hdr RR_Header + Data string `dns:"any"` +} + +func (rr *NULL) String() string { + // There is no presentation format; prefix string with a comment. + return ";" + rr.Hdr.String() + rr.Data +} + +func (rr *NULL) parse(c *zlexer, origin string) *ParseError { + panic("dns: internal error: parse should never be called on NULL") +} + +// CNAME RR. See RFC 1034. +type CNAME struct { + Hdr RR_Header + Target string `dns:"cdomain-name"` +} + +func (rr *CNAME) String() string { return rr.Hdr.String() + sprintName(rr.Target) } + +// HINFO RR. See RFC 1034. +type HINFO struct { + Hdr RR_Header + Cpu string + Os string +} + +func (rr *HINFO) String() string { + return rr.Hdr.String() + sprintTxt([]string{rr.Cpu, rr.Os}) +} + +// MB RR. See RFC 1035. +type MB struct { + Hdr RR_Header + Mb string `dns:"cdomain-name"` +} + +func (rr *MB) String() string { return rr.Hdr.String() + sprintName(rr.Mb) } + +// MG RR. See RFC 1035. +type MG struct { + Hdr RR_Header + Mg string `dns:"cdomain-name"` +} + +func (rr *MG) String() string { return rr.Hdr.String() + sprintName(rr.Mg) } + +// MINFO RR. See RFC 1035. +type MINFO struct { + Hdr RR_Header + Rmail string `dns:"cdomain-name"` + Email string `dns:"cdomain-name"` +} + +func (rr *MINFO) String() string { + return rr.Hdr.String() + sprintName(rr.Rmail) + " " + sprintName(rr.Email) +} + +// MR RR. See RFC 1035. +type MR struct { + Hdr RR_Header + Mr string `dns:"cdomain-name"` +} + +func (rr *MR) String() string { + return rr.Hdr.String() + sprintName(rr.Mr) +} + +// MF RR. See RFC 1035. 
+type MF struct { + Hdr RR_Header + Mf string `dns:"cdomain-name"` +} + +func (rr *MF) String() string { + return rr.Hdr.String() + sprintName(rr.Mf) +} + +// MD RR. See RFC 1035. +type MD struct { + Hdr RR_Header + Md string `dns:"cdomain-name"` +} + +func (rr *MD) String() string { + return rr.Hdr.String() + sprintName(rr.Md) +} + +// MX RR. See RFC 1035. +type MX struct { + Hdr RR_Header + Preference uint16 + Mx string `dns:"cdomain-name"` +} + +func (rr *MX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Mx) +} + +// AFSDB RR. See RFC 1183. +type AFSDB struct { + Hdr RR_Header + Subtype uint16 + Hostname string `dns:"domain-name"` +} + +func (rr *AFSDB) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + sprintName(rr.Hostname) +} + +// X25 RR. See RFC 1183, Section 3.1. +type X25 struct { + Hdr RR_Header + PSDNAddress string +} + +func (rr *X25) String() string { + return rr.Hdr.String() + rr.PSDNAddress +} + +// RT RR. See RFC 1183, Section 3.3. +type RT struct { + Hdr RR_Header + Preference uint16 + Host string `dns:"domain-name"` // RFC 3597 prohibits compressing records not defined in RFC 1035. +} + +func (rr *RT) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Host) +} + +// NS RR. See RFC 1035. +type NS struct { + Hdr RR_Header + Ns string `dns:"cdomain-name"` +} + +func (rr *NS) String() string { + return rr.Hdr.String() + sprintName(rr.Ns) +} + +// PTR RR. See RFC 1035. +type PTR struct { + Hdr RR_Header + Ptr string `dns:"cdomain-name"` +} + +func (rr *PTR) String() string { + return rr.Hdr.String() + sprintName(rr.Ptr) +} + +// RP RR. See RFC 1138, Section 2.2. +type RP struct { + Hdr RR_Header + Mbox string `dns:"domain-name"` + Txt string `dns:"domain-name"` +} + +func (rr *RP) String() string { + return rr.Hdr.String() + sprintName(rr.Mbox) + " " + sprintName(rr.Txt) +} + +// SOA RR. See RFC 1035. 
+type SOA struct { + Hdr RR_Header + Ns string `dns:"cdomain-name"` + Mbox string `dns:"cdomain-name"` + Serial uint32 + Refresh uint32 + Retry uint32 + Expire uint32 + Minttl uint32 +} + +func (rr *SOA) String() string { + return rr.Hdr.String() + sprintName(rr.Ns) + " " + sprintName(rr.Mbox) + + " " + strconv.FormatInt(int64(rr.Serial), 10) + + " " + strconv.FormatInt(int64(rr.Refresh), 10) + + " " + strconv.FormatInt(int64(rr.Retry), 10) + + " " + strconv.FormatInt(int64(rr.Expire), 10) + + " " + strconv.FormatInt(int64(rr.Minttl), 10) +} + +// TXT RR. See RFC 1035. +type TXT struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +func sprintName(s string) string { + var dst strings.Builder + + for i := 0; i < len(s); { + if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' { + if dst.Len() != 0 { + dst.WriteString(s[i : i+2]) + } + i += 2 + continue + } + + b, n := nextByte(s, i) + if n == 0 { + i++ + continue + } + if b == '.' { + if dst.Len() != 0 { + dst.WriteByte('.') + } + i += n + continue + } + switch b { + case ' ', '\'', '@', ';', '(', ')', '"', '\\': // additional chars to escape + if dst.Len() == 0 { + dst.Grow(len(s) * 2) + dst.WriteString(s[:i]) + } + dst.WriteByte('\\') + dst.WriteByte(b) + default: + if ' ' <= b && b <= '~' { + if dst.Len() != 0 { + dst.WriteByte(b) + } + } else { + if dst.Len() == 0 { + dst.Grow(len(s) * 2) + dst.WriteString(s[:i]) + } + dst.WriteString(escapeByte(b)) + } + } + i += n + } + if dst.Len() == 0 { + return s + } + return dst.String() +} + +func sprintTxtOctet(s string) string { + var dst strings.Builder + dst.Grow(2 + len(s)) + dst.WriteByte('"') + for i := 0; i < len(s); { + if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' 
{ + dst.WriteString(s[i : i+2]) + i += 2 + continue + } + + b, n := nextByte(s, i) + switch { + case n == 0: + i++ // dangling back slash + case b == '.': + dst.WriteByte('.') + case b < ' ' || b > '~': + dst.WriteString(escapeByte(b)) + default: + dst.WriteByte(b) + } + i += n + } + dst.WriteByte('"') + return dst.String() +} + +func sprintTxt(txt []string) string { + var out strings.Builder + for i, s := range txt { + out.Grow(3 + len(s)) + if i > 0 { + out.WriteString(` "`) + } else { + out.WriteByte('"') + } + for j := 0; j < len(s); { + b, n := nextByte(s, j) + if n == 0 { + break + } + writeTXTStringByte(&out, b) + j += n + } + out.WriteByte('"') + } + return out.String() +} + +func writeTXTStringByte(s *strings.Builder, b byte) { + switch { + case b == '"' || b == '\\': + s.WriteByte('\\') + s.WriteByte(b) + case b < ' ' || b > '~': + s.WriteString(escapeByte(b)) + default: + s.WriteByte(b) + } +} + +const ( + escapedByteSmall = "" + + `\000\001\002\003\004\005\006\007\008\009` + + `\010\011\012\013\014\015\016\017\018\019` + + `\020\021\022\023\024\025\026\027\028\029` + + `\030\031` + escapedByteLarge = `\127\128\129` + + `\130\131\132\133\134\135\136\137\138\139` + + `\140\141\142\143\144\145\146\147\148\149` + + `\150\151\152\153\154\155\156\157\158\159` + + `\160\161\162\163\164\165\166\167\168\169` + + `\170\171\172\173\174\175\176\177\178\179` + + `\180\181\182\183\184\185\186\187\188\189` + + `\190\191\192\193\194\195\196\197\198\199` + + `\200\201\202\203\204\205\206\207\208\209` + + `\210\211\212\213\214\215\216\217\218\219` + + `\220\221\222\223\224\225\226\227\228\229` + + `\230\231\232\233\234\235\236\237\238\239` + + `\240\241\242\243\244\245\246\247\248\249` + + `\250\251\252\253\254\255` +) + +// escapeByte returns the \DDD escaping of b which must +// satisfy b < ' ' || b > '~'. +func escapeByte(b byte) string { + if b < ' ' { + return escapedByteSmall[b*4 : b*4+4] + } + + b -= '~' + 1 + // The cast here is needed as b*4 may overflow byte. 
+ return escapedByteLarge[int(b)*4 : int(b)*4+4] +} + +func nextByte(s string, offset int) (byte, int) { + if offset >= len(s) { + return 0, 0 + } + if s[offset] != '\\' { + // not an escape sequence + return s[offset], 1 + } + switch len(s) - offset { + case 1: // dangling escape + return 0, 0 + case 2, 3: // too short to be \ddd + default: // maybe \ddd + if isDigit(s[offset+1]) && isDigit(s[offset+2]) && isDigit(s[offset+3]) { + return dddStringToByte(s[offset+1:]), 4 + } + } + // not \ddd, just an RFC 1035 "quoted" character + return s[offset+1], 2 +} + +// SPF RR. See RFC 4408, Section 3.1.1. +type SPF struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *SPF) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +// AVC RR. See https://www.iana.org/assignments/dns-parameters/AVC/avc-completed-template. +type AVC struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *AVC) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +// SRV RR. See RFC 2782. +type SRV struct { + Hdr RR_Header + Priority uint16 + Weight uint16 + Port uint16 + Target string `dns:"domain-name"` +} + +func (rr *SRV) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Priority)) + " " + + strconv.Itoa(int(rr.Weight)) + " " + + strconv.Itoa(int(rr.Port)) + " " + sprintName(rr.Target) +} + +// NAPTR RR. See RFC 2915. +type NAPTR struct { + Hdr RR_Header + Order uint16 + Preference uint16 + Flags string + Service string + Regexp string + Replacement string `dns:"domain-name"` +} + +func (rr *NAPTR) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Order)) + " " + + strconv.Itoa(int(rr.Preference)) + " " + + "\"" + rr.Flags + "\" " + + "\"" + rr.Service + "\" " + + "\"" + rr.Regexp + "\" " + + rr.Replacement +} + +// CERT RR. See RFC 4398. 
+type CERT struct { + Hdr RR_Header + Type uint16 + KeyTag uint16 + Algorithm uint8 + Certificate string `dns:"base64"` +} + +func (rr *CERT) String() string { + var ( + ok bool + certtype, algorithm string + ) + if certtype, ok = CertTypeToString[rr.Type]; !ok { + certtype = strconv.Itoa(int(rr.Type)) + } + if algorithm, ok = AlgorithmToString[rr.Algorithm]; !ok { + algorithm = strconv.Itoa(int(rr.Algorithm)) + } + return rr.Hdr.String() + certtype + + " " + strconv.Itoa(int(rr.KeyTag)) + + " " + algorithm + + " " + rr.Certificate +} + +// DNAME RR. See RFC 2672. +type DNAME struct { + Hdr RR_Header + Target string `dns:"domain-name"` +} + +func (rr *DNAME) String() string { + return rr.Hdr.String() + sprintName(rr.Target) +} + +// A RR. See RFC 1035. +type A struct { + Hdr RR_Header + A net.IP `dns:"a"` +} + +func (rr *A) String() string { + if rr.A == nil { + return rr.Hdr.String() + } + return rr.Hdr.String() + rr.A.String() +} + +// AAAA RR. See RFC 3596. +type AAAA struct { + Hdr RR_Header + AAAA net.IP `dns:"aaaa"` +} + +func (rr *AAAA) String() string { + if rr.AAAA == nil { + return rr.Hdr.String() + } + return rr.Hdr.String() + rr.AAAA.String() +} + +// PX RR. See RFC 2163. +type PX struct { + Hdr RR_Header + Preference uint16 + Map822 string `dns:"domain-name"` + Mapx400 string `dns:"domain-name"` +} + +func (rr *PX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Map822) + " " + sprintName(rr.Mapx400) +} + +// GPOS RR. See RFC 1712. +type GPOS struct { + Hdr RR_Header + Longitude string + Latitude string + Altitude string +} + +func (rr *GPOS) String() string { + return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude +} + +// LOC RR. See RFC RFC 1876. 
+type LOC struct { + Hdr RR_Header + Version uint8 + Size uint8 + HorizPre uint8 + VertPre uint8 + Latitude uint32 + Longitude uint32 + Altitude uint32 +} + +// cmToM takes a cm value expressed in RFC1876 SIZE mantissa/exponent +// format and returns a string in m (two decimals for the cm) +func cmToM(m, e uint8) string { + if e < 2 { + if e == 1 { + m *= 10 + } + + return fmt.Sprintf("0.%02d", m) + } + + s := fmt.Sprintf("%d", m) + for e > 2 { + s += "0" + e-- + } + return s +} + +func (rr *LOC) String() string { + s := rr.Hdr.String() + + lat := rr.Latitude + ns := "N" + if lat > LOC_EQUATOR { + lat = lat - LOC_EQUATOR + } else { + ns = "S" + lat = LOC_EQUATOR - lat + } + h := lat / LOC_DEGREES + lat = lat % LOC_DEGREES + m := lat / LOC_HOURS + lat = lat % LOC_HOURS + s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, float64(lat)/1000, ns) + + lon := rr.Longitude + ew := "E" + if lon > LOC_PRIMEMERIDIAN { + lon = lon - LOC_PRIMEMERIDIAN + } else { + ew = "W" + lon = LOC_PRIMEMERIDIAN - lon + } + h = lon / LOC_DEGREES + lon = lon % LOC_DEGREES + m = lon / LOC_HOURS + lon = lon % LOC_HOURS + s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, float64(lon)/1000, ew) + + var alt = float64(rr.Altitude) / 100 + alt -= LOC_ALTITUDEBASE + if rr.Altitude%100 != 0 { + s += fmt.Sprintf("%.2fm ", alt) + } else { + s += fmt.Sprintf("%.0fm ", alt) + } + + s += cmToM(rr.Size&0xf0>>4, rr.Size&0x0f) + "m " + s += cmToM(rr.HorizPre&0xf0>>4, rr.HorizPre&0x0f) + "m " + s += cmToM(rr.VertPre&0xf0>>4, rr.VertPre&0x0f) + "m" + + return s +} + +// SIG RR. See RFC 2535. The SIG RR is identical to RRSIG and nowadays only used for SIG(0), See RFC 2931. +type SIG struct { + RRSIG +} + +// RRSIG RR. See RFC 4034 and RFC 3755. 
+type RRSIG struct { + Hdr RR_Header + TypeCovered uint16 + Algorithm uint8 + Labels uint8 + OrigTtl uint32 + Expiration uint32 + Inception uint32 + KeyTag uint16 + SignerName string `dns:"domain-name"` + Signature string `dns:"base64"` +} + +func (rr *RRSIG) String() string { + s := rr.Hdr.String() + s += Type(rr.TypeCovered).String() + s += " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.Labels)) + + " " + strconv.FormatInt(int64(rr.OrigTtl), 10) + + " " + TimeToString(rr.Expiration) + + " " + TimeToString(rr.Inception) + + " " + strconv.Itoa(int(rr.KeyTag)) + + " " + sprintName(rr.SignerName) + + " " + rr.Signature + return s +} + +// NSEC RR. See RFC 4034 and RFC 3755. +type NSEC struct { + Hdr RR_Header + NextDomain string `dns:"domain-name"` + TypeBitMap []uint16 `dns:"nsec"` +} + +func (rr *NSEC) String() string { + s := rr.Hdr.String() + sprintName(rr.NextDomain) + for _, t := range rr.TypeBitMap { + s += " " + Type(t).String() + } + return s +} + +func (rr *NSEC) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.NextDomain, off+l, compression, false) + l += typeBitMapLen(rr.TypeBitMap) + return l +} + +// DLV RR. See RFC 4431. +type DLV struct{ DS } + +// CDS RR. See RFC 7344. +type CDS struct{ DS } + +// DS RR. See RFC 4034 and RFC 3658. +type DS struct { + Hdr RR_Header + KeyTag uint16 + Algorithm uint8 + DigestType uint8 + Digest string `dns:"hex"` +} + +func (rr *DS) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.DigestType)) + + " " + strings.ToUpper(rr.Digest) +} + +// KX RR. See RFC 2230. +type KX struct { + Hdr RR_Header + Preference uint16 + Exchanger string `dns:"domain-name"` +} + +func (rr *KX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + + " " + sprintName(rr.Exchanger) +} + +// TA RR. 
See http://www.watson.org/~weiler/INI1999-19.pdf. +type TA struct { + Hdr RR_Header + KeyTag uint16 + Algorithm uint8 + DigestType uint8 + Digest string `dns:"hex"` +} + +func (rr *TA) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.DigestType)) + + " " + strings.ToUpper(rr.Digest) +} + +// TALINK RR. See https://www.iana.org/assignments/dns-parameters/TALINK/talink-completed-template. +type TALINK struct { + Hdr RR_Header + PreviousName string `dns:"domain-name"` + NextName string `dns:"domain-name"` +} + +func (rr *TALINK) String() string { + return rr.Hdr.String() + + sprintName(rr.PreviousName) + " " + sprintName(rr.NextName) +} + +// SSHFP RR. See RFC RFC 4255. +type SSHFP struct { + Hdr RR_Header + Algorithm uint8 + Type uint8 + FingerPrint string `dns:"hex"` +} + +func (rr *SSHFP) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.Type)) + + " " + strings.ToUpper(rr.FingerPrint) +} + +// KEY RR. See RFC RFC 2535. +type KEY struct { + DNSKEY +} + +// CDNSKEY RR. See RFC 7344. +type CDNSKEY struct { + DNSKEY +} + +// DNSKEY RR. See RFC 4034 and RFC 3755. +type DNSKEY struct { + Hdr RR_Header + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` +} + +func (rr *DNSKEY) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Protocol)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + rr.PublicKey +} + +// RKEY RR. See https://www.iana.org/assignments/dns-parameters/RKEY/rkey-completed-template. +type RKEY struct { + Hdr RR_Header + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` +} + +func (rr *RKEY) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Protocol)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + rr.PublicKey +} + +// NSAPPTR RR. 
See RFC 1348. +type NSAPPTR struct { + Hdr RR_Header + Ptr string `dns:"domain-name"` +} + +func (rr *NSAPPTR) String() string { return rr.Hdr.String() + sprintName(rr.Ptr) } + +// NSEC3 RR. See RFC 5155. +type NSEC3 struct { + Hdr RR_Header + Hash uint8 + Flags uint8 + Iterations uint16 + SaltLength uint8 + Salt string `dns:"size-hex:SaltLength"` + HashLength uint8 + NextDomain string `dns:"size-base32:HashLength"` + TypeBitMap []uint16 `dns:"nsec"` +} + +func (rr *NSEC3) String() string { + s := rr.Hdr.String() + s += strconv.Itoa(int(rr.Hash)) + + " " + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Iterations)) + + " " + saltToString(rr.Salt) + + " " + rr.NextDomain + for _, t := range rr.TypeBitMap { + s += " " + Type(t).String() + } + return s +} + +func (rr *NSEC3) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1 + l += typeBitMapLen(rr.TypeBitMap) + return l +} + +// NSEC3PARAM RR. See RFC 5155. +type NSEC3PARAM struct { + Hdr RR_Header + Hash uint8 + Flags uint8 + Iterations uint16 + SaltLength uint8 + Salt string `dns:"size-hex:SaltLength"` +} + +func (rr *NSEC3PARAM) String() string { + s := rr.Hdr.String() + s += strconv.Itoa(int(rr.Hash)) + + " " + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Iterations)) + + " " + saltToString(rr.Salt) + return s +} + +// TKEY RR. See RFC 2930. +type TKEY struct { + Hdr RR_Header + Algorithm string `dns:"domain-name"` + Inception uint32 + Expiration uint32 + Mode uint16 + Error uint16 + KeySize uint16 + Key string `dns:"size-hex:KeySize"` + OtherLen uint16 + OtherData string `dns:"size-hex:OtherLen"` +} + +// TKEY has no official presentation format, but this will suffice. 
+func (rr *TKEY) String() string { + s := ";" + rr.Hdr.String() + + " " + rr.Algorithm + + " " + TimeToString(rr.Inception) + + " " + TimeToString(rr.Expiration) + + " " + strconv.Itoa(int(rr.Mode)) + + " " + strconv.Itoa(int(rr.Error)) + + " " + strconv.Itoa(int(rr.KeySize)) + + " " + rr.Key + + " " + strconv.Itoa(int(rr.OtherLen)) + + " " + rr.OtherData + return s +} + +// RFC3597 represents an unknown/generic RR. See RFC 3597. +type RFC3597 struct { + Hdr RR_Header + Rdata string `dns:"hex"` +} + +func (rr *RFC3597) String() string { + // Let's call it a hack + s := rfc3597Header(rr.Hdr) + + s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata + return s +} + +func rfc3597Header(h RR_Header) string { + var s string + + s += sprintName(h.Name) + "\t" + s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" + s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t" + s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t" + return s +} + +// URI RR. See RFC 7553. +type URI struct { + Hdr RR_Header + Priority uint16 + Weight uint16 + Target string `dns:"octet"` +} + +func (rr *URI) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) + + " " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target) +} + +// DHCID RR. See RFC 4701. +type DHCID struct { + Hdr RR_Header + Digest string `dns:"base64"` +} + +func (rr *DHCID) String() string { return rr.Hdr.String() + rr.Digest } + +// TLSA RR. See RFC 6698. +type TLSA struct { + Hdr RR_Header + Usage uint8 + Selector uint8 + MatchingType uint8 + Certificate string `dns:"hex"` +} + +func (rr *TLSA) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Usage)) + + " " + strconv.Itoa(int(rr.Selector)) + + " " + strconv.Itoa(int(rr.MatchingType)) + + " " + rr.Certificate +} + +// SMIMEA RR. See RFC 8162. 
type SMIMEA struct {
	Hdr          RR_Header
	Usage        uint8
	Selector     uint8
	MatchingType uint8
	Certificate  string `dns:"hex"`
}

func (rr *SMIMEA) String() string {
	s := rr.Hdr.String() +
		strconv.Itoa(int(rr.Usage)) +
		" " + strconv.Itoa(int(rr.Selector)) +
		" " + strconv.Itoa(int(rr.MatchingType))

	// Every Nth char needs a space on this output. If we output
	// this as one giant line, we can't read it back in because in some
	// cases the cert length overflows scan.maxTok (2048).
	sx := splitN(rr.Certificate, 1024) // conservative value here
	s += " " + strings.Join(sx, " ")
	return s
}

// HIP RR. See RFC 8005.
type HIP struct {
	Hdr                RR_Header
	HitLength          uint8
	PublicKeyAlgorithm uint8
	PublicKeyLength    uint16
	Hit                string   `dns:"size-hex:HitLength"`
	PublicKey          string   `dns:"size-base64:PublicKeyLength"`
	RendezvousServers  []string `dns:"domain-name"`
}

// String prints the presentation format; the two length fields are
// implicit in Hit/PublicKey and are not printed.
func (rr *HIP) String() string {
	s := rr.Hdr.String() +
		strconv.Itoa(int(rr.PublicKeyAlgorithm)) +
		" " + rr.Hit +
		" " + rr.PublicKey
	for _, d := range rr.RendezvousServers {
		s += " " + sprintName(d)
	}
	return s
}

// NINFO RR. See https://www.iana.org/assignments/dns-parameters/NINFO/ninfo-completed-template.
type NINFO struct {
	Hdr    RR_Header
	ZSData []string `dns:"txt"`
}

func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) }

// NID RR. See RFC 6742.
type NID struct {
	Hdr        RR_Header
	Preference uint16
	NodeID     uint64
}

func (rr *NID) String() string {
	s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference))
	// Print the 64-bit NodeID as four colon-separated groups of 4 hex digits.
	node := fmt.Sprintf("%0.16x", rr.NodeID)
	s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16]
	return s
}

// L32 RR. See RFC 6742.
+type L32 struct { + Hdr RR_Header + Preference uint16 + Locator32 net.IP `dns:"a"` +} + +func (rr *L32) String() string { + if rr.Locator32 == nil { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + } + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + + " " + rr.Locator32.String() +} + +// L64 RR, See RFC 6742. +type L64 struct { + Hdr RR_Header + Preference uint16 + Locator64 uint64 +} + +func (rr *L64) String() string { + s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + node := fmt.Sprintf("%0.16X", rr.Locator64) + s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] + return s +} + +// LP RR. See RFC 6742. +type LP struct { + Hdr RR_Header + Preference uint16 + Fqdn string `dns:"domain-name"` +} + +func (rr *LP) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Fqdn) +} + +// EUI48 RR. See RFC 7043. +type EUI48 struct { + Hdr RR_Header + Address uint64 `dns:"uint48"` +} + +func (rr *EUI48) String() string { return rr.Hdr.String() + euiToString(rr.Address, 48) } + +// EUI64 RR. See RFC 7043. +type EUI64 struct { + Hdr RR_Header + Address uint64 +} + +func (rr *EUI64) String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) } + +// CAA RR. See RFC 6844. +type CAA struct { + Hdr RR_Header + Flag uint8 + Tag string + Value string `dns:"octet"` +} + +func (rr *CAA) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value) +} + +// UID RR. Deprecated, IANA-Reserved. +type UID struct { + Hdr RR_Header + Uid uint32 +} + +func (rr *UID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Uid), 10) } + +// GID RR. Deprecated, IANA-Reserved. +type GID struct { + Hdr RR_Header + Gid uint32 +} + +func (rr *GID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Gid), 10) } + +// UINFO RR. Deprecated, IANA-Reserved. 
+type UINFO struct { + Hdr RR_Header + Uinfo string +} + +func (rr *UINFO) String() string { return rr.Hdr.String() + sprintTxt([]string{rr.Uinfo}) } + +// EID RR. See http://ana-3.lcs.mit.edu/~jnc/nimrod/dns.txt. +type EID struct { + Hdr RR_Header + Endpoint string `dns:"hex"` +} + +func (rr *EID) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Endpoint) } + +// NIMLOC RR. See http://ana-3.lcs.mit.edu/~jnc/nimrod/dns.txt. +type NIMLOC struct { + Hdr RR_Header + Locator string `dns:"hex"` +} + +func (rr *NIMLOC) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Locator) } + +// OPENPGPKEY RR. See RFC 7929. +type OPENPGPKEY struct { + Hdr RR_Header + PublicKey string `dns:"base64"` +} + +func (rr *OPENPGPKEY) String() string { return rr.Hdr.String() + rr.PublicKey } + +// CSYNC RR. See RFC 7477. +type CSYNC struct { + Hdr RR_Header + Serial uint32 + Flags uint16 + TypeBitMap []uint16 `dns:"nsec"` +} + +func (rr *CSYNC) String() string { + s := rr.Hdr.String() + strconv.FormatInt(int64(rr.Serial), 10) + " " + strconv.Itoa(int(rr.Flags)) + + for _, t := range rr.TypeBitMap { + s += " " + Type(t).String() + } + return s +} + +func (rr *CSYNC) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 4 + 2 + l += typeBitMapLen(rr.TypeBitMap) + return l +} + +// APL RR. See RFC 3123. +type APL struct { + Hdr RR_Header + Prefixes []APLPrefix `dns:"apl"` +} + +// APLPrefix is an address prefix hold by an APL record. +type APLPrefix struct { + Negation bool + Network net.IPNet +} + +// String returns presentation form of the APL record. +func (rr *APL) String() string { + var sb strings.Builder + sb.WriteString(rr.Hdr.String()) + for i, p := range rr.Prefixes { + if i > 0 { + sb.WriteByte(' ') + } + sb.WriteString(p.str()) + } + return sb.String() +} + +// str returns presentation form of the APL prefix. 
+func (p *APLPrefix) str() string { + var sb strings.Builder + if p.Negation { + sb.WriteByte('!') + } + + switch len(p.Network.IP) { + case net.IPv4len: + sb.WriteByte('1') + case net.IPv6len: + sb.WriteByte('2') + } + + sb.WriteByte(':') + + switch len(p.Network.IP) { + case net.IPv4len: + sb.WriteString(p.Network.IP.String()) + case net.IPv6len: + // add prefix for IPv4-mapped IPv6 + if v4 := p.Network.IP.To4(); v4 != nil { + sb.WriteString("::ffff:") + } + sb.WriteString(p.Network.IP.String()) + } + + sb.WriteByte('/') + + prefix, _ := p.Network.Mask.Size() + sb.WriteString(strconv.Itoa(prefix)) + + return sb.String() +} + +// equals reports whether two APL prefixes are identical. +func (a *APLPrefix) equals(b *APLPrefix) bool { + return a.Negation == b.Negation && + bytes.Equal(a.Network.IP, b.Network.IP) && + bytes.Equal(a.Network.Mask, b.Network.Mask) +} + +// copy returns a copy of the APL prefix. +func (p *APLPrefix) copy() APLPrefix { + return APLPrefix{ + Negation: p.Negation, + Network: copyNet(p.Network), + } +} + +// len returns size of the prefix in wire format. +func (p *APLPrefix) len() int { + // 4-byte header and the network address prefix (see Section 4 of RFC 3123) + prefix, _ := p.Network.Mask.Size() + return 4 + (prefix+7)/8 +} + +// TimeToString translates the RRSIG's incep. and expir. times to the +// string representation used when printing the record. +// It takes serial arithmetic (RFC 1982) into account. +func TimeToString(t uint32) string { + mod := (int64(t)-time.Now().Unix())/year68 - 1 + if mod < 0 { + mod = 0 + } + ti := time.Unix(int64(t)-mod*year68, 0).UTC() + return ti.Format("20060102150405") +} + +// StringToTime translates the RRSIG's incep. and expir. times from +// string values like "20110403154150" to an 32 bit integer. +// It takes serial arithmetic (RFC 1982) into account. 
+func StringToTime(s string) (uint32, error) { + t, err := time.Parse("20060102150405", s) + if err != nil { + return 0, err + } + mod := t.Unix()/year68 - 1 + if mod < 0 { + mod = 0 + } + return uint32(t.Unix() - mod*year68), nil +} + +// saltToString converts a NSECX salt to uppercase and returns "-" when it is empty. +func saltToString(s string) string { + if len(s) == 0 { + return "-" + } + return strings.ToUpper(s) +} + +func euiToString(eui uint64, bits int) (hex string) { + switch bits { + case 64: + hex = fmt.Sprintf("%16.16x", eui) + hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + + "-" + hex[8:10] + "-" + hex[10:12] + "-" + hex[12:14] + "-" + hex[14:16] + case 48: + hex = fmt.Sprintf("%12.12x", eui) + hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + + "-" + hex[8:10] + "-" + hex[10:12] + } + return +} + +// copyIP returns a copy of ip. +func copyIP(ip net.IP) net.IP { + p := make(net.IP, len(ip)) + copy(p, ip) + return p +} + +// copyNet returns a copy of a subnet. +func copyNet(n net.IPNet) net.IPNet { + m := make(net.IPMask, len(n.Mask)) + copy(m, n.Mask) + + return net.IPNet{ + IP: copyIP(n.IP), + Mask: m, + } +} + +// SplitN splits a string into N sized string chunks. +// This might become an exported function once. +func splitN(s string, n int) []string { + if len(s) < n { + return []string{s} + } + sx := []string{} + p, i := 0, n + for { + if i <= len(s) { + sx = append(sx, s[p:i]) + } else { + sx = append(sx, s[p:]) + break + + } + p, i = p+n, i+n + } + + return sx +} diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go new file mode 100644 index 00000000..a4826ee2 --- /dev/null +++ b/vendor/github.com/miekg/dns/udp.go @@ -0,0 +1,102 @@ +// +build !windows + +package dns + +import ( + "net" + + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +// This is the required size of the OOB buffer to pass to ReadMsgUDP. 
+var udpOOBSize = func() int { + // We can't know whether we'll get an IPv4 control message or an + // IPv6 control message ahead of time. To get around this, we size + // the buffer equal to the largest of the two. + + oob4 := ipv4.NewControlMessage(ipv4.FlagDst | ipv4.FlagInterface) + oob6 := ipv6.NewControlMessage(ipv6.FlagDst | ipv6.FlagInterface) + + if len(oob4) > len(oob6) { + return len(oob4) + } + + return len(oob6) +}() + +// SessionUDP holds the remote address and the associated +// out-of-band data. +type SessionUDP struct { + raddr *net.UDPAddr + context []byte +} + +// RemoteAddr returns the remote network address. +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } + +// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a +// net.UDPAddr. +func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { + oob := make([]byte, udpOOBSize) + n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob) + if err != nil { + return n, nil, err + } + return n, &SessionUDP{raddr, oob[:oobn]}, err +} + +// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. +func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { + oob := correctSource(session.context) + n, _, err := conn.WriteMsgUDP(b, oob, session.raddr) + return n, err +} + +func setUDPSocketOptions(conn *net.UDPConn) error { + // Try setting the flags for both families and ignore the errors unless they + // both error. + err6 := ipv6.NewPacketConn(conn).SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true) + err4 := ipv4.NewPacketConn(conn).SetControlMessage(ipv4.FlagDst|ipv4.FlagInterface, true) + if err6 != nil && err4 != nil { + return err4 + } + return nil +} + +// parseDstFromOOB takes oob data and returns the destination IP. 
+func parseDstFromOOB(oob []byte) net.IP { + // Start with IPv6 and then fallback to IPv4 + // TODO(fastest963): Figure out a way to prefer one or the other. Looking at + // the lvl of the header for a 0 or 41 isn't cross-platform. + cm6 := new(ipv6.ControlMessage) + if cm6.Parse(oob) == nil && cm6.Dst != nil { + return cm6.Dst + } + cm4 := new(ipv4.ControlMessage) + if cm4.Parse(oob) == nil && cm4.Dst != nil { + return cm4.Dst + } + return nil +} + +// correctSource takes oob data and returns new oob data with the Src equal to the Dst +func correctSource(oob []byte) []byte { + dst := parseDstFromOOB(oob) + if dst == nil { + return nil + } + // If the dst is definitely an IPv6, then use ipv6's ControlMessage to + // respond otherwise use ipv4's because ipv6's marshal ignores ipv4 + // addresses. + if dst.To4() == nil { + cm := new(ipv6.ControlMessage) + cm.Src = dst + oob = cm.Marshal() + } else { + cm := new(ipv4.ControlMessage) + cm.Src = dst + oob = cm.Marshal() + } + return oob +} diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go new file mode 100644 index 00000000..e7dd8ca3 --- /dev/null +++ b/vendor/github.com/miekg/dns/udp_windows.go @@ -0,0 +1,35 @@ +// +build windows + +package dns + +import "net" + +// SessionUDP holds the remote address +type SessionUDP struct { + raddr *net.UDPAddr +} + +// RemoteAddr returns the remote network address. +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } + +// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a +// net.UDPAddr. +// TODO(fastest963): Once go1.10 is released, use ReadMsgUDP. +func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { + n, raddr, err := conn.ReadFrom(b) + if err != nil { + return n, nil, err + } + return n, &SessionUDP{raddr.(*net.UDPAddr)}, err +} + +// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. 
+// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP. +func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { + return conn.WriteTo(b, session.raddr) +} + +// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods +// use the standard method in udp.go for these. +func setUDPSocketOptions(*net.UDPConn) error { return nil } +func parseDstFromOOB([]byte, net.IP) net.IP { return nil } diff --git a/vendor/github.com/miekg/dns/update.go b/vendor/github.com/miekg/dns/update.go new file mode 100644 index 00000000..69dd3865 --- /dev/null +++ b/vendor/github.com/miekg/dns/update.go @@ -0,0 +1,110 @@ +package dns + +// NameUsed sets the RRs in the prereq section to +// "Name is in use" RRs. RFC 2136 section 2.4.4. +func (u *Msg) NameUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}) + } +} + +// NameNotUsed sets the RRs in the prereq section to +// "Name is in not use" RRs. RFC 2136 section 2.4.5. +func (u *Msg) NameNotUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}}) + } +} + +// Used sets the RRs in the prereq section to +// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2. +func (u *Msg) Used(rr []RR) { + if len(u.Question) == 0 { + panic("dns: empty question section") + } + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + r.Header().Class = u.Question[0].Qclass + u.Answer = append(u.Answer, r) + } +} + +// RRsetUsed sets the RRs in the prereq section to +// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1. 
func (u *Msg) RRsetUsed(rr []RR) {
	if u.Answer == nil {
		u.Answer = make([]RR, 0, len(rr))
	}
	for _, r := range rr {
		h := r.Header()
		// Only name and type matter for this prerequisite: Class ANY,
		// TTL 0 and empty RDATA encode "value independent" per RFC 2136.
		u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassANY}})
	}
}

// RRsetNotUsed sets the RRs in the prereq section to
// "RRset does not exist" RRs. RFC 2136 section 2.4.3.
func (u *Msg) RRsetNotUsed(rr []RR) {
	if u.Answer == nil {
		u.Answer = make([]RR, 0, len(rr))
	}
	for _, r := range rr {
		h := r.Header()
		// Class NONE (instead of ANY) distinguishes "does not exist".
		u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassNONE}})
	}
}

// Insert creates a dynamic update packet that adds a complete RRset, see RFC 2136 section 2.5.1.
func (u *Msg) Insert(rr []RR) {
	if len(u.Question) == 0 {
		panic("dns: empty question section")
	}
	if u.Ns == nil {
		u.Ns = make([]RR, 0, len(rr))
	}
	for _, r := range rr {
		// NOTE(review): mutates the caller's RR header class in place.
		r.Header().Class = u.Question[0].Qclass
		u.Ns = append(u.Ns, r)
	}
}

// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2.
func (u *Msg) RemoveRRset(rr []RR) {
	if u.Ns == nil {
		u.Ns = make([]RR, 0, len(rr))
	}
	for _, r := range rr {
		h := r.Header()
		// Class ANY + TTL 0 + empty RDATA means "delete the whole RRset".
		u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassANY}})
	}
}

// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3
func (u *Msg) RemoveName(rr []RR) {
	if u.Ns == nil {
		u.Ns = make([]RR, 0, len(rr))
	}
	for _, r := range rr {
		// Type ANY + Class ANY deletes every RRset owned by the name.
		u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
	}
}

// Remove creates a dynamic update packet that deletes an RR from an RRset, see RFC 2136 section 2.5.4
func (u *Msg) Remove(rr []RR) {
	if u.Ns == nil {
		u.Ns = make([]RR, 0, len(rr))
	}
	for _, r := range rr {
		h := r.Header()
		// NOTE(review): mutates the caller's RR headers in place;
		// Class NONE + TTL 0 mark "delete this exact RR" per RFC 2136.
		h.Class = ClassNONE
		h.Ttl = 0
		u.Ns = append(u.Ns, r)
	}
}
diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go
new file mode 100644
index 00000000..cab46b4f
--- /dev/null
+++ b/vendor/github.com/miekg/dns/version.go
@@ -0,0 +1,15 @@
package dns

import "fmt"

// Version is the current version of this library.
var Version = V{1, 1, 27}

// V holds the version of this library.
type V struct {
	Major, Minor, Patch int
}

// String returns the version in "major.minor.patch" form.
func (v V) String() string {
	return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
}
diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go
new file mode 100644
index 00000000..43970e64
--- /dev/null
+++ b/vendor/github.com/miekg/dns/xfr.go
@@ -0,0 +1,266 @@
package dns

import (
	"fmt"
	"time"
)

// Envelope is used when doing a zone transfer with a remote server.
type Envelope struct {
	RR    []RR  // The set of RRs in the answer section of the xfr reply message.
	Error error // If something went wrong, this contains the error.
}

// A Transfer defines parameters that are used during a zone transfer.
+type Transfer struct { + *Conn + DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds + ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds + WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds + TsigSecret map[string]string // Secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) + tsigTimersOnly bool +} + +// Think we need to away to stop the transfer + +// In performs an incoming transfer with the server in a. +// If you would like to set the source IP, or some other attribute +// of a Dialer for a Transfer, you can do so by specifying the attributes +// in the Transfer.Conn: +// +// d := net.Dialer{LocalAddr: transfer_source} +// con, err := d.Dial("tcp", master) +// dnscon := &dns.Conn{Conn:con} +// transfer = &dns.Transfer{Conn: dnscon} +// channel, err := transfer.In(message, master) +// +func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { + switch q.Question[0].Qtype { + case TypeAXFR, TypeIXFR: + default: + return nil, &Error{"unsupported question type"} + } + + timeout := dnsTimeout + if t.DialTimeout != 0 { + timeout = t.DialTimeout + } + + if t.Conn == nil { + t.Conn, err = DialTimeout("tcp", a, timeout) + if err != nil { + return nil, err + } + } + + if err := t.WriteMsg(q); err != nil { + return nil, err + } + + env = make(chan *Envelope) + switch q.Question[0].Qtype { + case TypeAXFR: + go t.inAxfr(q, env) + case TypeIXFR: + go t.inIxfr(q, env) + } + + return env, nil +} + +func (t *Transfer) inAxfr(q *Msg, c chan *Envelope) { + first := true + defer t.Close() + defer close(c) + timeout := dnsTimeout + if t.ReadTimeout != 0 { + timeout = t.ReadTimeout + } + for { + t.Conn.SetReadDeadline(time.Now().Add(timeout)) + in, err := t.ReadMsg() + if err != nil { + c <- &Envelope{nil, err} + return + } + if q.Id != in.Id { + c <- &Envelope{in.Answer, ErrId} + return + } + if 
first { + if in.Rcode != RcodeSuccess { + c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}} + return + } + if !isSOAFirst(in) { + c <- &Envelope{in.Answer, ErrSoa} + return + } + first = !first + // only one answer that is SOA, receive more + if len(in.Answer) == 1 { + t.tsigTimersOnly = true + c <- &Envelope{in.Answer, nil} + continue + } + } + + if !first { + t.tsigTimersOnly = true // Subsequent envelopes use this. + if isSOALast(in) { + c <- &Envelope{in.Answer, nil} + return + } + c <- &Envelope{in.Answer, nil} + } + } +} + +func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) { + var serial uint32 // The first serial seen is the current server serial + axfr := true + n := 0 + qser := q.Ns[0].(*SOA).Serial + defer t.Close() + defer close(c) + timeout := dnsTimeout + if t.ReadTimeout != 0 { + timeout = t.ReadTimeout + } + for { + t.SetReadDeadline(time.Now().Add(timeout)) + in, err := t.ReadMsg() + if err != nil { + c <- &Envelope{nil, err} + return + } + if q.Id != in.Id { + c <- &Envelope{in.Answer, ErrId} + return + } + if in.Rcode != RcodeSuccess { + c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}} + return + } + if n == 0 { + // Check if the returned answer is ok + if !isSOAFirst(in) { + c <- &Envelope{in.Answer, ErrSoa} + return + } + // This serial is important + serial = in.Answer[0].(*SOA).Serial + // Check if there are no changes in zone + if qser >= serial { + c <- &Envelope{in.Answer, nil} + return + } + } + // Now we need to check each message for SOA records, to see what we need to do + t.tsigTimersOnly = true + for _, rr := range in.Answer { + if v, ok := rr.(*SOA); ok { + if v.Serial == serial { + n++ + // quit if it's a full axfr or the the servers' SOA is repeated the third time + if axfr && n == 2 || n == 3 { + c <- &Envelope{in.Answer, nil} + return + } + } else if axfr { + // it's an ixfr + axfr = false + } + } + } + c <- &Envelope{in.Answer, nil} + } +} + +// Out performs an outgoing transfer with 
the client connecting in w. +// Basic use pattern: +// +// ch := make(chan *dns.Envelope) +// tr := new(dns.Transfer) +// var wg sync.WaitGroup +// go func() { +// tr.Out(w, r, ch) +// wg.Done() +// }() +// ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}} +// close(ch) +// wg.Wait() // wait until everything is written out +// w.Close() // close connection +// +// The server is responsible for sending the correct sequence of RRs through the channel ch. +func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error { + for x := range ch { + r := new(Msg) + // Compress? + r.SetReply(q) + r.Authoritative = true + // assume it fits TODO(miek): fix + r.Answer = append(r.Answer, x.RR...) + if tsig := q.IsTsig(); tsig != nil && w.TsigStatus() == nil { + r.SetTsig(tsig.Hdr.Name, tsig.Algorithm, tsig.Fudge, time.Now().Unix()) + } + if err := w.WriteMsg(r); err != nil { + return err + } + w.TsigTimersOnly(true) + } + return nil +} + +// ReadMsg reads a message from the transfer connection t. +func (t *Transfer) ReadMsg() (*Msg, error) { + m := new(Msg) + p := make([]byte, MaxMsgSize) + n, err := t.Read(p) + if err != nil && n == 0 { + return nil, err + } + p = p[:n] + if err := m.Unpack(p); err != nil { + return nil, err + } + if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { + if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { + return m, ErrSecret + } + // Need to work on the original message p, as that was used to calculate the tsig. + err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) + t.tsigRequestMAC = ts.MAC + } + return m, err +} + +// WriteMsg writes a message through the transfer connection t. 
+func (t *Transfer) WriteMsg(m *Msg) (err error) { + var out []byte + if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { + if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { + return ErrSecret + } + out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) + } else { + out, err = m.Pack() + } + if err != nil { + return err + } + _, err = t.Write(out) + return err +} + +func isSOAFirst(in *Msg) bool { + return len(in.Answer) > 0 && + in.Answer[0].Header().Rrtype == TypeSOA +} + +func isSOALast(in *Msg) bool { + return len(in.Answer) > 0 && + in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA +} + +const errXFR = "bad xfr rcode: %d" diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go new file mode 100644 index 00000000..a58a8c0c --- /dev/null +++ b/vendor/github.com/miekg/dns/zduplicate.go @@ -0,0 +1,1157 @@ +// Code generated by "go run duplicate_generate.go"; DO NOT EDIT. + +package dns + +// isDuplicate() functions + +func (r1 *A) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*A) + if !ok { + return false + } + _ = r2 + if !r1.A.Equal(r2.A) { + return false + } + return true +} + +func (r1 *AAAA) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*AAAA) + if !ok { + return false + } + _ = r2 + if !r1.AAAA.Equal(r2.AAAA) { + return false + } + return true +} + +func (r1 *AFSDB) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*AFSDB) + if !ok { + return false + } + _ = r2 + if r1.Subtype != r2.Subtype { + return false + } + if !isDuplicateName(r1.Hostname, r2.Hostname) { + return false + } + return true +} + +func (r1 *ANY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*ANY) + if !ok { + return false + } + _ = r2 + return true +} + +func (r1 *APL) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*APL) + if !ok { + return false + } + _ = r2 + if len(r1.Prefixes) != len(r2.Prefixes) { + return false + } + for i := 0; i < len(r1.Prefixes); i++ { + if !r1.Prefixes[i].equals(&r2.Prefixes[i]) 
{ + return false + } + } + return true +} + +func (r1 *AVC) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*AVC) + if !ok { + return false + } + _ = r2 + if len(r1.Txt) != len(r2.Txt) { + return false + } + for i := 0; i < len(r1.Txt); i++ { + if r1.Txt[i] != r2.Txt[i] { + return false + } + } + return true +} + +func (r1 *CAA) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*CAA) + if !ok { + return false + } + _ = r2 + if r1.Flag != r2.Flag { + return false + } + if r1.Tag != r2.Tag { + return false + } + if r1.Value != r2.Value { + return false + } + return true +} + +func (r1 *CERT) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*CERT) + if !ok { + return false + } + _ = r2 + if r1.Type != r2.Type { + return false + } + if r1.KeyTag != r2.KeyTag { + return false + } + if r1.Algorithm != r2.Algorithm { + return false + } + if r1.Certificate != r2.Certificate { + return false + } + return true +} + +func (r1 *CNAME) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*CNAME) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Target, r2.Target) { + return false + } + return true +} + +func (r1 *CSYNC) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*CSYNC) + if !ok { + return false + } + _ = r2 + if r1.Serial != r2.Serial { + return false + } + if r1.Flags != r2.Flags { + return false + } + if len(r1.TypeBitMap) != len(r2.TypeBitMap) { + return false + } + for i := 0; i < len(r1.TypeBitMap); i++ { + if r1.TypeBitMap[i] != r2.TypeBitMap[i] { + return false + } + } + return true +} + +func (r1 *DHCID) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*DHCID) + if !ok { + return false + } + _ = r2 + if r1.Digest != r2.Digest { + return false + } + return true +} + +func (r1 *DNAME) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*DNAME) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Target, r2.Target) { + return false + } + return true +} + +func (r1 *DNSKEY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*DNSKEY) + if !ok { + return false + } + _ = r2 + if r1.Flags != 
r2.Flags { + return false + } + if r1.Protocol != r2.Protocol { + return false + } + if r1.Algorithm != r2.Algorithm { + return false + } + if r1.PublicKey != r2.PublicKey { + return false + } + return true +} + +func (r1 *DS) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*DS) + if !ok { + return false + } + _ = r2 + if r1.KeyTag != r2.KeyTag { + return false + } + if r1.Algorithm != r2.Algorithm { + return false + } + if r1.DigestType != r2.DigestType { + return false + } + if r1.Digest != r2.Digest { + return false + } + return true +} + +func (r1 *EID) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*EID) + if !ok { + return false + } + _ = r2 + if r1.Endpoint != r2.Endpoint { + return false + } + return true +} + +func (r1 *EUI48) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*EUI48) + if !ok { + return false + } + _ = r2 + if r1.Address != r2.Address { + return false + } + return true +} + +func (r1 *EUI64) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*EUI64) + if !ok { + return false + } + _ = r2 + if r1.Address != r2.Address { + return false + } + return true +} + +func (r1 *GID) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*GID) + if !ok { + return false + } + _ = r2 + if r1.Gid != r2.Gid { + return false + } + return true +} + +func (r1 *GPOS) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*GPOS) + if !ok { + return false + } + _ = r2 + if r1.Longitude != r2.Longitude { + return false + } + if r1.Latitude != r2.Latitude { + return false + } + if r1.Altitude != r2.Altitude { + return false + } + return true +} + +func (r1 *HINFO) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*HINFO) + if !ok { + return false + } + _ = r2 + if r1.Cpu != r2.Cpu { + return false + } + if r1.Os != r2.Os { + return false + } + return true +} + +func (r1 *HIP) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*HIP) + if !ok { + return false + } + _ = r2 + if r1.HitLength != r2.HitLength { + return false + } + if r1.PublicKeyAlgorithm != r2.PublicKeyAlgorithm { + return false + } + if 
r1.PublicKeyLength != r2.PublicKeyLength { + return false + } + if r1.Hit != r2.Hit { + return false + } + if r1.PublicKey != r2.PublicKey { + return false + } + if len(r1.RendezvousServers) != len(r2.RendezvousServers) { + return false + } + for i := 0; i < len(r1.RendezvousServers); i++ { + if !isDuplicateName(r1.RendezvousServers[i], r2.RendezvousServers[i]) { + return false + } + } + return true +} + +func (r1 *KX) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*KX) + if !ok { + return false + } + _ = r2 + if r1.Preference != r2.Preference { + return false + } + if !isDuplicateName(r1.Exchanger, r2.Exchanger) { + return false + } + return true +} + +func (r1 *L32) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*L32) + if !ok { + return false + } + _ = r2 + if r1.Preference != r2.Preference { + return false + } + if !r1.Locator32.Equal(r2.Locator32) { + return false + } + return true +} + +func (r1 *L64) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*L64) + if !ok { + return false + } + _ = r2 + if r1.Preference != r2.Preference { + return false + } + if r1.Locator64 != r2.Locator64 { + return false + } + return true +} + +func (r1 *LOC) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*LOC) + if !ok { + return false + } + _ = r2 + if r1.Version != r2.Version { + return false + } + if r1.Size != r2.Size { + return false + } + if r1.HorizPre != r2.HorizPre { + return false + } + if r1.VertPre != r2.VertPre { + return false + } + if r1.Latitude != r2.Latitude { + return false + } + if r1.Longitude != r2.Longitude { + return false + } + if r1.Altitude != r2.Altitude { + return false + } + return true +} + +func (r1 *LP) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*LP) + if !ok { + return false + } + _ = r2 + if r1.Preference != r2.Preference { + return false + } + if !isDuplicateName(r1.Fqdn, r2.Fqdn) { + return false + } + return true +} + +func (r1 *MB) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MB) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Mb, r2.Mb) 
{ + return false + } + return true +} + +func (r1 *MD) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MD) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Md, r2.Md) { + return false + } + return true +} + +func (r1 *MF) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MF) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Mf, r2.Mf) { + return false + } + return true +} + +func (r1 *MG) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MG) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Mg, r2.Mg) { + return false + } + return true +} + +func (r1 *MINFO) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MINFO) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Rmail, r2.Rmail) { + return false + } + if !isDuplicateName(r1.Email, r2.Email) { + return false + } + return true +} + +func (r1 *MR) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MR) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Mr, r2.Mr) { + return false + } + return true +} + +func (r1 *MX) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MX) + if !ok { + return false + } + _ = r2 + if r1.Preference != r2.Preference { + return false + } + if !isDuplicateName(r1.Mx, r2.Mx) { + return false + } + return true +} + +func (r1 *NAPTR) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NAPTR) + if !ok { + return false + } + _ = r2 + if r1.Order != r2.Order { + return false + } + if r1.Preference != r2.Preference { + return false + } + if r1.Flags != r2.Flags { + return false + } + if r1.Service != r2.Service { + return false + } + if r1.Regexp != r2.Regexp { + return false + } + if !isDuplicateName(r1.Replacement, r2.Replacement) { + return false + } + return true +} + +func (r1 *NID) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NID) + if !ok { + return false + } + _ = r2 + if r1.Preference != r2.Preference { + return false + } + if r1.NodeID != r2.NodeID { + return false + } + return true +} + +func (r1 *NIMLOC) isDuplicate(_r2 RR) bool { + r2, ok := 
_r2.(*NIMLOC) + if !ok { + return false + } + _ = r2 + if r1.Locator != r2.Locator { + return false + } + return true +} + +func (r1 *NINFO) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NINFO) + if !ok { + return false + } + _ = r2 + if len(r1.ZSData) != len(r2.ZSData) { + return false + } + for i := 0; i < len(r1.ZSData); i++ { + if r1.ZSData[i] != r2.ZSData[i] { + return false + } + } + return true +} + +func (r1 *NS) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NS) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Ns, r2.Ns) { + return false + } + return true +} + +func (r1 *NSAPPTR) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NSAPPTR) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Ptr, r2.Ptr) { + return false + } + return true +} + +func (r1 *NSEC) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NSEC) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.NextDomain, r2.NextDomain) { + return false + } + if len(r1.TypeBitMap) != len(r2.TypeBitMap) { + return false + } + for i := 0; i < len(r1.TypeBitMap); i++ { + if r1.TypeBitMap[i] != r2.TypeBitMap[i] { + return false + } + } + return true +} + +func (r1 *NSEC3) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NSEC3) + if !ok { + return false + } + _ = r2 + if r1.Hash != r2.Hash { + return false + } + if r1.Flags != r2.Flags { + return false + } + if r1.Iterations != r2.Iterations { + return false + } + if r1.SaltLength != r2.SaltLength { + return false + } + if r1.Salt != r2.Salt { + return false + } + if r1.HashLength != r2.HashLength { + return false + } + if r1.NextDomain != r2.NextDomain { + return false + } + if len(r1.TypeBitMap) != len(r2.TypeBitMap) { + return false + } + for i := 0; i < len(r1.TypeBitMap); i++ { + if r1.TypeBitMap[i] != r2.TypeBitMap[i] { + return false + } + } + return true +} + +func (r1 *NSEC3PARAM) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NSEC3PARAM) + if !ok { + return false + } + _ = r2 + if r1.Hash != r2.Hash { + return false + } + if 
r1.Flags != r2.Flags { + return false + } + if r1.Iterations != r2.Iterations { + return false + } + if r1.SaltLength != r2.SaltLength { + return false + } + if r1.Salt != r2.Salt { + return false + } + return true +} + +func (r1 *NULL) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NULL) + if !ok { + return false + } + _ = r2 + if r1.Data != r2.Data { + return false + } + return true +} + +func (r1 *OPENPGPKEY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*OPENPGPKEY) + if !ok { + return false + } + _ = r2 + if r1.PublicKey != r2.PublicKey { + return false + } + return true +} + +func (r1 *PTR) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*PTR) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Ptr, r2.Ptr) { + return false + } + return true +} + +func (r1 *PX) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*PX) + if !ok { + return false + } + _ = r2 + if r1.Preference != r2.Preference { + return false + } + if !isDuplicateName(r1.Map822, r2.Map822) { + return false + } + if !isDuplicateName(r1.Mapx400, r2.Mapx400) { + return false + } + return true +} + +func (r1 *RFC3597) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*RFC3597) + if !ok { + return false + } + _ = r2 + if r1.Rdata != r2.Rdata { + return false + } + return true +} + +func (r1 *RKEY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*RKEY) + if !ok { + return false + } + _ = r2 + if r1.Flags != r2.Flags { + return false + } + if r1.Protocol != r2.Protocol { + return false + } + if r1.Algorithm != r2.Algorithm { + return false + } + if r1.PublicKey != r2.PublicKey { + return false + } + return true +} + +func (r1 *RP) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*RP) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Mbox, r2.Mbox) { + return false + } + if !isDuplicateName(r1.Txt, r2.Txt) { + return false + } + return true +} + +func (r1 *RRSIG) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*RRSIG) + if !ok { + return false + } + _ = r2 + if r1.TypeCovered != r2.TypeCovered { + return false 
+ } + if r1.Algorithm != r2.Algorithm { + return false + } + if r1.Labels != r2.Labels { + return false + } + if r1.OrigTtl != r2.OrigTtl { + return false + } + if r1.Expiration != r2.Expiration { + return false + } + if r1.Inception != r2.Inception { + return false + } + if r1.KeyTag != r2.KeyTag { + return false + } + if !isDuplicateName(r1.SignerName, r2.SignerName) { + return false + } + if r1.Signature != r2.Signature { + return false + } + return true +} + +func (r1 *RT) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*RT) + if !ok { + return false + } + _ = r2 + if r1.Preference != r2.Preference { + return false + } + if !isDuplicateName(r1.Host, r2.Host) { + return false + } + return true +} + +func (r1 *SMIMEA) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*SMIMEA) + if !ok { + return false + } + _ = r2 + if r1.Usage != r2.Usage { + return false + } + if r1.Selector != r2.Selector { + return false + } + if r1.MatchingType != r2.MatchingType { + return false + } + if r1.Certificate != r2.Certificate { + return false + } + return true +} + +func (r1 *SOA) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*SOA) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Ns, r2.Ns) { + return false + } + if !isDuplicateName(r1.Mbox, r2.Mbox) { + return false + } + if r1.Serial != r2.Serial { + return false + } + if r1.Refresh != r2.Refresh { + return false + } + if r1.Retry != r2.Retry { + return false + } + if r1.Expire != r2.Expire { + return false + } + if r1.Minttl != r2.Minttl { + return false + } + return true +} + +func (r1 *SPF) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*SPF) + if !ok { + return false + } + _ = r2 + if len(r1.Txt) != len(r2.Txt) { + return false + } + for i := 0; i < len(r1.Txt); i++ { + if r1.Txt[i] != r2.Txt[i] { + return false + } + } + return true +} + +func (r1 *SRV) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*SRV) + if !ok { + return false + } + _ = r2 + if r1.Priority != r2.Priority { + return false + } + if r1.Weight != r2.Weight { 
+ return false + } + if r1.Port != r2.Port { + return false + } + if !isDuplicateName(r1.Target, r2.Target) { + return false + } + return true +} + +func (r1 *SSHFP) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*SSHFP) + if !ok { + return false + } + _ = r2 + if r1.Algorithm != r2.Algorithm { + return false + } + if r1.Type != r2.Type { + return false + } + if r1.FingerPrint != r2.FingerPrint { + return false + } + return true +} + +func (r1 *TA) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*TA) + if !ok { + return false + } + _ = r2 + if r1.KeyTag != r2.KeyTag { + return false + } + if r1.Algorithm != r2.Algorithm { + return false + } + if r1.DigestType != r2.DigestType { + return false + } + if r1.Digest != r2.Digest { + return false + } + return true +} + +func (r1 *TALINK) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*TALINK) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.PreviousName, r2.PreviousName) { + return false + } + if !isDuplicateName(r1.NextName, r2.NextName) { + return false + } + return true +} + +func (r1 *TKEY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*TKEY) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Algorithm, r2.Algorithm) { + return false + } + if r1.Inception != r2.Inception { + return false + } + if r1.Expiration != r2.Expiration { + return false + } + if r1.Mode != r2.Mode { + return false + } + if r1.Error != r2.Error { + return false + } + if r1.KeySize != r2.KeySize { + return false + } + if r1.Key != r2.Key { + return false + } + if r1.OtherLen != r2.OtherLen { + return false + } + if r1.OtherData != r2.OtherData { + return false + } + return true +} + +func (r1 *TLSA) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*TLSA) + if !ok { + return false + } + _ = r2 + if r1.Usage != r2.Usage { + return false + } + if r1.Selector != r2.Selector { + return false + } + if r1.MatchingType != r2.MatchingType { + return false + } + if r1.Certificate != r2.Certificate { + return false + } + return true +} + +func 
(r1 *TSIG) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*TSIG) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Algorithm, r2.Algorithm) { + return false + } + if r1.TimeSigned != r2.TimeSigned { + return false + } + if r1.Fudge != r2.Fudge { + return false + } + if r1.MACSize != r2.MACSize { + return false + } + if r1.MAC != r2.MAC { + return false + } + if r1.OrigId != r2.OrigId { + return false + } + if r1.Error != r2.Error { + return false + } + if r1.OtherLen != r2.OtherLen { + return false + } + if r1.OtherData != r2.OtherData { + return false + } + return true +} + +func (r1 *TXT) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*TXT) + if !ok { + return false + } + _ = r2 + if len(r1.Txt) != len(r2.Txt) { + return false + } + for i := 0; i < len(r1.Txt); i++ { + if r1.Txt[i] != r2.Txt[i] { + return false + } + } + return true +} + +func (r1 *UID) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*UID) + if !ok { + return false + } + _ = r2 + if r1.Uid != r2.Uid { + return false + } + return true +} + +func (r1 *UINFO) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*UINFO) + if !ok { + return false + } + _ = r2 + if r1.Uinfo != r2.Uinfo { + return false + } + return true +} + +func (r1 *URI) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*URI) + if !ok { + return false + } + _ = r2 + if r1.Priority != r2.Priority { + return false + } + if r1.Weight != r2.Weight { + return false + } + if r1.Target != r2.Target { + return false + } + return true +} + +func (r1 *X25) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*X25) + if !ok { + return false + } + _ = r2 + if r1.PSDNAddress != r2.PSDNAddress { + return false + } + return true +} diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go new file mode 100644 index 00000000..02a5dfa4 --- /dev/null +++ b/vendor/github.com/miekg/dns/zmsg.go @@ -0,0 +1,2741 @@ +// Code generated by "go run msg_generate.go"; DO NOT EDIT. 
+ +package dns + +// pack*() functions + +func (rr *A) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDataA(rr.A, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *AAAA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDataAAAA(rr.AAAA, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *AFSDB) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Subtype, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Hostname, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *ANY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + return off, nil +} + +func (rr *APL) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDataApl(rr.Prefixes, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *AVC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CAA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.Flag, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Tag, msg, off) + if err != nil { + return off, err + } + off, err = packStringOctet(rr.Value, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CDNSKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + 
if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CDS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CERT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Type, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Certificate, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CNAME) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Target, msg, off, compression, compress) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CSYNC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint32(rr.Serial, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *DHCID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringBase64(rr.Digest, 
msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *DLV) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *DNAME) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Target, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *DNSKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *DS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *EID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringHex(rr.Endpoint, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr 
*EUI48) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint48(rr.Address, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *EUI64) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint64(rr.Address, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *GID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint32(rr.Gid, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *GPOS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packString(rr.Longitude, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Latitude, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Altitude, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *HINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packString(rr.Cpu, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Os, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *HIP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.HitLength, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.PublicKeyAlgorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.PublicKeyLength, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Hit, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + off, err = packDataDomainNames(rr.RendezvousServers, msg, off, compression, false) + if err != 
nil { + return off, err + } + return off, nil +} + +func (rr *KEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *KX) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Exchanger, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *L32) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packDataA(rr.Locator32, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *L64) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packUint64(rr.Locator64, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *LOC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.Version, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Size, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.HorizPre, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.VertPre, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Latitude, msg, off) + if err != nil { + 
return off, err + } + off, err = packUint32(rr.Longitude, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Altitude, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *LP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Fqdn, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MB) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Mb, msg, off, compression, compress) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MD) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Md, msg, off, compression, compress) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MF) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Mf, msg, off, compression, compress) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Mg, msg, off, compression, compress) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Rmail, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Email, msg, off, compression, compress) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Mr, 
msg, off, compression, compress) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MX) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Mx, msg, off, compression, compress) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NAPTR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Order, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Service, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Regexp, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Replacement, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packUint64(rr.NodeID, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NIMLOC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringHex(rr.Locator, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringTxt(rr.ZSData, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Ns, msg, 
off, compression, compress) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NSAPPTR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Ptr, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NSEC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.NextDomain, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NSEC3) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.Hash, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Iterations, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.SaltLength, msg, off) + if err != nil { + return off, err + } + // Only pack salt if value is not "-", i.e. 
empty + if rr.Salt != "-" { + off, err = packStringHex(rr.Salt, msg, off) + if err != nil { + return off, err + } + } + off, err = packUint8(rr.HashLength, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase32(rr.NextDomain, msg, off) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NSEC3PARAM) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.Hash, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Iterations, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.SaltLength, msg, off) + if err != nil { + return off, err + } + // Only pack salt if value is not "-", i.e. empty + if rr.Salt != "-" { + off, err = packStringHex(rr.Salt, msg, off) + if err != nil { + return off, err + } + } + return off, nil +} + +func (rr *NULL) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringAny(rr.Data, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *OPENPGPKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *OPT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDataOpt(rr.Option, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *PTR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Ptr, msg, off, compression, compress) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *PX) pack(msg 
[]byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Map822, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Mapx400, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *RFC3597) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringHex(rr.Rdata, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *RKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *RP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Mbox, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Txt, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *RRSIG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.TypeCovered, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return 
off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.SignerName, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Signature, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *RT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Host, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SIG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.TypeCovered, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.SignerName, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Signature, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SMIMEA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.Usage, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Selector, msg, off) + if err != nil { + return off, err + } + off, err = 
packUint8(rr.MatchingType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Certificate, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SOA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Ns, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Mbox, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint32(rr.Serial, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Refresh, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Retry, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expire, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Minttl, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SPF) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SRV) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Priority, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Weight, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Port, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Target, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SSHFP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Type, msg, off) + if err != nil { + return off, err + } + off, err = 
packStringHex(rr.FingerPrint, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TALINK) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.PreviousName, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packDomainName(rr.NextName, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Algorithm, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Mode, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeySize, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Key, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.OtherData, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TLSA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, 
err = packUint8(rr.Usage, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Selector, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.MatchingType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Certificate, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TSIG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Algorithm, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packUint48(rr.TimeSigned, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Fudge, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.MACSize, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.MAC, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OrigId, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.OtherData, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TXT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *UID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint32(rr.Uid, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *UINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packString(rr.Uinfo, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *URI) pack(msg []byte, off int, 
compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Priority, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Weight, msg, off) + if err != nil { + return off, err + } + off, err = packStringOctet(rr.Target, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *X25) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packString(rr.PSDNAddress, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +// unpack*() functions + +func (rr *A) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.A, off, err = unpackDataA(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *AAAA) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.AAAA, off, err = unpackDataAAAA(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *AFSDB) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Subtype, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Hostname, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *ANY) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + return off, nil +} + +func (rr *APL) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Prefixes, off, err = unpackDataApl(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *AVC) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CAA) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := 
off + _ = rdStart + + rr.Flag, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Tag, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Value, off, err = unpackStringOctet(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CDNSKEY) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CDS) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CERT) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Type, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return off, 
err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Certificate, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CNAME) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CSYNC) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Serial, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *DHCID) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Digest, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *DLV) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *DNAME) unpack(msg []byte, off int) (off1 int, err error) 
{ + rdStart := off + _ = rdStart + + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *DNSKEY) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *DS) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *EID) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Endpoint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *EUI48) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Address, off, err = unpackUint48(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *EUI64) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = 
rdStart + + rr.Address, off, err = unpackUint64(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *GID) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Gid, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *GPOS) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Longitude, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Latitude, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Altitude, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *HINFO) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Cpu, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Os, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *HIP) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.HitLength, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.PublicKeyAlgorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.PublicKeyLength, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Hit, off, err = unpackStringHex(msg, off, off+int(rr.HitLength)) + if err != nil { + return off, err + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, off+int(rr.PublicKeyLength)) + if err != nil { + return off, err + } + rr.RendezvousServers, off, err = unpackDataDomainNames(msg, off, 
rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *KEY) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *KX) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Exchanger, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *L32) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Locator32, off, err = unpackDataA(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *L64) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Locator64, off, err = unpackUint64(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *LOC) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Version, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == 
len(msg) { + return off, nil + } + rr.Size, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.HorizPre, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.VertPre, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Latitude, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Longitude, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Altitude, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *LP) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Fqdn, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MB) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Mb, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MD) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Md, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MF) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Mf, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MG) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Mg, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + 
+func (rr *MINFO) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Rmail, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Email, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MR) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Mr, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MX) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Mx, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NAPTR) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Order, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Flags, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Service, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Regexp, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Replacement, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NID) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == 
len(msg) { + return off, nil + } + rr.NodeID, off, err = unpackUint64(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NIMLOC) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Locator, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NINFO) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.ZSData, off, err = unpackStringTxt(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NS) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Ns, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NSAPPTR) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Ptr, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NSEC) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.NextDomain, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NSEC3) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Hash, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Flags, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Iterations, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.SaltLength, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) 
{ + return off, nil + } + rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) + if err != nil { + return off, err + } + rr.HashLength, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.NextDomain, off, err = unpackStringBase32(msg, off, off+int(rr.HashLength)) + if err != nil { + return off, err + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NSEC3PARAM) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Hash, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Flags, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Iterations, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.SaltLength, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NULL) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Data, off, err = unpackStringAny(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *OPENPGPKEY) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *OPT) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Option, off, err = unpackDataOpt(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func 
(rr *PTR) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Ptr, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *PX) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Map822, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Mapx400, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *RFC3597) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Rdata, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *RKEY) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *RP) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Mbox, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Txt, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *RRSIG) unpack(msg []byte, off int) (off1 int, err 
error) { + rdStart := off + _ = rdStart + + rr.TypeCovered, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Labels, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.OrigTtl, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.SignerName, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *RT) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Host, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SIG) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.TypeCovered, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + 
rr.Labels, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.OrigTtl, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.SignerName, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SMIMEA) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Usage, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Selector, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.MatchingType, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SOA) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Ns, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Mbox, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Serial, off, err = 
unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Refresh, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Retry, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Expire, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Minttl, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SPF) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SRV) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Priority, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Weight, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Port, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SSHFP) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Type, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.FingerPrint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TA) unpack(msg []byte, off int) (off1 
int, err error) { + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TALINK) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.PreviousName, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.NextName, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TKEY) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Mode, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Error, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.KeySize, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Key, off, err = unpackStringHex(msg, off, off+int(rr.KeySize)) + if err != nil { + return off, err + } + rr.OtherLen, off, err = 
unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TLSA) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Usage, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Selector, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.MatchingType, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TSIG) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.TimeSigned, off, err = unpackUint48(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Fudge, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.MACSize, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.MAC, off, err = unpackStringHex(msg, off, off+int(rr.MACSize)) + if err != nil { + return off, err + } + rr.OrigId, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Error, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.OtherLen, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + 
if off == len(msg) { + return off, nil + } + rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TXT) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *UID) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Uid, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *UINFO) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Uinfo, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *URI) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Priority, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Weight, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Target, off, err = unpackStringOctet(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *X25) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.PSDNAddress, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go new file mode 100644 index 00000000..1cbd6d3f --- /dev/null +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -0,0 +1,898 @@ +// Code generated by "go run types_generate.go"; DO NOT EDIT. + +package dns + +import ( + "encoding/base64" + "net" +) + +// TypeToRR is a map of constructors for each RR type. 
+var TypeToRR = map[uint16]func() RR{ + TypeA: func() RR { return new(A) }, + TypeAAAA: func() RR { return new(AAAA) }, + TypeAFSDB: func() RR { return new(AFSDB) }, + TypeANY: func() RR { return new(ANY) }, + TypeAPL: func() RR { return new(APL) }, + TypeAVC: func() RR { return new(AVC) }, + TypeCAA: func() RR { return new(CAA) }, + TypeCDNSKEY: func() RR { return new(CDNSKEY) }, + TypeCDS: func() RR { return new(CDS) }, + TypeCERT: func() RR { return new(CERT) }, + TypeCNAME: func() RR { return new(CNAME) }, + TypeCSYNC: func() RR { return new(CSYNC) }, + TypeDHCID: func() RR { return new(DHCID) }, + TypeDLV: func() RR { return new(DLV) }, + TypeDNAME: func() RR { return new(DNAME) }, + TypeDNSKEY: func() RR { return new(DNSKEY) }, + TypeDS: func() RR { return new(DS) }, + TypeEID: func() RR { return new(EID) }, + TypeEUI48: func() RR { return new(EUI48) }, + TypeEUI64: func() RR { return new(EUI64) }, + TypeGID: func() RR { return new(GID) }, + TypeGPOS: func() RR { return new(GPOS) }, + TypeHINFO: func() RR { return new(HINFO) }, + TypeHIP: func() RR { return new(HIP) }, + TypeKEY: func() RR { return new(KEY) }, + TypeKX: func() RR { return new(KX) }, + TypeL32: func() RR { return new(L32) }, + TypeL64: func() RR { return new(L64) }, + TypeLOC: func() RR { return new(LOC) }, + TypeLP: func() RR { return new(LP) }, + TypeMB: func() RR { return new(MB) }, + TypeMD: func() RR { return new(MD) }, + TypeMF: func() RR { return new(MF) }, + TypeMG: func() RR { return new(MG) }, + TypeMINFO: func() RR { return new(MINFO) }, + TypeMR: func() RR { return new(MR) }, + TypeMX: func() RR { return new(MX) }, + TypeNAPTR: func() RR { return new(NAPTR) }, + TypeNID: func() RR { return new(NID) }, + TypeNIMLOC: func() RR { return new(NIMLOC) }, + TypeNINFO: func() RR { return new(NINFO) }, + TypeNS: func() RR { return new(NS) }, + TypeNSAPPTR: func() RR { return new(NSAPPTR) }, + TypeNSEC: func() RR { return new(NSEC) }, + TypeNSEC3: func() RR { return new(NSEC3) }, + 
TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, + TypeNULL: func() RR { return new(NULL) }, + TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, + TypeOPT: func() RR { return new(OPT) }, + TypePTR: func() RR { return new(PTR) }, + TypePX: func() RR { return new(PX) }, + TypeRKEY: func() RR { return new(RKEY) }, + TypeRP: func() RR { return new(RP) }, + TypeRRSIG: func() RR { return new(RRSIG) }, + TypeRT: func() RR { return new(RT) }, + TypeSIG: func() RR { return new(SIG) }, + TypeSMIMEA: func() RR { return new(SMIMEA) }, + TypeSOA: func() RR { return new(SOA) }, + TypeSPF: func() RR { return new(SPF) }, + TypeSRV: func() RR { return new(SRV) }, + TypeSSHFP: func() RR { return new(SSHFP) }, + TypeTA: func() RR { return new(TA) }, + TypeTALINK: func() RR { return new(TALINK) }, + TypeTKEY: func() RR { return new(TKEY) }, + TypeTLSA: func() RR { return new(TLSA) }, + TypeTSIG: func() RR { return new(TSIG) }, + TypeTXT: func() RR { return new(TXT) }, + TypeUID: func() RR { return new(UID) }, + TypeUINFO: func() RR { return new(UINFO) }, + TypeURI: func() RR { return new(URI) }, + TypeX25: func() RR { return new(X25) }, +} + +// TypeToString is a map of strings for each RR type. 
+var TypeToString = map[uint16]string{ + TypeA: "A", + TypeAAAA: "AAAA", + TypeAFSDB: "AFSDB", + TypeANY: "ANY", + TypeAPL: "APL", + TypeATMA: "ATMA", + TypeAVC: "AVC", + TypeAXFR: "AXFR", + TypeCAA: "CAA", + TypeCDNSKEY: "CDNSKEY", + TypeCDS: "CDS", + TypeCERT: "CERT", + TypeCNAME: "CNAME", + TypeCSYNC: "CSYNC", + TypeDHCID: "DHCID", + TypeDLV: "DLV", + TypeDNAME: "DNAME", + TypeDNSKEY: "DNSKEY", + TypeDS: "DS", + TypeEID: "EID", + TypeEUI48: "EUI48", + TypeEUI64: "EUI64", + TypeGID: "GID", + TypeGPOS: "GPOS", + TypeHINFO: "HINFO", + TypeHIP: "HIP", + TypeISDN: "ISDN", + TypeIXFR: "IXFR", + TypeKEY: "KEY", + TypeKX: "KX", + TypeL32: "L32", + TypeL64: "L64", + TypeLOC: "LOC", + TypeLP: "LP", + TypeMAILA: "MAILA", + TypeMAILB: "MAILB", + TypeMB: "MB", + TypeMD: "MD", + TypeMF: "MF", + TypeMG: "MG", + TypeMINFO: "MINFO", + TypeMR: "MR", + TypeMX: "MX", + TypeNAPTR: "NAPTR", + TypeNID: "NID", + TypeNIMLOC: "NIMLOC", + TypeNINFO: "NINFO", + TypeNS: "NS", + TypeNSEC: "NSEC", + TypeNSEC3: "NSEC3", + TypeNSEC3PARAM: "NSEC3PARAM", + TypeNULL: "NULL", + TypeNXT: "NXT", + TypeNone: "None", + TypeOPENPGPKEY: "OPENPGPKEY", + TypeOPT: "OPT", + TypePTR: "PTR", + TypePX: "PX", + TypeRKEY: "RKEY", + TypeRP: "RP", + TypeRRSIG: "RRSIG", + TypeRT: "RT", + TypeReserved: "Reserved", + TypeSIG: "SIG", + TypeSMIMEA: "SMIMEA", + TypeSOA: "SOA", + TypeSPF: "SPF", + TypeSRV: "SRV", + TypeSSHFP: "SSHFP", + TypeTA: "TA", + TypeTALINK: "TALINK", + TypeTKEY: "TKEY", + TypeTLSA: "TLSA", + TypeTSIG: "TSIG", + TypeTXT: "TXT", + TypeUID: "UID", + TypeUINFO: "UINFO", + TypeUNSPEC: "UNSPEC", + TypeURI: "URI", + TypeX25: "X25", + TypeNSAPPTR: "NSAP-PTR", +} + +func (rr *A) Header() *RR_Header { return &rr.Hdr } +func (rr *AAAA) Header() *RR_Header { return &rr.Hdr } +func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr } +func (rr *ANY) Header() *RR_Header { return &rr.Hdr } +func (rr *APL) Header() *RR_Header { return &rr.Hdr } +func (rr *AVC) Header() *RR_Header { return &rr.Hdr } +func (rr *CAA) 
Header() *RR_Header { return &rr.Hdr } +func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *CDS) Header() *RR_Header { return &rr.Hdr } +func (rr *CERT) Header() *RR_Header { return &rr.Hdr } +func (rr *CNAME) Header() *RR_Header { return &rr.Hdr } +func (rr *CSYNC) Header() *RR_Header { return &rr.Hdr } +func (rr *DHCID) Header() *RR_Header { return &rr.Hdr } +func (rr *DLV) Header() *RR_Header { return &rr.Hdr } +func (rr *DNAME) Header() *RR_Header { return &rr.Hdr } +func (rr *DNSKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *DS) Header() *RR_Header { return &rr.Hdr } +func (rr *EID) Header() *RR_Header { return &rr.Hdr } +func (rr *EUI48) Header() *RR_Header { return &rr.Hdr } +func (rr *EUI64) Header() *RR_Header { return &rr.Hdr } +func (rr *GID) Header() *RR_Header { return &rr.Hdr } +func (rr *GPOS) Header() *RR_Header { return &rr.Hdr } +func (rr *HINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *HIP) Header() *RR_Header { return &rr.Hdr } +func (rr *KEY) Header() *RR_Header { return &rr.Hdr } +func (rr *KX) Header() *RR_Header { return &rr.Hdr } +func (rr *L32) Header() *RR_Header { return &rr.Hdr } +func (rr *L64) Header() *RR_Header { return &rr.Hdr } +func (rr *LOC) Header() *RR_Header { return &rr.Hdr } +func (rr *LP) Header() *RR_Header { return &rr.Hdr } +func (rr *MB) Header() *RR_Header { return &rr.Hdr } +func (rr *MD) Header() *RR_Header { return &rr.Hdr } +func (rr *MF) Header() *RR_Header { return &rr.Hdr } +func (rr *MG) Header() *RR_Header { return &rr.Hdr } +func (rr *MINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *MR) Header() *RR_Header { return &rr.Hdr } +func (rr *MX) Header() *RR_Header { return &rr.Hdr } +func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr } +func (rr *NID) Header() *RR_Header { return &rr.Hdr } +func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr } +func (rr *NINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *NS) Header() *RR_Header { return &rr.Hdr } +func (rr 
*NSAPPTR) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } +func (rr *NULL) Header() *RR_Header { return &rr.Hdr } +func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *OPT) Header() *RR_Header { return &rr.Hdr } +func (rr *PTR) Header() *RR_Header { return &rr.Hdr } +func (rr *PX) Header() *RR_Header { return &rr.Hdr } +func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr } +func (rr *RKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *RP) Header() *RR_Header { return &rr.Hdr } +func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr } +func (rr *RT) Header() *RR_Header { return &rr.Hdr } +func (rr *SIG) Header() *RR_Header { return &rr.Hdr } +func (rr *SMIMEA) Header() *RR_Header { return &rr.Hdr } +func (rr *SOA) Header() *RR_Header { return &rr.Hdr } +func (rr *SPF) Header() *RR_Header { return &rr.Hdr } +func (rr *SRV) Header() *RR_Header { return &rr.Hdr } +func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr } +func (rr *TA) Header() *RR_Header { return &rr.Hdr } +func (rr *TALINK) Header() *RR_Header { return &rr.Hdr } +func (rr *TKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *TLSA) Header() *RR_Header { return &rr.Hdr } +func (rr *TSIG) Header() *RR_Header { return &rr.Hdr } +func (rr *TXT) Header() *RR_Header { return &rr.Hdr } +func (rr *UID) Header() *RR_Header { return &rr.Hdr } +func (rr *UINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *URI) Header() *RR_Header { return &rr.Hdr } +func (rr *X25) Header() *RR_Header { return &rr.Hdr } + +// len() functions +func (rr *A) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + if len(rr.A) != 0 { + l += net.IPv4len + } + return l +} +func (rr *AAAA) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + if len(rr.AAAA) != 0 { + l += 
net.IPv6len + } + return l +} +func (rr *AFSDB) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Subtype + l += domainNameLen(rr.Hostname, off+l, compression, false) + return l +} +func (rr *ANY) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + return l +} +func (rr *APL) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + for _, x := range rr.Prefixes { + l += x.len() + } + return l +} +func (rr *AVC) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *CAA) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l++ // Flag + l += len(rr.Tag) + 1 + l += len(rr.Value) + return l +} +func (rr *CERT) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Type + l += 2 // KeyTag + l++ // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.Certificate)) + return l +} +func (rr *CNAME) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Target, off+l, compression, true) + return l +} +func (rr *DHCID) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += base64.StdEncoding.DecodedLen(len(rr.Digest)) + return l +} +func (rr *DNAME) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Target, off+l, compression, false) + return l +} +func (rr *DNSKEY) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Flags + l++ // Protocol + l++ // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *DS) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // KeyTag + l++ // Algorithm 
+ l++ // DigestType + l += len(rr.Digest) / 2 + return l +} +func (rr *EID) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += len(rr.Endpoint) / 2 + return l +} +func (rr *EUI48) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 6 // Address + return l +} +func (rr *EUI64) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 8 // Address + return l +} +func (rr *GID) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 4 // Gid + return l +} +func (rr *GPOS) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += len(rr.Longitude) + 1 + l += len(rr.Latitude) + 1 + l += len(rr.Altitude) + 1 + return l +} +func (rr *HINFO) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += len(rr.Cpu) + 1 + l += len(rr.Os) + 1 + return l +} +func (rr *HIP) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l++ // HitLength + l++ // PublicKeyAlgorithm + l += 2 // PublicKeyLength + l += len(rr.Hit) / 2 + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + for _, x := range rr.RendezvousServers { + l += domainNameLen(x, off+l, compression, false) + } + return l +} +func (rr *KX) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Preference + l += domainNameLen(rr.Exchanger, off+l, compression, false) + return l +} +func (rr *L32) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Preference + if len(rr.Locator32) != 0 { + l += net.IPv4len + } + return l +} +func (rr *L64) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Preference + l += 8 // Locator64 + return l +} +func (rr *LOC) len(off int, compression map[string]struct{}) int { + l := 
rr.Hdr.len(off, compression) + l++ // Version + l++ // Size + l++ // HorizPre + l++ // VertPre + l += 4 // Latitude + l += 4 // Longitude + l += 4 // Altitude + return l +} +func (rr *LP) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Preference + l += domainNameLen(rr.Fqdn, off+l, compression, false) + return l +} +func (rr *MB) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Mb, off+l, compression, true) + return l +} +func (rr *MD) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Md, off+l, compression, true) + return l +} +func (rr *MF) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Mf, off+l, compression, true) + return l +} +func (rr *MG) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Mg, off+l, compression, true) + return l +} +func (rr *MINFO) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Rmail, off+l, compression, true) + l += domainNameLen(rr.Email, off+l, compression, true) + return l +} +func (rr *MR) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Mr, off+l, compression, true) + return l +} +func (rr *MX) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Preference + l += domainNameLen(rr.Mx, off+l, compression, true) + return l +} +func (rr *NAPTR) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Order + l += 2 // Preference + l += len(rr.Flags) + 1 + l += len(rr.Service) + 1 + l += len(rr.Regexp) + 1 + l += domainNameLen(rr.Replacement, off+l, compression, false) + return l +} +func (rr *NID) len(off int, compression 
map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Preference + l += 8 // NodeID + return l +} +func (rr *NIMLOC) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += len(rr.Locator) / 2 + return l +} +func (rr *NINFO) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + for _, x := range rr.ZSData { + l += len(x) + 1 + } + return l +} +func (rr *NS) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Ns, off+l, compression, true) + return l +} +func (rr *NSAPPTR) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Ptr, off+l, compression, false) + return l +} +func (rr *NSEC3PARAM) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l++ // Hash + l++ // Flags + l += 2 // Iterations + l++ // SaltLength + l += len(rr.Salt) / 2 + return l +} +func (rr *NULL) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += len(rr.Data) + return l +} +func (rr *OPENPGPKEY) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *PTR) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Ptr, off+l, compression, true) + return l +} +func (rr *PX) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Preference + l += domainNameLen(rr.Map822, off+l, compression, false) + l += domainNameLen(rr.Mapx400, off+l, compression, false) + return l +} +func (rr *RFC3597) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += len(rr.Rdata) / 2 + return l +} +func (rr *RKEY) len(off int, compression map[string]struct{}) int { + l := 
rr.Hdr.len(off, compression) + l += 2 // Flags + l++ // Protocol + l++ // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *RP) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Mbox, off+l, compression, false) + l += domainNameLen(rr.Txt, off+l, compression, false) + return l +} +func (rr *RRSIG) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // TypeCovered + l++ // Algorithm + l++ // Labels + l += 4 // OrigTtl + l += 4 // Expiration + l += 4 // Inception + l += 2 // KeyTag + l += domainNameLen(rr.SignerName, off+l, compression, false) + l += base64.StdEncoding.DecodedLen(len(rr.Signature)) + return l +} +func (rr *RT) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Preference + l += domainNameLen(rr.Host, off+l, compression, false) + return l +} +func (rr *SMIMEA) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l++ // Usage + l++ // Selector + l++ // MatchingType + l += len(rr.Certificate) / 2 + return l +} +func (rr *SOA) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Ns, off+l, compression, true) + l += domainNameLen(rr.Mbox, off+l, compression, true) + l += 4 // Serial + l += 4 // Refresh + l += 4 // Retry + l += 4 // Expire + l += 4 // Minttl + return l +} +func (rr *SPF) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *SRV) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Priority + l += 2 // Weight + l += 2 // Port + l += domainNameLen(rr.Target, off+l, compression, false) + return l +} +func (rr *SSHFP) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, 
compression) + l++ // Algorithm + l++ // Type + l += len(rr.FingerPrint) / 2 + return l +} +func (rr *TA) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // KeyTag + l++ // Algorithm + l++ // DigestType + l += len(rr.Digest) / 2 + return l +} +func (rr *TALINK) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.PreviousName, off+l, compression, false) + l += domainNameLen(rr.NextName, off+l, compression, false) + return l +} +func (rr *TKEY) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Algorithm, off+l, compression, false) + l += 4 // Inception + l += 4 // Expiration + l += 2 // Mode + l += 2 // Error + l += 2 // KeySize + l += len(rr.Key) / 2 + l += 2 // OtherLen + l += len(rr.OtherData) / 2 + return l +} +func (rr *TLSA) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l++ // Usage + l++ // Selector + l++ // MatchingType + l += len(rr.Certificate) / 2 + return l +} +func (rr *TSIG) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Algorithm, off+l, compression, false) + l += 6 // TimeSigned + l += 2 // Fudge + l += 2 // MACSize + l += len(rr.MAC) / 2 + l += 2 // OrigId + l += 2 // Error + l += 2 // OtherLen + l += len(rr.OtherData) / 2 + return l +} +func (rr *TXT) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *UID) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 4 // Uid + return l +} +func (rr *UINFO) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += len(rr.Uinfo) + 1 + return l +} +func (rr *URI) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, 
compression) + l += 2 // Priority + l += 2 // Weight + l += len(rr.Target) + return l +} +func (rr *X25) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += len(rr.PSDNAddress) + 1 + return l +} + +// copy() functions +func (rr *A) copy() RR { + return &A{rr.Hdr, copyIP(rr.A)} +} +func (rr *AAAA) copy() RR { + return &AAAA{rr.Hdr, copyIP(rr.AAAA)} +} +func (rr *AFSDB) copy() RR { + return &AFSDB{rr.Hdr, rr.Subtype, rr.Hostname} +} +func (rr *ANY) copy() RR { + return &ANY{rr.Hdr} +} +func (rr *APL) copy() RR { + Prefixes := make([]APLPrefix, len(rr.Prefixes)) + for i := range rr.Prefixes { + Prefixes[i] = rr.Prefixes[i].copy() + } + return &APL{rr.Hdr, Prefixes} +} +func (rr *AVC) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &AVC{rr.Hdr, Txt} +} +func (rr *CAA) copy() RR { + return &CAA{rr.Hdr, rr.Flag, rr.Tag, rr.Value} +} +func (rr *CERT) copy() RR { + return &CERT{rr.Hdr, rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} +} +func (rr *CNAME) copy() RR { + return &CNAME{rr.Hdr, rr.Target} +} +func (rr *CSYNC) copy() RR { + TypeBitMap := make([]uint16, len(rr.TypeBitMap)) + copy(TypeBitMap, rr.TypeBitMap) + return &CSYNC{rr.Hdr, rr.Serial, rr.Flags, TypeBitMap} +} +func (rr *DHCID) copy() RR { + return &DHCID{rr.Hdr, rr.Digest} +} +func (rr *DNAME) copy() RR { + return &DNAME{rr.Hdr, rr.Target} +} +func (rr *DNSKEY) copy() RR { + return &DNSKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} +} +func (rr *DS) copy() RR { + return &DS{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} +} +func (rr *EID) copy() RR { + return &EID{rr.Hdr, rr.Endpoint} +} +func (rr *EUI48) copy() RR { + return &EUI48{rr.Hdr, rr.Address} +} +func (rr *EUI64) copy() RR { + return &EUI64{rr.Hdr, rr.Address} +} +func (rr *GID) copy() RR { + return &GID{rr.Hdr, rr.Gid} +} +func (rr *GPOS) copy() RR { + return &GPOS{rr.Hdr, rr.Longitude, rr.Latitude, rr.Altitude} +} +func (rr *HINFO) copy() RR { + 
return &HINFO{rr.Hdr, rr.Cpu, rr.Os} +} +func (rr *HIP) copy() RR { + RendezvousServers := make([]string, len(rr.RendezvousServers)) + copy(RendezvousServers, rr.RendezvousServers) + return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} +} +func (rr *KX) copy() RR { + return &KX{rr.Hdr, rr.Preference, rr.Exchanger} +} +func (rr *L32) copy() RR { + return &L32{rr.Hdr, rr.Preference, copyIP(rr.Locator32)} +} +func (rr *L64) copy() RR { + return &L64{rr.Hdr, rr.Preference, rr.Locator64} +} +func (rr *LOC) copy() RR { + return &LOC{rr.Hdr, rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} +} +func (rr *LP) copy() RR { + return &LP{rr.Hdr, rr.Preference, rr.Fqdn} +} +func (rr *MB) copy() RR { + return &MB{rr.Hdr, rr.Mb} +} +func (rr *MD) copy() RR { + return &MD{rr.Hdr, rr.Md} +} +func (rr *MF) copy() RR { + return &MF{rr.Hdr, rr.Mf} +} +func (rr *MG) copy() RR { + return &MG{rr.Hdr, rr.Mg} +} +func (rr *MINFO) copy() RR { + return &MINFO{rr.Hdr, rr.Rmail, rr.Email} +} +func (rr *MR) copy() RR { + return &MR{rr.Hdr, rr.Mr} +} +func (rr *MX) copy() RR { + return &MX{rr.Hdr, rr.Preference, rr.Mx} +} +func (rr *NAPTR) copy() RR { + return &NAPTR{rr.Hdr, rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} +} +func (rr *NID) copy() RR { + return &NID{rr.Hdr, rr.Preference, rr.NodeID} +} +func (rr *NIMLOC) copy() RR { + return &NIMLOC{rr.Hdr, rr.Locator} +} +func (rr *NINFO) copy() RR { + ZSData := make([]string, len(rr.ZSData)) + copy(ZSData, rr.ZSData) + return &NINFO{rr.Hdr, ZSData} +} +func (rr *NS) copy() RR { + return &NS{rr.Hdr, rr.Ns} +} +func (rr *NSAPPTR) copy() RR { + return &NSAPPTR{rr.Hdr, rr.Ptr} +} +func (rr *NSEC) copy() RR { + TypeBitMap := make([]uint16, len(rr.TypeBitMap)) + copy(TypeBitMap, rr.TypeBitMap) + return &NSEC{rr.Hdr, rr.NextDomain, TypeBitMap} +} +func (rr *NSEC3) copy() RR { + TypeBitMap := make([]uint16, len(rr.TypeBitMap)) 
+ copy(TypeBitMap, rr.TypeBitMap) + return &NSEC3{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap} +} +func (rr *NSEC3PARAM) copy() RR { + return &NSEC3PARAM{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} +} +func (rr *NULL) copy() RR { + return &NULL{rr.Hdr, rr.Data} +} +func (rr *OPENPGPKEY) copy() RR { + return &OPENPGPKEY{rr.Hdr, rr.PublicKey} +} +func (rr *OPT) copy() RR { + Option := make([]EDNS0, len(rr.Option)) + for i, e := range rr.Option { + Option[i] = e.copy() + } + return &OPT{rr.Hdr, Option} +} +func (rr *PTR) copy() RR { + return &PTR{rr.Hdr, rr.Ptr} +} +func (rr *PX) copy() RR { + return &PX{rr.Hdr, rr.Preference, rr.Map822, rr.Mapx400} +} +func (rr *RFC3597) copy() RR { + return &RFC3597{rr.Hdr, rr.Rdata} +} +func (rr *RKEY) copy() RR { + return &RKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} +} +func (rr *RP) copy() RR { + return &RP{rr.Hdr, rr.Mbox, rr.Txt} +} +func (rr *RRSIG) copy() RR { + return &RRSIG{rr.Hdr, rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} +} +func (rr *RT) copy() RR { + return &RT{rr.Hdr, rr.Preference, rr.Host} +} +func (rr *SMIMEA) copy() RR { + return &SMIMEA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} +} +func (rr *SOA) copy() RR { + return &SOA{rr.Hdr, rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} +} +func (rr *SPF) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &SPF{rr.Hdr, Txt} +} +func (rr *SRV) copy() RR { + return &SRV{rr.Hdr, rr.Priority, rr.Weight, rr.Port, rr.Target} +} +func (rr *SSHFP) copy() RR { + return &SSHFP{rr.Hdr, rr.Algorithm, rr.Type, rr.FingerPrint} +} +func (rr *TA) copy() RR { + return &TA{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} +} +func (rr *TALINK) copy() RR { + return &TALINK{rr.Hdr, rr.PreviousName, rr.NextName} +} +func (rr *TKEY) 
copy() RR { + return &TKEY{rr.Hdr, rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} +} +func (rr *TLSA) copy() RR { + return &TLSA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} +} +func (rr *TSIG) copy() RR { + return &TSIG{rr.Hdr, rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData} +} +func (rr *TXT) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &TXT{rr.Hdr, Txt} +} +func (rr *UID) copy() RR { + return &UID{rr.Hdr, rr.Uid} +} +func (rr *UINFO) copy() RR { + return &UINFO{rr.Hdr, rr.Uinfo} +} +func (rr *URI) copy() RR { + return &URI{rr.Hdr, rr.Priority, rr.Weight, rr.Target} +} +func (rr *X25) copy() RR { + return &X25{rr.Hdr, rr.PSDNAddress} +} diff --git a/vendor/github.com/open-falcon/rrdlite/.gitignore b/vendor/github.com/open-falcon/rrdlite/.gitignore deleted file mode 100644 index a3d35805..00000000 --- a/vendor/github.com/open-falcon/rrdlite/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*.swp -G* -tmp/ diff --git a/vendor/github.com/open-falcon/rrdlite/LICENSE b/vendor/github.com/open-falcon/rrdlite/LICENSE deleted file mode 100644 index 9952df9f..00000000 --- a/vendor/github.com/open-falcon/rrdlite/LICENSE +++ /dev/null @@ -1,30 +0,0 @@ -Copyright (c) 2012, Michal Derkacz -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. -3. 
The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -Copyright (C) 2015, Yu Bo -All rights reserved. - -enjoy it :) diff --git a/vendor/github.com/open-falcon/rrdlite/README.md b/vendor/github.com/open-falcon/rrdlite/README.md deleted file mode 100644 index 3c30f6da..00000000 --- a/vendor/github.com/open-falcon/rrdlite/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Go (golang) for rrdtool(1.4.9) - -一个轻量级的rrdtool工具包,线程安全,解除librrd依赖,只提供create,update,fetch,info - -## Installing - - go get github.com/yubo/rrdlite - - -## Example -See [rrd_test.go](https://github.com/yubo/rrdlite/blob/master/rrd_test.go) for an example of using this package. 
diff --git a/vendor/github.com/open-falcon/rrdlite/fnv.h b/vendor/github.com/open-falcon/rrdlite/fnv.h deleted file mode 100644 index eaea66b2..00000000 --- a/vendor/github.com/open-falcon/rrdlite/fnv.h +++ /dev/null @@ -1,114 +0,0 @@ -/* - * fnv - Fowler/Noll/Vo- hash code - * - * @(#) $Revision$ - * @(#) $Id$ - * @(#) $Source$ - * - *** - * - * Fowler/Noll/Vo- hash - * - * The basis of this hash algorithm was taken from an idea sent - * as reviewer comments to the IEEE POSIX P1003.2 committee by: - * - * Phong Vo (http://www.research.att.com/info/kpv/) - * Glenn Fowler (http://www.research.att.com/~gsf/) - * - * In a subsequent ballot round: - * - * Landon Curt Noll (http://reality.sgi.com/chongo/) - * - * improved on their algorithm. Some people tried this hash - * and found that it worked rather well. In an EMail message - * to Landon, they named it the ``Fowler/Noll/Vo'' or FNV hash. - * - * FNV hashes are architected to be fast while maintaining a low - * collision rate. The FNV speed allows one to quickly hash lots - * of data while maintaining a reasonable collision rate. See: - * - * http://reality.sgi.com/chongo/tech/comp/fnv/ - * - * for more details as well as other forms of the FNV hash. - * - *** - * - * NOTE: The FNV-0 historic hash is not recommended. One should use - * the FNV-1 hash instead. - * - * To use the 32 bit FNV-0 historic hash, pass FNV0_32_INIT as the - * Fnv32_t hashval argument to fnv_32_buf() or fnv_32_str(). - * - * To use the 64 bit FNV-0 historic hash, pass FNV0_64_INIT as the - * Fnv64_t hashval argument to fnv_64_buf() or fnv_64_str(). - * - * To use the recommended 32 bit FNV-1 hash, pass FNV1_32_INIT as the - * Fnv32_t hashval argument to fnv_32_buf() or fnv_32_str(). - * - * To use the recommended 64 bit FNV-1 hash, pass FNV1_64_INIT as the - * Fnv64_t hashval argument to fnv_64_buf() or fnv_64_str(). - * - *** - * - * Please do not copyright this code. This code is in the public domain. 
- * - * LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, - * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO - * EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF - * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR - * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR - * PERFORMANCE OF THIS SOFTWARE. - * - * By: - * chongo /\oo/\ - * http://reality.sgi.com/chongo/ - * EMail: chongo_fnv at prime dot engr dot sgi dot com - * - * Share and Enjoy! :-) - */ - -#if !defined(__FNV_H__) -#define __FNV_H__ - - -/* - * 32 bit FNV-0 hash type - */ -typedef unsigned long Fnv32_t; - - -/* - * 32 bit FNV-0 zero initial basis - * - * This historic hash is not recommended. One should use - * the FNV-1 hash and inital basis instead. - */ -#define FNV0_32_INIT ((Fnv32_t)0) - - -/* - * 32 bit FNV-1 non-zero initial basis - * - * The FNV-1 initial basis is the FNV-0 hash of the following 32 octets: - * - * chongo /\../\ - * - * Note that the \'s above are not back-slashing escape characters. - * They are literal ASCII backslash 0x5c characters. 
- */ -#define FNV1_32_INIT ((Fnv32_t)0x811c9dc5) - -Fnv32_t fnv_32_buf( - const void *, - size_t, - Fnv32_t); - -Fnv32_t fnv_32_str( - const char *, - Fnv32_t); - -unsigned long FnvHash( - const char *); - -#endif /* __FNV_H__ */ diff --git a/vendor/github.com/open-falcon/rrdlite/hash_32.c b/vendor/github.com/open-falcon/rrdlite/hash_32.c deleted file mode 100644 index 655c757e..00000000 --- a/vendor/github.com/open-falcon/rrdlite/hash_32.c +++ /dev/null @@ -1,154 +0,0 @@ -/* - * hash_32 - 32 bit Fowler/Noll/Vo hash code - * - * - *** - * - * Fowler/Noll/Vo hash - * - * The basis of this hash algorithm was taken from an idea sent - * as reviewer comments to the IEEE POSIX P1003.2 committee by: - * - * Phong Vo (http://www.research.att.com/info/kpv/) - * Glenn Fowler (http://www.research.att.com/~gsf/) - * - * In a subsequent ballot round: - * - * Landon Curt Noll (http://reality.sgi.com/chongo/) - * - * improved on their algorithm. Some people tried this hash - * and found that it worked rather well. In an EMail message - * to Landon, they named it the ``Fowler/Noll/Vo'' or FNV hash. - * - * FNV hashes are architected to be fast while maintaining a low - * collision rate. The FNV speed allows one to quickly hash lots - * of data while maintaining a reasonable collision rate. See: - * - * http://reality.sgi.com/chongo/tech/comp/fnv/ - * - * for more details as well as other forms of the FNV hash. - *** - * - * NOTE: The FNV-0 historic hash is not recommended. One should use - * the FNV-1 hash instead. - * - * To use the 32 bit FNV-0 historic hash, pass FNV0_32_INIT as the - * Fnv32_t hashval argument to fnv_32_buf() or fnv_32_str(). - * - * To use the recommended 32 bit FNV-1 hash, pass FNV1_32_INIT as the - * Fnv32_t hashval argument to fnv_32_buf() or fnv_32_str(). - * - *** - * - * Please do not copyright this code. This code is in the public domain. 
- * - * LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, - * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO - * EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF - * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR - * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR - * PERFORMANCE OF THIS SOFTWARE. - * - * By: - * chongo /\oo/\ - * http://reality.sgi.com/chongo/ - * EMail: chongo_fnv at prime dot engr dot sgi dot com - * - * Share and Enjoy! :-) - */ - -#include -#include "fnv.h" - - -/* - * 32 bit magic FNV-0 and FNV-1 prime - */ -#define FNV_32_PRIME ((Fnv32_t)0x01000193) - - -/* - * fnv_32_buf - perform a 32 bit Fowler/Noll/Vo hash on a buffer - * - * input: - * buf - start of buffer to hash - * len - length of buffer in octets - * hval - previous hash value or 0 if first call - * - * returns: - * 32 bit hash as a static hash type - * - * NOTE: To use the 32 bit FNV-0 historic hash, use FNV0_32_INIT as the hval - * argument on the first call to either fnv_32_buf() or fnv_32_str(). - * - * NOTE: To use the recommended 32 bit FNV-1 hash, use FNV1_32_INIT as the hval - * argument on the first call to either fnv_32_buf() or fnv_32_str(). 
- */ -Fnv32_t fnv_32_buf( - const void *buf, - size_t len, - Fnv32_t hval) -{ - const unsigned char *bp = (const unsigned char *) buf; /* start of buffer */ - const unsigned char *be = bp + len; /* beyond end of buffer */ - - /* - * FNV-1 hash each octet in the buffer - */ - while (bp < be) { - - /* multiply by the 32 bit FNV magic prime mod 2^64 */ - hval *= FNV_32_PRIME; - - /* xor the bottom with the current octet */ - hval ^= (Fnv32_t) *bp++; - } - - /* return our new hash value */ - return hval; -} - - -/* - * fnv_32_str - perform a 32 bit Fowler/Noll/Vo hash on a string - * - * input: - * str - string to hash - * hval - previous hash value or 0 if first call - * - * returns: - * 32 bit hash as a static hash type - * - * NOTE: To use the 32 bit FNV-0 historic hash, use FNV0_32_INIT as the hval - * argument on the first call to either fnv_32_buf() or fnv_32_str(). - * - * NOTE: To use the recommended 32 bit FNV-1 hash, use FNV1_32_INIT as the hval - * argument on the first call to either fnv_32_buf() or fnv_32_str(). 
- */ -Fnv32_t fnv_32_str( - const char *str, - Fnv32_t hval) -{ - const unsigned char *s = (const unsigned char *) str; /* unsigned string */ - - /* - * FNV-1 hash each octet in the buffer - */ - while (*s) { - - /* multiply by the 32 bit FNV magic prime mod 2^64 */ - hval *= FNV_32_PRIME; - - /* xor the bottom with the current octet */ - hval ^= (Fnv32_t) *s++; - } - - /* return our new hash value */ - return hval; -} - -/* a wrapper function for fnv_32_str */ -unsigned long FnvHash( const char *str) { - return fnv_32_str(str, FNV1_32_INIT); -} diff --git a/vendor/github.com/open-falcon/rrdlite/rrd.go b/vendor/github.com/open-falcon/rrdlite/rrd.go deleted file mode 100644 index 38522f41..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd.go +++ /dev/null @@ -1,156 +0,0 @@ -// Simple wrapper for rrdtool C library -package rrdlite - -import ( - "fmt" - "os" - "strings" - "time" - "unsafe" -) - -type Error string - -func (e Error) Error() string { - return string(e) -} - -type cstring []byte - -func newCstring(s string) cstring { - cs := make(cstring, len(s)+1) - copy(cs, s) - return cs -} - -func (cs cstring) p() unsafe.Pointer { - if len(cs) == 0 { - return nil - } - return unsafe.Pointer(&cs[0]) -} - -func (cs cstring) String() string { - return string(cs[:len(cs)-1]) -} - -func join(args []interface{}) string { - sa := make([]string, len(args)) - for i, a := range args { - var s string - switch v := a.(type) { - case time.Time: - s = i64toa(v.Unix()) - default: - s = fmt.Sprint(v) - } - sa[i] = s - } - return strings.Join(sa, ":") -} - -type Creator struct { - filename string - start time.Time - step uint - args []string -} - -// NewCreator returns new Creator object. You need to call Create to really -// create database file. 
-// filename - name of database file -// start - don't accept any data timed before or at time specified -// step - base interval in seconds with which data will be fed into RRD -func NewCreator(filename string, start time.Time, step uint) *Creator { - return &Creator{ - filename: filename, - start: start, - step: step, - } -} - -func (c *Creator) DS(name, compute string, args ...interface{}) { - c.args = append(c.args, "DS:"+name+":"+compute+":"+join(args)) -} - -func (c *Creator) RRA(cf string, args ...interface{}) { - c.args = append(c.args, "RRA:"+cf+":"+join(args)) -} - -// Create creates new database file. If overwrite is true it overwrites -// database file if exists. If overwrite is false it returns error if file -// exists (you can use os.IsExist function to check this case). -func (c *Creator) Create(overwrite bool) error { - if !overwrite { - f, err := os.OpenFile( - c.filename, - os.O_WRONLY|os.O_CREATE|os.O_EXCL, - 0666, - ) - if err != nil { - return err - } - f.Close() - } - return c.create() -} - -// Use cstring and unsafe.Pointer to avoid alocations for C calls - -type Updater struct { - filename cstring - template cstring - - args []string -} - -func NewUpdater(filename string) *Updater { - return &Updater{filename: newCstring(filename)} -} - -func (u *Updater) SetTemplate(dsName ...string) { - u.template = newCstring(strings.Join(dsName, ":")) -} - -// Cache chaches data for later save using Update(). Use it to avoid -// open/read/write/close for every update. -func (u *Updater) Cache(args ...interface{}) { - u.args = append(u.args, join(args)) -} - -// Update saves data in RRDB. -// Without args Update saves all subsequent updates buffered by Cache method. -// If you specify args it saves them immediately. 
-func (u *Updater) Update(args ...interface{}) error { - if len(args) != 0 { - a := make([]string, 1) - a[0] = join(args) - return u.update(a) - } else if len(u.args) != 0 { - err := u.update(u.args) - u.args = nil - return err - } - return nil -} - -const ( - maxUint = ^uint(0) - maxInt = int(maxUint >> 1) - minInt = -maxInt - 1 -) - -type FetchResult struct { - Filename string - Cf string - Start time.Time - End time.Time - Step time.Duration - DsNames []string - RowCnt int - values []float64 -} - -func (r *FetchResult) ValueAt(dsIndex, rowIndex int) float64 { - return r.values[len(r.DsNames)*rowIndex+dsIndex] -} diff --git a/vendor/github.com/open-falcon/rrdlite/rrd.h b/vendor/github.com/open-falcon/rrdlite/rrd.h deleted file mode 100644 index 9dc93c5f..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd.h +++ /dev/null @@ -1,281 +0,0 @@ -/***************************************************************************** - * RRDtool 1.4.9 Copyright by Tobi Oetiker, 1997-2014 - ***************************************************************************** - * rrdlib.h Public header file for librrd - ***************************************************************************** - * $Id$ - * $Log$ - * Revision 1.9 2005/02/13 16:13:33 oetiker - * let rrd_graph return the actual value range it picked ... - * -- Henrik Stoerner - * - * Revision 1.8 2004/05/26 22:11:12 oetiker - * reduce compiler warnings. Many small fixes. -- Mike Slifcak - * - * Revision 1.7 2003/11/12 22:14:26 oetiker - * allow to pass an open filehandle into rrd_graph as an extra argument - * - * Revision 1.6 2003/11/11 19:46:21 oetiker - * replaced time_value with rrd_time_value as MacOS X introduced a struct of that name in their standard headers - * - * Revision 1.5 2003/04/25 18:35:08 jake - * Alternate update interface, updatev. Returns info about CDPs written to disk as result of update. Output format is similar to rrd_info, a hash of key-values. 
- * - * Revision 1.4 2003/04/01 22:52:23 jake - * Fix Win32 build. VC++ 6.0 and 7.0 now use the thread-safe code. - * - * Revision 1.3 2003/02/13 07:05:27 oetiker - * Find attached the patch I promised to send to you. Please note that there - * are three new source files (src/rrd_is_thread_safe.h, src/rrd_thread_safe.c - * and src/rrd_not_thread_safe.c) and the introduction of librrd_th. This - * library is identical to librrd, but it contains support code for per-thread - * global variables currently used for error information only. This is similar - * to how errno per-thread variables are implemented. librrd_th must be linked - * alongside of libpthred - * - * There is also a new file "THREADS", holding some documentation. - * - * -- Peter Stamfest - * - * Revision 1.2 2002/05/07 21:58:32 oetiker - * new command rrdtool xport integrated - * -- Wolfgang Schrimm - * - * Revision 1.1.1.1 2001/02/25 22:25:05 oetiker - * checkin - * - *****************************************************************************/ -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef _RRDLIB_H -#define _RRDLIB_H - -#include /* for off_t */ - -#ifndef WIN32 -#include /* for off_t */ -#else -#ifdef _MSC_VER -#ifndef PERLPATCHLEVEL - typedef int mode_t; -#endif -#define strtoll _strtoi64 -#endif - typedef size_t ssize_t; - typedef long off_t; -#endif - -#include -#include /* for FILE */ -#include - - /* Formerly rrd_nan_inf.h */ -#ifndef DNAN -# define DNAN rrd_set_to_DNAN() -#endif - -#ifndef DINF -# define DINF rrd_set_to_DINF() -#endif - double rrd_set_to_DNAN( void); - double rrd_set_to_DINF( void); - /* end of rrd_nan_inf.h */ - - /* Transplanted from rrd_format.h */ - typedef double rrd_value_t; /* the data storage type is - * double */ - /* END rrd_format.h */ - - /* information about an rrd file */ - typedef struct rrd_file_t { - size_t header_len; /* length of the header of this rrd file */ - size_t file_len; /* total size of the rrd file */ - size_t pos; /* current pos in file */ 
- void *pvt; - } rrd_file_t; - - /* information used for the conventional file access methods */ - typedef struct rrd_simple_file_t { - int fd; /* file descriptor of this rrd file */ -#ifdef HAVE_MMAP - char *file_start; /* start address of an open rrd file */ - int mm_prot; - int mm_flags; -#endif - } rrd_simple_file_t; - - /* rrd info interface */ - typedef struct rrd_blob_t { - unsigned long size; /* size of the blob */ - unsigned char *ptr; /* pointer */ - } rrd_blob_t; - - typedef enum rrd_info_type { RD_I_VAL = 0, - RD_I_CNT, - RD_I_STR, - RD_I_INT, - RD_I_BLO - } rrd_info_type_t; - - typedef union rrd_infoval { - unsigned long u_cnt; - rrd_value_t u_val; - char *u_str; - int u_int; - rrd_blob_t u_blo; - } rrd_infoval_t; - - typedef struct rrd_info_t { - char *key; - rrd_info_type_t type; - rrd_infoval_t value; - struct rrd_info_t *next; - } rrd_info_t; - - typedef size_t (* rrd_output_callback_t)( const void *, size_t, void *); - - /* main function blocks */ - int rrd_create( int, char **); - rrd_info_t *rrd_info( int, char **); - rrd_info_t *rrd_info_push( rrd_info_t *, char *, rrd_info_type_t, rrd_infoval_t); - void rrd_info_print( rrd_info_t * data); - void rrd_info_free( rrd_info_t *); - int rrd_update( int, char **); - rrd_info_t *rrd_update_v( int, char **); - int rrd_graph( int, char **, char ***, int *, int *, - FILE *, double *, double *); - rrd_info_t *rrd_graph_v( int, char **); - - int rrd_fetch( int, char **, time_t *, time_t *, unsigned long *, - unsigned long *, char ***, rrd_value_t **); - int rrd_restore( int, char **); - int rrd_dump( int, char **); - int rrd_tune( int, char **); - time_t rrd_last( int, char **); - int rrd_lastupdate(int argc, char **argv); - time_t rrd_first( int, char **); - int rrd_resize( int, char **); - char *rrd_strversion( void); - double rrd_version( void); - int rrd_xport( int, char **, int *, time_t *, time_t *, - unsigned long *, unsigned long *, char ***, rrd_value_t **); - int rrd_flushcached (int argc, char 
**argv); - - void rrd_freemem( void *mem); - - /* thread-safe (hopefully) */ - int rrd_create_r( const char *filename, unsigned long pdp_step, time_t last_up, - int argc, const char **argv); - rrd_info_t *rrd_info_r(char *, int *); - /* NOTE: rrd_update_r are only thread-safe if no at-style time - specifications get used!!! */ - - int rrd_update_r( const char *filename, const char *_template, - int argc, const char **argv); - int rrd_fetch_r ( const char *filename, const char *cf, time_t *start, time_t *end, - unsigned long *step, unsigned long *ds_cnt, char ***ds_namv, rrd_value_t **data); - int rrd_dump_r( const char *filename, char *outname); - time_t rrd_last_r (const char *filename); - int rrd_lastupdate_r (const char *filename, time_t *ret_last_update, unsigned long *ret_ds_count, char ***ret_ds_names, char ***ret_last_ds); - time_t rrd_first_r( const char *filename, int rraindex); - - int rrd_dump_cb_r( const char *filename, int opt_header, rrd_output_callback_t cb, void *user); - - /* Transplanted from rrd_parsetime.h */ - typedef enum { - ABSOLUTE_TIME, - RELATIVE_TO_START_TIME, - RELATIVE_TO_END_TIME, - RELATIVE_TO_EPOCH - } rrd_timetype_t; - -#define TIME_OK NULL - - typedef struct rrd_time_value { - rrd_timetype_t type; - long offset; - struct tm tm; - } rrd_time_value_t; - - char *rrd_parsetime( const char *spec, rrd_time_value_t * ptv); - /* END rrd_parsetime.h */ - - typedef struct rrd_context { - char lib_errstr[256]; - char rrd_error[4096]; - } rrd_context_t; - - /* returns the current per-thread rrd_context */ - rrd_context_t *rrd_get_context(void); - -#ifdef WIN32 - /* this was added by the win32 porters Christof.Wegmann@exitgames.com */ - rrd_context_t *rrd_force_new_context(void); -#endif - - int rrd_proc_start_end( rrd_time_value_t *, rrd_time_value_t *, time_t *, time_t *); - - - /* rrd_strerror is thread safe, but still it uses a global buffer - (but one per thread), thus subsequent calls within a single - thread overwrite the same buffer */ 
- const char *rrd_strerror( int err); - - /** MULTITHREADED HELPER FUNCTIONS */ - - /** UTILITY FUNCTIONS */ - - long rrd_random(void); - - int rrd_add_ptr(void ***dest, size_t *dest_size, void *src); - int rrd_add_strdup(char ***dest, size_t *dest_size, char *src); - void rrd_free_ptrs(void ***src, size_t *cnt); - - int rrd_mkdir_p(const char *pathname, mode_t mode); - - /* - * The following functions are _internal_ functions needed to read the raw RRD - * files. Since they are _internal_ they may change with the file format and - * will be replaced with a more general interface in RRDTool 1.4. Don't use - * these functions unless you have good reasons to do so. If you do use these - * functions you will have to adapt your code for RRDTool 1.4! - * - * To enable the deprecated functions define `RRD_EXPORT_DEPRECATED' before - * including . You have been warned! If you come back to the - * RRDTool mailing list and whine about your broken application, you will get - * hit with something smelly! 
- */ -#if defined(_RRD_TOOL_H) || defined(RRD_EXPORT_DEPRECATED) - -#include "rrd_error.h" -#include "rrd_format.h" - -#if defined(__GNUC__) && defined (RRD_EXPORT_DEPRECATED) -# define RRD_DEPRECATED __attribute__((deprecated)) -#else -# define RRD_DEPRECATED /**/ -#endif - void rrd_free( rrd_t *rrd) RRD_DEPRECATED; - void rrd_init( rrd_t *rrd) RRD_DEPRECATED; - - rrd_file_t *rrd_open( const char *const file_name, rrd_t *rrd, unsigned rdwr, int *ret_p) RRD_DEPRECATED; - - void rrd_dontneed( rrd_file_t *rrd_file, rrd_t *rrd) RRD_DEPRECATED; - int rrd_close( rrd_file_t *rrd_file) RRD_DEPRECATED; - ssize_t rrd_read( rrd_file_t *rrd_file, void *buf, size_t count) RRD_DEPRECATED; - ssize_t rrd_write( rrd_file_t *rrd_file, const void *buf, size_t count) RRD_DEPRECATED; - void rrd_flush( rrd_file_t *rrd_file) RRD_DEPRECATED; - off_t rrd_seek( rrd_file_t *rrd_file, off_t off, int whence) RRD_DEPRECATED; - off_t rrd_tell( rrd_file_t *rrd_file) RRD_DEPRECATED; - int rrd_lock( rrd_file_t *file) RRD_DEPRECATED; - void rrd_notify_row( rrd_file_t *rrd_file, int rra_idx, unsigned long rra_row, time_t rra_time) RRD_DEPRECATED; - unsigned long rrd_select_initial_row( rrd_file_t *rrd_file, int rra_idx, rra_def_t *rra) RRD_DEPRECATED; -#endif /* defined(_RRD_TOOL_H) || defined(RRD_EXPORT_DEPRECATED) */ - -#endif /* _RRDLIB_H */ - -#ifdef __cplusplus -} -#endif diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_c.go b/vendor/github.com/open-falcon/rrdlite/rrd_c.go deleted file mode 100644 index d79641fb..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_c.go +++ /dev/null @@ -1,306 +0,0 @@ -package rrdlite - -/* -#include -#include "rrd.h" -#include "rrdfunc.h" -#cgo linux CFLAGS: -std=c99 -DRRD_LITE -D_BSD_SOURCE -DHAVE_CONFIG_H -D_POSIX_SOURCE -DNUMVERS=1.4009 -D_LINUX_OS -#cgo darwin CFLAGS: -std=c99 -DRRD_LITE -D_BSD_SOURCE -DHAVE_CONFIG_H -D_POSIX_SOURCE -DNUMVERS=1.4009 -D_DARWIN_OS -#cgo LDFLAGS: -lm -*/ -import "C" - -import ( - "reflect" - "strconv" - "strings" - 
"time" - "unsafe" -) - -func makeCArgs(args []string) []*C.char { - ret := make([]*C.char, len(args)) - for i, s := range args { - ret[i] = C.CString(s) - } - return ret -} - -func freeCString(s *C.char) { - C.free(unsafe.Pointer(s)) -} - -func freeArgs(cArgs []*C.char) { - for _, s := range cArgs { - freeCString(s) - } -} - -func makeGoError(e *C.char) error { - var null *C.char - if e == null { - return nil - } - return Error(C.GoString(e)) -} - -func (c *Creator) create() error { - filename := C.CString(c.filename) - defer freeCString(filename) - args := makeCArgs(c.args) - defer freeArgs(args) - - e := C.rrdCreate( - filename, - C.ulong(c.step), - C.time_t(c.start.Unix()), - C.int(len(args)), - &args[0], - ) - return makeGoError(e) -} - -func (u *Updater) update(_args []string) error { - - args := makeCArgs(_args) - defer freeArgs(args) - - e := C.rrdUpdate( - (*C.char)(u.filename.p()), - (*C.char)(u.template.p()), - C.int(len(args)), - &args[0], - ) - return makeGoError(e) -} - -var ( - oStart = C.CString("-s") - oEnd = C.CString("-e") - oTitle = C.CString("-t") - oVlabel = C.CString("-v") - oWidth = C.CString("-w") - oHeight = C.CString("-h") - oUpperLimit = C.CString("-u") - oLowerLimit = C.CString("-l") - oRigid = C.CString("-r") - oAltAutoscale = C.CString("-A") - oAltAutoscaleMin = C.CString("-J") - oAltAutoscaleMax = C.CString("-M") - oNoGridFit = C.CString("-N") - - oLogarithmic = C.CString("-o") - oUnitsExponent = C.CString("-X") - oUnitsLength = C.CString("-L") - - oRightAxis = C.CString("--right-axis") - oRightAxisLabel = C.CString("--right-axis-label") - - oDaemon = C.CString("--daemon") - - oNoLegend = C.CString("-g") - - oLazy = C.CString("-z") - - oColor = C.CString("-c") - - oSlopeMode = C.CString("-E") - oImageFormat = C.CString("-a") - oInterlaced = C.CString("-i") - - oBase = C.CString("-b") - oWatermark = C.CString("-W") - - oStep = C.CString("--step") - oMaxRows = C.CString("-m") -) - -func ftoa(f float64) string { - return 
strconv.FormatFloat(f, 'e', 10, 64) -} - -func ftoc(f float64) *C.char { - return C.CString(ftoa(f)) -} - -func i64toa(i int64) string { - return strconv.FormatInt(i, 10) -} - -func i64toc(i int64) *C.char { - return C.CString(i64toa(i)) -} - -func u64toa(u uint64) string { - return strconv.FormatUint(u, 10) -} - -func u64toc(u uint64) *C.char { - return C.CString(u64toa(u)) -} -func itoa(i int) string { - return i64toa(int64(i)) -} - -func itoc(i int) *C.char { - return i64toc(int64(i)) -} - -func utoa(u uint) string { - return u64toa(uint64(u)) -} - -func utoc(u uint) *C.char { - return u64toc(uint64(u)) -} - -func parseInfoKey(ik string) (kname, kkey string, kid int) { - kid = -1 - o := strings.IndexRune(ik, '[') - if o == -1 { - kname = ik - return - } - c := strings.IndexRune(ik[o+1:], ']') - if c == -1 { - kname = ik - return - } - c += o + 1 - kname = ik[:o] + ik[c+1:] - kkey = ik[o+1 : c] - if strings.HasPrefix(kname, "ds.") { - return - } else if id, err := strconv.Atoi(kkey); err == nil && id >= 0 { - kid = id - } - return -} - -func updateInfoValue(i *C.struct_rrd_info_t, v interface{}) interface{} { - switch i._type { - case C.RD_I_VAL: - return float64(*(*C.rrd_value_t)(unsafe.Pointer(&i.value[0]))) - case C.RD_I_CNT: - return uint(*(*C.ulong)(unsafe.Pointer(&i.value[0]))) - case C.RD_I_STR: - return C.GoString(*(**C.char)(unsafe.Pointer(&i.value[0]))) - case C.RD_I_INT: - return int(*(*C.int)(unsafe.Pointer(&i.value[0]))) - case C.RD_I_BLO: - blob := *(*C.rrd_blob_t)(unsafe.Pointer(&i.value[0])) - b := C.GoBytes(unsafe.Pointer(blob.ptr), C.int(blob.size)) - if v == nil { - return b - } - return append(v.([]byte), b...) 
- } - - return nil -} - -func parseRRDInfo(i *C.rrd_info_t) map[string]interface{} { - defer C.rrd_info_free(i) - - r := make(map[string]interface{}) - for w := (*C.struct_rrd_info_t)(i); w != nil; w = w.next { - kname, kkey, kid := parseInfoKey(C.GoString(w.key)) - v, ok := r[kname] - switch { - case kid != -1: - var a []interface{} - if ok { - a = v.([]interface{}) - } - if len(a) < kid+1 { - oldA := a - a = make([]interface{}, kid+1) - copy(a, oldA) - } - a[kid] = updateInfoValue(w, a[kid]) - v = a - case kkey != "": - var m map[string]interface{} - if ok { - m = v.(map[string]interface{}) - } else { - m = make(map[string]interface{}) - } - old, _ := m[kkey] - m[kkey] = updateInfoValue(w, old) - v = m - default: - v = updateInfoValue(w, v) - } - r[kname] = v - } - return r -} - -// Info returns information about RRD file. -func Info(filename string) (map[string]interface{}, error) { - fn := C.CString(filename) - defer freeCString(fn) - var i *C.rrd_info_t - err := makeGoError(C.rrdInfo(&i, fn)) - if err != nil { - return nil, err - } - return parseRRDInfo(i), nil -} - -// Fetch retrieves data from RRD file. 
-func Fetch(filename, cf string, start, end time.Time, step time.Duration) (FetchResult, error) { - - fn := C.CString(filename) - defer freeCString(fn) - cCf := C.CString(cf) - defer freeCString(cCf) - cStart := C.time_t(start.Unix()) - cEnd := C.time_t(end.Unix()) - cStep := C.ulong(step.Seconds()) - var ( - cRet C.int - cDsCnt C.ulong - cDsNames **C.char - cData *C.double - ) - - err := makeGoError(C.rrdFetch(&cRet, fn, cCf, &cStart, &cEnd, &cStep, &cDsCnt, &cDsNames, &cData)) - if err != nil { - return FetchResult{filename, cf, start, end, step, nil, 0, nil}, err - } - - start = time.Unix(int64(cStart), 0) - end = time.Unix(int64(cEnd), 0) - step = time.Duration(cStep) * time.Second - dsCnt := int(cDsCnt) - - dsNames := make([]string, dsCnt) - for i := 0; i < dsCnt; i++ { - dsName := C.arrayGetCString(cDsNames, C.int(i)) - dsNames[i] = C.GoString(dsName) - C.free(unsafe.Pointer(dsName)) - } - C.free(unsafe.Pointer(cDsNames)) - - rowCnt := (int(cEnd)-int(cStart))/int(cStep) + 1 - valuesLen := dsCnt * rowCnt - var values []float64 - sliceHeader := (*reflect.SliceHeader)((unsafe.Pointer(&values))) - sliceHeader.Cap = valuesLen - sliceHeader.Len = valuesLen - sliceHeader.Data = uintptr(unsafe.Pointer(cData)) - return FetchResult{filename, cf, start, end, step, dsNames, rowCnt, values}, nil -} - -// FreeValues free values memory allocated by C. -func (r *FetchResult) FreeValues() { - sliceHeader := (*reflect.SliceHeader)((unsafe.Pointer(&r.values))) - C.free(unsafe.Pointer(sliceHeader.Data)) -} - -// Values returns copy of internal array of values. -func (r *FetchResult) Values() []float64 { - return append([]float64{}, r.values...) -} diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_config.h b/vendor/github.com/open-falcon/rrdlite/rrd_config.h deleted file mode 100644 index cf59c4b7..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_config.h +++ /dev/null @@ -1,400 +0,0 @@ -/* rrd_config.h. Generated from rrd_config.h.in by configure. 
*/ -/* rrd_config.h.in. Generated from configure.ac by autoheader. */ - - -#ifndef RRD_CONFIG_H -#define RRD_CONFIG_H - -/* IEEE can be prevented from raising signals with fpsetmask(0) */ -/* #undef MUST_DISABLE_FPMASK */ - -/* IEEE math only works if SIGFPE gets actively set to IGNORE */ -/* #undef MUST_DISABLE_SIGFPE */ - -/* realloc does not support NULL as argument */ -/* #undef NO_NULL_REALLOC */ - -/* lets enable madvise defines in NetBSD */ -#if defined(__NetBSD__) -# if !defined(_NETBSD_SOURCE) -# define _NETBSD_SOURCE -# endif -#endif - -#if defined(_LINUX_OS) -#define HAVE_DECL_POSIX_FADVISE 1 -#define HAVE_FEATURES_H 1 -#define HAVE_POSIX_FADVISE 1 -#define HAVE_POSIX_FALLOCATE 1 -#define HAVE_MMAP 1 -#define HAVE_DECL_MADVISE 1 -#define HAVE_MADVISE 1 -#endif - -#if defined(_DARWIN_OS) -//#define HAVE_DECL_POSIX_FADVISE 0 -//#define HAVE_FEATURES_H 1 -//#define HAVE_POSIX_FADVISE 0 -//#define HAVE_POSIX_FALLOCATE 1 -#endif - -/* Define if building universal (internal helper macro) */ -/* #undef AC_APPLE_UNIVERSAL_BUILD */ - - -/* set to 1 if msync with MS_ASYNC fails to update mtime */ -/* #undef HAVE_BROKEN_MS_ASYNC */ - -/* Define to 1 if you have the `chdir' function. */ -#define HAVE_CHDIR 1 - -/* Define to 1 if you have the `chroot' function. */ -#define HAVE_CHROOT 1 - -/* Define to 1 if you have the `class' function. */ -/* #undef HAVE_CLASS */ - -/* Define to 1 if you have the header file. */ -#define HAVE_CTYPE_H 1 - - - - -/* Define to 1 if you have the declaration of `strerror_r', and to 0 if you - don't. */ -#define HAVE_DECL_STRERROR_R 1 - -/* Define to 1 if you have the header file, and it defines `DIR'. - */ -#define HAVE_DIRENT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_DLFCN_H 1 - -/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */ -/* #undef HAVE_DOPRNT */ - -/* Define to 1 if you have the header file. */ -#define HAVE_ERRNO_H 1 - -/* Define to 1 if you have the header file. 
*/ -#define HAVE_FCNTL_H 1 - -/* Define to 1 if you have the `fdatasync' function. */ -#define HAVE_FDATASYNC 1 - -/* Define to 1 if you have the `finite' function. */ -/* #undef HAVE_FINITE */ - -/* Define to 1 if you have the header file. */ -#define HAVE_FLOAT_H 1 - -/* Define to 1 if you have the `fpclass' function. */ -/* #undef HAVE_FPCLASS */ - -/* Define to 1 if you have the `fpclassify' function. */ -#define HAVE_FPCLASSIFY 1 - -/* Define to 1 if you have the `fp_class' function. */ -/* #undef HAVE_FP_CLASS */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_FP_CLASS_H */ - -/* Define to 1 if you have the `fsync' function. */ -#define HAVE_FSYNC 1 - -/* Define to 1 if you have the `getaddrinfo' function. */ -#define HAVE_GETADDRINFO 1 - -/* Define to 1 if you have the `getpagesize' function. */ -#define HAVE_GETPAGESIZE 1 - -/* Define to 1 if you have the `getrusage' function. */ -#define HAVE_GETRUSAGE 1 - -/* Define to 1 if you have the `gettimeofday' function. */ -#define HAVE_GETTIMEOFDAY 1 - -/* Define to 1 if you have the `getuid' function. */ -#define HAVE_GETUID 1 - -/* Define to 1 if you have the `hosts_access' function. */ -/* #undef HAVE_HOSTS_ACCESS */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_IEEEFP_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_INTTYPES_H 1 - -/* Define to 1 if you have the global variable 'int opterr'. */ -#define HAVE_INT_OPTERR 1 - -/* Define to 1 if you have the `isfinite' function. */ -#define HAVE_ISFINITE 1 - -/* Define to 1 if you have the `isinf' function. */ -#define HAVE_ISINF 1 - -/* Define to 1 if you have the `isnan' function. */ -#define HAVE_ISNAN 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_LANGINFO_H 1 - -/* have got libdbi installed */ -/* #undef HAVE_LIBDBI */ - -/* Define to 1 if you have the header file. 
*/ -#define HAVE_LIBGEN_H 1 - -/* have got libwrap installed */ -/* #undef HAVE_LIBWRAP */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_LUALIB_H */ - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_LUA_H */ - - -/* Define to 1 if you have the header file. */ -#define HAVE_MATH_H 1 - -/* Define to 1 if you have the `mbstowcs' function. */ -#define HAVE_MBSTOWCS 1 - -/* Define to 1 if you have the `memmove' function. */ -#define HAVE_MEMMOVE 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_MEMORY_H 1 - -/* Define to 1 if you have the `mktime' function. */ -#define HAVE_MKTIME 1 - - -/* Define to 1 if you have the `msync' function. */ -#define HAVE_MSYNC 1 - -/* Define to 1 if you have the `munmap' function. */ -#define HAVE_MUNMAP 1 - -/* Define to 1 if you have the header file, and it defines `DIR'. */ -/* #undef HAVE_NDIR_H */ - -/* Define to 1 if you have the `nl_langinfo' function. */ -#define HAVE_NL_LANGINFO 1 - -/* Define to 1 if you have the `opendir' function. */ -#define HAVE_OPENDIR 1 - -/* Define to 1 if you have the `posix_madvise' function. */ -/* #undef HAVE_POSIX_MADVISE */ - -/* Define if you have POSIX threads libraries and header files. */ -/* #undef HAVE_PTHREAD */ - -/* Define to 1 if you have the `readdir' function. */ -#define HAVE_READDIR 1 - -/* Define to 1 if you have the `rint' function. */ -#define HAVE_RINT 1 - -/* Define to 1 if you have the `round' function. */ -#define HAVE_ROUND 1 - -/* is rrd_graph supported by this install */ -/* #define HAVE_RRD_GRAPH */ - -/* Define to 1 if you have the header file. */ -#define HAVE_SIGNAL_H 1 - -/* Define to 1 if you have the `snprintf' function. */ -#define HAVE_SNPRINTF 1 - -/* Define to 1 if you have the `socket' function. */ -#define HAVE_SOCKET 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDINT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDIO_H 1 - -/* Define to 1 if you have the header file. 
*/ -#define HAVE_STDLIB_H 1 - -/* Define to 1 if you have the `strchr' function. */ -#define HAVE_STRCHR 1 - -/* Define to 1 if you have the `strerror' function. */ -/* #define HAVE_STRERROR 1 */ - -/* Define to 1 if you have the `strerror_r' function. */ -/* #define HAVE_STRERROR_R 1 */ - -/* Define to 1 if you have the `strftime' function. */ -#define HAVE_STRFTIME 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRINGS_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRING_H 1 - -/* Define to 1 if you have the header file, and it defines `DIR'. - */ -/* #undef HAVE_SYS_DIR_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_MMAN_H 1 - -/* Define to 1 if you have the header file, and it defines `DIR'. - */ -/* #undef HAVE_SYS_NDIR_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_PARAM_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_RESOURCE_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_STAT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TIMES_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TYPES_H 1 - -/* is there an external timezone variable instead ? */ -/* #undef HAVE_TIMEZONE */ - -/* does tm have a tm_gmtoff member */ -#define HAVE_TM_GMTOFF 1 - -/* Define to 1 if you have the `tzset' function. */ -#define HAVE_TZSET 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_UNISTD_H 1 - -/* Define to 1 if you have the `vasprintf' function. */ -#define HAVE_VASPRINTF 1 - -/* Define to 1 if you have the `vprintf' function. */ -#define HAVE_VPRINTF 1 - -/* Define to 1 if you have the `vsnprintf' function. */ -#define HAVE_VSNPRINTF 1 - -/* Define to the sub-directory in which libtool stores uninstalled libraries. - */ -#define LT_OBJDIR ".libs/" - -/* Name of package */ -#define PACKAGE "rrdtool" - -/* Define to the address where bug reports for this package should be sent. 
*/ -#define PACKAGE_BUGREPORT "support@oetiker.ch" - -/* Define to the full name of this package. */ -#define PACKAGE_NAME "rrdtool" - -/* Define to the full name and version of this package. */ -#define PACKAGE_STRING "rrdtool 1.4.9" - -/* Define to the one symbol short name of this package. */ -#define PACKAGE_TARNAME "rrdtool" - -/* Define to the home page for this package. */ -#define PACKAGE_URL "" - -/* Define to the version of this package. */ -#define PACKAGE_VERSION "1.4.9" - -/* Define to necessary symbol if this constant uses a non-standard name on - your system. */ -/* #undef PTHREAD_CREATE_JOINABLE */ - -/* Vertical label angle: -90.0 (default) or 90.0 */ -#define RRDGRAPH_YLEGEND_ANGLE 90.0 - -/* The size of `time_t', as computed by sizeof. */ -#define SIZEOF_TIME_T 8 - -/* Define to 1 if you have the ANSI C header files. */ -#define STDC_HEADERS 1 - -/* Define to 1 if you can safely include both and . */ -#define TIME_WITH_SYS_TIME 1 - -/* the real name of tm_gmtoff */ -#define TM_GMTOFF tm_gmtoff - -/* Define to 1 if your declares `struct tm'. */ -/* #undef TM_IN_SYS_TIME */ - -/* Enable extensions on AIX 3, Interix. */ -#ifndef _ALL_SOURCE -# define _ALL_SOURCE 1 -#endif -/* Enable GNU extensions on systems that have them. */ -#ifndef _GNU_SOURCE -# define _GNU_SOURCE 1 -#endif -/* Enable threading extensions on Solaris. */ -#ifndef _POSIX_PTHREAD_SEMANTICS -# define _POSIX_PTHREAD_SEMANTICS 1 -#endif -/* Enable extensions on HP NonStop. */ -#ifndef _TANDEM_SOURCE -# define _TANDEM_SOURCE 1 -#endif -/* Enable general extensions on Solaris. */ -#ifndef __EXTENSIONS__ -# define __EXTENSIONS__ 1 -#endif - - -/* Version number of package */ -#define VERSION "1.4.9" - -/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most - significant byte first (like Motorola and SPARC, unlike Intel). 
*/ -#if defined AC_APPLE_UNIVERSAL_BUILD -# if defined __BIG_ENDIAN__ -# define WORDS_BIGENDIAN 1 -# endif -#else -# ifndef WORDS_BIGENDIAN -/* # undef WORDS_BIGENDIAN */ -# endif -#endif - -/* Define to 1 if on MINIX. */ -/* #undef _MINIX */ - -/* Define to 2 if the system does not provide POSIX.1 features except with - this defined. */ -/* #undef _POSIX_1_SOURCE */ - -/* Define to 1 if you need to in order for `stat' and other things to work. */ -/* #undef _POSIX_SOURCE */ - -/* Define to empty if `const' does not conform to ANSI C. */ -/* #undef const */ - - - -#ifdef MUST_HAVE_MALLOC_MALLOC_H -# include -#endif - -#include "rrd_config_bottom.h" - -#endif - diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_config_bottom.h b/vendor/github.com/open-falcon/rrdlite/rrd_config_bottom.h deleted file mode 100644 index c48a06f1..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_config_bottom.h +++ /dev/null @@ -1,241 +0,0 @@ -#ifndef RRD_CONFIG_BOTTOM_H -#define RRD_CONFIG_BOTTOM_H - -/* make sure that we pickup the correct stuff from all headers */ -#ifdef HAVE_FEATURES_H -# ifdef _XOPEN_SOURCE -# undef _XOPEN_SOURCE -# endif -# ifdef _BSD_SOURCE -# undef _BSD_SOURCE -# endif -# define _XOPEN_SOURCE 600 -# define _BSD_SOURCE 1 -# include -#endif - -/* FreeBSD 4.8 wants this included BEFORE sys/types.h */ -#ifdef HAVE_SYS_MMAN_H -# include -#endif - -#ifdef HAVE_SYS_TYPES_H -# include -#endif - -#ifdef HAVE_SYS_PARAM_H -# include -#endif -#ifndef MAXPATH -# ifdef PATH_MAX -# define MAXPATH PATH_MAX -# endif -#endif -#ifndef MAXPATH -/* else try the BSD variant */ -# ifdef MAXPATHLEN -# define MAXPATH MAXPATHLEN -# endif -#endif - -#ifdef HAVE_ERRNO_H -# include -#endif - -#if !defined HAVE_MADVISE && defined HAVE_POSIX_MADVISE -/* use posix_madvise family */ -# define madvise posix_madvise -# define MADV_NORMAL POSIX_MADV_NORMAL -# define MADV_RANDOM POSIX_MADV_RANDOM -# define MADV_SEQUENTIAL POSIX_MADV_SEQUENTIAL -# define MADV_WILLNEED POSIX_MADV_WILLNEED 
-# define MADV_DONTNEED POSIX_MADV_DONTNEED -#endif -#if defined HAVE_MADVISE || defined HAVE_POSIX_MADVISE -# define USE_MADVISE 1 -#endif - -#ifdef HAVE_SYS_STAT_H -# include -#endif - -#ifdef HAVE_FCNTL_H -#include -#endif - -#ifdef HAVE_UNISTD_H -# include -#endif - -#ifdef TIME_WITH_SYS_TIME -# include -# include -#else -# ifdef HAVE_SYS_TIME_H -# include -# else -# include -# endif -#endif - -#ifdef HAVE_SYS_TIMES_H -# include -#endif - -#ifdef HAVE_SYS_RESOURCE_H -# include -#if (defined(__svr4__) && defined(__sun__)) -/* Solaris headers (pre 2.6) do not have a getrusage prototype. - Use this instead. */ -extern int getrusage(int, struct rusage *); -#endif /* __svr4__ && __sun__ */ -#endif - - -/* define strrchr, strchr and memcpy, memmove in terms of bsd funcs - make sure you are NOT using bcopy, index or rindex in the code */ - -#ifdef STDC_HEADERS -# include -#else -# ifndef HAVE_STRCHR -# define strchr index -# define strrchr rindex -# endif -char *strchr (), *strrchr (); -# ifndef HAVE_MEMMOVE -# define memcpy(d, s, n) bcopy ((s), (d), (n)) -# define memmove(d, s, n) bcopy ((s), (d), (n)) -# endif -#endif - -#ifdef NO_NULL_REALLOC -# define rrd_realloc(a,b) ( (a) == NULL ? 
malloc( (b) ) : realloc( (a) , (b) )) -#else -# define rrd_realloc(a,b) realloc((a), (b)) -#endif - -#ifdef HAVE_STDIO_H -# include -#endif - -#ifdef HAVE_STDLIB_H -# include -#endif - -#ifdef HAVE_CTYPE_H -# include -#endif - -#ifdef HAVE_DIRENT_H -# include -# define NAMLEN(dirent) strlen((dirent)->d_name) -#else -# define dirent direct -# define NAMLEN(dirent) (dirent)->d_namlen -# ifdef HAVE_SYS_NDIR_H -# include -# endif -# ifdef HAVE_SYS_DIR_H -# include -# endif -# ifdef HAVE_NDIR_H -# include -# endif -#endif - -#ifdef MUST_DISABLE_SIGFPE -# include -#endif - -#ifdef MUST_DISABLE_FPMASK -# include -#endif - - -#ifdef HAVE_MATH_H -# include -#endif - -#ifdef HAVE_FLOAT_H -# include -#endif - -#ifdef HAVE_IEEEFP_H -# include -#endif - -#ifdef HAVE_FP_CLASS_H -# include -#endif - -/* for Solaris */ -#if (! defined(HAVE_ISINF) && defined(HAVE_FPCLASS)) -# define HAVE_ISINF 1 -# ifdef isinf -# undef isinf -# endif -# define isinf(a) (fpclass(a) == FP_NINF || fpclass(a) == FP_PINF) -#endif - -/* solaris 8/9 has rint but not round */ -#if (! defined(HAVE_ROUND) && defined(HAVE_RINT)) -# define round rint -#endif - -/* solaris 10 it defines isnan such that only forte can compile it ... bad bad */ -#if (defined(HAVE_ISNAN) && defined(isnan) && defined(HAVE_FPCLASS)) -# undef isnan -# define isnan(a) (fpclass(a) == FP_SNAN || fpclass(a) == FP_QNAN) -#endif - -/* for OSF1 Digital Unix */ -#if (! defined(HAVE_ISINF) && defined(HAVE_FP_CLASS) && defined(HAVE_FP_CLASS_H)) -# define HAVE_ISINF 1 -# define isinf(a) (fp_class(a) == FP_NEG_INF || fp_class(a) == FP_POS_INF) -#endif - -#if (! defined(HAVE_ISINF) && defined(HAVE_FPCLASSIFY) && defined(FP_PLUS_INF) && defined(FP_MINUS_INF)) -# define HAVE_ISINF 1 -# define isinf(a) (fpclassify(a) == FP_MINUS_INF || fpclassify(a) == FP_PLUS_INF) -#endif - -#if (! 
defined(HAVE_ISINF) && defined(HAVE_FPCLASSIFY) && defined(FP_INFINITE)) -# define HAVE_ISINF 1 -# define isinf(a) (fpclassify(a) == FP_INFINITE) -#endif - -/* for AIX */ -#if (! defined(HAVE_ISINF) && defined(HAVE_CLASS)) -# define HAVE_ISINF 1 -# define isinf(a) (class(a) == FP_MINUS_INF || class(a) == FP_PLUS_INF) -#endif - -#if (! defined (HAVE_FINITE) && defined (HAVE_ISFINITE)) -# define HAVE_FINITE 1 -# define finite(a) isfinite(a) -#endif - -#if (! defined(HAVE_FINITE) && defined(HAVE_ISNAN) && defined(HAVE_ISINF)) -# define HAVE_FINITE 1 -# define finite(a) (! isnan(a) && ! isinf(a)) -#endif - -#ifndef HAVE_FINITE -#error "Can't compile without finite function" -#endif - -#ifndef HAVE_ISINF -#error "Can't compile without isinf function" -#endif - -#if (! defined(HAVE_FDATASYNC) && defined(HAVE_FSYNC)) -#define fdatasync fsync -#endif - -#if (!defined(HAVE_FDATASYNC) && !defined(HAVE_FSYNC)) -#error "Can't compile with without fsync and fdatasync" -#endif - -#endif /* RRD_CONFIG_BOTTOM_H */ - diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_create.c b/vendor/github.com/open-falcon/rrdlite/rrd_create.c deleted file mode 100644 index 7f39935f..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_create.c +++ /dev/null @@ -1,731 +0,0 @@ -/***************************************************************************** - * RRDtool 1.4.9 Copyright by Tobi Oetiker, 1997-2014 - ***************************************************************************** - * rrd_create.c creates new rrds - *****************************************************************************/ - -#include -#include -#include - -#include "rrd_error.h" -#include "rrd_tool.h" -#include "rrd_rpncalc.h" -#include "rrd_hw.h" -#ifndef RRD_LITE -#include "rrd_client.h" -#endif -#include "rrd_config.h" - -#include "rrd_is_thread_safe.h" -static int opt_no_overwrite = 0; - -#ifdef WIN32 -# include -#endif - -unsigned long FnvHash( const char *str); -int create_hw_contingent_rras( rrd_t *rrd, - 
unsigned short period, unsigned long hashed_name); -int parseGENERIC_DS( const char *def, rrd_t *rrd, int ds_idx); -static void rrd_free2( rrd_t *rrd); /* our onwn copy, immmune to mmap */ - -/* #define DEBUG */ -int rrd_create_r( const char *filename, unsigned long pdp_step, - time_t last_up, int argc, const char **argv) { - rrd_t rrd; - long i; - int offset; - char *token; - char dummychar1[2], dummychar2[2]; - unsigned short token_idx, error_flag, period = 0; - unsigned long hashed_name; - int ret = 0; - - /* init rrd clean */ - rrd_init(&rrd); - /* static header */ - if ((rrd.stat_head = (stat_head_t*)calloc(1, sizeof(stat_head_t))) == NULL) { - rrd_free2(&rrd); - return -RRD_ERR_ALLOC; - } - - /* live header */ - if ((rrd.live_head = (live_head_t*)calloc(1, sizeof(live_head_t))) == NULL) { - rrd_free2(&rrd); - return -RRD_ERR_ALLOC; - } - - /* set some defaults */ - strcpy(rrd.stat_head->cookie, RRD_COOKIE); - strcpy(rrd.stat_head->version, RRD_VERSION3); /* by default we are still version 3 */ - rrd.stat_head->float_cookie = FLOAT_COOKIE; - rrd.stat_head->ds_cnt = 0; /* this will be adjusted later */ - rrd.stat_head->rra_cnt = 0; /* ditto */ - rrd.stat_head->pdp_step = pdp_step; /* 5 minute default */ - - /* a default value */ - rrd.ds_def = NULL; - rrd.rra_def = NULL; - - rrd.live_head->last_up = last_up; - - /* optind points to the first non-option command line arg, - * in this case, the file name. */ - /* Compute the FNV hash value (used by SEASONAL and DEVSEASONAL - * arrays. 
*/ - hashed_name = FnvHash(filename); - for (i = 0; i < argc; i++) { - unsigned int ii; - - if (strncmp(argv[i], "DS:", 3) == 0) { - size_t old_size = sizeof(ds_def_t) * (rrd.stat_head->ds_cnt); - - if ((rrd.ds_def = (ds_def_t*)rrd_realloc(rrd.ds_def, - old_size + sizeof(ds_def_t))) == - NULL) { - rrd_free2(&rrd); - return -RRD_ERR_ALLOC; - } - memset(&rrd.ds_def[rrd.stat_head->ds_cnt], 0, sizeof(ds_def_t)); - /* extract the name and type */ - switch (sscanf(&argv[i][3], - DS_NAM_FMT "%1[:]" DST_FMT "%1[:]%n", - rrd.ds_def[rrd.stat_head->ds_cnt].ds_nam, - dummychar1, - rrd.ds_def[rrd.stat_head->ds_cnt].dst, - dummychar2, &offset)) { - case 0: - case 1: - ret = -RRD_ERR_INVALID_DS_NAME; - break; - case 2: - case 3: - ret = -RRD_ERR_INVALID_DS_TYPE; - break; - case 4: /* (%n may or may not be counted) */ - case 5: /* check for duplicate datasource names */ - for (ii = 0; ii < rrd.stat_head->ds_cnt; ii++) - if (strcmp(rrd.ds_def[rrd.stat_head->ds_cnt].ds_nam, - rrd.ds_def[ii].ds_nam) == 0) - ret = -RRD_ERR_DUPLICATE_DS_NAME; - /* DS_type may be valid or not. 
Checked later */ - break; - default: - ret = -RRD_ERR_INVALID_DS_FORMAT; - } - if (ret) { - rrd_free2(&rrd); - return ret; - } - - /* parse the remainder of the arguments */ - switch (dst_conv(rrd.ds_def[rrd.stat_head->ds_cnt].dst)) { - case DST_COUNTER: - case DST_ABSOLUTE: - case DST_GAUGE: - case DST_DERIVE: - ret = parseGENERIC_DS(&argv[i][offset + 3], &rrd, - rrd.stat_head->ds_cnt); - break; - case DST_CDEF: - ret = parseCDEF_DS(&argv[i][offset + 3], &rrd, - rrd.stat_head->ds_cnt); - break; - default: - ret = -RRD_ERR_INVALID_DS_TYPE_SPEC; - break; - } - - if (ret) { - rrd_free2(&rrd); - return ret; - } - rrd.stat_head->ds_cnt++; - } else if (strncmp(argv[i], "RRA:", 4) == 0) { - char *argvcopy; - char *tokptr = ""; - int cf_id = -1; - size_t old_size = sizeof(rra_def_t) * (rrd.stat_head->rra_cnt); - int row_cnt; - int token_min = 4; - if ((rrd.rra_def = (rra_def_t*)rrd_realloc(rrd.rra_def, - old_size + sizeof(rra_def_t))) == - NULL) { - rrd_free2(&rrd); - return -RRD_ERR_ALLOC; - } - memset(&rrd.rra_def[rrd.stat_head->rra_cnt], 0, - sizeof(rra_def_t)); - - argvcopy = strdup(argv[i]); - token = strtok_r(&argvcopy[4], ":", &tokptr); - token_idx = error_flag = 0; - - while (token != NULL) { - switch (token_idx) { - case 0: - if (sscanf(token, CF_NAM_FMT, - rrd.rra_def[rrd.stat_head->rra_cnt].cf_nam) != - 1) - ret = -RRD_ERR_FAILED_PARSE_CF_NAME; - cf_id = cf_conv(rrd.rra_def[rrd.stat_head->rra_cnt].cf_nam); - switch (cf_id) { - case CF_MHWPREDICT: - strcpy(rrd.stat_head->version, RRD_VERSION); /* MHWPREDICT causes Version 4 */ - case CF_HWPREDICT: - token_min = 5; - /* initialize some parameters */ - rrd.rra_def[rrd.stat_head->rra_cnt].par[RRA_hw_alpha]. - u_val = 0.1; - rrd.rra_def[rrd.stat_head->rra_cnt].par[RRA_hw_beta]. - u_val = 1.0 / 288; - rrd.rra_def[rrd.stat_head->rra_cnt]. 
- par[RRA_dependent_rra_idx].u_cnt = - rrd.stat_head->rra_cnt; - break; - case CF_DEVSEASONAL: - token_min = 3; - case CF_SEASONAL: - if (cf_id == CF_SEASONAL){ - token_min = 4; - } - /* initialize some parameters */ - rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_seasonal_gamma].u_val = 0.1; - rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_seasonal_smoothing_window].u_val = 0.05; - /* fall through */ - case CF_DEVPREDICT: - if (cf_id == CF_DEVPREDICT){ - token_min = 3; - } - rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_dependent_rra_idx].u_cnt = -1; - break; - case CF_FAILURES: - token_min = 5; - rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_delta_pos].u_val = 2.0; - rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_delta_neg].u_val = 2.0; - rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_window_len].u_cnt = 3; - rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_failure_threshold].u_cnt = 2; - rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_dependent_rra_idx].u_cnt = -1; - break; - /* invalid consolidation function */ - case -1: - ret = -RRD_ERR_UNREC_CONSOLIDATION_FUNC; - default: - break; - } - /* default: 1 pdp per cdp */ - rrd.rra_def[rrd.stat_head->rra_cnt].pdp_cnt = 1; - break; - case 1: - switch (cf_conv(rrd.rra_def[rrd.stat_head->rra_cnt].cf_nam)) { - case CF_HWPREDICT: - case CF_MHWPREDICT: - case CF_DEVSEASONAL: - case CF_SEASONAL: - case CF_DEVPREDICT: - case CF_FAILURES: - row_cnt = atoi(token); - if (row_cnt <= 0) - ret = -RRD_ERR_INVALID_ROW_COUNT; - rrd.rra_def[rrd.stat_head->rra_cnt].row_cnt = row_cnt; - break; - default: - rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_cdp_xff_val].u_val = atof(token); - if (rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_cdp_xff_val].u_val < 0.0 - || rrd.rra_def[rrd.stat_head->rra_cnt]. 
- par[RRA_cdp_xff_val].u_val >= 1.0) - ret = -RRD_ERR_INVALID_XFF; - break; - } - break; - case 2: - switch (cf_conv - (rrd.rra_def[rrd.stat_head->rra_cnt].cf_nam)) { - case CF_HWPREDICT: - case CF_MHWPREDICT: - rrd.rra_def[rrd.stat_head->rra_cnt].par[RRA_hw_alpha]. - u_val = atof(token); - if (atof(token) <= 0.0 || atof(token) >= 1.0) - ret = -RRD_ERR_INVALID_ALPHA; - break; - case CF_DEVSEASONAL: - case CF_SEASONAL: - rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_seasonal_gamma].u_val = atof(token); - if (atof(token) <= 0.0 || atof(token) >= 1.0) - ret = -RRD_ERR_INVALID_GAMMA; - rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_seasonal_smooth_idx].u_cnt = - hashed_name % - rrd.rra_def[rrd.stat_head->rra_cnt].row_cnt; - break; - case CF_FAILURES: - /* specifies the # of violations that constitutes the failure threshold */ - rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_failure_threshold].u_cnt = atoi(token); - if (atoi(token) < 1 - || atoi(token) > MAX_FAILURES_WINDOW_LEN) - ret = -RRD_ERR_FAILURE_THRESHOLD_OUT_OF_RANGE; - break; - case CF_DEVPREDICT: - /* specifies the index (1-based) of CF_DEVSEASONAL array - * associated with this CF_DEVPREDICT array. */ - rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_dependent_rra_idx].u_cnt = - atoi(token) - 1; - break; - default: - rrd.rra_def[rrd.stat_head->rra_cnt].pdp_cnt = - atoi(token); - if (atoi(token) < 1) - ret = -RRD_ERR_INVALID_STEP; - break; - } - break; - case 3: - switch (cf_conv(rrd.rra_def[rrd.stat_head->rra_cnt].cf_nam)) { - case CF_HWPREDICT: - case CF_MHWPREDICT: - rrd.rra_def[rrd.stat_head->rra_cnt].par[RRA_hw_beta]. - u_val = atof(token); - if (atof(token) < 0.0 || atof(token) > 1.0) - ret = -RRD_ERR_INVALID_BETA; - break; - case CF_DEVSEASONAL: - case CF_SEASONAL: - /* specifies the index (1-based) of CF_HWPREDICT array - * associated with this CF_DEVSEASONAL or CF_SEASONAL array. - * */ - rrd.rra_def[rrd.stat_head->rra_cnt]. 
- par[RRA_dependent_rra_idx].u_cnt = - atoi(token) - 1; - break; - case CF_FAILURES: - /* specifies the window length */ - rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_window_len].u_cnt = atoi(token); - if (atoi(token) < 1 - || atoi(token) > MAX_FAILURES_WINDOW_LEN) - ret = RRD_ERR_WIN_LEN_OUT_OF_RANGE; - /* verify that window length exceeds the failure threshold */ - if (rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_window_len].u_cnt < - rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_failure_threshold].u_cnt) - ret = -RRD_ERR_WINLEN_SHORTER_FAILURE_THRESHOLD; - break; - case CF_DEVPREDICT: - /* shouldn't be any more arguments */ - ret = -RRD_ERR_INVALID_ARG1; - break; - default: - row_cnt = atoi(token); - if (row_cnt <= 0) - ret = -RRD_ERR_INVALID_ROW_COUNT; -#if SIZEOF_TIME_T == 4 - if ((long long) pdp_step * rrd.rra_def[rrd.stat_head->rra_cnt].pdp_cnt * row_cnt > 4294967296LL){ - /* database timespan > 2**32, would overflow time_t */ - ret = -RRD_ERR_TIME_TOO_LARGE; - } -#endif - rrd.rra_def[rrd.stat_head->rra_cnt].row_cnt = row_cnt; - break; - } - break; - case 4: - switch (cf_conv(rrd.rra_def[rrd.stat_head->rra_cnt].cf_nam)) { - case CF_FAILURES: - /* specifies the index (1-based) of CF_DEVSEASONAL array - * associated with this CF_DEVFAILURES array. */ - rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_dependent_rra_idx].u_cnt = - atoi(token) - 1; - break; - case CF_DEVSEASONAL: - case CF_SEASONAL: - /* optional smoothing window */ - if (sscanf(token, "smoothing-window=%lf", - &(rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_seasonal_smoothing_window]. - u_val))) { - strcpy(rrd.stat_head->version, RRD_VERSION); /* smoothing-window causes Version 4 */ - if (rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_seasonal_smoothing_window].u_val < 0.0 - || rrd.rra_def[rrd.stat_head->rra_cnt]. 
- par[RRA_seasonal_smoothing_window].u_val > - 1.0) { - ret = -RRD_ERR_INVALID_SMOOTHING_WINDOW; - } - } else { - ret = -RRD_ERR_INVALID_OPT; - } - break; - case CF_HWPREDICT: - case CF_MHWPREDICT: - /* length of the associated CF_SEASONAL and CF_DEVSEASONAL arrays. */ - period = atoi(token); - if (period > - rrd.rra_def[rrd.stat_head->rra_cnt].row_cnt) - ret = -RRD_ERR_LEN_OF_SEASONAL_CYCLE; - break; - default: - /* shouldn't be any more arguments */ - ret = -RRD_ERR_INVALID_ARG2; - break; - } - break; - case 5: - /* If we are here, this must be a CF_HWPREDICT RRA. - * Specifies the index (1-based) of CF_SEASONAL array - * associated with this CF_HWPREDICT array. If this argument - * is missing, then the CF_SEASONAL, CF_DEVSEASONAL, CF_DEVPREDICT, - * CF_FAILURES. - * arrays are created automatically. */ - rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_dependent_rra_idx].u_cnt = atoi(token) - 1; - break; - default: - /* should never get here */ - ret = -RRD_ERR_UNKNOWN_ERROR; - break; - } /* end switch */ - if (ret) { - /* all errors are unrecoverable */ - free(argvcopy); - rrd_free2(&rrd); - return ret; - } - token = strtok_r(NULL, ":", &tokptr); - token_idx++; - } /* end while */ - free(argvcopy); - if (token_idx < token_min){ - rrd_free2(&rrd); - return(-RRD_ERR_ARG3); - } -#ifdef DEBUG - fprintf(stderr, - "Creating RRA CF: %s, dep idx %lu, current idx %lu\n", - rrd.rra_def[rrd.stat_head->rra_cnt].cf_nam, - rrd.rra_def[rrd.stat_head->rra_cnt]. - par[RRA_dependent_rra_idx].u_cnt, rrd.stat_head->rra_cnt); -#endif - /* should we create CF_SEASONAL, CF_DEVSEASONAL, and CF_DEVPREDICT? */ - if ((cf_conv(rrd.rra_def[rrd.stat_head->rra_cnt].cf_nam) == - CF_HWPREDICT - || cf_conv(rrd.rra_def[rrd.stat_head->rra_cnt].cf_nam) == - CF_MHWPREDICT) - && rrd.rra_def[rrd.stat_head->rra_cnt]. 
- par[RRA_dependent_rra_idx].u_cnt == rrd.stat_head->rra_cnt) { -#ifdef DEBUG - fprintf(stderr, "Creating HW contingent RRAs\n"); -#endif - if (create_hw_contingent_rras(&rrd, period, hashed_name) == - -1) { - rrd_free2(&rrd); - return (-RRD_ERR_CREATING_RRA); - } - } - rrd.stat_head->rra_cnt++; - } else { - rrd_free2(&rrd); - return (-RRD_ERR_ARG4); - } - } - - - if (rrd.stat_head->rra_cnt < 1) { - rrd_free2(&rrd); - return (-RRD_ERR_ARG5); - } - - if (rrd.stat_head->ds_cnt < 1) { - rrd_free2(&rrd); - return (-RRD_ERR_ARG6); - } - return rrd_create_fn(filename, &rrd); -} - -int parseGENERIC_DS( const char *def, rrd_t *rrd, int ds_idx) { - char minstr[DS_NAM_SIZE], maxstr[DS_NAM_SIZE]; - char *old_locale; - int ret = 0; - - /* - int temp; - - temp = sscanf(def,"%lu:%18[^:]:%18[^:]", - &(rrd -> ds_def[ds_idx].par[DS_mrhb_cnt].u_cnt), - minstr,maxstr); - */ - old_locale = setlocale(LC_NUMERIC, "C"); - if (sscanf(def, "%lu:%18[^:]:%18[^:]", - &(rrd->ds_def[ds_idx].par[DS_mrhb_cnt].u_cnt), - minstr, maxstr) == 3) { - if (minstr[0] == 'U' && minstr[1] == 0) - rrd->ds_def[ds_idx].par[DS_min_val].u_val = DNAN; - else - rrd->ds_def[ds_idx].par[DS_min_val].u_val = atof(minstr); - - if (maxstr[0] == 'U' && maxstr[1] == 0) - rrd->ds_def[ds_idx].par[DS_max_val].u_val = DNAN; - else - rrd->ds_def[ds_idx].par[DS_max_val].u_val = atof(maxstr); - - if (!isnan(rrd->ds_def[ds_idx].par[DS_min_val].u_val) && - !isnan(rrd->ds_def[ds_idx].par[DS_max_val].u_val) && - rrd->ds_def[ds_idx].par[DS_min_val].u_val - >= rrd->ds_def[ds_idx].par[DS_max_val].u_val) { - ret = -RRD_ERR_ARG7; - setlocale(LC_NUMERIC, old_locale); - return ret; - } - } else { - ret = -RRD_ERR_ARG8; - } - setlocale(LC_NUMERIC, old_locale); - return ret; -} - -/* Create the CF_DEVPREDICT, CF_DEVSEASONAL, CF_SEASONAL, and CF_FAILURES RRAs - * associated with a CF_HWPREDICT RRA. 
*/ -int create_hw_contingent_rras( - rrd_t *rrd, - unsigned short period, - unsigned long hashed_name) -{ - size_t old_size; - rra_def_t *current_rra; - - /* save index to CF_HWPREDICT */ - unsigned long hw_index = rrd->stat_head->rra_cnt; - - /* advance the pointer */ - (rrd->stat_head->rra_cnt)++; - /* allocate the memory for the 4 contingent RRAs */ - old_size = sizeof(rra_def_t) * (rrd->stat_head->rra_cnt); - if ((rrd->rra_def = (rra_def_t*)rrd_realloc(rrd->rra_def, - old_size + 4 * sizeof(rra_def_t))) == - NULL) { - rrd_free2(rrd); - return (-RRD_ERR_ALLOC); - } - /* clear memory */ - memset(&(rrd->rra_def[rrd->stat_head->rra_cnt]), 0, - 4 * sizeof(rra_def_t)); - - /* create the CF_SEASONAL RRA */ - current_rra = &(rrd->rra_def[rrd->stat_head->rra_cnt]); - strcpy(current_rra->cf_nam, "SEASONAL"); - current_rra->row_cnt = period; - current_rra->par[RRA_seasonal_smooth_idx].u_cnt = hashed_name % period; - current_rra->pdp_cnt = 1; - current_rra->par[RRA_seasonal_gamma].u_val = - rrd->rra_def[hw_index].par[RRA_hw_alpha].u_val; - current_rra->par[RRA_dependent_rra_idx].u_cnt = hw_index; - rrd->rra_def[hw_index].par[RRA_dependent_rra_idx].u_cnt = - rrd->stat_head->rra_cnt; - - /* create the CF_DEVSEASONAL RRA */ - (rrd->stat_head->rra_cnt)++; - current_rra = &(rrd->rra_def[rrd->stat_head->rra_cnt]); - strcpy(current_rra->cf_nam, "DEVSEASONAL"); - current_rra->row_cnt = period; - current_rra->par[RRA_seasonal_smooth_idx].u_cnt = hashed_name % period; - current_rra->pdp_cnt = 1; - current_rra->par[RRA_seasonal_gamma].u_val = - rrd->rra_def[hw_index].par[RRA_hw_alpha].u_val; - current_rra->par[RRA_dependent_rra_idx].u_cnt = hw_index; - - /* create the CF_DEVPREDICT RRA */ - (rrd->stat_head->rra_cnt)++; - current_rra = &(rrd->rra_def[rrd->stat_head->rra_cnt]); - strcpy(current_rra->cf_nam, "DEVPREDICT"); - current_rra->row_cnt = (rrd->rra_def[hw_index]).row_cnt; - current_rra->pdp_cnt = 1; - current_rra->par[RRA_dependent_rra_idx].u_cnt = hw_index + 2; /* DEVSEASONAL 
*/ - - /* create the CF_FAILURES RRA */ - (rrd->stat_head->rra_cnt)++; - current_rra = &(rrd->rra_def[rrd->stat_head->rra_cnt]); - strcpy(current_rra->cf_nam, "FAILURES"); - current_rra->row_cnt = period; - current_rra->pdp_cnt = 1; - current_rra->par[RRA_delta_pos].u_val = 2.0; - current_rra->par[RRA_delta_neg].u_val = 2.0; - current_rra->par[RRA_failure_threshold].u_cnt = 7; - current_rra->par[RRA_window_len].u_cnt = 9; - current_rra->par[RRA_dependent_rra_idx].u_cnt = hw_index + 2; /* DEVSEASONAL */ - return 0; -} - -/* create and empty rrd file according to the specs given */ - -int rrd_create_fn( - const char *file_name, - rrd_t *rrd) -{ - unsigned long i, ii; - rrd_value_t *unknown; - int unkn_cnt; - rrd_file_t *rrd_file_dn; - rrd_t rrd_dn; - unsigned rrd_flags = RRD_READWRITE | RRD_CREAT; - int ret = 0; - - if (opt_no_overwrite) { - rrd_flags |= RRD_EXCL ; - } - - unkn_cnt = 0; - for (i = 0; i < rrd->stat_head->rra_cnt; i++) - unkn_cnt += rrd->stat_head->ds_cnt * rrd->rra_def[i].row_cnt; - - if ((rrd_file_dn = rrd_open(file_name, rrd, rrd_flags, &ret)) == NULL) { - rrd_free2(rrd); - return ret; - } - - rrd_write(rrd_file_dn, rrd->stat_head, sizeof(stat_head_t)); - - rrd_write(rrd_file_dn, rrd->ds_def, sizeof(ds_def_t) * rrd->stat_head->ds_cnt); - - rrd_write(rrd_file_dn, rrd->rra_def, - sizeof(rra_def_t) * rrd->stat_head->rra_cnt); - - rrd_write(rrd_file_dn, rrd->live_head, sizeof(live_head_t)); - - if ((rrd->pdp_prep = (pdp_prep_t*)calloc(1, sizeof(pdp_prep_t))) == NULL) { - rrd_free2(rrd); - rrd_close(rrd_file_dn); - return (-RRD_ERR_ALLOC); - } - - strcpy(rrd->pdp_prep->last_ds, "U"); - - rrd->pdp_prep->scratch[PDP_val].u_val = 0.0; - rrd->pdp_prep->scratch[PDP_unkn_sec_cnt].u_cnt = - rrd->live_head->last_up % rrd->stat_head->pdp_step; - - for (i = 0; i < rrd->stat_head->ds_cnt; i++) - rrd_write(rrd_file_dn, rrd->pdp_prep, sizeof(pdp_prep_t)); - - if ((rrd->cdp_prep = (cdp_prep_t*)calloc(1, sizeof(cdp_prep_t))) == NULL) { - rrd_free2(rrd); - 
rrd_close(rrd_file_dn); - return (-RRD_ERR_ALLOC); - } - - - for (i = 0; i < rrd->stat_head->rra_cnt; i++) { - switch (cf_conv(rrd->rra_def[i].cf_nam)) { - case CF_HWPREDICT: - case CF_MHWPREDICT: - init_hwpredict_cdp(rrd->cdp_prep); - break; - case CF_SEASONAL: - case CF_DEVSEASONAL: - init_seasonal_cdp(rrd->cdp_prep); - break; - case CF_FAILURES: - /* initialize violation history to 0 */ - for (ii = 0; ii < MAX_CDP_PAR_EN; ii++) { - /* We can zero everything out, by setting u_val to the - * NULL address. Each array entry in scratch is 8 bytes - * (a double), but u_cnt only accessed 4 bytes (long) */ - rrd->cdp_prep->scratch[ii].u_val = 0.0; - } - break; - default: - /* can not be zero because we don't know anything ... */ - rrd->cdp_prep->scratch[CDP_val].u_val = DNAN; - /* startup missing pdp count */ - rrd->cdp_prep->scratch[CDP_unkn_pdp_cnt].u_cnt = - ((rrd->live_head->last_up - - rrd->pdp_prep->scratch[PDP_unkn_sec_cnt].u_cnt) - % (rrd->stat_head->pdp_step - * rrd->rra_def[i].pdp_cnt)) / rrd->stat_head->pdp_step; - break; - } - - for (ii = 0; ii < rrd->stat_head->ds_cnt; ii++) { - rrd_write(rrd_file_dn, rrd->cdp_prep, sizeof(cdp_prep_t)); - } - } - - /* now, we must make sure that the rest of the rrd - struct is properly initialized */ - - if ((rrd->rra_ptr = (rra_ptr_t*)calloc(1, sizeof(rra_ptr_t))) == NULL) { - rrd_free2(rrd); - rrd_close(rrd_file_dn); - return -RRD_ERR_ALLOC; - } - - /* changed this initialization to be consistent with - * rrd_restore. With the old value (0), the first update - * would occur for cur_row = 1 because rrd_update increments - * the pointer a priori. 
*/ - for (i = 0; i < rrd->stat_head->rra_cnt; i++) { - rrd->rra_ptr->cur_row = rrd_select_initial_row(rrd_file_dn, i, &rrd->rra_def[i]); - rrd_write(rrd_file_dn, rrd->rra_ptr, sizeof(rra_ptr_t)); - } - - /* write the empty data area */ - if ((unknown = (rrd_value_t *) malloc(512 * sizeof(rrd_value_t))) == NULL) { - rrd_free2(rrd); - rrd_close(rrd_file_dn); - return -RRD_ERR_ALLOC; - } - for (i = 0; i < 512; ++i) - unknown[i] = DNAN; - - while (unkn_cnt > 0) { - if(rrd_write(rrd_file_dn, unknown, sizeof(rrd_value_t) * min(unkn_cnt, 512)) < 0) - { - return -RRD_ERR_CREATE_WRITE; - } - - unkn_cnt -= 512; - } - free(unknown); - rrd_free2(rrd); - if (rrd_close(rrd_file_dn) == -1) { - return -RRD_ERR_CREATE_WRITE; - } - /* flush all we don't need out of the cache */ - rrd_init(&rrd_dn); - if((rrd_file_dn = rrd_open(file_name, &rrd_dn, RRD_READONLY, &ret)) != NULL) - { - rrd_dontneed(rrd_file_dn, &rrd_dn); - /* rrd_free(&rrd_dn); */ - rrd_close(rrd_file_dn); - } - return ret; -} - - -static void rrd_free2( - rrd_t *rrd) -{ - free(rrd->live_head); - free(rrd->stat_head); - free(rrd->ds_def); - free(rrd->rra_def); - free(rrd->rra_ptr); - free(rrd->pdp_prep); - free(rrd->cdp_prep); - free(rrd->rrd_value); -} - diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_diff.c b/vendor/github.com/open-falcon/rrdlite/rrd_diff.c deleted file mode 100644 index 065932ac..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_diff.c +++ /dev/null @@ -1,123 +0,0 @@ -/***************************************************************************** - * RRDtool 1.4.9 Copyright by Tobi Oetiker, 1997-2014 - * This code is stolen from rateup (mrtg-2.x) by Dave Rand - ***************************************************************************** - * diff calculate the difference between two very long integers available as - * strings - ***************************************************************************** - * $Id$ - * $Log$ - * Revision 1.4 2003/03/10 00:30:34 oetiker - * handle cases 
with two negative numbers - * -- Sasha Mikheev - * - * Revision 1.3 2002/04/01 18:31:22 oetiker - * "!" takes a higher preference than "||" this means rrd_update N:: would - * segfault -- Oliver Cook - * - * Revision 1.2 2002/02/01 20:34:49 oetiker - * fixed version number and date/time - * - * Revision 1.1.1.1 2001/02/25 22:25:05 oetiker - * checkin - * - * Revision 1.1 1998/10/08 18:21:45 oetiker - * Initial revision - * - * Revision 1.3 1998/02/06 21:10:52 oetiker - * removed max define .. it is now in rrd_tool.h - * - * Revision 1.2 1997/12/07 20:38:03 oetiker - * ansified - * - * Revision 1.1 1997/11/28 23:31:59 oetiker - * Initial revision - * - *****************************************************************************/ - -#include -#include "rrd_tool.h" - -double rrd_diff( - char *a, - char *b) -{ - char res[LAST_DS_LEN + 1], *a1, *b1, *r1, *fix; - int c, x, m; - char a_neg = 0, b_neg = 0; - double result; - - while (!(isdigit((int) *a) || *a == 0)) { - if (*a == '-') - a_neg = 1; - a++; - } - fix = a; - while (isdigit((int) *fix)) - fix++; - *fix = 0; /* maybe there is some non digit data in the string */ - while (!(isdigit((int) *b) || *b == 0)) { - if (*b == '-') - b_neg = 1; - b++; - } - fix = b; - while (isdigit((int) *fix)) - fix++; - *fix = 0; /* maybe there is some non digit data in the string */ - if (!isdigit((int) *a) || !isdigit((int) *b)) - return DNAN; - if (a_neg + b_neg == 1) /* can not handle numbers with different signs yet */ - return DNAN; - a1 = &a[strlen(a) - 1]; - m = max(strlen(a), strlen(b)); - if (m > LAST_DS_LEN) - return DNAN; /* result string too short */ - - r1 = &res[m + 1]; - for (b1 = res; b1 <= r1; b1++) - *b1 = ' '; - b1 = &b[strlen(b) - 1]; - r1[1] = 0; /* Null terminate result */ - c = 0; - for (x = 0; x < m; x++) { - if (a1 >= a && b1 >= b) { - *r1 = ((*a1 - c) - *b1) + '0'; - } else if (a1 >= a) { - *r1 = (*a1 - c); - } else { - *r1 = ('0' - *b1 - c) + '0'; - } - if (*r1 < '0') { - *r1 += 10; - c = 1; - } else if 
(*r1 > '9') { /* 0 - 10 */ - *r1 -= 10; - c = 1; - } else { - c = 0; - } - a1--; - b1--; - r1--; - } - if (c) { - r1 = &res[m + 1]; - for (x = 0; isdigit((int) *r1) && x < m; x++, r1--) { - *r1 = ('9' - *r1 + c) + '0'; - if (*r1 > '9') { - *r1 -= 10; - c = 1; - } else { - c = 0; - } - } - result = -atof(res); - } else - result = atof(res); - - if (a_neg + b_neg == 2) /* both are negatives, reverse sign */ - result = -result; - - return result; -} diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_error.c b/vendor/github.com/open-falcon/rrdlite/rrd_error.c deleted file mode 100644 index 2303f9c2..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_error.c +++ /dev/null @@ -1,185 +0,0 @@ -/***************************************************************************** - * RRDtool 1.4.9 Copyright by Tobi Oetiker, 1997-2014 - ***************************************************************************** - * rrd_error.c Common Header File - ***************************************************************************** - * $Id$ - * $Log$ - * Revision 1.4 2003/02/22 21:57:03 oetiker - * a patch to avoid a memory leak and a Makefile.am patch to - * distribute all required source files -- Peter Stamfest - * - * Revision 1.3 2003/02/13 07:05:27 oetiker - * Find attached the patch I promised to send to you. Please note that there - * are three new source files (src/rrd_is_thread_safe.h, src/rrd_thread_safe.c - * and src/rrd_not_thread_safe.c) and the introduction of librrd_th. This - * library is identical to librrd, but it contains support code for per-thread - * global variables currently used for error information only. This is similar - * to how errno per-thread variables are implemented. librrd_th must be linked - * alongside of libpthred - * - * There is also a new file "THREADS", holding some documentation. 
- * - * -- Peter Stamfest - * - * Revision 1.2 2002/02/01 20:34:49 oetiker - * fixed version number and date/time - * - * Revision 1.1.1.1 2001/02/25 22:25:05 oetiker - * checkin - * - * changed by yubo@yubo.org - * - *************************************************************************** */ - - -#include -#include - -#include "rrd_error.h" - -char const *rrd_err_text[RRD_ERR_NUM] = { - "allocating error", /* RRD_ERR_ALLOC */ - "Invalid DS name", /* RRD_ERR_INVALID_DS_NAME */ - "Invalid DS type", /* RRD_ERR_INVALID_DS_TYPE */ - "Duplicate DS name", /* RRD_ERR_DUPLICATE_DS_NAME */ - "Invalid DS format", /* RRD_ERR_INVALID_DS_FORMAT */ - "Invalid DS type specified", /* RRD_ERR_INVALID_DS_TYPE_SPEC */ - "creating rrd error", /* RRD_ERR_CREATE_WRITE */ - "Failed to parse CF name", /* RRD_ERR_FAILED_PARSE_CF_NAME */ - "Unrecognized consolidation function", /* RRD_ERR_UNREC_CONSOLIDATION_FUNC */ - "Invalid row count", /* RRD_ERR_INVALID_ROW_COUNT */ - "Invalid xff: must be between 0 and 1", /* RRD_ERR_INVALID_XFF */ - "Invalid alpha: must be between 0 and 1", /* RRD_ERR_INVALID_ALPHA */ - "Invalid gamma: must be between 0 and 1", /* RRD_ERR_INVALID_GAMMA */ - "Failure threshold is out of range 1, 28", /* RRD_ERR_FAILURE_THRESHOLD_OUT_OF_RANGE */ - "Invalid step: must be >= 1", /* RRD_ERR_INVALID_STEP */ - "Invalid beta: must be between 0 and 1", /* RRD_ERR_INVALID_BETA */ - "Window length is out of range 1, 28", /* RRD_ERR_WIN_LEN_OUT_OF_RANGE */ - "Window length is shorter than the failure threshold", /* RRD_ERR_WINLEN_SHORTER_FAILURE_THRESHOLD */ - "Unexpected extra argument for consolidation function DEVPREDICT", /* RRD_ERR_INVALID_ARG1 */ - "The time spanned by the database is too large: must be <= 4294967296 seconds", /* RRD_ERR_TIME_TOO_LARGE */ - "Invalid smoothing-window : must be between 0 and 1", /* RRD_ERR_INVALID_SMOOTHING_WINDOW */ - "Invalid option", /* RRD_ERR_INVALID_OPT */ - "Length of seasonal cycle exceeds length of HW prediction array", /* 
RRD_ERR_LEN_OF_SEASONAL_CYCLE */ - "Unexpected extra argument for consolidation function", /* RRD_ERR_INVALID_ARG2 */ - "Unknown error", /* RRD_ERR_UNKNOWN_ERROR */ - "Expected at least xxx arguments for RRA but got ooo", /* RRD_ERR_ARG3 */ - "creating contingent RRA", /* RRD_ERR_CREATING_RRA */ - "can't parse argument", /* RRD_ERR_ARG4 */ - "you must define at least one Round Robin Archive", /* RRD_ERR_ARG5 */ - "you must define at least one Data Source", /* RRD_ERR_ARG6 */ - "min must be less than max in DS definition", /* RRD_ERR_ARG7 */ - "failed to parse data source ??", /* RRD_ERR_ARG8 */ - "rrd_open() creating file error", /* RRD_ERR_CREATE_FILE1 */ - "malloc fetch ds_namv array", /* RRD_ERR_MALLOC1 */ - "malloc fetch ds_namv entry", /* RRD_ERR_MALLOC2 */ - "the RRD does not contain an RRA matching the chosen CF", /* RRD_ERR_NO_MATCH_RRA */ - "malloc fetch data area", /* RRD_ERR_MALLOC3 */ - "seek error in RRA", /* RRD_ERR_SEEK_RRA */ - "wrap seek in RRA did fail", /* RRD_ERR_SEEK_RRA1 */ - "fetching cdp from rra", /* RRD_ERR_FETCH_CDP */ - "unknown data source name", /* RRD_ERR_UNKNOWN_DS_NAME */ - "memory allocation failure: seasonal coef", /* RRD_ERR_MALLOC4 */ - "read operation failed in lookup_seasonal()", /* RRD_ERR_READ1 */ - "seek operation failed in lookup_seasonal()", /* RRD_ERR_SEEK1 */ - "apply smoother: memory allocation failure", /* RRD_ERR_MALLOC5 */ - "seek to rra failed", /* RRD_ERR_SEEK2 */ - "reading value failed: ??", /* RRD_ERR_READ2 */ - "apply smoother: SEASONAL rra doesn't have valid dependency", /* RRD_ERR_DEP1 */ - "apply_smoother: seek to cdp_prep failed", /* RRD_ERR_SEEK3 */ - "apply_smoother: cdp_prep write failed", /* RRD_ERR_WRITE1 */ - "apply_smoother: seek to pos ?? 
failed", /* RRD_ERR_SEEK4 */ - "apply_smoother: write failed to xxx", /* RRD_ERR_WRITE2 */ - "reached EOF while loading header ", /* RRD_ERR_READ3 */ - "rrd_read() malloc error", /* RRD_ERR_MALLOC6 */ - "short read while reading header ", /* RRD_ERR_READ4 */ - "allocating rrd_file descriptor for 'xxx'", /* RRD_ERR_MALLOC7 */ - "allocating rrd_simple_file for 'xxx'", /* RRD_ERR_MALLOC8 */ - "in read/write request mask", /* RRD_ERR_IO1 */ - "opening error", /* RRD_ERR_OPEN_FILE */ - "fstat error", /* RRD_ERR_STAT_FILE */ - "write error", /* RRD_ERR_WRITE5 */ - "mmap error", /* RRD_ERR_MMAP */ - "This file is not an RRD file", /* RRD_ERR_FILE */ - "This RRD was created on another architecture", /* RRD_ERR_FILE1 */ - "can't handle RRD file version", /* RRD_ERR_FILE2 */ - "live_head_t malloc", /* RRD_ERR_MALLOC9 */ - "file is too small (should be ?? bytes)", /* RRD_ERR_FILE3 */ - "msync rrd_file error", /* RRD_ERR_MSYNC */ - "munmap rrd_file error", /* RRD_ERR_MUNMAP */ - "closing rrd_file error", /* RRD_ERR_CLOSE */ - "attempting to write beyond end of file", /* RRD_ERR_WRITE6 */ - "update process_arg error", /* RRD_ERR_ARG9 */ - "write changes to disk error", /* RRD_ERR_WRITE7 */ - "Not enough arguments", /* RRD_ERR_ARG10 */ - "could not lock RRD", /* RRD_ERR_LOCK */ - "failed duplication argv entry", /* RRD_ERR_FAILED_STRDUP */ - "allocating updvals pointer array.", /* RRD_ERR_MALLOC10 */ - "allocating pdp_temp.", /* RRD_ERR_MALLOC11 */ - "allocating skip_update.", /* RRD_ERR_MALLOC12 */ - "allocating tmpl_idx.", /* RRD_ERR_MALLOC13 */ - "allocating rra_step_cnt.", /* RRD_ERR_MALLOC14 */ - "allocating pdp_new.", /* RRD_ERR_MALLOC15 */ - "parse template error", /* RRD_ERR_PARSE */ - "error copying tmplt ", /* RRD_ERR_FAILED_STRDUP1 */ - "tmplt contains more DS definitions than RRD", /* RRD_ERR_MORE_DS */ - "unknown DS name ", /* RRD_ERR_UNKNOWN_DS_NAME1 */ - "expected timestamp not found in data source from ??", /* RRD_ERR_STR */ - "found extra data on update 
argument: ??", /* RRD_ERR_ARG11 */ - "expected ?? data source readings (got ??) from ??", /* RRD_ERR_EXPECTED */ - "ds time: ??: ??", /* RRD_ERR_TIME1 */ - "specifying time relative to the 'start' or 'end' makes no sense here: ??", /* RRD_ERR_TIME2 */ - "strtod error: converting ?? to float: ??", /* RRD_ERR_STRTOD */ - "illegal attempt to update using time ?? when last update time is ?? (minimum one second step)", /* RRD_ERR_TIME3 */ - "not a simple ?? integer: '??'", /* RRD_ERR_INT */ - "conversion of '??' to float not complete: tail '??'", /* RRD_ERR_DATA */ - "rrd contains unknown DS type : '??'", /* RRD_ERR_UNKNOWN_DS_TYPE */ - "seek error in rrd", /* RRD_ERR_SEEK5 */ - "writing rrd: ??", /* RRD_ERR_WRITE8 */ - "seek rrd for live header writeback", /* RRD_ERR_SEEK6 */ - "rrd_write live_head to rrd", /* RRD_ERR_WRITE9 */ - "rrd_write pdp_prep to rrd", /* RRD_ERR_WRITE10 */ - "rrd_write cdp_prep to rrd", /* RRD_ERR_WRITE11 */ - "rrd_write rra_ptr to rrd", /* RRD_ERR_WRITE12 */ - "the start and end times cannot be specified relative to each other", /* RRD_ERR_TIME4 */ - "the start time cannot be specified relative to itself", /* RRD_ERR_TIME5 */ - "the end time cannot be specified relative to itself", /* RRD_ERR_TIME6 */ - "failed to alloc memory in addop2str", /* RRD_ERR_MALLOC16 */ - "failed to parse computed data source", /* RRD_ERR_PARSE1 */ - "operators TIME, LTIME, PREV COUNT TREND TRENDNAN PREDICT PREDICTSIGMA are not supported with DS COMPUTE", /* RRD_ERR_DS */ - "don't undestand expr", /* RRD_ERR_EXPR */ - "RPN stack overflow", /* RRD_ERR_STACK */ - "RPN stack underflow", /* RRD_ERR_STACK1 */ - "VDEF made it into rpn_calc... aborting", /* RRD_ERR_ABORT */ - "negative shift step not allowed: ??", /* RRD_ERR_ALLOW */ - "malformed trend arguments", /* RRD_ERR_ARG12 */ - "RPN final stack size != 1", /* RRD_ERR_STACK2 */ - "Maximum ?? RPN nodes permitted. Got ?? 
RPN nodes at present.", /* RRD_ERR_DATA1 */ - "constants must be integers in the interval (??, ??)", /* RRD_ERR_DATA2 */ - "failed allocating rpnp array", /* RRD_ERR_MALLOC17 */ - "unknown data acquisition function '??'", /* RRD_ERR_UNKNOWN_DATA */ - "update_cdp_prep error", /* RRD_ERR_UPDATE_CDP */ - "variable '??' not found", /* RRD_ERR_UNKNOWN_DATA1 */ -}; - - - -const char *rrd_strerror(int err) { - int e; - e = abs(err); - if(e == 0){ - return NULL; - }else{ - if (e >= RRD_ERR_START && e <= RRD_ERR_END){ - printf("errno: 0x%04x, str:%s\n", e, rrd_err_text[e-RRD_ERR_START]); - return rrd_err_text[e-RRD_ERR_START]; - }else{ - printf("errno: 0x%04x, str:%s\n", e, rrd_err_text[RRD_ERR_UNKNOWN_ERROR-RRD_ERR_START]); - return rrd_err_text[RRD_ERR_UNKNOWN_ERROR-RRD_ERR_START]; - } - } -} - - - diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_error.h b/vendor/github.com/open-falcon/rrdlite/rrd_error.h deleted file mode 100644 index aecb29f1..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_error.h +++ /dev/null @@ -1,126 +0,0 @@ -#define RRD_ERR_START 0x0200 -#define RRD_ERR_ALLOC 0x0200 -#define RRD_ERR_INVALID_DS_NAME 0x0201 -#define RRD_ERR_INVALID_DS_TYPE 0x0202 -#define RRD_ERR_DUPLICATE_DS_NAME 0x0203 -#define RRD_ERR_INVALID_DS_FORMAT 0x0204 -#define RRD_ERR_INVALID_DS_TYPE_SPEC 0x0205 -#define RRD_ERR_CREATE_WRITE 0x0206 -#define RRD_ERR_FAILED_PARSE_CF_NAME 0x0207 -#define RRD_ERR_UNREC_CONSOLIDATION_FUNC 0x0208 -#define RRD_ERR_INVALID_ROW_COUNT 0x0209 -#define RRD_ERR_INVALID_XFF 0x020a -#define RRD_ERR_INVALID_ALPHA 0x020b -#define RRD_ERR_INVALID_GAMMA 0x020c -#define RRD_ERR_FAILURE_THRESHOLD_OUT_OF_RANGE 0x020d -#define RRD_ERR_INVALID_STEP 0x020e -#define RRD_ERR_INVALID_BETA 0x020f -#define RRD_ERR_WIN_LEN_OUT_OF_RANGE 0x0210 -#define RRD_ERR_WINLEN_SHORTER_FAILURE_THRESHOLD 0x0211 -#define RRD_ERR_INVALID_ARG1 0x0212 -#define RRD_ERR_TIME_TOO_LARGE 0x0213 -#define RRD_ERR_INVALID_SMOOTHING_WINDOW 0x0214 -#define RRD_ERR_INVALID_OPT 
0x0215 -#define RRD_ERR_LEN_OF_SEASONAL_CYCLE 0x0216 -#define RRD_ERR_INVALID_ARG2 0x0217 -#define RRD_ERR_UNKNOWN_ERROR 0x0218 -#define RRD_ERR_ARG3 0x0219 -#define RRD_ERR_CREATING_RRA 0x021a -#define RRD_ERR_ARG4 0x021b -#define RRD_ERR_ARG5 0x021c -#define RRD_ERR_ARG6 0x021d -#define RRD_ERR_ARG7 0x021e -#define RRD_ERR_ARG8 0x021f -#define RRD_ERR_CREATE_FILE1 0x0220 -#define RRD_ERR_MALLOC1 0x0221 -#define RRD_ERR_MALLOC2 0x0222 -#define RRD_ERR_NO_MATCH_RRA 0x0223 -#define RRD_ERR_MALLOC3 0x0224 -#define RRD_ERR_SEEK_RRA 0x0225 -#define RRD_ERR_SEEK_RRA1 0x0226 -#define RRD_ERR_FETCH_CDP 0x0227 -#define RRD_ERR_UNKNOWN_DS_NAME 0x0228 -#define RRD_ERR_MALLOC4 0x0229 -#define RRD_ERR_READ1 0x022a -#define RRD_ERR_SEEK1 0x022b -#define RRD_ERR_MALLOC5 0x022c -#define RRD_ERR_SEEK2 0x022d -#define RRD_ERR_READ2 0x022e -#define RRD_ERR_DEP1 0x022f -#define RRD_ERR_SEEK3 0x0230 -#define RRD_ERR_WRITE1 0x0231 -#define RRD_ERR_SEEK4 0x0232 -#define RRD_ERR_WRITE2 0x0233 -#define RRD_ERR_READ3 0x0234 -#define RRD_ERR_MALLOC6 0x0235 -#define RRD_ERR_READ4 0x0236 -#define RRD_ERR_MALLOC7 0x0237 -#define RRD_ERR_MALLOC8 0x0238 -#define RRD_ERR_IO1 0x0239 -#define RRD_ERR_OPEN_FILE 0x023a -#define RRD_ERR_STAT_FILE 0x023b -#define RRD_ERR_WRITE5 0x023c -#define RRD_ERR_MMAP 0x023d -#define RRD_ERR_FILE 0x023e -#define RRD_ERR_FILE1 0x023f -#define RRD_ERR_FILE2 0x0240 -#define RRD_ERR_MALLOC9 0x0241 -#define RRD_ERR_FILE3 0x0242 -#define RRD_ERR_MSYNC 0x0243 -#define RRD_ERR_MUNMAP 0x0244 -#define RRD_ERR_CLOSE 0x0245 -#define RRD_ERR_WRITE6 0x0246 -#define RRD_ERR_ARG9 0x0247 -#define RRD_ERR_WRITE7 0x0248 -#define RRD_ERR_ARG10 0x0249 -#define RRD_ERR_LOCK 0x024a -#define RRD_ERR_FAILED_STRDUP 0x024b -#define RRD_ERR_MALLOC10 0x024c -#define RRD_ERR_MALLOC11 0x024d -#define RRD_ERR_MALLOC12 0x024e -#define RRD_ERR_MALLOC13 0x024f -#define RRD_ERR_MALLOC14 0x0250 -#define RRD_ERR_MALLOC15 0x0251 -#define RRD_ERR_PARSE 0x0252 -#define RRD_ERR_FAILED_STRDUP1 0x0253 
-#define RRD_ERR_MORE_DS 0x0254 -#define RRD_ERR_UNKNOWN_DS_NAME1 0x0255 -#define RRD_ERR_STR 0x0256 -#define RRD_ERR_ARG11 0x0257 -#define RRD_ERR_EXPECTED 0x0258 -#define RRD_ERR_TIME1 0x0259 -#define RRD_ERR_TIME2 0x025a -#define RRD_ERR_STRTOD 0x025b -#define RRD_ERR_TIME3 0x025c -#define RRD_ERR_INT 0x025d -#define RRD_ERR_DATA 0x025e -#define RRD_ERR_UNKNOWN_DS_TYPE 0x025f -#define RRD_ERR_SEEK5 0x0260 -#define RRD_ERR_WRITE8 0x0261 -#define RRD_ERR_SEEK6 0x0262 -#define RRD_ERR_WRITE9 0x0263 -#define RRD_ERR_WRITE10 0x0264 -#define RRD_ERR_WRITE11 0x0265 -#define RRD_ERR_WRITE12 0x0266 -#define RRD_ERR_TIME4 0x0267 -#define RRD_ERR_TIME5 0x0268 -#define RRD_ERR_TIME6 0x0269 -#define RRD_ERR_MALLOC16 0x026a -#define RRD_ERR_PARSE1 0x026b -#define RRD_ERR_DS 0x026c -#define RRD_ERR_EXPR 0x026d -#define RRD_ERR_STACK 0x026e -#define RRD_ERR_STACK1 0x026f -#define RRD_ERR_ABORT 0x0270 -#define RRD_ERR_ALLOW 0x0271 -#define RRD_ERR_ARG12 0x0272 -#define RRD_ERR_STACK2 0x0273 -#define RRD_ERR_DATA1 0x0274 -#define RRD_ERR_DATA2 0x0275 -#define RRD_ERR_MALLOC17 0x0276 -#define RRD_ERR_UNKNOWN_DATA 0x0277 -#define RRD_ERR_UPDATE_CDP 0x0278 -#define RRD_ERR_UNKNOWN_DATA1 0x0279 -/* if add new system event flag, please upadte the RRD_ERR_END */ -#define RRD_ERR_END 0x0279 -#define RRD_ERR_NUM (RRD_ERR_END - RRD_ERR_START + 1) diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_error.sh b/vendor/github.com/open-falcon/rrdlite/rrd_error.sh deleted file mode 100644 index 9e1554f8..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_error.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/sh -FILE=rrd_error.h -echo '#define RRD_ERR_START 0x0200' > $FILE -cat ./rrd_error.c | sed 's|.*\/\*[ ]*\([^\s]*\)[ ]*\*\/.*|\1|' | grep '^RRD_ERR' | awk '{printf("#define %-40s 0x%04x\n", $1, NR+0x1ff)}' >> $FILE -wc -l $FILE | awk '{printf("/* if add new system event flag, please upadte the RRD_ERR_END */\n#define RRD_ERR_END 0x%04x\n#define RRD_ERR_NUM (RRD_ERR_END - RRD_ERR_START + 
1)", $1 - 1 + 0x1ff)}' >> $FILE - - diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_fetch.c b/vendor/github.com/open-falcon/rrdlite/rrd_fetch.c deleted file mode 100644 index de8c589a..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_fetch.c +++ /dev/null @@ -1,367 +0,0 @@ -/***************************************************************************** - * RRDtool 1.4.9 Copyright by Tobi Oetiker, 1997-2014 - ***************************************************************************** - * rrd_fetch.c read date from an rrd to use for further processing - ***************************************************************************** - * $Id$ - * $Log$ - * Revision 1.8 2004/05/18 18:53:03 oetiker - * big spell checking patch -- slif@bellsouth.net - * - * Revision 1.7 2003/11/11 19:46:21 oetiker - * replaced time_value with rrd_time_value as MacOS X introduced a struct of that name in their standard headers - * - * Revision 1.6 2003/01/16 23:27:54 oetiker - * fix border condition in rra selection of rrd_fetch - * -- Stanislav Sinyagin - * - * Revision 1.5 2002/06/23 22:29:40 alex - * Added "step=1800" and such to "DEF" - * Cleaned some of the signed vs. unsigned problems - * - * Revision 1.4 2002/02/01 20:34:49 oetiker - * fixed version number and date/time - * - * Revision 1.3 2001/12/24 06:51:49 alex - * A patch of size 44Kbytes... in short: - * - * Found and repaired the off-by-one error in rrd_fetch_fn(). - * As a result I had to remove the hacks in rrd_fetch_fn(), - * rrd_tool.c, vdef_calc(), data_calc(), data_proc() and - * reduce_data(). There may be other places which I didn't - * find so be careful. - * - * Enhanced debugging in rrd_fetch_fn(), it shows the RRA selection - * process. - * - * Added the ability to print VDEF timestamps. At the moment it - * is a hack, I needed it now to fix the off-by-one error. - * If the format string is "%c" (and nothing else!), the time - * will be printed by both ctime() and as a long int. 
- * - * Moved some code around (slightly altering it) from rrd_graph() - * initializing now in rrd_graph_init() - * options parsing now in rrd_graph_options() - * script parsing now in rrd_graph_script() - * - * Revision 1.2 2001/12/17 12:48:43 oetiker - * fix overflow error ... - * - * Revision 1.1.1.1 2001/02/25 22:25:05 oetiker - * checkin - * - *****************************************************************************/ - -#include "rrd_tool.h" -#ifndef RRD_LITE -#include "rrd_client.h" -#endif - -#include "rrd_is_thread_safe.h" -/* #define DEBUG */ - -int rrd_fetch_r( - const char *filename, /* name of the rrd */ - const char *cf, /* which consolidation function ? */ - time_t *start, - time_t *end, /* which time frame do you want ? - * will be changed to represent reality */ - unsigned long *step, /* which stepsize do you want? - * will be changed to represent reality */ - unsigned long *ds_cnt, /* number of data sources in file */ - char ***ds_namv, /* names of data_sources */ - rrd_value_t **data) -{ /* two dimensional array containing the data */ - enum cf_en cf_idx; - - if ((int) (cf_idx = cf_conv(cf)) == -1) { - return -RRD_ERR_UNREC_CONSOLIDATION_FUNC; - } - - return (rrd_fetch_fn - (filename, cf_idx, start, end, step, ds_cnt, ds_namv, data)); -} /* int rrd_fetch_r */ - -int rrd_fetch_fn( - const char *filename, /* name of the rrd */ - enum cf_en cf_idx, /* which consolidation function ? */ - time_t *start, - time_t *end, /* which time frame do you want ? - * will be changed to represent reality */ - unsigned long *step, /* which stepsize do you want? 
- * will be changed to represent reality */ - unsigned long *ds_cnt, /* number of data sources in file */ - char ***ds_namv, /* names of data_sources */ - rrd_value_t **data) -{ /* two dimensional array containing the data */ - long i, ii; - time_t cal_start, cal_end, rra_start_time, rra_end_time; - long best_full_rra = 0, best_part_rra = 0, chosen_rra = - 0, rra_pointer = 0; - long best_full_step_diff = 0, best_part_step_diff = - 0, tmp_step_diff = 0, tmp_match = 0, best_match = 0; - long full_match, rra_base; - off_t start_offset, end_offset; - int first_full = 1; - int first_part = 1; - rrd_t rrd; - rrd_file_t *rrd_file; - rrd_value_t *data_ptr; - unsigned long rows; - int ret = 0; - -#ifdef DEBUG - fprintf(stderr, "Entered rrd_fetch_fn() searching for the best match\n"); - fprintf(stderr, "Looking for: start %10lu end %10lu step %5lu\n", - *start, *end, *step); -#endif - -#ifdef HAVE_LIBDBI - /* handle libdbi datasources */ - if (strncmp("sql//",filename,5)==0) { - return rrd_fetch_fn_libdbi(filename,cf_idx,start,end,step,ds_cnt,ds_namv,data); - } -#endif - - rrd_init(&rrd); - rrd_file = rrd_open(filename, &rrd, RRD_READONLY, &ret); - if (rrd_file == NULL) - goto err_free; - - /* when was the really last update of this file ? 
*/ - - if (((*ds_namv) = - (char **) malloc(rrd.stat_head->ds_cnt * sizeof(char *))) == NULL) { - ret = -RRD_ERR_MALLOC1; - goto err_close; - } - - for (i = 0; (unsigned long) i < rrd.stat_head->ds_cnt; i++) { - if ((((*ds_namv)[i]) = (char*)malloc(sizeof(char) * DS_NAM_SIZE)) == NULL) { - for(ii = 0; ii < i; ii++){ - free((*ds_namv)[ii]); - } - ret = -RRD_ERR_MALLOC2; - goto err_free_ds_namv; - } - strncpy((*ds_namv)[i], rrd.ds_def[i].ds_nam, DS_NAM_SIZE - 1); - (*ds_namv)[i][DS_NAM_SIZE - 1] = '\0'; - - } - - /* find the rra which best matches the requirements */ - for (i = 0; (unsigned) i < rrd.stat_head->rra_cnt; i++) { - if (cf_conv(rrd.rra_def[i].cf_nam) == cf_idx) { - - cal_end = (rrd.live_head->last_up - (rrd.live_head->last_up - % (rrd.rra_def[i].pdp_cnt - * - rrd.stat_head-> - pdp_step))); - cal_start = - (cal_end - - (rrd.rra_def[i].pdp_cnt * rrd.rra_def[i].row_cnt * - rrd.stat_head->pdp_step)); - - full_match = *end - *start; -#ifdef DEBUG - fprintf(stderr, "Considering: start %10lu end %10lu step %5lu ", - cal_start, cal_end, - rrd.stat_head->pdp_step * rrd.rra_def[i].pdp_cnt); -#endif - /* we need step difference in either full or partial case */ - tmp_step_diff = labs(*step - (rrd.stat_head->pdp_step - * rrd.rra_def[i].pdp_cnt)); - /* best full match */ - if (cal_start <= *start) { - if (first_full || (tmp_step_diff < best_full_step_diff)) { - first_full = 0; - best_full_step_diff = tmp_step_diff; - best_full_rra = i; -#ifdef DEBUG - fprintf(stderr, "best full match so far\n"); - } else { - fprintf(stderr, "full match, not best\n"); -#endif - } - - } else { - /* best partial match */ - tmp_match = full_match; - if (cal_start > *start) - tmp_match -= (cal_start - *start); - if (first_part || - (best_match < tmp_match) || - (best_match == tmp_match && - tmp_step_diff < best_part_step_diff)) { -#ifdef DEBUG - fprintf(stderr, "best partial so far\n"); -#endif - first_part = 0; - best_match = tmp_match; - best_part_step_diff = tmp_step_diff; - 
best_part_rra = i; - } else { -#ifdef DEBUG - fprintf(stderr, "partial match, not best\n"); -#endif - } - } - } - } - - /* lets see how the matching went. */ - if (first_full == 0) - chosen_rra = best_full_rra; - else if (first_part == 0) - chosen_rra = best_part_rra; - else { - ret = -RRD_ERR_NO_MATCH_RRA; - goto err_free_all_ds_namv; - } - - /* set the wish parameters to their real values */ - *step = rrd.stat_head->pdp_step * rrd.rra_def[chosen_rra].pdp_cnt; - *start -= (*start % *step); - *end += (*step - *end % *step); - rows = (*end - *start) / *step + 1; - -#ifdef DEBUG - fprintf(stderr, - "We found: start %10lu end %10lu step %5lu rows %lu\n", - *start, *end, *step, rows); -#endif - -/* Start and end are now multiples of the step size. The amount of -** steps we want is (end-start)/step and *not* an extra one. -** Reasoning: if step is s and we want to graph from t to t+s, -** we need exactly ((t+s)-t)/s rows. The row to collect from the -** database is the one with time stamp (t+s) which means t to t+s. 
-*/ - *ds_cnt = rrd.stat_head->ds_cnt; - if (((*data) = (rrd_value_t*)malloc(*ds_cnt * rows * sizeof(rrd_value_t))) == NULL) { - ret = -RRD_ERR_MALLOC3; - goto err_free_all_ds_namv; - } - - data_ptr = (*data); - - /* find base address of rra */ - rra_base = rrd_file->header_len; - for (i = 0; i < chosen_rra; i++) - rra_base += (*ds_cnt * rrd.rra_def[i].row_cnt * sizeof(rrd_value_t)); - - /* find start and end offset */ - rra_end_time = (rrd.live_head->last_up - - (rrd.live_head->last_up % *step)); - rra_start_time = (rra_end_time - - (*step * (rrd.rra_def[chosen_rra].row_cnt - 1))); - /* here's an error by one if we don't be careful */ - start_offset = ((long long) *start + (long long)*step - (long long)rra_start_time) / (long long) *step; - end_offset = ((long long) rra_end_time - (long long)*end) / (long long) *step; -#ifdef DEBUG - fprintf(stderr, - "rra_start %lu, rra_end %lu, start_off %li, end_off %li\n", - rra_start_time, rra_end_time, start_offset, end_offset); -#endif - /* only seek if the start time is before the end time */ - if (*start <= rra_end_time && *end >= rra_start_time - (off_t)*step ){ - if (start_offset <= 0) - rra_pointer = rrd.rra_ptr[chosen_rra].cur_row + 1; - else - rra_pointer = rrd.rra_ptr[chosen_rra].cur_row + 1 + start_offset; - - rra_pointer = rra_pointer % (signed) rrd.rra_def[chosen_rra].row_cnt; - - if (rrd_seek(rrd_file, (rra_base + (rra_pointer * (*ds_cnt) - * sizeof(rrd_value_t))), - SEEK_SET) != 0) { - ret = -RRD_ERR_SEEK_RRA; - goto err_free_data; - } -#ifdef DEBUG - fprintf(stderr, "First Seek: rra_base %lu rra_pointer %lu\n", - rra_base, rra_pointer); -#endif - } - - /* step trough the array */ - - for (i = start_offset; - i < (signed) rrd.rra_def[chosen_rra].row_cnt - end_offset; i++) { - /* no valid data yet */ - if (i < 0) { -#ifdef DEBUG - fprintf(stderr, "pre fetch %li -- ", i); -#endif - for (ii = 0; (unsigned) ii < *ds_cnt; ii++) { - *(data_ptr++) = DNAN; -#ifdef DEBUG - fprintf(stderr, "%10.2f ", *(data_ptr - 1)); 
-#endif - } - } - /* past the valid data area */ - else if (i >= (signed) rrd.rra_def[chosen_rra].row_cnt) { -#ifdef DEBUG - fprintf(stderr, "past fetch %li -- ", i); -#endif - for (ii = 0; (unsigned) ii < *ds_cnt; ii++) { - *(data_ptr++) = DNAN; -#ifdef DEBUG - fprintf(stderr, "%10.2f ", *(data_ptr - 1)); -#endif - } - } else { - /* OK we are inside the valid area but the pointer has to - * be wrapped*/ - if (rra_pointer >= (signed) rrd.rra_def[chosen_rra].row_cnt) { - rra_pointer -= rrd.rra_def[chosen_rra].row_cnt; - if (rrd_seek(rrd_file, (rra_base + rra_pointer * (*ds_cnt) - * sizeof(rrd_value_t)), - SEEK_SET) != 0) { - ret = -RRD_ERR_SEEK_RRA1; - goto err_free_data; - } -#ifdef DEBUG - fprintf(stderr, "wrap seek ...\n"); -#endif - } - - if (rrd_read(rrd_file, data_ptr, sizeof(rrd_value_t) * (*ds_cnt)) - != (ssize_t) (sizeof(rrd_value_t) * (*ds_cnt))) { - ret = -RRD_ERR_FETCH_CDP; - goto err_free_data; - } -#ifdef DEBUG - fprintf(stderr, "post fetch %li -- ", i); - for (ii = 0; ii < *ds_cnt; ii++) - fprintf(stderr, "%10.2f ", *(data_ptr + ii)); -#endif - data_ptr += *ds_cnt; - rra_pointer++; - } -#ifdef DEBUG - fprintf(stderr, "\n"); -#endif - - } - - rrd_close(rrd_file); - rrd_free(&rrd); - return (0); - err_free_data: - free(*data); - *data = NULL; - err_free_all_ds_namv: - for (i = 0; (unsigned long) i < rrd.stat_head->ds_cnt; ++i) - free((*ds_namv)[i]); - err_free_ds_namv: - free(*ds_namv); - *ds_namv = NULL; - err_close: - rrd_close(rrd_file); - err_free: - rrd_free(&rrd); - return ret; -} diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_format.c b/vendor/github.com/open-falcon/rrdlite/rrd_format.c deleted file mode 100644 index 36f3cf30..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_format.c +++ /dev/null @@ -1,103 +0,0 @@ -/***************************************************************************** - * RRDtool 1.4.9 Copyright by Tobi Oetiker, 1997-2014 - ***************************************************************************** - * 
rrd_format.c RRD Database Format helper functions - ***************************************************************************** - * $Id$ - * $Log$ - * Revision 1.5 2004/05/18 18:53:03 oetiker - * big spell checking patch -- slif@bellsouth.net - * - * Revision 1.4 2003/02/13 07:05:27 oetiker - * Find attached the patch I promised to send to you. Please note that there - * are three new source files (src/rrd_is_thread_safe.h, src/rrd_thread_safe.c - * and src/rrd_not_thread_safe.c) and the introduction of librrd_th. This - * library is identical to librrd, but it contains support code for per-thread - * global variables currently used for error information only. This is similar - * to how errno per-thread variables are implemented. librrd_th must be linked - * alongside of libpthred - * - * There is also a new file "THREADS", holding some documentation. - * - * -- Peter Stamfest - * - * Revision 1.3 2002/02/01 20:34:49 oetiker - * fixed version number and date/time - * - * Revision 1.2 2001/03/10 23:54:39 oetiker - * Support for COMPUTE data sources (CDEF data sources). Removes the RPN - * parser and calculator from rrd_graph and puts then in a new file, - * rrd_rpncalc.c. Changes to core files rrd_create and rrd_update. Some - * clean-up of aberrant behavior stuff, including a bug fix. - * Documentation update (rrdcreate.pod, rrdupdate.pod). Change xml format. 
- * -- Jake Brutlag - * - * Revision 1.1.1.1 2001/02/25 22:25:05 oetiker - * checkin - * - * Revision 1.3 1998/03/08 12:35:11 oetiker - * checkpointing things because the current setup seems to work - * according to the things said in the manpages - * - * Revision 1.2 1998/02/26 22:58:22 oetiker - * fixed define - * - * Revision 1.1 1998/02/21 16:14:41 oetiker - * Initial revision - * - * - *****************************************************************************/ -#include "rrd_tool.h" -#ifdef WIN32 -#include "stdlib.h" -#endif - -#define converter(VV,VVV) \ - if (strcmp(#VV, string) == 0) return VVV; - -/* conversion functions to allow symbolic entry of enumerations */ -enum dst_en dst_conv( char *string) { - converter(COUNTER, DST_COUNTER) - converter(ABSOLUTE, DST_ABSOLUTE) - converter(GAUGE, DST_GAUGE) - converter(DERIVE, DST_DERIVE) - converter(COMPUTE, DST_CDEF) - return (enum dst_en)(-1); -} - - -enum cf_en cf_conv( const char *string) { - - converter(AVERAGE, CF_AVERAGE) - converter(MIN, CF_MINIMUM) - converter(MAX, CF_MAXIMUM) - converter(LAST, CF_LAST) - converter(HWPREDICT, CF_HWPREDICT) - converter(MHWPREDICT, CF_MHWPREDICT) - converter(DEVPREDICT, CF_DEVPREDICT) - converter(SEASONAL, CF_SEASONAL) - converter(DEVSEASONAL, CF_DEVSEASONAL) - converter(FAILURES, CF_FAILURES) - return (enum cf_en)(-1); -} - -#undef converter - -long ds_match( rrd_t *rrd, char *ds_nam) { - unsigned long i; - - for (i = 0; i < rrd->stat_head->ds_cnt; i++) - if ((strcmp(ds_nam, rrd->ds_def[i].ds_nam)) == 0) - return i; - return -RRD_ERR_UNKNOWN_DS_NAME; -} - -off_t rrd_get_header_size( rrd_t *rrd) { - return sizeof(stat_head_t) + \ - sizeof(ds_def_t) * rrd->stat_head->ds_cnt + \ - sizeof(rra_def_t) * rrd->stat_head->rra_cnt + \ - ( atoi(rrd->stat_head->version) < 3 ? 
sizeof(time_t) : sizeof(live_head_t) ) + \ - sizeof(pdp_prep_t) * rrd->stat_head->ds_cnt + \ - sizeof(cdp_prep_t) * rrd->stat_head->ds_cnt * rrd->stat_head->rra_cnt + \ - sizeof(rra_ptr_t) * rrd->stat_head->rra_cnt; -} diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_format.h b/vendor/github.com/open-falcon/rrdlite/rrd_format.h deleted file mode 100644 index f00c8011..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_format.h +++ /dev/null @@ -1,428 +0,0 @@ -/***************************************************************************** - * RRDtool 1.4.9 Copyright by Tobi Oetiker, 1997-2014 - ***************************************************************************** - * rrd_format.h RRD Database Format header - *****************************************************************************/ - -#ifndef _RRD_FORMAT_H -#define _RRD_FORMAT_H - -/* - * _RRD_TOOL_H - * We're building RRDTool itself. - * - * RRD_EXPORT_DEPRECATED - * User is requesting internal function which need this struct. They have - * been told that this will change and have agreed to adapt their programs. - */ -#if !defined(_RRD_TOOL_H) && !defined(RRD_EXPORT_DEPRECATED) -# error "Do not include rrd_format.h directly. Include rrd.h instead!" 
-#endif - -#include "rrd.h" - -/***************************************************************************** - * put this in your /usr/lib/magic file (/etc/magic on HPUX) - * - * # rrd database format - * 0 string RRD\0 rrd file - * >5 string >\0 version '%s' - * - *****************************************************************************/ - -#define RRD_COOKIE "RRD" -/* #define RRD_VERSION "0002" */ -/* changed because microsecond precision requires another field */ -#define RRD_VERSION "0004" -#define RRD_VERSION3 "0003" -#define FLOAT_COOKIE ((double)8.642135E130) - -typedef union unival { - unsigned long u_cnt; - rrd_value_t u_val; -} unival; - - -/**************************************************************************** - * The RRD Database Structure - * --------------------------- - * - * In oder to properly describe the database structure lets define a few - * new words: - * - * ds - Data Source (ds) providing input to the database. A Data Source (ds) - * can be a traffic counter, a temperature, the number of users logged - * into a system. The rrd database format can handle the input of - * several Data Sources (ds) in a singe database. - * - * dst - Data Source Type (dst). The Data Source Type (dst) defines the rules - * applied to Build Primary Data Points from the input provided by the - * data sources (ds). - * - * pdp - Primary Data Point (pdp). After the database has accepted the - * input from the data sources (ds). It starts building Primary - * Data Points (pdp) from the data. Primary Data Points (pdp) - * are evenly spaced along the time axis (pdp_step). The values - * of the Primary Data Points are calculated from the values of - * the data source (ds) and the exact time these values were - * provided by the data source (ds). - * - * pdp_st - PDP Start (pdp_st). The moments (pdp_st) in time where - * these steps occur are defined by the moments where the - * number of seconds since 1970-jan-1 modulo pdp_step equals - * zero (pdp_st). 
- * - * cf - Consolidation Function (cf). An arbitrary Consolidation Function (cf) - * (averaging, min, max) is applied to the primary data points (pdp) to - * calculate the consolidated data point. - * - * cdp - Consolidated Data Point (cdp) is the long term storage format for data - * in the rrd database. Consolidated Data Points represent one or - * several primary data points collected along the time axis. The - * Consolidated Data Points (cdp) are stored in Round Robin Archives - * (rra). - * - * rra - Round Robin Archive (rra). This is the place where the - * consolidated data points (cdp) get stored. The data is - * organized in rows (row) and columns (col). The Round Robin - * Archive got its name from the method data is stored in - * there. An RRD database can contain several Round Robin - * Archives. Each Round Robin Archive can have a different row - * spacing along the time axis (pdp_cnt) and a different - * consolidation function (cf) used to build its consolidated - * data points (cdp). - * - * rra_st - RRA Start (rra_st). The moments (rra_st) in time where - * Consolidated Data Points (cdp) are added to an rra are - * defined by the moments where the number of seconds since - * 1970-jan-1 modulo pdp_cnt*pdp_step equals zero (rra_st). - * - * row - Row (row). A row represent all consolidated data points (cdp) - * in a round robin archive who are of the same age. - * - * col - Column (col). A column (col) represent all consolidated - * data points (cdp) in a round robin archive (rra) who - * originated from the same data source (ds). 
- * - */ - -/**************************************************************************** - * POS 1: stat_head_t static header of the database - ****************************************************************************/ - -typedef struct stat_head_t { - - /* Data Base Identification Section ** */ - char cookie[4]; /* RRD */ - char version[5]; /* version of the format */ - double float_cookie; /* is it the correct double - * representation ? */ - - /* Data Base Structure Definition **** */ - unsigned long ds_cnt; /* how many different ds provide - * input to the rrd */ - unsigned long rra_cnt; /* how many rras will be maintained - * in the rrd */ - unsigned long pdp_step; /* pdp interval in seconds */ - - unival par[10]; /* global parameters ... unused - at the moment */ -} stat_head_t; - - -/**************************************************************************** - * POS 2: ds_def_t (* ds_cnt) Data Source definitions - ****************************************************************************/ - -enum dst_en { DST_COUNTER = 0, /* data source types available */ - DST_ABSOLUTE, - DST_GAUGE, - DST_DERIVE, - DST_CDEF -}; - -enum ds_param_en { DS_mrhb_cnt = 0, /* minimum required heartbeat. A - * data source must provide input at - * least every ds_mrhb seconds, - * otherwise it is regarded dead and - * will be set to UNKNOWN */ - DS_min_val, /* the processed input of a ds must */ - DS_max_val, /* be between max_val and min_val - * both can be set to UNKNOWN if you - * do not care. 
Data outside the limits - * set to UNKNOWN */ - DS_cdef = DS_mrhb_cnt -}; /* pointer to encoded rpn - * expression only applies to DST_CDEF */ - -/* The magic number here is one less than DS_NAM_SIZE */ -#define DS_NAM_FMT "%19[a-zA-Z0-9_-]" -#define DS_NAM_SIZE 20 - -#define DST_FMT "%19[A-Z]" -#define DST_SIZE 20 - -typedef struct ds_def_t { - char ds_nam[DS_NAM_SIZE]; /* Name of the data source (null terminated) */ - char dst[DST_SIZE]; /* Type of data source (null terminated) */ - unival par[10]; /* index of this array see ds_param_en */ -} ds_def_t; - -/**************************************************************************** - * POS 3: rra_def_t ( * rra_cnt) one for each store to be maintained - ****************************************************************************/ -enum cf_en { CF_AVERAGE = 0, /* data consolidation functions */ - CF_MINIMUM, - CF_MAXIMUM, - CF_LAST, - CF_HWPREDICT, - /* An array of predictions using the seasonal - * Holt-Winters algorithm. Requires an RRA of type - * CF_SEASONAL for this data source. */ - CF_SEASONAL, - /* An array of seasonal effects. Requires an RRA of - * type CF_HWPREDICT for this data source. */ - CF_DEVPREDICT, - /* An array of deviation predictions based upon - * smoothed seasonal deviations. Requires an RRA of - * type CF_DEVSEASONAL for this data source. */ - CF_DEVSEASONAL, - /* An array of smoothed seasonal deviations. Requires - * an RRA of type CF_HWPREDICT for this data source. - * */ - CF_FAILURES, - /* HWPREDICT that follows a moving baseline */ - CF_MHWPREDICT - /* new entries must come last !!! */ -}; - - /* A binary array of failure indicators: 1 indicates - * that the number of violations in the prescribed - * window exceeded the prescribed threshold. 
*/ - -#define MAX_RRA_PAR_EN 10 -enum rra_par_en { RRA_cdp_xff_val = 0, /* what part of the consolidated - * datapoint must be known, to produce a - * valid entry in the rra */ - /* CF_HWPREDICT: */ - RRA_hw_alpha = 1, - /* exponential smoothing parameter for the intercept in - * the Holt-Winters prediction algorithm. */ - RRA_hw_beta = 2, - /* exponential smoothing parameter for the slope in - * the Holt-Winters prediction algorithm. */ - - RRA_dependent_rra_idx = 3, - /* For CF_HWPREDICT: index of the RRA with the seasonal - * effects of the Holt-Winters algorithm (of type - * CF_SEASONAL). - * For CF_DEVPREDICT: index of the RRA with the seasonal - * deviation predictions (of type CF_DEVSEASONAL). - * For CF_SEASONAL: index of the RRA with the Holt-Winters - * intercept and slope coefficient (of type CF_HWPREDICT). - * For CF_DEVSEASONAL: index of the RRA with the - * Holt-Winters prediction (of type CF_HWPREDICT). - * For CF_FAILURES: index of the CF_DEVSEASONAL array. - * */ - - /* CF_SEASONAL and CF_DEVSEASONAL: */ - RRA_seasonal_gamma = 1, - /* exponential smoothing parameter for seasonal effects. */ - - RRA_seasonal_smoothing_window = 2, - /* fraction of the season to include in the running average - * smoother */ - - /* RRA_dependent_rra_idx = 3, */ - - RRA_seasonal_smooth_idx = 4, - /* an integer between 0 and row_count - 1 which - * is index in the seasonal cycle for applying - * the period smoother. */ - - /* CF_FAILURES: */ - RRA_delta_pos = 1, /* confidence bound scaling parameters */ - RRA_delta_neg = 2, - /* RRA_dependent_rra_idx = 3, */ - RRA_window_len = 4, - RRA_failure_threshold = 5 - /* For CF_FAILURES, number of violations within the last - * window required to mark a failure. */ -}; - - /* For CF_FAILURES, the length of the window for measuring - * failures. 
*/ - -#define CF_NAM_FMT "%19[A-Z]" -#define CF_NAM_SIZE 20 - -typedef struct rra_def_t { - char cf_nam[CF_NAM_SIZE]; /* consolidation function (null term) */ - unsigned long row_cnt; /* number of entries in the store */ - unsigned long pdp_cnt; /* how many primary data points are - * required for a consolidated data - * point?*/ - unival par[MAX_RRA_PAR_EN]; /* index see rra_param_en */ - -} rra_def_t; - - -/**************************************************************************** - **************************************************************************** - **************************************************************************** - * LIVE PART OF THE HEADER. THIS WILL BE WRITTEN ON EVERY UPDATE * - **************************************************************************** - **************************************************************************** - ****************************************************************************/ -/**************************************************************************** - * POS 4: live_head_t - ****************************************************************************/ - -typedef struct live_head_t { - time_t last_up; /* when was rrd last updated */ - long last_up_usec; /* micro seconds part of the - update timestamp. Always >= 0 */ -} live_head_t; - - -/**************************************************************************** - * POS 5: pdp_prep_t (* ds_cnt) here we prepare the pdps - ****************************************************************************/ -#define LAST_DS_LEN 30 /* DO NOT CHANGE THIS ... */ - -enum pdp_par_en { PDP_unkn_sec_cnt = 0, /* how many seconds of the current - * pdp value is unknown data? */ - - PDP_val -}; /* current value of the pdp. - this depends on dst */ - -typedef struct pdp_prep_t { - char last_ds[LAST_DS_LEN]; /* the last reading from the data - * source. this is stored in ASCII - * to cater for very large counters - * we might encounter in connection - * with SNMP. 
*/ - unival scratch[10]; /* contents according to pdp_par_en */ -} pdp_prep_t; - -/* data is passed from pdp to cdp when seconds since epoch modulo pdp_step == 0 - obviously the updates do not occur at these times only. Especially does the - format allow for updates to occur at different times for each data source. - The rules which makes this work is as follows: - - * DS updates may only occur at ever increasing points in time - * When any DS update arrives after a cdp update time, the *previous* - update cycle gets executed. All pdps are transfered to cdps and the - cdps feed the rras where necessary. Only then the new DS value - is loaded into the PDP. */ - - -/**************************************************************************** - * POS 6: cdp_prep_t (* rra_cnt * ds_cnt ) data prep area for cdp values - ****************************************************************************/ -#define MAX_CDP_PAR_EN 10 -#define MAX_CDP_FAILURES_IDX 8 -/* max CDP scratch entries avail to record violations for a FAILURES RRA */ -#define MAX_FAILURES_WINDOW_LEN 28 -enum cdp_par_en { CDP_val = 0, - /* the base_interval is always an - * average */ - CDP_unkn_pdp_cnt, - /* how many unknown pdp were - * integrated. This and the cdp_xff - * will decide if this is going to - * be a UNKNOWN or a valid value */ - CDP_hw_intercept, - /* Current intercept coefficient for the Holt-Winters - * prediction algorithm. */ - CDP_hw_last_intercept, - /* Last iteration intercept coefficient for the Holt-Winters - * prediction algorihtm. */ - CDP_hw_slope, - /* Current slope coefficient for the Holt-Winters - * prediction algorithm. */ - CDP_hw_last_slope, - /* Last iteration slope coeffient. */ - CDP_null_count, - /* Number of sequential Unknown (DNAN) values + 1 preceding - * the current prediction. - * */ - CDP_last_null_count, - /* Last iteration count of Unknown (DNAN) values. 
*/ - CDP_primary_val = 8, - /* optimization for bulk updates: the value of the first CDP - * value to be written in the bulk update. */ - CDP_secondary_val = 9, - /* optimization for bulk updates: the value of subsequent - * CDP values to be written in the bulk update. */ - CDP_hw_seasonal = CDP_hw_intercept, - /* Current seasonal coefficient for the Holt-Winters - * prediction algorithm. This is stored in CDP prep to avoid - * redundant seek operations. */ - CDP_hw_last_seasonal = CDP_hw_last_intercept, - /* Last iteration seasonal coeffient. */ - CDP_seasonal_deviation = CDP_hw_intercept, - CDP_last_seasonal_deviation = CDP_hw_last_intercept, - CDP_init_seasonal = CDP_null_count -}; - - /* init_seasonal is a flag which when > 0, forces smoothing updates - * to occur when rra_ptr.cur_row == 0 */ - -typedef struct cdp_prep_t { - unival scratch[MAX_CDP_PAR_EN]; - /* contents according to cdp_par_en * - * init state should be NAN */ - -} cdp_prep_t; - -/**************************************************************************** - * POS 7: rra_ptr_t (* rra_cnt) pointers to the current row in each rra - ****************************************************************************/ - -typedef struct rra_ptr_t { - unsigned long cur_row; /* current row in the rra */ -} rra_ptr_t; - - -/**************************************************************************** - **************************************************************************** - * One single struct to hold all the others. For convenience. 
- **************************************************************************** - ****************************************************************************/ -typedef struct rrd_t { - stat_head_t *stat_head; /* the static header */ - ds_def_t *ds_def; /* list of data source definitions */ - rra_def_t *rra_def; /* list of round robin archive def */ - live_head_t *live_head; /* rrd v >= 3 last_up with us */ - time_t *legacy_last_up; /* rrd v < 3 last_up time */ - pdp_prep_t *pdp_prep; /* pdp data prep area */ - cdp_prep_t *cdp_prep; /* cdp prep area */ - rra_ptr_t *rra_ptr; /* list of rra pointers */ - rrd_value_t *rrd_value; /* list of rrd values */ -} rrd_t; - -/**************************************************************************** - **************************************************************************** - * AFTER the header section we have the DATA STORAGE AREA it is made up from - * Consolidated Data Points organized in Round Robin Archives. - **************************************************************************** - **************************************************************************** - - *RRA 0 - (0,0) .................... ( ds_cnt -1 , 0) - . - . - . - (0, row_cnt -1) ... 
(ds_cnt -1, row_cnt -1) - - *RRA 1 - *RRA 2 - - *RRA rra_cnt -1 - - ****************************************************************************/ - - -#endif diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_hw.c b/vendor/github.com/open-falcon/rrdlite/rrd_hw.c deleted file mode 100644 index 25dcdbe5..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_hw.c +++ /dev/null @@ -1,471 +0,0 @@ -/***************************************************************************** - * RRDtool 1.4.9 Copyright by Tobi Oetiker, 1997-2014 - ***************************************************************************** - * rrd_hw.c : Support for Holt-Winters Smoothing/ Aberrant Behavior Detection - ***************************************************************************** - * Initial version by Jake Brutlag, WebTV Networks, 5/1/00 - *****************************************************************************/ - -#include - -#include "rrd_tool.h" -#include "rrd_hw.h" -#include "rrd_hw_math.h" -#include "rrd_hw_update.h" - -#define hw_dep_idx(rrd, rra_idx) rrd->rra_def[rra_idx].par[RRA_dependent_rra_idx].u_cnt - -/* #define DEBUG */ - -/* private functions */ -static unsigned long MyMod( - signed long val, - unsigned long mod); - -int lookup_seasonal( rrd_t *rrd, unsigned long rra_idx, - unsigned long rra_start, rrd_file_t *rrd_file, - unsigned long offset, rrd_value_t **seasonal_coef) { - unsigned long pos_tmp; - - /* rra_ptr[].cur_row points to the rra row to be written; this function - * reads cur_row + offset */ - unsigned long row_idx = rrd->rra_ptr[rra_idx].cur_row + offset; - int ret = 0; - - /* handle wrap around */ - if (row_idx >= rrd->rra_def[rra_idx].row_cnt) - row_idx = row_idx % (rrd->rra_def[rra_idx].row_cnt); - - /* rra_start points to the appropriate rra block in the file */ - /* compute the pointer to the appropriate location in the file */ - pos_tmp = - rra_start + - (row_idx) * (rrd->stat_head->ds_cnt) * sizeof(rrd_value_t); - - /* allocate memory if 
need be */ - if (*seasonal_coef == NULL) - *seasonal_coef = - (rrd_value_t *) malloc((rrd->stat_head->ds_cnt) * - sizeof(rrd_value_t)); - if (*seasonal_coef == NULL) { - return -RRD_ERR_MALLOC4; - } - - if (!rrd_seek(rrd_file, pos_tmp, SEEK_SET)) { - if (rrd_read - (rrd_file, *seasonal_coef, - sizeof(rrd_value_t) * rrd->stat_head->ds_cnt) - == (ssize_t) (sizeof(rrd_value_t) * rrd->stat_head->ds_cnt)) { - /* success! */ - /* we can safely ignore the rule requiring a seek operation between read - * and write, because this read moves the file pointer to somewhere - * in the file other than the next write location. - * */ - return 0; - } else { - ret = -RRD_ERR_READ1; - } - } else { - ret = -RRD_ERR_SEEK1; - } - - return ret; -} - -/* For the specified CDP prep area and the FAILURES RRA, - * erase all history of past violations. - */ -int erase_violations( rrd_t *rrd, unsigned long cdp_idx, - unsigned long rra_idx) { - unsigned short i; - char *violations_array; - int ret = 0; - enum dst_en r; - - - /* check that rra_idx is a CF_FAILURES array */ - if ((r = cf_conv(rrd->rra_def[rra_idx].cf_nam)) != CF_FAILURES) { -#ifdef DEBUG - fprintf(stderr, "erase_violations called for non-FAILURES RRA: %s\n", - rrd->rra_def[rra_idx].cf_nam); -#endif - if (r < 0){ - return (int)r; - } - return 0; - } -#ifdef DEBUG - fprintf(stderr, "scratch buffer before erase:\n"); - for (i = 0; i < MAX_CDP_PAR_EN; i++) { - fprintf(stderr, "%lu ", rrd->cdp_prep[cdp_idx].scratch[i].u_cnt); - } - fprintf(stderr, "\n"); -#endif - - /* WARNING: an array of longs on disk is treated as an array of chars - * in memory. 
*/ - violations_array = (char *) ((void *) rrd->cdp_prep[cdp_idx].scratch); - /* erase everything in the part of the CDP scratch array that will be - * used to store violations for the current window */ - for (i = rrd->rra_def[rra_idx].par[RRA_window_len].u_cnt; i > 0; i--) { - violations_array[i - 1] = 0; - } -#ifdef DEBUG - fprintf(stderr, "scratch buffer after erase:\n"); - for (i = 0; i < MAX_CDP_PAR_EN; i++) { - fprintf(stderr, "%lu ", rrd->cdp_prep[cdp_idx].scratch[i].u_cnt); - } - fprintf(stderr, "\n"); -#endif - return 0; -} - -/* Smooth a periodic array with a moving average: equal weights and - * length = 5% of the period. */ -int apply_smoother( rrd_t *rrd, unsigned long rra_idx, unsigned long rra_start, - rrd_file_t *rrd_file) { - unsigned long i, j, k; - unsigned long totalbytes; - rrd_value_t *rrd_values; - unsigned long row_length = rrd->stat_head->ds_cnt; - unsigned long row_count = rrd->rra_def[rra_idx].row_cnt; - unsigned long offset; - FIFOqueue **buffers; - rrd_value_t *working_average; - rrd_value_t *baseline; - int ret = 0; - - if (atoi(rrd->stat_head->version) >= 4) { - offset = floor(rrd->rra_def[rra_idx]. - par[RRA_seasonal_smoothing_window]. 
- u_val / 2 * row_count); - } else { - offset = floor(0.05 / 2 * row_count); - } - - if (offset == 0) - return 0; /* no smoothing */ - - /* allocate memory */ - totalbytes = sizeof(rrd_value_t) * row_length * row_count; - rrd_values = (rrd_value_t *) malloc(totalbytes); - if (rrd_values == NULL) { - return -RRD_ERR_MALLOC5; - } - - /* rra_start is at the beginning of this rra */ - if (rrd_seek(rrd_file, rra_start, SEEK_SET)) { - free(rrd_values); - return -RRD_ERR_SEEK2; - } - - /* could read all data in a single block, but we need to - * check for NA values */ - for (i = 0; i < row_count; ++i) { - for (j = 0; j < row_length; ++j) { - if (rrd_read - (rrd_file, &(rrd_values[i * row_length + j]), - sizeof(rrd_value_t) * 1) - != (ssize_t) (sizeof(rrd_value_t) * 1)) { - ret = -RRD_ERR_READ2; - } - if (isnan(rrd_values[i * row_length + j])) { - /* can't apply smoothing, still uninitialized values */ -#ifdef DEBUG - fprintf(stderr, - "apply_smoother: NA detected in seasonal array: %ld %ld\n", - i, j); -#endif - free(rrd_values); - return ret; - } - } - } - - /* allocate queues, one for each data source */ - buffers = (FIFOqueue **) malloc(sizeof(FIFOqueue *) * row_length); - for (i = 0; i < row_length; ++i) { - queue_alloc(&(buffers[i]), 2 * offset + 1); - } - /* need working average initialized to 0 */ - working_average = (rrd_value_t *) calloc(row_length, sizeof(rrd_value_t)); - baseline = (rrd_value_t *) calloc(row_length, sizeof(rrd_value_t)); - - /* compute sums of the first 2*offset terms */ - for (i = 0; i < 2 * offset; ++i) { - k = MyMod(i - offset, row_count); - for (j = 0; j < row_length; ++j) { - queue_push(buffers[j], rrd_values[k * row_length + j]); - working_average[j] += rrd_values[k * row_length + j]; - } - } - - /* compute moving averages */ - for (i = offset; i < row_count + offset; ++i) { - for (j = 0; j < row_length; ++j) { - k = MyMod(i, row_count); - /* add a term to the sum */ - working_average[j] += rrd_values[k * row_length + j]; - 
queue_push(buffers[j], rrd_values[k * row_length + j]); - - /* reset k to be the center of the window */ - k = MyMod(i - offset, row_count); - /* overwrite rdd_values entry, the old value is already - * saved in buffers */ - rrd_values[k * row_length + j] = - working_average[j] / (2 * offset + 1); - baseline[j] += rrd_values[k * row_length + j]; - - /* remove a term from the sum */ - working_average[j] -= queue_pop(buffers[j]); - } - } - - for (i = 0; i < row_length; ++i) { - queue_dealloc(buffers[i]); - baseline[i] /= row_count; - } - free(buffers); - free(working_average); - - if (cf_conv(rrd->rra_def[rra_idx].cf_nam) == CF_SEASONAL) { - rrd_value_t ( - *init_seasonality) ( - rrd_value_t seasonal_coef, - rrd_value_t intercept); - - switch (cf_conv(rrd->rra_def[hw_dep_idx(rrd, rra_idx)].cf_nam)) { - case CF_HWPREDICT: - init_seasonality = hw_additive_init_seasonality; - break; - case CF_MHWPREDICT: - init_seasonality = hw_multiplicative_init_seasonality; - break; - default: - return -RRD_ERR_DEP1; - } - - for (j = 0; j < row_length; ++j) { - for (i = 0; i < row_count; ++i) { - rrd_values[i * row_length + j] = - init_seasonality(rrd_values[i * row_length + j], - baseline[j]); - } - /* update the baseline coefficient, - * first, compute the cdp_index. 
*/ - offset = hw_dep_idx(rrd, rra_idx) * row_length + j; - (rrd->cdp_prep[offset]).scratch[CDP_hw_intercept].u_val += - baseline[j]; - } - /* flush cdp to disk */ - if (rrd_seek(rrd_file, sizeof(stat_head_t) + - rrd->stat_head->ds_cnt * sizeof(ds_def_t) + - rrd->stat_head->rra_cnt * sizeof(rra_def_t) + - sizeof(live_head_t) + - rrd->stat_head->ds_cnt * sizeof(pdp_prep_t), SEEK_SET)) { - free(rrd_values); - return -RRD_ERR_SEEK3; - } - if (rrd_write(rrd_file, rrd->cdp_prep, - sizeof(cdp_prep_t) * - (rrd->stat_head->rra_cnt) * rrd->stat_head->ds_cnt) - != (ssize_t) (sizeof(cdp_prep_t) * (rrd->stat_head->rra_cnt) * - (rrd->stat_head->ds_cnt))) { - free(rrd_values); - return -RRD_ERR_WRITE1; - } - } - - /* endif CF_SEASONAL */ - /* flush updated values to disk */ - if (rrd_seek(rrd_file, rra_start, SEEK_SET)) { - free(rrd_values); - return -RRD_ERR_SEEK4; - } - /* write as a single block */ - if (rrd_write - (rrd_file, rrd_values, sizeof(rrd_value_t) * row_length * row_count) - != (ssize_t) (sizeof(rrd_value_t) * row_length * row_count)) { - free(rrd_values); - return -RRD_ERR_WRITE2; - } - - free(rrd_values); - free(baseline); - return 0; -} - -void init_hwpredict_cdp( - cdp_prep_t *cdp) -{ - cdp->scratch[CDP_hw_intercept].u_val = DNAN; - cdp->scratch[CDP_hw_last_intercept].u_val = DNAN; - cdp->scratch[CDP_hw_slope].u_val = DNAN; - cdp->scratch[CDP_hw_last_slope].u_val = DNAN; - cdp->scratch[CDP_null_count].u_cnt = 1; - cdp->scratch[CDP_last_null_count].u_cnt = 1; -} - -void init_seasonal_cdp( - cdp_prep_t *cdp) -{ - cdp->scratch[CDP_hw_seasonal].u_val = DNAN; - cdp->scratch[CDP_hw_last_seasonal].u_val = DNAN; - cdp->scratch[CDP_init_seasonal].u_cnt = 1; -} - -int update_aberrant_CF( - rrd_t *rrd, - rrd_value_t pdp_val, - enum cf_en current_cf, - unsigned long cdp_idx, - unsigned long rra_idx, - unsigned long ds_idx, - unsigned short CDP_scratch_idx, - rrd_value_t *seasonal_coef) -{ - static hw_functions_t hw_multiplicative_functions = { - 
hw_multiplicative_calculate_prediction, - hw_multiplicative_calculate_intercept, - hw_calculate_slope, - hw_multiplicative_calculate_seasonality, - hw_multiplicative_init_seasonality, - hw_calculate_seasonal_deviation, - hw_init_seasonal_deviation, - 1.0 /* identity value */ - }; - - static hw_functions_t hw_additive_functions = { - hw_additive_calculate_prediction, - hw_additive_calculate_intercept, - hw_calculate_slope, - hw_additive_calculate_seasonality, - hw_additive_init_seasonality, - hw_calculate_seasonal_deviation, - hw_init_seasonal_deviation, - 0.0 /* identity value */ - }; - - rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val = pdp_val; - switch (current_cf) { - case CF_HWPREDICT: - return update_hwpredict(rrd, cdp_idx, rra_idx, ds_idx, - CDP_scratch_idx, &hw_additive_functions); - case CF_MHWPREDICT: - return update_hwpredict(rrd, cdp_idx, rra_idx, ds_idx, - CDP_scratch_idx, - &hw_multiplicative_functions); - case CF_DEVPREDICT: - return update_devpredict(rrd, cdp_idx, rra_idx, ds_idx, - CDP_scratch_idx); - case CF_SEASONAL: - switch (cf_conv(rrd->rra_def[hw_dep_idx(rrd, rra_idx)].cf_nam)) { - case CF_HWPREDICT: - return update_seasonal(rrd, cdp_idx, rra_idx, ds_idx, - CDP_scratch_idx, seasonal_coef, - &hw_additive_functions); - case CF_MHWPREDICT: - return update_seasonal(rrd, cdp_idx, rra_idx, ds_idx, - CDP_scratch_idx, seasonal_coef, - &hw_multiplicative_functions); - default: - return -RRD_ERR_UNREC_CONSOLIDATION_FUNC; - } - case CF_DEVSEASONAL: - switch (cf_conv(rrd->rra_def[hw_dep_idx(rrd, rra_idx)].cf_nam)) { - case CF_HWPREDICT: - return update_devseasonal(rrd, cdp_idx, rra_idx, ds_idx, - CDP_scratch_idx, seasonal_coef, - &hw_additive_functions); - case CF_MHWPREDICT: - return update_devseasonal(rrd, cdp_idx, rra_idx, ds_idx, - CDP_scratch_idx, seasonal_coef, - &hw_multiplicative_functions); - default: - return -RRD_ERR_UNREC_CONSOLIDATION_FUNC; - } - case CF_FAILURES: - switch (cf_conv - (rrd->rra_def[hw_dep_idx(rrd, hw_dep_idx(rrd, 
rra_idx))]. - cf_nam)) { - case CF_HWPREDICT: - return update_failures(rrd, cdp_idx, rra_idx, ds_idx, - CDP_scratch_idx, &hw_additive_functions); - case CF_MHWPREDICT: - return update_failures(rrd, cdp_idx, rra_idx, ds_idx, - CDP_scratch_idx, - &hw_multiplicative_functions); - default: - return -RRD_ERR_UNREC_CONSOLIDATION_FUNC; - } - case CF_AVERAGE: - default: - return 0; - } - return -1; -} - -static unsigned long MyMod( - signed long val, - unsigned long mod) -{ - unsigned long new_val; - - if (val < 0) - new_val = ((unsigned long) abs(val)) % mod; - else - new_val = (val % mod); - - if (val < 0) - return (mod - new_val); - else - return (new_val); -} - -/* a standard fixed-capacity FIF0 queue implementation - * No overflow checking is performed. */ -int queue_alloc( - FIFOqueue **q, - int capacity) -{ - *q = (FIFOqueue *) malloc(sizeof(FIFOqueue)); - if (*q == NULL) - return -1; - (*q)->queue = (rrd_value_t *) malloc(sizeof(rrd_value_t) * capacity); - if ((*q)->queue == NULL) { - free(*q); - return -1; - } - (*q)->capacity = capacity; - (*q)->head = capacity; - (*q)->tail = 0; - return 0; -} - -int queue_isempty( - FIFOqueue *q) -{ - return (q->head % q->capacity == q->tail); -} - -void queue_push( - FIFOqueue *q, - rrd_value_t value) -{ - q->queue[(q->tail)++] = value; - q->tail = q->tail % q->capacity; -} - -rrd_value_t queue_pop( - FIFOqueue *q) -{ - q->head = q->head % q->capacity; - return q->queue[(q->head)++]; -} - -void queue_dealloc( - FIFOqueue *q) -{ - free(q->queue); - free(q); -} diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_hw.h b/vendor/github.com/open-falcon/rrdlite/rrd_hw.h deleted file mode 100644 index 56ef567e..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_hw.h +++ /dev/null @@ -1,61 +0,0 @@ -/***************************************************************************** - * RRDtool 1.4.9 Copyright by Tobi Oetiker, 1997-2014 - ***************************************************************************** - * rrd_hw.h : 
Support for Holt-Winters Smoothing/ Aberrant Behavior Detection - *****************************************************************************/ - -/* functions implemented in rrd_hw.c */ -int update_aberrant_CF( - rrd_t *rrd, - rrd_value_t pdp_val, - enum cf_en current_cf, - unsigned long cdp_idx, - unsigned long rra_idx, - unsigned long ds_idx, - unsigned short CDP_scratch_idx, - rrd_value_t *seasonal_coef); -int create_hw_contingent_rras( - rrd_t *rrd, - unsigned short period, - unsigned long hashed_name); -int lookup_seasonal( - rrd_t *rrd, - unsigned long rra_idx, - unsigned long rra_start, - rrd_file_t *rrd_file, - unsigned long offset, - rrd_value_t **seasonal_coef); -int erase_violations( - rrd_t *rrd, - unsigned long cdp_idx, - unsigned long rra_idx); -int apply_smoother( - rrd_t *rrd, - unsigned long rra_idx, - unsigned long rra_start, - rrd_file_t *rrd_file); -void init_hwpredict_cdp( - cdp_prep_t *); -void init_seasonal_cdp( - cdp_prep_t *); - -#define BURNIN_CYCLES 3 - -/* a standard fixed-capacity FIFO queue implementation */ -typedef struct FIFOqueue { - rrd_value_t *queue; - int capacity, head, tail; -} FIFOqueue; - -int queue_alloc( - FIFOqueue **q, - int capacity); -void queue_dealloc( - FIFOqueue *q); -void queue_push( - FIFOqueue *q, - rrd_value_t value); -int queue_isempty( - FIFOqueue *q); -rrd_value_t queue_pop( - FIFOqueue *q); diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_hw_math.c b/vendor/github.com/open-falcon/rrdlite/rrd_hw_math.c deleted file mode 100644 index 05fbf023..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_hw_math.c +++ /dev/null @@ -1,143 +0,0 @@ -/***************************************************************************** - * rrd_hw_math.c Math functions for Holt-Winters computations - *****************************************************************************/ - -#include "rrd_tool.h" -#include "rrd_hw_math.h" - -/***************************************************************************** - * 
RRDtool supports both the additive and multiplicative Holt-Winters methods. - * The additive method makes predictions by adding seasonality to the baseline, - * whereas the multiplicative method multiplies the seasonality coefficient by - * the baseline to make a prediction. This file contains all the differences - * between the additive and multiplicative methods, as well as a few math - * functions common to them both. - ****************************************************************************/ - -/***************************************************************************** - * Functions for additive Holt-Winters - *****************************************************************************/ - -rrd_value_t hw_additive_calculate_prediction( - rrd_value_t intercept, - rrd_value_t slope, - int null_count, - rrd_value_t seasonal_coef) -{ - return intercept + slope * null_count + seasonal_coef; -} - -rrd_value_t hw_additive_calculate_intercept( - rrd_value_t hw_alpha, - rrd_value_t observed, - rrd_value_t seasonal_coef, - unival *coefs) -{ - return hw_alpha * (observed - seasonal_coef) - + (1 - hw_alpha) * (coefs[CDP_hw_intercept].u_val - + - (coefs[CDP_hw_slope].u_val) * - (coefs[CDP_null_count].u_cnt)); -} - -rrd_value_t hw_additive_calculate_seasonality( - rrd_value_t hw_gamma, - rrd_value_t observed, - rrd_value_t intercept, - rrd_value_t seasonal_coef) -{ - return hw_gamma * (observed - intercept) - + (1 - hw_gamma) * seasonal_coef; -} - -rrd_value_t hw_additive_init_seasonality( - rrd_value_t seasonal_coef, - rrd_value_t intercept) -{ - return seasonal_coef - intercept; -} - -/***************************************************************************** - * Functions for multiplicative Holt-Winters - *****************************************************************************/ - -rrd_value_t hw_multiplicative_calculate_prediction( - rrd_value_t intercept, - rrd_value_t slope, - int null_count, - rrd_value_t seasonal_coef) -{ - return (intercept + slope * 
null_count) * seasonal_coef; -} - -rrd_value_t hw_multiplicative_calculate_intercept( - rrd_value_t hw_alpha, - rrd_value_t observed, - rrd_value_t seasonal_coef, - unival *coefs) -{ - if (seasonal_coef <= 0) { - return DNAN; - } - - return hw_alpha * (observed / seasonal_coef) - + (1 - hw_alpha) * (coefs[CDP_hw_intercept].u_val - + - (coefs[CDP_hw_slope].u_val) * - (coefs[CDP_null_count].u_cnt)); -} - -rrd_value_t hw_multiplicative_calculate_seasonality( - rrd_value_t hw_gamma, - rrd_value_t observed, - rrd_value_t intercept, - rrd_value_t seasonal_coef) -{ - if (intercept <= 0) { - return DNAN; - } - - return hw_gamma * (observed / intercept) - + (1 - hw_gamma) * seasonal_coef; -} - -rrd_value_t hw_multiplicative_init_seasonality( - rrd_value_t seasonal_coef, - rrd_value_t intercept) -{ - if (intercept <= 0) { - return DNAN; - } - - return seasonal_coef / intercept; -} - -/***************************************************************************** - * Math functions common to additive and multiplicative Holt-Winters - *****************************************************************************/ - -rrd_value_t hw_calculate_slope( - rrd_value_t hw_beta, - unival *coefs) -{ - return hw_beta * (coefs[CDP_hw_intercept].u_val - - coefs[CDP_hw_last_intercept].u_val) - + (1 - hw_beta) * coefs[CDP_hw_slope].u_val; -} - -rrd_value_t hw_calculate_seasonal_deviation( - rrd_value_t hw_gamma, - rrd_value_t prediction, - rrd_value_t observed, - rrd_value_t last) -{ - return hw_gamma * fabs(prediction - observed) - + (1 - hw_gamma) * last; -} - -rrd_value_t hw_init_seasonal_deviation( - rrd_value_t prediction, - rrd_value_t observed) -{ - return fabs(prediction - observed); -} diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_hw_math.h b/vendor/github.com/open-falcon/rrdlite/rrd_hw_math.h deleted file mode 100644 index 3677b317..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_hw_math.h +++ /dev/null @@ -1,132 +0,0 @@ 
-/***************************************************************************** - * rrd_hw_math.h Math functions for Holt-Winters computations - *****************************************************************************/ - -#include "rrd.h" -#include "rrd_format.h" - -/* since /usr/include/bits/mathcalls.h:265 defines gamma already */ -#define gamma hw_gamma - -/***************************************************************************** - * Functions for additive Holt-Winters - *****************************************************************************/ - -rrd_value_t hw_additive_calculate_prediction( - rrd_value_t intercept, - rrd_value_t slope, - int null_count, - rrd_value_t seasonal_coef); - -rrd_value_t hw_additive_calculate_intercept( - rrd_value_t alpha, - rrd_value_t scratch, - rrd_value_t seasonal_coef, - unival *coefs); - -rrd_value_t hw_additive_calculate_seasonality( - rrd_value_t gamma, - rrd_value_t scratch, - rrd_value_t intercept, - rrd_value_t seasonal_coef); - -rrd_value_t hw_additive_init_seasonality( - rrd_value_t seasonal_coef, - rrd_value_t intercept); - -/***************************************************************************** - * Functions for multiplicative Holt-Winters - *****************************************************************************/ - -rrd_value_t hw_multiplicative_calculate_prediction( - rrd_value_t intercept, - rrd_value_t slope, - int null_count, - rrd_value_t seasonal_coef); - -rrd_value_t hw_multiplicative_calculate_intercept( - rrd_value_t alpha, - rrd_value_t scratch, - rrd_value_t seasonal_coef, - unival *coefs); - -rrd_value_t hw_multiplicative_calculate_seasonality( - rrd_value_t gamma, - rrd_value_t scratch, - rrd_value_t intercept, - rrd_value_t seasonal_coef); - -rrd_value_t hw_multiplicative_init_seasonality( - rrd_value_t seasonal_coef, - rrd_value_t intercept); - -/***************************************************************************** - * Math functions common to additive and 
multiplicative Holt-Winters - *****************************************************************************/ - -rrd_value_t hw_calculate_slope( - rrd_value_t beta, - unival *coefs); - -rrd_value_t hw_calculate_seasonal_deviation( - rrd_value_t gamma, - rrd_value_t prediction, - rrd_value_t observed, - rrd_value_t last); - -rrd_value_t hw_init_seasonal_deviation( - rrd_value_t prediction, - rrd_value_t observed); - - -/* Function container */ - -typedef struct hw_functions_t { - rrd_value_t ( - *predict) ( - rrd_value_t intercept, - rrd_value_t slope, - int null_count, - rrd_value_t seasonal_coef); - - rrd_value_t ( - *intercept) ( - rrd_value_t alpha, - rrd_value_t observed, - rrd_value_t seasonal_coef, - unival *coefs); - - rrd_value_t ( - *slope) ( - rrd_value_t beta, - unival *coefs); - - rrd_value_t ( - *seasonality) ( - rrd_value_t gamma, - rrd_value_t observed, - rrd_value_t intercept, - rrd_value_t seasonal_coef); - - rrd_value_t ( - *init_seasonality) ( - rrd_value_t seasonal_coef, - rrd_value_t intercept); - - rrd_value_t ( - *seasonal_deviation) ( - rrd_value_t gamma, - rrd_value_t prediction, - rrd_value_t observed, - rrd_value_t last); - - rrd_value_t ( - *init_seasonal_deviation) ( - rrd_value_t prediction, - rrd_value_t observed); - - rrd_value_t identity; -} hw_functions_t; - - -#undef gamma diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_hw_update.c b/vendor/github.com/open-falcon/rrdlite/rrd_hw_update.c deleted file mode 100644 index 90a8a520..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_hw_update.c +++ /dev/null @@ -1,475 +0,0 @@ -/***************************************************************************** - * rrd_hw_update.c Functions for updating a Holt-Winters RRA - ****************************************************************************/ - -#include "rrd_tool.h" -#include "rrd_format.h" -#include "rrd_hw_math.h" -#include "rrd_hw_update.h" - -static void init_slope_intercept( - unival *coefs, - unsigned short 
CDP_scratch_idx) -{ -#ifdef DEBUG - fprintf(stderr, "Initialization of slope/intercept\n"); -#endif - coefs[CDP_hw_intercept].u_val = coefs[CDP_scratch_idx].u_val; - coefs[CDP_hw_last_intercept].u_val = coefs[CDP_scratch_idx].u_val; - /* initialize the slope to 0 */ - coefs[CDP_hw_slope].u_val = 0.0; - coefs[CDP_hw_last_slope].u_val = 0.0; - /* initialize null count to 1 */ - coefs[CDP_null_count].u_cnt = 1; - coefs[CDP_last_null_count].u_cnt = 1; -} - -static int hw_is_violation( - rrd_value_t observed, - rrd_value_t prediction, - rrd_value_t deviation, - rrd_value_t delta_pos, - rrd_value_t delta_neg) -{ - return (observed > prediction + delta_pos * deviation - || observed < prediction - delta_neg * deviation); -} - -int update_hwpredict( - rrd_t *rrd, - unsigned long cdp_idx, - unsigned long rra_idx, - unsigned long ds_idx, - unsigned short CDP_scratch_idx, - hw_functions_t * functions) -{ - rrd_value_t prediction; - unsigned long dependent_rra_idx, seasonal_cdp_idx; - unival *coefs = rrd->cdp_prep[cdp_idx].scratch; - rra_def_t *current_rra = &(rrd->rra_def[rra_idx]); - rrd_value_t seasonal_coef; - - /* save coefficients from current prediction */ - coefs[CDP_hw_last_intercept].u_val = coefs[CDP_hw_intercept].u_val; - coefs[CDP_hw_last_slope].u_val = coefs[CDP_hw_slope].u_val; - coefs[CDP_last_null_count].u_cnt = coefs[CDP_null_count].u_cnt; - - /* retrieve the current seasonal coef */ - dependent_rra_idx = current_rra->par[RRA_dependent_rra_idx].u_cnt; - seasonal_cdp_idx = dependent_rra_idx * (rrd->stat_head->ds_cnt) + ds_idx; - - seasonal_coef = (dependent_rra_idx < rra_idx) - ? 
rrd->cdp_prep[seasonal_cdp_idx].scratch[CDP_hw_last_seasonal].u_val - : rrd->cdp_prep[seasonal_cdp_idx].scratch[CDP_hw_seasonal].u_val; - - /* compute the prediction */ - if (isnan(coefs[CDP_hw_intercept].u_val) - || isnan(coefs[CDP_hw_slope].u_val) - || isnan(seasonal_coef)) { - prediction = DNAN; - - /* bootstrap initialization of slope and intercept */ - if (isnan(coefs[CDP_hw_intercept].u_val) && - !isnan(coefs[CDP_scratch_idx].u_val)) { - init_slope_intercept(coefs, CDP_scratch_idx); - } - /* if seasonal coefficient is NA, then don't update intercept, slope */ - } else { - prediction = functions->predict(coefs[CDP_hw_intercept].u_val, - coefs[CDP_hw_slope].u_val, - coefs[CDP_null_count].u_cnt, - seasonal_coef); -#ifdef DEBUG - fprintf(stderr, - "computed prediction: %f (intercept %f, slope %f, season %f)\n", - prediction, coefs[CDP_hw_intercept].u_val, - coefs[CDP_hw_slope].u_val, seasonal_coef); -#endif - if (isnan(coefs[CDP_scratch_idx].u_val)) { - /* NA value, no updates of intercept, slope; - * increment the null count */ - (coefs[CDP_null_count].u_cnt)++; - } else { - /* update the intercept */ - coefs[CDP_hw_intercept].u_val = - functions->intercept(current_rra->par[RRA_hw_alpha].u_val, - coefs[CDP_scratch_idx].u_val, - seasonal_coef, coefs); - - /* update the slope */ - coefs[CDP_hw_slope].u_val = - functions->slope(current_rra->par[RRA_hw_beta].u_val, coefs); - - /* reset the null count */ - coefs[CDP_null_count].u_cnt = 1; -#ifdef DEBUG - fprintf(stderr, "Updating intercept = %f, slope = %f\n", - coefs[CDP_hw_intercept].u_val, coefs[CDP_hw_slope].u_val); -#endif - } - } - - /* store the prediction for writing */ - coefs[CDP_scratch_idx].u_val = prediction; - return 0; -} - -int update_seasonal( - rrd_t *rrd, - unsigned long cdp_idx, - unsigned long rra_idx, - unsigned long ds_idx, - unsigned short CDP_scratch_idx, - rrd_value_t *seasonal_coef, - hw_functions_t * functions) -{ -/* TODO: extract common if subblocks in the wake of I/O optimization */ - 
rrd_value_t intercept, seasonal; - rra_def_t *current_rra = &(rrd->rra_def[rra_idx]); - rra_def_t *hw_rra = - &(rrd->rra_def[current_rra->par[RRA_dependent_rra_idx].u_cnt]); - - /* obtain cdp_prep index for HWPREDICT */ - unsigned long hw_cdp_idx = (current_rra->par[RRA_dependent_rra_idx].u_cnt) - * (rrd->stat_head->ds_cnt) + ds_idx; - unival *coefs = rrd->cdp_prep[hw_cdp_idx].scratch; - - /* update seasonal coefficient in cdp prep areas */ - seasonal = rrd->cdp_prep[cdp_idx].scratch[CDP_hw_seasonal].u_val; - rrd->cdp_prep[cdp_idx].scratch[CDP_hw_last_seasonal].u_val = seasonal; - rrd->cdp_prep[cdp_idx].scratch[CDP_hw_seasonal].u_val = - seasonal_coef[ds_idx]; - - if (isnan(rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val)) { - /* no update, store the old value unchanged, - * doesn't matter if it is NA */ - rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val = seasonal; - return 0; - } - - /* update seasonal value for disk */ - if (current_rra->par[RRA_dependent_rra_idx].u_cnt < rra_idx) { - /* associated HWPREDICT has already been updated */ - /* check for possible NA values */ - if (isnan(coefs[CDP_hw_last_intercept].u_val) - || isnan(coefs[CDP_hw_last_slope].u_val)) { - /* this should never happen, as HWPREDICT was already updated */ - rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val = DNAN; - } else if (isnan(seasonal)) { - /* initialization: intercept is not currently being updated */ -#ifdef DEBUG - fprintf(stderr, "Initialization of seasonal coef %lu\n", - rrd->rra_ptr[rra_idx].cur_row); -#endif - rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val = - functions->init_seasonality(rrd->cdp_prep[cdp_idx]. - scratch[CDP_scratch_idx].u_val, - coefs[CDP_hw_last_intercept]. - u_val); - } else { - intercept = coefs[CDP_hw_intercept].u_val; - - rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val = - functions->seasonality(current_rra->par[RRA_seasonal_gamma]. - u_val, - rrd->cdp_prep[cdp_idx]. 
- scratch[CDP_scratch_idx].u_val, - intercept, seasonal); -#ifdef DEBUG - fprintf(stderr, - "Updating seasonal = %f (params: gamma %f, new intercept %f, old seasonal %f)\n", - rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val, - current_rra->par[RRA_seasonal_gamma].u_val, - intercept, seasonal); -#endif - } - } else { - /* SEASONAL array is updated first, which means the new intercept - * hasn't be computed; so we compute it here. */ - - /* check for possible NA values */ - if (isnan(coefs[CDP_hw_intercept].u_val) - || isnan(coefs[CDP_hw_slope].u_val)) { - /* Initialization of slope and intercept will occur. - * force seasonal coefficient to 0 or 1. */ - rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val = - functions->identity; - } else if (isnan(seasonal)) { - /* initialization: intercept will not be updated - * CDP_hw_intercept = CDP_hw_last_intercept; just need to - * subtract/divide by this baseline value. */ -#ifdef DEBUG - fprintf(stderr, "Initialization of seasonal coef %lu\n", - rrd->rra_ptr[rra_idx].cur_row); -#endif - rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val = - functions->init_seasonality(rrd->cdp_prep[cdp_idx]. - scratch[CDP_scratch_idx].u_val, - coefs[CDP_hw_intercept].u_val); - } else { - /* Note that we must get CDP_scratch_idx from SEASONAL array, as CDP_scratch_idx - * for HWPREDICT array will be DNAN. */ - intercept = functions->intercept(hw_rra->par[RRA_hw_alpha].u_val, - rrd->cdp_prep[cdp_idx]. - scratch[CDP_scratch_idx].u_val, - seasonal, coefs); - - rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val = - functions->seasonality(current_rra->par[RRA_seasonal_gamma]. - u_val, - rrd->cdp_prep[cdp_idx]. 
- scratch[CDP_scratch_idx].u_val, - intercept, seasonal); - } - } -#ifdef DEBUG - fprintf(stderr, "seasonal coefficient set= %f\n", - rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val); -#endif - return 0; -} - -int update_devpredict( - rrd_t *rrd, - unsigned long cdp_idx, - unsigned long rra_idx, - unsigned long ds_idx, - unsigned short CDP_scratch_idx) -{ - /* there really isn't any "update" here; the only reason this information - * is stored separately from DEVSEASONAL is to preserve deviation predictions - * for a longer duration than one seasonal cycle. */ - unsigned long seasonal_cdp_idx = - (rrd->rra_def[rra_idx].par[RRA_dependent_rra_idx].u_cnt) - * (rrd->stat_head->ds_cnt) + ds_idx; - - if (rrd->rra_def[rra_idx].par[RRA_dependent_rra_idx].u_cnt < rra_idx) { - /* associated DEVSEASONAL array already updated */ - rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val - = - rrd->cdp_prep[seasonal_cdp_idx]. - scratch[CDP_last_seasonal_deviation].u_val; - } else { - /* associated DEVSEASONAL not yet updated */ - rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val - = - rrd->cdp_prep[seasonal_cdp_idx].scratch[CDP_seasonal_deviation]. 
- u_val; - } - return 0; -} - -int update_devseasonal( - rrd_t *rrd, - unsigned long cdp_idx, - unsigned long rra_idx, - unsigned long ds_idx, - unsigned short CDP_scratch_idx, - rrd_value_t *seasonal_dev, - hw_functions_t * functions) -{ - rrd_value_t prediction = 0, seasonal_coef = DNAN; - rra_def_t *current_rra = &(rrd->rra_def[rra_idx]); - - /* obtain cdp_prep index for HWPREDICT */ - unsigned long hw_rra_idx = current_rra->par[RRA_dependent_rra_idx].u_cnt; - unsigned long hw_cdp_idx = hw_rra_idx * (rrd->stat_head->ds_cnt) + ds_idx; - unsigned long seasonal_cdp_idx; - unival *coefs = rrd->cdp_prep[hw_cdp_idx].scratch; - - rrd->cdp_prep[cdp_idx].scratch[CDP_last_seasonal_deviation].u_val = - rrd->cdp_prep[cdp_idx].scratch[CDP_seasonal_deviation].u_val; - /* retrieve the next seasonal deviation value, could be NA */ - rrd->cdp_prep[cdp_idx].scratch[CDP_seasonal_deviation].u_val = - seasonal_dev[ds_idx]; - - /* retrieve the current seasonal_coef (not to be confused with the - * current seasonal deviation). Could make this more readable by introducing - * some wrapper functions. */ - seasonal_cdp_idx = - (rrd->rra_def[hw_rra_idx].par[RRA_dependent_rra_idx].u_cnt) - * (rrd->stat_head->ds_cnt) + ds_idx; - if (rrd->rra_def[hw_rra_idx].par[RRA_dependent_rra_idx].u_cnt < rra_idx) - /* SEASONAL array already updated */ - seasonal_coef = - rrd->cdp_prep[seasonal_cdp_idx].scratch[CDP_hw_last_seasonal]. 
- u_val; - else - /* SEASONAL array not yet updated */ - seasonal_coef = - rrd->cdp_prep[seasonal_cdp_idx].scratch[CDP_hw_seasonal].u_val; - - /* compute the abs value of the difference between the prediction and - * observed value */ - if (hw_rra_idx < rra_idx) { - /* associated HWPREDICT has already been updated */ - if (isnan(coefs[CDP_hw_last_intercept].u_val) || - isnan(coefs[CDP_hw_last_slope].u_val) || isnan(seasonal_coef)) { - /* one of the prediction values is uinitialized */ - rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val = DNAN; - return 0; - } else { - prediction = - functions->predict(coefs[CDP_hw_last_intercept].u_val, - coefs[CDP_hw_last_slope].u_val, - coefs[CDP_last_null_count].u_cnt, - seasonal_coef); - } - } else { - /* associated HWPREDICT has NOT been updated */ - if (isnan(coefs[CDP_hw_intercept].u_val) || - isnan(coefs[CDP_hw_slope].u_val) || isnan(seasonal_coef)) { - /* one of the prediction values is uinitialized */ - rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val = DNAN; - return 0; - } else { - prediction = functions->predict(coefs[CDP_hw_intercept].u_val, - coefs[CDP_hw_slope].u_val, - coefs[CDP_null_count].u_cnt, - seasonal_coef); - } - } - - if (isnan(rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val)) { - /* no update, store existing value unchanged, doesn't - * matter if it is NA */ - rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val = - rrd->cdp_prep[cdp_idx].scratch[CDP_last_seasonal_deviation].u_val; - } else - if (isnan - (rrd->cdp_prep[cdp_idx].scratch[CDP_last_seasonal_deviation]. - u_val)) { - /* initialization */ -#ifdef DEBUG - fprintf(stderr, "Initialization of seasonal deviation\n"); -#endif - rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val = - functions->init_seasonal_deviation(prediction, - rrd->cdp_prep[cdp_idx]. - scratch[CDP_scratch_idx]. 
- u_val); - } else { - /* exponential smoothing update */ - rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val = - functions->seasonal_deviation(rrd->rra_def[rra_idx]. - par[RRA_seasonal_gamma].u_val, - prediction, - rrd->cdp_prep[cdp_idx]. - scratch[CDP_scratch_idx].u_val, - rrd->cdp_prep[cdp_idx]. - scratch - [CDP_last_seasonal_deviation]. - u_val); - } - return 0; -} - -/* Check for a failure based on a threshold # of violations within the specified - * window. */ -int update_failures( - rrd_t *rrd, - unsigned long cdp_idx, - unsigned long rra_idx, - unsigned long ds_idx, - unsigned short CDP_scratch_idx, - hw_functions_t * functions) -{ - /* detection of a violation depends on 3 RRAs: - * HWPREDICT, SEASONAL, and DEVSEASONAL */ - rra_def_t *current_rra = &(rrd->rra_def[rra_idx]); - unsigned long dev_rra_idx = current_rra->par[RRA_dependent_rra_idx].u_cnt; - rra_def_t *dev_rra = &(rrd->rra_def[dev_rra_idx]); - unsigned long hw_rra_idx = dev_rra->par[RRA_dependent_rra_idx].u_cnt; - rra_def_t *hw_rra = &(rrd->rra_def[hw_rra_idx]); - unsigned long seasonal_rra_idx = hw_rra->par[RRA_dependent_rra_idx].u_cnt; - unsigned long temp_cdp_idx; - rrd_value_t deviation = DNAN; - rrd_value_t seasonal_coef = DNAN; - rrd_value_t prediction = DNAN; - char violation = 0; - unsigned short violation_cnt = 0, i; - char *violations_array; - - /* usual checks to determine the order of the RRAs */ - temp_cdp_idx = dev_rra_idx * (rrd->stat_head->ds_cnt) + ds_idx; - if (rra_idx < seasonal_rra_idx) { - /* DEVSEASONAL not yet updated */ - deviation = - rrd->cdp_prep[temp_cdp_idx].scratch[CDP_seasonal_deviation].u_val; - } else { - /* DEVSEASONAL already updated */ - deviation = - rrd->cdp_prep[temp_cdp_idx].scratch[CDP_last_seasonal_deviation]. 
- u_val; - } - if (!isnan(deviation)) { - - temp_cdp_idx = seasonal_rra_idx * (rrd->stat_head->ds_cnt) + ds_idx; - if (rra_idx < seasonal_rra_idx) { - /* SEASONAL not yet updated */ - seasonal_coef = - rrd->cdp_prep[temp_cdp_idx].scratch[CDP_hw_seasonal].u_val; - } else { - /* SEASONAL already updated */ - seasonal_coef = - rrd->cdp_prep[temp_cdp_idx].scratch[CDP_hw_last_seasonal]. - u_val; - } - /* in this code block, we know seasonal coef is not DNAN, because deviation is not - * null */ - - temp_cdp_idx = hw_rra_idx * (rrd->stat_head->ds_cnt) + ds_idx; - if (rra_idx < hw_rra_idx) { - /* HWPREDICT not yet updated */ - prediction = - functions->predict(rrd->cdp_prep[temp_cdp_idx]. - scratch[CDP_hw_intercept].u_val, - rrd->cdp_prep[temp_cdp_idx]. - scratch[CDP_hw_slope].u_val, - rrd->cdp_prep[temp_cdp_idx]. - scratch[CDP_null_count].u_cnt, - seasonal_coef); - } else { - /* HWPREDICT already updated */ - prediction = - functions->predict(rrd->cdp_prep[temp_cdp_idx]. - scratch[CDP_hw_last_intercept].u_val, - rrd->cdp_prep[temp_cdp_idx]. - scratch[CDP_hw_last_slope].u_val, - rrd->cdp_prep[temp_cdp_idx]. 
- scratch[CDP_last_null_count].u_cnt, - seasonal_coef); - } - - /* determine if the observed value is a violation */ - if (!isnan(rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val)) { - if (hw_is_violation - (rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val, - prediction, deviation, current_rra->par[RRA_delta_pos].u_val, - current_rra->par[RRA_delta_neg].u_val)) { - violation = 1; - } - } else { - violation = 1; /* count DNAN values as violations */ - } - - } - - /* determine if a failure has occurred and update the failure array */ - violation_cnt = violation; - violations_array = (char *) ((void *) rrd->cdp_prep[cdp_idx].scratch); - for (i = current_rra->par[RRA_window_len].u_cnt; i > 1; i--) { - /* shift */ - violations_array[i - 1] = violations_array[i - 2]; - violation_cnt += violations_array[i - 1]; - } - violations_array[0] = violation; - - if (violation_cnt < current_rra->par[RRA_failure_threshold].u_cnt) - /* not a failure */ - rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val = 0.0; - else - rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val = 1.0; - - return (rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val); -} diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_hw_update.h b/vendor/github.com/open-falcon/rrdlite/rrd_hw_update.h deleted file mode 100644 index e59e2db0..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_hw_update.h +++ /dev/null @@ -1,44 +0,0 @@ -/***************************************************************************** - * rrd_hw_update.h Functions for updating a Holt-Winters RRA - ****************************************************************************/ - -int update_hwpredict( - rrd_t *rrd, - unsigned long cdp_idx, - unsigned long rra_idx, - unsigned long ds_idx, - unsigned short CDP_scratch_idx, - hw_functions_t * functions); - -int update_seasonal( - rrd_t *rrd, - unsigned long cdp_idx, - unsigned long rra_idx, - unsigned long ds_idx, - unsigned short CDP_scratch_idx, - rrd_value_t 
*seasonal_coef, - hw_functions_t * functions); - -int update_devpredict( - rrd_t *rrd, - unsigned long cdp_idx, - unsigned long rra_idx, - unsigned long ds_idx, - unsigned short CDP_scratch_idx); - -int update_devseasonal( - rrd_t *rrd, - unsigned long cdp_idx, - unsigned long rra_idx, - unsigned long ds_idx, - unsigned short CDP_scratch_idx, - rrd_value_t *seasonal_dev, - hw_functions_t * functions); - -int update_failures( - rrd_t *rrd, - unsigned long cdp_idx, - unsigned long rra_idx, - unsigned long ds_idx, - unsigned short CDP_scratch_idx, - hw_functions_t * functions); diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_info.c b/vendor/github.com/open-falcon/rrdlite/rrd_info.c deleted file mode 100644 index a7273869..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_info.c +++ /dev/null @@ -1,380 +0,0 @@ -/***************************************************************************** - * RRDtool 1.4.9 Copyright by Tobi Oetiker, 1997-2014 - ***************************************************************************** - * rrd_info Get Information about the configuration of an RRD - *****************************************************************************/ - -#include "rrd_tool.h" -#include "rrd_rpncalc.h" -#ifndef RRD_LITE -#include "rrd_client.h" -#endif -#include - -/* proto */ -rrd_info_t *rrd_info(int, char **); -rrd_info_t *rrd_info_r(char *filename, int *ret_p); - -/* allocate memory for string */ -char *sprintf_alloc(char *fmt, ...) 
{ - char *str = NULL; - va_list argp; -#ifdef HAVE_VASPRINTF - va_start( argp, fmt ); - if (vasprintf( &str, fmt, argp ) == -1){ - va_end(argp); - //rrd_set_error ("vasprintf failed."); - return(NULL); - } -#else - int maxlen = 1024 + strlen(fmt); - str = (char*)malloc(sizeof(char) * (maxlen + 1)); - if (str != NULL) { - va_start(argp, fmt); -#ifdef HAVE_VSNPRINTF - vsnprintf(str, maxlen, fmt, argp); -#else - vsprintf(str, fmt, argp); -#endif - } -#endif /* HAVE_VASPRINTF */ - va_end(argp); - return str; -} - -/* the function formerly known as push was renamed to info_push and later - * rrd_info_push because it is now used outside the scope of this file */ -rrd_info_t * rrd_info_push(rrd_info_t * info, char *key, rrd_info_type_t type, - rrd_infoval_t value) { - rrd_info_t *next; - - next = (rrd_info_t*)malloc(sizeof(*next)); - next->next = (rrd_info_t *) 0; - if (info) - info->next = next; - next->type = type; - next->key = key; - switch (type) { - case RD_I_VAL: - next->value.u_val = value.u_val; - break; - case RD_I_CNT: - next->value.u_cnt = value.u_cnt; - break; - case RD_I_INT: - next->value.u_int = value.u_int; - break; - case RD_I_STR: - next->value.u_str = (char*)malloc(sizeof(char) * (strlen(value.u_str) + 1)); - strcpy(next->value.u_str, value.u_str); - break; - case RD_I_BLO: - next->value.u_blo.size = value.u_blo.size; - next->value.u_blo.ptr = - (unsigned char *)malloc(sizeof(unsigned char) * value.u_blo.size); - memcpy(next->value.u_blo.ptr, value.u_blo.ptr, value.u_blo.size); - break; - } - return (next); -} - -rrd_info_t *rrd_info_r(char *filename, int *ret_p) { - unsigned int i, ii = 0; - rrd_t rrd; - rrd_info_t *data = NULL, *cd; - rrd_infoval_t info; - rrd_file_t *rrd_file; - enum cf_en current_cf; - enum dst_en current_ds; - - rrd_init(&rrd); - rrd_file = rrd_open(filename, &rrd, RRD_READONLY, ret_p); - if (rrd_file == NULL) - goto err_free; - - info.u_str = filename; - cd = rrd_info_push(NULL, sprintf_alloc("filename"), RD_I_STR, info); - data 
= cd; - - info.u_str = rrd.stat_head->version; - cd = rrd_info_push(cd, sprintf_alloc("rrd_version"), RD_I_STR, info); - - info.u_cnt = rrd.stat_head->pdp_step; - cd = rrd_info_push(cd, sprintf_alloc("step"), RD_I_CNT, info); - - info.u_cnt = rrd.live_head->last_up; - cd = rrd_info_push(cd, sprintf_alloc("last_update"), RD_I_CNT, info); - - info.u_cnt = rrd_get_header_size(&rrd); - cd = rrd_info_push(cd, sprintf_alloc("header_size"), RD_I_CNT, info); - - for (i = 0; i < rrd.stat_head->ds_cnt; i++) { - - info.u_cnt=i; - cd= rrd_info_push(cd,sprintf_alloc("ds[%s].index", - rrd.ds_def[i].ds_nam), - RD_I_CNT, info); - - info.u_str = rrd.ds_def[i].dst; - cd = rrd_info_push(cd, sprintf_alloc("ds[%s].type", - rrd.ds_def[i].ds_nam), - RD_I_STR, info); - - current_ds = dst_conv(rrd.ds_def[i].dst); - switch (current_ds) { - case DST_CDEF: - { - char *buffer = NULL; - - rpn_compact2str((rpn_cdefds_t *) &(rrd.ds_def[i].par[DS_cdef]), - rrd.ds_def, &buffer); - info.u_str = buffer; - cd = rrd_info_push(cd, - sprintf_alloc("ds[%s].cdef", - rrd.ds_def[i].ds_nam), RD_I_STR, - info); - free(buffer); - } - break; - default: - info.u_cnt = rrd.ds_def[i].par[DS_mrhb_cnt].u_cnt; - cd = rrd_info_push(cd, - sprintf_alloc("ds[%s].minimal_heartbeat", - rrd.ds_def[i].ds_nam), RD_I_CNT, - info); - - info.u_val = rrd.ds_def[i].par[DS_min_val].u_val; - cd = rrd_info_push(cd, - sprintf_alloc("ds[%s].min", - rrd.ds_def[i].ds_nam), RD_I_VAL, - info); - - info.u_val = rrd.ds_def[i].par[DS_max_val].u_val; - cd = rrd_info_push(cd, - sprintf_alloc("ds[%s].max", - rrd.ds_def[i].ds_nam), RD_I_VAL, - info); - break; - } - - info.u_str = rrd.pdp_prep[i].last_ds; - cd = rrd_info_push(cd, - sprintf_alloc("ds[%s].last_ds", - rrd.ds_def[i].ds_nam), RD_I_STR, - info); - - info.u_val = rrd.pdp_prep[i].scratch[PDP_val].u_val; - cd = rrd_info_push(cd, - sprintf_alloc("ds[%s].value", - rrd.ds_def[i].ds_nam), RD_I_VAL, - info); - - info.u_cnt = rrd.pdp_prep[i].scratch[PDP_unkn_sec_cnt].u_cnt; - cd = 
rrd_info_push(cd, - sprintf_alloc("ds[%s].unknown_sec", - rrd.ds_def[i].ds_nam), RD_I_CNT, - info); - } - - for (i = 0; i < rrd.stat_head->rra_cnt; i++) { - info.u_str = rrd.rra_def[i].cf_nam; - cd = rrd_info_push(cd, sprintf_alloc("rra[%d].cf", i), RD_I_STR, - info); - current_cf = cf_conv(rrd.rra_def[i].cf_nam); - - info.u_cnt = rrd.rra_def[i].row_cnt; - cd = rrd_info_push(cd, sprintf_alloc("rra[%d].rows", i), RD_I_CNT, - info); - - info.u_cnt = rrd.rra_ptr[i].cur_row; - cd = rrd_info_push(cd, sprintf_alloc("rra[%d].cur_row", i), RD_I_CNT, - info); - - info.u_cnt = rrd.rra_def[i].pdp_cnt; - cd = rrd_info_push(cd, sprintf_alloc("rra[%d].pdp_per_row", i), - RD_I_CNT, info); - - switch (current_cf) { - case CF_HWPREDICT: - case CF_MHWPREDICT: - info.u_val = rrd.rra_def[i].par[RRA_hw_alpha].u_val; - cd = rrd_info_push(cd, sprintf_alloc("rra[%d].alpha", i), - RD_I_VAL, info); - info.u_val = rrd.rra_def[i].par[RRA_hw_beta].u_val; - cd = rrd_info_push(cd, sprintf_alloc("rra[%d].beta", i), RD_I_VAL, - info); - break; - case CF_SEASONAL: - case CF_DEVSEASONAL: - info.u_val = rrd.rra_def[i].par[RRA_seasonal_gamma].u_val; - cd = rrd_info_push(cd, sprintf_alloc("rra[%d].gamma", i), - RD_I_VAL, info); - if (atoi(rrd.stat_head->version) >= 4) { - info.u_val = - rrd.rra_def[i].par[RRA_seasonal_smoothing_window].u_val; - cd = rrd_info_push(cd, - sprintf_alloc("rra[%d].smoothing_window", - i), RD_I_VAL, info); - } - break; - case CF_FAILURES: - info.u_val = rrd.rra_def[i].par[RRA_delta_pos].u_val; - cd = rrd_info_push(cd, sprintf_alloc("rra[%d].delta_pos", i), - RD_I_VAL, info); - info.u_val = rrd.rra_def[i].par[RRA_delta_neg].u_val; - cd = rrd_info_push(cd, sprintf_alloc("rra[%d].delta_neg", i), - RD_I_VAL, info); - info.u_cnt = rrd.rra_def[i].par[RRA_failure_threshold].u_cnt; - cd = rrd_info_push(cd, - sprintf_alloc("rra[%d].failure_threshold", i), - RD_I_CNT, info); - info.u_cnt = rrd.rra_def[i].par[RRA_window_len].u_cnt; - cd = rrd_info_push(cd, 
sprintf_alloc("rra[%d].window_length", i), - RD_I_CNT, info); - break; - case CF_DEVPREDICT: - break; - default: - info.u_val = rrd.rra_def[i].par[RRA_cdp_xff_val].u_val; - cd = rrd_info_push(cd, sprintf_alloc("rra[%d].xff", i), RD_I_VAL, - info); - break; - } - - for (ii = 0; ii < rrd.stat_head->ds_cnt; ii++) { - switch (current_cf) { - case CF_HWPREDICT: - case CF_MHWPREDICT: - info.u_val = - rrd.cdp_prep[i * rrd.stat_head->ds_cnt + - ii].scratch[CDP_hw_intercept].u_val; - cd = rrd_info_push(cd, - sprintf_alloc - ("rra[%d].cdp_prep[%d].intercept", i, ii), - RD_I_VAL, info); - info.u_val = - rrd.cdp_prep[i * rrd.stat_head->ds_cnt + - ii].scratch[CDP_hw_slope].u_val; - cd = rrd_info_push(cd, - sprintf_alloc("rra[%d].cdp_prep[%d].slope", - i, ii), RD_I_VAL, info); - info.u_cnt = - rrd.cdp_prep[i * rrd.stat_head->ds_cnt + - ii].scratch[CDP_null_count].u_cnt; - cd = rrd_info_push(cd, - sprintf_alloc - ("rra[%d].cdp_prep[%d].NaN_count", i, ii), - RD_I_CNT, info); - break; - case CF_SEASONAL: - info.u_val = - rrd.cdp_prep[i * rrd.stat_head->ds_cnt + - ii].scratch[CDP_hw_seasonal].u_val; - cd = rrd_info_push(cd, - sprintf_alloc - ("rra[%d].cdp_prep[%d].seasonal", i, ii), - RD_I_VAL, info); - break; - case CF_DEVSEASONAL: - info.u_val = - rrd.cdp_prep[i * rrd.stat_head->ds_cnt + - ii].scratch[CDP_seasonal_deviation].u_val; - cd = rrd_info_push(cd, - sprintf_alloc - ("rra[%d].cdp_prep[%d].deviation", i, ii), - RD_I_VAL, info); - break; - case CF_DEVPREDICT: - break; - case CF_FAILURES: - { - unsigned short j; - char *violations_array; - char history[MAX_FAILURES_WINDOW_LEN + 1]; - - violations_array = - (char *) rrd.cdp_prep[i * rrd.stat_head->ds_cnt + - ii].scratch; - for (j = 0; j < rrd.rra_def[i].par[RRA_window_len].u_cnt; ++j) - history[j] = (violations_array[j] == 1) ? 
'1' : '0'; - history[j] = '\0'; - info.u_str = history; - cd = rrd_info_push(cd, - sprintf_alloc - ("rra[%d].cdp_prep[%d].history", i, ii), - RD_I_STR, info); - } - break; - default: - info.u_val = - rrd.cdp_prep[i * rrd.stat_head->ds_cnt + - ii].scratch[CDP_val].u_val; - cd = rrd_info_push(cd, - sprintf_alloc("rra[%d].cdp_prep[%d].value", - i, ii), RD_I_VAL, info); - info.u_cnt = - rrd.cdp_prep[i * rrd.stat_head->ds_cnt + - ii].scratch[CDP_unkn_pdp_cnt].u_cnt; - cd = rrd_info_push(cd, - sprintf_alloc - ("rra[%d].cdp_prep[%d].unknown_datapoints", - i, ii), RD_I_CNT, info); - break; - } - } - } - - rrd_close(rrd_file); -err_free: - rrd_free(&rrd); - return (data); -} - - -void rrd_info_print(rrd_info_t * data) { - while (data) { - printf("%s = ", data->key); - - switch (data->type) { - case RD_I_VAL: - if (isnan(data->value.u_val)) - printf("NaN\n"); - else - printf("%0.10e\n", data->value.u_val); - break; - case RD_I_CNT: - printf("%lu\n", data->value.u_cnt); - break; - case RD_I_INT: - printf("%d\n", data->value.u_int); - break; - case RD_I_STR: - printf("\"%s\"\n", data->value.u_str); - break; - case RD_I_BLO: - printf("BLOB_SIZE:%lu\n", data->value.u_blo.size); - fwrite(data->value.u_blo.ptr, data->value.u_blo.size, 1, stdout); - break; - } - data = data->next; - } -} - -void rrd_info_free(rrd_info_t * data) { - rrd_info_t *save; - - while (data) { - save = data; - if (data->key) { - if (data->type == RD_I_STR) { - free(data->value.u_str); - } - if (data->type == RD_I_BLO) { - free(data->value.u_blo.ptr); - } - free(data->key); - } - data = data->next; - free(save); - } -} diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_is_thread_safe.h b/vendor/github.com/open-falcon/rrdlite/rrd_is_thread_safe.h deleted file mode 100644 index 86060326..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_is_thread_safe.h +++ /dev/null @@ -1,28 +0,0 @@ -/***************************************************************************** - * RRDtool 1.4.9 Copyright by Tobi 
Oetiker, 1997-2014 - * This file: Copyright 2003 Peter Stamfest - * & Tobias Oetiker - * Distributed under the GPL - ***************************************************************************** - * rrd_is_thread_safe.c Poisons some nasty function calls using GNU cpp - ***************************************************************************** - * $Id$ - *************************************************************************** */ - -#ifndef _RRD_IS_THREAD_SAFE_H -#define _RRD_IS_THREAD_SAFE_H - -#ifdef __cplusplus -extern "C" { -#endif - -#undef strerror - -#if( 2 < __GNUC__ ) -#pragma GCC poison strtok asctime ctime gmtime localtime tmpnam strerror -#endif - -#ifdef __cplusplus -} -#endif -#endif /*_RRD_IS_THREAD_SAFE_H */ diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_nan_inf.c b/vendor/github.com/open-falcon/rrdlite/rrd_nan_inf.c deleted file mode 100644 index 2914d1a3..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_nan_inf.c +++ /dev/null @@ -1,40 +0,0 @@ -int done_nan = 0; -int done_inf = 0; - -double dnan; -double dinf; - -#if defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__) -#include -#include "rrd.h" - -#define NAN_FUNC (double)fmod(0.0,0.0) -#define INF_FUNC (double)fabs((double)log(0.0)) - -#else -#include "rrd.h" - -#define NAN_FUNC (double)(0.0/0.0) -#define INF_FUNC (double)(1.0/0.0) - -#endif - -double rrd_set_to_DNAN( - void) -{ - if (!done_nan) { - dnan = NAN_FUNC; - done_nan = 1; - } - return dnan; -} - -double rrd_set_to_DINF( - void) -{ - if (!done_inf) { - dinf = INF_FUNC; - done_inf = 1; - } - return dinf; -} diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_open.c b/vendor/github.com/open-falcon/rrdlite/rrd_open.c deleted file mode 100644 index 2593fed5..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_open.c +++ /dev/null @@ -1,769 +0,0 @@ -/***************************************************************************** - * RRDtool 1.4.9 Copyright by Tobi Oetiker, 1997-2014 - 
***************************************************************************** - * rrd_open.c Open an RRD File - ***************************************************************************** - * $Id$ - *****************************************************************************/ - -#include "rrd_tool.h" -#include "unused.h" - -#ifdef WIN32 -#include -#include -#include -#endif - -#ifdef HAVE_BROKEN_MS_ASYNC -#include -#include -#endif - -#define MEMBLK 8192 - -#ifdef WIN32 -#define _LK_UNLCK 0 /* Unlock */ -#define _LK_LOCK 1 /* Lock */ -#define _LK_NBLCK 2 /* Non-blocking lock */ -#define _LK_RLCK 3 /* Lock for read only */ -#define _LK_NBRLCK 4 /* Non-blocking lock for read only */ - - -#define LK_UNLCK _LK_UNLCK -#define LK_LOCK _LK_LOCK -#define LK_NBLCK _LK_NBLCK -#define LK_RLCK _LK_RLCK -#define LK_NBRLCK _LK_NBRLCK -#endif - -/* DEBUG 2 prints information obtained via mincore(2) */ -#define DEBUG 1 -/* do not calculate exact madvise hints but assume 1 page for headers and - * set DONTNEED for the rest, which is assumed to be data */ -/* Avoid calling madvise on areas that were already hinted. 
May be benefical if - * your syscalls are very slow */ - -#ifdef HAVE_MMAP -/* the cast to void* is there to avoid this warning seen on ia64 with certain - versions of gcc: 'cast increases required alignment of target type' - */ -#define __rrd_read(dst, dst_t, cnt) { \ - size_t wanted = sizeof(dst_t)*(cnt); \ - if (offset + wanted > rrd_file->file_len) { \ - ret = -RRD_ERR_READ3; \ - goto out_nullify_head; \ - } \ - (dst) = (dst_t*)(void*) (data + offset); \ - offset += wanted; \ -} -#else -#define __rrd_read(dst, dst_t, cnt) { \ - size_t wanted = sizeof(dst_t)*(cnt); \ - size_t got; \ - if ((dst = (dst_t*)malloc(wanted)) == NULL) { \ - ret = -RRD_ERR_MALLOC6; \ - goto out_nullify_head; \ - } \ - got = read (rrd_simple_file->fd, dst, wanted); \ - if (got != wanted) { \ - ret = -RRD_ERR_READ4; \ - goto out_nullify_head; \ - } \ - offset += got; \ -} -#endif - -/* get the address of the start of this page */ -#if defined USE_MADVISE || defined HAVE_POSIX_FADVISE -#ifndef PAGE_START -#define PAGE_START(addr) ((addr)&(~(_page_size-1))) -#endif -#endif - -/* Open a database file, return its header and an open filehandle, - * positioned to the first cdp in the first rra. - * In the error path of rrd_open, only rrd_free(&rrd) has to be called - * before returning an error. Do not call rrd_close upon failure of rrd_open. - * If creating a new file, the parameter rrd must be initialised with - * details of the file content. 
- * If opening an existing file, then use rrd must be initialised by - * rrd_init(rrd) prior to invoking rrd_open - */ - -rrd_file_t *rrd_open( const char *const file_name, rrd_t *rrd, - unsigned rdwr, int *ret_p) { - unsigned long ui; - int flags = 0; - int version; - int ret = 0; - -#ifdef HAVE_MMAP - ssize_t _page_size = sysconf(_SC_PAGESIZE); - char *data = MAP_FAILED; -#endif - off_t offset = 0; - struct stat statb; - rrd_file_t *rrd_file = NULL; - rrd_simple_file_t *rrd_simple_file = NULL; - size_t newfile_size = 0; - size_t header_len, value_cnt, data_len; - - /* Are we creating a new file? */ - if((rdwr & RRD_CREAT) && (rrd->stat_head != NULL)) { - header_len = rrd_get_header_size(rrd); - - value_cnt = 0; - for (ui = 0; ui < rrd->stat_head->rra_cnt; ui++) - value_cnt += rrd->stat_head->ds_cnt * rrd->rra_def[ui].row_cnt; - - data_len = sizeof(rrd_value_t) * value_cnt; - - newfile_size = header_len + data_len; - } - - rrd_file = (rrd_file_t*)malloc(sizeof(rrd_file_t)); - if (rrd_file == NULL) { - *ret_p = -RRD_ERR_MALLOC7; - return NULL; - } - memset(rrd_file, 0, sizeof(rrd_file_t)); - - rrd_file->pvt = malloc(sizeof(rrd_simple_file_t)); - if(rrd_file->pvt == NULL) { - *ret_p = -RRD_ERR_MALLOC8; - return NULL; - } - memset(rrd_file->pvt, 0, sizeof(rrd_simple_file_t)); - rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt; - -#ifdef DEBUG - if ((rdwr & (RRD_READONLY | RRD_READWRITE)) == - (RRD_READONLY | RRD_READWRITE)) { - /* Both READONLY and READWRITE were given, which is invalid. 
*/ - *ret_p = -RRD_ERR_IO1; - exit(-1); - } -#endif - -#ifdef HAVE_MMAP - rrd_simple_file->mm_prot = PROT_READ; - rrd_simple_file->mm_flags = 0; -#endif - - if (rdwr & RRD_READONLY) { - flags |= O_RDONLY; -#ifdef HAVE_MMAP -# if !defined(AIX) - rrd_simple_file->mm_flags = MAP_PRIVATE; -# endif -# ifdef MAP_NORESERVE - rrd_simple_file->mm_flags |= MAP_NORESERVE; /* readonly, so no swap backing needed */ -# endif -#endif - } else { - if (rdwr & RRD_READWRITE) { - flags |= O_RDWR; -#ifdef HAVE_MMAP - rrd_simple_file->mm_flags = MAP_SHARED; - rrd_simple_file->mm_prot |= PROT_WRITE; -#endif - } - if (rdwr & RRD_CREAT) { - flags |= (O_CREAT | O_TRUNC); - } - if (rdwr & RRD_EXCL) { - flags |= O_EXCL; - } - } - if (rdwr & RRD_READAHEAD) { -#ifdef MAP_POPULATE - rrd_simple_file->mm_flags |= MAP_POPULATE; /* populate ptes and data */ -#endif -#if defined MAP_NONBLOCK - rrd_simple_file->mm_flags |= MAP_NONBLOCK; /* just populate ptes */ -#endif - } -#if defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__) - flags |= O_BINARY; -#endif - - if ((rrd_simple_file->fd = open(file_name, flags, 0666)) < 0) { - ret = -RRD_ERR_OPEN_FILE; - goto out_free; - } - -#ifdef HAVE_MMAP -#ifdef HAVE_BROKEN_MS_ASYNC - if (rdwr & RRD_READWRITE) { - /* some unices, the files mtime does not get update - on msync MS_ASYNC, in order to help them, - we update the the timestamp at this point. - The thing happens pretty 'close' to the open - call so the chances of a race should be minimal. - - Maybe ask your vendor to fix your OS ... */ - utime(file_name,NULL); - } -#endif -#endif - - /* Better try to avoid seeks as much as possible. stat may be heavy but - * many concurrent seeks are even worse. 
*/ - if (newfile_size == 0 && ((fstat(rrd_simple_file->fd, &statb)) < 0)) { - ret = -RRD_ERR_STAT_FILE; - goto out_close; - } - if (newfile_size == 0) { - rrd_file->file_len = statb.st_size; - } else { - rrd_file->file_len = newfile_size; -#ifdef HAVE_POSIX_FALLOCATE - if (posix_fallocate(rrd_simple_file->fd, 0, newfile_size) == 0){ - /* if all is well we skip the seeking below */ - goto no_lseek_necessary; - } -#endif - lseek(rrd_simple_file->fd, newfile_size - 1, SEEK_SET); - if ( write(rrd_simple_file->fd, "\0", 1) == -1){ /* poke */ - ret = -RRD_ERR_WRITE5; - goto out_close; - } - lseek(rrd_simple_file->fd, 0, SEEK_SET); - } -no_lseek_necessary: -#ifdef HAVE_POSIX_FADVISE - /* In general we need no read-ahead when dealing with rrd_files. - When we stop reading, it is highly unlikely that we start up again. - In this manner we actually save time and diskaccess (and buffer cache). - Thanks to Dave Plonka for the Idea of using POSIX_FADV_RANDOM here. */ - posix_fadvise(rrd_simple_file->fd, 0, 0, POSIX_FADV_RANDOM); -#endif - -#ifdef HAVE_MMAP -#ifndef HAVE_POSIX_FALLOCATE - /* force allocating the file on the underlaying filesystem to prevent any - * future bus error when the filesystem is full and attempting to write - * trough the file mapping. Filling the file using memset on the file - * mapping can also lead some bus error, so we use the old fashioned - * write(). 
- */ - if (rdwr & RRD_CREAT) { - char buf[4096]; - unsigned i; - - memset(buf, DNAN, sizeof buf); - lseek(rrd_simple_file->fd, offset, SEEK_SET); - - for (i = 0; i < (newfile_size - 1) / sizeof buf; ++i) { - if (write(rrd_simple_file->fd, buf, sizeof buf) == -1) { - ret = -RRD_ERR_WRITE5; - goto out_close; - } - } - - if (write(rrd_simple_file->fd, buf, - (newfile_size - 1) % sizeof buf) == -1) { - ret = -RRD_ERR_WRITE5; - goto out_close; - } - - lseek(rrd_simple_file->fd, 0, SEEK_SET); - } -#endif - - data = mmap(0, rrd_file->file_len, - rrd_simple_file->mm_prot, rrd_simple_file->mm_flags, - rrd_simple_file->fd, offset); - - /* lets see if the first read worked */ - if (data == MAP_FAILED) { - ret = -RRD_ERR_MMAP; - goto out_close; - } - rrd_simple_file->file_start = data; -#endif - if (rdwr & RRD_CREAT) - goto out_done; -#ifdef USE_MADVISE - if (rdwr & RRD_COPY) { - /* We will read everything in a moment (copying) */ - madvise(data, rrd_file->file_len, MADV_WILLNEED ); - madvise(data, rrd_file->file_len, MADV_SEQUENTIAL ); - } else { - /* We do not need to read anything in for the moment */ - madvise(data, rrd_file->file_len, MADV_RANDOM); - /* the stat_head will be needed soonish, so hint accordingly */ - madvise(data, sizeof(stat_head_t), MADV_WILLNEED); - madvise(data, sizeof(stat_head_t), MADV_RANDOM); - } -#endif - - __rrd_read(rrd->stat_head, stat_head_t, 1); - - /* lets do some test if we are on track ... 
*/ - if (memcmp(rrd->stat_head->cookie, RRD_COOKIE, sizeof(RRD_COOKIE)) != 0) { - ret = -RRD_ERR_FILE; - goto out_nullify_head; - } - - if (rrd->stat_head->float_cookie != FLOAT_COOKIE) { - ret = -RRD_ERR_FILE1; - goto out_nullify_head; - } - - version = atoi(rrd->stat_head->version); - - if (version > atoi(RRD_VERSION)) { - ret = -RRD_ERR_FILE2; - goto out_nullify_head; - } -#if defined USE_MADVISE - /* the ds_def will be needed soonish, so hint accordingly */ - madvise(data + PAGE_START(offset), - sizeof(ds_def_t) * rrd->stat_head->ds_cnt, MADV_WILLNEED); -#endif - __rrd_read(rrd->ds_def, ds_def_t, rrd->stat_head->ds_cnt); - -#if defined USE_MADVISE - /* the rra_def will be needed soonish, so hint accordingly */ - madvise(data + PAGE_START(offset), - sizeof(rra_def_t) * rrd->stat_head->rra_cnt, MADV_WILLNEED); -#endif - __rrd_read(rrd->rra_def, rra_def_t, - rrd->stat_head->rra_cnt); - - /* handle different format for the live_head */ - if (version < 3) { - rrd->live_head = (live_head_t *) malloc(sizeof(live_head_t)); - if (rrd->live_head == NULL) { - ret = -RRD_ERR_MALLOC9; - goto out_close; - } -#if defined USE_MADVISE - /* the live_head will be needed soonish, so hint accordingly */ - madvise(data + PAGE_START(offset), sizeof(time_t), MADV_WILLNEED); -#endif - __rrd_read(rrd->legacy_last_up, time_t, - 1); - - rrd->live_head->last_up = *rrd->legacy_last_up; - rrd->live_head->last_up_usec = 0; - } else { -#if defined USE_MADVISE - /* the live_head will be needed soonish, so hint accordingly */ - madvise(data + PAGE_START(offset), - sizeof(live_head_t), MADV_WILLNEED); -#endif - __rrd_read(rrd->live_head, live_head_t, - 1); - } - __rrd_read(rrd->pdp_prep, pdp_prep_t, - rrd->stat_head->ds_cnt); - __rrd_read(rrd->cdp_prep, cdp_prep_t, - rrd->stat_head->rra_cnt * rrd->stat_head->ds_cnt); - __rrd_read(rrd->rra_ptr, rra_ptr_t, - rrd->stat_head->rra_cnt); - - rrd_file->header_len = offset; - rrd_file->pos = offset; - - { - unsigned long row_cnt = 0; - - for (ui=0; 
uistat_head->rra_cnt; ui++) - row_cnt += rrd->rra_def[ui].row_cnt; - - size_t correct_len = rrd_file->header_len + - sizeof(rrd_value_t) * row_cnt * rrd->stat_head->ds_cnt; - - if (correct_len > rrd_file->file_len) { - ret = -RRD_ERR_FILE3; - goto out_nullify_head; - } - } - -out_done: - return (rrd_file); -out_nullify_head: - rrd->stat_head = NULL; -out_close: -#ifdef HAVE_MMAP - if (data != MAP_FAILED) - munmap(data, rrd_file->file_len); -#endif - - close(rrd_simple_file->fd); -out_free: - free(rrd_file->pvt); - free(rrd_file); - *ret_p = ret; - return NULL; -} - - -#if defined DEBUG && DEBUG > 1 -/* Print list of in-core pages of a the current rrd_file. */ - static -void mincore_print( - rrd_file_t *rrd_file, - char *mark) -{ - rrd_simple_file_t *rrd_simple_file; - rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt; -#ifdef HAVE_MMAP - /* pretty print blocks in core */ - size_t off; - unsigned char *vec; - ssize_t _page_size = sysconf(_SC_PAGESIZE); - - off = rrd_file->file_len + - ((rrd_file->file_len + _page_size - 1) / _page_size); - vec = malloc(off); - if (vec != NULL) { - memset(vec, 0, off); - if (mincore(rrd_simple_file->file_start, rrd_file->file_len, vec) == 0) { - int prev; - unsigned is_in = 0, was_in = 0; - - for (off = 0, prev = 0; off < rrd_file->file_len; ++off) { - is_in = vec[off] & 1; /* if lsb set then is core resident */ - if (off == 0) - was_in = is_in; - if (was_in != is_in) { - fprintf(stderr, "%s: %sin core: %p len %ld\n", mark, - was_in ? "" : "not ", vec + prev, off - prev); - was_in = is_in; - prev = off; - } - } - fprintf(stderr, - "%s: %sin core: %p len %ld\n", mark, - was_in ? "" : "not ", vec + prev, off - prev); - } else - fprintf(stderr, "mincore: %s", rrd_strerror(errno)); - } -#else - fprintf(stderr, "sorry mincore only works with mmap"); -#endif -} -#endif /* defined DEBUG && DEBUG > 1 */ - -/* - * get exclusive lock to whole file. 
- * lock gets removed when we close the file - * - * returns 0 on success - */ -int rrd_lock( - rrd_file_t *rrd_file) -{ - int rcstat; - rrd_simple_file_t *rrd_simple_file; - rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt; - - { -#if defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__) - struct _stat st; - - if (_fstat(rrd_simple_file->fd, &st) == 0) { - rcstat = _locking(rrd_simple_file->fd, _LK_NBLCK, st.st_size); - } else { - rcstat = -1; - } -#else - struct flock lock; - - lock.l_type = F_WRLCK; /* exclusive write lock */ - lock.l_len = 0; /* whole file */ - lock.l_start = 0; /* start of file */ - lock.l_whence = SEEK_SET; /* end of file */ - - rcstat = fcntl(rrd_simple_file->fd, F_SETLK, &lock); -#endif - } - - return (rcstat); -} - - -/* drop cache except for the header and the active pages */ -void rrd_dontneed( rrd_file_t *rrd_file, rrd_t *rrd) { - rrd_simple_file_t *rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt; -#if defined USE_MADVISE || defined HAVE_POSIX_FADVISE - size_t dontneed_start; - size_t rra_start; - size_t active_block; - size_t i; - ssize_t _page_size = sysconf(_SC_PAGESIZE); - - if (rrd_file == NULL) { -#if defined DEBUG && DEBUG - fprintf (stderr, "rrd_dontneed: Argument 'rrd_file' is NULL.\n"); -#endif - return; - } - -#if defined DEBUG && DEBUG > 1 - mincore_print(rrd_file, "before"); -#endif - - /* ignoring errors from RRDs that are smaller then the file_len+rounding */ - rra_start = rrd_file->header_len; - dontneed_start = PAGE_START(rra_start) + _page_size; - for (i = 0; i < rrd->stat_head->rra_cnt; ++i) { - active_block = - PAGE_START(rra_start - + rrd->rra_ptr[i].cur_row - * rrd->stat_head->ds_cnt * sizeof(rrd_value_t)); - if (active_block > dontneed_start) { -#ifdef USE_MADVISE - madvise(rrd_simple_file->file_start + dontneed_start, - active_block - dontneed_start - 1, MADV_DONTNEED); -#endif - /* in linux at least only fadvise DONTNEED seems to purge pages from cache */ -#ifdef HAVE_POSIX_FADVISE - 
posix_fadvise(rrd_simple_file->fd, dontneed_start, - active_block - dontneed_start - 1, - POSIX_FADV_DONTNEED); -#endif - } - dontneed_start = active_block; - /* do not release 'hot' block if update for this RAA will occur - * within 10 minutes */ - if (rrd->stat_head->pdp_step * rrd->rra_def[i].pdp_cnt - - rrd->live_head->last_up % (rrd->stat_head->pdp_step * - rrd->rra_def[i].pdp_cnt) < 10 * 60) { - dontneed_start += _page_size; - } - rra_start += - rrd->rra_def[i].row_cnt * rrd->stat_head->ds_cnt * - sizeof(rrd_value_t); - } - - if (dontneed_start < rrd_file->file_len) { -#ifdef USE_MADVISE - madvise(rrd_simple_file->file_start + dontneed_start, - rrd_file->file_len - dontneed_start, MADV_DONTNEED); -#endif -#ifdef HAVE_POSIX_FADVISE - posix_fadvise(rrd_simple_file->fd, dontneed_start, - rrd_file->file_len - dontneed_start, - POSIX_FADV_DONTNEED); -#endif - } - -#if defined DEBUG && DEBUG > 1 - mincore_print(rrd_file, "after"); -#endif -#endif /* without madvise and posix_fadvise it does not make much sense todo anything */ -} - - - - - -int rrd_close( - rrd_file_t *rrd_file) -{ - rrd_simple_file_t *rrd_simple_file; - rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt; - int ret; - -#ifdef HAVE_MMAP - ret = msync(rrd_simple_file->file_start, rrd_file->file_len, MS_ASYNC); - if (ret != 0){ - ret = -RRD_ERR_MSYNC; - goto out; - } - ret = munmap(rrd_simple_file->file_start, rrd_file->file_len); - if (ret != 0){ - ret = -RRD_ERR_MUNMAP; - goto out; - } -#endif - ret = close(rrd_simple_file->fd); - if (ret != 0){ - ret = -RRD_ERR_CLOSE; - goto out; - } -out: - free(rrd_file->pvt); - free(rrd_file); - rrd_file = NULL; - return ret; -} - - -/* Set position of rrd_file. 
*/ - -off_t rrd_seek( rrd_file_t *rrd_file, off_t off, int whence) { - off_t ret = 0; -#ifndef HAVE_MMAP - rrd_simple_file_t *rrd_simple_file; - rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt; -#endif - -#ifdef HAVE_MMAP - if (whence == SEEK_SET) - rrd_file->pos = off; - else if (whence == SEEK_CUR) - rrd_file->pos += off; - else if (whence == SEEK_END) - rrd_file->pos = rrd_file->file_len + off; -#else - ret = lseek(rrd_simple_file->fd, off, whence); - rrd_file->pos = ret; -#endif - /* mimic fseek, which returns 0 upon success */ - return ret < 0; /*XXX: or just ret to mimic lseek */ -} - - -/* Get current position in rrd_file. */ - -off_t rrd_tell( - rrd_file_t *rrd_file) -{ - return rrd_file->pos; -} - - -/* Read count bytes into buffer buf, starting at rrd_file->pos. - * Returns the number of bytes read or <0 on error. */ - -ssize_t rrd_read( - rrd_file_t *rrd_file, - void *buf, - size_t count) -{ - rrd_simple_file_t *rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt; -#ifdef HAVE_MMAP - size_t _cnt = count; - ssize_t _surplus; - - if (rrd_file->pos > rrd_file->file_len || _cnt == 0) /* EOF */ - return 0; - if (buf == NULL) - return -1; /* EINVAL */ - _surplus = rrd_file->pos + _cnt - rrd_file->file_len; - if (_surplus > 0) { /* short read */ - _cnt -= _surplus; - } - if (_cnt == 0) - return 0; /* EOF */ - buf = memcpy(buf, rrd_simple_file->file_start + rrd_file->pos, _cnt); - - rrd_file->pos += _cnt; /* mimmic read() semantics */ - return _cnt; -#else - ssize_t ret; - - ret = read(rrd_simple_file->fd, buf, count); - if (ret > 0) - rrd_file->pos += ret; /* mimmic read() semantics */ - return ret; -#endif -} - - -/* Write count bytes from buffer buf to the current position - * rrd_file->pos of rrd_simple_file->fd. - * Returns the number of bytes written or <0 on error. 
*/ - -ssize_t rrd_write(rrd_file_t *rrd_file, const void *buf, size_t count){ - rrd_simple_file_t *rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt; -#ifdef HAVE_MMAP - size_t old_size = rrd_file->file_len; - if (count == 0) - return 0; - if (buf == NULL) - return -1; /* EINVAL */ - - if((rrd_file->pos + count) > old_size) { - return -RRD_ERR_WRITE6; - } - memcpy(rrd_simple_file->file_start + rrd_file->pos, buf, count); - rrd_file->pos += count; - return count; /* mimmic write() semantics */ -#else - ssize_t _sz = write(rrd_simple_file->fd, buf, count); - - if (_sz > 0) - rrd_file->pos += _sz; - return _sz; -#endif -} - - -/* this is a leftover from the old days, it serves no purpose - and is therefore turned into a no-op */ -void rrd_flush(rrd_file_t UNUSED(*rrd_file)) -{ -} - -/* Initialize RRD header. */ - -void rrd_init(rrd_t *rrd) { - rrd->stat_head = NULL; - rrd->ds_def = NULL; - rrd->rra_def = NULL; - rrd->live_head = NULL; - rrd->legacy_last_up = NULL; - rrd->rra_ptr = NULL; - rrd->pdp_prep = NULL; - rrd->cdp_prep = NULL; - rrd->rrd_value = NULL; -} - - -/* free RRD header data. 
*/ - -#ifdef HAVE_MMAP -void rrd_free(rrd_t *rrd) { - if (rrd->legacy_last_up) { /* this gets set for version < 3 only */ - free(rrd->live_head); - } -} -#else -void rrd_free(rrd_t *rrd) { - free(rrd->live_head); - free(rrd->stat_head); - free(rrd->ds_def); - free(rrd->rra_def); - free(rrd->rra_ptr); - free(rrd->pdp_prep); - free(rrd->cdp_prep); - free(rrd->rrd_value); -} -#endif - - -/* routine used by external libraries to free memory allocated by - * rrd library */ - -void rrd_freemem(void *mem) { - free(mem); -} - -/* - * rra_update informs us about the RRAs being updated - * The low level storage API may use this information for - * aligning RRAs within stripes, or other performance enhancements - */ -void rrd_notify_row(rrd_file_t UNUSED(*rrd_file), - int UNUSED(rra_idx), unsigned long UNUSED(rra_row), - time_t UNUSED(rra_time)) { -} - -/* - * This function is called when creating a new RRD - * The storage implementation can use this opportunity to select - * a sensible starting row within the file. 
- * The default implementation is random, to ensure that all RRAs - * don't change to a new disk block at the same time - */ -unsigned long rrd_select_initial_row( rrd_file_t UNUSED(*rrd_file), - int UNUSED(rra_idx), rra_def_t *rra) { - return rrd_random() % rra->row_cnt; -} diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_parsetime.c b/vendor/github.com/open-falcon/rrdlite/rrd_parsetime.c deleted file mode 100644 index e4c8d4bf..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_parsetime.c +++ /dev/null @@ -1,1043 +0,0 @@ -/* - * rrd_parsetime.c - parse time for at(1) - * Copyright (C) 1993, 1994 Thomas Koenig - * - * modifications for English-language times - * Copyright (C) 1993 David Parsons - * - * A lot of modifications and extensions - * (including the new syntax being useful for RRDB) - * Copyright (C) 1999 Oleg Cherevko (aka Olwi Deer) - * - * severe structural damage inflicted by Tobi Oetiker in 1999 - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. The name of the author(s) may not be used to endorse or promote - * products derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* NOTE: nothing in here is thread-safe!!!! Not even the localtime - calls ... */ - -/* - * The BNF-like specification of the time syntax parsed is below: - * - * As usual, [ X ] means that X is optional, { X } means that X may - * be either omitted or specified as many times as needed, - * alternatives are separated by |, brackets are used for grouping. - * (# marks the beginning of comment that extends to the end of line) - * - * TIME-SPECIFICATION ::= TIME-REFERENCE [ OFFSET-SPEC ] | - * OFFSET-SPEC | - * ( START | END ) OFFSET-SPEC - * - * TIME-REFERENCE ::= NOW | TIME-OF-DAY-SPEC [ DAY-SPEC-1 ] | - * [ TIME-OF-DAY-SPEC ] DAY-SPEC-2 - * - * TIME-OF-DAY-SPEC ::= NUMBER (':') NUMBER [am|pm] | # HH:MM - * 'noon' | 'midnight' | 'teatime' - * - * DAY-SPEC-1 ::= NUMBER '/' NUMBER '/' NUMBER | # MM/DD/[YY]YY - * NUMBER '.' NUMBER '.' 
NUMBER | # DD.MM.[YY]YY - * NUMBER # Seconds since 1970 - * NUMBER # YYYYMMDD - * - * DAY-SPEC-2 ::= MONTH-NAME NUMBER [NUMBER] | # Month DD [YY]YY - * 'yesterday' | 'today' | 'tomorrow' | - * DAY-OF-WEEK - * - * - * OFFSET-SPEC ::= '+'|'-' NUMBER TIME-UNIT { ['+'|'-'] NUMBER TIME-UNIT } - * - * TIME-UNIT ::= SECONDS | MINUTES | HOURS | - * DAYS | WEEKS | MONTHS | YEARS - * - * NOW ::= 'now' | 'n' - * - * START ::= 'start' | 's' - * END ::= 'end' | 'e' - * - * SECONDS ::= 'seconds' | 'second' | 'sec' | 's' - * MINUTES ::= 'minutes' | 'minute' | 'min' | 'm' - * HOURS ::= 'hours' | 'hour' | 'hr' | 'h' - * DAYS ::= 'days' | 'day' | 'd' - * WEEKS ::= 'weeks' | 'week' | 'wk' | 'w' - * MONTHS ::= 'months' | 'month' | 'mon' | 'm' - * YEARS ::= 'years' | 'year' | 'yr' | 'y' - * - * MONTH-NAME ::= 'jan' | 'january' | 'feb' | 'february' | 'mar' | 'march' | - * 'apr' | 'april' | 'may' | 'jun' | 'june' | 'jul' | 'july' | - * 'aug' | 'august' | 'sep' | 'september' | 'oct' | 'october' | - * 'nov' | 'november' | 'dec' | 'december' - * - * DAY-OF-WEEK ::= 'sunday' | 'sun' | 'monday' | 'mon' | 'tuesday' | 'tue' | - * 'wednesday' | 'wed' | 'thursday' | 'thu' | 'friday' | 'fri' | - * 'saturday' | 'sat' - * - * - * As you may note, there is an ambiguity with respect to - * the 'm' time unit (which can mean either minutes or months). - * To cope with this, code tries to read users mind :) by applying - * certain heuristics. There are two of them: - * - * 1. If 'm' is used in context of (i.e. right after the) years, - * months, weeks, or days it is assumed to mean months, while - * in the context of hours, minutes, and seconds it means minutes. - * (e.g., in -1y6m or +3w1m 'm' means 'months', while in - * -3h20m or +5s2m 'm' means 'minutes') - * - * 2. Out of context (i.e. right after the '+' or '-' sign) the - * meaning of 'm' is guessed from the number it directly follows. 
- * Currently, if the number absolute value is below 25 it is assumed - * that 'm' means months, otherwise it is treated as minutes. - * (e.g., -25m == -25 minutes, while +24m == +24 months) - * - */ - -/* System Headers */ - -/* Local headers */ - -#include -#include -#include - -#include "rrd_tool.h" - -/* Structures and unions */ - -enum { /* symbols */ - MIDNIGHT, NOON, TEATIME, - PM, AM, YESTERDAY, TODAY, TOMORROW, NOW, START, END, EPOCH, - SECONDS, MINUTES, HOURS, DAYS, WEEKS, MONTHS, YEARS, - MONTHS_MINUTES, - NUMBER, PLUS, MINUS, DOT, COLON, SLASH, ID, JUNK, - JAN, FEB, MAR, APR, MAY, JUN, - JUL, AUG, SEP, OCT, NOV, DEC, - SUN, MON, TUE, WED, THU, FRI, SAT -}; - -/* the below is for plus_minus() */ -#define PREVIOUS_OP (-1) - -/* parse translation table - table driven parsers can be your FRIEND! - */ -struct SpecialToken { - char *name; /* token name */ - int value; /* token id */ -}; -static const struct SpecialToken VariousWords[] = { - {"midnight", MIDNIGHT}, /* 00:00:00 of today or tomorrow */ - {"noon", NOON}, /* 12:00:00 of today or tomorrow */ - {"teatime", TEATIME}, /* 16:00:00 of today or tomorrow */ - {"am", AM}, /* morning times for 0-12 clock */ - {"pm", PM}, /* evening times for 0-12 clock */ - {"tomorrow", TOMORROW}, - {"yesterday", YESTERDAY}, - {"today", TODAY}, - {"now", NOW}, - {"n", NOW}, - {"start", START}, - {"s", START}, - {"end", END}, - {"e", END}, - {"epoch", EPOCH}, - - {"jan", JAN}, - {"feb", FEB}, - {"mar", MAR}, - {"apr", APR}, - {"may", MAY}, - {"jun", JUN}, - {"jul", JUL}, - {"aug", AUG}, - {"sep", SEP}, - {"oct", OCT}, - {"nov", NOV}, - {"dec", DEC}, - {"january", JAN}, - {"february", FEB}, - {"march", MAR}, - {"april", APR}, - {"may", MAY}, - {"june", JUN}, - {"july", JUL}, - {"august", AUG}, - {"september", SEP}, - {"october", OCT}, - {"november", NOV}, - {"december", DEC}, - {"sunday", SUN}, - {"sun", SUN}, - {"monday", MON}, - {"mon", MON}, - {"tuesday", TUE}, - {"tue", TUE}, - {"wednesday", WED}, - {"wed", WED}, - 
{"thursday", THU}, - {"thu", THU}, - {"friday", FRI}, - {"fri", FRI}, - {"saturday", SAT}, - {"sat", SAT}, - {NULL, 0} /*** SENTINEL ***/ -}; - -static const struct SpecialToken TimeMultipliers[] = { - {"second", SECONDS}, /* seconds multiplier */ - {"seconds", SECONDS}, /* (pluralized) */ - {"sec", SECONDS}, /* (generic) */ - {"s", SECONDS}, /* (short generic) */ - {"minute", MINUTES}, /* minutes multiplier */ - {"minutes", MINUTES}, /* (pluralized) */ - {"min", MINUTES}, /* (generic) */ - {"m", MONTHS_MINUTES}, /* (short generic) */ - {"hour", HOURS}, /* hours ... */ - {"hours", HOURS}, /* (pluralized) */ - {"hr", HOURS}, /* (generic) */ - {"h", HOURS}, /* (short generic) */ - {"day", DAYS}, /* days ... */ - {"days", DAYS}, /* (pluralized) */ - {"d", DAYS}, /* (short generic) */ - {"week", WEEKS}, /* week ... */ - {"weeks", WEEKS}, /* (pluralized) */ - {"wk", WEEKS}, /* (generic) */ - {"w", WEEKS}, /* (short generic) */ - {"month", MONTHS}, /* week ... */ - {"months", MONTHS}, /* (pluralized) */ - {"mon", MONTHS}, /* (generic) */ - {"year", YEARS}, /* year ... 
*/ - {"years", YEARS}, /* (pluralized) */ - {"yr", YEARS}, /* (generic) */ - {"y", YEARS}, /* (short generic) */ - {NULL, 0} /*** SENTINEL ***/ -}; - -/* File scope variables */ - -/* context dependent list of specials for parser to recognize, - * required for us to be able distinguish between 'mon' as 'month' - * and 'mon' as 'monday' - */ -static const struct SpecialToken *Specials; - -static const char **scp; /* scanner - pointer at arglist */ -static char scc; /* scanner - count of remaining arguments */ -static const char *sct; /* scanner - next char pointer in current argument */ -static int need; /* scanner - need to advance to next argument */ - -static char *sc_token = NULL; /* scanner - token buffer */ -static size_t sc_len; /* scanner - length of token buffer */ -static int sc_tokid; /* scanner - token id */ - -/* Local functions */ -static void EnsureMemFree( - void); - -static void EnsureMemFree( - void) -{ - if (sc_token) { - free(sc_token); - sc_token = NULL; - } -} - -/* - * A hack to compensate for the lack of the C++ exceptions - * - * Every function func that might generate parsing "exception" - * should return TIME_OK (aka NULL) or pointer to the error message, - * and should be called like this: try(func(args)); - * - * if the try is not successful it will reset the token pointer ... - * - * [NOTE: when try(...) 
is used as the only statement in the "if-true" - * part of the if statement that also has an "else" part it should be - * either enclosed in the curly braces (despite the fact that it looks - * like a single statement) or NOT followed by the ";"] - */ -#define try(b) { \ - char *_e; \ - if((_e=(b))) \ - { \ - EnsureMemFree(); \ - return _e; \ - } \ - } - -/* - * The panic() function was used in the original code to die, we redefine - * it as macro to start the chain of ascending returns that in conjunction - * with the try(b) above will simulate a sort of "exception handling" - */ - -#define panic(e) { \ - return (e); \ - } - -/* - * ve() and e() are used to set the return error, - * the most appropriate use for these is inside panic(...) - */ -#define MAX_ERR_MSG_LEN 1024 -static char errmsg[MAX_ERR_MSG_LEN]; - -static char *ve( - char *fmt, - va_list ap) -{ -#ifdef HAVE_VSNPRINTF - vsnprintf(errmsg, MAX_ERR_MSG_LEN, fmt, ap); -#else - vsprintf(errmsg, fmt, ap); -#endif - EnsureMemFree(); - return (errmsg); -} - -static char *e( - char *fmt, - ...) -{ - char *err; - va_list ap; - - va_start(ap, fmt); - err = ve(fmt, ap); - va_end(ap); - return (err); -} - -/* Compare S1 and S2, ignoring case, returning less than, equal to or - greater than zero if S1 is lexicographically less than, - equal to or greater than S2. 
-- copied from GNU libc*/ -static int mystrcasecmp( - const char *s1, - const char *s2) -{ - const unsigned char *p1 = (const unsigned char *) s1; - const unsigned char *p2 = (const unsigned char *) s2; - unsigned char c1, c2; - - if (p1 == p2) - return 0; - - do { - c1 = tolower(*p1++); - c2 = tolower(*p2++); - if (c1 == '\0') - break; - } - while (c1 == c2); - - return c1 - c2; -} - -/* - * parse a token, checking if it's something special to us - */ -static int parse_token( - char *arg) -{ - int i; - - for (i = 0; Specials[i].name != NULL; i++) - if (mystrcasecmp(Specials[i].name, arg) == 0) - return sc_tokid = Specials[i].value; - - /* not special - must be some random id */ - return sc_tokid = ID; -} /* parse_token */ - - - -/* - * init_scanner() sets up the scanner to eat arguments - */ -static char *init_scanner( - int argc, - const char **argv) -{ - scp = argv; - scc = argc; - need = 1; - sc_len = 1; - while (argc-- > 0) - sc_len += strlen(*argv++); - - sc_token = (char *) malloc(sc_len * sizeof(char)); - if (sc_token == NULL) - return "Failed to allocate memory"; - return TIME_OK; -} /* init_scanner */ - -/* - * token() fetches a token from the input stream - */ -static int token( - void) -{ - int idx; - - while (1) { - memset(sc_token, '\0', sc_len); - sc_tokid = EOF; - idx = 0; - - /* if we need to read another argument, walk along the argument list; - * when we fall off the arglist, we'll just return EOF forever - */ - if (need) { - if (scc < 1) - return sc_tokid; - sct = *scp; - scp++; - scc--; - need = 0; - } - /* eat whitespace now - if we walk off the end of the argument, - * we'll continue, which puts us up at the top of the while loop - * to fetch the next argument in - */ - while (isspace((unsigned char) *sct) || *sct == '_' || *sct == ',') - ++sct; - if (!*sct) { - need = 1; - continue; - } - - /* preserve the first character of the new token - */ - sc_token[0] = *sct++; - - /* then see what it is - */ - if (isdigit((unsigned char) 
(sc_token[0]))) { - while (isdigit((unsigned char) (*sct))) - sc_token[++idx] = *sct++; - sc_token[++idx] = '\0'; - return sc_tokid = NUMBER; - } else if (isalpha((unsigned char) (sc_token[0]))) { - while (isalpha((unsigned char) (*sct))) - sc_token[++idx] = *sct++; - sc_token[++idx] = '\0'; - return parse_token(sc_token); - } else - switch (sc_token[0]) { - case ':': - return sc_tokid = COLON; - case '.': - return sc_tokid = DOT; - case '+': - return sc_tokid = PLUS; - case '-': - return sc_tokid = MINUS; - case '/': - return sc_tokid = SLASH; - default: - /*OK, we did not make it ... */ - sct--; - return sc_tokid = EOF; - } - } /* while (1) */ -} /* token */ - - -/* - * expect2() gets a token and complains if it's not the token we want - */ -static char *expect2( - int desired, - char *complain_fmt, - ...) -{ - va_list ap; - - va_start(ap, complain_fmt); - if (token() != desired) { - panic(ve(complain_fmt, ap)); - } - va_end(ap); - return TIME_OK; - -} /* expect2 */ - - -/* - * plus_minus() is used to parse a single NUMBER TIME-UNIT pair - * for the OFFSET-SPEC. - * It also applies those m-guessing heuristics. - */ -static char *plus_minus( - rrd_time_value_t * ptv, - int doop) -{ - static int op = PLUS; - static int prev_multiplier = -1; - int delta; - - if (doop >= 0) { - op = doop; - try(expect2 - (NUMBER, "There should be number after '%c'", - op == PLUS ? '+' : '-')); - prev_multiplier = -1; /* reset months-minutes guessing mechanics */ - } - /* if doop is < 0 then we repeat the previous op - * with the prefetched number */ - - delta = atoi(sc_token); - - if (token() == MONTHS_MINUTES) { - /* hard job to guess what does that -5m means: -5mon or -5min? 
*/ - switch (prev_multiplier) { - case DAYS: - case WEEKS: - case MONTHS: - case YEARS: - sc_tokid = MONTHS; - break; - - case SECONDS: - case MINUTES: - case HOURS: - sc_tokid = MINUTES; - break; - - default: - if (delta < 6) /* it may be some other value but in the context - * of RRD who needs less than 6 min deltas? */ - sc_tokid = MONTHS; - else - sc_tokid = MINUTES; - } - } - prev_multiplier = sc_tokid; - switch (sc_tokid) { - case YEARS: - ptv->tm. tm_year += ( - op == PLUS) ? delta : -delta; - - return TIME_OK; - case MONTHS: - ptv->tm. tm_mon += ( - op == PLUS) ? delta : -delta; - - return TIME_OK; - case WEEKS: - delta *= 7; - /* FALLTHRU */ - case DAYS: - ptv->tm. tm_mday += ( - op == PLUS) ? delta : -delta; - - return TIME_OK; - case HOURS: - ptv->offset += (op == PLUS) ? delta * 60 * 60 : -delta * 60 * 60; - return TIME_OK; - case MINUTES: - ptv->offset += (op == PLUS) ? delta * 60 : -delta * 60; - return TIME_OK; - case SECONDS: - ptv->offset += (op == PLUS) ? delta : -delta; - return TIME_OK; - default: /*default unit is seconds */ - ptv->offset += (op == PLUS) ? 
delta : -delta; - return TIME_OK; - } - panic(e("well-known time unit expected after %d", delta)); - /* NORETURN */ - return TIME_OK; /* to make compiler happy :) */ -} /* plus_minus */ - - -/* - * tod() computes the time of day (TIME-OF-DAY-SPEC) - */ -static char *tod( - rrd_time_value_t * ptv) -{ - int hour, minute = 0; - int tlen; - - /* save token status in case we must abort */ - int scc_sv = scc; - const char *sct_sv = sct; - int sc_tokid_sv = sc_tokid; - - tlen = strlen(sc_token); - - /* first pick out the time of day - we assume a HH (COLON|DOT) MM time - */ - if (tlen > 2) { - return TIME_OK; - } - - hour = atoi(sc_token); - - token(); - if (sc_tokid == SLASH || sc_tokid == DOT) { - /* guess we are looking at a date */ - scc = scc_sv; - sct = sct_sv; - sc_tokid = sc_tokid_sv; - sprintf(sc_token, "%d", hour); - return TIME_OK; - } - if (sc_tokid == COLON) { - try(expect2(NUMBER, - "Parsing HH:MM syntax, expecting MM as number, got none")); - minute = atoi(sc_token); - if (minute > 59) { - panic(e("parsing HH:MM syntax, got MM = %d (>59!)", minute)); - } - token(); - } - - /* check if an AM or PM specifier was given - */ - if (sc_tokid == AM || sc_tokid == PM) { - if (hour > 12) { - panic(e("there cannot be more than 12 AM or PM hours")); - } - if (sc_tokid == PM) { - if (hour != 12) /* 12:xx PM is 12:xx, not 24:xx */ - hour += 12; - } else { - if (hour == 12) /* 12:xx AM is 00:xx, not 12:xx */ - hour = 0; - } - token(); - } else if (hour > 23) { - /* guess it was not a time then ... */ - scc = scc_sv; - sct = sct_sv; - sc_tokid = sc_tokid_sv; - sprintf(sc_token, "%d", hour); - return TIME_OK; - } - ptv->tm. tm_hour = hour; - ptv->tm. tm_min = minute; - ptv->tm. tm_sec = 0; - - if (ptv->tm.tm_hour == 24) { - ptv->tm. tm_hour = 0; - ptv->tm. 
tm_mday++; - } - return TIME_OK; -} /* tod */ - - -/* - * assign_date() assigns a date, adjusting year as appropriate - */ -static char *assign_date( - rrd_time_value_t * ptv, - long mday, - long mon, - long year) -{ - if (year > 138) { - if (year > 1970) - year -= 1900; - else { - panic(e("invalid year %d (should be either 00-99 or >1900)", - year)); - } - } else if (year >= 0 && year < 38) { - year += 100; /* Allow year 2000-2037 to be specified as */ - } - /* 00-37 until the problem of 2038 year will */ - /* arise for unices with 32-bit time_t :) */ - if (year < 70) { - panic(e("won't handle dates before epoch (01/01/1970), sorry")); - } - - ptv->tm. tm_mday = mday; - ptv->tm. tm_mon = mon; - ptv->tm. tm_year = year; - - return TIME_OK; -} /* assign_date */ - - -/* - * day() picks apart DAY-SPEC-[12] - */ -static char *day( - rrd_time_value_t * ptv) -{ - /* using time_t seems to help portability with 64bit oses */ - time_t mday = 0, wday, mon, year = ptv->tm.tm_year; - - switch (sc_tokid) { - case YESTERDAY: - ptv->tm. tm_mday--; - - /* FALLTRHU */ - case TODAY: /* force ourselves to stay in today - no further processing */ - token(); - break; - case TOMORROW: - ptv->tm. tm_mday++; - - token(); - break; - - case JAN: - case FEB: - case MAR: - case APR: - case MAY: - case JUN: - case JUL: - case AUG: - case SEP: - case OCT: - case NOV: - case DEC: - /* do month mday [year] - */ - mon = (sc_tokid - JAN); - try(expect2(NUMBER, "the day of the month should follow month name")); - mday = atol(sc_token); - if (token() == NUMBER) { - year = atol(sc_token); - token(); - } else - year = ptv->tm.tm_year; - - try(assign_date(ptv, mday, mon, year)); - break; - - case SUN: - case MON: - case TUE: - case WED: - case THU: - case FRI: - case SAT: - /* do a particular day of the week - */ - wday = (sc_tokid - SUN); - ptv->tm. 
tm_mday += ( - wday - ptv->tm.tm_wday); - - token(); - break; - /* - mday = ptv->tm.tm_mday; - mday += (wday - ptv->tm.tm_wday); - ptv->tm.tm_wday = wday; - - try(assign_date(ptv, mday, ptv->tm.tm_mon, ptv->tm.tm_year)); - break; - */ - - case NUMBER: - /* get numeric , MM/DD/[YY]YY, or DD.MM.[YY]YY - */ - mon = atol(sc_token); - if (mon > 10 * 365 * 24 * 60 * 60) { - ptv->tm = *localtime(&mon); - - token(); - break; - } - - if (mon > 19700101 && mon < 24000101) { /*works between 1900 and 2400 */ - char cmon[3], cmday[3], cyear[5]; - - strncpy(cyear, sc_token, 4); - cyear[4] = '\0'; - year = atol(cyear); - strncpy(cmon, &(sc_token[4]), 2); - cmon[2] = '\0'; - mon = atol(cmon); - strncpy(cmday, &(sc_token[6]), 2); - cmday[2] = '\0'; - mday = atol(cmday); - token(); - } else { - token(); - - if (mon <= 31 && (sc_tokid == SLASH || sc_tokid == DOT)) { - int sep; - - sep = sc_tokid; - try(expect2(NUMBER, "there should be %s number after '%c'", - sep == DOT ? "month" : "day", - sep == DOT ? '.' : '/')); - mday = atol(sc_token); - if (token() == sep) { - try(expect2 - (NUMBER, "there should be year number after '%c'", - sep == DOT ? '.' : '/')); - year = atol(sc_token); - token(); - } - - /* flip months and days for European timing - */ - if (sep == DOT) { - long x = mday; - - mday = mon; - mon = x; - } - } - } - - mon--; - if (mon < 0 || mon > 11) { - panic(e("did you really mean month %d?", mon + 1)); - } - if (mday < 1 || mday > 31) { - panic(e("I'm afraid that %d is not a valid day of the month", - mday)); - } - try(assign_date(ptv, mday, mon, year)); - break; - } /* case */ - return TIME_OK; -} /* month */ - - -/* Global functions */ - - -/* - * rrd_parsetime() is the external interface that takes tspec, parses - * it and puts the result in the rrd_time_value structure *ptv. 
- * It can return either absolute times (these are ensured to be - * correct) or relative time references that are expected to be - * added to some absolute time value and then normalized by - * mktime() The return value is either TIME_OK (aka NULL) or - * the pointer to the error message in the case of problems - */ -char *rrd_parsetime( - const char *tspec, - rrd_time_value_t * ptv) -{ - time_t now = time(NULL); - int hr = 0; - - /* this MUST be initialized to zero for midnight/noon/teatime */ - - Specials = VariousWords; /* initialize special words context */ - - try(init_scanner(1, &tspec)); - - /* establish the default time reference */ - ptv->type = ABSOLUTE_TIME; - ptv->offset = 0; - ptv->tm = *localtime(&now); - ptv->tm. tm_isdst = -1; /* mk time can figure dst by default ... */ - - token(); - switch (sc_tokid) { - case PLUS: - case MINUS: - break; /* jump to OFFSET-SPEC part */ - - case EPOCH: - ptv->type = RELATIVE_TO_EPOCH; - goto KeepItRelative; - case START: - ptv->type = RELATIVE_TO_START_TIME; - goto KeepItRelative; - case END: - ptv->type = RELATIVE_TO_END_TIME; - KeepItRelative: - ptv->tm. tm_sec = 0; - ptv->tm. tm_min = 0; - ptv->tm. tm_hour = 0; - ptv->tm. tm_mday = 0; - ptv->tm. tm_mon = 0; - ptv->tm. tm_year = 0; - - /* FALLTHRU */ - case NOW: - { - int time_reference = sc_tokid; - - token(); - if (sc_tokid == PLUS || sc_tokid == MINUS) - break; - if (time_reference != NOW) { - panic(e("'start' or 'end' MUST be followed by +|- offset")); - } else if (sc_tokid != EOF) { - panic(e("if 'now' is followed by a token it must be +|- offset")); - } - }; - break; - - /* Only absolute time specifications below */ - case NUMBER: - { - long hour_sv = ptv->tm.tm_hour; - long year_sv = ptv->tm.tm_year; - - ptv->tm. tm_hour = 30; - ptv->tm. tm_year = 30000; - - try(tod(ptv)) - try(day(ptv)) - if (ptv->tm.tm_hour == 30 && ptv->tm.tm_year != 30000) { - try(tod(ptv)) - } - if (ptv->tm.tm_hour == 30) { - ptv->tm. 
tm_hour = hour_sv; - } - if (ptv->tm.tm_year == 30000) { - ptv->tm. tm_year = year_sv; - } - }; - break; - /* fix month parsing */ - case JAN: - case FEB: - case MAR: - case APR: - case MAY: - case JUN: - case JUL: - case AUG: - case SEP: - case OCT: - case NOV: - case DEC: - try(day(ptv)); - if (sc_tokid != NUMBER) - break; - try(tod(ptv)) - break; - - /* evil coding for TEATIME|NOON|MIDNIGHT - we've initialized - * hr to zero up above, then fall into this case in such a - * way so we add +12 +4 hours to it for teatime, +12 hours - * to it for noon, and nothing at all for midnight, then - * set our rettime to that hour before leaping into the - * month scanner - */ - case TEATIME: - hr += 4; - /* FALLTHRU */ - case NOON: - hr += 12; - /* FALLTHRU */ - case MIDNIGHT: - /* if (ptv->tm.tm_hour >= hr) { - ptv->tm.tm_mday++; - ptv->tm.tm_wday++; - } *//* shifting does not makes sense here ... noon is noon */ - ptv->tm. tm_hour = hr; - ptv->tm. tm_min = 0; - ptv->tm. tm_sec = 0; - - token(); - try(day(ptv)); - break; - default: - panic(e("unparsable time: %s%s", sc_token, sct)); - break; - } /* ugly case statement */ - - /* - * the OFFSET-SPEC part - * - * (NOTE, the sc_tokid was prefetched for us by the previous code) - */ - if (sc_tokid == PLUS || sc_tokid == MINUS) { - Specials = TimeMultipliers; /* switch special words context */ - while (sc_tokid == PLUS || sc_tokid == MINUS || sc_tokid == NUMBER) { - if (sc_tokid == NUMBER) { - try(plus_minus(ptv, PREVIOUS_OP)); - } else - try(plus_minus(ptv, sc_tokid)); - token(); /* We will get EOF eventually but that's OK, since - token() will return us as many EOFs as needed */ - } - } - - /* now we should be at EOF */ - if (sc_tokid != EOF) { - panic(e("unparsable trailing text: '...%s%s'", sc_token, sct)); - } - - if (ptv->type == ABSOLUTE_TIME) - if (mktime(&ptv->tm) == -1) { /* normalize & check */ - /* can happen for "nonexistent" times, e.g. 
around 3am */ - /* when winter -> summer time correction eats a hour */ - panic(e("the specified time is incorrect (out of range?)")); - } - EnsureMemFree(); - return TIME_OK; -} /* rrd_parsetime */ - - -int rrd_proc_start_end( - rrd_time_value_t * start_tv, - rrd_time_value_t * end_tv, - time_t *start, - time_t *end) -{ - if (start_tv->type == RELATIVE_TO_END_TIME && /* same as the line above */ - end_tv->type == RELATIVE_TO_START_TIME) { - return -RRD_ERR_TIME4; - } - - if (start_tv->type == RELATIVE_TO_START_TIME) { - return -RRD_ERR_TIME5; - } - - if (end_tv->type == RELATIVE_TO_END_TIME) { - return -RRD_ERR_TIME6; - } - - if (start_tv->type == RELATIVE_TO_END_TIME) { - struct tm tmtmp; - - *end = mktime(&(end_tv->tm)) + end_tv->offset; - tmtmp = *localtime(end); /* reinit end including offset */ - tmtmp.tm_mday += start_tv->tm.tm_mday; - tmtmp.tm_mon += start_tv->tm.tm_mon; - tmtmp.tm_year += start_tv->tm.tm_year; - - *start = mktime(&tmtmp) + start_tv->offset; - } else { - *start = mktime(&(start_tv->tm)) + start_tv->offset; - } - if (end_tv->type == RELATIVE_TO_START_TIME) { - struct tm tmtmp; - - *start = mktime(&(start_tv->tm)) + start_tv->offset; - tmtmp = *localtime(start); - tmtmp.tm_mday += end_tv->tm.tm_mday; - tmtmp.tm_mon += end_tv->tm.tm_mon; - tmtmp.tm_year += end_tv->tm.tm_year; - - *end = mktime(&tmtmp) + end_tv->offset; - } else { - *end = mktime(&(end_tv->tm)) + end_tv->offset; - } - return 0; -} /* rrd_proc_start_end */ diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_parsetime.h b/vendor/github.com/open-falcon/rrdlite/rrd_parsetime.h deleted file mode 100644 index d9a34e80..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_parsetime.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef __PARSETIME_H__ -#define __PARSETIME_H__ - -#include - -#include "rrd.h" - -#endif diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_rpncalc.c b/vendor/github.com/open-falcon/rrdlite/rrd_rpncalc.c deleted file mode 100644 index 65999fbe..00000000 --- 
a/vendor/github.com/open-falcon/rrdlite/rrd_rpncalc.c +++ /dev/null @@ -1,964 +0,0 @@ -/**************************************************************************** - * RRDtool 1.4.9 Copyright by Tobi Oetiker, 1997-2014 - **************************************************************************** - * rrd_rpncalc.c RPN calculator functions - ****************************************************************************/ - -#include -#include -#include - -#include "rrd_tool.h" -#include "rrd_rpncalc.h" -// #include "rrd_graph.h" - -short addop2str( - enum op_en op, - enum op_en op_type, - char *op_str, - char **result_str, - unsigned short *offset); -int tzoffset( - time_t); /* used to implement LTIME */ - -short rpn_compact( - rpnp_t *rpnp, - rpn_cdefds_t **rpnc, - short *count) -{ - short i; - - *count = 0; - /* count the number of rpn nodes */ - while (rpnp[*count].op != OP_END) - (*count)++; - if (++(*count) > DS_CDEF_MAX_RPN_NODES) { - return -RRD_ERR_DATA1; - } - - /* allocate memory */ - *rpnc = (rpn_cdefds_t *) calloc(*count, sizeof(rpn_cdefds_t)); - for (i = 0; rpnp[i].op != OP_END; i++) { - (*rpnc)[i].op = (char) rpnp[i].op; - if (rpnp[i].op == OP_NUMBER) { - /* rpnp.val is a double, rpnc.val is a short */ - double temp = floor(rpnp[i].val); - - if (temp < SHRT_MIN || temp > SHRT_MAX || temp != rpnp[i].val) { - free(*rpnc); - return -RRD_ERR_DATA2; - } - (*rpnc)[i].val = (short) temp; - } else if (rpnp[i].op == OP_VARIABLE || rpnp[i].op == OP_PREV_OTHER) { - (*rpnc)[i].val = (short) rpnp[i].ptr; - } - } - /* terminate the sequence */ - (*rpnc)[(*count) - 1].op = OP_END; - return 0; -} - -rpnp_t *rpn_expand( rpn_cdefds_t *rpnc) { - short i; - rpnp_t *rpnp; - - /* DS_CDEF_MAX_RPN_NODES is small, so at the expense of some wasted - * memory we avoid any reallocs */ - rpnp = (rpnp_t *) calloc(DS_CDEF_MAX_RPN_NODES, sizeof(rpnp_t)); - if (rpnp == NULL) { - //RRD_ERR_MALLOC17 - return NULL; - } - for (i = 0; rpnc[i].op != OP_END; ++i) { - rpnp[i].op = (enum 
op_en)rpnc[i].op; - if (rpnp[i].op == OP_NUMBER) { - rpnp[i].val = (double) rpnc[i].val; - } else if (rpnp[i].op == OP_VARIABLE || rpnp[i].op == OP_PREV_OTHER) { - rpnp[i].ptr = (long) rpnc[i].val; - } - } - /* terminate the sequence */ - rpnp[i].op = OP_END; - return rpnp; -} - -/* rpn_compact2str: convert a compact sequence of RPN operator nodes back - * into a CDEF string. This function is used by rrd_dump. - * arguments: - * rpnc: an array of compact RPN operator nodes - * ds_def: a pointer to the data source definition section of an RRD header - * for lookup of data source names by index - * str: out string, memory is allocated by the function, must be freed by the - * the caller */ -void rpn_compact2str( rpn_cdefds_t *rpnc, ds_def_t *ds_def, char **str) { - unsigned short i, offset = 0; - char buffer[7]; /* short as a string */ - - for (i = 0; rpnc[i].op != OP_END; i++) { - if (i > 0) - (*str)[offset++] = ','; - -#define add_op(VV,VVV) \ - if (addop2str((enum op_en)(rpnc[i].op), VV, VVV, str, &offset) == 1) continue; - - if (rpnc[i].op == OP_NUMBER) { - /* convert a short into a string */ -#if defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__) - _itoa(rpnc[i].val, buffer, 10); -#else - sprintf(buffer, "%d", rpnc[i].val); -#endif - add_op(OP_NUMBER, buffer) - } - - if (rpnc[i].op == OP_VARIABLE) { - char *ds_name = ds_def[rpnc[i].val].ds_nam; - - add_op(OP_VARIABLE, ds_name) - } - - if (rpnc[i].op == OP_PREV_OTHER) { - char *ds_name = ds_def[rpnc[i].val].ds_nam; - - add_op(OP_VARIABLE, ds_name) - } -#undef add_op - -#define add_op(VV,VVV) \ - if (addop2str((enum op_en)rpnc[i].op, VV, #VVV, str, &offset) == 1) continue; - - add_op(OP_ADD, +) - add_op(OP_SUB, -) - add_op(OP_MUL, *) - add_op(OP_DIV, /) - add_op(OP_MOD, %) - add_op(OP_SIN, SIN) - add_op(OP_COS, COS) - add_op(OP_LOG, LOG) - add_op(OP_FLOOR, FLOOR) - add_op(OP_CEIL, CEIL) - add_op(OP_EXP, EXP) - add_op(OP_DUP, DUP) - add_op(OP_EXC, EXC) - add_op(OP_POP, POP) - add_op(OP_LT, LT) - 
add_op(OP_LE, LE) - add_op(OP_GT, GT) - add_op(OP_GE, GE) - add_op(OP_EQ, EQ) - add_op(OP_IF, IF) - add_op(OP_MIN, MIN) - add_op(OP_MAX, MAX) - add_op(OP_LIMIT, LIMIT) - add_op(OP_UNKN, UNKN) - add_op(OP_UN, UN) - add_op(OP_NEGINF, NEGINF) - add_op(OP_NE, NE) - add_op(OP_PREV, PREV) - add_op(OP_INF, INF) - add_op(OP_ISINF, ISINF) - add_op(OP_NOW, NOW) - add_op(OP_LTIME, LTIME) - add_op(OP_TIME, TIME) - add_op(OP_ATAN2, ATAN2) - add_op(OP_ATAN, ATAN) - add_op(OP_SQRT, SQRT) - add_op(OP_SORT, SORT) - add_op(OP_REV, REV) - add_op(OP_TREND, TREND) - add_op(OP_TRENDNAN, TRENDNAN) - add_op(OP_PREDICT, PREDICT) - add_op(OP_PREDICTSIGMA, PREDICTSIGMA) - add_op(OP_RAD2DEG, RAD2DEG) - add_op(OP_DEG2RAD, DEG2RAD) - add_op(OP_AVG, AVG) - add_op(OP_ABS, ABS) - add_op(OP_ADDNAN, ADDNAN) - add_op(OP_MINNAN, MINNAN) - add_op(OP_MAXNAN, MAXNAN) -#undef add_op - } - (*str)[offset] = '\0'; - -} - -short addop2str( enum op_en op, enum op_en op_type, char *op_str, - char **result_str, unsigned short *offset) { - if (op == op_type) { - short op_len; - - op_len = strlen(op_str); - *result_str = (char *) rrd_realloc(*result_str, - (op_len + 1 + - *offset) * sizeof(char)); - if (*result_str == NULL) { - return -RRD_ERR_MALLOC16; - } - strncpy(&((*result_str)[*offset]), op_str, op_len); - *offset += op_len; - return 1; - } - return 0; -} - -int parseCDEF_DS( const char *def, rrd_t *rrd, int ds_idx) { - rpnp_t *rpnp = NULL; - rpn_cdefds_t *rpnc = NULL; - short count, i; - - rpnp = rpn_parse((void *) rrd, def, &lookup_DS); - if (rpnp == NULL) { - return -RRD_ERR_PARSE1; - } - /* Check for OP nodes not permitted in COMPUTE DS. - * Moved this check from within rpn_compact() because it really is - * COMPUTE DS specific. This is less efficient, but creation doesn't - * occur too often. 
*/ - for (i = 0; rpnp[i].op != OP_END; i++) { - if (rpnp[i].op == OP_TIME || rpnp[i].op == OP_LTIME || - rpnp[i].op == OP_PREV || rpnp[i].op == OP_COUNT || - rpnp[i].op == OP_TREND || rpnp[i].op == OP_TRENDNAN || - rpnp[i].op == OP_PREDICT || rpnp[i].op == OP_PREDICTSIGMA ) { - free(rpnp); - return -RRD_ERR_DS; - } - } - if (rpn_compact(rpnp, &rpnc, &count) == -1) { - free(rpnp); - return 0; - } - /* copy the compact rpn representation over the ds_def par array */ - memcpy((void *) &(rrd->ds_def[ds_idx].par[DS_cdef]), - (void *) rpnc, count * sizeof(rpn_cdefds_t)); - free(rpnp); - free(rpnc); - return 0; -} - -/* lookup a data source name in the rrd struct and return the index, - * should use ds_match() here except: - * (1) need a void * pointer to the rrd - * (2) error handling is left to the caller - */ -long lookup_DS( - void *rrd_vptr, - char *ds_name) -{ - unsigned int i; - rrd_t *rrd; - - rrd = (rrd_t *) rrd_vptr; - - for (i = 0; i < rrd->stat_head->ds_cnt; ++i) { - if (strcmp(ds_name, rrd->ds_def[i].ds_nam) == 0) - return i; - } - /* the caller handles a bad data source name in the rpn string */ - return -1; -} - -/* rpn_parse : parse a string and generate a rpnp array; modified - * str2rpn() originally included in rrd_graph.c - * arguments: - * key_hash: a transparent argument passed to lookup(); conceptually this - * is a hash object for lookup of a numeric key given a variable name - * expr: the string RPN expression, including variable names - * lookup(): a function that retrieves a numeric key given a variable name - */ -rpnp_t *rpn_parse( void *key_hash, const char *const expr_const, - long (*lookup) (void *, char *)) { - int pos = 0; - char *expr; - long steps = -1; - rpnp_t *rpnp; - char vname[MAX_VNAME_LEN + 10]; - char *old_locale; - int r, ret = 0; - - old_locale = setlocale(LC_NUMERIC, "C"); - - rpnp = NULL; - expr = (char *) expr_const; - - while (*expr) { - if ((rpnp = (rpnp_t *) rrd_realloc(rpnp, (++steps + 2) * - sizeof(rpnp_t))) == NULL) { - 
setlocale(LC_NUMERIC, old_locale); - return NULL; - } - - else if ((sscanf(expr, "%lf%n", &rpnp[steps].val, &pos) == 1) - && (expr[pos] == ',')) { - rpnp[steps].op = OP_NUMBER; - expr += pos; - } -#define match_op(VV,VVV) \ - else if (strncmp(expr, #VVV, strlen(#VVV))==0 && ( expr[strlen(#VVV)] == ',' || expr[strlen(#VVV)] == '\0' )){ \ - rpnp[steps].op = VV; \ - expr+=strlen(#VVV); \ - } - -#define match_op_param(VV,VVV) \ - else if (sscanf(expr, #VVV "(" DEF_NAM_FMT ")",vname) == 1) { \ - int length = 0; \ - if ((length = strlen(#VVV)+strlen(vname)+2, \ - expr[length] == ',' || expr[length] == '\0') ) { \ - rpnp[steps].op = VV; \ - rpnp[steps].ptr = (*lookup)(key_hash,vname); \ - if (rpnp[steps].ptr < 0) { \ - if(!ret) \ - ret = RRD_ERR_UNKNOWN_DATA1; \ - free(rpnp); \ - return NULL; \ - } else expr+=length; \ - } \ - } - - match_op(OP_ADD, +) - match_op(OP_SUB, -) - match_op(OP_MUL, *) - match_op(OP_DIV, /) - match_op(OP_MOD, %) - match_op(OP_SIN, SIN) - match_op(OP_COS, COS) - match_op(OP_LOG, LOG) - match_op(OP_FLOOR, FLOOR) - match_op(OP_CEIL, CEIL) - match_op(OP_EXP, EXP) - match_op(OP_DUP, DUP) - match_op(OP_EXC, EXC) - match_op(OP_POP, POP) - match_op(OP_LTIME, LTIME) - match_op(OP_LT, LT) - match_op(OP_LE, LE) - match_op(OP_GT, GT) - match_op(OP_GE, GE) - match_op(OP_EQ, EQ) - match_op(OP_IF, IF) - match_op(OP_MIN, MIN) - match_op(OP_MAX, MAX) - match_op(OP_LIMIT, LIMIT) - /* order is important here ! .. 
match longest first */ - match_op(OP_UNKN, UNKN) - match_op(OP_UN, UN) - match_op(OP_NEGINF, NEGINF) - match_op(OP_NE, NE) - match_op(OP_COUNT, COUNT) - match_op_param(OP_PREV_OTHER, PREV) - match_op(OP_PREV, PREV) - match_op(OP_INF, INF) - match_op(OP_ISINF, ISINF) - match_op(OP_NOW, NOW) - match_op(OP_TIME, TIME) - match_op(OP_ATAN2, ATAN2) - match_op(OP_ATAN, ATAN) - match_op(OP_SQRT, SQRT) - match_op(OP_SORT, SORT) - match_op(OP_REV, REV) - match_op(OP_TREND, TREND) - match_op(OP_TRENDNAN, TRENDNAN) - match_op(OP_PREDICT, PREDICT) - match_op(OP_PREDICTSIGMA, PREDICTSIGMA) - match_op(OP_RAD2DEG, RAD2DEG) - match_op(OP_DEG2RAD, DEG2RAD) - match_op(OP_AVG, AVG) - match_op(OP_ABS, ABS) - match_op(OP_ADDNAN, ADDNAN) - match_op(OP_MINNAN, MINNAN) - match_op(OP_MAXNAN, MAXNAN) -#undef match_op - else if ((sscanf(expr, DEF_NAM_FMT "%n", vname, &pos) == 1) - && ((rpnp[steps].ptr = (*lookup) (key_hash, vname)) != - -1)) { - rpnp[steps].op = OP_VARIABLE; - expr += pos; - } - - else { - setlocale(LC_NUMERIC, old_locale); - free(rpnp); - return NULL; - } - - if (*expr == 0) - break; - if (*expr == ',') - expr++; - else { - setlocale(LC_NUMERIC, old_locale); - free(rpnp); - return NULL; - } - } - rpnp[steps + 1].op = OP_END; - setlocale(LC_NUMERIC, old_locale); - return rpnp; -} - -void rpnstack_init( rpnstack_t *rpnstack) { - rpnstack->s = NULL; - rpnstack->dc_stacksize = 0; - rpnstack->dc_stackblock = 100; -} - -void rpnstack_free( rpnstack_t *rpnstack) { - if (rpnstack->s != NULL) - free(rpnstack->s); - rpnstack->dc_stacksize = 0; -} - -static int rpn_compare_double( const void *x, const void *y) { - double diff = *((const double *) x) - *((const double *) y); - - return (diff < 0) ? -1 : (diff > 0) ? 
1 : 0; -} - -/* rpn_calc: run the RPN calculator; also performs variable substitution; - * moved and modified from data_calc() originally included in rrd_graph.c - * arguments: - * rpnp : an array of RPN operators (including variable references) - * rpnstack : the initialized stack - * data_idx : when data_idx is a multiple of rpnp.step, the rpnp.data pointer - * is advanced by rpnp.ds_cnt; used only for variable substitution - * output : an array of output values; OP_PREV assumes this array contains - * the "previous" value at index position output_idx-1; the definition of - * "previous" depends on the calling environment - * output_idx : an index into the output array in which to store the output - * of the RPN calculator - * returns: -1 if the computation failed (also calls rrd_set_error) - * 0 on success - */ -short rpn_calc( rpnp_t *rpnp, rpnstack_t *rpnstack, long data_idx, - rrd_value_t *output, int output_idx) { - int rpi; - long stptr = -1; - - /* process each op from the rpn in turn */ - for (rpi = 0; rpnp[rpi].op != OP_END; rpi++) { - /* allocate or grow the stack */ - if (stptr + 5 > rpnstack->dc_stacksize) { - /* could move this to a separate function */ - rpnstack->dc_stacksize += rpnstack->dc_stackblock; - rpnstack->s = (double*)rrd_realloc(rpnstack->s, - (rpnstack->dc_stacksize) * - sizeof(*(rpnstack->s))); - if (rpnstack->s == NULL) { - return -RRD_ERR_STACK; - } - } -#define stackunderflow(MINSIZE) \ - if(stptrs[++stptr] = rpnp[rpi].val; - break; - case OP_VARIABLE: - case OP_PREV_OTHER: - /* Sanity check: VDEFs shouldn't make it here */ - if (rpnp[rpi].ds_cnt == 0) { - return -RRD_ERR_ABORT; - } else { - /* make sure we pull the correct value from - * the *.data array. Adjust the pointer into - * the array acordingly. 
Advance the ptr one - * row in the rra (skip over non-relevant - * data sources) - */ - if (rpnp[rpi].op == OP_VARIABLE) { - rpnstack->s[++stptr] = *(rpnp[rpi].data); - } else { - if ((output_idx) <= 0) { - rpnstack->s[++stptr] = DNAN; - } else { - rpnstack->s[++stptr] = - *(rpnp[rpi].data - rpnp[rpi].ds_cnt); - } - - } - if (data_idx % rpnp[rpi].step == 0) { - rpnp[rpi].data += rpnp[rpi].ds_cnt; - } - } - break; - case OP_COUNT: - rpnstack->s[++stptr] = (output_idx + 1); /* Note: Counter starts at 1 */ - break; - case OP_PREV: - if ((output_idx) <= 0) { - rpnstack->s[++stptr] = DNAN; - } else { - rpnstack->s[++stptr] = output[output_idx - 1]; - } - break; - case OP_UNKN: - rpnstack->s[++stptr] = DNAN; - break; - case OP_INF: - rpnstack->s[++stptr] = DINF; - break; - case OP_NEGINF: - rpnstack->s[++stptr] = -DINF; - break; - case OP_NOW: - rpnstack->s[++stptr] = (double) time(NULL); - break; - case OP_TIME: - /* HACK: this relies on the data_idx being the time, - ** which the within-function scope is unaware of */ - rpnstack->s[++stptr] = (double) data_idx; - break; - case OP_LTIME: - rpnstack->s[++stptr] = - (double) tzoffset(data_idx) + (double) data_idx; - break; - case OP_ADD: - stackunderflow(1); - rpnstack->s[stptr - 1] = rpnstack->s[stptr - 1] - + rpnstack->s[stptr]; - stptr--; - break; - case OP_ADDNAN: - stackunderflow(1); - if (isnan(rpnstack->s[stptr - 1])) { - rpnstack->s[stptr - 1] = rpnstack->s[stptr]; - } else if (isnan(rpnstack->s[stptr])) { - /* NOOP */ - /* rpnstack->s[stptr - 1] = rpnstack->s[stptr - 1]; */ - } else { - rpnstack->s[stptr - 1] = rpnstack->s[stptr - 1] - + rpnstack->s[stptr]; - } - - stptr--; - break; - case OP_SUB: - stackunderflow(1); - rpnstack->s[stptr - 1] = rpnstack->s[stptr - 1] - - rpnstack->s[stptr]; - stptr--; - break; - case OP_MUL: - stackunderflow(1); - rpnstack->s[stptr - 1] = (rpnstack->s[stptr - 1]) - * (rpnstack->s[stptr]); - stptr--; - break; - case OP_DIV: - stackunderflow(1); - rpnstack->s[stptr - 1] = 
rpnstack->s[stptr - 1] - / rpnstack->s[stptr]; - stptr--; - break; - case OP_MOD: - stackunderflow(1); - rpnstack->s[stptr - 1] = fmod(rpnstack->s[stptr - 1] - , rpnstack->s[stptr]); - stptr--; - break; - case OP_SIN: - stackunderflow(0); - rpnstack->s[stptr] = sin(rpnstack->s[stptr]); - break; - case OP_ATAN: - stackunderflow(0); - rpnstack->s[stptr] = atan(rpnstack->s[stptr]); - break; - case OP_RAD2DEG: - stackunderflow(0); - rpnstack->s[stptr] = 57.29577951 * rpnstack->s[stptr]; - break; - case OP_DEG2RAD: - stackunderflow(0); - rpnstack->s[stptr] = 0.0174532952 * rpnstack->s[stptr]; - break; - case OP_ATAN2: - stackunderflow(1); - rpnstack->s[stptr - 1] = atan2(rpnstack->s[stptr - 1], - rpnstack->s[stptr]); - stptr--; - break; - case OP_COS: - stackunderflow(0); - rpnstack->s[stptr] = cos(rpnstack->s[stptr]); - break; - case OP_CEIL: - stackunderflow(0); - rpnstack->s[stptr] = ceil(rpnstack->s[stptr]); - break; - case OP_FLOOR: - stackunderflow(0); - rpnstack->s[stptr] = floor(rpnstack->s[stptr]); - break; - case OP_LOG: - stackunderflow(0); - rpnstack->s[stptr] = log(rpnstack->s[stptr]); - break; - case OP_DUP: - stackunderflow(0); - rpnstack->s[stptr + 1] = rpnstack->s[stptr]; - stptr++; - break; - case OP_POP: - stackunderflow(0); - stptr--; - break; - case OP_EXC: - stackunderflow(1); - { - double dummy; - - dummy = rpnstack->s[stptr]; - rpnstack->s[stptr] = rpnstack->s[stptr - 1]; - rpnstack->s[stptr - 1] = dummy; - } - break; - case OP_EXP: - stackunderflow(0); - rpnstack->s[stptr] = exp(rpnstack->s[stptr]); - break; - case OP_LT: - stackunderflow(1); - if (isnan(rpnstack->s[stptr - 1])); - else if (isnan(rpnstack->s[stptr])) - rpnstack->s[stptr - 1] = rpnstack->s[stptr]; - else - rpnstack->s[stptr - 1] = rpnstack->s[stptr - 1] < - rpnstack->s[stptr] ? 
1.0 : 0.0; - stptr--; - break; - case OP_LE: - stackunderflow(1); - if (isnan(rpnstack->s[stptr - 1])); - else if (isnan(rpnstack->s[stptr])) - rpnstack->s[stptr - 1] = rpnstack->s[stptr]; - else - rpnstack->s[stptr - 1] = rpnstack->s[stptr - 1] <= - rpnstack->s[stptr] ? 1.0 : 0.0; - stptr--; - break; - case OP_GT: - stackunderflow(1); - if (isnan(rpnstack->s[stptr - 1])); - else if (isnan(rpnstack->s[stptr])) - rpnstack->s[stptr - 1] = rpnstack->s[stptr]; - else - rpnstack->s[stptr - 1] = rpnstack->s[stptr - 1] > - rpnstack->s[stptr] ? 1.0 : 0.0; - stptr--; - break; - case OP_GE: - stackunderflow(1); - if (isnan(rpnstack->s[stptr - 1])); - else if (isnan(rpnstack->s[stptr])) - rpnstack->s[stptr - 1] = rpnstack->s[stptr]; - else - rpnstack->s[stptr - 1] = rpnstack->s[stptr - 1] >= - rpnstack->s[stptr] ? 1.0 : 0.0; - stptr--; - break; - case OP_NE: - stackunderflow(1); - if (isnan(rpnstack->s[stptr - 1])); - else if (isnan(rpnstack->s[stptr])) - rpnstack->s[stptr - 1] = rpnstack->s[stptr]; - else - rpnstack->s[stptr - 1] = rpnstack->s[stptr - 1] == - rpnstack->s[stptr] ? 0.0 : 1.0; - stptr--; - break; - case OP_EQ: - stackunderflow(1); - if (isnan(rpnstack->s[stptr - 1])); - else if (isnan(rpnstack->s[stptr])) - rpnstack->s[stptr - 1] = rpnstack->s[stptr]; - else - rpnstack->s[stptr - 1] = rpnstack->s[stptr - 1] == - rpnstack->s[stptr] ? 1.0 : 0.0; - stptr--; - break; - case OP_IF: - stackunderflow(2); - rpnstack->s[stptr - 2] = (isnan(rpnstack->s[stptr - 2]) - || rpnstack->s[stptr - 2] == - 0.0) ? 
rpnstack->s[stptr] : rpnstack-> - s[stptr - 1]; - stptr--; - stptr--; - break; - case OP_MIN: - stackunderflow(1); - if (isnan(rpnstack->s[stptr - 1])); - else if (isnan(rpnstack->s[stptr])) - rpnstack->s[stptr - 1] = rpnstack->s[stptr]; - else if (rpnstack->s[stptr - 1] > rpnstack->s[stptr]) - rpnstack->s[stptr - 1] = rpnstack->s[stptr]; - stptr--; - break; - case OP_MINNAN: - stackunderflow(1); - if (isnan(rpnstack->s[stptr - 1])) - rpnstack->s[stptr - 1] = rpnstack->s[stptr]; - else if (isnan(rpnstack->s[stptr])); - else if (rpnstack->s[stptr - 1] > rpnstack->s[stptr]) - rpnstack->s[stptr - 1] = rpnstack->s[stptr]; - stptr--; - break; - case OP_MAX: - stackunderflow(1); - if (isnan(rpnstack->s[stptr - 1])); - else if (isnan(rpnstack->s[stptr])) - rpnstack->s[stptr - 1] = rpnstack->s[stptr]; - else if (rpnstack->s[stptr - 1] < rpnstack->s[stptr]) - rpnstack->s[stptr - 1] = rpnstack->s[stptr]; - stptr--; - break; - case OP_MAXNAN: - stackunderflow(1); - if (isnan(rpnstack->s[stptr - 1])) - rpnstack->s[stptr - 1] = rpnstack->s[stptr]; - else if (isnan(rpnstack->s[stptr])); - else if (rpnstack->s[stptr - 1] < rpnstack->s[stptr]) - rpnstack->s[stptr - 1] = rpnstack->s[stptr]; - stptr--; - break; - case OP_LIMIT: - stackunderflow(2); - if (isnan(rpnstack->s[stptr - 2])); - else if (isnan(rpnstack->s[stptr - 1])) - rpnstack->s[stptr - 2] = rpnstack->s[stptr - 1]; - else if (isnan(rpnstack->s[stptr])) - rpnstack->s[stptr - 2] = rpnstack->s[stptr]; - else if (rpnstack->s[stptr - 2] < rpnstack->s[stptr - 1]) - rpnstack->s[stptr - 2] = DNAN; - else if (rpnstack->s[stptr - 2] > rpnstack->s[stptr]) - rpnstack->s[stptr - 2] = DNAN; - stptr -= 2; - break; - case OP_UN: - stackunderflow(0); - rpnstack->s[stptr] = isnan(rpnstack->s[stptr]) ? 1.0 : 0.0; - break; - case OP_ISINF: - stackunderflow(0); - rpnstack->s[stptr] = isinf(rpnstack->s[stptr]) ? 
1.0 : 0.0; - break; - case OP_SQRT: - stackunderflow(0); - rpnstack->s[stptr] = sqrt(rpnstack->s[stptr]); - break; - case OP_SORT: - stackunderflow(0); - { - int spn = (int) rpnstack->s[stptr--]; - - stackunderflow(spn - 1); - qsort(rpnstack->s + stptr - spn + 1, spn, sizeof(double), - rpn_compare_double); - } - break; - case OP_REV: - stackunderflow(0); - { - int spn = (int) rpnstack->s[stptr--]; - double *p, *q; - - stackunderflow(spn - 1); - - p = rpnstack->s + stptr - spn + 1; - q = rpnstack->s + stptr; - while (p < q) { - double x = *q; - - *q-- = *p; - *p++ = x; - } - } - break; - case OP_PREDICT: - case OP_PREDICTSIGMA: - stackunderflow(2); - { - /* the local averaging window (similar to trend, but better here, as we get better statistics thru numbers)*/ - int locstepsize = rpnstack->s[--stptr]; - /* the number of shifts and range-checking*/ - int shifts = rpnstack->s[--stptr]; - stackunderflow(shifts); - // handle negative shifts special - if (shifts<0) { - stptr--; - } else { - stptr-=shifts; - } - /* the real calculation */ - double val=DNAN; - /* the info on the datasource */ - time_t dsstep = (time_t) rpnp[rpi - 1].step; - int dscount = rpnp[rpi - 1].ds_cnt; - int locstep = (int)ceil((float)locstepsize/(float)dsstep); - - /* the sums */ - double sum = 0; - double sum2 = 0; - int count = 0; - /* now loop for each position */ - int doshifts=shifts; - if (shifts<0) { doshifts=-shifts; } - for(int loop=0;loops[stptr]; - } else { - shiftstep = rpnstack->s[stptr+loop]; - } - if(shiftstep <0) { - return -RRD_ERR_ALLOW; - } - shiftstep=(int)ceil((float)shiftstep/(float)dsstep); - /* loop all local shifts */ - for(int i=0;i<=locstep;i++) { - /* now calculate offset into data-array - relative to output_idx*/ - int offset=shiftstep+i; - /* and process if we have index 0 of above */ - if ((offset>=0)&&(offset0) { - val = sum/(double)count; - } - } else { - if (count>1) { /* the sigma case */ - val=count*sum2-sum*sum; - if (val<0) { - val=DNAN; - } else { - 
val=sqrt(val/((float)count*((float)count-1.0))); - } - } - } - rpnstack->s[stptr] = val; - } - break; - case OP_TREND: - case OP_TRENDNAN: - stackunderflow(1); - if ((rpi < 2) || (rpnp[rpi - 2].op != OP_VARIABLE)) { - return -RRD_ERR_ARG12; - } else { - time_t dur = (time_t) rpnstack->s[stptr]; - time_t step = (time_t) rpnp[rpi - 2].step; - - if (output_idx + 1 >= (int) ceil((float) dur / (float) step)) { - int ignorenan = (rpnp[rpi].op == OP_TREND); - double accum = 0.0; - int i = -1; /* pick the current entries, not the next one - as the data pointer has already been forwarded - when the OP_VARIABLE was processed */ - int count = 0; - - do { - double val = - rpnp[rpi - 2].data[rpnp[rpi - 2].ds_cnt * i--]; - if (ignorenan || !isnan(val)) { - accum += val; - ++count; - } - - dur -= step; - } while (dur > 0); - - rpnstack->s[--stptr] = - (count == 0) ? DNAN : (accum / count); - } else - rpnstack->s[--stptr] = DNAN; - } - break; - case OP_AVG: - stackunderflow(0); - { - int i = (int) rpnstack->s[stptr--]; - double sum = 0; - int count = 0; - - stackunderflow(i - 1); - while (i > 0) { - double val = rpnstack->s[stptr--]; - - i--; - if (isnan(val)) { - continue; - } - count++; - sum += val; - } - /* now push the result back on stack */ - if (count > 0) { - rpnstack->s[++stptr] = sum / count; - } else { - rpnstack->s[++stptr] = DNAN; - } - } - break; - case OP_ABS: - stackunderflow(0); - rpnstack->s[stptr] = fabs(rpnstack->s[stptr]); - break; - case OP_END: - break; - } -#undef stackunderflow - } - if (stptr != 0) { - return -RRD_ERR_STACK2; - } - - output[output_idx] = rpnstack->s[0]; - return 0; -} - -/* figure out what the local timezone offset for any point in - time was. 
Return it in seconds */ -int tzoffset( - time_t now) -{ - int gm_sec, gm_min, gm_hour, gm_yday, gm_year, - l_sec, l_min, l_hour, l_yday, l_year; - struct tm t; - int off; - - gmtime_r(&now, &t); - gm_sec = t.tm_sec; - gm_min = t.tm_min; - gm_hour = t.tm_hour; - gm_yday = t.tm_yday; - gm_year = t.tm_year; - localtime_r(&now, &t); - l_sec = t.tm_sec; - l_min = t.tm_min; - l_hour = t.tm_hour; - l_yday = t.tm_yday; - l_year = t.tm_year; - off = - (l_sec - gm_sec) + (l_min - gm_min) * 60 + (l_hour - gm_hour) * 3600; - if (l_yday > gm_yday || l_year > gm_year) { - off += 24 * 3600; - } else if (l_yday < gm_yday || l_year < gm_year) { - off -= 24 * 3600; - } - return off; -} diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_rpncalc.h b/vendor/github.com/open-falcon/rrdlite/rrd_rpncalc.h deleted file mode 100644 index bfbac634..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_rpncalc.h +++ /dev/null @@ -1,88 +0,0 @@ -/**************************************************************************** - * RRDtool 1.4.9 Copyright by Tobi Oetiker, 1997-2014 - **************************************************************************** - * rrd_rpncalc.h RPN calculator functions - ****************************************************************************/ -#ifndef _RRD_RPNCALC_H -#define _RRD_RPNCALC_H - -/* WARNING: if new operators are added, they MUST be added at the very end of the list. - * This is because COMPUTE (CDEF) DS store OP nodes by number (name is not - * an option due to limited par array size). OP nodes must have the same - * numeric values, otherwise the stored numbers will mean something different. 
*/ -enum op_en { OP_NUMBER = 0, OP_VARIABLE, OP_INF, OP_PREV, OP_NEGINF, - OP_UNKN, OP_NOW, OP_TIME, OP_ADD, OP_MOD, OP_SUB, OP_MUL, - OP_DIV, OP_SIN, OP_DUP, OP_EXC, OP_POP, - OP_COS, OP_LOG, OP_EXP, OP_LT, OP_LE, OP_GT, OP_GE, OP_EQ, OP_IF, - OP_MIN, OP_MAX, OP_LIMIT, OP_FLOOR, OP_CEIL, - OP_UN, OP_END, OP_LTIME, OP_NE, OP_ISINF, OP_PREV_OTHER, OP_COUNT, - OP_ATAN, OP_SQRT, OP_SORT, OP_REV, OP_TREND, OP_TRENDNAN, - OP_ATAN2, OP_RAD2DEG, OP_DEG2RAD, - OP_PREDICT,OP_PREDICTSIGMA, - OP_AVG, OP_ABS, OP_ADDNAN, - OP_MINNAN, OP_MAXNAN -}; - -typedef struct rpnp_t { - enum op_en op; - double val; /* value for a OP_NUMBER */ - long ptr; /* pointer into the gdes array for OP_VAR */ - double *data; /* pointer to the current value from OP_VAR DAS */ - long ds_cnt; /* data source count for data pointer */ - long step; /* time step for OP_VAR das */ -} rpnp_t; - -/* a compact representation of rpnp_t for computed data sources */ -typedef struct rpn_cdefds_t { - char op; /* rpn operator type */ - short val; /* used by OP_NUMBER and OP_VARIABLE */ -} rpn_cdefds_t; - -#define MAX_VNAME_LEN 255 -#define DEF_NAM_FMT "%255[-_A-Za-z0-9]" - -/* limit imposed by sizeof(rpn_cdefs_t) and rrd.ds_def.par */ -#define DS_CDEF_MAX_RPN_NODES (int)(sizeof(unival)*10 / sizeof(rpn_cdefds_t)) - -typedef struct rpnstack_t { - double *s; - long dc_stacksize; - long dc_stackblock; -} rpnstack_t; - -void rpnstack_init( - rpnstack_t *rpnstack); -void rpnstack_free( - rpnstack_t *rpnstack); - -int parseCDEF_DS( - const char *def, - rrd_t *rrd, - int ds_idx); -long lookup_DS( - void *rrd_vptr, - char *ds_name); - -short rpn_compact( - rpnp_t *rpnp, - rpn_cdefds_t **rpnc, - short *count); -rpnp_t *rpn_expand( - rpn_cdefds_t *rpnc); -void rpn_compact2str( - rpn_cdefds_t *rpnc, - ds_def_t *ds_def, - char **str); -rpnp_t *rpn_parse( - void *key_hash, - const char *const expr, - long (*lookup) (void *, - char *)); -short rpn_calc( - rpnp_t *rpnp, - rpnstack_t *rpnstack, - long data_idx, - rrd_value_t 
*output, - int output_idx); - -#endif diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_tool.h b/vendor/github.com/open-falcon/rrdlite/rrd_tool.h deleted file mode 100644 index d00876e6..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_tool.h +++ /dev/null @@ -1,133 +0,0 @@ -/***************************************************************************** - * RRDtool 1.4.9 Copyright by Tobi Oetiker, 1997-2014 - ***************************************************************************** - * rrd_tool.h Common Header File - *****************************************************************************/ -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef _RRD_TOOL_H -#define _RRD_TOOL_H - -#if defined(WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__) -#include "../win32/config.h" -#else -#ifdef HAVE_CONFIG_H -#include "rrd_config.h" -#endif -#endif - -#include "rrd.h" - -#if defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__) - -/* Win32 only includes */ - -#include /* for _isnan */ -#include /* for chdir */ - - struct tm *localtime_r( - const time_t *timep, - struct tm *result); - char *ctime_r( - const time_t *timep, - char *result); - struct tm *gmtime_r( - const time_t *timep, - struct tm *result); - char *strtok_r( - char *str, - const char *sep, - char **last); - -#else - -/* unix-only includes */ -#if !defined(isnan) && !defined(HAVE_ISNAN) - int isnan( - double value); -#endif - -#endif - -/* local include files -- need to be after the system ones */ -#ifndef RRD_LITE -#ifdef HAVE_GETOPT_LONG -#define _GNU_SOURCE -#include -#else -#include "rrd_getopt.h" -#endif -#endif - -#include "rrd_format.h" - -#ifndef max -#define max(a,b) ((a) > (b) ? (a) : (b)) -#endif - -#ifndef min -#define min(a,b) ((a) < (b) ? 
(a) : (b)) -#endif - -#define DIM(x) (sizeof(x)/sizeof(x[0])) - - char *sprintf_alloc( - char *, - ...); - -/* HELPER FUNCTIONS */ - - int PngSize( - FILE *, - long *, - long *); - - int rrd_create_fn( - const char *file_name, - rrd_t *rrd); - int rrd_fetch_fn (const char *filename, - enum cf_en cf_idx, - time_t *start, - time_t *end, - unsigned long *step, - unsigned long *ds_cnt, - char ***ds_namv, - rrd_value_t **data); - - -#ifdef HAVE_LIBDBI -int rrd_fetch_fn_libdbi(const char *filename, enum cf_en cf_idx, - time_t *start,time_t *end, - unsigned long *step, - unsigned long *ds_cnt, - char ***ds_namv, - rrd_value_t **data); -#endif - -#define RRD_READONLY (1<<0) -#define RRD_READWRITE (1<<1) -#define RRD_CREAT (1<<2) -#define RRD_READAHEAD (1<<3) -#define RRD_COPY (1<<4) -#define RRD_EXCL (1<<5) - - enum cf_en cf_conv( - const char *string); - enum dst_en dst_conv( - char *string); - long ds_match( - rrd_t *rrd, - char *ds_nam); - off_t rrd_get_header_size( - rrd_t *rrd); - double rrd_diff( - char *a, - char *b); - -#endif /* _RRD_TOOL_H */ - -#ifdef __cplusplus -} -#endif diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_update.c b/vendor/github.com/open-falcon/rrdlite/rrd_update.c deleted file mode 100644 index 1486b3be..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_update.c +++ /dev/null @@ -1,1734 +0,0 @@ -/***************************************************************************** - * RRDtool 1.4.9 Copyright by Tobi Oetiker, 1997-2014 - * Copyright by Florian Forster, 2008 - ***************************************************************************** - * rrd_update.c RRD Update Function - ***************************************************************************** - * $Id$ - *****************************************************************************/ - -#include "rrd_tool.h" - -#define DISABLE_USEC - -#if defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__) -#include -#include -#include -#endif - -#include - -#include 
"rrd_hw.h" -#include "rrd_rpncalc.h" - -#include "rrd_is_thread_safe.h" -#include "unused.h" - -#ifndef RRD_LITE -#include "rrd_client.h" -#endif - -#if defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__) -/* - * WIN32 does not have gettimeofday and struct timeval. This is a quick and dirty - * replacement. - */ -#include - -#ifndef __MINGW32__ -struct timeval { - time_t tv_sec; /* seconds */ - long tv_usec; /* microseconds */ -}; -#endif - -struct __timezone { - int tz_minuteswest; /* minutes W of Greenwich */ - int tz_dsttime; /* type of dst correction */ -}; - -static int gettimeofday( struct timeval *t, struct __timezone *tz) { - - struct _timeb current_time; - - _ftime(¤t_time); - - t->tv_sec = current_time.time; - t->tv_usec = current_time.millitm * 1000; - - return 0; -} - -#endif - -/* FUNCTION PROTOTYPES */ - -int rrd_update_r( const char *filename, const char *tmplt, - int argc, const char **argv); -int _rrd_update( const char *filename, const char *tmplt, - int argc, const char **argv, rrd_info_t *); - -static int allocate_data_structures( rrd_t *rrd, char ***updvals, - rrd_value_t **pdp_temp, const char *tmplt, long **tmpl_idx, - unsigned long *tmpl_cnt, unsigned long **rra_step_cnt, - unsigned long **skip_update, rrd_value_t **pdp_new); - -static int parse_template( rrd_t *rrd, const char *tmplt, - unsigned long *tmpl_cnt, long *tmpl_idx); - -static int process_arg( char *step_start, rrd_t *rrd, rrd_file_t *rrd_file, - unsigned long rra_begin, time_t *current_time, - unsigned long *current_time_usec, rrd_value_t *pdp_temp, - rrd_value_t *pdp_new, unsigned long *rra_step_cnt, - char **updvals, long *tmpl_idx, unsigned long tmpl_cnt, - rrd_info_t ** pcdp_summary, int version, - unsigned long *skip_update, int *schedule_smooth); - -static int parse_ds( rrd_t *rrd, char **updvals, long *tmpl_idx, - char *input, unsigned long tmpl_cnt, time_t *current_time, - unsigned long *current_time_usec, int version); - -static int get_time_from_reading( 
rrd_t *rrd, char timesyntax, - char **updvals, time_t *current_time, - unsigned long *current_time_usec, int version); - -static int update_pdp_prep( rrd_t *rrd, char **updvals, - rrd_value_t *pdp_new, double interval, int *periodic); - -static int calculate_elapsed_steps( rrd_t *rrd, unsigned long current_time, - unsigned long current_time_usec, double interval, - double *pre_int, double *post_int, unsigned long *proc_pdp_cnt); - -static void simple_update( rrd_t *rrd, double interval, rrd_value_t *pdp_new); - -static int process_all_pdp_st( rrd_t *rrd, double interval, double pre_int, - double post_int, unsigned long elapsed_pdp_st, rrd_value_t *pdp_new, - rrd_value_t *pdp_temp); - -static int process_pdp_st( rrd_t *rrd, unsigned long ds_idx, double interval, - double pre_int, double post_int, long diff_pdp_st, rrd_value_t *pdp_new, - rrd_value_t *pdp_temp); - -static int update_all_cdp_prep( rrd_t *rrd, unsigned long *rra_step_cnt, - unsigned long rra_begin, rrd_file_t *rrd_file, - unsigned long elapsed_pdp_st, unsigned long proc_pdp_cnt, - rrd_value_t **last_seasonal_coef, rrd_value_t **seasonal_coef, - rrd_value_t *pdp_temp, unsigned long *skip_update, - int *schedule_smooth); - -static int do_schedule_smooth( rrd_t *rrd, unsigned long rra_idx, - unsigned long elapsed_pdp_st); - -static int update_cdp_prep( rrd_t *rrd, unsigned long elapsed_pdp_st, - unsigned long start_pdp_offset, unsigned long *rra_step_cnt, int rra_idx, - rrd_value_t *pdp_temp, rrd_value_t *last_seasonal_coef, - rrd_value_t *seasonal_coef, int current_cf); - -static void update_cdp( unival *scratch, int current_cf, - rrd_value_t pdp_temp_val, unsigned long rra_step_cnt, - unsigned long elapsed_pdp_st, unsigned long start_pdp_offset, - unsigned long pdp_cnt, rrd_value_t xff, int i, int ii); - -static void initialize_cdp_val( unival *scratch, int current_cf, - rrd_value_t pdp_temp_val, unsigned long start_pdp_offset, - unsigned long pdp_cnt); - -static int reset_cdp( rrd_t *rrd, unsigned long 
elapsed_pdp_st, - rrd_value_t *pdp_temp, rrd_value_t *last_seasonal_coef, - rrd_value_t *seasonal_coef, int rra_idx, - int ds_idx, int cdp_idx, enum cf_en current_cf); - -static rrd_value_t initialize_carry_over( rrd_value_t pdp_temp_val, - int current_cf, unsigned long elapsed_pdp_st, - unsigned long start_pdp_offset, unsigned long pdp_cnt); - -static rrd_value_t calculate_cdp_val( rrd_value_t cdp_val, - rrd_value_t pdp_temp_val, unsigned long elapsed_pdp_st, - int current_cf, int i, int ii); - -static int update_aberrant_cdps( - rrd_t *rrd, - rrd_file_t *rrd_file, - unsigned long rra_begin, - unsigned long elapsed_pdp_st, - rrd_value_t *pdp_temp, - rrd_value_t **seasonal_coef); - -static int write_to_rras( - rrd_t *rrd, - rrd_file_t *rrd_file, - unsigned long *rra_step_cnt, - unsigned long rra_begin, - time_t current_time, - unsigned long *skip_update, - rrd_info_t ** pcdp_summary, - int periodic); - -static int write_RRA_row( - rrd_file_t *rrd_file, - rrd_t *rrd, - unsigned long rra_idx, - unsigned short CDP_scratch_idx, - rrd_info_t ** pcdp_summary, - time_t rra_time, - int flag); - -static int smooth_all_rras( - rrd_t *rrd, - rrd_file_t *rrd_file, - unsigned long rra_begin); - -#ifndef HAVE_MMAP -static int write_changes_to_disk( - rrd_t *rrd, - rrd_file_t *rrd_file, - int version); -#endif - -/* - * normalize time as returned by gettimeofday. usec part must - * be always >= 0 - */ -static void normalize_time( struct timeval *t) { - if (t->tv_usec < 0) { - t->tv_sec--; - t->tv_usec += 1e6L; - } -} - -/* - * Sets current_time and current_time_usec based on the current time. - * current_time_usec is set to 0 if the version number is 1 or 2. 
- */ -static void initialize_time( time_t *current_time, unsigned long *current_time_usec, - int version) { - struct timeval tmp_time; /* used for time conversion */ - - gettimeofday(&tmp_time, 0); - normalize_time(&tmp_time); - *current_time = tmp_time.tv_sec; - if (version >= 3) { - *current_time_usec = tmp_time.tv_usec; - } else { - *current_time_usec = 0; - } -#ifdef DISABLE_USEC - *current_time_usec = 0; -#endif -} - -#define IFDNAN(X,Y) (isnan(X) ? (Y) : (X)); - -int rrd_update_r( const char *filename, const char *tmplt, int argc, - const char **argv) { - return _rrd_update(filename, tmplt, argc, argv, NULL); -} - -int _rrd_update( const char *filename, const char *tmplt, - int argc, const char **argv, rrd_info_t * pcdp_summary) { - - int arg_i = 2; - - unsigned long rra_begin; /* byte pointer to the rra - * area in the rrd file. this - * pointer never changes value */ - rrd_value_t *pdp_new; /* prepare the incoming data to be added - * to the existing entry */ - rrd_value_t *pdp_temp; /* prepare the pdp values to be added - * to the cdp values */ - - long *tmpl_idx; /* index representing the settings - * transported by the tmplt index */ - unsigned long tmpl_cnt = 2; /* time and data */ - rrd_t rrd; - time_t current_time = 0; - unsigned long current_time_usec = 0; /* microseconds part of current time */ - char **updvals; - int schedule_smooth = 0; - - /* number of elapsed PDP steps since last update */ - unsigned long *rra_step_cnt = NULL; - - int version; /* rrd version */ - rrd_file_t *rrd_file; - char *arg_copy; /* for processing the argv */ - unsigned long *skip_update; /* RRAs to advance but not write */ - int ret = 0; - - /* need at least 1 arguments: data. 
*/ - if (argc < 1) { - ret = -RRD_ERR_ARG10; - goto err_out; - } - - rrd_init(&rrd); - if ((rrd_file = rrd_open(filename, &rrd, RRD_READWRITE, &ret)) == NULL) { - goto err_free; - } - /* We are now at the beginning of the rra's */ - rra_begin = rrd_file->header_len; - - version = atoi(rrd.stat_head->version); - - initialize_time(¤t_time, ¤t_time_usec, version); - - /* get exclusive lock to whole file. - * lock gets removed when we close the file. - */ - if (rrd_lock(rrd_file) != 0) { - ret = -RRD_ERR_LOCK; - goto err_close; - } - - if ((ret = allocate_data_structures(&rrd, &updvals, - &pdp_temp, tmplt, &tmpl_idx, &tmpl_cnt, - &rra_step_cnt, &skip_update, - &pdp_new)) < 0) { - goto err_close; - } - - /* loop through the arguments. */ - for (arg_i = 0; arg_i < argc; arg_i++) { - if ((arg_copy = strdup(argv[arg_i])) == NULL) { - ret = -RRD_ERR_FAILED_STRDUP; - break; - } - ret = process_arg(arg_copy, &rrd, rrd_file, rra_begin, - ¤t_time, ¤t_time_usec, pdp_temp, pdp_new, - rra_step_cnt, updvals, tmpl_idx, tmpl_cnt, - &pcdp_summary, version, skip_update, - &schedule_smooth); - if (ret == -RRD_ERR_TIME3) { - //nothing to do - //current_time <= last_up - ret = 0; - }else if(ret < 0){ - //ret = -RRD_ERR_ARG9; - free(arg_copy); - break; - } - free(arg_copy); - } - - free(rra_step_cnt); - - /* if we got here and if there is an error and if the file has not been - * written to, then close things up and return. */ - if (ret) { - goto err_free_structures; - } -#ifndef HAVE_MMAP - if ((ret = write_changes_to_disk(&rrd, rrd_file, version)) < -1) { - //ret = -RRD_ERR_WRITE7; - goto err_free_structures; - } -#endif - - /* calling the smoothing code here guarantees at most one smoothing - * operation per rrd_update call. Unfortunately, it is possible with bulk - * updates, or a long-delayed update for smoothing to occur off-schedule. - * This really isn't critical except during the burn-in cycles. 
*/ - if (schedule_smooth) { - ret = smooth_all_rras(&rrd, rrd_file, rra_begin); - } - - /* rrd_dontneed(rrd_file,&rrd); */ - rrd_free(&rrd); - rrd_close(rrd_file); - - free(pdp_new); - free(tmpl_idx); - free(pdp_temp); - free(skip_update); - free(updvals); - return 0; - -err_free_structures: - free(pdp_new); - free(tmpl_idx); - free(pdp_temp); - free(skip_update); - free(updvals); -err_close: - rrd_close(rrd_file); -err_free: - rrd_free(&rrd); -err_out: - return ret; -} - -/* - * Allocate some important arrays used, and initialize the template. - * - * When it returns, either all of the structures are allocated - * or none of them are. - * - * Returns 0 on success, < 0 on error. - */ -static int allocate_data_structures( rrd_t *rrd, char ***updvals, rrd_value_t **pdp_temp, - const char *tmplt, long **tmpl_idx, unsigned long *tmpl_cnt, unsigned long **rra_step_cnt, - unsigned long **skip_update, rrd_value_t **pdp_new) { - unsigned i, ii; - int ret = 0; - if ((*updvals = (char **) malloc(sizeof(char *) - * (rrd->stat_head->ds_cnt + 1))) == NULL) { - return -RRD_ERR_MALLOC10; - } - if ((*pdp_temp = (rrd_value_t *) malloc(sizeof(rrd_value_t) - * rrd->stat_head->ds_cnt)) == - NULL) { - ret = -RRD_ERR_MALLOC11; - goto err_free_updvals; - } - if ((*skip_update = (unsigned long *) malloc(sizeof(unsigned long) - * - rrd->stat_head->rra_cnt)) == - NULL) { - ret = -RRD_ERR_MALLOC12; - goto err_free_pdp_temp; - } - if ((*tmpl_idx = (long *) malloc(sizeof(unsigned long) - * (rrd->stat_head->ds_cnt + 1))) == NULL) { - ret = -RRD_ERR_MALLOC13; - goto err_free_skip_update; - } - if ((*rra_step_cnt = (unsigned long *) malloc(sizeof(unsigned long) - * - (rrd->stat_head-> - rra_cnt))) == NULL) { - ret = -RRD_ERR_MALLOC14; - goto err_free_tmpl_idx; - } - - /* initialize tmplt redirector */ - /* default config example (assume DS 1 is a CDEF DS) - tmpl_idx[0] -> 0; (time) - tmpl_idx[1] -> 1; (DS 0) - tmpl_idx[2] -> 3; (DS 2) - tmpl_idx[3] -> 4; (DS 3) */ - (*tmpl_idx)[0] = 0; /* time */ 
- for (i = 1, ii = 1; i <= rrd->stat_head->ds_cnt; i++) { - if (dst_conv(rrd->ds_def[i - 1].dst) != DST_CDEF) - (*tmpl_idx)[ii++] = i; - } - *tmpl_cnt = ii; - - if (tmplt != NULL) { - if (parse_template(rrd, tmplt, tmpl_cnt, *tmpl_idx) < 0) { - ret = -RRD_ERR_PARSE; - goto err_free_rra_step_cnt; - } - } - - if ((*pdp_new = (rrd_value_t *) malloc(sizeof(rrd_value_t) - * rrd->stat_head->ds_cnt)) == NULL) { - ret = -RRD_ERR_MALLOC15; - goto err_free_rra_step_cnt; - } - - return 0; - -err_free_rra_step_cnt: - free(*rra_step_cnt); -err_free_tmpl_idx: - free(*tmpl_idx); -err_free_skip_update: - free(*skip_update); -err_free_pdp_temp: - free(*pdp_temp); -err_free_updvals: - free(*updvals); - return ret; -} - -/* - * Parses tmplt and puts an ordered list of DS's into tmpl_idx. - * - * Returns 0 on success. - */ -static int parse_template( rrd_t *rrd, const char *tmplt, - unsigned long *tmpl_cnt, long *tmpl_idx) { - char *dsname, *tmplt_copy; - unsigned int tmpl_len, i; - int ret = 0; - - *tmpl_cnt = 1; /* the first entry is the time */ - - /* we should work on a writeable copy here */ - if ((tmplt_copy = strdup(tmplt)) == NULL) { - ret = -RRD_ERR_FAILED_STRDUP1; - goto out; - } - - dsname = tmplt_copy; - tmpl_len = strlen(tmplt_copy); - for (i = 0; i <= tmpl_len; i++) { - if (tmplt_copy[i] == ':' || tmplt_copy[i] == '\0') { - tmplt_copy[i] = '\0'; - if (*tmpl_cnt > rrd->stat_head->ds_cnt) { - ret = -RRD_ERR_MORE_DS; - goto out_free_tmpl_copy; - } - if ((tmpl_idx[(*tmpl_cnt)++] = ds_match(rrd, dsname) + 1) == 0) { - ret = -RRD_ERR_UNKNOWN_DS_NAME1; - goto out_free_tmpl_copy; - } - /* go to the next entry on the tmplt_copy */ - if (i < tmpl_len) - dsname = &tmplt_copy[i + 1]; - } - } -out_free_tmpl_copy: - free(tmplt_copy); -out: - return ret; -} - -/* - * Parse an update string, updates the primary data points (PDPs) - * and consolidated data points (CDPs), and writes changes to the RRAs. - * - * Returns 0 on success, < 0 on error. 
- */ -static int process_arg( char *step_start, rrd_t *rrd, rrd_file_t *rrd_file, - unsigned long rra_begin, time_t *current_time, - unsigned long *current_time_usec, rrd_value_t *pdp_temp, - rrd_value_t *pdp_new, unsigned long *rra_step_cnt, - char **updvals, long *tmpl_idx, unsigned long tmpl_cnt, - rrd_info_t ** pcdp_summary, int version, unsigned long *skip_update, - int *schedule_smooth) { - rrd_value_t *seasonal_coef = NULL, *last_seasonal_coef = NULL; - - /* a vector of future Holt-Winters seasonal coefs */ - unsigned long elapsed_pdp_st; - - double interval, pre_int, post_int; /* interval between this and - * the last run */ - unsigned long proc_pdp_cnt; - - int periodic = 1; /* A sign, 1 for priodic, 0 for nonperiodic, initialize to periodic */ - - int ret = 0; - ret = parse_ds(rrd, updvals, tmpl_idx, step_start, tmpl_cnt, - current_time, current_time_usec, version); - if (ret) { - return ret; - } - - - interval = (double) (*current_time - rrd->live_head->last_up) - + (double) ((long) *current_time_usec - - (long) rrd->live_head->last_up_usec) / 1e6f; - - /* process the data sources and update the pdp_prep - * area accordingly */ - if ((ret = update_pdp_prep(rrd, updvals, pdp_new, interval, &periodic)) < 0) { - return ret; - } - - elapsed_pdp_st = calculate_elapsed_steps(rrd, - *current_time, - *current_time_usec, interval, - &pre_int, &post_int, - &proc_pdp_cnt); - - /* has a pdp_st moment occurred since the last run ? */ - if (elapsed_pdp_st == 0) { - /* no we have not passed a pdp_st moment. therefore update is simple */ - simple_update(rrd, interval, pdp_new); - } else { - /* an pdp_st has occurred. 
*/ - if ((ret = process_all_pdp_st(rrd, interval, - pre_int, post_int, - elapsed_pdp_st, pdp_new, pdp_temp)) < 0) { - return ret; - } - if ((ret = update_all_cdp_prep(rrd, rra_step_cnt, - rra_begin, rrd_file, - elapsed_pdp_st, - proc_pdp_cnt, - &last_seasonal_coef, - &seasonal_coef, - pdp_temp, - skip_update, schedule_smooth)) < 0) { - goto err_free_coefficients; - } - if ((ret = update_aberrant_cdps(rrd, rrd_file, rra_begin, - elapsed_pdp_st, pdp_temp, - &seasonal_coef)) < 0) { - goto err_free_coefficients; - } - if ((ret = write_to_rras(rrd, rrd_file, rra_step_cnt, rra_begin, - *current_time, skip_update, - pcdp_summary, periodic)) < 0) { - goto err_free_coefficients; - } - } /* endif a pdp_st has occurred */ - rrd->live_head->last_up = *current_time; - rrd->live_head->last_up_usec = *current_time_usec; - - if (version < 3) { - *rrd->legacy_last_up = rrd->live_head->last_up; - } - free(seasonal_coef); - free(last_seasonal_coef); - return 0; - -err_free_coefficients: - free(seasonal_coef); - free(last_seasonal_coef); - return ret; -} - -/* - * Parse a DS string (time + colon-separated values), storing the - * results in current_time, current_time_usec, and updvals. - * - * Returns 0 on success, < 0 on error. 
- */ -static int parse_ds( rrd_t *rrd, char **updvals, long *tmpl_idx, char *input, - unsigned long tmpl_cnt, time_t *current_time, - unsigned long *current_time_usec, int version) { - char *p; - unsigned long i; - char timesyntax; - int ret = 0; - - updvals[0] = input; - /* initialize all ds input to unknown except the first one - which has always got to be set */ - for (i = 1; i <= rrd->stat_head->ds_cnt; i++) - updvals[i] = "U"; - - /* separate all ds elements; first must be examined separately - due to alternate time syntax */ - if ((p = strchr(input, '@')) != NULL) { - timesyntax = '@'; - } else if ((p = strchr(input, ':')) != NULL) { - timesyntax = ':'; - } else { - return -RRD_ERR_STR; - } - *p = '\0'; - i = 1; - updvals[tmpl_idx[i++]] = p + 1; - while (*(++p)) { - if (*p == ':') { - *p = '\0'; - if (i < tmpl_cnt) { - updvals[tmpl_idx[i++]] = p + 1; - } else { - return -RRD_ERR_ARG11; - } - } - } - - if (i != tmpl_cnt) { - return -RRD_ERR_EXPECTED; - } - - return get_time_from_reading(rrd, timesyntax, updvals, - current_time, current_time_usec, - version); -} - -/* - * Parse the time in a DS string, store it in current_time and - * current_time_usec and verify that it's later than the last - * update for this DS. - * - * Returns 0 on success, < 0 on error. - */ -static int get_time_from_reading( rrd_t *rrd, char timesyntax, - char **updvals, time_t *current_time, - unsigned long *current_time_usec, int version) { - double tmp; - char *parsetime_error = NULL; - char *old_locale; - rrd_time_value_t ds_tv; - struct timeval tmp_time; /* used for time conversion */ - - /* get the time from the reading ... 
handle N */ - if (timesyntax == '@') { /* at-style */ - if ((parsetime_error = rrd_parsetime(updvals[0], &ds_tv))) { - return -RRD_ERR_TIME1; - } - if (ds_tv.type == RELATIVE_TO_END_TIME || - ds_tv.type == RELATIVE_TO_START_TIME) { - return -RRD_ERR_TIME2; - } - *current_time = mktime(&ds_tv.tm) +ds_tv.offset; - *current_time_usec = 0; /* FIXME: how to handle usecs here ? */ - } else if (strcmp(updvals[0], "N") == 0) { - gettimeofday(&tmp_time, 0); - normalize_time(&tmp_time); - *current_time = tmp_time.tv_sec; - *current_time_usec = tmp_time.tv_usec; - } else { - old_locale = setlocale(LC_NUMERIC, "C"); - errno = 0; - tmp = strtod(updvals[0], 0); - if (errno > 0) { - return -RRD_ERR_STRTOD; - }; - setlocale(LC_NUMERIC, old_locale); - if (tmp < 0.0){ - gettimeofday(&tmp_time, 0); - tmp = (double)tmp_time.tv_sec + (double)tmp_time.tv_usec * 1e-6f + tmp; - } - - *current_time = floor(tmp); - *current_time_usec = (long) ((tmp - (double) *current_time) * 1e6f); - } - /* dont do any correction for old version RRDs */ - if (version < 3) - *current_time_usec = 0; - -#ifdef DISABLE_USEC - *current_time_usec = 0; -#endif - - - if (*current_time < rrd->live_head->last_up || - (*current_time == rrd->live_head->last_up && - (long) *current_time_usec <= (long) rrd->live_head->last_up_usec)) { - return -RRD_ERR_TIME3; - } - return 0; -} - -/* - * Update pdp_new by interpreting the updvals according to the DS type - * (COUNTER, GAUGE, etc.). - * - * Returns 0 on success, < 0 on error. 
- */ -static int update_pdp_prep( rrd_t *rrd, char **updvals, rrd_value_t *pdp_new, - double interval, int *periodic) { - unsigned long ds_idx; - int ii; - char *endptr; /* used in the conversion */ - double rate; - char *old_locale; - enum dst_en dst_idx; - int ret = 0; - - for (ds_idx = 0; ds_idx < rrd->stat_head->ds_cnt; ds_idx++) { - dst_idx = dst_conv(rrd->ds_def[ds_idx].dst); - - /* to set sign if periodic or nonperiodic */ - if (rrd->ds_def[ds_idx].par[DS_mrhb_cnt].u_cnt < interval) { - *periodic = 0; - } - - /* NOTE: DST_CDEF should never enter this if block, because - * updvals[ds_idx+1][0] is initialized to 'U'; unless the caller - * accidently specified a value for the DST_CDEF. To handle this case, - * an extra check is required. */ - - if ((updvals[ds_idx + 1][0] != 'U') && - (dst_idx != DST_CDEF)) { - //rrd->ds_def[ds_idx].par[DS_mrhb_cnt].u_cnt >= interval) { - rate = DNAN; - - /* pdp_new contains rate * time ... eg the bytes transferred during - * the interval. Doing it this way saves a lot of math operations - */ - switch (dst_idx) { - case DST_COUNTER: - case DST_DERIVE: - /* Check if this is a valid integer. `U' is already handled in - * another branch. */ - for (ii = 0; updvals[ds_idx + 1][ii] != 0; ii++) { - if ((ii == 0) && (dst_idx == DST_DERIVE) - && (updvals[ds_idx + 1][ii] == '-')) - continue; - - if ((updvals[ds_idx + 1][ii] < '0') - || (updvals[ds_idx + 1][ii] > '9')) { - return -RRD_ERR_INT; - } - } /* for (ii = 0; updvals[ds_idx + 1][ii] != 0; ii++) */ - - if (rrd->pdp_prep[ds_idx].last_ds[0] != 'U') { - pdp_new[ds_idx] = - rrd_diff(updvals[ds_idx + 1], - rrd->pdp_prep[ds_idx].last_ds); - if (dst_idx == DST_COUNTER) { - /* simple overflow catcher. This will fail - * terribly for non 32 or 64 bit counters - * ... are there any others in SNMP land? 
- */ - if (pdp_new[ds_idx] < (double) 0.0) - pdp_new[ds_idx] += (double) 4294967296.0; /* 2^32 */ - if (pdp_new[ds_idx] < (double) 0.0) - pdp_new[ds_idx] += (double) 18446744069414584320.0; /* 2^64-2^32 */ - } - rate = pdp_new[ds_idx] / interval; - } else { - pdp_new[ds_idx] = DNAN; - } - break; - case DST_ABSOLUTE: - old_locale = setlocale(LC_NUMERIC, "C"); - errno = 0; - pdp_new[ds_idx] = strtod(updvals[ds_idx + 1], &endptr); - if (errno > 0) { - return -RRD_ERR_STRTOD; - }; - setlocale(LC_NUMERIC, old_locale); - if (endptr[0] != '\0') { - return -RRD_ERR_DATA; - } - rate = pdp_new[ds_idx] / interval; - break; - case DST_GAUGE: - old_locale = setlocale(LC_NUMERIC, "C"); - errno = 0; - pdp_new[ds_idx] = - strtod(updvals[ds_idx + 1], &endptr) * interval; - if (errno) { - return -RRD_ERR_STRTOD; - }; - setlocale(LC_NUMERIC, old_locale); - if (endptr[0] != '\0') { - return -RRD_ERR_DATA; - } - rate = pdp_new[ds_idx] / interval; - break; - default: - return -RRD_ERR_UNKNOWN_DS_TYPE; - } - /* break out of this for loop if the error string is set */ - if (ret) { - return ret; - } - /* make sure pdp_temp is neither too large or too small - * if any of these occur it becomes unknown ... - * sorry folks ... 
*/ - if (!isnan(rate) && - ((!isnan(rrd->ds_def[ds_idx].par[DS_max_val].u_val) && - rate > rrd->ds_def[ds_idx].par[DS_max_val].u_val) || - (!isnan(rrd->ds_def[ds_idx].par[DS_min_val].u_val) && - rate < rrd->ds_def[ds_idx].par[DS_min_val].u_val))) { - pdp_new[ds_idx] = DNAN; - } - } else { - /* no news is news all the same */ - pdp_new[ds_idx] = DNAN; - } - - - /* make a copy of the command line argument for the next run */ -#ifdef DEBUG - fprintf(stderr, "prep ds[%lu]\t" - "last_arg '%s'\t" - "this_arg '%s'\t" - "pdp_new %10.2f\n", - ds_idx, rrd->pdp_prep[ds_idx].last_ds, updvals[ds_idx + 1], - pdp_new[ds_idx]); -#endif - strncpy(rrd->pdp_prep[ds_idx].last_ds, updvals[ds_idx + 1], - LAST_DS_LEN - 1); - rrd->pdp_prep[ds_idx].last_ds[LAST_DS_LEN - 1] = '\0'; - } - return 0; -} - -/* - * How many PDP steps have elapsed since the last update? Returns the answer, - * and stores the time between the last update and the last PDP in pre_time, - * and the time between the last PDP and the current time in post_int. 
- */ -static int calculate_elapsed_steps( rrd_t *rrd, unsigned long current_time, - unsigned long current_time_usec, double interval, - double *pre_int, double *post_int, unsigned long *proc_pdp_cnt) { - unsigned long proc_pdp_st; /* which pdp_st was the last to be processed */ - unsigned long occu_pdp_st; /* when was the pdp_st before the last update - * time */ - unsigned long proc_pdp_age; /* how old was the data in the pdp prep area - * when it was last updated */ - unsigned long occu_pdp_age; /* how long ago was the last pdp_step time */ - - /* when was the current pdp started */ - proc_pdp_age = rrd->live_head->last_up % rrd->stat_head->pdp_step; - proc_pdp_st = rrd->live_head->last_up - proc_pdp_age; - - /* when did the last pdp_st occur */ - occu_pdp_age = current_time % rrd->stat_head->pdp_step; - occu_pdp_st = current_time - occu_pdp_age; - - if (occu_pdp_st > proc_pdp_st) { - /* OK we passed the pdp_st moment */ - *pre_int = (long) occu_pdp_st - rrd->live_head->last_up; /* how much of the input data - * occurred before the latest - * pdp_st moment*/ - *pre_int -= ((double) rrd->live_head->last_up_usec) / 1e6f; /* adjust usecs */ - *post_int = occu_pdp_age; /* how much after it */ - *post_int += ((double) current_time_usec) / 1e6f; /* adjust usecs */ - } else { - *pre_int = interval; - *post_int = 0; - } - - *proc_pdp_cnt = proc_pdp_st / rrd->stat_head->pdp_step; - -#ifdef DEBUG - printf("proc_pdp_age %lu\t" - "proc_pdp_st %lu\t" - "occu_pfp_age %lu\t" - "occu_pdp_st %lu\t" - "int %lf\t" - "pre_int %lf\t" - "post_int %lf\n", proc_pdp_age, proc_pdp_st, - occu_pdp_age, occu_pdp_st, interval, *pre_int, *post_int); -#endif - - /* compute the number of elapsed pdp_st moments */ - return (occu_pdp_st - proc_pdp_st) / rrd->stat_head->pdp_step; -} - -/* - * Increment the PDP values by the values in pdp_new, or else initialize them. 
- */ -static void simple_update( rrd_t *rrd, double interval, rrd_value_t *pdp_new) { - int i; - - for (i = 0; i < (signed) rrd->stat_head->ds_cnt; i++) { - if (isnan(pdp_new[i])) { - /* this is not really accurate if we use subsecond data arrival time - should have thought of it when going subsecond resolution ... - sorry next format change we will have it! */ - rrd->pdp_prep[i].scratch[PDP_unkn_sec_cnt].u_cnt += - floor(interval); - } else { - if (isnan(rrd->pdp_prep[i].scratch[PDP_val].u_val)) { - rrd->pdp_prep[i].scratch[PDP_val].u_val = pdp_new[i]; - } else { - rrd->pdp_prep[i].scratch[PDP_val].u_val += pdp_new[i]; - } - } -#ifdef DEBUG - fprintf(stderr, - "NO PDP ds[%i]\t" - "value %10.2f\t" - "unkn_sec %5lu\n", - i, - rrd->pdp_prep[i].scratch[PDP_val].u_val, - rrd->pdp_prep[i].scratch[PDP_unkn_sec_cnt].u_cnt); -#endif - } -} - -/* - * Call process_pdp_st for each DS. - * - * Returns 0 on success, < 0 on error. - */ -static int process_all_pdp_st( rrd_t *rrd, double interval, - double pre_int, double post_int, unsigned long elapsed_pdp_st, - rrd_value_t *pdp_new, rrd_value_t *pdp_temp) { - unsigned long ds_idx; - int ret = 0; - - /* in pdp_prep[].scratch[PDP_val].u_val we have collected - rate*seconds which occurred up to the last run. - pdp_new[] contains rate*seconds from the latest run. - pdp_temp[] will contain the rate for cdp */ - - for (ds_idx = 0; ds_idx < rrd->stat_head->ds_cnt; ds_idx++) { - if ((ret = process_pdp_st(rrd, ds_idx, interval, pre_int, post_int, - elapsed_pdp_st * rrd->stat_head->pdp_step, - pdp_new, pdp_temp)) < 0 ) { - return ret; - } -#ifdef DEBUG - fprintf(stderr, "PDP UPD ds[%lu]\t" - "elapsed_pdp_st %lu\t" - "pdp_temp %10.2f\t" - "new_prep %10.2f\t" - "new_unkn_sec %5lu\n", - ds_idx, - elapsed_pdp_st, - pdp_temp[ds_idx], - rrd->pdp_prep[ds_idx].scratch[PDP_val].u_val, - rrd->pdp_prep[ds_idx].scratch[PDP_unkn_sec_cnt].u_cnt); -#endif - } - return 0; -} - -/* - * Process an update that occurs after one of the PDP moments. 
- * Increments the PDP value, sets NAN if time greater than the - * heartbeats have elapsed, processes CDEFs. - * - * Returns 0 on success, < 0 on error. - */ -static int process_pdp_st( rrd_t *rrd, unsigned long ds_idx, - double interval, double pre_int, double post_int, - long diff_pdp_st, /* number of seconds in full steps passed since last update */ - rrd_value_t *pdp_new, rrd_value_t *pdp_temp) { - int i; - int ret = 0; - - /* update pdp_prep to the current pdp_st. */ - double pre_unknown = 0.0; - unival *scratch = rrd->pdp_prep[ds_idx].scratch; - unsigned long mrhb = rrd->ds_def[ds_idx].par[DS_mrhb_cnt].u_cnt; - - rpnstack_t rpnstack; /* used for COMPUTE DS */ - - rpnstack_init(&rpnstack); - - - if (isnan(pdp_new[ds_idx])) { - /* a final bit of unknown to be added before calculation - we use a temporary variable for this so that we - don't have to turn integer lines before using the value */ - pre_unknown = pre_int; - } else { - if (isnan(scratch[PDP_val].u_val)) { - scratch[PDP_val].u_val = 0; - } - scratch[PDP_val].u_val += pdp_new[ds_idx] / interval * pre_int; - } - - /* if too much of the pdp_prep is unknown we dump it */ - /* if the interval is larger thatn mrhb we get NAN */ - if ((rrd->stat_head->pdp_step / 2.0 < - (signed) scratch[PDP_unkn_sec_cnt].u_cnt)) { - pdp_temp[ds_idx] = DNAN; - } else { - pdp_temp[ds_idx] = scratch[PDP_val].u_val / - ((double) (diff_pdp_st - scratch[PDP_unkn_sec_cnt].u_cnt) - - pre_unknown); - } - - /* process CDEF data sources; remember each CDEF DS can - * only reference other DS with a lower index number */ - if (dst_conv(rrd->ds_def[ds_idx].dst) == DST_CDEF) { - rpnp_t *rpnp; - - rpnp = - rpn_expand((rpn_cdefds_t *) &(rrd->ds_def[ds_idx].par[DS_cdef])); - if(rpnp == NULL) { - rpnstack_free(&rpnstack); - return -RRD_ERR_MALLOC17; - } - /* substitute data values for OP_VARIABLE nodes */ - for (i = 0; rpnp[i].op != OP_END; i++) { - if (rpnp[i].op == OP_VARIABLE) { - rpnp[i].op = OP_NUMBER; - rpnp[i].val = 
pdp_temp[rpnp[i].ptr]; - } - } - /* run the rpn calculator */ - if ((ret = rpn_calc(rpnp, &rpnstack, 0, pdp_temp, ds_idx)) < 0) { - free(rpnp); - rpnstack_free(&rpnstack); - return ret; - } - free(rpnp); - } - - /* make pdp_prep ready for the next run */ - if (isnan(pdp_new[ds_idx])) { - /* this is not realy accurate if we use subsecond data arival time - should have thought of it when going subsecond resolution ... - sorry next format change we will have it! */ - scratch[PDP_unkn_sec_cnt].u_cnt = floor(post_int); - scratch[PDP_val].u_val = DNAN; - } else { - scratch[PDP_unkn_sec_cnt].u_cnt = 0; - scratch[PDP_val].u_val = pdp_new[ds_idx] / interval * post_int; - } - rpnstack_free(&rpnstack); - return ret; -} - -/* - * Iterate over all the RRAs for a given DS and: - * 1. Decide whether to schedule a smooth later - * 2. Decide whether to skip updating SEASONAL and DEVSEASONAL - * 3. Update the CDP - * - * Returns 0 on success, < 0 on error - */ -static int update_all_cdp_prep( rrd_t *rrd, unsigned long *rra_step_cnt, - unsigned long rra_begin, rrd_file_t *rrd_file, - unsigned long elapsed_pdp_st, unsigned long proc_pdp_cnt, - rrd_value_t **last_seasonal_coef, rrd_value_t **seasonal_coef, - rrd_value_t *pdp_temp, unsigned long *skip_update, - int *schedule_smooth) { - unsigned long rra_idx; - - /* index into the CDP scratch array */ - enum cf_en current_cf; - unsigned long rra_start; - - /* number of rows to be updated in an RRA for a data value. 
*/ - unsigned long start_pdp_offset; - int ret = 0; - - rra_start = rra_begin; - for (rra_idx = 0; rra_idx < rrd->stat_head->rra_cnt; rra_idx++) { - current_cf = cf_conv(rrd->rra_def[rra_idx].cf_nam); - if (current_cf < 0){ - ret = -RRD_ERR_UNREC_CONSOLIDATION_FUNC; - } - start_pdp_offset = - rrd->rra_def[rra_idx].pdp_cnt - - proc_pdp_cnt % rrd->rra_def[rra_idx].pdp_cnt; - skip_update[rra_idx] = 0; - if (start_pdp_offset <= elapsed_pdp_st) { - rra_step_cnt[rra_idx] = (elapsed_pdp_st - start_pdp_offset) / - rrd->rra_def[rra_idx].pdp_cnt + 1; - } else { - rra_step_cnt[rra_idx] = 0; - } - - if (current_cf == CF_SEASONAL || current_cf == CF_DEVSEASONAL) { - /* If this is a bulk update, we need to skip ahead in the seasonal arrays - * so that they will be correct for the next observed value; note that for - * the bulk update itself, no update will occur to DEVSEASONAL or SEASONAL; - * futhermore, HWPREDICT and DEVPREDICT will be set to DNAN. */ - if (rra_step_cnt[rra_idx] > 1) { - skip_update[rra_idx] = 1; - if((ret = lookup_seasonal(rrd, rra_idx, rra_start, rrd_file, - elapsed_pdp_st, last_seasonal_coef))) - return ret; - if((ret = lookup_seasonal(rrd, rra_idx, rra_start, rrd_file, - elapsed_pdp_st + 1, seasonal_coef))) - return ret; - } - /* periodically run a smoother for seasonal effects */ - if (do_schedule_smooth(rrd, rra_idx, elapsed_pdp_st)) { -#ifdef DEBUG - fprintf(stderr, - "schedule_smooth: cur_row %lu, elapsed_pdp_st %lu, smooth idx %lu\n", - rrd->rra_ptr[rra_idx].cur_row, elapsed_pdp_st, - rrd->rra_def[rra_idx].par[RRA_seasonal_smooth_idx]. - u_cnt); -#endif - *schedule_smooth = 1; - } - } - if (ret) - return ret; - - if (update_cdp_prep - (rrd, elapsed_pdp_st, start_pdp_offset, rra_step_cnt, rra_idx, - pdp_temp, *last_seasonal_coef, *seasonal_coef, - current_cf) < 0) { - return -RRD_ERR_UPDATE_CDP; - } - rra_start += - rrd->rra_def[rra_idx].row_cnt * rrd->stat_head->ds_cnt * - sizeof(rrd_value_t); - } - return 0; -} - -/* - * Are we due for a smooth? 
Also increments our position in the burn-in cycle. - */ -static int do_schedule_smooth( rrd_t *rrd, unsigned long rra_idx, - unsigned long elapsed_pdp_st) { - unsigned long cdp_idx = rra_idx * (rrd->stat_head->ds_cnt); - unsigned long cur_row = rrd->rra_ptr[rra_idx].cur_row; - unsigned long row_cnt = rrd->rra_def[rra_idx].row_cnt; - unsigned long seasonal_smooth_idx = - rrd->rra_def[rra_idx].par[RRA_seasonal_smooth_idx].u_cnt; - unsigned long *init_seasonal = - &(rrd->cdp_prep[cdp_idx].scratch[CDP_init_seasonal].u_cnt); - - /* Need to use first cdp parameter buffer to track burnin (burnin requires - * a specific smoothing schedule). The CDP_init_seasonal parameter is - * really an RRA level, not a data source within RRA level parameter, but - * the rra_def is read only for rrd_update (not flushed to disk). */ - if (*init_seasonal > BURNIN_CYCLES) { - /* someone has no doubt invented a trick to deal with this wrap around, - * but at least this code is clear. */ - if (seasonal_smooth_idx > cur_row) { - /* here elapsed_pdp_st = rra_step_cnt[rra_idx] because of 1-1 mapping - * between PDP and CDP */ - return (cur_row + elapsed_pdp_st >= seasonal_smooth_idx); - } - /* can't rely on negative numbers because we are working with - * unsigned values */ - return (cur_row + elapsed_pdp_st >= row_cnt - && cur_row + elapsed_pdp_st >= row_cnt + seasonal_smooth_idx); - } - /* mark off one of the burn-in cycles */ - return (cur_row + elapsed_pdp_st >= row_cnt && ++(*init_seasonal)); -} - -/* - * For a given RRA, iterate over the data sources and call the appropriate - * consolidation function. - * - * Returns 0 on success, < 0 on error. 
- */ -static int update_cdp_prep( rrd_t *rrd, unsigned long elapsed_pdp_st, - unsigned long start_pdp_offset, unsigned long *rra_step_cnt, - int rra_idx, rrd_value_t *pdp_temp, rrd_value_t *last_seasonal_coef, - rrd_value_t *seasonal_coef, int current_cf) { - unsigned long ds_idx, cdp_idx; - int ret = 0; - - /* update CDP_PREP areas */ - /* loop over data soures within each RRA */ - for (ds_idx = 0; ds_idx < rrd->stat_head->ds_cnt; ds_idx++) { - - cdp_idx = rra_idx * rrd->stat_head->ds_cnt + ds_idx; - - if (rrd->rra_def[rra_idx].pdp_cnt > 1) { - update_cdp(rrd->cdp_prep[cdp_idx].scratch, current_cf, - pdp_temp[ds_idx], rra_step_cnt[rra_idx], - elapsed_pdp_st, start_pdp_offset, - rrd->rra_def[rra_idx].pdp_cnt, - rrd->rra_def[rra_idx].par[RRA_cdp_xff_val].u_val, - rra_idx, ds_idx); - } else { - /* Nothing to consolidate if there's one PDP per CDP. However, if - * we've missed some PDPs, let's update null counters etc. */ - if (elapsed_pdp_st > 2) { - ret = reset_cdp(rrd, elapsed_pdp_st, pdp_temp, last_seasonal_coef, - seasonal_coef, rra_idx, ds_idx, cdp_idx, - (enum cf_en)current_cf); - } - } - - if (ret) - return ret; - } /* endif data sources loop */ - return 0; -} - -/* - * Given the new reading (pdp_temp_val), update or initialize the CDP value, - * primary value, secondary value, and # of unknowns. - */ -static void update_cdp( unival *scratch, int current_cf, - rrd_value_t pdp_temp_val, unsigned long rra_step_cnt, - unsigned long elapsed_pdp_st, unsigned long start_pdp_offset, - unsigned long pdp_cnt, rrd_value_t xff, int i, int ii) { - /* shorthand variables */ - rrd_value_t *cdp_val = &scratch[CDP_val].u_val; - rrd_value_t *cdp_primary_val = &scratch[CDP_primary_val].u_val; - rrd_value_t *cdp_secondary_val = &scratch[CDP_secondary_val].u_val; - unsigned long *cdp_unkn_pdp_cnt = &scratch[CDP_unkn_pdp_cnt].u_cnt; - - if (rra_step_cnt) { - /* If we are in this block, as least 1 CDP value will be written to - * disk, this is the CDP_primary_val entry. 
If more than 1 value needs - * to be written, then the "fill in" value is the CDP_secondary_val - * entry. */ - if (isnan(pdp_temp_val)) { - *cdp_unkn_pdp_cnt += start_pdp_offset; - *cdp_secondary_val = DNAN; - } else { - /* CDP_secondary value is the RRA "fill in" value for intermediary - * CDP data entries. No matter the CF, the value is the same because - * the average, max, min, and last of a list of identical values is - * the same, namely, the value itself. */ - *cdp_secondary_val = pdp_temp_val; - } - - if (*cdp_unkn_pdp_cnt > pdp_cnt * xff) { - *cdp_primary_val = DNAN; - } else { - initialize_cdp_val(scratch, current_cf, pdp_temp_val, - start_pdp_offset, pdp_cnt); - } - *cdp_val = - initialize_carry_over(pdp_temp_val,current_cf, - elapsed_pdp_st, - start_pdp_offset, pdp_cnt); - /* endif meets xff value requirement for a valid value */ - /* initialize carry over CDP_unkn_pdp_cnt, this must after CDP_primary_val - * is set because CDP_unkn_pdp_cnt is required to compute that value. */ - if (isnan(pdp_temp_val)) - *cdp_unkn_pdp_cnt = (elapsed_pdp_st - start_pdp_offset) % pdp_cnt; - else - *cdp_unkn_pdp_cnt = 0; - } else { /* rra_step_cnt[i] == 0 */ - -#ifdef DEBUG - if (isnan(*cdp_val)) { - fprintf(stderr, "schedule CDP_val update, RRA %d DS %d, DNAN\n", - i, ii); - } else { - fprintf(stderr, "schedule CDP_val update, RRA %d DS %d, %10.2f\n", - i, ii, *cdp_val); - } -#endif - if (isnan(pdp_temp_val)) { - *cdp_unkn_pdp_cnt += elapsed_pdp_st; - } else { - *cdp_val = - calculate_cdp_val(*cdp_val, pdp_temp_val, elapsed_pdp_st, - current_cf, i, ii); - } - } -} - -/* - * Set the CDP_primary_val and CDP_val to the appropriate initial value based - * on the type of consolidation function. 
- */ -static void initialize_cdp_val( unival *scratch, int current_cf, - rrd_value_t pdp_temp_val, unsigned long start_pdp_offset, - unsigned long pdp_cnt) { - rrd_value_t cum_val, cur_val; - - switch (current_cf) { - case CF_AVERAGE: - if(isnan(scratch[CDP_val].u_val) && isnan(pdp_temp_val)){ - scratch[CDP_primary_val].u_val = DINF; - }else{ - cum_val = IFDNAN(scratch[CDP_val].u_val, 0.0); - cur_val = IFDNAN(pdp_temp_val, 0.0); - scratch[CDP_primary_val].u_val = - (cum_val + cur_val * start_pdp_offset) / - (pdp_cnt - scratch[CDP_unkn_pdp_cnt].u_cnt); - } - break; - case CF_MAXIMUM: - cum_val = IFDNAN(scratch[CDP_val].u_val, -DINF); - cur_val = IFDNAN(pdp_temp_val, -DINF); - -#if 0 -#ifdef DEBUG - if (isnan(scratch[CDP_val].u_val) && isnan(pdp_temp)) { - fprintf(stderr, - "RRA %lu, DS %lu, both CDP_val and pdp_temp are DNAN!", - i, ii); - exit(-1); - } -#endif -#endif - if (cur_val > cum_val) - scratch[CDP_primary_val].u_val = cur_val; - else - scratch[CDP_primary_val].u_val = cum_val; - break; - case CF_MINIMUM: - cum_val = IFDNAN(scratch[CDP_val].u_val, DINF); - cur_val = IFDNAN(pdp_temp_val, DINF); -#if 0 -#ifdef DEBUG - if (isnan(scratch[CDP_val].u_val) && isnan(pdp_temp)) { - fprintf(stderr, - "RRA %lu, DS %lu, both CDP_val and pdp_temp are DNAN!", i, - ii); - exit(-1); - } -#endif -#endif - if (cur_val < cum_val) - scratch[CDP_primary_val].u_val = cur_val; - else - scratch[CDP_primary_val].u_val = cum_val; - break; - case CF_LAST: - default: - scratch[CDP_primary_val].u_val = pdp_temp_val; - break; - } -} - -/* - * Update the consolidation function for Holt-Winters functions as - * well as other functions that don't actually consolidate multiple - * PDPs. 
- */ -static int reset_cdp( rrd_t *rrd, unsigned long elapsed_pdp_st, - rrd_value_t *pdp_temp, rrd_value_t *last_seasonal_coef, - rrd_value_t *seasonal_coef, int rra_idx, int ds_idx, - int cdp_idx, enum cf_en current_cf) { - unival *scratch = rrd->cdp_prep[cdp_idx].scratch; - int ret = 0; - - switch (current_cf) { - case CF_AVERAGE: - default: - scratch[CDP_primary_val].u_val = pdp_temp[ds_idx]; - scratch[CDP_secondary_val].u_val = pdp_temp[ds_idx]; - break; - case CF_SEASONAL: - case CF_DEVSEASONAL: - /* need to update cached seasonal values, so they are consistent - * with the bulk update */ - /* WARNING: code relies on the fact that CDP_hw_last_seasonal and - * CDP_last_deviation are the same. */ - scratch[CDP_hw_last_seasonal].u_val = last_seasonal_coef[ds_idx]; - scratch[CDP_hw_seasonal].u_val = seasonal_coef[ds_idx]; - break; - case CF_HWPREDICT: - case CF_MHWPREDICT: - /* need to update the null_count and last_null_count. - * even do this for non-DNAN pdp_temp because the - * algorithm is not learning from batch updates. */ - scratch[CDP_null_count].u_cnt += elapsed_pdp_st; - scratch[CDP_last_null_count].u_cnt += elapsed_pdp_st - 1; - /* fall through */ - case CF_DEVPREDICT: - scratch[CDP_primary_val].u_val = DNAN; - scratch[CDP_secondary_val].u_val = DNAN; - break; - case CF_FAILURES: - /* do not count missed bulk values as failures */ - scratch[CDP_primary_val].u_val = 0; - scratch[CDP_secondary_val].u_val = 0; - /* need to reset violations buffer. - * could do this more carefully, but for now, just - * assume a bulk update wipes away all violations. 
*/ - ret = erase_violations(rrd, cdp_idx, rra_idx); - break; - } - return ret; -} - -static rrd_value_t initialize_carry_over( rrd_value_t pdp_temp_val, - int current_cf, unsigned long elapsed_pdp_st, - unsigned long start_pdp_offset, unsigned long pdp_cnt) { - unsigned long pdp_into_cdp_cnt = ((elapsed_pdp_st - start_pdp_offset) % pdp_cnt); - if ( pdp_into_cdp_cnt == 0 || isnan(pdp_temp_val)){ - switch (current_cf) { - case CF_MAXIMUM: - return -DINF; - case CF_MINIMUM: - return DINF; - case CF_AVERAGE: - return 0; - default: - return DNAN; - } - } - else { - switch (current_cf) { - case CF_AVERAGE: - return pdp_temp_val * pdp_into_cdp_cnt ; - default: - return pdp_temp_val; - } - } -} - -/* - * Update or initialize a CDP value based on the consolidation - * function. - * - * Returns the new value. - */ -static rrd_value_t calculate_cdp_val( rrd_value_t cdp_val, - rrd_value_t pdp_temp_val, unsigned long elapsed_pdp_st, int current_cf, -#ifdef DEBUG - int i, int ii -#else - int UNUSED(i), int UNUSED(ii) -#endif - ) -{ - if (isnan(cdp_val)) { - if (current_cf == CF_AVERAGE) { - pdp_temp_val *= elapsed_pdp_st; - } -#ifdef DEBUG - fprintf(stderr, "Initialize CDP_val for RRA %d DS %d: %10.2f\n", - i, ii, pdp_temp_val); -#endif - return pdp_temp_val; - } - if (current_cf == CF_AVERAGE) - return cdp_val + pdp_temp_val * elapsed_pdp_st; - if (current_cf == CF_MINIMUM) - return (pdp_temp_val < cdp_val) ? pdp_temp_val : cdp_val; - if (current_cf == CF_MAXIMUM) - return (pdp_temp_val > cdp_val) ? pdp_temp_val : cdp_val; - - return pdp_temp_val; -} - -/* - * For each RRA, update the seasonal values and then call update_aberrant_CF - * for each data source. - * - * Return 0 on success, < 0 on error. 
- */ -static int update_aberrant_cdps( rrd_t *rrd, rrd_file_t *rrd_file, - unsigned long rra_begin, unsigned long elapsed_pdp_st, - rrd_value_t *pdp_temp, rrd_value_t **seasonal_coef) { - unsigned long rra_idx, ds_idx, j; - - /* number of PDP steps since the last update that - * are assigned to the first CDP to be generated - * since the last update. */ - unsigned short scratch_idx; - unsigned long rra_start; - enum cf_en current_cf; - int r, ret = 0; - - /* this loop is only entered if elapsed_pdp_st < 3 */ - for (j = elapsed_pdp_st, scratch_idx = CDP_primary_val; - j > 0 && j < 3; j--, scratch_idx = CDP_secondary_val) { - rra_start = rra_begin; - for (rra_idx = 0; rra_idx < rrd->stat_head->rra_cnt; rra_idx++) { - if (rrd->rra_def[rra_idx].pdp_cnt == 1) { - current_cf = cf_conv(rrd->rra_def[rra_idx].cf_nam); - if (current_cf == CF_SEASONAL || current_cf == CF_DEVSEASONAL) { - if (scratch_idx == CDP_primary_val) { - r = lookup_seasonal(rrd, rra_idx, rra_start, rrd_file, - elapsed_pdp_st + 1, seasonal_coef); - } else { - r = lookup_seasonal(rrd, rra_idx, rra_start, rrd_file, - elapsed_pdp_st + 2, seasonal_coef); - } - }else if(current_cf < 0){ - return -RRD_ERR_UNREC_CONSOLIDATION_FUNC; - } - /* loop over data soures within each RRA */ - for (ds_idx = 0; ds_idx < rrd->stat_head->ds_cnt; ds_idx++) { - r = update_aberrant_CF(rrd, pdp_temp[ds_idx], current_cf, - rra_idx * (rrd->stat_head->ds_cnt) + - ds_idx, rra_idx, ds_idx, scratch_idx, - *seasonal_coef); - } - } - rra_start += rrd->rra_def[rra_idx].row_cnt - * rrd->stat_head->ds_cnt * sizeof(rrd_value_t); - if (r) - ret = r; - } - } - return ret; -} - -/* - * Move sequentially through the file, writing one RRA at a time. Note this - * architecture divorces the computation of CDP with flushing updated RRA - * entries to disk. - * - * Return 0 on success, < 0 on error. 
- */ -static int write_to_rras( rrd_t *rrd, rrd_file_t *rrd_file, - unsigned long *rra_step_cnt, unsigned long rra_begin, - time_t current_time, unsigned long *skip_update, - rrd_info_t ** pcdp_summary, int periodic) { - unsigned long rra_idx; - unsigned long rra_start; - time_t rra_time = 0; /* time of update for a RRA */ - - unsigned long ds_cnt = rrd->stat_head->ds_cnt; - int ret = 0; - - /* Ready to write to disk */ - rra_start = rra_begin; - - for (rra_idx = 0; rra_idx < rrd->stat_head->rra_cnt; rra_idx++) { - rra_def_t *rra_def = &rrd->rra_def[rra_idx]; - rra_ptr_t *rra_ptr = &rrd->rra_ptr[rra_idx]; - - /* for cdp_prep */ - unsigned short scratch_idx; - unsigned long step_subtract; - - for (scratch_idx = CDP_primary_val, - step_subtract = 1; - rra_step_cnt[rra_idx] > 0; - rra_step_cnt[rra_idx]--, - scratch_idx = CDP_secondary_val, - step_subtract = 2) { - - size_t rra_pos_new; -#ifdef DEBUG - fprintf(stderr, " -- RRA Preseek %ld\n", rrd_file->pos); -#endif - /* increment, with wrap-around */ - if (++rra_ptr->cur_row >= rra_def->row_cnt) - rra_ptr->cur_row = 0; - - /* we know what our position should be */ - rra_pos_new = rra_start - + ds_cnt * rra_ptr->cur_row * sizeof(rrd_value_t); - - /* re-seek if the position is wrong or we wrapped around */ - if ((size_t)rra_pos_new != rrd_file->pos) { - if (rrd_seek(rrd_file, rra_pos_new, SEEK_SET) != 0) { - return -RRD_ERR_SEEK5; - } - } -#ifdef DEBUG - fprintf(stderr, " -- RRA Postseek %ld\n", rrd_file->pos); -#endif - - if (skip_update[rra_idx]) - continue; - - if (*pcdp_summary != NULL) { - unsigned long step_time = rra_def->pdp_cnt * rrd->stat_head->pdp_step; - - rra_time = (current_time - current_time % step_time) - - ((rra_step_cnt[rra_idx] - step_subtract) * step_time); - } - - if (periodic == 1) { - if ((ret = write_RRA_row(rrd_file, rrd, rra_idx, scratch_idx, - pcdp_summary, rra_time, 1)) < 0) - return ret; - } else { - if (rra_step_cnt[rra_idx] == 1) { - if ((ret = write_RRA_row(rrd_file, rrd, rra_idx, 
scratch_idx, - pcdp_summary, rra_time, 1)) < 0) - return ret; - } else { - if ((ret = write_RRA_row(rrd_file, rrd, rra_idx, scratch_idx, - pcdp_summary, rra_time, 0)) < 0) - return ret; - } - } - - - rrd_notify_row(rrd_file, rra_idx, rra_pos_new, rra_time); - } - - rra_start += rra_def->row_cnt * ds_cnt * sizeof(rrd_value_t); - } /* RRA LOOP */ - - return 0; -} - -/* - * Write out one row of values (one value per DS) to the archive. - * - * Returns 0 on success, < 0 on error. - */ -static int write_RRA_row( rrd_file_t *rrd_file, rrd_t *rrd, - unsigned long rra_idx, unsigned short CDP_scratch_idx, - rrd_info_t ** pcdp_summary, time_t rra_time, int flag) { - unsigned long ds_idx, cdp_idx; - rrd_infoval_t iv; - - for (ds_idx = 0; ds_idx < rrd->stat_head->ds_cnt; ds_idx++) { - /* compute the cdp index */ - cdp_idx = rra_idx * (rrd->stat_head->ds_cnt) + ds_idx; -#ifdef DEBUG - fprintf(stderr, " -- RRA WRITE VALUE %e, at %ld CF:%s\n", - rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val, - rrd_file->pos, rrd->rra_def[rra_idx].cf_nam); -#endif - if (*pcdp_summary != NULL) { - iv.u_val = rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val; - /* append info to the return hash */ - *pcdp_summary = rrd_info_push(*pcdp_summary, - sprintf_alloc - ("[%lli]RRA[%s][%lu]DS[%s]", - (long long)rra_time, - rrd->rra_def[rra_idx].cf_nam, - rrd->rra_def[rra_idx].pdp_cnt, - rrd->ds_def[ds_idx].ds_nam), - RD_I_VAL, iv); - } - errno = 0; - - //if flag == 0 , write nan - //if flag == 1 , write normally - // rrd_set_to_DNAN - if (flag == 0) { - rrd_value_t tmp; - tmp = rrd_set_to_DNAN(); - if (rrd_write(rrd_file, &tmp, sizeof(rrd_value_t)) != sizeof(rrd_value_t)) { - return -RRD_ERR_WRITE8; - } - } else { - if (rrd_write(rrd_file, - &(rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx]. - u_val), sizeof(rrd_value_t)) != sizeof(rrd_value_t)) { - return -RRD_ERR_WRITE8; - } - } - } - return 0; -} - -/* - * Call apply_smoother for all DEVSEASONAL and SEASONAL RRAs. 
- * - * Returns 0 on success, < 0 otherwise - */ -static int smooth_all_rras( rrd_t *rrd, rrd_file_t *rrd_file, - unsigned long rra_begin) { - unsigned long rra_start = rra_begin; - unsigned long rra_idx; - int ret; - - for (rra_idx = 0; rra_idx < rrd->stat_head->rra_cnt; ++rra_idx) { - if (cf_conv(rrd->rra_def[rra_idx].cf_nam) == CF_DEVSEASONAL || - cf_conv(rrd->rra_def[rra_idx].cf_nam) == CF_SEASONAL) { -#ifdef DEBUG - fprintf(stderr, "Running smoother for rra %lu\n", rra_idx); -#endif - ret = apply_smoother(rrd, rra_idx, rra_start, rrd_file); - if (ret) - return ret; - } - rra_start += rrd->rra_def[rra_idx].row_cnt - * rrd->stat_head->ds_cnt * sizeof(rrd_value_t); - } - return 0; -} - -#ifndef HAVE_MMAP -/* - * Flush changes to disk (unless we're using mmap) - * - * Returns 0 on success, < 0 otherwise - */ -static int write_changes_to_disk( rrd_t *rrd, rrd_file_t *rrd_file, - int version) { - /* we just need to write back the live header portion now */ - if (rrd_seek(rrd_file, (sizeof(stat_head_t) - + sizeof(ds_def_t) * rrd->stat_head->ds_cnt - + sizeof(rra_def_t) * rrd->stat_head->rra_cnt), - SEEK_SET) != 0) { - return -RRD_ERR_SEEK6; - } - if (version >= 3) { - if (rrd_write(rrd_file, rrd->live_head, - sizeof(live_head_t) * 1) != sizeof(live_head_t) * 1) { - return -RRD_ERR_WRITE9; - } - } else { - if (rrd_write(rrd_file, rrd->legacy_last_up, - sizeof(time_t) * 1) != sizeof(time_t) * 1) { - return -RRD_ERR_WRITE9; - } - } - - - if (rrd_write(rrd_file, rrd->pdp_prep, - sizeof(pdp_prep_t) * rrd->stat_head->ds_cnt) - != (ssize_t) (sizeof(pdp_prep_t) * rrd->stat_head->ds_cnt)) { - return -RRD_ERR_WRITE10; - } - - if (rrd_write(rrd_file, rrd->cdp_prep, - sizeof(cdp_prep_t) * rrd->stat_head->rra_cnt * - rrd->stat_head->ds_cnt) - != (ssize_t) (sizeof(cdp_prep_t) * rrd->stat_head->rra_cnt * - rrd->stat_head->ds_cnt)) { - - return -RRD_ERR_WRITE11; - } - - if (rrd_write(rrd_file, rrd->rra_ptr, - sizeof(rra_ptr_t) * rrd->stat_head->rra_cnt) - != (ssize_t) 
(sizeof(rra_ptr_t) * rrd->stat_head->rra_cnt)) { - return -RRD_ERR_WRITE12; - } - return 0; -} -#endif diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_utils.c b/vendor/github.com/open-falcon/rrdlite/rrd_utils.c deleted file mode 100644 index 94c86558..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_utils.c +++ /dev/null @@ -1,183 +0,0 @@ -/** - * RRDtool - src/rrd_utils.c - * Copyright (C) 2009 Kevin Brintnall - * Copyright (C) 2008 Sebastian Harl - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; only version 2 of the License is applicable. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Authors: - * kevin brintnall - * Sebastian Harl - **/ - -#include "rrd_tool.h" - -#include -#include -#include -#include -#include -#include -#ifndef _MSC_VER -#include -#include -#endif -#ifdef WIN32 -# define random() rand() -# define srandom(x) srand(x) -# define getpid() 0 -#endif /* WIN32 */ - -#ifndef S_ISDIR -#define S_ISDIR(x) (((x) & S_IFMT) == S_IFDIR) -#endif - -/* make sure that the random number generator seeded EXACTLY ONCE */ -long rrd_random(void) -{ - static int rand_init = 0; - if (!rand_init) { - srandom((unsigned int) time(NULL) + (unsigned int) getpid()); - rand_init++; - } - - return random(); -} - -/* rrd_add_ptr: add a pointer to a dynamically sized array of pointers, - * realloc as necessary. returns 1 on success, 0 on failure. 
- */ - -int rrd_add_ptr(void ***dest, size_t *dest_size, void *src) -{ - void **temp; - - assert(dest != NULL); - - temp = (void **) rrd_realloc(*dest, (*dest_size+1) * sizeof(*dest)); - if (!temp) - return 0; - - *dest = temp; - temp[*dest_size] = src; - (*dest_size)++; - - return 1; -} - -/* like rrd_add_ptr, but calls strdup() on a string first. */ -int rrd_add_strdup(char ***dest, size_t *dest_size, char *src) -{ - char *dup_src; - int add_ok; - - assert(dest != NULL); - assert(src != NULL); - - dup_src = strdup(src); - if (!dup_src) - return 0; - - add_ok = rrd_add_ptr((void ***)dest, dest_size, (void *)dup_src); - if (!add_ok) - free(dup_src); - - return add_ok; -} - -void rrd_free_ptrs(void ***src, size_t *cnt) -{ - void **sp; - - assert(src != NULL); - sp = *src; - - if (sp == NULL) - return; - - while (*cnt > 0) { - (*cnt)--; - free(sp[*cnt]); - } - - free (sp); - *src = NULL; -} - -/* recursively create the directory named by 'pathname' - * (similar to "mkdir -p" on the command line) */ -int rrd_mkdir_p(const char *pathname, mode_t mode) -{ - struct stat sb; - - char *pathname_copy; - char *base_dir; - - if ((NULL == pathname) || ('\0' == *pathname)) { - errno = EINVAL; - return -1; - } - - if (0 == stat(pathname, &sb)) { - if (! 
S_ISDIR(sb.st_mode)) { - errno = ENOTDIR; - return -1; - } - return 0; - } - - /* keep errno as set by stat() */ - if (ENOENT != errno) - return -1; - - /* dirname might modify its first argument */ - if (NULL == (pathname_copy = strdup(pathname))) - return -1; - -#ifndef _MSC_VER - /* the data pointedd too by dirname might change too (bsd) */ - if (NULL == (base_dir = strdup(dirname(pathname_copy)))) { - free(pathname_copy); - return -1; - } -#else - _splitpath(pathname_copy, NULL, base_dir, NULL, NULL); -#endif - - if (0 != rrd_mkdir_p(base_dir, mode)) { - int orig_errno = errno; - free(pathname_copy); -#ifndef _MSC_VER - free(base_dir); -#endif - errno = orig_errno; - return -1; - } - - free(pathname_copy); -#ifndef _MSC_VER - free(base_dir); -#endif - - /* keep errno as set by mkdir() */ -#ifdef _MSC_VER - if (0 != mkdir(pathname)) - return -1; -#else - if (0 != mkdir(pathname, mode)) - return -1; -#endif - return 0; -} /* rrd_mkdir_p */ - diff --git a/vendor/github.com/open-falcon/rrdlite/rrd_version.c b/vendor/github.com/open-falcon/rrdlite/rrd_version.c deleted file mode 100644 index 9487254f..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrd_version.c +++ /dev/null @@ -1,21 +0,0 @@ -/***************************************************************************** - * RRDtool 1.4.9 Copyright by Tobi Oetiker, 1997-2014 - ***************************************************************************** - * rrd_version Return - ***************************************************************************** - * Initial version by Burton Strauss, ntopSupport.com - 5/2005 - *****************************************************************************/ - -#include "rrd_tool.h" - -double rrd_version( - void) -{ - return NUMVERS; -} - -char *rrd_strversion( - void) -{ - return PACKAGE_VERSION; -} diff --git a/vendor/github.com/open-falcon/rrdlite/rrdfunc.c b/vendor/github.com/open-falcon/rrdlite/rrdfunc.c deleted file mode 100644 index 13d460cd..00000000 --- 
a/vendor/github.com/open-falcon/rrdlite/rrdfunc.c +++ /dev/null @@ -1,50 +0,0 @@ -#include -#include - -#include "rrd.h" - -const char *rrdCreate(const char *filename, unsigned long step, - time_t start, int argc, const char **argv) { - int ret; - ret = rrd_create_r(filename, step, start, argc, argv); - if (ret != 0){ - printf("rrd_create %s, errno: %d\n", filename, ret); - } - return rrd_strerror(ret); -} - -const char *rrdUpdate(const char *filename, const char *template, - int argc, const char **argv) { - int ret; - ret = rrd_update_r(filename, template, argc, argv); - if (ret != 0){ - printf("rrd_update %s, errno: %d\n", filename, ret); - } - return rrd_strerror(ret); -} - - -const char *rrdInfo(rrd_info_t **info, char *filename) { - int ret = 0; - *info = rrd_info_r(filename, &ret); - if (ret != 0){ - printf("rrd_info %s, errno: %d\n", filename, ret); - } - return rrd_strerror(ret); -} - -const char *rrdFetch(int *ret, char *filename, const char *cf, time_t *start, - time_t *end, unsigned long *step, unsigned long *ds_cnt, - char ***ds_namv, double **data) { - //setbuf(stdout, NULL); - //setbuf(stderr, NULL); - *ret = rrd_fetch_r(filename, cf, start, end, step, ds_cnt, ds_namv, data); - if (*ret != 0){ - printf("rrdfetch %s, errno: %d\n", filename, *ret); - } - return rrd_strerror(*ret); -} - -char *arrayGetCString(char **values, int i) { - return values[i]; -} diff --git a/vendor/github.com/open-falcon/rrdlite/rrdfunc.h b/vendor/github.com/open-falcon/rrdlite/rrdfunc.h deleted file mode 100644 index 075ae536..00000000 --- a/vendor/github.com/open-falcon/rrdlite/rrdfunc.h +++ /dev/null @@ -1,5 +0,0 @@ -extern const char *rrdCreate(const char *filename, unsigned long step, time_t start, int argc, const char **argv); -extern const char *rrdUpdate(const char *filename, const char *template, int argc, const char **argv); -extern const char *rrdInfo(rrd_info_t **ret, char *filename); -extern const char *rrdFetch(int *ret, char *filename, const char *cf, time_t 
*start, time_t *end, unsigned long *step, unsigned long *ds_cnt, char ***ds_namv, double **data); -extern char *arrayGetCString(char **values, int i); diff --git a/vendor/github.com/open-falcon/rrdlite/unused.h b/vendor/github.com/open-falcon/rrdlite/unused.h deleted file mode 100644 index ecaa4de7..00000000 --- a/vendor/github.com/open-falcon/rrdlite/unused.h +++ /dev/null @@ -1,14 +0,0 @@ -/* define a macro to wrap variables that would - otherwise generate UNUSED variable warnings - Note that GCC's attribute unused only supresses the warning, so - it is perfectly safe to declare something unused although it is not. -*/ - -#ifdef UNUSED -#elif defined(__GNUC__) -# define UNUSED(x) x __attribute__((unused)) -#elif defined(__LCLINT__) -# define UNUSED(x) /*@unused@*/ x -#else -# define UNUSED(x) x -#endif diff --git a/vendor/github.com/sparrc/go-ping/.gitignore b/vendor/github.com/sparrc/go-ping/.gitignore new file mode 100644 index 00000000..db723fb6 --- /dev/null +++ b/vendor/github.com/sparrc/go-ping/.gitignore @@ -0,0 +1 @@ +/ping diff --git a/vendor/github.com/codegangsta/negroni/LICENSE b/vendor/github.com/sparrc/go-ping/LICENSE similarity index 96% rename from vendor/github.com/codegangsta/negroni/LICENSE rename to vendor/github.com/sparrc/go-ping/LICENSE index 08b5e20a..981053c9 100644 --- a/vendor/github.com/codegangsta/negroni/LICENSE +++ b/vendor/github.com/sparrc/go-ping/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2014 Jeremy Saenz +Copyright (c) 2016 Cameron Sparr Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/sparrc/go-ping/README.md b/vendor/github.com/sparrc/go-ping/README.md new file mode 100644 index 00000000..f6b828b3 --- /dev/null +++ b/vendor/github.com/sparrc/go-ping/README.md @@ -0,0 +1,102 @@ +# go-ping 
+[![GoDoc](https://godoc.org/github.com/sparrc/go-ping?status.svg)](https://godoc.org/github.com/sparrc/go-ping) +[![Circle CI](https://circleci.com/gh/sparrc/go-ping.svg?style=svg)](https://circleci.com/gh/sparrc/go-ping) + +ICMP Ping library for Go, inspired by +[go-fastping](https://github.com/tatsushid/go-fastping) + +Here is a very simple example that sends & receives 3 packets: + +```go +pinger, err := ping.NewPinger("www.google.com") +if err != nil { + panic(err) +} +pinger.Count = 3 +pinger.Run() // blocks until finished +stats := pinger.Statistics() // get send/receive/rtt stats +``` + +Here is an example that emulates the unix ping command: + +```go +pinger, err := ping.NewPinger("www.google.com") +if err != nil { + panic(err) +} + +// listen for ctrl-C signal +c := make(chan os.Signal, 1) +signal.Notify(c, os.Interrupt) +go func() { + for _ = range c { + pinger.Stop() + } +}() + +pinger.OnRecv = func(pkt *ping.Packet) { + fmt.Printf("%d bytes from %s: icmp_seq=%d time=%v\n", + pkt.Nbytes, pkt.IPAddr, pkt.Seq, pkt.Rtt) +} +pinger.OnFinish = func(stats *ping.Statistics) { + fmt.Printf("\n--- %s ping statistics ---\n", stats.Addr) + fmt.Printf("%d packets transmitted, %d packets received, %v%% packet loss\n", + stats.PacketsSent, stats.PacketsRecv, stats.PacketLoss) + fmt.Printf("round-trip min/avg/max/stddev = %v/%v/%v/%v\n", + stats.MinRtt, stats.AvgRtt, stats.MaxRtt, stats.StdDevRtt) +} + +fmt.Printf("PING %s (%s):\n", pinger.Addr(), pinger.IPAddr()) +pinger.Run() +``` + +It sends ICMP packet(s) and waits for a response. If it receives a response, +it calls the "receive" callback. When it's finished, it calls the "finish" +callback. + +For a full ping example, see +[cmd/ping/ping.go](https://github.com/sparrc/go-ping/blob/master/cmd/ping/ping.go) + +## Installation: + +``` +go get github.com/sparrc/go-ping +``` + +To install the native Go ping executable: + +```bash +go get github.com/sparrc/go-ping/... 
+$GOPATH/bin/ping +``` + +## Note on Linux Support: + +This library attempts to send an +"unprivileged" ping via UDP. On linux, this must be enabled by setting + +``` +sudo sysctl -w net.ipv4.ping_group_range="0 2147483647" +``` + +If you do not wish to do this, you can set `pinger.SetPrivileged(true)` and +use setcap to allow your binary using go-ping to bind to raw sockets +(or just run as super-user): + +``` +setcap cap_net_raw=+ep /bin/go-ping +``` + +See [this blog](https://sturmflut.github.io/linux/ubuntu/2015/01/17/unprivileged-icmp-sockets-on-linux/) +and [the Go icmp library](https://godoc.org/golang.org/x/net/icmp) for more details. + +## Note on Windows Support: + +You must use `pinger.SetPrivileged(true)`, otherwise you will receive an error: + +``` +Error listening for ICMP packets: socket: The requested protocol has not been configured into the system, or no implementation for it exists. +``` + +This should without admin privileges. Tested on Windows 10. + diff --git a/vendor/github.com/sparrc/go-ping/ping.go b/vendor/github.com/sparrc/go-ping/ping.go new file mode 100644 index 00000000..c48730bc --- /dev/null +++ b/vendor/github.com/sparrc/go-ping/ping.go @@ -0,0 +1,602 @@ +// Package ping is an ICMP ping library seeking to emulate the unix "ping" +// command. 
+// +// Here is a very simple example that sends & receives 3 packets: +// +// pinger, err := ping.NewPinger("www.google.com") +// if err != nil { +// panic(err) +// } +// +// pinger.Count = 3 +// pinger.Run() // blocks until finished +// stats := pinger.Statistics() // get send/receive/rtt stats +// +// Here is an example that emulates the unix ping command: +// +// pinger, err := ping.NewPinger("www.google.com") +// if err != nil { +// fmt.Printf("ERROR: %s\n", err.Error()) +// return +// } +// +// pinger.OnRecv = func(pkt *ping.Packet) { +// fmt.Printf("%d bytes from %s: icmp_seq=%d time=%v\n", +// pkt.Nbytes, pkt.IPAddr, pkt.Seq, pkt.Rtt) +// } +// pinger.OnFinish = func(stats *ping.Statistics) { +// fmt.Printf("\n--- %s ping statistics ---\n", stats.Addr) +// fmt.Printf("%d packets transmitted, %d packets received, %v%% packet loss\n", +// stats.PacketsSent, stats.PacketsRecv, stats.PacketLoss) +// fmt.Printf("round-trip min/avg/max/stddev = %v/%v/%v/%v\n", +// stats.MinRtt, stats.AvgRtt, stats.MaxRtt, stats.StdDevRtt) +// } +// +// fmt.Printf("PING %s (%s):\n", pinger.Addr(), pinger.IPAddr()) +// pinger.Run() +// +// It sends ICMP packet(s) and waits for a response. If it receives a response, +// it calls the "receive" callback. When it's finished, it calls the "finish" +// callback. +// +// For a full ping example, see "cmd/ping/ping.go". 
+// +package ping + +import ( + "bytes" + "encoding/binary" + "fmt" + "math" + "math/rand" + "net" + "sync" + "syscall" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +const ( + timeSliceLength = 8 + trackerLength = 8 + protocolICMP = 1 + protocolIPv6ICMP = 58 +) + +var ( + ipv4Proto = map[string]string{"ip": "ip4:icmp", "udp": "udp4"} + ipv6Proto = map[string]string{"ip": "ip6:ipv6-icmp", "udp": "udp6"} +) + +// NewPinger returns a new Pinger struct pointer +func NewPinger(addr string) (*Pinger, error) { + ipaddr, err := net.ResolveIPAddr("ip", addr) + if err != nil { + return nil, err + } + + var ipv4 bool + if isIPv4(ipaddr.IP) { + ipv4 = true + } else if isIPv6(ipaddr.IP) { + ipv4 = false + } + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + return &Pinger{ + ipaddr: ipaddr, + addr: addr, + Interval: time.Second, + Timeout: time.Second * 100000, + Count: -1, + id: r.Intn(math.MaxInt16), + network: "udp", + ipv4: ipv4, + Size: timeSliceLength, + Tracker: r.Int63n(math.MaxInt64), + done: make(chan bool), + }, nil +} + +// Pinger represents ICMP packet sender/receiver +type Pinger struct { + // Interval is the wait time between each packet send. Default is 1s. + Interval time.Duration + + // Timeout specifies a timeout before ping exits, regardless of how many + // packets have been received. + Timeout time.Duration + + // Count tells pinger to stop after sending (and receiving) Count echo + // packets. If this option is not specified, pinger will operate until + // interrupted. 
+ Count int + + // Debug runs in debug mode + Debug bool + + // Number of packets sent + PacketsSent int + + // Number of packets received + PacketsRecv int + + // rtts is all of the Rtts + rtts []time.Duration + + // OnRecv is called when Pinger receives and processes a packet + OnRecv func(*Packet) + + // OnFinish is called when Pinger exits + OnFinish func(*Statistics) + + // Size of packet being sent + Size int + + // Tracker: Used to uniquely identify packet when non-priviledged + Tracker int64 + + // Source is the source IP address + Source string + + // stop chan bool + done chan bool + + ipaddr *net.IPAddr + addr string + + ipv4 bool + size int + id int + sequence int + network string +} + +type packet struct { + bytes []byte + nbytes int + ttl int +} + +// Packet represents a received and processed ICMP echo packet. +type Packet struct { + // Rtt is the round-trip time it took to ping. + Rtt time.Duration + + // IPAddr is the address of the host being pinged. + IPAddr *net.IPAddr + + // Addr is the string address of the host being pinged. + Addr string + + // NBytes is the number of bytes in the message. + Nbytes int + + // Seq is the ICMP sequence number. + Seq int + + // TTL is the Time To Live on the packet. + Ttl int +} + +// Statistics represent the stats of a currently running or finished +// pinger operation. +type Statistics struct { + // PacketsRecv is the number of packets received. + PacketsRecv int + + // PacketsSent is the number of packets sent. + PacketsSent int + + // PacketLoss is the percentage of packets lost. + PacketLoss float64 + + // IPAddr is the address of the host being pinged. + IPAddr *net.IPAddr + + // Addr is the string address of the host being pinged. + Addr string + + // Rtts is all of the round-trip times sent via this pinger. + Rtts []time.Duration + + // MinRtt is the minimum round-trip time sent via this pinger. + MinRtt time.Duration + + // MaxRtt is the maximum round-trip time sent via this pinger. 
+ MaxRtt time.Duration + + // AvgRtt is the average round-trip time sent via this pinger. + AvgRtt time.Duration + + // StdDevRtt is the standard deviation of the round-trip times sent via + // this pinger. + StdDevRtt time.Duration +} + +// SetIPAddr sets the ip address of the target host. +func (p *Pinger) SetIPAddr(ipaddr *net.IPAddr) { + var ipv4 bool + if isIPv4(ipaddr.IP) { + ipv4 = true + } else if isIPv6(ipaddr.IP) { + ipv4 = false + } + + p.ipaddr = ipaddr + p.addr = ipaddr.String() + p.ipv4 = ipv4 +} + +// IPAddr returns the ip address of the target host. +func (p *Pinger) IPAddr() *net.IPAddr { + return p.ipaddr +} + +// SetAddr resolves and sets the ip address of the target host, addr can be a +// DNS name like "www.google.com" or IP like "127.0.0.1". +func (p *Pinger) SetAddr(addr string) error { + ipaddr, err := net.ResolveIPAddr("ip", addr) + if err != nil { + return err + } + + p.SetIPAddr(ipaddr) + p.addr = addr + return nil +} + +// Addr returns the string ip address of the target host. +func (p *Pinger) Addr() string { + return p.addr +} + +// SetPrivileged sets the type of ping pinger will send. +// false means pinger will send an "unprivileged" UDP ping. +// true means pinger will send a "privileged" raw ICMP ping. +// NOTE: setting to true requires that it be run with super-user privileges. +func (p *Pinger) SetPrivileged(privileged bool) { + if privileged { + p.network = "ip" + } else { + p.network = "udp" + } +} + +// Privileged returns whether pinger is running in privileged mode. +func (p *Pinger) Privileged() bool { + return p.network == "ip" +} + +// Run runs the pinger. This is a blocking function that will exit when it's +// done. If Count or Interval are not specified, it will run continuously until +// it is interrupted. 
+func (p *Pinger) Run() { + p.run() +} + +func (p *Pinger) run() { + var conn *icmp.PacketConn + if p.ipv4 { + if conn = p.listen(ipv4Proto[p.network]); conn == nil { + return + } + conn.IPv4PacketConn().SetControlMessage(ipv4.FlagTTL, true) + } else { + if conn = p.listen(ipv6Proto[p.network]); conn == nil { + return + } + conn.IPv6PacketConn().SetControlMessage(ipv6.FlagHopLimit, true) + } + defer conn.Close() + defer p.finish() + + var wg sync.WaitGroup + recv := make(chan *packet, 5) + defer close(recv) + wg.Add(1) + go p.recvICMP(conn, recv, &wg) + + err := p.sendICMP(conn) + if err != nil { + fmt.Println(err.Error()) + } + + timeout := time.NewTicker(p.Timeout) + defer timeout.Stop() + interval := time.NewTicker(p.Interval) + defer interval.Stop() + + for { + select { + case <-p.done: + wg.Wait() + return + case <-timeout.C: + close(p.done) + wg.Wait() + return + case <-interval.C: + if p.Count > 0 && p.PacketsSent >= p.Count { + continue + } + err = p.sendICMP(conn) + if err != nil { + fmt.Println("FATAL: ", err.Error()) + } + case r := <-recv: + err := p.processPacket(r) + if err != nil { + fmt.Println("FATAL: ", err.Error()) + } + } + if p.Count > 0 && p.PacketsRecv >= p.Count { + close(p.done) + wg.Wait() + return + } + } +} + +func (p *Pinger) Stop() { + close(p.done) +} + +func (p *Pinger) finish() { + handler := p.OnFinish + if handler != nil { + s := p.Statistics() + handler(s) + } +} + +// Statistics returns the statistics of the pinger. This can be run while the +// pinger is running or after it is finished. OnFinish calls this function to +// get it's finished statistics. 
+func (p *Pinger) Statistics() *Statistics { + loss := float64(p.PacketsSent-p.PacketsRecv) / float64(p.PacketsSent) * 100 + var min, max, total time.Duration + if len(p.rtts) > 0 { + min = p.rtts[0] + max = p.rtts[0] + } + for _, rtt := range p.rtts { + if rtt < min { + min = rtt + } + if rtt > max { + max = rtt + } + total += rtt + } + s := Statistics{ + PacketsSent: p.PacketsSent, + PacketsRecv: p.PacketsRecv, + PacketLoss: loss, + Rtts: p.rtts, + Addr: p.addr, + IPAddr: p.ipaddr, + MaxRtt: max, + MinRtt: min, + } + if len(p.rtts) > 0 { + s.AvgRtt = total / time.Duration(len(p.rtts)) + var sumsquares time.Duration + for _, rtt := range p.rtts { + sumsquares += (rtt - s.AvgRtt) * (rtt - s.AvgRtt) + } + s.StdDevRtt = time.Duration(math.Sqrt( + float64(sumsquares / time.Duration(len(p.rtts))))) + } + return &s +} + +func (p *Pinger) recvICMP( + conn *icmp.PacketConn, + recv chan<- *packet, + wg *sync.WaitGroup, +) { + defer wg.Done() + for { + select { + case <-p.done: + return + default: + bytes := make([]byte, 512) + conn.SetReadDeadline(time.Now().Add(time.Millisecond * 100)) + var n, ttl int + var err error + if p.ipv4 { + var cm *ipv4.ControlMessage + n, cm, _, err = conn.IPv4PacketConn().ReadFrom(bytes) + if cm != nil { + ttl = cm.TTL + } + } else { + var cm *ipv6.ControlMessage + n, cm, _, err = conn.IPv6PacketConn().ReadFrom(bytes) + if cm != nil { + ttl = cm.HopLimit + } + } + if err != nil { + if neterr, ok := err.(*net.OpError); ok { + if neterr.Timeout() { + // Read timeout + continue + } else { + close(p.done) + return + } + } + } + + recv <- &packet{bytes: bytes, nbytes: n, ttl: ttl} + } + } +} + +func (p *Pinger) processPacket(recv *packet) error { + receivedAt := time.Now() + var proto int + if p.ipv4 { + proto = protocolICMP + } else { + proto = protocolIPv6ICMP + } + + var m *icmp.Message + var err error + if m, err = icmp.ParseMessage(proto, recv.bytes); err != nil { + return fmt.Errorf("error parsing icmp message: %s", err.Error()) + } + + if 
m.Type != ipv4.ICMPTypeEchoReply && m.Type != ipv6.ICMPTypeEchoReply { + // Not an echo reply, ignore it + return nil + } + + outPkt := &Packet{ + Nbytes: recv.nbytes, + IPAddr: p.ipaddr, + Addr: p.addr, + Ttl: recv.ttl, + } + + switch pkt := m.Body.(type) { + case *icmp.Echo: + // If we are privileged, we can match icmp.ID + if p.network == "ip" { + // Check if reply from same ID + if pkt.ID != p.id { + return nil + } + } + + if len(pkt.Data) < timeSliceLength+trackerLength { + return fmt.Errorf("insufficient data received; got: %d %v", + len(pkt.Data), pkt.Data) + } + + tracker := bytesToInt(pkt.Data[timeSliceLength:]) + timestamp := bytesToTime(pkt.Data[:timeSliceLength]) + + if tracker != p.Tracker { + return nil + } + + outPkt.Rtt = receivedAt.Sub(timestamp) + outPkt.Seq = pkt.Seq + p.PacketsRecv++ + default: + // Very bad, not sure how this can happen + return fmt.Errorf("invalid ICMP echo reply; type: '%T', '%v'", pkt, pkt) + } + + p.rtts = append(p.rtts, outPkt.Rtt) + handler := p.OnRecv + if handler != nil { + handler(outPkt) + } + + return nil +} + +func (p *Pinger) sendICMP(conn *icmp.PacketConn) error { + var typ icmp.Type + if p.ipv4 { + typ = ipv4.ICMPTypeEcho + } else { + typ = ipv6.ICMPTypeEchoRequest + } + + var dst net.Addr = p.ipaddr + if p.network == "udp" { + dst = &net.UDPAddr{IP: p.ipaddr.IP, Zone: p.ipaddr.Zone} + } + + t := append(timeToBytes(time.Now()), intToBytes(p.Tracker)...) + if remainSize := p.Size - timeSliceLength - trackerLength; remainSize > 0 { + t = append(t, bytes.Repeat([]byte{1}, remainSize)...) 
+ } + + body := &icmp.Echo{ + ID: p.id, + Seq: p.sequence, + Data: t, + } + + msg := &icmp.Message{ + Type: typ, + Code: 0, + Body: body, + } + + msgBytes, err := msg.Marshal(nil) + if err != nil { + return err + } + + for { + if _, err := conn.WriteTo(msgBytes, dst); err != nil { + if neterr, ok := err.(*net.OpError); ok { + if neterr.Err == syscall.ENOBUFS { + continue + } + } + } + p.PacketsSent++ + p.sequence++ + break + } + + return nil +} + +func (p *Pinger) listen(netProto string) *icmp.PacketConn { + conn, err := icmp.ListenPacket(netProto, p.Source) + if err != nil { + fmt.Printf("Error listening for ICMP packets: %s\n", err.Error()) + close(p.done) + return nil + } + return conn +} + +func bytesToTime(b []byte) time.Time { + var nsec int64 + for i := uint8(0); i < 8; i++ { + nsec += int64(b[i]) << ((7 - i) * 8) + } + return time.Unix(nsec/1000000000, nsec%1000000000) +} + +func isIPv4(ip net.IP) bool { + return len(ip.To4()) == net.IPv4len +} + +func isIPv6(ip net.IP) bool { + return len(ip) == net.IPv6len +} + +func timeToBytes(t time.Time) []byte { + nsec := t.UnixNano() + b := make([]byte, 8) + for i := uint8(0); i < 8; i++ { + b[i] = byte((nsec >> ((7 - i) * 8)) & 0xff) + } + return b +} + +func bytesToInt(b []byte) int64 { + return int64(binary.BigEndian.Uint64(b)) +} + +func intToBytes(tracker int64) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(tracker)) + return b +} diff --git a/vendor/github.com/toolkits/file/downloader.go b/vendor/github.com/toolkits/file/downloader.go new file mode 100644 index 00000000..4d6e17c6 --- /dev/null +++ b/vendor/github.com/toolkits/file/downloader.go @@ -0,0 +1,26 @@ +package file + +import ( + "io" + "net/http" + "os" +) + +func Download(toFile, url string) error { + out, err := os.Create(toFile) + if err != nil { + return err + } + + defer out.Close() + + resp, err := http.Get(url) + if err != nil { + return err + } + + defer resp.Body.Close() + + _, err = io.Copy(out, resp.Body) + return 
err +} diff --git a/vendor/github.com/toolkits/file/file.go b/vendor/github.com/toolkits/file/file.go new file mode 100644 index 00000000..1127022e --- /dev/null +++ b/vendor/github.com/toolkits/file/file.go @@ -0,0 +1,223 @@ +package file + +import ( + "fmt" + "io/ioutil" + "log" + "os" + "path" + "path/filepath" + "strings" + "time" +) + +// SelfPath gets compiled executable file absolute path +func SelfPath() string { + path, _ := filepath.Abs(os.Args[0]) + return path +} + +// get absolute filepath, based on built executable file +func RealPath(fp string) (string, error) { + if path.IsAbs(fp) { + return fp, nil + } + wd, err := os.Getwd() + return path.Join(wd, fp), err +} + +// SelfDir gets compiled executable file directory +func SelfDir() string { + return filepath.Dir(SelfPath()) +} + +// get filepath base name +func Basename(fp string) string { + return path.Base(fp) +} + +// get filepath dir name +func Dir(fp string) string { + return path.Dir(fp) +} + +func InsureDir(fp string) error { + if IsExist(fp) { + return nil + } + return os.MkdirAll(fp, os.ModePerm) +} + +// mkdir dir if not exist +func EnsureDir(fp string) error { + return os.MkdirAll(fp, os.ModePerm) +} + +// ensure the datadir and make sure it's rw-able +func EnsureDirRW(dataDir string) error { + err := EnsureDir(dataDir) + if err != nil { + return err + } + + checkFile := fmt.Sprintf("%s/rw.%d", dataDir, time.Now().UnixNano()) + fd, err := Create(checkFile) + if err != nil { + if os.IsPermission(err) { + return fmt.Errorf("open %s: rw permission denied", dataDir) + } + return err + } + Close(fd) + Remove(checkFile) + + return nil +} + +// create one file +func Create(name string) (*os.File, error) { + return os.Create(name) +} + +// remove one file +func Remove(name string) error { + return os.Remove(name) +} + +// close fd +func Close(fd *os.File) error { + return fd.Close() +} + +func Ext(fp string) string { + return path.Ext(fp) +} + +// rename file name +func Rename(src string, target 
string) error { + return os.Rename(src, target) +} + +// delete file +func Unlink(fp string) error { + return os.Remove(fp) +} + +// IsFile checks whether the path is a file, +// it returns false when it's a directory or does not exist. +func IsFile(fp string) bool { + f, e := os.Stat(fp) + if e != nil { + return false + } + return !f.IsDir() +} + +// IsExist checks whether a file or directory exists. +// It returns false when the file or directory does not exist. +func IsExist(fp string) bool { + _, err := os.Stat(fp) + return err == nil || os.IsExist(err) +} + +// Search a file in paths. +// this is often used in search config file in /etc ~/ +func SearchFile(filename string, paths ...string) (fullPath string, err error) { + for _, path := range paths { + if fullPath = filepath.Join(path, filename); IsExist(fullPath) { + return + } + } + err = fmt.Errorf("%s not found in paths", fullPath) + return +} + +// get file modified time +func FileMTime(fp string) (int64, error) { + f, e := os.Stat(fp) + if e != nil { + return 0, e + } + return f.ModTime().Unix(), nil +} + +// get file size as how many bytes +func FileSize(fp string) (int64, error) { + f, e := os.Stat(fp) + if e != nil { + return 0, e + } + return f.Size(), nil +} + +// list dirs under dirPath +func DirsUnder(dirPath string) ([]string, error) { + if !IsExist(dirPath) { + return []string{}, nil + } + + fs, err := ioutil.ReadDir(dirPath) + if err != nil { + return []string{}, err + } + + sz := len(fs) + if sz == 0 { + return []string{}, nil + } + + ret := make([]string, 0, sz) + for i := 0; i < sz; i++ { + if fs[i].IsDir() { + name := fs[i].Name() + if name != "." && name != ".." 
{ + ret = append(ret, name) + } + } + } + + return ret, nil +} + +// list files under dirPath +func FilesUnder(dirPath string) ([]string, error) { + if !IsExist(dirPath) { + return []string{}, nil + } + + fs, err := ioutil.ReadDir(dirPath) + if err != nil { + return []string{}, err + } + + sz := len(fs) + if sz == 0 { + return []string{}, nil + } + + ret := make([]string, 0, sz) + for i := 0; i < sz; i++ { + if !fs[i].IsDir() { + ret = append(ret, fs[i].Name()) + } + } + + return ret, nil +} + +func MustOpenLogFile(fp string) *os.File { + if strings.Contains(fp, "/") { + dir := Dir(fp) + err := EnsureDir(dir) + if err != nil { + log.Fatalf("mkdir -p %s occur error %v", dir, err) + } + } + + f, err := os.OpenFile(fp, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) + if err != nil { + log.Fatalf("open %s occur error %v", fp, err) + } + + return f +} diff --git a/vendor/github.com/toolkits/file/reader.go b/vendor/github.com/toolkits/file/reader.go new file mode 100644 index 00000000..c85ff971 --- /dev/null +++ b/vendor/github.com/toolkits/file/reader.go @@ -0,0 +1,66 @@ +package file + +import ( + "bufio" + "io/ioutil" + "strconv" + "strings" +) + +func ToBytes(filePath string) ([]byte, error) { + return ioutil.ReadFile(filePath) +} + +func ToString(filePath string) (string, error) { + b, err := ioutil.ReadFile(filePath) + if err != nil { + return "", err + } + return string(b), nil +} + +func ToTrimString(filePath string) (string, error) { + str, err := ToString(filePath) + if err != nil { + return "", err + } + + return strings.TrimSpace(str), nil +} + +func ToUint64(filePath string) (uint64, error) { + content, err := ToTrimString(filePath) + if err != nil { + return 0, err + } + + var ret uint64 + if ret, err = strconv.ParseUint(content, 10, 64); err != nil { + return 0, err + } + return ret, nil +} + +func ToInt64(filePath string) (int64, error) { + content, err := ToTrimString(filePath) + if err != nil { + return 0, err + } + + var ret int64 + if ret, err = 
strconv.ParseInt(content, 10, 64); err != nil { + return 0, err + } + return ret, nil +} + +func ReadLine(r *bufio.Reader) ([]byte, error) { + line, isPrefix, err := r.ReadLine() + for isPrefix && err == nil { + var bs []byte + bs, isPrefix, err = r.ReadLine() + line = append(line, bs...) + } + + return line, err +} diff --git a/vendor/github.com/toolkits/file/writer.go b/vendor/github.com/toolkits/file/writer.go new file mode 100644 index 00000000..0242ab5d --- /dev/null +++ b/vendor/github.com/toolkits/file/writer.go @@ -0,0 +1,20 @@ +package file + +import ( + "os" + "path" +) + +func WriteBytes(filePath string, b []byte) (int, error) { + os.MkdirAll(path.Dir(filePath), os.ModePerm) + fw, err := os.Create(filePath) + if err != nil { + return 0, err + } + defer fw.Close() + return fw.Write(b) +} + +func WriteString(filePath string, s string) (int, error) { + return WriteBytes(filePath, []byte(s)) +} diff --git a/vendor/github.com/toolkits/sys/cmd.go b/vendor/github.com/toolkits/sys/cmd.go new file mode 100644 index 00000000..c87f77ec --- /dev/null +++ b/vendor/github.com/toolkits/sys/cmd.go @@ -0,0 +1,63 @@ +package sys + +import ( + "bytes" + "log" + "os/exec" + "strings" + "syscall" + "time" +) + +func CmdOut(name string, arg ...string) (string, error) { + cmd := exec.Command(name, arg...) + var out bytes.Buffer + cmd.Stdout = &out + err := cmd.Run() + return out.String(), err +} + +func CmdOutBytes(name string, arg ...string) ([]byte, error) { + cmd := exec.Command(name, arg...) + var out bytes.Buffer + cmd.Stdout = &out + err := cmd.Run() + return out.Bytes(), err +} + +func CmdOutNoLn(name string, arg ...string) (out string, err error) { + out, err = CmdOut(name, arg...) 
+ if err != nil { + return + } + + return strings.TrimSpace(string(out)), nil +} + +func CmdRunWithTimeout(cmd *exec.Cmd, timeout time.Duration) (error, bool) { + var err error + + done := make(chan error) + go func() { + done <- cmd.Wait() + }() + + select { + case <-time.After(timeout): + log.Printf("timeout, process:%s will be killed", cmd.Path) + + go func() { + <-done // allow goroutine to exit + }() + + //IMPORTANT: cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} is necessary before cmd.Start() + err = syscall.Kill(-cmd.Process.Pid, syscall.SIGKILL) + if err != nil { + log.Println("kill failed, error:", err) + } + + return err, true + case err = <-done: + return err, false + } +} diff --git a/vendor/github.com/unrolled/render/.travis.yml b/vendor/github.com/unrolled/render/.travis.yml deleted file mode 100644 index 9ec8d547..00000000 --- a/vendor/github.com/unrolled/render/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go - -go: - - 1.11.x - - 1.12.x - - tip - -env: - - GO111MODULE=on - -install: - - go mod download - -script: - - go test -v -race -tags=integration diff --git a/vendor/github.com/unrolled/render/README.md b/vendor/github.com/unrolled/render/README.md deleted file mode 100644 index 5ffa2a7c..00000000 --- a/vendor/github.com/unrolled/render/README.md +++ /dev/null @@ -1,508 +0,0 @@ -# Render [![GoDoc](http://godoc.org/github.com/unrolled/render?status.svg)](http://godoc.org/github.com/unrolled/render) [![Build Status](https://travis-ci.org/unrolled/render.svg)](https://travis-ci.org/unrolled/render) - -Render is a package that provides functionality for easily rendering JSON, XML, text, binary data, and HTML templates. This package is based on the [Martini](https://github.com/go-martini/martini) [render](https://github.com/martini-contrib/render) work. - -## Block Deprecation Notice -Go 1.6 introduces a new [block](https://github.com/golang/go/blob/release-branch.go1.6/src/html/template/example_test.go#L128) action. 
This conflicts with Render's included `block` template function. To provide an easy migration path, a new function was created called `partial`. It is a duplicate of the old `block` function. It is advised that all users of the `block` function update their code to avoid any issues in the future. Previous to Go 1.6, Render's `block` functionality will continue to work but a message will be logged urging you to migrate to the new `partial` function. - -## Usage -Render can be used with pretty much any web framework providing you can access the `http.ResponseWriter` from your handler. The rendering functions simply wraps Go's existing functionality for marshaling and rendering data. - -- HTML: Uses the [html/template](http://golang.org/pkg/html/template/) package to render HTML templates. -- JSON: Uses the [encoding/json](http://golang.org/pkg/encoding/json/) package to marshal data into a JSON-encoded response. -- XML: Uses the [encoding/xml](http://golang.org/pkg/encoding/xml/) package to marshal data into an XML-encoded response. -- Binary data: Passes the incoming data straight through to the `http.ResponseWriter`. -- Text: Passes the incoming string straight through to the `http.ResponseWriter`. 
- -~~~ go -// main.go -package main - -import ( - "encoding/xml" - "net/http" - - "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" -) - -type ExampleXml struct { - XMLName xml.Name `xml:"example"` - One string `xml:"one,attr"` - Two string `xml:"two,attr"` -} - -func main() { - r := render.New() - mux := http.NewServeMux() - - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - w.Write([]byte("Welcome, visit sub pages now.")) - }) - - mux.HandleFunc("/data", func(w http.ResponseWriter, req *http.Request) { - r.Data(w, http.StatusOK, []byte("Some binary data here.")) - }) - - mux.HandleFunc("/text", func(w http.ResponseWriter, req *http.Request) { - r.Text(w, http.StatusOK, "Plain text here") - }) - - mux.HandleFunc("/json", func(w http.ResponseWriter, req *http.Request) { - r.JSON(w, http.StatusOK, map[string]string{"hello": "json"}) - }) - - mux.HandleFunc("/jsonp", func(w http.ResponseWriter, req *http.Request) { - r.JSONP(w, http.StatusOK, "callbackName", map[string]string{"hello": "jsonp"}) - }) - - mux.HandleFunc("/xml", func(w http.ResponseWriter, req *http.Request) { - r.XML(w, http.StatusOK, ExampleXml{One: "hello", Two: "xml"}) - }) - - mux.HandleFunc("/html", func(w http.ResponseWriter, req *http.Request) { - // Assumes you have a template in ./templates called "example.tmpl" - // $ mkdir -p templates && echo "

Hello {{.}}.

" > templates/example.tmpl - r.HTML(w, http.StatusOK, "example", "World") - }) - - http.ListenAndServe("127.0.0.1:3000", mux) -} -~~~ - -~~~ html - -

Hello {{.}}.

-~~~ - -### Available Options -Render comes with a variety of configuration options _(Note: these are not the default option values. See the defaults below.)_: - -~~~ go -// ... -r := render.New(render.Options{ - Directory: "templates", // Specify what path to load the templates from. - FileSystem: &LocalFileSystem{}, // Specify filesystem from where files are loaded. - Asset: func(name string) ([]byte, error) { // Load from an Asset function instead of file. - return []byte("template content"), nil - }, - AssetNames: func() []string { // Return a list of asset names for the Asset function - return []string{"filename.tmpl"} - }, - Layout: "layout", // Specify a layout template. Layouts can call {{ yield }} to render the current template or {{ partial "css" }} to render a partial from the current template. - Extensions: []string{".tmpl", ".html"}, // Specify extensions to load for templates. - Funcs: []template.FuncMap{AppHelpers}, // Specify helper function maps for templates to access. - Delims: render.Delims{"{[{", "}]}"}, // Sets delimiters to the specified strings. - Charset: "UTF-8", // Sets encoding for content-types. Default is "UTF-8". - DisableCharset: true, // Prevents the charset from being appended to the content type header. - IndentJSON: true, // Output human readable JSON. - IndentXML: true, // Output human readable XML. - PrefixJSON: []byte(")]}',\n"), // Prefixes JSON responses with the given bytes. - PrefixXML: []byte(""), // Prefixes XML responses with the given bytes. - HTMLContentType: "application/xhtml+xml", // Output XHTML content type instead of default "text/html". - IsDevelopment: true, // Render will now recompile the templates on every HTML response. - UnEscapeHTML: true, // Replace ensure '&<>' are output correctly (JSON only). - StreamingJSON: true, // Streams the JSON response via json.Encoder. - RequirePartials: true, // Return an error if a template is missing a partial used in a layout. 
- DisableHTTPErrorRendering: true, // Disables automatic rendering of http.StatusInternalServerError when an error occurs. -}) -// ... -~~~ - -### Default Options -These are the preset options for Render: - -~~~ go -r := render.New() - -// Is the same as the default configuration options: - -r := render.New(render.Options{ - Directory: "templates", - FileSystem: &LocalFileSystem{}, - Asset: nil, - AssetNames: nil, - Layout: "", - Extensions: []string{".tmpl"}, - Funcs: []template.FuncMap{}, - Delims: render.Delims{"{{", "}}"}, - Charset: "UTF-8", - DisableCharset: false, - IndentJSON: false, - IndentXML: false, - PrefixJSON: []byte(""), - PrefixXML: []byte(""), - BinaryContentType: "application/octet-stream", - HTMLContentType: "text/html", - JSONContentType: "application/json", - JSONPContentType: "application/javascript", - TextContentType: "text/plain", - XMLContentType: "application/xhtml+xml", - IsDevelopment: false, - UnEscapeHTML: false, - StreamingJSON: false, - RequirePartials: false, - DisableHTTPErrorRendering: false, -}) -~~~ - -### JSON vs Streaming JSON -By default, Render does **not** stream JSON to the `http.ResponseWriter`. It instead marshalls your object into a byte array, and if no errors occurred, writes that byte array to the `http.ResponseWriter`. If you would like to use the built it in streaming functionality (`json.Encoder`), you can set the `StreamingJSON` setting to `true`. This will stream the output directly to the `http.ResponseWriter`. Also note that streaming is only implemented in `render.JSON` and not `render.JSONP`, and the `UnEscapeHTML` and `Indent` options are ignored when streaming. - -### Loading Templates -By default Render will attempt to load templates with a '.tmpl' extension from the "templates" directory. Templates are found by traversing the templates directory and are named by path and basename. 
For instance, the following directory structure: - -~~~ -templates/ - | - |__ admin/ - | | - | |__ index.tmpl - | | - | |__ edit.tmpl - | - |__ home.tmpl -~~~ - -Will provide the following templates: -~~~ -admin/index -admin/edit -home -~~~ - -You can also load templates from memory by providing the Asset and AssetNames options, -e.g. when generating an asset file using [go-bindata](https://github.com/jteeuwen/go-bindata). - -### Layouts -Render provides `yield` and `partial` functions for layouts to access: -~~~ go -// ... -r := render.New(render.Options{ - Layout: "layout", -}) -// ... -~~~ - -~~~ html - - - - My Layout - - {{ partial "css" }} - - - - {{ partial "header" }} - - {{ yield }} - - {{ partial "footer" }} - - -~~~ - -`current` can also be called to get the current template being rendered. -~~~ html - - - - My Layout - - - This is the {{ current }} page. - - -~~~ - -Partials are defined by individual templates as seen below. The partial template's -name needs to be defined as "{partial name}-{template name}". -~~~ html - -{{ define "header-home" }} -

Home

-{{ end }} - -{{ define "footer-home"}} -

The End

-{{ end }} -~~~ - -By default, the template is not required to define all partials referenced in the -layout. If you want an error to be returned when a template does not define a -partial, set `Options.RequirePartials = true`. - -### Character Encodings -Render will automatically set the proper Content-Type header based on which function you call. See below for an example of what the default settings would output (note that UTF-8 is the default, and binary data does not output the charset): -~~~ go -// main.go -package main - -import ( - "encoding/xml" - "net/http" - - "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" -) - -type ExampleXml struct { - XMLName xml.Name `xml:"example"` - One string `xml:"one,attr"` - Two string `xml:"two,attr"` -} - -func main() { - r := render.New(render.Options{}) - mux := http.NewServeMux() - - // This will set the Content-Type header to "application/octet-stream". - // Note that this does not receive a charset value. - mux.HandleFunc("/data", func(w http.ResponseWriter, req *http.Request) { - r.Data(w, http.StatusOK, []byte("Some binary data here.")) - }) - - // This will set the Content-Type header to "application/json; charset=UTF-8". - mux.HandleFunc("/json", func(w http.ResponseWriter, req *http.Request) { - r.JSON(w, http.StatusOK, map[string]string{"hello": "json"}) - }) - - // This will set the Content-Type header to "text/xml; charset=UTF-8". - mux.HandleFunc("/xml", func(w http.ResponseWriter, req *http.Request) { - r.XML(w, http.StatusOK, ExampleXml{One: "hello", Two: "xml"}) - }) - - // This will set the Content-Type header to "text/plain; charset=UTF-8". - mux.HandleFunc("/text", func(w http.ResponseWriter, req *http.Request) { - r.Text(w, http.StatusOK, "Plain text here") - }) - - // This will set the Content-Type header to "text/html; charset=UTF-8". 
- mux.HandleFunc("/html", func(w http.ResponseWriter, req *http.Request) { - // Assumes you have a template in ./templates called "example.tmpl" - // $ mkdir -p templates && echo "

Hello {{.}}.

" > templates/example.tmpl - r.HTML(w, http.StatusOK, "example", "World") - }) - - http.ListenAndServe("127.0.0.1:3000", mux) -} -~~~ - -In order to change the charset, you can set the `Charset` within the `render.Options` to your encoding value: -~~~ go -// main.go -package main - -import ( - "encoding/xml" - "net/http" - - "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" -) - -type ExampleXml struct { - XMLName xml.Name `xml:"example"` - One string `xml:"one,attr"` - Two string `xml:"two,attr"` -} - -func main() { - r := render.New(render.Options{ - Charset: "ISO-8859-1", - }) - mux := http.NewServeMux() - - // This will set the Content-Type header to "application/octet-stream". - // Note that this does not receive a charset value. - mux.HandleFunc("/data", func(w http.ResponseWriter, req *http.Request) { - r.Data(w, http.StatusOK, []byte("Some binary data here.")) - }) - - // This will set the Content-Type header to "application/json; charset=ISO-8859-1". - mux.HandleFunc("/json", func(w http.ResponseWriter, req *http.Request) { - r.JSON(w, http.StatusOK, map[string]string{"hello": "json"}) - }) - - // This will set the Content-Type header to "text/xml; charset=ISO-8859-1". - mux.HandleFunc("/xml", func(w http.ResponseWriter, req *http.Request) { - r.XML(w, http.StatusOK, ExampleXml{One: "hello", Two: "xml"}) - }) - - // This will set the Content-Type header to "text/plain; charset=ISO-8859-1". - mux.HandleFunc("/text", func(w http.ResponseWriter, req *http.Request) { - r.Text(w, http.StatusOK, "Plain text here") - }) - - // This will set the Content-Type header to "text/html; charset=ISO-8859-1". - mux.HandleFunc("/html", func(w http.ResponseWriter, req *http.Request) { - // Assumes you have a template in ./templates called "example.tmpl" - // $ mkdir -p templates && echo "

Hello {{.}}.

" > templates/example.tmpl - r.HTML(w, http.StatusOK, "example", "World") - }) - - http.ListenAndServe("127.0.0.1:3000", mux) -} -~~~ - -### Error Handling - -The rendering functions return any errors from the rendering engine. -By default, they will also write the error to the HTTP response and set the status code to 500. You can disable -this behavior so that you can handle errors yourself by setting -`Options.DisableHTTPErrorRendering: true`. - -~~~go -r := render.New(render.Options{ - DisableHTTPErrorRendering: true, -}) - -//... - -err := r.HTML(w, http.StatusOK, "example", "World") -if err != nil{ - http.Redirect(w, r, "/my-custom-500", http.StatusFound) -} -~~~ - -## Integration Examples - -### [Echo](https://github.com/labstack/echo) -~~~ go -// main.go -package main - -import ( - "io" - "net/http" - - "github.com/labstack/echo" - "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" -) - -type RenderWrapper struct { // We need to wrap the renderer because we need a different signature for echo. - rnd *render.Render -} - -func (r *RenderWrapper) Render(w io.Writer, name string, data interface{},c echo.Context) error { - return r.rnd.HTML(w, 0, name, data) // The zero status code is overwritten by echo. 
-} - -func main() { - r := &RenderWrapper{render.New()} - - e := echo.New() - - e.Renderer = r - - e.GET("/", func(c echo.Context) error { - return c.Render(http.StatusOK, "TemplateName", "TemplateData") - }) - - e.Logger.Fatal(e.Start(":1323")) -} -~~~ - -### [Gin](https://github.com/gin-gonic/gin) -~~~ go -// main.go -package main - -import ( - "net/http" - - "github.com/gin-gonic/gin" - "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" -) - -func main() { - r := render.New(render.Options{ - IndentJSON: true, - }) - - router := gin.Default() - - router.GET("/", func(c *gin.Context) { - r.JSON(c.Writer, http.StatusOK, map[string]string{"welcome": "This is rendered JSON!"}) - }) - - router.Run(":3000") -} -~~~ - -### [Goji](https://github.com/zenazn/goji) -~~~ go -// main.go -package main - -import ( - "net/http" - - "github.com/zenazn/goji" - "github.com/zenazn/goji/web" - "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" -) - -func main() { - r := render.New(render.Options{ - IndentJSON: true, - }) - - goji.Get("/", func(c web.C, w http.ResponseWriter, req *http.Request) { - r.JSON(w, http.StatusOK, map[string]string{"welcome": "This is rendered JSON!"}) - }) - goji.Serve() // Defaults to ":8000". 
-} -~~~ - -### [Negroni](https://github.com/codegangsta/negroni) -~~~ go -// main.go -package main - -import ( - "net/http" - - "github.com/urfave/negroni" - "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" -) - -func main() { - r := render.New(render.Options{ - IndentJSON: true, - }) - mux := http.NewServeMux() - - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - r.JSON(w, http.StatusOK, map[string]string{"welcome": "This is rendered JSON!"}) - }) - - n := negroni.Classic() - n.UseHandler(mux) - n.Run(":3000") -} -~~~ - -### [Traffic](https://github.com/pilu/traffic) -~~~ go -// main.go -package main - -import ( - "net/http" - - "github.com/pilu/traffic" - "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" -) - -func main() { - r := render.New(render.Options{ - IndentJSON: true, - }) - - router := traffic.New() - router.Get("/", func(w traffic.ResponseWriter, req *traffic.Request) { - r.JSON(w, http.StatusOK, map[string]string{"welcome": "This is rendered JSON!"}) - }) - - router.Run() -} -~~~ diff --git a/vendor/github.com/unrolled/render/buffer.go b/vendor/github.com/unrolled/render/buffer.go deleted file mode 100644 index cdc92ffb..00000000 --- a/vendor/github.com/unrolled/render/buffer.go +++ /dev/null @@ -1,46 +0,0 @@ -package render - -import "bytes" - -// bufPool represents a reusable buffer pool for executing templates into. -var bufPool *BufferPool - -// BufferPool implements a pool of bytes.Buffers in the form of a bounded channel. -// Pulled from the github.com/oxtoacart/bpool package (Apache licensed). -type BufferPool struct { - c chan *bytes.Buffer -} - -// NewBufferPool creates a new BufferPool bounded to the given size. -func NewBufferPool(size int) (bp *BufferPool) { - return &BufferPool{ - c: make(chan *bytes.Buffer, size), - } -} - -// Get gets a Buffer from the BufferPool, or creates a new one if none are -// available in the pool. 
-func (bp *BufferPool) Get() (b *bytes.Buffer) { - select { - case b = <-bp.c: - // reuse existing buffer - default: - // create new buffer - b = bytes.NewBuffer([]byte{}) - } - return -} - -// Put returns the given Buffer to the BufferPool. -func (bp *BufferPool) Put(b *bytes.Buffer) { - b.Reset() - select { - case bp.c <- b: - default: // Discard the buffer if the pool is full. - } -} - -// Initialize buffer pool for writing templates into. -func init() { - bufPool = NewBufferPool(64) -} diff --git a/vendor/github.com/unrolled/render/doc.go b/vendor/github.com/unrolled/render/doc.go deleted file mode 100644 index d3487ffb..00000000 --- a/vendor/github.com/unrolled/render/doc.go +++ /dev/null @@ -1,55 +0,0 @@ -/*Package render is a package that provides functionality for easily rendering JSON, XML, binary data, and HTML templates. - - package main - - import ( - "encoding/xml" - "net/http" - - "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" - ) - - type ExampleXml struct { - XMLName xml.Name `xml:"example"` - One string `xml:"one,attr"` - Two string `xml:"two,attr"` - } - - func main() { - r := render.New() - mux := http.NewServeMux() - - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - w.Write([]byte("Welcome, visit sub pages now.")) - }) - - mux.HandleFunc("/data", func(w http.ResponseWriter, req *http.Request) { - r.Data(w, http.StatusOK, []byte("Some binary data here.")) - }) - - mux.HandleFunc("/text", func(w http.ResponseWriter, req *http.Request) { - r.Text(w, http.StatusOK, "Plain text here") - }) - - mux.HandleFunc("/json", func(w http.ResponseWriter, req *http.Request) { - r.JSON(w, http.StatusOK, map[string]string{"hello": "json"}) - }) - - mux.HandleFunc("/jsonp", func(w http.ResponseWriter, req *http.Request) { - r.JSONP(w, http.StatusOK, "callbackName", map[string]string{"hello": "jsonp"}) - }) - - mux.HandleFunc("/xml", func(w http.ResponseWriter, req *http.Request) { - r.XML(w, http.StatusOK, ExampleXml{One: 
"hello", Two: "xml"}) - }) - - mux.HandleFunc("/html", func(w http.ResponseWriter, req *http.Request) { - // Assumes you have a template in ./templates called "example.tmpl". - // $ mkdir -p templates && echo "

Hello HTML world.

" > templates/example.tmpl - r.HTML(w, http.StatusOK, "example", nil) - }) - - http.ListenAndServe("0.0.0.0:3000", mux) - } -*/ -package render diff --git a/vendor/github.com/unrolled/render/engine.go b/vendor/github.com/unrolled/render/engine.go deleted file mode 100644 index cdf1a1b1..00000000 --- a/vendor/github.com/unrolled/render/engine.go +++ /dev/null @@ -1,217 +0,0 @@ -package render - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "html/template" - "io" - "net/http" -) - -// Engine is the generic interface for all responses. -type Engine interface { - Render(io.Writer, interface{}) error -} - -// Head defines the basic ContentType and Status fields. -type Head struct { - ContentType string - Status int -} - -// Data built-in renderer. -type Data struct { - Head -} - -// HTML built-in renderer. -type HTML struct { - Head - Name string - Templates *template.Template -} - -// JSON built-in renderer. -type JSON struct { - Head - Indent bool - UnEscapeHTML bool - Prefix []byte - StreamingJSON bool -} - -// JSONP built-in renderer. -type JSONP struct { - Head - Indent bool - Callback string -} - -// Text built-in renderer. -type Text struct { - Head -} - -// XML built-in renderer. -type XML struct { - Head - Indent bool - Prefix []byte -} - -// Write outputs the header content. -func (h Head) Write(w http.ResponseWriter) { - w.Header().Set(ContentType, h.ContentType) - w.WriteHeader(h.Status) -} - -// Render a data response. -func (d Data) Render(w io.Writer, v interface{}) error { - if hw, ok := w.(http.ResponseWriter); ok { - c := hw.Header().Get(ContentType) - if c != "" { - d.Head.ContentType = c - } - d.Head.Write(hw) - } - - w.Write(v.([]byte)) - return nil -} - -// Render a HTML response. -func (h HTML) Render(w io.Writer, binding interface{}) error { - // Retrieve a buffer from the pool to write to. 
- out := bufPool.Get() - err := h.Templates.ExecuteTemplate(out, h.Name, binding) - if err != nil { - return err - } - - if hw, ok := w.(http.ResponseWriter); ok { - h.Head.Write(hw) - } - out.WriteTo(w) - - // Return the buffer to the pool. - bufPool.Put(out) - return nil -} - -// Render a JSON response. -func (j JSON) Render(w io.Writer, v interface{}) error { - if j.StreamingJSON { - return j.renderStreamingJSON(w, v) - } - - var result []byte - var err error - - if j.Indent { - result, err = json.MarshalIndent(v, "", " ") - result = append(result, '\n') - } else { - result, err = json.Marshal(v) - } - if err != nil { - return err - } - - // Unescape HTML if needed. - if j.UnEscapeHTML { - result = bytes.Replace(result, []byte("\\u003c"), []byte("<"), -1) - result = bytes.Replace(result, []byte("\\u003e"), []byte(">"), -1) - result = bytes.Replace(result, []byte("\\u0026"), []byte("&"), -1) - } - - // JSON marshaled fine, write out the result. - if hw, ok := w.(http.ResponseWriter); ok { - j.Head.Write(hw) - } - if len(j.Prefix) > 0 { - w.Write(j.Prefix) - } - w.Write(result) - return nil -} - -func (j JSON) renderStreamingJSON(w io.Writer, v interface{}) error { - if hw, ok := w.(http.ResponseWriter); ok { - j.Head.Write(hw) - } - if len(j.Prefix) > 0 { - w.Write(j.Prefix) - } - - return json.NewEncoder(w).Encode(v) -} - -// Render a JSONP response. -func (j JSONP) Render(w io.Writer, v interface{}) error { - var result []byte - var err error - - if j.Indent { - result, err = json.MarshalIndent(v, "", " ") - } else { - result, err = json.Marshal(v) - } - if err != nil { - return err - } - - // JSON marshaled fine, write out the result. - if hw, ok := w.(http.ResponseWriter); ok { - j.Head.Write(hw) - } - w.Write([]byte(j.Callback + "(")) - w.Write(result) - w.Write([]byte(");")) - - // If indenting, append a new line. - if j.Indent { - w.Write([]byte("\n")) - } - return nil -} - -// Render a text response. 
-func (t Text) Render(w io.Writer, v interface{}) error { - if hw, ok := w.(http.ResponseWriter); ok { - c := hw.Header().Get(ContentType) - if c != "" { - t.Head.ContentType = c - } - t.Head.Write(hw) - } - - w.Write([]byte(v.(string))) - return nil -} - -// Render an XML response. -func (x XML) Render(w io.Writer, v interface{}) error { - var result []byte - var err error - - if x.Indent { - result, err = xml.MarshalIndent(v, "", " ") - result = append(result, '\n') - } else { - result, err = xml.Marshal(v) - } - if err != nil { - return err - } - - // XML marshaled fine, write out the result. - if hw, ok := w.(http.ResponseWriter); ok { - x.Head.Write(hw) - } - if len(x.Prefix) > 0 { - w.Write(x.Prefix) - } - w.Write(result) - return nil -} diff --git a/vendor/github.com/unrolled/render/fs.go b/vendor/github.com/unrolled/render/fs.go deleted file mode 100644 index 3e607762..00000000 --- a/vendor/github.com/unrolled/render/fs.go +++ /dev/null @@ -1,21 +0,0 @@ -package render - -import ( - "io/ioutil" - "path/filepath" -) - -type FileSystem interface { - Walk(root string, walkFn filepath.WalkFunc) error - ReadFile(filename string) ([]byte, error) -} - -type LocalFileSystem struct{} - -func (LocalFileSystem) Walk(root string, walkFn filepath.WalkFunc) error { - return filepath.Walk(root, walkFn) -} - -func (LocalFileSystem) ReadFile(filename string) ([]byte, error) { - return ioutil.ReadFile(filename) -} diff --git a/vendor/github.com/unrolled/render/go.mod b/vendor/github.com/unrolled/render/go.mod deleted file mode 100644 index 22d793cb..00000000 --- a/vendor/github.com/unrolled/render/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/unrolled/render - -go 1.12 - -require github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 diff --git a/vendor/github.com/unrolled/render/go.sum b/vendor/github.com/unrolled/render/go.sum deleted file mode 100644 index 21999590..00000000 --- a/vendor/github.com/unrolled/render/go.sum +++ /dev/null @@ -1,2 +0,0 @@ 
-github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o= -github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= diff --git a/vendor/github.com/unrolled/render/helpers.go b/vendor/github.com/unrolled/render/helpers.go deleted file mode 100644 index 699508a4..00000000 --- a/vendor/github.com/unrolled/render/helpers.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build go1.6 - -package render - -import ( - "fmt" - "html/template" -) - -// Included helper functions for use when rendering HTML. -var helperFuncs = template.FuncMap{ - "yield": func() (string, error) { - return "", fmt.Errorf("yield called with no layout defined") - }, - "partial": func() (string, error) { - return "", fmt.Errorf("block called with no layout defined") - }, - "current": func() (string, error) { - return "", nil - }, -} diff --git a/vendor/github.com/unrolled/render/helpers_pre16.go b/vendor/github.com/unrolled/render/helpers_pre16.go deleted file mode 100644 index 999d9af4..00000000 --- a/vendor/github.com/unrolled/render/helpers_pre16.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !go1.6 - -package render - -import ( - "fmt" - "html/template" -) - -// Included helper functions for use when rendering HTML. -var helperFuncs = template.FuncMap{ - "yield": func() (string, error) { - return "", fmt.Errorf("yield called with no layout defined") - }, - // `block` is deprecated! Use the `partial` below if you need this functionality still. 
- // Otherwise, checkout Go's `block` implementation introduced in 1.6 - "block": func() (string, error) { - return "", fmt.Errorf("block called with no layout defined") - }, - "partial": func() (string, error) { - return "", fmt.Errorf("block called with no layout defined") - }, - "current": func() (string, error) { - return "", nil - }, -} diff --git a/vendor/github.com/unrolled/render/render.go b/vendor/github.com/unrolled/render/render.go deleted file mode 100644 index 3259f620..00000000 --- a/vendor/github.com/unrolled/render/render.go +++ /dev/null @@ -1,480 +0,0 @@ -package render - -import ( - "bytes" - "fmt" - "html/template" - "io" - "log" - "net/http" - "os" - "path/filepath" - "strings" - "sync" -) - -const ( - // ContentBinary header value for binary data. - ContentBinary = "application/octet-stream" - // ContentHTML header value for HTML data. - ContentHTML = "text/html" - // ContentJSON header value for JSON data. - ContentJSON = "application/json" - // ContentJSONP header value for JSONP data. - ContentJSONP = "application/javascript" - // ContentLength header constant. - ContentLength = "Content-Length" - // ContentText header value for Text data. - ContentText = "text/plain" - // ContentType header constant. - ContentType = "Content-Type" - // ContentXHTML header value for XHTML data. - ContentXHTML = "application/xhtml+xml" - // ContentXML header value for XML data. - ContentXML = "text/xml" - // Default character encoding. - defaultCharset = "UTF-8" -) - -// helperFuncs had to be moved out. See helpers.go|helpers_pre16.go files. - -// Delims represents a set of Left and Right delimiters for HTML template rendering. -type Delims struct { - // Left delimiter, defaults to {{. - Left string - // Right delimiter, defaults to }}. - Right string -} - -// Options is a struct for specifying configuration options for the render.Render object. -type Options struct { - // Directory to load templates. Default is "templates". 
- Directory string - // FileSystem to access files - FileSystem FileSystem - // Asset function to use in place of directory. Defaults to nil. - Asset func(name string) ([]byte, error) - // AssetNames function to use in place of directory. Defaults to nil. - AssetNames func() []string - // Layout template name. Will not render a layout if blank (""). Defaults to blank (""). - Layout string - // Extensions to parse template files from. Defaults to [".tmpl"]. - Extensions []string - // Funcs is a slice of FuncMaps to apply to the template upon compilation. This is useful for helper functions. Defaults to empty map. - Funcs []template.FuncMap - // Delims sets the action delimiters to the specified strings in the Delims struct. - Delims Delims - // Appends the given character set to the Content-Type header. Default is "UTF-8". - Charset string - // If DisableCharset is set to true, it will not append the above Charset value to the Content-Type header. Default is false. - DisableCharset bool - // Outputs human readable JSON. - IndentJSON bool - // Outputs human readable XML. Default is false. - IndentXML bool - // Prefixes the JSON output with the given bytes. Default is false. - PrefixJSON []byte - // Prefixes the XML output with the given bytes. - PrefixXML []byte - // Allows changing the binary content type. - BinaryContentType string - // Allows changing the HTML content type. - HTMLContentType string - // Allows changing the JSON content type. - JSONContentType string - // Allows changing the JSONP content type. - JSONPContentType string - // Allows changing the Text content type. - TextContentType string - // Allows changing the XML content type. - XMLContentType string - // If IsDevelopment is set to true, this will recompile the templates on every request. Default is false. - IsDevelopment bool - // Unescape HTML characters "&<>" to their original values. Default is false. - UnEscapeHTML bool - // Streams JSON responses instead of marshalling prior to sending. 
Default is false. - StreamingJSON bool - // Require that all partials executed in the layout are implemented in all templates using the layout. Default is false. - RequirePartials bool - // Deprecated: Use the above `RequirePartials` instead of this. As of Go 1.6, blocks are built in. Default is false. - RequireBlocks bool - // Disables automatic rendering of http.StatusInternalServerError when an error occurs. Default is false. - DisableHTTPErrorRendering bool - // Enables using partials without the current filename suffix which allows use of the same template in multiple files. e.g {{ partial "carosuel" }} inside the home template will match carosel-home or carosel. - // ***NOTE*** - This option should be named RenderPartialsWithoutSuffix as that is what it does. "Prefix" is a typo. Maintaining the existing name for backwards compatibility. - RenderPartialsWithoutPrefix bool -} - -// HTMLOptions is a struct for overriding some rendering Options for specific HTML call. -type HTMLOptions struct { - // Layout template name. Overrides Options.Layout. - Layout string - // Funcs added to Options.Funcs. - Funcs template.FuncMap -} - -// Render is a service that provides functions for easily writing JSON, XML, -// binary data, and HTML templates out to a HTTP Response. -type Render struct { - // Customize Secure with an Options struct. - opt Options - templates *template.Template - templatesLk sync.Mutex - compiledCharset string -} - -// New constructs a new Render instance with the supplied options. -func New(options ...Options) *Render { - var o Options - if len(options) > 0 { - o = options[0] - } - - r := Render{ - opt: o, - } - - r.prepareOptions() - r.compileTemplates() - - return &r -} - -func (r *Render) prepareOptions() { - // Fill in the defaults if need be. 
- if len(r.opt.Charset) == 0 { - r.opt.Charset = defaultCharset - } - if r.opt.DisableCharset == false { - r.compiledCharset = "; charset=" + r.opt.Charset - } - - if len(r.opt.Directory) == 0 { - r.opt.Directory = "templates" - } - if r.opt.FileSystem == nil { - r.opt.FileSystem = &LocalFileSystem{} - } - if len(r.opt.Extensions) == 0 { - r.opt.Extensions = []string{".tmpl"} - } - if len(r.opt.BinaryContentType) == 0 { - r.opt.BinaryContentType = ContentBinary - } - if len(r.opt.HTMLContentType) == 0 { - r.opt.HTMLContentType = ContentHTML - } - if len(r.opt.JSONContentType) == 0 { - r.opt.JSONContentType = ContentJSON - } - if len(r.opt.JSONPContentType) == 0 { - r.opt.JSONPContentType = ContentJSONP - } - if len(r.opt.TextContentType) == 0 { - r.opt.TextContentType = ContentText - } - if len(r.opt.XMLContentType) == 0 { - r.opt.XMLContentType = ContentXML - } -} - -func (r *Render) compileTemplates() { - if r.opt.Asset == nil || r.opt.AssetNames == nil { - r.compileTemplatesFromDir() - return - } - r.compileTemplatesFromAsset() -} - -func (r *Render) compileTemplatesFromDir() { - dir := r.opt.Directory - r.templates = template.New(dir) - r.templates.Delims(r.opt.Delims.Left, r.opt.Delims.Right) - - // Walk the supplied directory and compile any files that match our extension list. - r.opt.FileSystem.Walk(dir, func(path string, info os.FileInfo, err error) error { - // Fix same-extension-dirs bug: some dir might be named to: "users.tmpl", "local.html". - // These dirs should be excluded as they are not valid golang templates, but files under - // them should be treat as normal. - // If is a dir, return immediately (dir is not a valid golang template). 
- if info == nil || info.IsDir() { - return nil - } - - rel, err := filepath.Rel(dir, path) - if err != nil { - return err - } - - ext := "" - if strings.Index(rel, ".") != -1 { - ext = filepath.Ext(rel) - } - - for _, extension := range r.opt.Extensions { - if ext == extension { - buf, err := r.opt.FileSystem.ReadFile(path) - if err != nil { - panic(err) - } - - name := (rel[0 : len(rel)-len(ext)]) - tmpl := r.templates.New(filepath.ToSlash(name)) - - // Add our funcmaps. - for _, funcs := range r.opt.Funcs { - tmpl.Funcs(funcs) - } - - // Break out if this parsing fails. We don't want any silent server starts. - template.Must(tmpl.Funcs(helperFuncs).Parse(string(buf))) - break - } - } - return nil - }) -} - -func (r *Render) compileTemplatesFromAsset() { - dir := r.opt.Directory - r.templates = template.New(dir) - r.templates.Delims(r.opt.Delims.Left, r.opt.Delims.Right) - - for _, path := range r.opt.AssetNames() { - if !strings.HasPrefix(path, dir) { - continue - } - - rel, err := filepath.Rel(dir, path) - if err != nil { - panic(err) - } - - ext := "" - if strings.Index(rel, ".") != -1 { - ext = "." + strings.Join(strings.Split(rel, ".")[1:], ".") - } - - for _, extension := range r.opt.Extensions { - if ext == extension { - - buf, err := r.opt.Asset(path) - if err != nil { - panic(err) - } - - name := (rel[0 : len(rel)-len(ext)]) - tmpl := r.templates.New(filepath.ToSlash(name)) - - // Add our funcmaps. - for _, funcs := range r.opt.Funcs { - tmpl.Funcs(funcs) - } - - // Break out if this parsing fails. We don't want any silent server starts. - template.Must(tmpl.Funcs(helperFuncs).Parse(string(buf))) - break - } - } - } -} - -// TemplateLookup is a wrapper around template.Lookup and returns -// the template with the given name that is associated with t, or nil -// if there is no such template. 
-func (r *Render) TemplateLookup(t string) *template.Template { - return r.templates.Lookup(t) -} - -func (r *Render) execute(name string, binding interface{}) (*bytes.Buffer, error) { - buf := new(bytes.Buffer) - return buf, r.templates.ExecuteTemplate(buf, name, binding) -} - -func (r *Render) layoutFuncs(name string, binding interface{}) template.FuncMap { - return template.FuncMap{ - "yield": func() (template.HTML, error) { - buf, err := r.execute(name, binding) - // Return safe HTML here since we are rendering our own template. - return template.HTML(buf.String()), err - }, - "current": func() (string, error) { - return name, nil - }, - "block": func(partialName string) (template.HTML, error) { - log.Print("Render's `block` implementation is now depericated. Use `partial` as a drop in replacement.") - fullPartialName := fmt.Sprintf("%s-%s", partialName, name) - if r.TemplateLookup(fullPartialName) == nil && r.opt.RenderPartialsWithoutPrefix { - fullPartialName = partialName - } - if r.opt.RequireBlocks || r.TemplateLookup(fullPartialName) != nil { - buf, err := r.execute(fullPartialName, binding) - // Return safe HTML here since we are rendering our own template. - return template.HTML(buf.String()), err - } - return "", nil - }, - "partial": func(partialName string) (template.HTML, error) { - fullPartialName := fmt.Sprintf("%s-%s", partialName, name) - if r.TemplateLookup(fullPartialName) == nil && r.opt.RenderPartialsWithoutPrefix { - fullPartialName = partialName - } - if r.opt.RequirePartials || r.TemplateLookup(fullPartialName) != nil { - buf, err := r.execute(fullPartialName, binding) - // Return safe HTML here since we are rendering our own template. 
- return template.HTML(buf.String()), err - } - return "", nil - }, - } -} - -func (r *Render) prepareHTMLOptions(htmlOpt []HTMLOptions) HTMLOptions { - layout := r.opt.Layout - funcs := template.FuncMap{} - - for _, tmp := range r.opt.Funcs { - for k, v := range tmp { - funcs[k] = v - } - } - - if len(htmlOpt) > 0 { - opt := htmlOpt[0] - if len(opt.Layout) > 0 { - layout = opt.Layout - } - - for k, v := range opt.Funcs { - funcs[k] = v - } - } - - return HTMLOptions{ - Layout: layout, - Funcs: funcs, - } -} - -// Render is the generic function called by XML, JSON, Data, HTML, and can be called by custom implementations. -func (r *Render) Render(w io.Writer, e Engine, data interface{}) error { - err := e.Render(w, data) - if hw, ok := w.(http.ResponseWriter); err != nil && !r.opt.DisableHTTPErrorRendering && ok { - http.Error(hw, err.Error(), http.StatusInternalServerError) - } - return err -} - -// Data writes out the raw bytes as binary data. -func (r *Render) Data(w io.Writer, status int, v []byte) error { - head := Head{ - ContentType: r.opt.BinaryContentType, - Status: status, - } - - d := Data{ - Head: head, - } - - return r.Render(w, d, v) -} - -// HTML builds up the response from the specified template and bindings. -func (r *Render) HTML(w io.Writer, status int, name string, binding interface{}, htmlOpt ...HTMLOptions) error { - r.templatesLk.Lock() - defer r.templatesLk.Unlock() - - // If we are in development mode, recompile the templates on every HTML request. 
- if r.opt.IsDevelopment { - r.compileTemplates() - } - - opt := r.prepareHTMLOptions(htmlOpt) - if tpl := r.templates.Lookup(name); tpl != nil { - if len(opt.Layout) > 0 { - tpl.Funcs(r.layoutFuncs(name, binding)) - name = opt.Layout - } - - if len(opt.Funcs) > 0 { - tpl.Funcs(opt.Funcs) - } - } - - head := Head{ - ContentType: r.opt.HTMLContentType + r.compiledCharset, - Status: status, - } - - h := HTML{ - Head: head, - Name: name, - Templates: r.templates, - } - - return r.Render(w, h, binding) -} - -// JSON marshals the given interface object and writes the JSON response. -func (r *Render) JSON(w io.Writer, status int, v interface{}) error { - head := Head{ - ContentType: r.opt.JSONContentType + r.compiledCharset, - Status: status, - } - - j := JSON{ - Head: head, - Indent: r.opt.IndentJSON, - Prefix: r.opt.PrefixJSON, - UnEscapeHTML: r.opt.UnEscapeHTML, - StreamingJSON: r.opt.StreamingJSON, - } - - return r.Render(w, j, v) -} - -// JSONP marshals the given interface object and writes the JSON response. -func (r *Render) JSONP(w io.Writer, status int, callback string, v interface{}) error { - head := Head{ - ContentType: r.opt.JSONPContentType + r.compiledCharset, - Status: status, - } - - j := JSONP{ - Head: head, - Indent: r.opt.IndentJSON, - Callback: callback, - } - - return r.Render(w, j, v) -} - -// Text writes out a string as plain text. -func (r *Render) Text(w io.Writer, status int, v string) error { - head := Head{ - ContentType: r.opt.TextContentType + r.compiledCharset, - Status: status, - } - - t := Text{ - Head: head, - } - - return r.Render(w, t, v) -} - -// XML marshals the given interface object and writes the XML response. 
-func (r *Render) XML(w io.Writer, status int, v interface{}) error { - head := Head{ - ContentType: r.opt.XMLContentType + r.compiledCharset, - Status: status, - } - - x := XML{ - Head: head, - Indent: r.opt.IndentXML, - Prefix: r.opt.PrefixXML, - } - - return r.Render(w, x, v) -} diff --git a/vendor/golang.org/x/net/icmp/dstunreach.go b/vendor/golang.org/x/net/icmp/dstunreach.go new file mode 100644 index 00000000..8615cf54 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/dstunreach.go @@ -0,0 +1,59 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +// A DstUnreach represents an ICMP destination unreachable message +// body. +type DstUnreach struct { + Data []byte // data, known as original datagram field + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *DstUnreach) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, true, p.Data, p.Extensions) + return l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *DstUnreach) Marshal(proto int) ([]byte, error) { + var typ Type + switch proto { + case iana.ProtocolICMP: + typ = ipv4.ICMPTypeDestinationUnreachable + case iana.ProtocolIPv6ICMP: + typ = ipv6.ICMPTypeDestinationUnreachable + default: + return nil, errInvalidProtocol + } + if !validExtensions(typ, p.Extensions) { + return nil, errInvalidExtension + } + return marshalMultipartMessageBody(proto, true, p.Data, p.Extensions) +} + +// parseDstUnreach parses b as an ICMP destination unreachable message +// body. 
+func parseDstUnreach(proto int, typ Type, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &DstUnreach{} + var err error + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, typ, b) + if err != nil { + return nil, err + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/echo.go b/vendor/golang.org/x/net/icmp/echo.go new file mode 100644 index 00000000..b5918642 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/echo.go @@ -0,0 +1,173 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +// An Echo represents an ICMP echo request or reply message body. +type Echo struct { + ID int // identifier + Seq int // sequence number + Data []byte // data +} + +// Len implements the Len method of MessageBody interface. +func (p *Echo) Len(proto int) int { + if p == nil { + return 0 + } + return 4 + len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *Echo) Marshal(proto int) ([]byte, error) { + b := make([]byte, 4+len(p.Data)) + binary.BigEndian.PutUint16(b[:2], uint16(p.ID)) + binary.BigEndian.PutUint16(b[2:4], uint16(p.Seq)) + copy(b[4:], p.Data) + return b, nil +} + +// parseEcho parses b as an ICMP echo request or reply message body. +func parseEcho(proto int, _ Type, b []byte) (MessageBody, error) { + bodyLen := len(b) + if bodyLen < 4 { + return nil, errMessageTooShort + } + p := &Echo{ID: int(binary.BigEndian.Uint16(b[:2])), Seq: int(binary.BigEndian.Uint16(b[2:4]))} + if bodyLen > 4 { + p.Data = make([]byte, bodyLen-4) + copy(p.Data, b[4:]) + } + return p, nil +} + +// An ExtendedEchoRequest represents an ICMP extended echo request +// message body. 
+type ExtendedEchoRequest struct { + ID int // identifier + Seq int // sequence number + Local bool // must be true when identifying by name or index + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *ExtendedEchoRequest) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, false, nil, p.Extensions) + return l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *ExtendedEchoRequest) Marshal(proto int) ([]byte, error) { + var typ Type + switch proto { + case iana.ProtocolICMP: + typ = ipv4.ICMPTypeExtendedEchoRequest + case iana.ProtocolIPv6ICMP: + typ = ipv6.ICMPTypeExtendedEchoRequest + default: + return nil, errInvalidProtocol + } + if !validExtensions(typ, p.Extensions) { + return nil, errInvalidExtension + } + b, err := marshalMultipartMessageBody(proto, false, nil, p.Extensions) + if err != nil { + return nil, err + } + binary.BigEndian.PutUint16(b[:2], uint16(p.ID)) + b[2] = byte(p.Seq) + if p.Local { + b[3] |= 0x01 + } + return b, nil +} + +// parseExtendedEchoRequest parses b as an ICMP extended echo request +// message body. +func parseExtendedEchoRequest(proto int, typ Type, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &ExtendedEchoRequest{ID: int(binary.BigEndian.Uint16(b[:2])), Seq: int(b[2])} + if b[3]&0x01 != 0 { + p.Local = true + } + var err error + _, p.Extensions, err = parseMultipartMessageBody(proto, typ, b) + if err != nil { + return nil, err + } + return p, nil +} + +// An ExtendedEchoReply represents an ICMP extended echo reply message +// body. 
+type ExtendedEchoReply struct { + ID int // identifier + Seq int // sequence number + State int // 3-bit state working together with Message.Code + Active bool // probed interface is active + IPv4 bool // probed interface runs IPv4 + IPv6 bool // probed interface runs IPv6 +} + +// Len implements the Len method of MessageBody interface. +func (p *ExtendedEchoReply) Len(proto int) int { + if p == nil { + return 0 + } + return 4 +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *ExtendedEchoReply) Marshal(proto int) ([]byte, error) { + b := make([]byte, 4) + binary.BigEndian.PutUint16(b[:2], uint16(p.ID)) + b[2] = byte(p.Seq) + b[3] = byte(p.State<<5) & 0xe0 + if p.Active { + b[3] |= 0x04 + } + if p.IPv4 { + b[3] |= 0x02 + } + if p.IPv6 { + b[3] |= 0x01 + } + return b, nil +} + +// parseExtendedEchoReply parses b as an ICMP extended echo reply +// message body. +func parseExtendedEchoReply(proto int, _ Type, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &ExtendedEchoReply{ + ID: int(binary.BigEndian.Uint16(b[:2])), + Seq: int(b[2]), + State: int(b[3]) >> 5, + } + if b[3]&0x04 != 0 { + p.Active = true + } + if b[3]&0x02 != 0 { + p.IPv4 = true + } + if b[3]&0x01 != 0 { + p.IPv6 = true + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/endpoint.go b/vendor/golang.org/x/net/icmp/endpoint.go new file mode 100644 index 00000000..4841bdd2 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/endpoint.go @@ -0,0 +1,113 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "net" + "runtime" + "time" + + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +var _ net.PacketConn = &PacketConn{} + +// A PacketConn represents a packet network endpoint that uses either +// ICMPv4 or ICMPv6. 
+type PacketConn struct { + c net.PacketConn + p4 *ipv4.PacketConn + p6 *ipv6.PacketConn +} + +func (c *PacketConn) ok() bool { return c != nil && c.c != nil } + +// IPv4PacketConn returns the ipv4.PacketConn of c. +// It returns nil when c is not created as the endpoint for ICMPv4. +func (c *PacketConn) IPv4PacketConn() *ipv4.PacketConn { + if !c.ok() { + return nil + } + return c.p4 +} + +// IPv6PacketConn returns the ipv6.PacketConn of c. +// It returns nil when c is not created as the endpoint for ICMPv6. +func (c *PacketConn) IPv6PacketConn() *ipv6.PacketConn { + if !c.ok() { + return nil + } + return c.p6 +} + +// ReadFrom reads an ICMP message from the connection. +func (c *PacketConn) ReadFrom(b []byte) (int, net.Addr, error) { + if !c.ok() { + return 0, nil, errInvalidConn + } + // Please be informed that ipv4.NewPacketConn enables + // IP_STRIPHDR option by default on Darwin. + // See golang.org/issue/9395 for further information. + if runtime.GOOS == "darwin" && c.p4 != nil { + n, _, peer, err := c.p4.ReadFrom(b) + return n, peer, err + } + return c.c.ReadFrom(b) +} + +// WriteTo writes the ICMP message b to dst. +// The provided dst must be net.UDPAddr when c is a non-privileged +// datagram-oriented ICMP endpoint. +// Otherwise it must be net.IPAddr. +func (c *PacketConn) WriteTo(b []byte, dst net.Addr) (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + return c.c.WriteTo(b, dst) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.ok() { + return errInvalidConn + } + return c.c.Close() +} + +// LocalAddr returns the local network address. +func (c *PacketConn) LocalAddr() net.Addr { + if !c.ok() { + return nil + } + return c.c.LocalAddr() +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. 
+func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.ok() { + return errInvalidConn + } + return c.c.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.ok() { + return errInvalidConn + } + return c.c.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.ok() { + return errInvalidConn + } + return c.c.SetWriteDeadline(t) +} diff --git a/vendor/golang.org/x/net/icmp/extension.go b/vendor/golang.org/x/net/icmp/extension.go new file mode 100644 index 00000000..eeb85c3f --- /dev/null +++ b/vendor/golang.org/x/net/icmp/extension.go @@ -0,0 +1,170 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +// An Extension represents an ICMP extension. +type Extension interface { + // Len returns the length of ICMP extension. + // The provided proto must be either the ICMPv4 or ICMPv6 + // protocol number. + Len(proto int) int + + // Marshal returns the binary encoding of ICMP extension. + // The provided proto must be either the ICMPv4 or ICMPv6 + // protocol number. + Marshal(proto int) ([]byte, error) +} + +const extensionVersion = 2 + +func validExtensionHeader(b []byte) bool { + v := int(b[0]&0xf0) >> 4 + s := binary.BigEndian.Uint16(b[2:4]) + if s != 0 { + s = checksum(b) + } + if v != extensionVersion || s != 0 { + return false + } + return true +} + +// parseExtensions parses b as a list of ICMP extensions. +// The length attribute l must be the length attribute field in +// received icmp messages. 
+// +// It will return a list of ICMP extensions and an adjusted length +// attribute that represents the length of the padded original +// datagram field. Otherwise, it returns an error. +func parseExtensions(typ Type, b []byte, l int) ([]Extension, int, error) { + // Still a lot of non-RFC 4884 compliant implementations are + // out there. Set the length attribute l to 128 when it looks + // inappropriate for backwards compatibility. + // + // A minimal extension at least requires 8 octets; 4 octets + // for an extension header, and 4 octets for a single object + // header. + // + // See RFC 4884 for further information. + switch typ { + case ipv4.ICMPTypeExtendedEchoRequest, ipv6.ICMPTypeExtendedEchoRequest: + if len(b) < 8 || !validExtensionHeader(b) { + return nil, -1, errNoExtension + } + l = 0 + default: + if 128 > l || l+8 > len(b) { + l = 128 + } + if l+8 > len(b) { + return nil, -1, errNoExtension + } + if !validExtensionHeader(b[l:]) { + if l == 128 { + return nil, -1, errNoExtension + } + l = 128 + if !validExtensionHeader(b[l:]) { + return nil, -1, errNoExtension + } + } + } + var exts []Extension + for b = b[l+4:]; len(b) >= 4; { + ol := int(binary.BigEndian.Uint16(b[:2])) + if 4 > ol || ol > len(b) { + break + } + switch b[2] { + case classMPLSLabelStack: + ext, err := parseMPLSLabelStack(b[:ol]) + if err != nil { + return nil, -1, err + } + exts = append(exts, ext) + case classInterfaceInfo: + ext, err := parseInterfaceInfo(b[:ol]) + if err != nil { + return nil, -1, err + } + exts = append(exts, ext) + case classInterfaceIdent: + ext, err := parseInterfaceIdent(b[:ol]) + if err != nil { + return nil, -1, err + } + exts = append(exts, ext) + default: + ext := &RawExtension{Data: make([]byte, ol)} + copy(ext.Data, b[:ol]) + exts = append(exts, ext) + } + b = b[ol:] + } + return exts, l, nil +} + +func validExtensions(typ Type, exts []Extension) bool { + switch typ { + case ipv4.ICMPTypeDestinationUnreachable, ipv4.ICMPTypeTimeExceeded, 
ipv4.ICMPTypeParameterProblem, + ipv6.ICMPTypeDestinationUnreachable, ipv6.ICMPTypeTimeExceeded: + for i := range exts { + switch exts[i].(type) { + case *MPLSLabelStack, *InterfaceInfo, *RawExtension: + default: + return false + } + } + return true + case ipv4.ICMPTypeExtendedEchoRequest, ipv6.ICMPTypeExtendedEchoRequest: + var n int + for i := range exts { + switch exts[i].(type) { + case *InterfaceIdent: + n++ + case *RawExtension: + default: + return false + } + } + // Not a single InterfaceIdent object or a combo of + // RawExtension and InterfaceIdent objects is not + // allowed. + if n == 1 && len(exts) > 1 { + return false + } + return true + default: + return false + } +} + +// A RawExtension represents a raw extension. +// +// A raw extension is excluded from message processing and can be used +// to construct applications such as protocol conformance testing. +type RawExtension struct { + Data []byte // data +} + +// Len implements the Len method of Extension interface. +func (p *RawExtension) Len(proto int) int { + if p == nil { + return 0 + } + return len(p.Data) +} + +// Marshal implements the Marshal method of Extension interface. +func (p *RawExtension) Marshal(proto int) ([]byte, error) { + return p.Data, nil +} diff --git a/vendor/golang.org/x/net/icmp/helper_posix.go b/vendor/golang.org/x/net/icmp/helper_posix.go new file mode 100644 index 00000000..75e75575 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/helper_posix.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package icmp + +import ( + "net" + "strconv" + "syscall" +) + +func sockaddr(family int, address string) (syscall.Sockaddr, error) { + switch family { + case syscall.AF_INET: + a, err := net.ResolveIPAddr("ip4", address) + if err != nil { + return nil, err + } + if len(a.IP) == 0 { + a.IP = net.IPv4zero + } + if a.IP = a.IP.To4(); a.IP == nil { + return nil, net.InvalidAddrError("non-ipv4 address") + } + sa := &syscall.SockaddrInet4{} + copy(sa.Addr[:], a.IP) + return sa, nil + case syscall.AF_INET6: + a, err := net.ResolveIPAddr("ip6", address) + if err != nil { + return nil, err + } + if len(a.IP) == 0 { + a.IP = net.IPv6unspecified + } + if a.IP.Equal(net.IPv4zero) { + a.IP = net.IPv6unspecified + } + if a.IP = a.IP.To16(); a.IP == nil || a.IP.To4() != nil { + return nil, net.InvalidAddrError("non-ipv6 address") + } + sa := &syscall.SockaddrInet6{ZoneId: zoneToUint32(a.Zone)} + copy(sa.Addr[:], a.IP) + return sa, nil + default: + return nil, net.InvalidAddrError("unexpected family") + } +} + +func zoneToUint32(zone string) uint32 { + if zone == "" { + return 0 + } + if ifi, err := net.InterfaceByName(zone); err == nil { + return uint32(ifi.Index) + } + n, err := strconv.Atoi(zone) + if err != nil { + return 0 + } + return uint32(n) +} + +func last(s string, b byte) int { + i := len(s) + for i--; i >= 0; i-- { + if s[i] == b { + break + } + } + return i +} diff --git a/vendor/golang.org/x/net/icmp/interface.go b/vendor/golang.org/x/net/icmp/interface.go new file mode 100644 index 00000000..b3dd72fb --- /dev/null +++ b/vendor/golang.org/x/net/icmp/interface.go @@ -0,0 +1,322 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package icmp + +import ( + "encoding/binary" + "net" + "strings" + + "golang.org/x/net/internal/iana" +) + +const ( + classInterfaceInfo = 2 +) + +const ( + attrMTU = 1 << iota + attrName + attrIPAddr + attrIfIndex +) + +// An InterfaceInfo represents interface and next-hop identification. +type InterfaceInfo struct { + Class int // extension object class number + Type int // extension object sub-type + Interface *net.Interface + Addr *net.IPAddr +} + +func (ifi *InterfaceInfo) nameLen() int { + if len(ifi.Interface.Name) > 63 { + return 64 + } + l := 1 + len(ifi.Interface.Name) + return (l + 3) &^ 3 +} + +func (ifi *InterfaceInfo) attrsAndLen(proto int) (attrs, l int) { + l = 4 + if ifi.Interface != nil && ifi.Interface.Index > 0 { + attrs |= attrIfIndex + l += 4 + if len(ifi.Interface.Name) > 0 { + attrs |= attrName + l += ifi.nameLen() + } + if ifi.Interface.MTU > 0 { + attrs |= attrMTU + l += 4 + } + } + if ifi.Addr != nil { + switch proto { + case iana.ProtocolICMP: + if ifi.Addr.IP.To4() != nil { + attrs |= attrIPAddr + l += 4 + net.IPv4len + } + case iana.ProtocolIPv6ICMP: + if ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil { + attrs |= attrIPAddr + l += 4 + net.IPv6len + } + } + } + return +} + +// Len implements the Len method of Extension interface. +func (ifi *InterfaceInfo) Len(proto int) int { + _, l := ifi.attrsAndLen(proto) + return l +} + +// Marshal implements the Marshal method of Extension interface. 
+func (ifi *InterfaceInfo) Marshal(proto int) ([]byte, error) { + attrs, l := ifi.attrsAndLen(proto) + b := make([]byte, l) + if err := ifi.marshal(proto, b, attrs, l); err != nil { + return nil, err + } + return b, nil +} + +func (ifi *InterfaceInfo) marshal(proto int, b []byte, attrs, l int) error { + binary.BigEndian.PutUint16(b[:2], uint16(l)) + b[2], b[3] = classInterfaceInfo, byte(ifi.Type) + for b = b[4:]; len(b) > 0 && attrs != 0; { + switch { + case attrs&attrIfIndex != 0: + b = ifi.marshalIfIndex(proto, b) + attrs &^= attrIfIndex + case attrs&attrIPAddr != 0: + b = ifi.marshalIPAddr(proto, b) + attrs &^= attrIPAddr + case attrs&attrName != 0: + b = ifi.marshalName(proto, b) + attrs &^= attrName + case attrs&attrMTU != 0: + b = ifi.marshalMTU(proto, b) + attrs &^= attrMTU + } + } + return nil +} + +func (ifi *InterfaceInfo) marshalIfIndex(proto int, b []byte) []byte { + binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.Index)) + return b[4:] +} + +func (ifi *InterfaceInfo) parseIfIndex(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + ifi.Interface.Index = int(binary.BigEndian.Uint32(b[:4])) + return b[4:], nil +} + +func (ifi *InterfaceInfo) marshalIPAddr(proto int, b []byte) []byte { + switch proto { + case iana.ProtocolICMP: + binary.BigEndian.PutUint16(b[:2], uint16(iana.AddrFamilyIPv4)) + copy(b[4:4+net.IPv4len], ifi.Addr.IP.To4()) + b = b[4+net.IPv4len:] + case iana.ProtocolIPv6ICMP: + binary.BigEndian.PutUint16(b[:2], uint16(iana.AddrFamilyIPv6)) + copy(b[4:4+net.IPv6len], ifi.Addr.IP.To16()) + b = b[4+net.IPv6len:] + } + return b +} + +func (ifi *InterfaceInfo) parseIPAddr(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + afi := int(binary.BigEndian.Uint16(b[:2])) + b = b[4:] + switch afi { + case iana.AddrFamilyIPv4: + if len(b) < net.IPv4len { + return nil, errMessageTooShort + } + ifi.Addr.IP = make(net.IP, net.IPv4len) + copy(ifi.Addr.IP, b[:net.IPv4len]) + b = 
b[net.IPv4len:] + case iana.AddrFamilyIPv6: + if len(b) < net.IPv6len { + return nil, errMessageTooShort + } + ifi.Addr.IP = make(net.IP, net.IPv6len) + copy(ifi.Addr.IP, b[:net.IPv6len]) + b = b[net.IPv6len:] + } + return b, nil +} + +func (ifi *InterfaceInfo) marshalName(proto int, b []byte) []byte { + l := byte(ifi.nameLen()) + b[0] = l + copy(b[1:], []byte(ifi.Interface.Name)) + return b[l:] +} + +func (ifi *InterfaceInfo) parseName(b []byte) ([]byte, error) { + if 4 > len(b) || len(b) < int(b[0]) { + return nil, errMessageTooShort + } + l := int(b[0]) + if l%4 != 0 || 4 > l || l > 64 { + return nil, errInvalidExtension + } + var name [63]byte + copy(name[:], b[1:l]) + ifi.Interface.Name = strings.Trim(string(name[:]), "\000") + return b[l:], nil +} + +func (ifi *InterfaceInfo) marshalMTU(proto int, b []byte) []byte { + binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.MTU)) + return b[4:] +} + +func (ifi *InterfaceInfo) parseMTU(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + ifi.Interface.MTU = int(binary.BigEndian.Uint32(b[:4])) + return b[4:], nil +} + +func parseInterfaceInfo(b []byte) (Extension, error) { + ifi := &InterfaceInfo{ + Class: int(b[2]), + Type: int(b[3]), + } + if ifi.Type&(attrIfIndex|attrName|attrMTU) != 0 { + ifi.Interface = &net.Interface{} + } + if ifi.Type&attrIPAddr != 0 { + ifi.Addr = &net.IPAddr{} + } + attrs := ifi.Type & (attrIfIndex | attrIPAddr | attrName | attrMTU) + for b = b[4:]; len(b) > 0 && attrs != 0; { + var err error + switch { + case attrs&attrIfIndex != 0: + b, err = ifi.parseIfIndex(b) + attrs &^= attrIfIndex + case attrs&attrIPAddr != 0: + b, err = ifi.parseIPAddr(b) + attrs &^= attrIPAddr + case attrs&attrName != 0: + b, err = ifi.parseName(b) + attrs &^= attrName + case attrs&attrMTU != 0: + b, err = ifi.parseMTU(b) + attrs &^= attrMTU + } + if err != nil { + return nil, err + } + } + if ifi.Interface != nil && ifi.Interface.Name != "" && ifi.Addr != nil && 
ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil { + ifi.Addr.Zone = ifi.Interface.Name + } + return ifi, nil +} + +const ( + classInterfaceIdent = 3 + typeInterfaceByName = 1 + typeInterfaceByIndex = 2 + typeInterfaceByAddress = 3 +) + +// An InterfaceIdent represents interface identification. +type InterfaceIdent struct { + Class int // extension object class number + Type int // extension object sub-type + Name string // interface name + Index int // interface index + AFI int // address family identifier; see address family numbers in IANA registry + Addr []byte // address +} + +// Len implements the Len method of Extension interface. +func (ifi *InterfaceIdent) Len(_ int) int { + switch ifi.Type { + case typeInterfaceByName: + l := len(ifi.Name) + if l > 255 { + l = 255 + } + return 4 + (l+3)&^3 + case typeInterfaceByIndex: + return 4 + 4 + case typeInterfaceByAddress: + return 4 + 4 + (len(ifi.Addr)+3)&^3 + default: + return 4 + } +} + +// Marshal implements the Marshal method of Extension interface. 
+func (ifi *InterfaceIdent) Marshal(proto int) ([]byte, error) { + b := make([]byte, ifi.Len(proto)) + if err := ifi.marshal(proto, b); err != nil { + return nil, err + } + return b, nil +} + +func (ifi *InterfaceIdent) marshal(proto int, b []byte) error { + l := ifi.Len(proto) + binary.BigEndian.PutUint16(b[:2], uint16(l)) + b[2], b[3] = classInterfaceIdent, byte(ifi.Type) + switch ifi.Type { + case typeInterfaceByName: + copy(b[4:], ifi.Name) + case typeInterfaceByIndex: + binary.BigEndian.PutUint32(b[4:4+4], uint32(ifi.Index)) + case typeInterfaceByAddress: + binary.BigEndian.PutUint16(b[4:4+2], uint16(ifi.AFI)) + b[4+2] = byte(len(ifi.Addr)) + copy(b[4+4:], ifi.Addr) + } + return nil +} + +func parseInterfaceIdent(b []byte) (Extension, error) { + ifi := &InterfaceIdent{ + Class: int(b[2]), + Type: int(b[3]), + } + switch ifi.Type { + case typeInterfaceByName: + ifi.Name = strings.Trim(string(b[4:]), "\x00") + case typeInterfaceByIndex: + if len(b[4:]) < 4 { + return nil, errInvalidExtension + } + ifi.Index = int(binary.BigEndian.Uint32(b[4 : 4+4])) + case typeInterfaceByAddress: + if len(b[4:]) < 4 { + return nil, errInvalidExtension + } + ifi.AFI = int(binary.BigEndian.Uint16(b[4 : 4+2])) + l := int(b[4+2]) + if len(b[4+4:]) < l { + return nil, errInvalidExtension + } + ifi.Addr = make([]byte, l) + copy(ifi.Addr, b[4+4:]) + } + return ifi, nil +} diff --git a/vendor/golang.org/x/net/icmp/ipv4.go b/vendor/golang.org/x/net/icmp/ipv4.go new file mode 100644 index 00000000..c4629240 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ipv4.go @@ -0,0 +1,69 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "net" + "runtime" + + "golang.org/x/net/internal/socket" + "golang.org/x/net/ipv4" +) + +// freebsdVersion is set in sys_freebsd.go. 
+// See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. +var freebsdVersion uint32 + +// ParseIPv4Header returns the IPv4 header of the IPv4 packet that +// triggered an ICMP error message. +// This is found in the Data field of the ICMP error message body. +// +// The provided b must be in the format used by a raw ICMP socket on +// the local system. +// This may differ from the wire format, and the format used by a raw +// IP socket, depending on the system. +// +// To parse an IPv6 header, use ipv6.ParseHeader. +func ParseIPv4Header(b []byte) (*ipv4.Header, error) { + if len(b) < ipv4.HeaderLen { + return nil, errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + if hdrlen > len(b) { + return nil, errBufferTooShort + } + h := &ipv4.Header{ + Version: int(b[0] >> 4), + Len: hdrlen, + TOS: int(b[1]), + ID: int(binary.BigEndian.Uint16(b[4:6])), + FragOff: int(binary.BigEndian.Uint16(b[6:8])), + TTL: int(b[8]), + Protocol: int(b[9]), + Checksum: int(binary.BigEndian.Uint16(b[10:12])), + Src: net.IPv4(b[12], b[13], b[14], b[15]), + Dst: net.IPv4(b[16], b[17], b[18], b[19]), + } + switch runtime.GOOS { + case "darwin": + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + case "freebsd": + if freebsdVersion >= 1000000 { + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + } else { + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + } + default: + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + } + h.Flags = ipv4.HeaderFlags(h.FragOff&0xe000) >> 13 + h.FragOff = h.FragOff & 0x1fff + if hdrlen-ipv4.HeaderLen > 0 { + h.Options = make([]byte, hdrlen-ipv4.HeaderLen) + copy(h.Options, b[ipv4.HeaderLen:]) + } + return h, nil +} diff --git a/vendor/golang.org/x/net/icmp/ipv6.go b/vendor/golang.org/x/net/icmp/ipv6.go new file mode 100644 index 00000000..2e8cfeb1 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ipv6.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "net" + + "golang.org/x/net/internal/iana" +) + +const ipv6PseudoHeaderLen = 2*net.IPv6len + 8 + +// IPv6PseudoHeader returns an IPv6 pseudo header for checksum +// calculation. +func IPv6PseudoHeader(src, dst net.IP) []byte { + b := make([]byte, ipv6PseudoHeaderLen) + copy(b, src.To16()) + copy(b[net.IPv6len:], dst.To16()) + b[len(b)-1] = byte(iana.ProtocolIPv6ICMP) + return b +} diff --git a/vendor/golang.org/x/net/icmp/listen_posix.go b/vendor/golang.org/x/net/icmp/listen_posix.go new file mode 100644 index 00000000..f0f1f2ff --- /dev/null +++ b/vendor/golang.org/x/net/icmp/listen_posix.go @@ -0,0 +1,103 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package icmp + +import ( + "net" + "os" + "runtime" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +const sysIP_STRIPHDR = 0x17 // for now only darwin supports this option + +// ListenPacket listens for incoming ICMP packets addressed to +// address. See net.Dial for the syntax of address. +// +// For non-privileged datagram-oriented ICMP endpoints, network must +// be "udp4" or "udp6". The endpoint allows to read, write a few +// limited ICMP messages such as echo request and echo reply. +// Currently only Darwin and Linux support this. +// +// Examples: +// ListenPacket("udp4", "192.168.0.1") +// ListenPacket("udp4", "0.0.0.0") +// ListenPacket("udp6", "fe80::1%en0") +// ListenPacket("udp6", "::") +// +// For privileged raw ICMP endpoints, network must be "ip4" or "ip6" +// followed by a colon and an ICMP protocol number or name. 
+// +// Examples: +// ListenPacket("ip4:icmp", "192.168.0.1") +// ListenPacket("ip4:1", "0.0.0.0") +// ListenPacket("ip6:ipv6-icmp", "fe80::1%en0") +// ListenPacket("ip6:58", "::") +func ListenPacket(network, address string) (*PacketConn, error) { + var family, proto int + switch network { + case "udp4": + family, proto = syscall.AF_INET, iana.ProtocolICMP + case "udp6": + family, proto = syscall.AF_INET6, iana.ProtocolIPv6ICMP + default: + i := last(network, ':') + if i < 0 { + i = len(network) + } + switch network[:i] { + case "ip4": + proto = iana.ProtocolICMP + case "ip6": + proto = iana.ProtocolIPv6ICMP + } + } + var cerr error + var c net.PacketConn + switch family { + case syscall.AF_INET, syscall.AF_INET6: + s, err := syscall.Socket(family, syscall.SOCK_DGRAM, proto) + if err != nil { + return nil, os.NewSyscallError("socket", err) + } + if runtime.GOOS == "darwin" && family == syscall.AF_INET { + if err := syscall.SetsockoptInt(s, iana.ProtocolIP, sysIP_STRIPHDR, 1); err != nil { + syscall.Close(s) + return nil, os.NewSyscallError("setsockopt", err) + } + } + sa, err := sockaddr(family, address) + if err != nil { + syscall.Close(s) + return nil, err + } + if err := syscall.Bind(s, sa); err != nil { + syscall.Close(s) + return nil, os.NewSyscallError("bind", err) + } + f := os.NewFile(uintptr(s), "datagram-oriented icmp") + c, cerr = net.FilePacketConn(f) + f.Close() + default: + c, cerr = net.ListenPacket(network, address) + } + if cerr != nil { + return nil, cerr + } + switch proto { + case iana.ProtocolICMP: + return &PacketConn{c: c, p4: ipv4.NewPacketConn(c)}, nil + case iana.ProtocolIPv6ICMP: + return &PacketConn{c: c, p6: ipv6.NewPacketConn(c)}, nil + default: + return &PacketConn{c: c}, nil + } +} diff --git a/vendor/golang.org/x/net/icmp/listen_stub.go b/vendor/golang.org/x/net/icmp/listen_stub.go new file mode 100644 index 00000000..3acd91dc --- /dev/null +++ b/vendor/golang.org/x/net/icmp/listen_stub.go @@ -0,0 +1,33 @@ +// Copyright 2014 The Go 
Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package icmp + +// ListenPacket listens for incoming ICMP packets addressed to +// address. See net.Dial for the syntax of address. +// +// For non-privileged datagram-oriented ICMP endpoints, network must +// be "udp4" or "udp6". The endpoint allows to read, write a few +// limited ICMP messages such as echo request and echo reply. +// Currently only Darwin and Linux support this. +// +// Examples: +// ListenPacket("udp4", "192.168.0.1") +// ListenPacket("udp4", "0.0.0.0") +// ListenPacket("udp6", "fe80::1%en0") +// ListenPacket("udp6", "::") +// +// For privileged raw ICMP endpoints, network must be "ip4" or "ip6" +// followed by a colon and an ICMP protocol number or name. +// +// Examples: +// ListenPacket("ip4:icmp", "192.168.0.1") +// ListenPacket("ip4:1", "0.0.0.0") +// ListenPacket("ip6:ipv6-icmp", "fe80::1%en0") +// ListenPacket("ip6:58", "::") +func ListenPacket(network, address string) (*PacketConn, error) { + return nil, errNotImplemented +} diff --git a/vendor/golang.org/x/net/icmp/message.go b/vendor/golang.org/x/net/icmp/message.go new file mode 100644 index 00000000..40db65d0 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/message.go @@ -0,0 +1,162 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package icmp provides basic functions for the manipulation of +// messages used in the Internet Control Message Protocols, +// ICMPv4 and ICMPv6. +// +// ICMPv4 and ICMPv6 are defined in RFC 792 and RFC 4443. +// Multi-part message support for ICMP is defined in RFC 4884. +// ICMP extensions for MPLS are defined in RFC 4950. +// ICMP extensions for interface and next-hop identification are +// defined in RFC 5837. 
+// PROBE: A utility for probing interfaces is defined in RFC 8335. +package icmp // import "golang.org/x/net/icmp" + +import ( + "encoding/binary" + "errors" + "net" + "runtime" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +// BUG(mikio): This package is not implemented on JS, NaCl and Plan 9. + +var ( + errInvalidConn = errors.New("invalid connection") + errInvalidProtocol = errors.New("invalid protocol") + errMessageTooShort = errors.New("message too short") + errHeaderTooShort = errors.New("header too short") + errBufferTooShort = errors.New("buffer too short") + errInvalidBody = errors.New("invalid body") + errNoExtension = errors.New("no extension") + errInvalidExtension = errors.New("invalid extension") + errNotImplemented = errors.New("not implemented on " + runtime.GOOS + "/" + runtime.GOARCH) +) + +func checksum(b []byte) uint16 { + csumcv := len(b) - 1 // checksum coverage + s := uint32(0) + for i := 0; i < csumcv; i += 2 { + s += uint32(b[i+1])<<8 | uint32(b[i]) + } + if csumcv&1 == 0 { + s += uint32(b[csumcv]) + } + s = s>>16 + s&0xffff + s = s + s>>16 + return ^uint16(s) +} + +// A Type represents an ICMP message type. +type Type interface { + Protocol() int +} + +// A Message represents an ICMP message. +type Message struct { + Type Type // type, either ipv4.ICMPType or ipv6.ICMPType + Code int // code + Checksum int // checksum + Body MessageBody // body +} + +// Marshal returns the binary encoding of the ICMP message m. +// +// For an ICMPv4 message, the returned message always contains the +// calculated checksum field. +// +// For an ICMPv6 message, the returned message contains the calculated +// checksum field when psh is not nil, otherwise the kernel will +// compute the checksum field during the message transmission. +// When psh is not nil, it must be the pseudo header for IPv6. 
+func (m *Message) Marshal(psh []byte) ([]byte, error) { + var mtype byte + switch typ := m.Type.(type) { + case ipv4.ICMPType: + mtype = byte(typ) + case ipv6.ICMPType: + mtype = byte(typ) + default: + return nil, errInvalidProtocol + } + b := []byte{mtype, byte(m.Code), 0, 0} + proto := m.Type.Protocol() + if proto == iana.ProtocolIPv6ICMP && psh != nil { + b = append(psh, b...) + } + if m.Body != nil && m.Body.Len(proto) != 0 { + mb, err := m.Body.Marshal(proto) + if err != nil { + return nil, err + } + b = append(b, mb...) + } + if proto == iana.ProtocolIPv6ICMP { + if psh == nil { // cannot calculate checksum here + return b, nil + } + off, l := 2*net.IPv6len, len(b)-len(psh) + binary.BigEndian.PutUint32(b[off:off+4], uint32(l)) + } + s := checksum(b) + // Place checksum back in header; using ^= avoids the + // assumption the checksum bytes are zero. + b[len(psh)+2] ^= byte(s) + b[len(psh)+3] ^= byte(s >> 8) + return b[len(psh):], nil +} + +var parseFns = map[Type]func(int, Type, []byte) (MessageBody, error){ + ipv4.ICMPTypeDestinationUnreachable: parseDstUnreach, + ipv4.ICMPTypeTimeExceeded: parseTimeExceeded, + ipv4.ICMPTypeParameterProblem: parseParamProb, + + ipv4.ICMPTypeEcho: parseEcho, + ipv4.ICMPTypeEchoReply: parseEcho, + ipv4.ICMPTypeExtendedEchoRequest: parseExtendedEchoRequest, + ipv4.ICMPTypeExtendedEchoReply: parseExtendedEchoReply, + + ipv6.ICMPTypeDestinationUnreachable: parseDstUnreach, + ipv6.ICMPTypePacketTooBig: parsePacketTooBig, + ipv6.ICMPTypeTimeExceeded: parseTimeExceeded, + ipv6.ICMPTypeParameterProblem: parseParamProb, + + ipv6.ICMPTypeEchoRequest: parseEcho, + ipv6.ICMPTypeEchoReply: parseEcho, + ipv6.ICMPTypeExtendedEchoRequest: parseExtendedEchoRequest, + ipv6.ICMPTypeExtendedEchoReply: parseExtendedEchoReply, +} + +// ParseMessage parses b as an ICMP message. +// The provided proto must be either the ICMPv4 or ICMPv6 protocol +// number. 
+func ParseMessage(proto int, b []byte) (*Message, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + var err error + m := &Message{Code: int(b[1]), Checksum: int(binary.BigEndian.Uint16(b[2:4]))} + switch proto { + case iana.ProtocolICMP: + m.Type = ipv4.ICMPType(b[0]) + case iana.ProtocolIPv6ICMP: + m.Type = ipv6.ICMPType(b[0]) + default: + return nil, errInvalidProtocol + } + if fn, ok := parseFns[m.Type]; !ok { + m.Body, err = parseRawBody(proto, b[4:]) + } else { + m.Body, err = fn(proto, m.Type, b[4:]) + } + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/icmp/messagebody.go b/vendor/golang.org/x/net/icmp/messagebody.go new file mode 100644 index 00000000..e2d9bfa0 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/messagebody.go @@ -0,0 +1,52 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +// A MessageBody represents an ICMP message body. +type MessageBody interface { + // Len returns the length of ICMP message body. + // The provided proto must be either the ICMPv4 or ICMPv6 + // protocol number. + Len(proto int) int + + // Marshal returns the binary encoding of ICMP message body. + // The provided proto must be either the ICMPv4 or ICMPv6 + // protocol number. + Marshal(proto int) ([]byte, error) +} + +// A RawBody represents a raw message body. +// +// A raw message body is excluded from message processing and can be +// used to construct applications such as protocol conformance +// testing. +type RawBody struct { + Data []byte // data +} + +// Len implements the Len method of MessageBody interface. +func (p *RawBody) Len(proto int) int { + if p == nil { + return 0 + } + return len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. 
+func (p *RawBody) Marshal(proto int) ([]byte, error) { + return p.Data, nil +} + +// parseRawBody parses b as an ICMP message body. +func parseRawBody(proto int, b []byte) (MessageBody, error) { + p := &RawBody{Data: make([]byte, len(b))} + copy(p.Data, b) + return p, nil +} + +// A DefaultMessageBody represents the default message body. +// +// Deprecated: Use RawBody instead. +type DefaultMessageBody = RawBody diff --git a/vendor/golang.org/x/net/icmp/mpls.go b/vendor/golang.org/x/net/icmp/mpls.go new file mode 100644 index 00000000..f9f4841b --- /dev/null +++ b/vendor/golang.org/x/net/icmp/mpls.go @@ -0,0 +1,77 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// MPLSLabel represents an MPLS label stack entry. +type MPLSLabel struct { + Label int // label value + TC int // traffic class; formerly experimental use + S bool // bottom of stack + TTL int // time to live +} + +const ( + classMPLSLabelStack = 1 + typeIncomingMPLSLabelStack = 1 +) + +// MPLSLabelStack represents an MPLS label stack. +type MPLSLabelStack struct { + Class int // extension object class number + Type int // extension object sub-type + Labels []MPLSLabel +} + +// Len implements the Len method of Extension interface. +func (ls *MPLSLabelStack) Len(proto int) int { + return 4 + (4 * len(ls.Labels)) +} + +// Marshal implements the Marshal method of Extension interface. 
+func (ls *MPLSLabelStack) Marshal(proto int) ([]byte, error) { + b := make([]byte, ls.Len(proto)) + if err := ls.marshal(proto, b); err != nil { + return nil, err + } + return b, nil +} + +func (ls *MPLSLabelStack) marshal(proto int, b []byte) error { + l := ls.Len(proto) + binary.BigEndian.PutUint16(b[:2], uint16(l)) + b[2], b[3] = classMPLSLabelStack, typeIncomingMPLSLabelStack + off := 4 + for _, ll := range ls.Labels { + b[off], b[off+1], b[off+2] = byte(ll.Label>>12), byte(ll.Label>>4&0xff), byte(ll.Label<<4&0xf0) + b[off+2] |= byte(ll.TC << 1 & 0x0e) + if ll.S { + b[off+2] |= 0x1 + } + b[off+3] = byte(ll.TTL) + off += 4 + } + return nil +} + +func parseMPLSLabelStack(b []byte) (Extension, error) { + ls := &MPLSLabelStack{ + Class: int(b[2]), + Type: int(b[3]), + } + for b = b[4:]; len(b) >= 4; b = b[4:] { + ll := MPLSLabel{ + Label: int(b[0])<<12 | int(b[1])<<4 | int(b[2])>>4, + TC: int(b[2]&0x0e) >> 1, + TTL: int(b[3]), + } + if b[2]&0x1 != 0 { + ll.S = true + } + ls.Labels = append(ls.Labels, ll) + } + return ls, nil +} diff --git a/vendor/golang.org/x/net/icmp/multipart.go b/vendor/golang.org/x/net/icmp/multipart.go new file mode 100644 index 00000000..5f366755 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/multipart.go @@ -0,0 +1,129 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "golang.org/x/net/internal/iana" + +// multipartMessageBodyDataLen takes b as an original datagram and +// exts as extensions, and returns a required length for message body +// and a required length for a padded original datagram in wire +// format. 
a required length for a padded original datagram in wire
+func marshalMultipartMessageBody(proto int, withOrigDgram bool, data []byte, exts []Extension) ([]byte, error) { + bodyLen, dataLen := multipartMessageBodyDataLen(proto, withOrigDgram, data, exts) + b := make([]byte, bodyLen) + copy(b[4:], data) + if len(exts) > 0 { + b[4+dataLen] = byte(extensionVersion << 4) + off := 4 + dataLen + 4 // leading octets, data, extension header + for _, ext := range exts { + switch ext := ext.(type) { + case *MPLSLabelStack: + if err := ext.marshal(proto, b[off:]); err != nil { + return nil, err + } + off += ext.Len(proto) + case *InterfaceInfo: + attrs, l := ext.attrsAndLen(proto) + if err := ext.marshal(proto, b[off:], attrs, l); err != nil { + return nil, err + } + off += ext.Len(proto) + case *InterfaceIdent: + if err := ext.marshal(proto, b[off:]); err != nil { + return nil, err + } + off += ext.Len(proto) + case *RawExtension: + copy(b[off:], ext.Data) + off += ext.Len(proto) + } + } + s := checksum(b[4+dataLen:]) + b[4+dataLen+2] ^= byte(s) + b[4+dataLen+3] ^= byte(s >> 8) + if withOrigDgram { + switch proto { + case iana.ProtocolICMP: + b[1] = byte(dataLen / 4) + case iana.ProtocolIPv6ICMP: + b[0] = byte(dataLen / 8) + } + } + } + return b, nil +} + +// parseMultipartMessageBody parses b as either a non-multipart +// message body or a multipart message body. 
+func parseMultipartMessageBody(proto int, typ Type, b []byte) ([]byte, []Extension, error) { + var l int + switch proto { + case iana.ProtocolICMP: + l = 4 * int(b[1]) + case iana.ProtocolIPv6ICMP: + l = 8 * int(b[0]) + } + if len(b) == 4 { + return nil, nil, nil + } + exts, l, err := parseExtensions(typ, b[4:], l) + if err != nil { + l = len(b) - 4 + } + var data []byte + if l > 0 { + data = make([]byte, l) + copy(data, b[4:]) + } + return data, exts, nil +} diff --git a/vendor/golang.org/x/net/icmp/packettoobig.go b/vendor/golang.org/x/net/icmp/packettoobig.go new file mode 100644 index 00000000..afbf24f1 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/packettoobig.go @@ -0,0 +1,43 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// A PacketTooBig represents an ICMP packet too big message body. +type PacketTooBig struct { + MTU int // maximum transmission unit of the nexthop link + Data []byte // data, known as original datagram field +} + +// Len implements the Len method of MessageBody interface. +func (p *PacketTooBig) Len(proto int) int { + if p == nil { + return 0 + } + return 4 + len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *PacketTooBig) Marshal(proto int) ([]byte, error) { + b := make([]byte, 4+len(p.Data)) + binary.BigEndian.PutUint32(b[:4], uint32(p.MTU)) + copy(b[4:], p.Data) + return b, nil +} + +// parsePacketTooBig parses b as an ICMP packet too big message body. 
+func parsePacketTooBig(proto int, _ Type, b []byte) (MessageBody, error) { + bodyLen := len(b) + if bodyLen < 4 { + return nil, errMessageTooShort + } + p := &PacketTooBig{MTU: int(binary.BigEndian.Uint32(b[:4]))} + if bodyLen > 4 { + p.Data = make([]byte, bodyLen-4) + copy(p.Data, b[4:]) + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/paramprob.go b/vendor/golang.org/x/net/icmp/paramprob.go new file mode 100644 index 00000000..f16fd33e --- /dev/null +++ b/vendor/golang.org/x/net/icmp/paramprob.go @@ -0,0 +1,72 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" +) + +// A ParamProb represents an ICMP parameter problem message body. +type ParamProb struct { + Pointer uintptr // offset within the data where the error was detected + Data []byte // data, known as original datagram field + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *ParamProb) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, true, p.Data, p.Extensions) + return l +} + +// Marshal implements the Marshal method of MessageBody interface. 
+func (p *ParamProb) Marshal(proto int) ([]byte, error) { + switch proto { + case iana.ProtocolICMP: + if !validExtensions(ipv4.ICMPTypeParameterProblem, p.Extensions) { + return nil, errInvalidExtension + } + b, err := marshalMultipartMessageBody(proto, true, p.Data, p.Extensions) + if err != nil { + return nil, err + } + b[0] = byte(p.Pointer) + return b, nil + case iana.ProtocolIPv6ICMP: + b := make([]byte, p.Len(proto)) + binary.BigEndian.PutUint32(b[:4], uint32(p.Pointer)) + copy(b[4:], p.Data) + return b, nil + default: + return nil, errInvalidProtocol + } +} + +// parseParamProb parses b as an ICMP parameter problem message body. +func parseParamProb(proto int, typ Type, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &ParamProb{} + if proto == iana.ProtocolIPv6ICMP { + p.Pointer = uintptr(binary.BigEndian.Uint32(b[:4])) + p.Data = make([]byte, len(b)-4) + copy(p.Data, b[4:]) + return p, nil + } + p.Pointer = uintptr(b[0]) + var err error + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, typ, b) + if err != nil { + return nil, err + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/sys_freebsd.go b/vendor/golang.org/x/net/icmp/sys_freebsd.go new file mode 100644 index 00000000..c75f3dda --- /dev/null +++ b/vendor/golang.org/x/net/icmp/sys_freebsd.go @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "syscall" + +func init() { + freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate") +} diff --git a/vendor/golang.org/x/net/icmp/timeexceeded.go b/vendor/golang.org/x/net/icmp/timeexceeded.go new file mode 100644 index 00000000..ffa986fd --- /dev/null +++ b/vendor/golang.org/x/net/icmp/timeexceeded.go @@ -0,0 +1,57 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +// A TimeExceeded represents an ICMP time exceeded message body. +type TimeExceeded struct { + Data []byte // data, known as original datagram field + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *TimeExceeded) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, true, p.Data, p.Extensions) + return l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *TimeExceeded) Marshal(proto int) ([]byte, error) { + var typ Type + switch proto { + case iana.ProtocolICMP: + typ = ipv4.ICMPTypeTimeExceeded + case iana.ProtocolIPv6ICMP: + typ = ipv6.ICMPTypeTimeExceeded + default: + return nil, errInvalidProtocol + } + if !validExtensions(typ, p.Extensions) { + return nil, errInvalidExtension + } + return marshalMultipartMessageBody(proto, true, p.Data, p.Extensions) +} + +// parseTimeExceeded parses b as an ICMP time exceeded message body. 
+func parseTimeExceeded(proto int, typ Type, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &TimeExceeded{} + var err error + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, typ, b) + if err != nil { + return nil, err + } + return p, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index e6f71e4e..047dad33 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -6,6 +6,8 @@ github.com/MichaelTJones/pcg github.com/Shopify/sarama # github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d github.com/alecthomas/units +# github.com/alouca/gologger v0.0.0-20120904114645-7d4b7291de9c +github.com/alouca/gologger # github.com/apache/thrift v0.13.0 => github.com/m3db/thrift v0.0.0-20190820191926-05b5a2227fe4 github.com/apache/thrift/lib/go/thrift # github.com/beorn7/perks v1.0.1 @@ -14,8 +16,6 @@ github.com/beorn7/perks/quantile github.com/cespare/xxhash # github.com/cespare/xxhash/v2 v2.1.1 github.com/cespare/xxhash/v2 -# github.com/codegangsta/negroni v1.0.0 -github.com/codegangsta/negroni # github.com/coreos/go-oidc v2.2.1+incompatible github.com/coreos/go-oidc # github.com/coreos/go-semver v0.3.0 @@ -26,9 +26,6 @@ github.com/coreos/go-systemd/journal github.com/coreos/pkg/capnslog # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew -# github.com/dgryski/go-tsz v0.0.0-20180227144327-03b7d791f4fe -github.com/dgryski/go-tsz -github.com/dgryski/go-tsz/testdata # github.com/eapache/go-resiliency v1.2.0 github.com/eapache/go-resiliency/breaker # github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 @@ -47,8 +44,14 @@ github.com/ericchiang/k8s/runtime github.com/ericchiang/k8s/runtime/schema github.com/ericchiang/k8s/util/intstr github.com/ericchiang/k8s/watch/versioned +# github.com/freedomkk-qfeng/go-fastping v0.0.0-20160109021039-d7bb493dee3e +github.com/freedomkk-qfeng/go-fastping # github.com/fsnotify/fsnotify v1.4.7 github.com/fsnotify/fsnotify +# 
github.com/gaochao1/gosnmp v0.0.0-20150630013918-783a67a067fd +github.com/gaochao1/gosnmp +# github.com/gaochao1/sw v4.0.0+incompatible +github.com/gaochao1/sw # github.com/garyburd/redigo v1.6.2 github.com/garyburd/redigo/internal github.com/garyburd/redigo/redis @@ -64,6 +67,8 @@ github.com/gin-gonic/gin/binding github.com/gin-gonic/gin/internal/bytesconv github.com/gin-gonic/gin/internal/json github.com/gin-gonic/gin/render +# github.com/go-ping/ping v0.0.0-20201115131931-3300c582a663 +github.com/go-ping/ping # github.com/go-playground/locales v0.13.0 github.com/go-playground/locales github.com/go-playground/locales/currency @@ -129,10 +134,6 @@ github.com/google/go-github/v32/github github.com/google/go-querystring/query # github.com/google/uuid v1.1.2 github.com/google/uuid -# github.com/gorilla/mux v1.7.3 -github.com/gorilla/mux -# github.com/gosnmp/gosnmp v1.29.0 -github.com/gosnmp/gosnmp # github.com/hashicorp/go-uuid v1.0.2 github.com/hashicorp/go-uuid # github.com/hashicorp/golang-lru v0.5.4 @@ -165,21 +166,22 @@ github.com/influxdata/influxdb/pkg/escape github.com/influxdata/telegraf github.com/influxdata/telegraf/filter github.com/influxdata/telegraf/internal -github.com/influxdata/telegraf/internal/snmp github.com/influxdata/telegraf/metric github.com/influxdata/telegraf/plugins/common/tls github.com/influxdata/telegraf/plugins/inputs +github.com/influxdata/telegraf/plugins/inputs/dns_query github.com/influxdata/telegraf/plugins/inputs/elasticsearch github.com/influxdata/telegraf/plugins/inputs/github +github.com/influxdata/telegraf/plugins/inputs/http_response github.com/influxdata/telegraf/plugins/inputs/mongodb github.com/influxdata/telegraf/plugins/inputs/mysql github.com/influxdata/telegraf/plugins/inputs/mysql/v1 github.com/influxdata/telegraf/plugins/inputs/mysql/v2 +github.com/influxdata/telegraf/plugins/inputs/net_response github.com/influxdata/telegraf/plugins/inputs/nginx github.com/influxdata/telegraf/plugins/inputs/prometheus 
github.com/influxdata/telegraf/plugins/inputs/rabbitmq github.com/influxdata/telegraf/plugins/inputs/redis -github.com/influxdata/telegraf/plugins/inputs/snmp github.com/influxdata/telegraf/plugins/inputs/tengine github.com/influxdata/telegraf/plugins/inputs/zookeeper github.com/influxdata/telegraf/plugins/parsers/json @@ -187,8 +189,6 @@ github.com/influxdata/telegraf/plugins/parsers/prometheus github.com/influxdata/telegraf/plugins/parsers/prometheus/common github.com/influxdata/telegraf/selfstat github.com/influxdata/telegraf/testutil -# github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 -github.com/influxdata/wlog # github.com/jcmturner/gofork v1.0.0 github.com/jcmturner/gofork/encoding/asn1 github.com/jcmturner/gofork/x/crypto/pbkdf2 @@ -362,6 +362,8 @@ github.com/magiconair/properties github.com/mattn/go-isatty # github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/matttproud/golang_protobuf_extensions/pbutil +# github.com/miekg/dns v1.1.27 +github.com/miekg/dns # github.com/mitchellh/mapstructure v1.3.3 github.com/mitchellh/mapstructure # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd @@ -370,8 +372,6 @@ github.com/modern-go/concurrent github.com/modern-go/reflect2 # github.com/mojocn/base64Captcha v1.3.1 github.com/mojocn/base64Captcha -# github.com/open-falcon/rrdlite v0.0.0-20200214140804-bf5829f786ad -github.com/open-falcon/rrdlite # github.com/opentracing/opentracing-go v1.2.0 github.com/opentracing/opentracing-go github.com/opentracing/opentracing-go/ext @@ -412,6 +412,8 @@ github.com/robfig/go-cache github.com/satori/go.uuid # github.com/spaolacci/murmur3 v1.1.0 github.com/spaolacci/murmur3 +# github.com/sparrc/go-ping v0.0.0-20190613174326-4e5b6552494c +github.com/sparrc/go-ping # github.com/spf13/afero v1.2.2 github.com/spf13/afero github.com/spf13/afero/mem @@ -439,6 +441,8 @@ github.com/tidwall/gjson github.com/tidwall/match # github.com/tidwall/pretty v1.0.0 github.com/tidwall/pretty +# 
github.com/toolkits/file v0.0.0-20160325033739-a5b3c5147e07 +github.com/toolkits/file # github.com/toolkits/pkg v1.1.3 github.com/toolkits/pkg/cache github.com/toolkits/pkg/concurrent/semaphore @@ -456,6 +460,8 @@ github.com/toolkits/pkg/runner github.com/toolkits/pkg/slice github.com/toolkits/pkg/str github.com/toolkits/pkg/sys +# github.com/toolkits/sys v0.0.0-20170615103026-1f33b217ffaf +github.com/toolkits/sys # github.com/twotwotwo/sorts v0.0.0-20160814051341-bf5c1f2b8553 github.com/twotwotwo/sorts # github.com/uber-go/tally v3.3.13+incompatible @@ -502,8 +508,6 @@ github.com/uber/tchannel-go/trand github.com/uber/tchannel-go/typed # github.com/ugorji/go/codec v1.1.7 github.com/ugorji/go/codec -# github.com/unrolled/render v1.0.3 -github.com/unrolled/render # github.com/willf/bitset v1.1.10 github.com/willf/bitset # go.etcd.io/etcd v0.5.0-alpha.5.0.20200824191128-ae9734ed278b @@ -569,6 +573,7 @@ golang.org/x/net/context/ctxhttp golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack +golang.org/x/net/icmp golang.org/x/net/idna golang.org/x/net/internal/iana golang.org/x/net/internal/socket