diff --git a/README.md b/README.md
new file mode 100644
index 0000000..d29629d
--- /dev/null
+++ b/README.md
@@ -0,0 +1,84 @@
+# Munin plugin for elasticsearch
+
+A set of Munin plugins, written in Perl, for monitoring Elasticsearch 1.x nodes.
+The original code is no longer maintained, so I have taken over maintenance of these plugins.
+
+## Plugins
+
+* elasticsearch_cache - field and filter cache stats
+* elasticsearch_cluster_shards - cluster shard stats
+* elasticsearch_docs - document count
+* elasticsearch_index_size - index size
+* elasticsearch_index_total - index, get, search and delete operation totals
+* elasticsearch_jvm_memory - JVM heap stats
+* elasticsearch_jvm_threads - JVM thread stats
+* elasticsearch_open_files - open files count
+* elasticsearch_translog_size - translog file size
+
+## Configuration
+
+### Variables
+
+* env.host - the Elasticsearch node to query for stats (default: localhost)
+* env.port - the Elasticsearch HTTP API port (default: 9200)
+
+### Example Config
+
+Before use, add these settings to your Munin configuration.
+
+Typical locations for the plugin configuration:
+
+* a single file for all plugins: `/etc/munin/plugin-conf.d/munin-node`
+* one file per plugin: `/etc/munin/plugin-conf.d/elasticsearch`
+
+##### Example of minimal configuration
+
+`elasticsearch_open_files` requires root privileges to read from `/proc`.
+
+```
+[elasticsearch_open_files]
+user root
+```
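+
+The plugin locates the Elasticsearch Java process and counts its open file descriptors under `/proc`.
+As a rough manual cross-check (a sketch, assuming the node runs as the `elasticsearch` user with a single Java process), the same count can be reproduced by hand:
+
+```sh
+# locate the Elasticsearch Java process and count its open file descriptors
+PID=$(ps -u elasticsearch -opid,comm | grep java | awk '{ print $1 }')
+sudo ls /proc/$PID/fd/ | wc -l
+```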
+
+##### Example of custom host and port configuration
+
+```
+[elasticsearch_*]
+env.host localhost
+env.port 9200
+
+[elasticsearch_open_files]
+user root
+env.host localhost
+env.port 9200
+```
+
+## Install
+
+Install the plugins with the following steps after the configuration is in place.
+
+```sh
+# For CentOS
+$ cd /usr/local/src/
+$ git clone https://github.com/y-ken/munin-plugin-elasticsearch.git
+$ cd munin-plugin-elasticsearch
+$ cp -p elasticsearch_* /usr/share/munin/plugins/
+$ ln -s /usr/share/munin/plugins/elasticsearch_* /etc/munin/plugins/
+$ sudo -H munin-node-configure --shell | grep elasticsearch | sudo -H sh
+$ munin-node-configure | grep elasticsearch
+$ service munin-node restart
+```
+
+To confirm the plugins are working, run one with `munin-run`:
+
+```sh
+$ munin-run elasticsearch_jvm_memory
+heap_init.value 8589934592
+non_heap_max.value 224395264
+heap_max.value 8520204288
+direct_max.value 8520204288
+non_heap_init.value 24313856
+```
+
+## Author
+
+* Original code by [@rafl](https://github.com/rafl), imported from https://gist.github.com/2159398
+* [Contributors to y-ken/munin-plugin-elasticsearch](https://github.com/y-ken/munin-plugin-elasticsearch/graphs/contributors)
+* Maintained by [@y-ken](https://github.com/y-ken)
+
+## Licence
+
+MIT License
diff --git a/elasticsearch_cache b/elasticsearch_cache
index 4e6b945..ae4473e 100755
--- a/elasticsearch_cache
+++ b/elasticsearch_cache
@@ -16,7 +16,7 @@ use JSON qw/decode_json/;
=head1 NAME
-elasticsearch_jvm - A munin plugin that collects stats from the JVM of your elasticsearch instances
+elasticsearch_cache - A munin plugin that collects cache stats of your elasticsearch instances
=head1 APPLICABLE SYSTEMS
@@ -36,7 +36,8 @@ Tomas Doran (t0m) - c<< >>
=cut
-my $host = 'localhost';
+my $host = exists $ENV{'host'} ? $ENV{'host'} : 'localhost';
+my $port = exists $ENV{'port'} ? $ENV{'port'} : 9200;
my $ua = LWP::UserAgent->new;
$ua->timeout(10);
@@ -50,15 +51,16 @@ sub get_json_from_url {
return $data;
}
-my $data = get_json_from_url("http://$host:9200/_cluster/nodes");
-my $t_data = get_json_from_url("http://$host:9200/_cluster/nodes/stats");
-my %out;
+my $data = get_json_from_url("http://$host:$port/_nodes");
+my $t_data = get_json_from_url("http://$host:$port/_nodes/stats");
+my %out = (field_size => 0, filter_size => 0);
foreach my $full_node_name (keys %{$data->{nodes}}) {
next unless $t_data->{nodes}{$full_node_name};
- $out{field_size} = $t_data->{nodes}{$full_node_name}{indices}{cache}{field_size_in_bytes};
- $out{filter_size} = $t_data->{nodes}{$full_node_name}{indices}{cache}{filter_size_in_bytes};
-
+ if (defined($t_data->{nodes}{$full_node_name}{indices}{cache})) {
+ $out{field_size} += $t_data->{nodes}{$full_node_name}{indices}{cache}{field_size_in_bytes};
+ $out{filter_size} += $t_data->{nodes}{$full_node_name}{indices}{cache}{filter_size_in_bytes};
+ }
}
if ($ARGV[0] and $ARGV[0] eq 'config') {
print "graph_args --base 1024\n";
diff --git a/elasticsearch_cluster_shards b/elasticsearch_cluster_shards
index 56ce429..d3d88db 100755
--- a/elasticsearch_cluster_shards
+++ b/elasticsearch_cluster_shards
@@ -16,7 +16,7 @@ use JSON qw/decode_json/;
=head1 NAME
-elasticsearch_jvm - A munin plugin that collects stats from the JVM of your elasticsearch instances
+elasticsearch_cluster_shards - A munin plugin that collects shard stats of your elasticsearch instances
=head1 APPLICABLE SYSTEMS
@@ -36,7 +36,8 @@ Tomas Doran (t0m) - c<< >>
=cut
-my $host = 'localhost';
+my $host = exists $ENV{'host'} ? $ENV{'host'} : 'localhost';
+my $port = exists $ENV{'port'} ? $ENV{'port'} : 9200;
my $ua = LWP::UserAgent->new;
$ua->timeout(10);
@@ -50,7 +51,7 @@ sub get_json_from_url {
return $data;
}
-my $data = get_json_from_url("http://$host:9200/_cluster/health");
+my $data = get_json_from_url("http://$host:$port/_cluster/health");
if ($ARGV[0] and $ARGV[0] eq 'config') {
print "graph_title ElasticSearch cluster shards\n";
diff --git a/elasticsearch_docs b/elasticsearch_docs
index 5d1e53c..a2d9cd3 100755
--- a/elasticsearch_docs
+++ b/elasticsearch_docs
@@ -16,7 +16,7 @@ use JSON qw/decode_json/;
=head1 NAME
-elasticsearch_jvm - A munin plugin that collects stats from the JVM of your elasticsearch instances
+elasticsearch_docs - A munin plugin that collects document stats of your elasticsearch instances
=head1 APPLICABLE SYSTEMS
@@ -36,7 +36,8 @@ Tomas Doran (t0m) - c<< >>
=cut
-my $host = 'localhost';
+my $host = exists $ENV{'host'} ? $ENV{'host'} : 'localhost';
+my $port = exists $ENV{'port'} ? $ENV{'port'} : 9200;
my $ua = LWP::UserAgent->new;
$ua->timeout(10);
@@ -50,13 +51,13 @@ sub get_json_from_url {
return $data;
}
-my $data = get_json_from_url("http://$host:9200/_cluster/nodes");
-my $t_data = get_json_from_url("http://$host:9200/_cluster/nodes/stats");
-my %out;
+my $data = get_json_from_url("http://$host:$port/_nodes");
+my $t_data = get_json_from_url("http://$host:$port/_nodes/stats");
+my %out = (num_docs => 0);
foreach my $full_node_name (keys %{$data->{nodes}}) {
next unless $t_data->{nodes}{$full_node_name};
- $out{num_docs} = $t_data->{nodes}{$full_node_name}{indices}{docs}{count};
+ $out{num_docs} += $t_data->{nodes}{$full_node_name}{indices}{docs}{count};
}
if ($ARGV[0] and $ARGV[0] eq 'config') {
diff --git a/elasticsearch_index_size b/elasticsearch_index_size
index c7daeb5..685d45c 100755
--- a/elasticsearch_index_size
+++ b/elasticsearch_index_size
@@ -16,7 +16,7 @@ use JSON qw/decode_json/;
=head1 NAME
-elasticsearch_jvm - A munin plugin that collects stats from the JVM of your elasticsearch instances
+elasticsearch_index_size - A munin plugin that collects index size of your elasticsearch instances
=head1 APPLICABLE SYSTEMS
@@ -36,7 +36,8 @@ Tomas Doran (t0m) - c<< >>
=cut
-my $host = 'localhost';
+my $host = exists $ENV{'host'} ? $ENV{'host'} : 'localhost';
+my $port = exists $ENV{'port'} ? $ENV{'port'} : 9200;
my $ua = LWP::UserAgent->new;
$ua->timeout(10);
@@ -50,13 +51,13 @@ sub get_json_from_url {
return $data;
}
-my $data = get_json_from_url("http://$host:9200/_cluster/nodes");
-my $t_data = get_json_from_url("http://$host:9200/_cluster/nodes/stats");
-my %out;
+my $data = get_json_from_url("http://$host:$port/_nodes");
+my $t_data = get_json_from_url("http://$host:$port/_nodes/stats");
+my %out = (index_size => 0);
foreach my $full_node_name (keys %{$data->{nodes}}) {
next unless $t_data->{nodes}{$full_node_name};
- $out{index_size} = $t_data->{nodes}{$full_node_name}{indices}{store}{size_in_bytes};
+ $out{index_size} += $t_data->{nodes}{$full_node_name}{indices}{store}{size_in_bytes};
}
if ($ARGV[0] and $ARGV[0] eq 'config') {
print "graph_args --base 1024\n";
diff --git a/elasticsearch_index_total b/elasticsearch_index_total
new file mode 100755
index 0000000..806fa43
--- /dev/null
+++ b/elasticsearch_index_total
@@ -0,0 +1,98 @@
+#!/usr/bin/env perl
+
+# Parameters supported:
+#
+# config
+# autoconf
+#
+# Magic markers:
+#%# family=auto
+#%# capabilities=autoconf
+
+use strict;
+use warnings;
+use LWP;
+use JSON qw/decode_json/;
+
+=head1 NAME
+
+elasticsearch_index_total - A munin plugin that collects stats about the index totals
+
+=head1 APPLICABLE SYSTEMS
+
+ElasticSearch
+
+=head1 CONFIGURATION
+
+None
+
+=head1 BUGS
+
+None known so far. If you find any, let me know.
+
+=head1 AUTHOR
+
+
+=cut
+
+my $host = exists $ENV{'host'} ? $ENV{'host'} : 'localhost';
+my $port = exists $ENV{'port'} ? $ENV{'port'} : 9200;
+
+my $ua = LWP::UserAgent->new;
+$ua->timeout(10);
+
+sub get_json_from_url {
+ my $uri = shift;
+ my $res = $ua->get($uri, 'Content-Type' => 'application/json' );
+ Carp::confess($res->code . " for " . $uri) unless $res->is_success;
+ my $data = do { local $@; eval { decode_json($res->content) } };
+ die("Could not decode JSON from: " . $res->content) unless $data;
+ return $data;
+}
+
+my $data = get_json_from_url("http://$host:$port/_nodes");
+my $t_data = get_json_from_url("http://$host:$port/_nodes/stats");
+my %out = (index => 0, get => 0, search => 0, delete => 0);
+
+foreach my $full_node_name (keys %{$data->{nodes}}) {
+ next unless $t_data->{nodes}{$full_node_name};
+ $out{index} += $t_data->{nodes}{$full_node_name}{indices}{indexing}{index_total};
+ $out{get} += $t_data->{nodes}{$full_node_name}{indices}{get}{total};
+ $out{search} += $t_data->{nodes}{$full_node_name}{indices}{search}{query_total};
+ $out{delete} += $t_data->{nodes}{$full_node_name}{indices}{indexing}{delete_total};
+}
+if ($ARGV[0] and $ARGV[0] eq 'config') {
+ print "graph_title elasticsearch index operations\n";
+ print "graph_category elasticsearch\n";
+ print "graph_args --base 1000 -l 0\n";
+ print "graph_vlabel Operations per second\n";
+
+ print "graph_order index get search delete\n";
+ print "index.label index\n";
+ print "index.type DERIVE\n";
+ print "index.min 0\n";
+ print "index.draw LINE2\n";
+
+ print "get.label get\n";
+ print "get.type DERIVE\n";
+ print "get.min 0\n";
+ print "get.draw LINE2\n";
+
+ print "search.label search\n";
+ print "search.type DERIVE\n";
+ print "search.min 0\n";
+ print "search.draw LINE2\n";
+
+ print "delete.label delete\n";
+ print "delete.type DERIVE\n";
+ print "delete.min 0\n";
+ print "delete.draw LINE2\n";
+
+}
+elsif ($ARGV[0] and $ARGV[0] eq 'autoconf') {
+  print "yes\n";
+}
+else {
+  foreach my $name (keys %out) {
+    print "$name.value " . $out{$name} . "\n";
+  }
+}
+
+exit(0);
diff --git a/elasticsearch_jvm_memory b/elasticsearch_jvm_memory
index 9c3bc40..9b25b0f 100755
--- a/elasticsearch_jvm_memory
+++ b/elasticsearch_jvm_memory
@@ -16,7 +16,7 @@ use JSON qw/decode_json/;
=head1 NAME
-elasticsearch_jvm - A munin plugin that collects stats from the JVM of your elasticsearch instances
+elasticsearch_jvm_memory - A munin plugin that collects stats from the JVM of your elasticsearch instances
=head1 APPLICABLE SYSTEMS
@@ -36,7 +36,8 @@ Tomas Doran (t0m) - c<< >>
=cut
-my $host = 'localhost';
+my $host = exists $ENV{'host'} ? $ENV{'host'} : 'localhost';
+my $port = exists $ENV{'port'} ? $ENV{'port'} : 9200;
my $ua = LWP::UserAgent->new;
$ua->timeout(10);
@@ -50,13 +51,14 @@ sub get_json_from_url {
return $data;
}
-my $data = get_json_from_url("http://$host:9200/_cluster/nodes?jvm=true");
-my %out;
+my $data = get_json_from_url("http://$host:$port/_nodes?jvm=true");
+my %out = (direct_max => 0, heap_init => 0, heap_max => 0, non_heap_init => 0, non_heap_max => 0);
foreach my $full_node_name (keys %{$data->{nodes}}) {
+ next unless $data->{nodes}{$full_node_name};
foreach my $name (grep { /_in_bytes$/ } keys %{ $data->{nodes}{$full_node_name}{jvm}{mem} }) {
my ($dname) = $name =~ m/(.+)_in_bytes$/;
- $out{$dname} = $data->{nodes}{$full_node_name}{jvm}{mem}{$name};
+ $out{$dname} += $data->{nodes}{$full_node_name}{jvm}{mem}{$name};
}
}
if ($ARGV[0] and $ARGV[0] eq 'config') {
diff --git a/elasticsearch_jvm_threads b/elasticsearch_jvm_threads
index 794c397..5418b40 100755
--- a/elasticsearch_jvm_threads
+++ b/elasticsearch_jvm_threads
@@ -16,7 +16,7 @@ use JSON qw/decode_json/;
=head1 NAME
-elasticsearch_jvm - A munin plugin that collects stats from the JVM of your elasticsearch instances
+elasticsearch_jvm_threads - A munin plugin that collects stats from the JVM of your elasticsearch instances
=head1 APPLICABLE SYSTEMS
@@ -36,7 +36,8 @@ Tomas Doran (t0m) - c<< >>
=cut
-my $host = 'localhost';
+my $host = exists $ENV{'host'} ? $ENV{'host'} : 'localhost';
+my $port = exists $ENV{'port'} ? $ENV{'port'} : 9200;
my $ua = LWP::UserAgent->new;
$ua->timeout(10);
@@ -50,14 +51,14 @@ sub get_json_from_url {
return $data;
}
-my $data = get_json_from_url("http://$host:9200/_cluster/nodes?jvm=true");
-my $t_data = get_json_from_url("http://$host:9200/_cluster/nodes/stats?jvm=true");
-my %out;
+my $data = get_json_from_url("http://$host:$port/_nodes?jvm=true");
+my $t_data = get_json_from_url("http://$host:$port/_nodes/stats?jvm=true");
+my %out = (count => 0, peak_count => 0);
foreach my $full_node_name (keys %{$data->{nodes}}) {
next unless $t_data->{nodes}{$full_node_name};
foreach my $name (keys %{ $t_data->{nodes}{$full_node_name}{jvm}{threads} }) {
- $out{$name} = $t_data->{nodes}{$full_node_name}{jvm}{threads}{$name};
+ $out{$name} += $t_data->{nodes}{$full_node_name}{jvm}{threads}{$name};
}
}
if ($ARGV[0] and $ARGV[0] eq 'config') {
diff --git a/elasticsearch_open_files b/elasticsearch_open_files
index b72870e..b513735 100755
--- a/elasticsearch_open_files
+++ b/elasticsearch_open_files
@@ -8,7 +8,7 @@ if [ "$1" = "config" ]; then
echo 'graph_title ElasticSearch open files'
echo 'graph_args --base 1000 -l 0'
echo 'graph_vlabel number of open files'
- echo 'graph_category system'
+ echo 'graph_category elasticsearch'
echo 'used.label open files'
echo 'used.type GAUGE'
echo 'used.info The number of currently open files.'
@@ -17,10 +17,8 @@ if [ "$1" = "config" ]; then
exit 0
fi
-
-# while true; do su -c 'ls /proc/$(pidof whatever)/fd' elasticsearch |wc -l |nc -l -q0 localhost 9998; done
-
PID=`ps -u elasticsearch -opid,comm | grep java | awk '{ print $1 }'`
VALUE=`ls /proc/$PID/fd/ | wc | awk '{ print $1 }'`
+MAX_VALUE=`grep 'Max open files' /proc/$PID/limits | awk '{ print $5 }'`
echo "used.value $VALUE"
-echo "max.value 65000"
+echo "max.value $MAX_VALUE"
diff --git a/elasticsearch_translog_size b/elasticsearch_translog_size
index 7f9260c..1d1e59d 100755
--- a/elasticsearch_translog_size
+++ b/elasticsearch_translog_size
@@ -10,7 +10,7 @@ if [ "$1" = "config" ]; then
echo 'graph_title ElasticSearch translog size'
echo 'graph_args --base 1024 -l 0'
echo 'graph_vlabel size of translogs'
- echo 'graph_category system'
+ echo 'graph_category elasticsearch'
echo 'size.label size of translog files'
echo 'size.type GAUGE'
echo 'number.label number of translog files'
@@ -18,7 +18,8 @@ if [ "$1" = "config" ]; then
exit 0
fi
-NUMBER=`ls /srv/elasticsearch/metacpan_v1/nodes/0/indices/*/*/translog/* | wc -l`
-SIZE=`du -s /srv/elasticsearch/metacpan_v1/nodes/0/indices/*/*/translog/* | awk '{sum+=$1};END{print sum}'`
+DIR=`ps -u elasticsearch -opid,command | tail -1 | perl -ne 'print $1 if(/-Des.default.path.data=([\w\/]+)/)'`
+NUMBER=`ls $DIR/*/nodes/0/indices/*/*/translog/* | wc -l`
+SIZE=`du -s $DIR/*/nodes/0/indices/*/*/translog/* | awk '{sum+=$1};END{print sum}'`
echo "number.value $NUMBER"
echo "size.value $SIZE"