Diffstat (limited to 'vendor/ruflin/elastica/ansible/roles/elasticsearch')
-rw-r--r--  vendor/ruflin/elastica/ansible/roles/elasticsearch/handlers/main.yml                6
-rw-r--r--  vendor/ruflin/elastica/ansible/roles/elasticsearch/tasks/main.yml                 101
-rw-r--r--  vendor/ruflin/elastica/ansible/roles/elasticsearch/templates/config-0.yml          10
-rw-r--r--  vendor/ruflin/elastica/ansible/roles/elasticsearch/templates/config-1.yml          10
-rw-r--r--  vendor/ruflin/elastica/ansible/roles/elasticsearch/templates/config-default.yml    40
-rwxr-xr-x  vendor/ruflin/elastica/ansible/roles/elasticsearch/templates/elasticsearch.service 229
-rw-r--r--  vendor/ruflin/elastica/ansible/roles/elasticsearch/templates/logging.yml           56
7 files changed, 452 insertions, 0 deletions
diff --git a/vendor/ruflin/elastica/ansible/roles/elasticsearch/handlers/main.yml b/vendor/ruflin/elastica/ansible/roles/elasticsearch/handlers/main.yml
new file mode 100644
index 00000000..97cccdd2
--- /dev/null
+++ b/vendor/ruflin/elastica/ansible/roles/elasticsearch/handlers/main.yml
@@ -0,0 +1,6 @@
+---
+
+- name: restart elasticsearch
+  service: >
+    name=elasticsearch
+    state=restarted
diff --git a/vendor/ruflin/elastica/ansible/roles/elasticsearch/tasks/main.yml b/vendor/ruflin/elastica/ansible/roles/elasticsearch/tasks/main.yml
new file mode 100644
index 00000000..c2041343
--- /dev/null
+++ b/vendor/ruflin/elastica/ansible/roles/elasticsearch/tasks/main.yml
@@ -0,0 +1,101 @@
+---
+
+- name: import ppa:webupd8team/java gpg key
+  apt_key: >
+    id=EEA14886
+    keyserver=keyserver.ubuntu.com
+    state=present
+
+- name: add ppa:webupd8team/java repository
+  apt_repository: >
+    repo="deb http://ppa.launchpad.net/webupd8team/java/ubuntu precise main"
+    state=present
+    update_cache=yes
+
+- name: accept oracle license
+  shell: >
+    echo "oracle-java7-installer shared/accepted-oracle-license-v1-1 select true" | debconf-set-selections
+
+- name: install java
+  apt: >
+    force=yes
+    name={{ item }}
+    state=present
+    update_cache=no
+  with_items:
+    - oracle-java7-installer
+    - oracle-java7-set-default
+
+- name: import elasticsearch gpg key
+  apt_key: >
+    id=D88E42B4
+    url=https://packages.elasticsearch.org/GPG-KEY-elasticsearch
+    state=present
+
+- name: add elasticsearch repository
+  apt_repository: >
+    repo="deb http://packages.elasticsearch.org/elasticsearch/{{ ES_SHORT_VER }}/debian stable main"
+    state=present
+    update_cache=yes
+
+- name: install elasticsearch
+  apt: >
+    force=yes
+    name=elasticsearch={{ ES_VER }}
+    state=present
+    update_cache=no
+
+- name: install image plugin
+  command: >
+    creates=/usr/share/elasticsearch/plugins/image
+    /usr/share/elasticsearch/bin/plugin --url https://github.com/SibaTokyo/elasticsearch-image/releases/download/{{ ES_IMAGE_PLUGIN_VER }}/elasticsearch-image-{{ ES_IMAGE_PLUGIN_VER }}.zip -install image
+
+- name: install mapper-attachments plugin
+  command: >
+    creates=/usr/share/elasticsearch/plugins/mapper-attachments
+    /usr/share/elasticsearch/bin/plugin -install elasticsearch/elasticsearch-mapper-attachments/{{ ES_MAPPER_ATTACHMENTS_VER }}
+
+- name: install geocluster-facet plugin
+  command: >
+    creates=/usr/share/elasticsearch/plugins/geocluster-facet
+    /usr/share/elasticsearch/bin/plugin -install geocluster-facet --url https://github.com/zenobase/geocluster-facet/releases/download/{{ ES_GEOCLUSTER_FACET_VER }}/geocluster-facet-{{ ES_GEOCLUSTER_FACET_VER }}.jar
+
+- name: install transport-thrift plugin
+  command: >
+    creates=/usr/share/elasticsearch/plugins/transport-thrift
+    /usr/share/elasticsearch/bin/plugin -install elasticsearch/elasticsearch-transport-thrift/{{ ES_TRANSPORT_THRIFT_VER }}
+
+- name: install transport-memcached plugin
+  command: >
+    creates=/usr/share/elasticsearch/plugins/transport-memcached
+    /usr/share/elasticsearch/bin/plugin -install elasticsearch/elasticsearch-transport-memcached/{{ ES_TRANSPORT_MEMCACHED_VER }}
+
+- name: remove default config
+  file: >
+    path={{ item }}
+    state=absent
+  with_items:
+    - /etc/default/elasticsearch
+    - /etc/elasticsearch/elasticsearch.yml
+
+- name: create custom config
+  template: >
+    dest=/etc/elasticsearch/{{ item }}
+    src={{ item }}
+  with_items:
+    - config-0.yml
+    - config-1.yml
+    - logging.yml
+  notify: restart elasticsearch
+
+- name: create elasticsearch service script
+  template: >
+    dest=/etc/init.d/elasticsearch
+    src=elasticsearch.service
+  notify: restart elasticsearch
+
+- name: start elasticsearch
+  service: >
+    enabled=yes
+    name=elasticsearch
+    state=started
diff --git a/vendor/ruflin/elastica/ansible/roles/elasticsearch/templates/config-0.yml b/vendor/ruflin/elastica/ansible/roles/elasticsearch/templates/config-0.yml
new file mode 100644
index 00000000..aa956910
--- /dev/null
+++ b/vendor/ruflin/elastica/ansible/roles/elasticsearch/templates/config-0.yml
@@ -0,0 +1,10 @@
+{% extends "config-default.yml" %}
+
+{% block config %}
+
+http.port: 9200
+transport.tcp.port: 9300
+thrift.port: 9500
+memcached.port: 11211
+
+{% endblock %}
diff --git a/vendor/ruflin/elastica/ansible/roles/elasticsearch/templates/config-1.yml b/vendor/ruflin/elastica/ansible/roles/elasticsearch/templates/config-1.yml
new file mode 100644
index 00000000..a54d719e
--- /dev/null
+++ b/vendor/ruflin/elastica/ansible/roles/elasticsearch/templates/config-1.yml
@@ -0,0 +1,10 @@
+{% extends "config-default.yml" %}
+
+{% block config %}
+
+http.port: 9201
+transport.tcp.port: 9301
+thrift.port: 9501
+memcached.port: 11212
+
+{% endblock %}
diff --git a/vendor/ruflin/elastica/ansible/roles/elasticsearch/templates/config-default.yml b/vendor/ruflin/elastica/ansible/roles/elasticsearch/templates/config-default.yml
new file mode 100644
index 00000000..0917f244
--- /dev/null
+++ b/vendor/ruflin/elastica/ansible/roles/elasticsearch/templates/config-default.yml
@@ -0,0 +1,40 @@
+{% block default_config %}
+
+index.number_of_shards: 2
+index.number_of_replicas: 0
+
+# Don't write data to HDD in tests
+index.store.type: memory
+
+# Required plugins
+plugin.mandatory: mapper-attachments, geocluster-facet, transport-thrift, transport-memcached, image
+
+# For bulk tests
+bulk.udp.enabled: true
+bulk.udp.bulk_actions: 5
+
+# For script tests
+script.inline: on
+script.indexed: on
+
+# Lock the process memory (mlockall) so the JVM heap is never swapped out
+bootstrap.mlockall: true
+
+# Don't accept connections from anywhere but localhost
+network.host: "127.0.0.1"
+
+# Limit thread pool sizes by setting the number of available processors to 1
+# Without this, Travis builds fail with an OutOfMemory error
+processors: 1
+
+# All nodes will be called Elastica
+node.name: Elastica
+
+# Added for snapshot tests
+path.repo: ["/tmp/test_register", "/tmp/test_repository"]
+
+{% endblock %}
+
+{% block config %}
+# Node specific config should be overridden in the child template
+{% endblock %}
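
Because config-0.yml and config-1.yml extend this template and only override the config block, each rendered node configuration is the default_config block above followed by that node's port settings. As a sketch, config-0.yml should render to roughly the following (comments and blank lines omitted):

    index.number_of_shards: 2
    index.number_of_replicas: 0
    index.store.type: memory
    plugin.mandatory: mapper-attachments, geocluster-facet, transport-thrift, transport-memcached, image
    bulk.udp.enabled: true
    bulk.udp.bulk_actions: 5
    script.inline: on
    script.indexed: on
    bootstrap.mlockall: true
    network.host: "127.0.0.1"
    processors: 1
    node.name: Elastica
    path.repo: ["/tmp/test_register", "/tmp/test_repository"]
    http.port: 9200
    transport.tcp.port: 9300
    thrift.port: 9500
    memcached.port: 11211
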
diff --git a/vendor/ruflin/elastica/ansible/roles/elasticsearch/templates/elasticsearch.service b/vendor/ruflin/elastica/ansible/roles/elasticsearch/templates/elasticsearch.service
new file mode 100755
index 00000000..0268e230
--- /dev/null
+++ b/vendor/ruflin/elastica/ansible/roles/elasticsearch/templates/elasticsearch.service
@@ -0,0 +1,229 @@
+#!/bin/sh
+#
+# /etc/init.d/elasticsearch -- startup script for Elasticsearch
+#
+# Written by Miquel van Smoorenburg <miquels@cistron.nl>.
+# Modified for Debian GNU/Linux by Ian Murdock <imurdock@gnu.ai.mit.edu>.
+# Modified for Tomcat by Stefan Gybas <sgybas@debian.org>.
+# Modified for Tomcat6 by Thierry Carrez <thierry.carrez@ubuntu.com>.
+# Additional improvements by Jason Brittain <jason.brittain@mulesoft.com>.
+# Modified by Nicolas Huray for Elasticsearch <nicolas.huray@gmail.com>.
+# Modified by Igor Denisenko for Elastica <im.denisenko@yahoo.com>
+#
+### BEGIN INIT INFO
+# Provides: elasticsearch
+# Required-Start: $network $remote_fs $named
+# Required-Stop: $network $remote_fs $named
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Starts elasticsearch
+# Description: Starts elasticsearch using start-stop-daemon
+### END INIT INFO
+
+PATH="/bin:/usr/bin:/sbin:/usr/sbin"
+NAME="elasticsearch"
+DESC="Elasticsearch Server"
+
+if [ `id -u` -ne 0 ]; then
+ echo "You need root privileges to run this script"
+ exit 1
+fi
+
+
+. /lib/lsb/init-functions
+
+if [ -r /etc/default/rcS ]; then
+    . /etc/default/rcS
+fi
+
+
+# Run Elasticsearch as this user ID and group ID
+ES_USER="elasticsearch"
+ES_GROUP="elasticsearch"
+
+# The first existing directory is used for JAVA_HOME (if JAVA_HOME is not defined)
+JDK_DIRS="/usr/lib/jvm/java-8-oracle/ /usr/lib/jvm/j2sdk1.8-oracle/ /usr/lib/jvm/jdk-7-oracle-x64 /usr/lib/jvm/java-7-oracle /usr/lib/jvm/j2sdk1.7-oracle/ /usr/lib/jvm/java-7-openjdk /usr/lib/jvm/java-7-openjdk-amd64/ /usr/lib/jvm/java-7-openjdk-armhf /usr/lib/jvm/java-7-openjdk-i386/ /usr/lib/jvm/default-java"
+
+# Look for the right JVM to use
+for jdir in $JDK_DIRS; do
+    if [ -r "$jdir/bin/java" -a -z "${JAVA_HOME}" ]; then
+        JAVA_HOME="$jdir"
+    fi
+done
+export JAVA_HOME
+
+# Directory where the Elasticsearch binary distribution resides
+ES_HOME="/usr/share/$NAME"
+
+# Heap size defaults to 256m min, 1g max
+# Be modest: the entire cluster will allocate (3*ES_HEAP_SIZE) of memory
+ES_HEAP_SIZE="256m"
+export ES_HEAP_SIZE
+
+# Heap new generation
+# ES_HEAP_NEWSIZE=
+# export ES_HEAP_NEWSIZE
+
+# max direct memory
+# ES_DIRECT_SIZE=
+# export ES_DIRECT_SIZE
+
+# Additional Java OPTS
+ES_JAVA_OPTS="-server"
+export ES_JAVA_OPTS
+
+# Maximum number of open files
+MAX_OPEN_FILES="65535"
+
+# Maximum amount of locked memory
+MAX_LOCKED_MEMORY="unlimited"
+
+# Elasticsearch log directory
+LOG_DIR="/var/log/$NAME"
+
+# Elasticsearch data directory
+DATA_DIR="/var/lib/$NAME"
+
+# Elasticsearch work directory
+WORK_DIR="/tmp/$NAME"
+
+# Elasticsearch configuration directory
+CONF_DIR="/etc/$NAME"
+
+# Define other required variables
+DAEMON="$ES_HOME/bin/elasticsearch"
+
+# Check DAEMON exists
+if [ ! -x $DAEMON ]; then
+    exit 0
+fi
+
+checkJava() {
+    if [ -x "$JAVA_HOME/bin/java" ]; then
+        JAVA="$JAVA_HOME/bin/java"
+    else
+        JAVA=`which java`
+    fi
+
+    if [ ! -x "$JAVA" ]; then
+        echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
+        exit 1
+    fi
+}
+
+case "$1" in
+    start)
+        checkJava
+
+        if [ -n "$MAX_LOCKED_MEMORY" -a -z "$ES_HEAP_SIZE" ]; then
+            log_failure_msg "MAX_LOCKED_MEMORY is set - ES_HEAP_SIZE must also be set"
+            exit 1
+        fi
+
+
+        mkdir -p "$LOG_DIR" "$DATA_DIR" "$WORK_DIR"
+        chown "$ES_USER":"$ES_GROUP" "$LOG_DIR" "$DATA_DIR" "$WORK_DIR"
+
+        if [ -n "$MAX_OPEN_FILES" ]; then
+            ulimit -n $MAX_OPEN_FILES
+        fi
+
+        if [ -n "$MAX_LOCKED_MEMORY" ]; then
+            ulimit -l $MAX_LOCKED_MEMORY
+        fi
+
+        ulimit -s 1024
+
+        for node in 0 1; do
+            log_daemon_msg "Starting elasticsearch node #$node"
+
+            PID_FILE="/var/run/$NAME-$node.pid"
+            CONF_FILE="$CONF_DIR/config-$node.yml"
+
+            DAEMON="$ES_HOME/bin/elasticsearch"
+            DAEMON_OPTS="
+                -Des.config=$CONF_FILE \
+                -Des.path.home=$ES_HOME \
+                -Des.path.logs=$LOG_DIR \
+                -Des.path.data=$DATA_DIR \
+                -Des.path.work=$WORK_DIR \
+                -Des.path.conf=$CONF_DIR \
+                -p $PID_FILE
+            "
+
+            pid=`pidofproc -p $PID_FILE elasticsearch`
+            if [ -n "$pid" ] ; then
+                log_begin_msg "Elasticsearch node #$node already running"
+                continue
+            fi
+
+            touch "$PID_FILE"
+            chown "$ES_USER":"$ES_GROUP" "$PID_FILE"
+
+            # Start Daemon
+            start-stop-daemon --start -b --user "$ES_USER" -c "$ES_USER" --pidfile "$PID_FILE" --exec "$DAEMON" -- $DAEMON_OPTS
+            return=$?
+            if [ $return -eq 0 ]; then
+                i=0
+                timeout=10
+                # Wait for the process to be properly started before exiting
+                until { cat "$PID_FILE" | xargs kill -0; } >/dev/null 2>&1
+                do
+                    sleep 1
+                    i=$(($i + 1))
+                    [ $i -gt $timeout ] && log_end_msg 1
+                done
+            else
+                log_end_msg $return
+            fi
+        done
+        ;;
+    stop)
+        for node in 0 1; do
+            log_daemon_msg "Stopping elasticsearch node #$node"
+
+            PID_FILE="/var/run/$NAME-$node.pid"
+
+            if [ -f "$PID_FILE" ]; then
+                start-stop-daemon --stop --pidfile "$PID_FILE" \
+                    --user "$ES_USER" \
+                    --retry=TERM/20/KILL/5 >/dev/null; return=$?
+                if [ $return -eq 1 ]; then
+                    log_progress_msg "$DESC is not running but pid file exists, cleaning up"
+                elif [ $return -eq 3 ]; then
+                    PID="`cat $PID_FILE`"
+                    log_failure_msg "Failed to stop $DESC (pid $PID)"
+                    exit 1
+                fi
+                rm -f "$PID_FILE"
+            else
+                log_progress_msg "(not running)"
+            fi
+        done
+
+        log_end_msg 0
+        ;;
+    status)
+        for node in 0 1; do
+            PID_FILE="/var/run/$NAME-$node.pid"
+            status_of_proc -p $PID_FILE "Elasticsearch node #$node" "Elasticsearch node #$node"
+        done
+        exit 0
+        ;;
+    restart|force-reload)
+        for node in 0 1; do
+            PID_FILE="/var/run/$NAME-$node.pid"
+            if [ -f "$PID_FILE" ]; then
+                $0 stop
+                sleep 1
+            fi
+        done
+        $0 start
+        ;;
+    *)
+        log_success_msg "Usage: $0 {start|stop|restart|force-reload|status}"
+        exit 1
+        ;;
+esac
+
+exit 0
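
Assuming the role has run, a quick manual check of this init script could look like the following; the HTTP ports come from config-0.yml and config-1.yml above.

    sudo service elasticsearch start      # starts node #0 and node #1
    sudo service elasticsearch status     # checks both pid files
    curl -s http://127.0.0.1:9200/        # node #0 HTTP port
    curl -s http://127.0.0.1:9201/        # node #1 HTTP port
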
diff --git a/vendor/ruflin/elastica/ansible/roles/elasticsearch/templates/logging.yml b/vendor/ruflin/elastica/ansible/roles/elasticsearch/templates/logging.yml
new file mode 100644
index 00000000..9e00d01c
--- /dev/null
+++ b/vendor/ruflin/elastica/ansible/roles/elasticsearch/templates/logging.yml
@@ -0,0 +1,56 @@
+# You can override this by setting a system property, for example -Des.logger.level=DEBUG
+es.logger.level: INFO
+rootLogger: ${es.logger.level}, console, file
+logger:
+  # log action execution errors for easier debugging
+  action: DEBUG
+  # reduce the logging for aws, too much is logged under the default INFO
+  com.amazonaws: WARN
+
+  # gateway
+  #gateway: DEBUG
+  #index.gateway: DEBUG
+
+  # peer shard recovery
+  #indices.recovery: DEBUG
+
+  # discovery
+  #discovery: TRACE
+
+  index.search.slowlog: TRACE, index_search_slow_log_file
+  index.indexing.slowlog: TRACE, index_indexing_slow_log_file
+
+additivity:
+  index.search.slowlog: false
+  index.indexing.slowlog: false
+
+appender:
+  console:
+    type: console
+    layout:
+      type: consolePattern
+      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+  file:
+    type: dailyRollingFile
+    file: ${path.logs}/${cluster.name}.log
+    datePattern: "'.'yyyy-MM-dd"
+    layout:
+      type: pattern
+      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+  index_search_slow_log_file:
+    type: dailyRollingFile
+    file: ${path.logs}/${cluster.name}_index_search_slowlog.log
+    datePattern: "'.'yyyy-MM-dd"
+    layout:
+      type: pattern
+      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+  index_indexing_slow_log_file:
+    type: dailyRollingFile
+    file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
+    datePattern: "'.'yyyy-MM-dd"
+    layout:
+      type: pattern
+      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"