parent
0e244cb8cf
commit
e3beaa001c
@ -0,0 +1,144 @@ |
||||
Deploying a shared production ready MongoDB cluster with Ansible |
||||
------------------------------------------------------------------------------ |
||||
|
||||
In this example we demonstrate how we can orchestrate the deployment of a production grade MongoDB Cluster. The functionality of this example includes: |
||||
|
||||
1) Deploying a N node MongoDB cluster, which has N shards and N replication nodes. |
||||
2) Scale out capability. Expand the Cluster by adding nodes to the cluster. |
||||
|
||||
### Deployment Architecture |
||||
|
||||
To better explain the deployment architecture let's take an example where we are deploying a 3 node MongoDB cluster ( Minimum recommended by MongoDB). |
||||
The way Ansible configures the three nodes is as follows: |
||||
|
||||
1) Install the mongodb software on all nodes. |
||||
2) Creates 3 replication sets, with one primary on each node and the rest two acting as secondaries. |
||||
3) Configures MongoDB configuration servers as listed in the inventory section [mongocservers]. The recommended number is 3, so these can be the same three servers as the data nodes. |
||||
4) Configures a Mongos server as listed in the inventory file [mongosservers]. |
||||
5) Adds 3 shards, each belonging to an individual replication set. |
||||
|
||||
Once the cluster is deployed, if we want to scale the cluster, Ansible configures it as follows: |
||||
|
||||
1) Install the MongoDB application on the new node. |
||||
2) Configure the replication set with the new node as primary and the secondaries as listed in the inventory file [replicationservers]. (Don't forget to also add the new node to the [replicationservers] section.) |
||||
3) Adds a new shard to the mongos service pointing to the new replication set. |
||||
|
||||
### The following example deploys a three node MongoDB Cluster |
||||
|
||||
The inventory file looks as follows: |
||||
|
||||
#The site wide list of mongodb servers |
||||
[mongoservers] |
||||
mongo1 |
||||
mongo2 |
||||
mongo3 |
||||
|
||||
#The list of servers where replication should happen, by default include all servers |
||||
[replicationservers] |
||||
mongo1 |
||||
mongo2 |
||||
mongo3 |
||||
|
||||
#The list of mongodb configuration servers, make sure it is 1 or 3 |
||||
[mongocservers] |
||||
mongo1 |
||||
mongo2 |
||||
mongo3 |
||||
|
||||
#The list of servers where mongos servers would run. |
||||
[mongosservers] |
||||
mongos |
||||
|
||||
Build the site with the following command: |
||||
|
||||
ansible-playbook -i hosts site.yml |
||||
|
||||
## Verification |
||||
|
||||
Once completed, we can check replication set availability by connecting to the individual primary replication set nodes with 'mongo --host <host ip> --port <port number>' |
||||
and issue the command to query the status of replication set, we should get a similar output. |
||||
|
||||
|
||||
web2:PRIMARY> rs.status() |
||||
{ |
||||
"set" : "web2", |
||||
"date" : ISODate("2013-03-19T10:26:35Z"), |
||||
"myState" : 1, |
||||
"members" : [ |
||||
{ |
||||
"_id" : 0, |
||||
"name" : "web2:2013", |
||||
"health" : 1, |
||||
"state" : 1, |
||||
"stateStr" : "PRIMARY", |
||||
"uptime" : 102, |
||||
"optime" : Timestamp(1363688755000, 1), |
||||
"optimeDate" : ISODate("2013-03-19T10:25:55Z"), |
||||
"self" : true |
||||
}, |
||||
{ |
||||
"_id" : 1, |
||||
"name" : "web3:2013", |
||||
"health" : 1, |
||||
"state" : 2, |
||||
"stateStr" : "SECONDARY", |
||||
"uptime" : 40, |
||||
"optime" : Timestamp(1363688755000, 1), |
||||
"optimeDate" : ISODate("2013-03-19T10:25:55Z"), |
||||
"lastHeartbeat" : ISODate("2013-03-19T10:26:33Z"), |
||||
"pingMs" : 1 |
||||
} |
||||
], |
||||
"ok" : 1 |
||||
} |
||||
|
||||
We can check the status of the shards as follows: connect to the mongos service with 'mongo --host <ip of mongos server> --port 8888' |
||||
and issue the following command to get the status of the Shards. |
||||
|
||||
|
||||
mongos> sh.status() |
||||
--- Sharding Status --- |
||||
sharding version: { "_id" : 1, "version" : 3 } |
||||
shards: |
||||
{ "_id" : "web2", "host" : "web2/web2:2013,web3:2013" } |
||||
{ "_id" : "web3", "host" : "web3/web2:2014,web3:2014" } |
||||
databases: |
||||
{ "_id" : "admin", "partitioned" : false, "primary" : "config" } |
||||
|
||||
|
||||
|
||||
|
||||
|
||||
### Adding a new node to the Cluster |
||||
|
||||
To add a new node to the configured MongoDB cluster, set up the inventory file as follows: |
||||
|
||||
#The site wide list of mongodb servers |
||||
[mongoservers] |
||||
mongo1 |
||||
mongo2 |
||||
mongo3 |
||||
mongo4 |
||||
|
||||
#The list of servers where replication should happen, by default include all servers |
||||
[replicationservers] |
||||
mongo4 |
||||
mongo1 |
||||
mongo2 |
||||
|
||||
#The list of mongodb configuration servers, make sure it is 1 or 3 |
||||
[mongocservers] |
||||
mongo1 |
||||
mongo2 |
||||
mongo3 |
||||
|
||||
#The list of servers where mongos servers would run. |
||||
[mongosservers] |
||||
mongos |
||||
|
||||
Make sure you have the new node added in the replicationservers section and execute the following command: |
||||
|
||||
ansible-playbook -i hosts playbooks/addnode.yml -e servername=mongo4 |
||||
|
||||
Verification can be done using the same steps mentioned above. |
||||
|
@ -0,0 +1,19 @@ |
||||
#The global variable file for the MongoDB installation |
||||
|
||||
#The chunksize for shards in MB |
||||
mongos_chunk_size: 1 |
||||
|
||||
#The port in which mongos server should listen on |
||||
mongos_port: 8888 |
||||
|
||||
#The port for mongo config server |
||||
mongoc_port: 7777 |
||||
|
||||
#The port prefix for mongod servers; the latter part is appended by the playbook (the last octet of the ip address) |
||||
mongodb_port_prefix: 201 |
||||
|
||||
#The directory prefix where the database files would be stored |
||||
mongodb_datadir_prefix: /data/ |
||||
|
||||
#The interface where the mongodb process should listen on. |
||||
iface: eth0 |
@ -0,0 +1,17 @@ |
||||
#The site wide list of mongodb servers |
||||
[mongoservers] |
||||
web2 |
||||
web3 |
||||
|
||||
#The list of servers where replication should happen, by default include all servers |
||||
[replicationservers] |
||||
web2 |
||||
web3 |
||||
|
||||
#The list of mongodb configuration servers, make sure it is 1 or 3 |
||||
[mongocservers] |
||||
web3 |
||||
|
||||
#The list of servers where mongos servers would run. |
||||
[mongosservers] |
||||
web3 |
@ -0,0 +1,11 @@ |
||||
--- |
||||
#This playbook is used to add a new node to the mongodb cluster |
||||
|
||||
- hosts: mongoservers |
||||
- hosts: replicationservers |
||||
- hosts: mongosservers |
||||
- hosts: ${servername} |
||||
tasks: |
||||
- include: ../roles/common/tasks/main.yml |
||||
- include: ../roles/mongod/tasks/main.yml |
||||
- include: ../roles/mongod/tasks/addshard.yml |
@ -0,0 +1,9 @@ |
||||
--- |
||||
# This playbook adds the shards to the mongos service |
||||
|
||||
- hosts: mongosservers |
||||
|
||||
- hosts: mongoservers |
||||
user: root |
||||
tasks: |
||||
- include: ../roles/mongod/tasks/addshard.yml |
@ -0,0 +1,7 @@ |
||||
--- |
||||
# Deploys all common plays for the site |
||||
|
||||
- hosts: mongoservers |
||||
user: root |
||||
tasks: |
||||
- include: ../roles/common/tasks/main.yml |
@ -0,0 +1,7 @@ |
||||
--- |
||||
# Deploys the mongodb configuration db servers |
||||
|
||||
- hosts: mongocservers |
||||
user: root |
||||
tasks: |
||||
- include: ../roles/mongoc/tasks/main.yml |
@ -0,0 +1,9 @@ |
||||
--- |
||||
# Deploys the mongodb service and sets up replication sets |
||||
|
||||
- hosts: mongoservers |
||||
user: root |
||||
tasks: |
||||
- include: ../roles/mongod/tasks/main.yml |
||||
handlers: |
||||
- include: ../roles/mongod/handlers/main.yml |
@ -0,0 +1,7 @@ |
||||
--- |
||||
# Responsible for setting up and configuring mongos services |
||||
|
||||
- hosts: mongosservers |
||||
user: root |
||||
tasks: |
||||
- include: ../roles/mongos/tasks/main.yml |
@ -0,0 +1,14 @@ |
||||
--- |
||||
# This Playbook runs all the common plays in the deployment |
||||
|
||||
- name: Create the hosts file for all machines |
||||
template: src=../roles/common/templates/hosts.j2 dest=/etc/hosts |
||||
|
||||
- name: Creates the repository for 10Gen |
||||
template: src=../roles/common/templates/10gen.repo.j2 dest=/etc/yum.repos.d/10gen.repo |
||||
|
||||
- name: Install the mongodb package |
||||
yum: name=$item state=installed |
||||
with_items: |
||||
- mongo-10gen |
||||
- mongo-10gen-server |
@ -0,0 +1,6 @@ |
||||
[10gen] |
||||
name=10gen Repository |
||||
baseurl=http://downloads-distro.mongodb.org/repo/redhat/os/x86_64 |
||||
gpgcheck=0 |
||||
enabled=1 |
||||
|
@ -0,0 +1,4 @@ |
||||
127.0.0.1 localhost |
||||
{% for host in groups['mongoservers'] %} |
||||
{{ hostvars[host]['ansible_' + iface].ipv4.address }} {{ host }} |
||||
{% endfor %} |
@ -0,0 +1,21 @@ |
||||
--- |
||||
# This playbook deploys the mongodb configurationdb servers |
||||
|
||||
- name: Create data directory for mongoc configuration server |
||||
file: path=${mongodb_datadir_prefix}/configdb state=directory owner=mongod group=mongod |
||||
|
||||
- name: Create the mongo configuration server startup file |
||||
template: src=../roles/mongoc/templates/mongoc.j2 dest=/etc/init.d/mongoc mode=0655 |
||||
|
||||
- name: Insert iptables rule for mongoc |
||||
lineinfile: dest=/etc/sysconfig/iptables state=present regexp="$mongoc_port" insertafter="^:OUTPUT " line="-A INPUT -p tcp --dport "$mongoc_port" -j ACCEPT" |
||||
|
||||
- name: Apply iptable rule |
||||
service: name=iptables state=restarted |
||||
|
||||
- name: Create the mongo configuration server file |
||||
template: src=../roles/mongoc/templates/mongoc.conf.j2 dest=/etc/mongoc.conf |
||||
|
||||
- name: Start the mongo configuration server service |
||||
command: creates=/var/lock/subsys/mongoc /etc/init.d/mongoc start |
||||
|
@ -0,0 +1,17 @@ |
||||
|
||||
#where to log |
||||
logpath=/var/log/mongo/mongod-config.log |
||||
|
||||
logappend=true |
||||
|
||||
# fork and run in background |
||||
fork = true |
||||
|
||||
port = {{ mongoc_port }} |
||||
|
||||
dbpath={{ mongodb_datadir_prefix }}configdb |
||||
|
||||
# location of pidfile |
||||
pidfilepath = /var/run/mongoc.pid |
||||
|
||||
configsvr=true |
@ -0,0 +1,94 @@ |
||||
#!/bin/bash |
||||
|
||||
# mongod - Startup script for mongod |
||||
|
||||
# chkconfig: 35 85 15 |
||||
# description: Mongo is a scalable, document-oriented database. |
||||
# processname: mongod |
||||
# config: /etc/mongod.conf |
||||
# pidfile: /var/run/mongo/mongod.pid |
||||
|
||||
. /etc/rc.d/init.d/functions |
||||
|
||||
# things from mongod.conf get there by mongod reading it |
||||
|
||||
|
||||
# NOTE: if you change any OPTIONS here, you get what you pay for: |
||||
# this script assumes all options are in the config file. |
||||
CONFIGFILE="/etc/mongoc.conf" |
||||
OPTIONS=" -f $CONFIGFILE" |
||||
SYSCONFIG="/etc/sysconfig/mongod" |
||||
|
||||
# FIXME: 1.9.x has a --shutdown flag that parses the config file and |
||||
# shuts down the correct running pid, but that's unavailable in 1.8 |
||||
# for now. This can go away when this script stops supporting 1.8. |
||||
DBPATH=`awk -F= '/^dbpath=/{print $2}' "$CONFIGFILE"` |
||||
PIDFILE=`awk -F= '/^dbpath\s=\s/{print $2}' "$CONFIGFILE"` |
||||
mongod=${MONGOD-/usr/bin/mongod} |
||||
|
||||
MONGO_USER=mongod |
||||
MONGO_GROUP=mongod |
||||
|
||||
if [ -f "$SYSCONFIG" ]; then |
||||
. "$SYSCONFIG" |
||||
fi |
||||
|
||||
# Handle NUMA access to CPUs (SERVER-3574) |
||||
# This verifies the existence of numactl as well as testing that the command works |
||||
NUMACTL_ARGS="--interleave=all" |
||||
if which numactl >/dev/null 2>/dev/null && numactl $NUMACTL_ARGS ls / >/dev/null 2>/dev/null |
||||
then |
||||
NUMACTL="numactl $NUMACTL_ARGS" |
||||
else |
||||
NUMACTL="" |
||||
fi |
||||
|
||||
start() |
||||
{ |
||||
echo -n $"Starting mongod: " |
||||
daemon --user "$MONGO_USER" $NUMACTL $mongod $OPTIONS |
||||
RETVAL=$? |
||||
echo |
||||
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/mongoc |
||||
} |
||||
|
||||
stop() |
||||
{ |
||||
echo -n $"Stopping mongod: " |
||||
killproc -p "$PIDFILE" -d 300 /usr/bin/mongod |
||||
RETVAL=$? |
||||
echo |
||||
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/mongoc |
||||
} |
||||
|
||||
restart () { |
||||
stop |
||||
start |
||||
} |
||||
|
||||
ulimit -n 12000 |
||||
RETVAL=0 |
||||
|
||||
case "$1" in |
||||
start) |
||||
start |
||||
;; |
||||
stop) |
||||
stop |
||||
;; |
||||
restart|reload|force-reload) |
||||
restart |
||||
;; |
||||
condrestart) |
||||
[ -f /var/lock/subsys/mongod ] && restart || : |
||||
;; |
||||
status) |
||||
status $mongod |
||||
RETVAL=$? |
||||
;; |
||||
*) |
||||
echo "Usage: $0 {start|stop|status|restart|reload|force-reload|condrestart}" |
||||
RETVAL=1 |
||||
esac |
||||
|
||||
exit $RETVAL |
@ -0,0 +1,5 @@ |
||||
--- |
||||
# Handler for mongod |
||||
|
||||
- name: restart iptables |
||||
service: name=iptables state=restarted |
@ -0,0 +1,19 @@ |
||||
--- |
||||
#This playbook adds shards to the mongos servers once everything is added |
||||
|
||||
- name: get the port number for mongod processes |
||||
shell: ifconfig ${iface} | grep "inet addr" | cut -d':' -f2 | cut -d. -f4 | cut -d' ' -f1 |
||||
register: result |
||||
|
||||
|
||||
- name: Create the file to initialize the mongod Shard |
||||
template: src=../roles/mongod/templates/shard_init.j2 dest=/tmp/shard_init_${inventory_hostname}.js |
||||
delegate_to: $item |
||||
with_items: ${groups.mongosservers} |
||||
|
||||
- name: Add the shard to the mongos |
||||
shell: /usr/bin/mongo --port ${mongos_port} /tmp/shard_init_${inventory_hostname}.js |
||||
delegate_to: $item |
||||
with_items: ${groups.mongosservers} |
||||
|
||||
|
@ -0,0 +1,44 @@ |
||||
--- |
||||
#This Playbook deploys the mongod processes and sets up the firewall rules and sets up the replication set. |
||||
|
||||
- name: create data directory for mongodb |
||||
file: path=${mongodb_datadir_prefix}/mongo-${inventory_hostname} state=directory owner=mongod group=mongod |
||||
delegate_to: $item |
||||
with_items: ${groups.replicationservers} |
||||
|
||||
- name: Create a port number for mongod processes |
||||
shell: ifconfig ${iface} | grep "inet addr" | cut -d':' -f2 | cut -d. -f4 | cut -d' ' -f1 |
||||
register: result |
||||
|
||||
- name: Create the mongodb startup file |
||||
template: src=../roles/mongod/templates/mongod.j2 dest=/etc/init.d/mongod-${inventory_hostname} mode=0655 |
||||
delegate_to: $item |
||||
with_items: ${groups.replicationservers} |
||||
|
||||
- name: insert iptables rule for mongod |
||||
lineinfile: dest=/etc/sysconfig/iptables state=present regexp="$mongodb_port_prefix${result.stdout}" insertafter="^:OUTPUT " line="-A INPUT -p tcp --dport "$mongodb_port_prefix${result.stdout}" -j ACCEPT" |
||||
delegate_to: $item |
||||
with_items: ${groups.replicationservers} |
||||
|
||||
- name: Add the iptable rule to allow traffice dynamically |
||||
shell: iptables -I INPUT 2 -p tcp --dport ${mongodb_port_prefix}${result.stdout} -j ACCEPT |
||||
delegate_to: $item |
||||
with_items: ${groups.replicationservers} |
||||
|
||||
- name: Create the mongodb configuration file |
||||
template: src=../roles/mongod/templates/mongod.conf.j2 dest=/etc/mongod-${inventory_hostname}.conf |
||||
delegate_to: $item |
||||
with_items: ${groups.replicationservers} |
||||
|
||||
- name: Start the mongodb service |
||||
command: creates=/var/lock/subsys/mongod-${inventory_hostname} /etc/init.d/mongod-${inventory_hostname} start |
||||
delegate_to: $item |
||||
with_items: ${groups.replicationservers} |
||||
|
||||
- name: Create the file to initialize the mongod replica set |
||||
template: src=../roles/mongod/templates/repset_init.j2 dest=/tmp/repset_init.js |
||||
|
||||
- name: Initialize the replication set |
||||
shell: /usr/bin/mongo --port "$mongodb_port_prefix${result.stdout}" /tmp/repset_init.js |
||||
|
||||
|
@ -0,0 +1,84 @@ |
||||
# mongo.conf |
||||
smallfiles=true |
||||
|
||||
#where to log |
||||
logpath=/var/log/mongo/mongod-{{ inventory_hostname }}.log |
||||
|
||||
logappend=true |
||||
|
||||
# fork and run in background |
||||
fork = true |
||||
|
||||
port = {{ mongodb_port_prefix }}{{ result.stdout }} |
||||
|
||||
dbpath={{ mongodb_datadir_prefix }}mongo-{{ inventory_hostname }} |
||||
|
||||
# location of pidfile |
||||
pidfilepath = /var/run/mongod.pid |
||||
|
||||
# Disables write-ahead journaling |
||||
# nojournal = true |
||||
|
||||
# Enables periodic logging of CPU utilization and I/O wait |
||||
#cpu = true |
||||
|
||||
# Turn on/off security. Off is currently the default |
||||
#noauth = true |
||||
#auth = true |
||||
|
||||
# Verbose logging output. |
||||
#verbose = true |
||||
|
||||
# Inspect all client data for validity on receipt (useful for |
||||
# developing drivers) |
||||
#objcheck = true |
||||
|
||||
# Enable db quota management |
||||
#quota = true |
||||
|
||||
# Set oplogging level where n is |
||||
# 0=off (default) |
||||
# 1=W |
||||
# 2=R |
||||
# 3=both |
||||
# 7=W+some reads |
||||
#diaglog = 0 |
||||
|
||||
# Ignore query hints |
||||
#nohints = true |
||||
|
||||
# Disable the HTTP interface (Defaults to localhost:27018). |
||||
#nohttpinterface = true |
||||
|
||||
# Turns off server-side scripting. This will result in greatly limited |
||||
# functionality |
||||
#noscripting = true |
||||
|
||||
# Turns off table scans. Any query that would do a table scan fails. |
||||
#notablescan = true |
||||
|
||||
# Disable data file preallocation. |
||||
#noprealloc = true |
||||
|
||||
# Specify .ns file size for new databases. |
||||
# nssize = <size> |
||||
|
||||
# Account token for Mongo monitoring server. |
||||
#mms-token = <token> |
||||
|
||||
# Server name for Mongo monitoring server. |
||||
#mms-name = <server-name> |
||||
|
||||
# Ping interval for Mongo monitoring server. |
||||
#mms-interval = <seconds> |
||||
|
||||
# Replication Options |
||||
replSet={{ inventory_hostname }} |
||||
# in replicated mongo databases, specify here whether this is a slave or master |
||||
#slave = true |
||||
#source = master.example.com |
||||
# Slave only: specify a single database to replicate |
||||
#only = master.example.com |
||||
# or |
||||
#master = true |
||||
#source = slave.example.com |
@ -0,0 +1,94 @@ |
||||
#!/bin/bash |
||||
|
||||
# mongod - Startup script for mongod |
||||
|
||||
# chkconfig: 35 85 15 |
||||
# description: Mongo is a scalable, document-oriented database. |
||||
# processname: mongod |
||||
# config: /etc/mongod.conf |
||||
# pidfile: /var/run/mongo/mongod.pid |
||||
|
||||
. /etc/rc.d/init.d/functions |
||||
|
||||
# things from mongod.conf get there by mongod reading it |
||||
|
||||
|
||||
# NOTE: if you change any OPTIONS here, you get what you pay for: |
||||
# this script assumes all options are in the config file. |
||||
CONFIGFILE="/etc/mongod-{{ inventory_hostname }}.conf" |
||||
OPTIONS=" -f $CONFIGFILE" |
||||
SYSCONFIG="/etc/sysconfig/mongod" |
||||
|
||||
# FIXME: 1.9.x has a --shutdown flag that parses the config file and |
||||
# shuts down the correct running pid, but that's unavailable in 1.8 |
||||
# for now. This can go away when this script stops supporting 1.8. |
||||
DBPATH=`awk -F= '/^dbpath=/{print $2}' "$CONFIGFILE"` |
||||
PIDFILE=`awk -F= '/^dbpath\s=\s/{print $2}' "$CONFIGFILE"` |
||||
mongod=${MONGOD-/usr/bin/mongod} |
||||
|
||||
MONGO_USER=mongod |
||||
MONGO_GROUP=mongod |
||||
|
||||
if [ -f "$SYSCONFIG" ]; then |
||||
. "$SYSCONFIG" |
||||
fi |
||||
|
||||
# Handle NUMA access to CPUs (SERVER-3574) |
||||
# This verifies the existence of numactl as well as testing that the command works |
||||
NUMACTL_ARGS="--interleave=all" |
||||
if which numactl >/dev/null 2>/dev/null && numactl $NUMACTL_ARGS ls / >/dev/null 2>/dev/null |
||||
then |
||||
NUMACTL="numactl $NUMACTL_ARGS" |
||||
else |
||||
NUMACTL="" |
||||
fi |
||||
|
||||
start() |
||||
{ |
||||
echo -n $"Starting mongod: " |
||||
daemon --user "$MONGO_USER" $NUMACTL $mongod $OPTIONS |
||||
RETVAL=$? |
||||
echo |
||||
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/mongod-{{ inventory_hostname }} |
||||
} |
||||
|
||||
stop() |
||||
{ |
||||
echo -n $"Stopping mongod: " |
||||
killproc -p "$PIDFILE" -d 300 /usr/bin/mongod |
||||
RETVAL=$? |
||||
echo |
||||
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/mongod-{{ inventory_hostname }} |
||||
} |
||||
|
||||
restart () { |
||||
stop |
||||
start |
||||
} |
||||
|
||||
ulimit -n 12000 |
||||
RETVAL=0 |
||||
|
||||
case "$1" in |
||||
start) |
||||
start |
||||
;; |
||||
stop) |
||||
stop |
||||
;; |
||||
restart|reload|force-reload) |
||||
restart |
||||
;; |
||||
condrestart) |
||||
[ -f /var/lock/subsys/mongod ] && restart || : |
||||
;; |
||||
status) |
||||
status $mongod |
||||
RETVAL=$? |
||||
;; |
||||
*) |
||||
echo "Usage: $0 {start|stop|status|restart|reload|force-reload|condrestart}" |
||||
RETVAL=1 |
||||
esac |
||||
|
||||
exit $RETVAL |
@ -0,0 +1,7 @@ |
||||
rs.initiate() |
||||
sleep(13000) |
||||
{% for host in groups['replicationservers'] %} |
||||
rs.add("{{ host }}:{{ mongodb_port_prefix }}{{ result.stdout }}") |
||||
sleep(8000) |
||||
{% endfor %} |
||||
printjson(rs.status()) |
@ -0,0 +1,2 @@ |
||||
sh.addShard("{{ inventory_hostname}}/{{ inventory_hostname }}:{{ mongodb_port_prefix }}{{ result.stdout }}") |
||||
printjson(rs.status()) |
@ -0,0 +1,18 @@ |
||||
--- |
||||
#This Playbook configures the mongos service of mongodb |
||||
|
||||
- name: Create the mongos startup file |
||||
template: src=../roles/mongos/templates/mongos.j2 dest=/etc/init.d/mongos mode=0655 |
||||
|
||||
- name: insert iptables rule for mongos |
||||
lineinfile: dest=/etc/sysconfig/iptables state=present regexp="$mongos_port" insertafter="^:OUTPUT " line="-A INPUT -p tcp --dport "$mongos_port" -j ACCEPT" |
||||
|
||||
- name: Apply iptable rule on replication servers also |
||||
service: name=iptables state=restarted |
||||
|
||||
- name: Create the mongos configuration file |
||||
template: src=../roles/mongos/templates/mongos.conf.j2 dest=/etc/mongos.conf |
||||
|
||||
- name: Start the mongos service |
||||
command: creates=/var/lock/subsys/mongos /etc/init.d/mongos start |
||||
|
@ -0,0 +1,22 @@ |
||||
#where to log |
||||
logpath=/var/log/mongo/mongos.log |
||||
|
||||
logappend=true |
||||
|
||||
# fork and run in background |
||||
fork = true |
||||
|
||||
port = {{ mongos_port }} |
||||
{% set hosts = '' %} |
||||
{% for host in groups['mongocservers'] %} |
||||
{% if loop.last %} |
||||
{% set hosts = hosts + host + ':' ~ mongoc_port %} |
||||
configdb = {{ hosts }} |
||||
{% else %} |
||||
{% set hosts = hosts + host + ':' ~ mongoc_port + ',' %} |
||||
{% endif %} |
||||
{% endfor %} |
||||
|
||||
# location of pidfile |
||||
pidfilepath = /var/run/mongodb/mongos.pid |
||||
chunkSize={{ mongos_chunk_size }} |
@ -0,0 +1,95 @@ |
||||
#!/bin/bash |
||||
|
||||
# mongod - Startup script for mongod |
||||
|
||||
# chkconfig: 35 85 15 |
||||
# description: Mongo is a scalable, document-oriented database. |
||||
# processname: mongod |
||||
# config: /etc/mongod.conf |
||||
# pidfile: /var/run/mongo/mongod.pid |
||||
|
||||
. /etc/rc.d/init.d/functions |
||||
|
||||
# things from mongod.conf get there by mongod reading it |
||||
|
||||
|
||||
# NOTE: if you change any OPTIONS here, you get what you pay for: |
||||
# this script assumes all options are in the config file. |
||||
CONFIGFILE="/etc/mongos.conf" |
||||
OPTIONS=" -f $CONFIGFILE" |
||||
SYSCONFIG="/etc/sysconfig/mongod" |
||||
|
||||
# FIXME: 1.9.x has a --shutdown flag that parses the config file and |
||||
# shuts down the correct running pid, but that's unavailable in 1.8 |
||||
# for now. This can go away when this script stops supporting 1.8. |
||||
DBPATH=`awk -F= '/^dbpath=/{print $2}' "$CONFIGFILE"` |
||||
PIDFILE=`awk -F= '/^dbpath\s=\s/{print $2}' "$CONFIGFILE"` |
||||
mongod=${MONGOD-/usr/bin/mongos} |
||||
|
||||
MONGO_USER=mongod |
||||
MONGO_GROUP=mongod |
||||
|
||||
if [ -f "$SYSCONFIG" ]; then |
||||
. "$SYSCONFIG" |
||||
fi |
||||
|
||||
# Handle NUMA access to CPUs (SERVER-3574) |
||||
# This verifies the existence of numactl as well as testing that the command works |
||||
NUMACTL_ARGS="--interleave=all" |
||||
if which numactl >/dev/null 2>/dev/null && numactl $NUMACTL_ARGS ls / >/dev/null 2>/dev/null |
||||
then |
||||
NUMACTL="numactl $NUMACTL_ARGS" |
||||
else |
||||
NUMACTL="" |
||||
fi |
||||
|
||||
start() |
||||
{ |
||||
echo -n $"Starting mongod: " |
||||
daemon --user "$MONGO_USER" $NUMACTL $mongod $OPTIONS |
||||
RETVAL=$? |
||||
echo |
||||
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/mongos |
||||
} |
||||
|
||||
stop() |
||||
{ |
||||
echo -n $"Stopping mongod: " |
||||
killproc -p "$PIDFILE" -d 300 /usr/bin/mongos |
||||
RETVAL=$? |
||||
echo |
||||
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/mongos |
||||
} |
||||
|
||||
restart () { |
||||
stop |
||||
start |
||||
} |
||||
|
||||
ulimit -n 12000 |
||||
RETVAL=0 |
||||
|
||||
case "$1" in |
||||
start) |
||||
start |
||||
;; |
||||
stop) |
||||
stop |
||||
;; |
||||
restart|reload|force-reload) |
||||
restart |
||||
;; |
||||
condrestart) |
||||
[ -f /var/lock/subsys/mongod ] && restart || : |
||||
;; |
||||
status) |
||||
status $mongod |
||||
RETVAL=$? |
||||
;; |
||||
*) |
||||
echo "Usage: $0 {start|stop|status|restart|reload|force-reload|condrestart}" |
||||
RETVAL=1 |
||||
esac |
||||
|
||||
exit $RETVAL |
||||
|
@ -0,0 +1,10 @@ |
||||
--- |
||||
# This Playbook would deploy the whole mongodb cluster with replication and sharding. |
||||
|
||||
- include: playbooks/common.yml |
||||
- include: playbooks/mongod.yml |
||||
- include: playbooks/mongoc.yml |
||||
- include: playbooks/mongos.yml |
||||
- include: playbooks/addshard.yml |
||||
|
||||
|
Reference in new issue