Replacing with wordpress-based

pull/63/head
Ivan Grynenko 8 years ago
parent c502cf8736
commit 6ac688417d
  1. jboss-standalone/LICENSE.md (4)
  2. jboss-standalone/README.md (31)
  3. jboss-standalone/group_vars/jboss-servers (4)
  4. jboss-standalone/hosts (2)
  5. jboss-standalone/roles/jboss-standalone/files/jboss-as-standalone.sh (180)
  6. jboss-standalone/roles/jboss-standalone/handlers/main.yml (6)
  7. jboss-standalone/roles/jboss-standalone/tasks/main.yml (39)
  8. jboss-standalone/roles/jboss-standalone/templates/iptables-save (14)
  9. jboss-standalone/roles/jboss-standalone/templates/standalone.xml (308)
  10. jboss-standalone/site.yml (8)
  11. lamp_centos7/LICENSE.md (4)
  12. lamp_centos7/README.md (32)
  13. lamp_centos7/group_vars/all (6)
  14. lamp_centos7/group_vars/dbservers (9)
  15. lamp_centos7/hosts (5)
  16. lamp_centos7/roles/common/handlers/main.yml (6)
  17. lamp_centos7/roles/common/tasks/main.yml (15)
  18. lamp_centos7/roles/common/templates/ntp.conf.j2 (12)
  19. lamp_centos7/roles/db/tasks/main.yml (36)
  20. lamp_centos7/roles/db/templates/my.cnf.j2 (11)
  21. lamp_centos7/roles/php-fpm/tasks/main.yml (27)
  22. lamp_centos7/roles/web/tasks/copy_code.yml (9)
  23. lamp_centos7/roles/web/tasks/install_httpd.yml (16)
  24. lamp_centos7/roles/web/tasks/main.yml (3)
  25. lamp_centos7/roles/web/templates/index.php.j2 (24)
  26. lamp_centos7/site.yml (24)
  27. lamp_haproxy/LICENSE.md (4)
  28. lamp_haproxy/README.md (72)
  29. lamp_haproxy/group_vars/all (5)
  30. lamp_haproxy/group_vars/dbservers (9)
  31. lamp_haproxy/group_vars/lbservers (25)
  32. lamp_haproxy/group_vars/webservers (17)
  33. lamp_haproxy/hosts (12)
  34. lamp_haproxy/roles/base-apache/tasks/main.yml (10)
  35. lamp_haproxy/roles/common/files/RPM-GPG-KEY-EPEL-6 (29)
  36. lamp_haproxy/roles/common/files/epel.repo (26)
  37. lamp_haproxy/roles/common/handlers/main.yml (8)
  38. lamp_haproxy/roles/common/tasks/main.yml (46)
  39. lamp_haproxy/roles/common/templates/iptables.j2 (30)
  40. lamp_haproxy/roles/common/templates/ntp.conf.j2 (12)
  41. lamp_haproxy/roles/db/handlers/main.yml (6)
  42. lamp_haproxy/roles/db/tasks/main.yml (26)
  43. lamp_haproxy/roles/db/templates/my.cnf.j2 (11)
  44. lamp_haproxy/roles/haproxy/handlers/main.yml (9)
  45. lamp_haproxy/roles/haproxy/tasks/main.yml (15)
  46. lamp_haproxy/roles/haproxy/templates/haproxy.cfg.j2 (39)
  47. lamp_haproxy/roles/nagios/files/ansible-managed-services.cfg (39)
  48. lamp_haproxy/roles/nagios/files/localhost.cfg (144)
  49. lamp_haproxy/roles/nagios/files/nagios.cfg (1332)
  50. lamp_haproxy/roles/nagios/handlers/main.yml (7)
  51. lamp_haproxy/roles/nagios/tasks/main.yml (41)
  52. lamp_haproxy/roles/nagios/templates/dbservers.cfg.j2 (25)
  53. lamp_haproxy/roles/nagios/templates/lbservers.cfg.j2 (22)
  54. lamp_haproxy/roles/nagios/templates/webservers.cfg.j2 (25)
  55. lamp_haproxy/roles/web/tasks/main.yml (16)
  56. lamp_haproxy/rolling_update.yml (48)
  57. lamp_haproxy/site.yml (36)
  58. lamp_simple/LICENSE.md (4)
  59. lamp_simple/README.md (27)
  60. lamp_simple/group_vars/all (6)
  61. lamp_simple/group_vars/dbservers (9)
  62. lamp_simple/hosts (7)
  63. lamp_simple/roles/common/handlers/main.yml (9)
  64. lamp_simple/roles/common/tasks/main.yml (20)
  65. lamp_simple/roles/common/templates/ntp.conf.j2 (12)
  66. lamp_simple/roles/db/handlers/main.yml (8)
  67. lamp_simple/roles/db/tasks/main.yml (33)
  68. lamp_simple/roles/db/templates/my.cnf.j2 (11)
  69. lamp_simple/roles/web/handlers/main.yml (6)
  70. lamp_simple/roles/web/tasks/copy_code.yml (9)
  71. lamp_simple/roles/web/tasks/install_httpd.yml (24)
  72. lamp_simple/roles/web/tasks/main.yml (3)
  73. lamp_simple/roles/web/templates/index.php.j2 (24)
  74. lamp_simple/site.yml (23)
  75. language_features/ansible_pull.yml (56)
  76. language_features/batch_size_control.yml (19)
  77. language_features/cloudformation.yaml (45)
  78. language_features/complex_args.yml (45)
  79. language_features/conditionals_part1.yml (50)
  80. language_features/conditionals_part2.yml (40)
  81. language_features/custom_filters.yml (6)
  82. language_features/delegation.yml (39)
  83. language_features/environment.yml (33)
  84. language_features/eucalyptus-ec2.yml (65)
  85. language_features/file_secontext.yml (18)
  86. language_features/files/cloudformation-example.json (399)
  87. language_features/filter_plugins/custom_plugins.py (29)
  88. language_features/get_url.yml (16)
  89. language_features/group_by.yml (35)
  90. language_features/group_commands.yml (18)
  91. language_features/handlers/handlers.yml (10)
  92. language_features/intermediate_example.yml (91)
  93. language_features/intro_example.yml (76)
  94. language_features/loop_nested.yml (24)
  95. language_features/loop_plugins.yml (20)
  96. language_features/loop_with_items.yml (35)
  97. language_features/mysql.yml (18)
  98. language_features/nested_playbooks.yml (26)
  99. language_features/netscaler.yml (25)
  100. language_features/postgresql.yml (41)
Some files were not shown because too many files have changed in this diff.

@ -1,4 +0,0 @@
Copyright (C) 2013 AnsibleWorks, Inc.
This work is licensed under the Creative Commons Attribution 3.0 Unported License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/3.0/deed.en_US.

@ -1,31 +0,0 @@
## Standalone JBoss Deployment
- Requires Ansible 1.2 or newer
- Expects CentOS/RHEL 6.x hosts
These playbooks deploy a very basic implementation of JBoss Application Server,
version 7. To use them, first edit the "hosts" inventory file to contain the
hostnames of the machines on which you want JBoss deployed, and edit the
group_vars/jboss-servers file to set any JBoss configuration parameters you need.
Then run the playbook, like this:
ansible-playbook -i hosts site.yml
When the playbook run completes, you should be able to see the JBoss
Application Server running on the ports you chose, on the target machines.
This is a very simple playbook and could serve as a starting point for more
complex JBoss-based projects.
### Ideas for Improvement
Here are some ideas for ways that these playbooks could be extended:
- Write a playbook or an Ansible module to configure JBoss users.
- Write a playbook to deploy an actual application into the server.
- Extend this configuration to multiple application servers fronted by a load
balancer or other web server frontend.
We would love to see contributions and improvements, so please fork this
repository on GitHub and send us your changes via pull requests.
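
As a hedged sketch of the second idea above (deploying an application), a task
could simply copy an application archive into the standalone deployment scanner
directory that standalone.xml configures. The files/sample.war source path and
file name below are illustrative placeholders, not files shipped with this role:

    # Illustrative only: the WAR is an artifact you would provide yourself.
    - name: Deploy the sample application
      copy: src=files/sample.war dest=/usr/share/jboss-as/standalone/deployments/ owner=jboss group=jboss

The deployment-scanner subsystem configured in standalone.xml watches that
directory (scan-interval 5000 ms), so a copied archive is picked up without a
restart.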

@ -1,4 +0,0 @@
# Here are variables related to the standalone JBoss installation
http_port: 8080
https_port: 8443

@ -1,2 +0,0 @@
[jboss-servers]
webserver1

@ -1,180 +0,0 @@
#!/bin/sh
#
# JBoss standalone control script
#
# chkconfig: - 80 20
# description: JBoss AS Standalone
# processname: standalone
# pidfile: /var/run/jboss-as/jboss-as-standalone.pid
# config: /etc/jboss-as/jboss-as.conf
# Source function library.
. /etc/init.d/functions
# Load Java configuration.
[ -r /etc/java/java.conf ] && . /etc/java/java.conf
export JAVA_HOME
##
# Set the JBoss user
JBOSS_USER=jboss
export JBOSS_USER
# Load JBoss AS init.d configuration.
if [ -z "$JBOSS_CONF" ]; then
JBOSS_CONF="/etc/jboss-as/jboss-as.conf"
fi
[ -r "$JBOSS_CONF" ] && . "${JBOSS_CONF}"
# Set defaults.
if [ -z "$JBOSS_HOME" ]; then
JBOSS_HOME=/usr/share/jboss-as
fi
export JBOSS_HOME
if [ -z "$JBOSS_PIDFILE" ]; then
JBOSS_PIDFILE=/var/run/jboss-as/jboss-as-standalone.pid
fi
export JBOSS_PIDFILE
if [ -z "$JBOSS_CONSOLE_LOG" ]; then
JBOSS_CONSOLE_LOG=/var/log/jboss-as/console.log
fi
if [ -z "$STARTUP_WAIT" ]; then
STARTUP_WAIT=30
fi
if [ -z "$SHUTDOWN_WAIT" ]; then
SHUTDOWN_WAIT=30
fi
if [ -z "$JBOSS_CONFIG" ]; then
JBOSS_CONFIG=standalone.xml
fi
JBOSS_SCRIPT=$JBOSS_HOME/bin/standalone.sh
prog='jboss-as'
CMD_PREFIX=''
if [ ! -z "$JBOSS_USER" ]; then
if [ -x /etc/rc.d/init.d/functions ]; then
CMD_PREFIX="daemon --user $JBOSS_USER"
else
CMD_PREFIX="su - $JBOSS_USER -c"
fi
fi
start() {
echo -n "Starting $prog: "
if [ -f $JBOSS_PIDFILE ]; then
read ppid < $JBOSS_PIDFILE
if [ `ps --pid $ppid 2> /dev/null | grep -c $ppid 2> /dev/null` -eq '1' ]; then
echo -n "$prog is already running"
failure
echo
return 1
else
rm -f $JBOSS_PIDFILE
fi
fi
mkdir -p $(dirname $JBOSS_CONSOLE_LOG)
cat /dev/null > $JBOSS_CONSOLE_LOG
mkdir -p $(dirname $JBOSS_PIDFILE)
chown $JBOSS_USER $(dirname $JBOSS_PIDFILE) || true
#$CMD_PREFIX JBOSS_PIDFILE=$JBOSS_PIDFILE $JBOSS_SCRIPT 2>&1 > $JBOSS_CONSOLE_LOG &
#$CMD_PREFIX JBOSS_PIDFILE=$JBOSS_PIDFILE $JBOSS_SCRIPT &
if [ ! -z "$JBOSS_USER" ]; then
if [ -x /etc/rc.d/init.d/functions ]; then
daemon --user $JBOSS_USER LAUNCH_JBOSS_IN_BACKGROUND=1 JBOSS_PIDFILE=$JBOSS_PIDFILE $JBOSS_SCRIPT -c $JBOSS_CONFIG 2>&1 > $JBOSS_CONSOLE_LOG &
else
su - $JBOSS_USER -c "LAUNCH_JBOSS_IN_BACKGROUND=1 JBOSS_PIDFILE=$JBOSS_PIDFILE $JBOSS_SCRIPT -c $JBOSS_CONFIG" 2>&1 > $JBOSS_CONSOLE_LOG &
fi
fi
count=0
launched=false
until [ $count -gt $STARTUP_WAIT ]
do
grep 'JBoss AS.*started in' $JBOSS_CONSOLE_LOG > /dev/null
if [ $? -eq 0 ] ; then
launched=true
break
fi
sleep 1
let count=$count+1;
done
success
echo
return 0
}
stop() {
echo -n $"Stopping $prog: "
count=0;
if [ -f $JBOSS_PIDFILE ]; then
read kpid < $JBOSS_PIDFILE
let kwait=$SHUTDOWN_WAIT
# Try issuing SIGTERM
kill -15 $kpid
until [ `ps --pid $kpid 2> /dev/null | grep -c $kpid 2> /dev/null` -eq '0' ] || [ $count -gt $kwait ]
do
sleep 1
let count=$count+1;
done
if [ $count -gt $kwait ]; then
kill -9 $kpid
fi
fi
rm -f $JBOSS_PIDFILE
success
echo
}
status() {
if [ -f $JBOSS_PIDFILE ]; then
read ppid < $JBOSS_PIDFILE
if [ `ps --pid $ppid 2> /dev/null | grep -c $ppid 2> /dev/null` -eq '1' ]; then
echo "$prog is running (pid $ppid)"
return 0
else
echo "$prog dead but pid file exists"
return 1
fi
fi
echo "$prog is not running"
return 3
}
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
$0 stop
$0 start
;;
status)
status
;;
*)
## If no parameters are given, print which are available.
echo "Usage: $0 {start|stop|status|restart|reload}"
exit 1
;;
esac

@ -1,6 +0,0 @@
---
- name: restart jboss
  service: name=jboss state=restarted
- name: restart iptables
  service: name=iptables state=restarted

@ -1,39 +0,0 @@
---
- name: Install Java 1.7 and some basic dependencies
  yum: name={{item}} state=present
  with_items:
    - unzip
    - java-1.7.0-openjdk
- name: Download JBoss from jboss.org
  get_url: url=http://download.jboss.org/jbossas/7.1/jboss-as-7.1.1.Final/jboss-as-7.1.1.Final.zip dest=/opt/jboss-as-7.1.1.Final.zip
- name: Extract archive
  command: chdir=/usr/share /usr/bin/unzip -q /opt/jboss-as-7.1.1.Final.zip creates=/usr/share/jboss-as
# Rename the dir to avoid encoding the version in the init script
- name: Rename install directory
  command: chdir=/usr/share /bin/mv jboss-as-7.1.1.Final jboss-as creates=/usr/share/jboss-as
- name: Copying standalone.xml configuration file
  template: src=standalone.xml dest=/usr/share/jboss-as/standalone/configuration/
  notify: restart jboss
- name: Add group "jboss"
  group: name=jboss
- name: Add user "jboss"
  user: name=jboss group=jboss home=/usr/share/jboss-as
- name: Change ownership of JBoss installation
  file: path=/usr/share/jboss-as/ owner=jboss group=jboss state=directory recurse=yes
- name: Copy the init script
  copy: src=jboss-as-standalone.sh dest=/etc/init.d/jboss mode=0755
- name: Enable JBoss to be started at boot
  service: name=jboss enabled=yes state=started
- name: deploy iptables rules
  template: src=iptables-save dest=/etc/sysconfig/iptables
  notify: restart iptables

@ -1,14 +0,0 @@
# {{ ansible_managed }}
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [4:512]
-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -p icmp -j ACCEPT
-A INPUT -i lo -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport {{ http_port }} -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport {{ https_port }} -j ACCEPT
-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A FORWARD -j REJECT --reject-with icmp-host-prohibited
COMMIT

@ -1,308 +0,0 @@
<?xml version='1.0' encoding='UTF-8'?>
<!-- {{ ansible_managed }} -->
<server xmlns="urn:jboss:domain:1.2">
<extensions>
<extension module="org.jboss.as.clustering.infinispan"/>
<extension module="org.jboss.as.configadmin"/>
<extension module="org.jboss.as.connector"/>
<extension module="org.jboss.as.deployment-scanner"/>
<extension module="org.jboss.as.ee"/>
<extension module="org.jboss.as.ejb3"/>
<extension module="org.jboss.as.jaxrs"/>
<extension module="org.jboss.as.jdr"/>
<extension module="org.jboss.as.jmx"/>
<extension module="org.jboss.as.jpa"/>
<extension module="org.jboss.as.logging"/>
<extension module="org.jboss.as.mail"/>
<extension module="org.jboss.as.naming"/>
<extension module="org.jboss.as.osgi"/>
<extension module="org.jboss.as.pojo"/>
<extension module="org.jboss.as.remoting"/>
<extension module="org.jboss.as.sar"/>
<extension module="org.jboss.as.security"/>
<extension module="org.jboss.as.threads"/>
<extension module="org.jboss.as.transactions"/>
<extension module="org.jboss.as.web"/>
<extension module="org.jboss.as.webservices"/>
<extension module="org.jboss.as.weld"/>
</extensions>
<management>
<security-realms>
<security-realm name="ManagementRealm">
<authentication>
<properties path="mgmt-users.properties" relative-to="jboss.server.config.dir"/>
</authentication>
</security-realm>
<security-realm name="ApplicationRealm">
<authentication>
<properties path="application-users.properties" relative-to="jboss.server.config.dir"/>
</authentication>
</security-realm>
</security-realms>
<management-interfaces>
<native-interface security-realm="ManagementRealm">
<socket-binding native="management-native"/>
</native-interface>
<http-interface security-realm="ManagementRealm">
<socket-binding http="management-http"/>
</http-interface>
</management-interfaces>
</management>
<profile>
<subsystem xmlns="urn:jboss:domain:logging:1.1">
<console-handler name="CONSOLE">
<level name="INFO"/>
<formatter>
<pattern-formatter pattern="%d{HH:mm:ss,SSS} %-5p [%c] (%t) %s%E%n"/>
</formatter>
</console-handler>
<periodic-rotating-file-handler name="FILE">
<formatter>
<pattern-formatter pattern="%d{HH:mm:ss,SSS} %-5p [%c] (%t) %s%E%n"/>
</formatter>
<file relative-to="jboss.server.log.dir" path="server.log"/>
<suffix value=".yyyy-MM-dd"/>
<append value="true"/>
</periodic-rotating-file-handler>
<logger category="com.arjuna">
<level name="WARN"/>
</logger>
<logger category="org.apache.tomcat.util.modeler">
<level name="WARN"/>
</logger>
<logger category="sun.rmi">
<level name="WARN"/>
</logger>
<logger category="jacorb">
<level name="WARN"/>
</logger>
<logger category="jacorb.config">
<level name="ERROR"/>
</logger>
<root-logger>
<level name="INFO"/>
<handlers>
<handler name="CONSOLE"/>
<handler name="FILE"/>
</handlers>
</root-logger>
</subsystem>
<subsystem xmlns="urn:jboss:domain:configadmin:1.0"/>
<subsystem xmlns="urn:jboss:domain:datasources:1.0">
<datasources>
<datasource jndi-name="java:jboss/datasources/ExampleDS" pool-name="ExampleDS" enabled="true" use-java-context="true">
<connection-url>jdbc:h2:mem:test;DB_CLOSE_DELAY=-1</connection-url>
<driver>h2</driver>
<security>
<user-name>sa</user-name>
<password>sa</password>
</security>
</datasource>
<drivers>
<driver name="h2" module="com.h2database.h2">
<xa-datasource-class>org.h2.jdbcx.JdbcDataSource</xa-datasource-class>
</driver>
</drivers>
</datasources>
</subsystem>
<subsystem xmlns="urn:jboss:domain:deployment-scanner:1.1">
<deployment-scanner path="deployments" relative-to="jboss.server.base.dir" scan-interval="5000"/>
</subsystem>
<subsystem xmlns="urn:jboss:domain:ee:1.0"/>
<subsystem xmlns="urn:jboss:domain:ejb3:1.2">
<session-bean>
<stateless>
<bean-instance-pool-ref pool-name="slsb-strict-max-pool"/>
</stateless>
<stateful default-access-timeout="5000" cache-ref="simple"/>
<singleton default-access-timeout="5000"/>
</session-bean>
<pools>
<bean-instance-pools>
<strict-max-pool name="slsb-strict-max-pool" max-pool-size="20" instance-acquisition-timeout="5" instance-acquisition-timeout-unit="MINUTES"/>
<strict-max-pool name="mdb-strict-max-pool" max-pool-size="20" instance-acquisition-timeout="5" instance-acquisition-timeout-unit="MINUTES"/>
</bean-instance-pools>
</pools>
<caches>
<cache name="simple" aliases="NoPassivationCache"/>
<cache name="passivating" passivation-store-ref="file" aliases="SimpleStatefulCache"/>
</caches>
<passivation-stores>
<file-passivation-store name="file"/>
</passivation-stores>
<async thread-pool-name="default"/>
<timer-service thread-pool-name="default">
<data-store path="timer-service-data" relative-to="jboss.server.data.dir"/>
</timer-service>
<remote connector-ref="remoting-connector" thread-pool-name="default"/>
<thread-pools>
<thread-pool name="default">
<max-threads count="10"/>
<keepalive-time time="100" unit="milliseconds"/>
</thread-pool>
</thread-pools>
</subsystem>
<subsystem xmlns="urn:jboss:domain:infinispan:1.2" default-cache-container="hibernate">
<cache-container name="hibernate" default-cache="local-query">
<local-cache name="entity">
<transaction mode="NON_XA"/>
<eviction strategy="LRU" max-entries="10000"/>
<expiration max-idle="100000"/>
</local-cache>
<local-cache name="local-query">
<transaction mode="NONE"/>
<eviction strategy="LRU" max-entries="10000"/>
<expiration max-idle="100000"/>
</local-cache>
<local-cache name="timestamps">
<transaction mode="NONE"/>
<eviction strategy="NONE"/>
</local-cache>
</cache-container>
</subsystem>
<subsystem xmlns="urn:jboss:domain:jaxrs:1.0"/>
<subsystem xmlns="urn:jboss:domain:jca:1.1">
<archive-validation enabled="true" fail-on-error="true" fail-on-warn="false"/>
<bean-validation enabled="true"/>
<default-workmanager>
<short-running-threads>
<core-threads count="50"/>
<queue-length count="50"/>
<max-threads count="50"/>
<keepalive-time time="10" unit="seconds"/>
</short-running-threads>
<long-running-threads>
<core-threads count="50"/>
<queue-length count="50"/>
<max-threads count="50"/>
<keepalive-time time="10" unit="seconds"/>
</long-running-threads>
</default-workmanager>
<cached-connection-manager/>
</subsystem>
<subsystem xmlns="urn:jboss:domain:jdr:1.0"/>
<subsystem xmlns="urn:jboss:domain:jmx:1.1">
<show-model value="true"/>
<remoting-connector/>
</subsystem>
<subsystem xmlns="urn:jboss:domain:jpa:1.0">
<jpa default-datasource=""/>
</subsystem>
<subsystem xmlns="urn:jboss:domain:mail:1.0">
<mail-session jndi-name="java:jboss/mail/Default">
<smtp-server outbound-socket-binding-ref="mail-smtp"/>
</mail-session>
</subsystem>
<subsystem xmlns="urn:jboss:domain:naming:1.1"/>
<subsystem xmlns="urn:jboss:domain:osgi:1.2" activation="lazy">
<properties>
<!-- Specifies the beginning start level of the framework -->
<property name="org.osgi.framework.startlevel.beginning">1</property>
</properties>
<capabilities>
<!-- modules registered with the OSGi layer on startup -->
<capability name="javax.servlet.api:v25"/>
<capability name="javax.transaction.api"/>
<!-- bundles started in startlevel 1 -->
<capability name="org.apache.felix.log" startlevel="1"/>
<capability name="org.jboss.osgi.logging" startlevel="1"/>
<capability name="org.apache.felix.configadmin" startlevel="1"/>
<capability name="org.jboss.as.osgi.configadmin" startlevel="1"/>
</capabilities>
</subsystem>
<subsystem xmlns="urn:jboss:domain:pojo:1.0"/>
<subsystem xmlns="urn:jboss:domain:remoting:1.1">
<connector name="remoting-connector" socket-binding="remoting" security-realm="ApplicationRealm"/>
</subsystem>
<subsystem xmlns="urn:jboss:domain:resource-adapters:1.0"/>
<subsystem xmlns="urn:jboss:domain:sar:1.0"/>
<subsystem xmlns="urn:jboss:domain:security:1.1">
<security-domains>
<security-domain name="other" cache-type="default">
<authentication>
<login-module code="Remoting" flag="optional">
<module-option name="password-stacking" value="useFirstPass"/>
</login-module>
<login-module code="RealmUsersRoles" flag="required">
<module-option name="usersProperties" value="${jboss.server.config.dir}/application-users.properties"/>
<module-option name="rolesProperties" value="${jboss.server.config.dir}/application-roles.properties"/>
<module-option name="realm" value="ApplicationRealm"/>
<module-option name="password-stacking" value="useFirstPass"/>
</login-module>
</authentication>
</security-domain>
<security-domain name="jboss-web-policy" cache-type="default">
<authorization>
<policy-module code="Delegating" flag="required"/>
</authorization>
</security-domain>
<security-domain name="jboss-ejb-policy" cache-type="default">
<authorization>
<policy-module code="Delegating" flag="required"/>
</authorization>
</security-domain>
</security-domains>
</subsystem>
<subsystem xmlns="urn:jboss:domain:threads:1.1"/>
<subsystem xmlns="urn:jboss:domain:transactions:1.1">
<core-environment>
<process-id>
<uuid/>
</process-id>
</core-environment>
<recovery-environment socket-binding="txn-recovery-environment" status-socket-binding="txn-status-manager"/>
<coordinator-environment default-timeout="300"/>
</subsystem>
<subsystem xmlns="urn:jboss:domain:web:1.1" default-virtual-server="default-host" native="false">
<connector name="http" protocol="HTTP/1.1" scheme="http" socket-binding="http"/>
<virtual-server name="default-host" enable-welcome-root="true">
<alias name="localhost"/>
<alias name="example.com"/>
</virtual-server>
</subsystem>
<subsystem xmlns="urn:jboss:domain:webservices:1.1">
<modify-wsdl-address>true</modify-wsdl-address>
<wsdl-host>${jboss.bind.address:127.0.0.1}</wsdl-host>
<endpoint-config name="Standard-Endpoint-Config"/>
<endpoint-config name="Recording-Endpoint-Config">
<pre-handler-chain name="recording-handlers" protocol-bindings="##SOAP11_HTTP ##SOAP11_HTTP_MTOM ##SOAP12_HTTP ##SOAP12_HTTP_MTOM">
<handler name="RecordingHandler" class="org.jboss.ws.common.invocation.RecordingServerHandler"/>
</pre-handler-chain>
</endpoint-config>
</subsystem>
<subsystem xmlns="urn:jboss:domain:weld:1.0"/>
</profile>
<interfaces>
<interface name="management">
<inet-address value="${jboss.bind.address.management:0.0.0.0}"/>
</interface>
<interface name="public">
<inet-address value="${jboss.bind.address:0.0.0.0}"/>
</interface>
<!-- TODO - only show this if the jacorb subsystem is added -->
<interface name="unsecure">
<!--
~ Used for IIOP sockets in the standard configuration.
~ To secure JacORB you need to setup SSL
-->
<inet-address value="${jboss.bind.address.unsecure:127.0.0.1}"/>
</interface>
</interfaces>
<socket-binding-group name="standard-sockets" default-interface="public" port-offset="${jboss.socket.binding.port-offset:0}">
<socket-binding name="management-native" interface="management" port="${jboss.management.native.port:9999}"/>
<socket-binding name="management-http" interface="management" port="${jboss.management.http.port:9990}"/>
<socket-binding name="management-https" interface="management" port="${jboss.management.https.port:9443}"/>
<socket-binding name="ajp" port="8009"/>
<socket-binding name="http" port="{{ http_port }}"/>
<socket-binding name="https" port="{{ https_port }}"/>
<socket-binding name="osgi-http" interface="management" port="8090"/>
<socket-binding name="remoting" port="4447"/>
<socket-binding name="txn-recovery-environment" port="4712"/>
<socket-binding name="txn-status-manager" port="4713"/>
<outbound-socket-binding name="mail-smtp">
<remote-destination host="localhost" port="25"/>
</outbound-socket-binding>
</socket-binding-group>
</server>

@ -1,8 +0,0 @@
---
# This playbook deploys a simple standalone JBoss server.

- hosts: jboss-servers
  remote_user: root
  roles:
    - jboss-standalone

@ -1,4 +0,0 @@
Copyright (C) 2015 Eugene Varnavsky (varnavruz@gmail.com)
This work is licensed under the Creative Commons Attribution 3.0 Unported License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/3.0/deed.en_US.

@ -1,32 +0,0 @@
Building a simple LAMP stack and deploying an application using Ansible playbooks
-------------------------------------------
These playbooks require Ansible 1.2.
These playbooks are meant to be a reference and starter's guide to building
Ansible playbooks. They were tested on CentOS 7.x, so we recommend that you use
CentOS or RHEL to test them.
This RHEL 7 version reflects changes introduced in Red Hat Enterprise Linux and CentOS 7:
1. The network device naming scheme has changed
2. iptables is replaced with firewalld (see the sketch at the end of this README)
3. MySQL is replaced with MariaDB
This LAMP stack can be deployed on a single node or across multiple nodes. The
inventory file 'hosts' defines the nodes on which the stack should be configured.
[webservers]
localhost
[dbservers]
bensible
Here the webserver would be configured on the local host and the dbserver on a
server called "bensible". The stack can be deployed using the following
command:
ansible-playbook -i hosts site.yml
Once done, you can check the results by browsing to http://localhost/index.php.
You should see a simple test page and a list of databases retrieved from the
database server.
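
Because this variant uses firewalld rather than iptables (point 2 above), any
extra port you expose should be opened the same way the db role opens the
MariaDB port. A minimal sketch, assuming you also want the web port from
group_vars/all managed by the playbooks (this task is illustrative and is not
part of the shipped web role):

    - name: insert firewalld rule for httpd
      firewalld: port={{ httpd_port }}/tcp permanent=true state=enabled immediate=yes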

@ -1,6 +0,0 @@
---
# Variables listed here are applicable to all host groups
httpd_port: 82
ntpserver: 0.au.pool.ntp.org
repository: https://git.drupal.org/project/drupal.git

@ -1,9 +0,0 @@
---
# The variables file used by the playbooks in the dbservers group.
# These don't have to be explicitly imported by vars_files: they are autopopulated.
mysqlservice: mysqld
mysql_port: 3306
dbuser: foouser
dbname: foodb
upassword: abc

@ -1,5 +0,0 @@
[webservers]
122.129.219.67:221
[dbservers]
122.129.219.67:221

@ -1,6 +0,0 @@
---
# Handler to handle common notifications. Handlers are called by other plays.
# See http://docs.ansible.com/playbooks_intro.html for more information about handlers.
- name: restart ntp
  service: name=ntpd state=restarted

@ -1,15 +0,0 @@
---
# This playbook contains common plays that will be run on all nodes.
- name: Install ntp
  yum: name=ntp state=present
  tags: ntp
- name: Configure ntp file
  template: src=ntp.conf.j2 dest=/etc/ntp.conf
  tags: ntp
  notify: restart ntp
- name: Start the ntp service
  service: name=ntpd state=started enabled=yes
  tags: ntp

@ -1,12 +0,0 @@
driftfile /var/lib/ntp/drift
restrict 127.0.0.1
restrict -6 ::1
server {{ ntpserver }}
includefile /etc/ntp/crypto/pw
keys /etc/ntp/keys

@ -1,36 +0,0 @@
---
# This playbook will install MariaDB and create db user and give permissions.
- name: Install MariaDB package
  yum: name={{ item }} state=installed
  with_items:
    - mariadb-server
    - MySQL-python
    - libselinux-python
    - libsemanage-python
- name: Configure SELinux to start mysql on any port
  seboolean: name=mysql_connect_any state=true persistent=yes
- name: Create Mysql configuration file
  template: src=my.cnf.j2 dest=/etc/my.cnf
  notify:
    - restart mariadb
- name: Create MariaDB log file
  file: path=/var/log/mysqld.log state=touch owner=mysql group=mysql mode=0775
- name: Create MariaDB PID directory
  file: path=/var/run/mysqld state=directory owner=mysql group=mysql mode=0775
- name: Start MariaDB Service
  service: name=mariadb state=started enabled=yes
- name: insert firewalld rule
  firewalld: port={{ mysql_port }}/tcp permanent=true state=enabled immediate=yes
- name: Create Application Database
  mysql_db: name={{ dbname }} state=present
- name: Create Application DB User
  mysql_user: name={{ dbuser }} password={{ upassword }} priv=*.*:ALL host='%' state=present

@ -1,11 +0,0 @@
[mysqld]
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
user=mysql
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
port={{ mysql_port }}
[mysqld_safe]
log-error=/var/log/mysqld.log
pid-file=/var/run/mysqld/mysqld.pid

@ -1,27 +0,0 @@
---
- name: Install php-fpm and deps
  yum: name={{ item }} state=present
  with_items:
    - php
    - php-fpm
    - php-enchant
    - php-IDNA_Convert
    - php-mbstring
    - php-mysql
    - php-PHPMailer
    - php-process
    - php-simplepie
    - php-xml
    - php-gd
    - php-opcache
    - php-pdo-dblib
    - php-pecl-apcu
    - php-pecl-memcached
    - php-pecl-uploadprogress
    - php-pecl-uuid
    - php-pspell
    - php-soap
    - php-twig
    - php-twig-ctwig
    - php-xmlrpc

@ -1,9 +0,0 @@
---
# These tasks are responsible for copying the latest dev/production code from
# the version control system.
- name: Copy the code from repository
  git: repo={{ repository }} dest=/var/www/html/
- name: Creates the index.php file
  template: src=index.php.j2 dest=/var/www/html/index.php

@ -1,16 +0,0 @@
---
# These tasks install http and the php modules.
- name: Install http and libraries
  yum: name={{ item }} state=latest
  with_items:
    - httpd
    - git
    - libsemanage-python
    - libselinux-python
- name: http service state
  service: name=httpd state=started enabled=yes
- name: php-fpm service state
  service: name=php-fpm state=started enabled=yes

@ -1,3 +0,0 @@
---
- include: install_httpd.yml
- include: copy_code.yml

@ -1,24 +0,0 @@
<html>
<head>
<title>Ansible Application</title>
</head>
<body>
<br/>
<a href="http://{{ ansible_default_ipv4.address }}/index.html">Homepage</a>
<br/>
<?php
Print "Hello, World! I am a web server configured using Ansible and I am : ";
echo exec('hostname');
Print "</BR>";
echo "List of Databases: </BR>";
{% for host in groups['dbservers'] %}
$link = mysqli_connect('{{ hostvars[host].ansible_default_ipv4.address }}', '{{ hostvars[host].dbuser }}', '{{ hostvars[host].upassword }}') or die(mysqli_connect_error());
{% endfor %}
$res = mysqli_query($link, "SHOW DATABASES;");
while ($row = mysqli_fetch_assoc($res)) {
echo $row['Database'] . "\n";
}
?>
</body>
</html>

@ -1,24 +0,0 @@
---
# This playbook deploys the whole application stack in this site.

- name: apply common configuration to all nodes
  hosts: all
  remote_user: root
  roles:
    - common

- name: configure and deploy the webservers and application code
  hosts: webservers
  remote_user: root
  roles:
    - web
    - php-fpm

- name: deploy MySQL and configure the databases
  hosts: dbservers
  remote_user: root
  roles:
    - db

@ -1,4 +0,0 @@
Copyright (C) 2013 AnsibleWorks, Inc.
This work is licensed under the Creative Commons Attribution 3.0 Unported License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/3.0/deed.en_US.

@ -1,72 +0,0 @@
LAMP Stack + HAProxy: Example Playbooks
-----------------------------------------------------------------------------
- Requires Ansible 1.2
- Expects CentOS/RHEL 6 hosts
This example is an extension of the simple LAMP deployment. Here we'll install
and configure a web server with an HAProxy load balancer in front, and deploy
an application to the web servers. This set of playbooks also has the
capability to dynamically add and remove web server nodes from the deployment.
It also includes an example of doing a rolling update of the stack without
affecting the service.
You can also optionally configure a Nagios monitoring node.
### Initial Site Setup
First we configure the entire stack by listing our hosts in the 'hosts'
inventory file, grouped by their purpose:
[webservers]
webserver1
webserver2
[dbservers]
dbserver
[lbservers]
lbserver
[monitoring]
nagios
After which we execute the following command to deploy the site:
ansible-playbook -i hosts site.yml
The deployment can be verified by accessing the IP address of your load
balancer host in a web browser: http://<ip-of-lb>:8888. Reloading the page
should have you hit different webservers.
The Nagios web interface can be reached at http://<ip-of-nagios>/nagios/
The default username and password are "nagiosadmin" / "nagiosadmin".
### Removing and Adding a Node
Removal and addition of nodes to the cluster is as simple as editing the
hosts inventory and re-running:
ansible-playbook -i hosts site.yml
### Rolling Update
Rolling updates are the preferred way to update the web server software or
deployed application, since the load balancer can be dynamically configured
to take the hosts to be updated out of the pool. This will keep the service
running on other servers so that the users are not interrupted.
In this example the hosts are updated in serial fashion, which means that
only one server will be updated at a time. If you have a lot of web server
hosts, this behaviour can be changed by setting the 'serial' keyword in the
rolling_update.yml file.
Once the code has been updated in the source repository for your application,
which is defined in the group_vars/webservers file, execute the following
command:
ansible-playbook -i hosts rolling_update.yml
You can optionally pass -e webapp_version=xxx to the rolling_update playbook
to deploy a specific version of the example webapp.
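
Both knobs live in one place each: the 'serial' keyword at the top of the
webservers play in rolling_update.yml, and the webapp version on the command
line. A minimal sketch (the value of 2 is illustrative; the playbook in this
repository ships with serial: 1):

    - hosts: webservers
      remote_user: root
      serial: 2

ansible-playbook -i hosts rolling_update.yml -e webapp_version=xxx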

@ -1,5 +0,0 @@
---
# Variables here are applicable to all host groups
httpd_port: 80
ntpserver: 192.168.1.2

@ -1,9 +0,0 @@
---
# The variables file used by the playbooks in the dbservers group.
# These don't have to be explicitly imported by vars_files: they are autopopulated.
mysqlservice: mysqld
mysql_port: 3306
dbuser: root
dbname: foodb
upassword: abc

@ -1,25 +0,0 @@
---
# Variables for the HAproxy configuration
# HAProxy supports "http" and "tcp". For SSL, SMTP, etc, use "tcp".
mode: http
# Port on which HAProxy should listen
listenport: 8888
# A name for the proxy daemon; this will be the suffix in the logs.
daemonname: myapplb
# Balancing algorithm. Available options:
# roundrobin, source, leastconn, uri
# (if persistence is required, use "source")
balance: roundrobin
# Ethernet interface on which the load balancer should listen
# Defaults to the first interface. Change this to:
#
# iface: eth1
#
# ...to override.
#
iface: '{{ ansible_default_ipv4.interface }}'

@ -1,17 +0,0 @@
---
# Variables for the web server configuration
# Ethernet interface on which the web server should listen.
# Defaults to the first interface. Change this to:
#
# iface: eth1
#
# ...to override.
#
iface: '{{ ansible_default_ipv4.interface }}'
# this is the repository that holds our sample webapp
repository: https://github.com/bennojoy/mywebapp.git
# this is the sha1sum of V5 of the test webapp.
webapp_version: 351e47276cc66b018f4890a04709d4cc3d3edb0d

@ -1,12 +0,0 @@
[webservers]
web1
web2
[dbservers]
db1
[lbservers]
lb1
[monitoring]
nagios

@ -1,10 +0,0 @@
---
# This role installs httpd
- name: Install httpd
  yum: name={{ item }} state=present
  with_items:
    - httpd
- name: http service state
  service: name=httpd state=started enabled=yes

@ -1,29 +0,0 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1.4.5 (GNU/Linux)
mQINBEvSKUIBEADLGnUj24ZVKW7liFN/JA5CgtzlNnKs7sBg7fVbNWryiE3URbn1
JXvrdwHtkKyY96/ifZ1Ld3lE2gOF61bGZ2CWwJNee76Sp9Z+isP8RQXbG5jwj/4B
M9HK7phktqFVJ8VbY2jfTjcfxRvGM8YBwXF8hx0CDZURAjvf1xRSQJ7iAo58qcHn
XtxOAvQmAbR9z6Q/h/D+Y/PhoIJp1OV4VNHCbCs9M7HUVBpgC53PDcTUQuwcgeY6
pQgo9eT1eLNSZVrJ5Bctivl1UcD6P6CIGkkeT2gNhqindRPngUXGXW7Qzoefe+fV
QqJSm7Tq2q9oqVZ46J964waCRItRySpuW5dxZO34WM6wsw2BP2MlACbH4l3luqtp
Xo3Bvfnk+HAFH3HcMuwdaulxv7zYKXCfNoSfgrpEfo2Ex4Im/I3WdtwME/Gbnwdq
3VJzgAxLVFhczDHwNkjmIdPAlNJ9/ixRjip4dgZtW8VcBCrNoL+LhDrIfjvnLdRu
vBHy9P3sCF7FZycaHlMWP6RiLtHnEMGcbZ8QpQHi2dReU1wyr9QgguGU+jqSXYar
1yEcsdRGasppNIZ8+Qawbm/a4doT10TEtPArhSoHlwbvqTDYjtfV92lC/2iwgO6g
YgG9XrO4V8dV39Ffm7oLFfvTbg5mv4Q/E6AWo/gkjmtxkculbyAvjFtYAQARAQAB
tCFFUEVMICg2KSA8ZXBlbEBmZWRvcmFwcm9qZWN0Lm9yZz6JAjYEEwECACAFAkvS
KUICGw8GCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRA7Sd8qBgi4lR/GD/wLGPv9
qO39eyb9NlrwfKdUEo1tHxKdrhNz+XYrO4yVDTBZRPSuvL2yaoeSIhQOKhNPfEgT
9mdsbsgcfmoHxmGVcn+lbheWsSvcgrXuz0gLt8TGGKGGROAoLXpuUsb1HNtKEOwP
Q4z1uQ2nOz5hLRyDOV0I2LwYV8BjGIjBKUMFEUxFTsL7XOZkrAg/WbTH2PW3hrfS
WtcRA7EYonI3B80d39ffws7SmyKbS5PmZjqOPuTvV2F0tMhKIhncBwoojWZPExft
HpKhzKVh8fdDO/3P1y1Fk3Cin8UbCO9MWMFNR27fVzCANlEPljsHA+3Ez4F7uboF
p0OOEov4Yyi4BEbgqZnthTG4ub9nyiupIZ3ckPHr3nVcDUGcL6lQD/nkmNVIeLYP
x1uHPOSlWfuojAYgzRH6LL7Idg4FHHBA0to7FW8dQXFIOyNiJFAOT2j8P5+tVdq8
wB0PDSH8yRpn4HdJ9RYquau4OkjluxOWf0uRaS//SUcCZh+1/KBEOmcvBHYRZA5J
l/nakCgxGb2paQOzqqpOcHKvlyLuzO5uybMXaipLExTGJXBlXrbbASfXa/yGYSAG
iVrGz9CE6676dMlm8F+s3XXE13QZrXmjloc6jwOljnfAkjTGXjiB7OULESed96MR
XtfLk0W5Ab9pd7tKDR6QHI7rgHXfCopRnZ2VVQ==
=V/6I
-----END PGP PUBLIC KEY BLOCK-----

@ -1,26 +0,0 @@
[epel]
name=Extra Packages for Enterprise Linux 6 - $basearch
#baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch
mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch
failovermethod=priority
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
[epel-debuginfo]
name=Extra Packages for Enterprise Linux 6 - $basearch - Debug
#baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch/debug
mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-6&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
gpgcheck=1
[epel-source]
name=Extra Packages for Enterprise Linux 6 - $basearch - Source
#baseurl=http://download.fedoraproject.org/pub/epel/6/SRPMS
mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-6&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
gpgcheck=1

@ -1,8 +0,0 @@
---
# Handlers for common notifications
- name: restart ntp
  service: name=ntpd state=restarted
- name: restart iptables
  service: name=iptables state=restarted

@ -1,46 +0,0 @@
---
# This role contains common plays that will run on all nodes.
- name: Install python bindings for SE Linux
  yum: name={{ item }} state=present
  with_items:
    - libselinux-python
    - libsemanage-python
- name: Create the repository for EPEL
  copy: src=epel.repo dest=/etc/yum.repos.d/epel.repo
- name: Create the GPG key for EPEL
  copy: src=RPM-GPG-KEY-EPEL-6 dest=/etc/pki/rpm-gpg
- name: install some useful nagios plugins
  yum: name={{ item }} state=present
  with_items:
    - nagios-nrpe
    - nagios-plugins-swap
    - nagios-plugins-users
    - nagios-plugins-procs
    - nagios-plugins-load
    - nagios-plugins-disk
- name: Install ntp
  yum: name=ntp state=present
  tags: ntp
- name: Configure ntp file
  template: src=ntp.conf.j2 dest=/etc/ntp.conf
  tags: ntp
  notify: restart ntp
- name: Start the ntp service
  service: name=ntpd state=started enabled=yes
  tags: ntp
- name: insert iptables template
  template: src=iptables.j2 dest=/etc/sysconfig/iptables
  notify: restart iptables
- name: test to see if selinux is running
  command: getenforce
  register: sestatus
  changed_when: false

@ -1,30 +0,0 @@
# {{ ansible_managed }}
# Manual customization of this file is not recommended.
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
{% if (inventory_hostname in groups['webservers']) or (inventory_hostname in groups['monitoring']) %}
-A INPUT -p tcp --dport 80 -j ACCEPT
{% endif %}
{% if inventory_hostname in groups['dbservers'] %}
-A INPUT -p tcp --dport 3306 -j ACCEPT
{% endif %}
{% if inventory_hostname in groups['lbservers'] %}
-A INPUT -p tcp --dport {{ listenport }} -j ACCEPT
{% endif %}
{% for host in groups['monitoring'] %}
-A INPUT -p tcp -s {{ hostvars[host].ansible_default_ipv4.address }} --dport 5666 -j ACCEPT
{% endfor %}
-A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
-A INPUT -p icmp -j ACCEPT
-A INPUT -i lo -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT
-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A FORWARD -j REJECT --reject-with icmp-host-prohibited
COMMIT

@ -1,12 +0,0 @@
driftfile /var/lib/ntp/drift
restrict 127.0.0.1
restrict -6 ::1
server {{ ntpserver }}
includefile /etc/ntp/crypto/pw
keys /etc/ntp/keys

@ -1,6 +0,0 @@
---
# Handler to handle DB tier notifications
- name: restart mysql
  service: name=mysqld state=restarted

@ -1,26 +0,0 @@
---
# This role will install MySQL and create db user and give permissions.
- name: Install Mysql package
  yum: name={{ item }} state=present
  with_items:
    - mysql-server
    - MySQL-python
- name: Configure SELinux to start mysql on any port
  seboolean: name=mysql_connect_any state=true persistent=yes
  when: sestatus.rc != 0
- name: Create Mysql configuration file
  template: src=my.cnf.j2 dest=/etc/my.cnf
  notify:
    - restart mysql
- name: Start Mysql Service
  service: name=mysqld state=started enabled=yes
- name: Create Application Database
  mysql_db: name={{ dbname }} state=present
- name: Create Application DB User
  mysql_user: name={{ dbuser }} password={{ upassword }} priv=*.*:ALL host='%' state=present

@ -1,11 +0,0 @@
[mysqld]
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
user=mysql
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
port={{ mysql_port }}
[mysqld_safe]
log-error=/var/log/mysqld.log
pid-file=/var/run/mysqld/mysqld.pid

@ -1,9 +0,0 @@
---
# Handlers for HAproxy
- name: restart haproxy
  service: name=haproxy state=restarted
- name: reload haproxy
  service: name=haproxy state=reloaded

@ -1,15 +0,0 @@
---
# This role installs HAProxy and configures it.
- name: Download and install haproxy and socat
  yum: name={{ item }} state=present
  with_items:
    - haproxy
    - socat
- name: Configure the haproxy cnf file with hosts
  template: src=haproxy.cfg.j2 dest=/etc/haproxy/haproxy.cfg
  notify: restart haproxy
- name: Start the haproxy service
  service: name=haproxy state=started enabled=yes

@ -1,39 +0,0 @@
global
log 127.0.0.1 local2
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user root
group root
daemon
# turn on stats unix socket
stats socket /var/lib/haproxy/stats level admin
defaults
mode {{ mode }}
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 3000
backend app
{% for host in groups['lbservers'] %}
listen {{ daemonname }} {{ hostvars[host]['ansible_' + iface].ipv4.address }}:{{ listenport }}
{% endfor %}
balance {{ balance }}
{% for host in groups['webservers'] %}
server {{ host }} {{ hostvars[host]['ansible_' + iface].ipv4.address }}:{{ httpd_port }}
{% endfor %}

@ -1,39 +0,0 @@
# {{ ansible_managed }}
# service checks to be applied to all hosts
define service {
use local-service
host_name localhost
service_description Root Partition
check_command check_local_disk!20%!10%!/
}
define service {
use local-service
host_name *
service_description Current Users
check_command check_local_users!20!50
}
define service {
use local-service
host_name *
service_description Total Processes
check_command check_local_procs!250!400!RSZDT
}
define service {
use local-service
host_name *
service_description Current Load
check_command check_local_load!5.0,4.0,3.0!10.0,6.0,4.0
}
define service {
use local-service
host_name *
service_description Swap Usage
check_command check_local_swap!20!10
}

@ -1,144 +0,0 @@
###############################################################################
# LOCALHOST.CFG - SAMPLE OBJECT CONFIG FILE FOR MONITORING THIS MACHINE
#
# Last Modified: 05-31-2007
#
# NOTE: This config file is intended to serve as an *extremely* simple
# example of how you can create configuration entries to monitor
# the local (Linux) machine.
#
###############################################################################
###############################################################################
###############################################################################
#
# HOST DEFINITION
#
###############################################################################
###############################################################################
# Define a host for the local machine
define host{
use linux-server ; Name of host template to use
; This host definition will inherit all variables that are defined
; in (or inherited by) the linux-server host template definition.
host_name localhost
alias localhost
address 127.0.0.1
}
###############################################################################
###############################################################################
#
# HOST GROUP DEFINITION
#
###############################################################################
###############################################################################
# Define an optional hostgroup for Linux machines
define hostgroup{
hostgroup_name linux-servers ; The name of the hostgroup
alias Linux Servers ; Long name of the group
members localhost ; Comma separated list of hosts that belong to this group
}
###############################################################################
###############################################################################
#
# SERVICE DEFINITIONS
#
###############################################################################
###############################################################################
# Define a service to "ping" the local machine
define service{
use local-service ; Name of service template to use
host_name localhost
service_description PING
check_command check_ping!100.0,20%!500.0,60%
}
# Define a service to check the disk space of the root partition
# on the local machine. Warning if < 20% free, critical if
# < 10% free space on partition.
define service{
use local-service ; Name of service template to use
host_name localhost
service_description Root Partition
check_command check_local_disk!20%!10%!/
}
# Define a service to check the number of currently logged in
# users on the local machine. Warning if > 20 users, critical
# if > 50 users.
define service{
use local-service ; Name of service template to use
host_name localhost
service_description Current Users
check_command check_local_users!20!50
}
# Define a service to check the number of currently running procs
# on the local machine. Warning if > 250 processes, critical if
# > 400 processes.
define service{
use local-service ; Name of service template to use
host_name localhost
service_description Total Processes
check_command check_local_procs!250!400!RSZDT
}
# Define a service to check the load on the local machine.
define service{
use local-service ; Name of service template to use
host_name localhost
service_description Current Load
check_command check_local_load!5.0,4.0,3.0!10.0,6.0,4.0
}
# Define a service to check the swap usage of the local machine.
# Critical if less than 10% of swap is free, warning if less than 20% is free
define service{
use local-service ; Name of service template to use
host_name localhost
service_description Swap Usage
check_command check_local_swap!20!10
}
# Define a service to check SSH on the local machine.
# Disable notifications for this service by default, as not all users may have SSH enabled.
define service{
use local-service ; Name of service template to use
host_name localhost
service_description SSH
check_command check_ssh
notifications_enabled 0
}

File diff suppressed because it is too large.

@ -1,7 +0,0 @@
---
# handlers for nagios
- name: restart httpd
  service: name=httpd state=restarted
- name: restart nagios
  service: name=nagios state=restarted

@ -1,41 +0,0 @@
---
# This will install nagios
- name: install nagios
  yum: pkg={{ item }} state=present
  with_items:
    - nagios
    - nagios-plugins
    - nagios-plugins-nrpe
    - nagios-plugins-ping
    - nagios-plugins-ssh
    - nagios-plugins-http
    - nagios-plugins-mysql
    - nagios-devel
  notify: restart httpd
- name: create nagios config dir
  file: path=/etc/nagios/ansible-managed state=directory
- name: configure nagios
  copy: src=nagios.cfg dest=/etc/nagios/nagios.cfg
  notify: restart nagios
- name: configure localhost monitoring
  copy: src=localhost.cfg dest=/etc/nagios/objects/localhost.cfg
  notify: restart nagios
- name: configure nagios services
  copy: src=ansible-managed-services.cfg dest=/etc/nagios/
- name: create the nagios object files
  template: src={{ item + ".j2" }}
            dest=/etc/nagios/ansible-managed/{{ item }}
  with_items:
    - webservers.cfg
    - dbservers.cfg
    - lbservers.cfg
  notify: restart nagios
- name: start nagios
  service: name=nagios state=started enabled=yes

@ -1,25 +0,0 @@
# {{ ansible_managed }}
define hostgroup {
hostgroup_name dbservers
alias Database Servers
}
{% for host in groups['dbservers'] %}
define host {
use linux-server
host_name {{ host }}
alias {{ host }}
address {{ hostvars[host].ansible_default_ipv4.address }}
hostgroups dbservers
}
{% endfor %}
#define service {
# use local-service
# hostgroup_name dbservers
# service_description MySQL Database Server
# check_command check_mysql
# notifications_enabled 0
#}

@ -1,22 +0,0 @@
# {{ ansible_managed }}
define hostgroup {
hostgroup_name loadbalancers
alias Load Balancers
}
{% for host in groups['lbservers'] %}
define host {
use linux-server
host_name {{ host }}
alias {{ host }}
address {{ hostvars[host].ansible_default_ipv4.address }}
hostgroups loadbalancers
}
define service {
use local-service
host_name {{ host }}
service_description HAProxy Load Balancer
check_command check_http!-p{{ hostvars[host].listenport }}
}
{% endfor %}

@ -1,25 +0,0 @@
# {{ ansible_managed }}
define hostgroup {
hostgroup_name webservers
alias Web Servers
}
{% for host in groups['webservers'] %}
define host {
use linux-server
host_name {{ host }}
alias {{ host }}
address {{ hostvars[host].ansible_default_ipv4.address }}
hostgroups webservers
}
{% endfor %}
# service checks to be applied to the web server
define service {
use local-service
hostgroup_name webservers
service_description webserver
check_command check_http
notifications_enabled 0
}

@ -1,16 +0,0 @@
---
# httpd is handled by the base-apache role upstream
- name: Install php and git
  yum: name={{ item }} state=present
  with_items:
    - php
    - php-mysql
    - git
- name: Configure SELinux to allow httpd to connect to remote database
  seboolean: name=httpd_can_network_connect_db state=true persistent=yes
  when: sestatus.rc != 0
- name: Copy the code from repository
  git: repo={{ repository }} version={{ webapp_version }} dest=/var/www/html/

@ -1,48 +0,0 @@
---
# This playbook does a rolling update for all webservers serially (one at a time).
# Change the value of serial: to adjust the number of servers updated at a time.
#
# The three roles that apply to the webserver hosts will be applied: common,
# base-apache, and web. So any changes to configuration, package updates, etc,
# will be applied as part of the rolling update process.
#

# gather facts from monitoring nodes for iptables rules
- hosts: monitoring
  tasks: []

- hosts: webservers
  remote_user: root
  serial: 1

  # These are the tasks to run before applying updates:
  pre_tasks:
    - name: disable nagios alerts for this host webserver service
      nagios: 'action=disable_alerts host={{ inventory_hostname }} services=webserver'
      delegate_to: "{{ item }}"
      with_items: groups.monitoring
    - name: disable the server in haproxy
      haproxy: 'state=disabled backend=myapplb host={{ inventory_hostname }} socket=/var/lib/haproxy/stats'
      delegate_to: "{{ item }}"
      with_items: groups.lbservers

  roles:
    - common
    - base-apache
    - web

  # These tasks run after the roles:
  post_tasks:
    - name: wait for webserver to come up
      wait_for: 'host={{ inventory_hostname }} port=80 state=started timeout=80'
    - name: enable the server in haproxy
      haproxy: 'state=enabled backend=myapplb host={{ inventory_hostname }} socket=/var/lib/haproxy/stats'
      delegate_to: "{{ item }}"
      with_items: groups.lbservers
    - name: re-enable nagios alerts
      nagios: 'action=enable_alerts host={{ inventory_hostname }} services=webserver'
      delegate_to: "{{ item }}"
      with_items: groups.monitoring

@ -1,36 +0,0 @@
---
# This playbook deploys the whole application stack in this site.

# Apply common configuration to all hosts
- hosts: all
  remote_user: root
  roles:
    - common

# Configure and deploy database servers.
- hosts: dbservers
  remote_user: root
  roles:
    - db

# Configure and deploy the web servers. Note that we include two roles here,
# the 'base-apache' role which simply sets up Apache, and 'web' which includes
# our example web application.
- hosts: webservers
  remote_user: root
  roles:
    - base-apache
    - web

# Configure and deploy the load balancer(s).
- hosts: lbservers
  remote_user: root
  roles:
    - haproxy

# Configure and deploy the Nagios monitoring node(s).
- hosts: monitoring
  remote_user: root
  roles:
    - base-apache
    - nagios

@ -1,4 +0,0 @@
Copyright (C) 2013 AnsibleWorks, Inc.
This work is licensed under the Creative Commons Attribution 3.0 Unported License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/3.0/deed.en_US.

@ -1,27 +0,0 @@
Building a simple LAMP stack and deploying an application using Ansible playbooks
-------------------------------------------
These playbooks require Ansible 1.2.
These playbooks are meant to be a reference and starter's guide to building
Ansible playbooks. They were tested on CentOS 6.x, so we recommend that you use
CentOS or RHEL to test them.
This LAMP stack can be deployed on a single node or across multiple nodes. The
inventory file 'hosts' defines the nodes on which the stack should be configured.
[webservers]
localhost
[dbservers]
bensible
Here the webserver would be configured on the local host and the dbserver on a
server called "bensible". The stack can be deployed using the following
command:
ansible-playbook -i hosts site.yml
Once done, you can check the results by browsing to http://localhost/index.php.
You should see a simple test page and a list of databases retrieved from the
database server.
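
To point these playbooks at your own application instead of the bundled example
webapp, override the variables in group_vars/all. The values below are purely
illustrative (any Git URL and port will do, provided the repository holds the
code you want served):

    # group_vars/all -- illustrative overrides, not the shipped defaults
    httpd_port: 8080
    repository: https://github.com/your-org/your-app.git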

@ -1,6 +0,0 @@
---
# Variables listed here are applicable to all host groups
httpd_port: 80
ntpserver: 192.168.1.2
repository: https://github.com/bennojoy/mywebapp.git

@ -1,9 +0,0 @@
---
# The variables file used by the playbooks in the dbservers group.
# These don't have to be explicitly imported by vars_files: they are autopopulated.
mysqlservice: mysqld
mysql_port: 3306
dbuser: foouser
dbname: foodb
upassword: abc

@ -1,7 +0,0 @@
[webservers]
web3
[dbservers]
web2

@ -1,9 +0,0 @@
---
# Handler to handle common notifications. Handlers are called by other plays.
# See http://docs.ansible.com/playbooks_intro.html for more information about handlers.
- name: restart ntp
  service: name=ntpd state=restarted
- name: restart iptables
  service: name=iptables state=restarted

@ -1,20 +0,0 @@
---
# This playbook contains common plays that will be run on all nodes.
- name: Install ntp
  yum: name=ntp state=present
  tags: ntp
- name: Configure ntp file
  template: src=ntp.conf.j2 dest=/etc/ntp.conf
  tags: ntp
  notify: restart ntp
- name: Start the ntp service
  service: name=ntpd state=started enabled=yes
  tags: ntp
- name: test to see if selinux is running
  command: getenforce
  register: sestatus
  changed_when: false

@ -1,12 +0,0 @@
driftfile /var/lib/ntp/drift
restrict 127.0.0.1
restrict -6 ::1
server {{ ntpserver }}
includefile /etc/ntp/crypto/pw
keys /etc/ntp/keys

@ -1,8 +0,0 @@
---
# Handler to handle DB tier notifications
- name: restart mysql
  service: name=mysqld state=restarted
- name: restart iptables
  service: name=iptables state=restarted

@ -1,33 +0,0 @@
---
# This playbook will install mysql and create db user and give permissions.
- name: Install Mysql package
  yum: name={{ item }} state=installed
  with_items:
    - mysql-server
    - MySQL-python
    - libselinux-python
    - libsemanage-python
- name: Configure SELinux to start mysql on any port
  seboolean: name=mysql_connect_any state=true persistent=yes
  when: sestatus.rc != 0
- name: Create Mysql configuration file
  template: src=my.cnf.j2 dest=/etc/my.cnf
  notify:
    - restart mysql
- name: Start Mysql Service
  service: name=mysqld state=started enabled=yes
- name: insert iptables rule
  lineinfile: dest=/etc/sysconfig/iptables state=present regexp="{{ mysql_port }}"
              insertafter="^:OUTPUT " line="-A INPUT -p tcp --dport {{ mysql_port }} -j ACCEPT"
  notify: restart iptables
- name: Create Application Database
  mysql_db: name={{ dbname }} state=present
- name: Create Application DB User
  mysql_user: name={{ dbuser }} password={{ upassword }} priv=*.*:ALL host='%' state=present

@ -1,11 +0,0 @@
[mysqld]
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
user=mysql
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
port={{ mysql_port }}
[mysqld_safe]
log-error=/var/log/mysqld.log
pid-file=/var/run/mysqld/mysqld.pid

@ -1,6 +0,0 @@
---
# Handler for the webtier: handlers are called by other plays.
# See http://docs.ansible.com/playbooks_intro.html for more information about handlers.
- name: restart iptables
  service: name=iptables state=restarted

@ -1,9 +0,0 @@
---
# These tasks are responsible for copying the latest dev/production code from
# the version control system.
- name: Copy the code from repository
  git: repo={{ repository }} dest=/var/www/html/
- name: Creates the index.php file
  template: src=index.php.j2 dest=/var/www/html/index.php

@ -1,24 +0,0 @@
---
# These tasks install http and the php modules.
- name: Install http and php etc
yum: name={{ item }} state=present
with_items:
- httpd
- php
- php-mysql
- git
- libsemanage-python
- libselinux-python
- name: insert iptables rule for httpd
lineinfile: dest=/etc/sysconfig/iptables create=yes state=present regexp="{{ httpd_port }}" insertafter="^:OUTPUT "
line="-A INPUT -p tcp --dport {{ httpd_port }} -j ACCEPT"
notify: restart iptables
- name: http service state
service: name=httpd state=started enabled=yes
- name: Configure SELinux to allow httpd to connect to remote database
seboolean: name=httpd_can_network_connect_db state=true persistent=yes
when: sestatus.rc != 0

@ -1,3 +0,0 @@
---
- include: install_httpd.yml
- include: copy_code.yml

@ -1,24 +0,0 @@
<html>
<head>
<title>Ansible Application</title>
</head>
<body>
<br/>
<a href="http://{{ ansible_default_ipv4.address }}/index.html">Homepage</a>
<br/>
<?php
Print "Hello, World! I am a web server configured using Ansible and I am : ";
echo exec('hostname');
Print "</BR>";
echo "List of Databases: </BR>";
{% for host in groups['dbservers'] %}
$link = mysqli_connect('{{ hostvars[host].ansible_default_ipv4.address }}', '{{ hostvars[host].dbuser }}', '{{ hostvars[host].upassword }}') or die(mysqli_connect_error($link));
{% endfor %}
$res = mysqli_query($link, "SHOW DATABASES;");
while ($row = mysqli_fetch_assoc($res)) {
echo $row['Database'] . "<br/>";
}
?>
</body>
</html>

@ -1,23 +0,0 @@
---
# This playbook deploys the whole application stack in this site.
- name: apply common configuration to all nodes
hosts: all
remote_user: root
roles:
- common
- name: configure and deploy the webservers and application code
hosts: webservers
remote_user: root
roles:
- web
- name: deploy MySQL and configure the databases
hosts: dbservers
remote_user: root
roles:
- db

@ -1,56 +0,0 @@
# ansible-pull setup
#
# on remote hosts, set up ansible to run periodically using the latest code
# from a particular checkout, in a pull-based fashion, inverting Ansible's
# usual push-based operating mode.
#
# This particular pull based mode is ideal for:
#
# (A) massive scale out
# (B) continual system remediation
#
# DO NOT RUN THIS AGAINST YOUR HOSTS WITHOUT CHANGING THE repo_url
# TO SOMETHING YOU HAVE PERSONALLY VERIFIED
#
#
---
- hosts: pull_mode_hosts
remote_user: root
vars:
# schedule is fed directly to cron
schedule: '*/15 * * * *'
# User to run ansible-pull as from cron
cron_user: root
# File that ansible will use for logs
logfile: /var/log/ansible-pull.log
# Directory to where repository will be cloned
workdir: /var/lib/ansible/local
# Repository to check out -- YOU MUST CHANGE THIS
# repo must contain a local.yml file at top level
#repo_url: git://github.com/sfromm/ansible-playbooks.git
repo_url: SUPPLY_YOUR_OWN_GIT_URL_HERE
tasks:
- name: Install ansible
yum: pkg=ansible state=installed
- name: Create local directory to work from
file: path={{workdir}} state=directory owner=root group=root mode=0751
- name: Copy ansible inventory file to client
copy: src=/etc/ansible/hosts dest=/etc/ansible/hosts
owner=root group=root mode=0644
- name: Create crontab entry to clone/pull git repository
template: src=templates/etc_cron.d_ansible-pull.j2 dest=/etc/cron.d/ansible-pull owner=root group=root mode=0644
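# a hypothetical sketch of templates/etc_cron.d_ansible-pull.j2 (the template file is
# not shown in this diff); it would use ansible-pull's -d/--directory and -U/--url options:
#   {{ schedule }} {{ cron_user }} ansible-pull -d {{ workdir }} -U {{ repo_url }} >>{{ logfile }} 2>&1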
- name: Create logrotate entry for ansible-pull.log
template: src=templates/etc_logrotate.d_ansible-pull.j2 dest=/etc/logrotate.d/ansible-pull owner=root group=root mode=0644

@ -1,19 +0,0 @@
# ordinarily, without the 'serial' keyword set, ansible will control all of your machines in a play at once, in parallel.
# if you want to perform a rolling update, so that each play completes all the way through on a certain number of hosts
# before moving on to the remaining hosts, use the 'serial' keyword like so:
---
- hosts: all
serial: 3
# now each of the tasks below will complete on 3 hosts before moving on to the next 3, regardless of how many
# hosts are selected by the "hosts:" line
tasks:
- name: ping
ping:
- name: ping2
ping:

@ -1,45 +0,0 @@
---
# This playbook demonstrates how to use the ansible cloudformation module to launch an AWS CloudFormation stack.
#
# This module requires that the boto python library is installed, and that you have your AWS credentials
# in $HOME/.boto
# The idea here is to bring up a bare infrastructure with CloudFormation, but use Ansible to configure it.
# I generally do this in 2 different playbook runs, to allow the ec2.py inventory to be updated in between.
# This module also uses "complex arguments", introduced in Ansible 1.1, which allow you to specify the
# CloudFormation template parameters.
# This example launches a 3-node AutoScaling group, with a security group and an InstanceProfile with root permissions.
# If a stack does not exist, it will be created. If it does exist and the template file has changed, the stack will be updated.
# If the parameters are different, the stack will also be updated.
# CloudFormation stacks can take a while to provision; if you are curious about the status, use the AWS
# web console or one of the CloudFormation CLIs.
# Example update -- try first launching the stack with 3 as the ClusterSize. After it is launched, change it to 4
# and run the playbook again.
- name: provision stack
hosts: localhost
connection: local
gather_facts: false
# Launch the cloudformation-example.json template. Register the output.
tasks:
- name: launch ansible cloudformation example
cloudformation: >
stack_name="ansible-cloudformation" state=present
region=us-east-1 disable_rollback=true
template=files/cloudformation-example.json
args:
template_parameters:
KeyName: jmartin
DiskType: ephemeral
InstanceType: m1.small
ClusterSize: 3
register: stack
- name: show stack outputs
debug: msg="My stack outputs are {{stack.stack_outputs}}"

@ -1,45 +0,0 @@
---
# this is a bit of an advanced topic.
#
# generally Ansible likes to pass simple key=value arguments to modules. It
# occasionally comes up though that you might want to write a module that takes
# COMPLEX arguments, like lists and dictionaries.
#
# In order for this to happen, at least right now, it should be a Python
# module, so it can leverage some common code in Ansible that makes this easy.
# If you write a non-Python module, you can still pass data across, but only
# hashes that do not contain lists or other hashes. If you write the Python
# module, you can do anything.
#
# note that if you were to use BOTH the key=value form and the 'args' form for
# passing data in, the behaviour is currently undefined. Ansible is working to
# standardize on returning a duplicate parameter failure in this case but
# modules which don't use the common module framework may do something
# different.
- hosts: localhost
gather_facts: no
vars:
complex:
ghostbusters: [ 'egon', 'ray', 'peter', 'winston' ]
mice: [ 'pinky', 'brain', 'larry' ]
tasks:
- name: this is the basic way data passing works for any module
action: ping data='Hi Mom'
- name: of course this can also be written like so, which is shorter
ping: data='Hi Mom'
- name: but what if you have a complex module that needs complicated data?
ping:
data:
moo: cow
asdf: [1,2,3,4]
- name: can we make that cleaner? sure!
ping:
data: "{{ complex }}"

@ -1,50 +0,0 @@
---
# this is a demo of conditional imports. This is a powerful concept
# and can be used to apply the same recipe to different types of hosts,
# based on variables that bubble up from the hosts from tools such
# as ohai or facter.
#
# Here's an example use case:
#
# what to do if the service for apache is named 'httpd' on CentOS
# but is named 'apache' on Debian?
# there is only one play in this playbook, it runs on all hosts
# as root
- hosts: all
remote_user: root
# we have a common list of variables stored in /vars/external_vars.yml
# that we will always import
# next, we want to import files that are different per operating system
# and if no per operating system file is found, load a defaults file.
# for instance, if the OS was "CentOS", we'd try to load vars/CentOS.yml.
# if that was found, we would immediately stop. However if that wasn't
# present, we'd try to load vars/defaults.yml. If that in turn was not
# found, we would fail immediately, because we had gotten to the end of
# the list without importing anything.
vars_files:
- "vars/external_vars.yml"
- [ "vars/{{ facter_operatingsystem }}.yml", "vars/defaults.yml" ]
# and this is just a regular task line from a playbook, as we're used to.
# but with variables in it that come from above. Note that the variables
# from above are *also* available in templates
tasks:
- name: ensure apache is latest
action: "{{ packager }} pkg={{ apache }} state=latest"
- name: ensure apache is running
service: name={{ apache }} state=running

@ -1,40 +0,0 @@
---
# this is a demo of conditional executions using 'when' statements, which can skip
# certain tasks on machines/platforms/etc where they do not apply.
- hosts: all
remote_user: root
vars:
favcolor: "red"
dog: "fido"
cat: "whiskers"
ssn: 8675309
tasks:
- name: "do this if my favcolor is blue, and my dog is named fido"
shell: /bin/false
when: favcolor == 'blue' and dog == 'fido'
- name: "do this if my favcolor is not blue, and my dog is named fido"
shell: /bin/true
when: favcolor != 'blue' and dog == 'fido'
- name: "do this if my SSN is over 9000"
shell: /bin/true
when: ssn > 9000
- name: "do this if I have one of these SSNs"
shell: /bin/true
when: ssn in [ 8675309, 8675310, 8675311 ]
- name: "do this if a variable named hippo is NOT defined"
shell: /bin/true
when: hippo is not defined
- name: "do this if a variable named hippo is defined"
shell: /bin/true
when: hippo is defined

@ -1,6 +0,0 @@
---
- name: Demonstrate custom jinja2 filters
hosts: all
tasks:
- template: src=templates/custom-filters.j2 dest=/tmp/custom-filters.txt
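# a hypothetical sketch of templates/custom-filters.j2 (its content is not shown in
# this diff), exercising the generate_answer filter defined later in this commit:
#   the answer is {{ 12345 | generate_answer }}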

@ -1,39 +0,0 @@
---
# this is an example of how we can perform actions on a given host on behalf of all the hosts
# in a play.
#
# The two main uses of this would be signalling an outage window for hosts that
# we are going to start upgrading, or to take a machine out of rotation by talking to a load
# balancer.
#
# This example cheats by replacing the load balancer script with the 'echo' command,
# leaving actual communication with the load balancer as an exercise for the reader. In reality,
# you could call anything you want, the main thing is that it should do something with
# {{inventory_hostname}}
# NOTE: see batch_size_control.yml for an example of the 'serial' keyword, which you almost certainly
# want to use in this kind of example. Here we have a mocked up example that does something to
# 5 hosts at a time
- hosts: all
serial: 5
tasks:
- name: take the machine out of rotation
command: echo taking out of rotation {{inventory_hostname}}
delegate_to: 127.0.0.1
# here's an alternate notation if you are delegating to 127.0.0.1, you can use 'local_action'
# instead of 'action' and leave off the 'delegate_to' part.
#
# - local_action: command echo taking out of rotation {{inventory_hostname}}
- name: do several things on the actual host
command: echo hi mom {{inventory_hostname}}
- name: put machine back into rotation
command: echo inserting into rotation {{inventory_hostname}}
delegate_to: 127.0.0.1

@ -1,33 +0,0 @@
---
# it is often useful to be able to set the environment for one command and have that environment be totally
# different for another. For example, you might use an HTTP proxy for some packages but not for others.
#
# in Ansible 1.1 and later, you can pass the environment to any module using either a dictionary variable
# or a dictionary itself.
- hosts: all
remote_user: root
# here we make a variable named "env" that is a dictionary
vars:
env:
HI: test2
http_proxy: http://proxy.example.com:8080
tasks:
# here we just define the dictionary directly and use it
# (here $HI is the shell variable as nothing in Ansible will replace it)
- shell: echo $HI
environment:
HI: test1
# here we are using the $env variable above
- shell: echo $HI
environment: env
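# note: on later Ansible versions this is usually written with explicit templating,
# e.g. (an equivalent sketch):
# - shell: echo $HI
#   environment: "{{ env }}"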

@ -1,65 +0,0 @@
---
# This playbook is an example for deploying multiple instances into
# EC2/Euca and "doing something" with them.
#
# - uses the ec2 and ec2_vol module.
#
# Run this with ansible-playbook and supply the private key for your
# EC2/Euca user (to access the instance in the second play), e.g:
#
# ansible-playbook eucalyptus-ec2-deploy.yml -v --private-key=/path/to/ec2/pri/key
#
# The play operates on the local (Ansible control) machine.
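# Note: 'hosts: local' below assumes the inventory defines a [local] group containing
# 127.0.0.1; 'hosts: localhost' with connection: local works as well.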
- name: Stage instance(s)
hosts: local
connection: local
remote_user: root
gather_facts: false
vars:
keypair: mykeypair
instance_type: m1.small
security_group: default
image: emi-048B3A37
# Launch 5 instances with the following parameters. Register the output.
tasks:
- name: Launch instance
ec2: keypair={{keypair}} group={{security_group}}
instance_type={{instance_type}} image={{image}}
wait=true count=5
register: ec2
# Use with_items to add each instance's public IP to a new host group for use in the next play.
- name: Add new instances to host group
add_host: hostname={{item.public_ip}} groupname=deploy
with_items: ec2.instances
- name: Wait for the instances to boot by checking the ssh port
wait_for: host={{item.public_dns_name}} port=22 delay=60 timeout=320 state=started
with_items: ec2.instances
# Use the ec2_vol module to create volumes for attachment to each instance.
# Use with_items to attach to each instance (by returned id) launched previously.
- name: Create a volume and attach
ec2_vol: volume_size=20 instance={{item.id}}
with_items: ec2.instances
# This play targets the new host group
- name: Configure instance
hosts: deploy
remote_user: root
# Do some stuff on each instance ....
tasks:
- name: Ensure NTP is up and running
service: name=ntpd state=started
- name: Install Apache Web Server
yum: pkg=httpd state=latest

@ -1,18 +0,0 @@
---
# This is a demo of how to manage the selinux context using the file module
- hosts: test
remote_user: root
tasks:
- name: Change setype of /etc/exports to non-default value
file: path=/etc/exports setype=etc_t
- name: Change seuser of /etc/exports to non-default value
file: path=/etc/exports seuser=unconfined_u
- name: Set selinux context back to default value
file: path=/etc/exports context=default
- name: Create empty file
command: /bin/touch /tmp/foo
- name: Change setype of /tmp/foo
file: path=/tmp/foo setype=default_t
- name: Try to set secontext to default; this will fail because the policy has no default for this path
file: path=/tmp/foo context=default

@ -1,399 +0,0 @@
{
"Outputs" : {
"ClusterSecGroup" : {
"Description" : "Name of RegionalManagerSecGroup",
"Value" : {
"Ref" : "InstanceSecurityGroup"
}
}
},
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Launches an example cluster",
"Mappings" : {
"ebs" : {
"ap-northeast-1" : {
"AMI" : "ami-4e6cd34f"
},
"ap-southeast-1" : {
"AMI" : "ami-a6a7e7f4"
},
"eu-west-1" : {
"AMI" : "ami-c37474b7"
},
"sa-east-1" : {
"AMI" : "ami-1e08d103"
},
"us-east-1" : {
"AMI" : "ami-1624987f"
},
"us-west-1" : {
"AMI" : "ami-1bf9de5e"
},
"us-west-2" : {
"AMI" : "ami-2a31bf1a"
}
},
"ephemeral" : {
"ap-northeast-1" : {
"AMI" : "ami-5a6cd35b"
},
"ap-southeast-1" : {
"AMI" : "ami-a8a7e7fa"
},
"eu-west-1" : {
"AMI" : "ami-b57474c1"
},
"sa-east-1" : {
"AMI" : "ami-1608d10b"
},
"us-east-1" : {
"AMI" : "ami-e8249881"
},
"us-west-1" : {
"AMI" : "ami-21f9de64"
},
"us-west-2" : {
"AMI" : "ami-2e31bf1e"
}
}
},
"Parameters" : {
"ClusterSize" : {
"Description" : "Number of nodes in the cluster",
"Type" : "String"
},
"DiskType" : {
"AllowedValues" : [
"ephemeral",
"ebs"
],
"Default" : "ephemeral",
"Description" : "Type of Disk to use ( ephemeral/ebs )",
"Type" : "String"
},
"InstanceType" : {
"AllowedValues" : [
"t1.micro",
"m1.small",
"m1.medium",
"m1.large",
"m1.xlarge",
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge"
],
"ConstraintDescription" : "must be valid instance type. ",
"Default" : "m1.large",
"Description" : "Type of EC2 instance for cluster",
"Type" : "String"
},
"KeyName" : {
"Description" : "Name of an existing EC2 KeyPair to enable SSH access to the cluster",
"Type" : "String"
}
},
"Resources" : {
"ApplicationWaitCondition" : {
"DependsOn" : "ClusterServerGroup",
"Properties" : {
"Handle" : {
"Ref" : "ApplicationWaitHandle"
},
"Timeout" : "4500"
},
"Type" : "AWS::CloudFormation::WaitCondition"
},
"ApplicationWaitHandle" : {
"Type" : "AWS::CloudFormation::WaitConditionHandle"
},
"CFNInitUser" : {
"Properties" : {
"Path" : "/",
"Policies" : [
{
"PolicyDocument" : {
"Statement" : [
{
"Action" : [
"cloudformation:DescribeStackResource",
"s3:GetObject"
],
"Effect" : "Allow",
"Resource" : "*"
}
]
},
"PolicyName" : "AccessForCFNInit"
}
]
},
"Type" : "AWS::IAM::User"
},
"CFNKeys" : {
"Properties" : {
"UserName" : {
"Ref" : "CFNInitUser"
}
},
"Type" : "AWS::IAM::AccessKey"
},
"ClusterCommunication1" : {
"Properties" : {
"FromPort" : "-1",
"GroupName" : {
"Ref" : "InstanceSecurityGroup"
},
"IpProtocol" : "icmp",
"SourceSecurityGroupName" : {
"Ref" : "InstanceSecurityGroup"
},
"ToPort" : "-1"
},
"Type" : "AWS::EC2::SecurityGroupIngress"
},
"ClusterCommunication2" : {
"Properties" : {
"FromPort" : "1",
"GroupName" : {
"Ref" : "InstanceSecurityGroup"
},
"IpProtocol" : "tcp",
"SourceSecurityGroupName" : {
"Ref" : "InstanceSecurityGroup"
},
"ToPort" : "65356"
},
"Type" : "AWS::EC2::SecurityGroupIngress"
},
"ClusterCommunication3" : {
"Properties" : {
"FromPort" : "1",
"GroupName" : {
"Ref" : "InstanceSecurityGroup"
},
"IpProtocol" : "udp",
"SourceSecurityGroupName" : {
"Ref" : "InstanceSecurityGroup"
},
"ToPort" : "65356"
},
"Type" : "AWS::EC2::SecurityGroupIngress"
},
"InstanceSecurityGroup" : {
"Properties" : {
"GroupDescription" : "Enable SSH access via port 22",
"SecurityGroupIngress" : [
{
"CidrIp" : "0.0.0.0/0",
"FromPort" : "22",
"IpProtocol" : "tcp",
"ToPort" : "22"
}
]
},
"Type" : "AWS::EC2::SecurityGroup"
},
"LaunchConfig" : {
"Properties" : {
"IamInstanceProfile" : {
"Ref" : "RootInstanceProfile"
},
"ImageId" : {
"Fn::FindInMap" : [
{
"Ref" : "DiskType"
},
{
"Ref" : "AWS::Region"
},
"AMI"
]
},
"InstanceType" : {
"Ref" : "InstanceType"
},
"KeyName" : {
"Ref" : "KeyName"
},
"SecurityGroups" : [
{
"Ref" : "InstanceSecurityGroup"
}
],
"UserData" : {
"Fn::Base64" : {
"Fn::Join" : [
"\n",
[
"#!/bin/bash -v",
"exec > >(tee /var/log/cfn-data.log|logger -t user-data -s 2>/dev/console) 2>&1",
"",
"sleep 10",
"",
"function retry {",
" nTrys=0",
" maxTrys=5",
" status=256",
" until [ $status == 0 ] ; do",
" $1",
" status=$?",
" nTrys=$(($nTrys + 1))",
" if [ $nTrys -gt $maxTrys ] ; then",
" echo \"Number of re-trys exceeded. Exit code: $status\"",
" exit $status",
" fi",
" if [ $status != 0 ] ; then",
" echo \"Failed (exit code $status)... retry $nTrys\"",
" sleep 10",
" fi",
" done",
"}",
"",
"yum update -y aws-cfn-bootstrap",
"",
"#for all the stuff that complains about sudo and tty",
"sed -i 's,Defaults requiretty,#Defaults requiretty,g' /etc/sudoers",
"",
"function error_exit",
"{",
{
"Fn::Join" : [
"",
[
" /opt/aws/bin/cfn-signal -e 1 -r \"$1\" '",
{
"Ref" : "ApplicationWaitHandle"
},
"'"
]
]
},
"}",
"yum update -y aws-cfn-bootstrap",
"#this runs the first stage of cfinit",
{
"Fn::Join" : [
"",
[
"#/opt/aws/bin/cfn-init -c ascending -v --region ",
{
"Ref" : "AWS::Region"
},
" -s ",
{
"Ref" : "AWS::StackName"
},
" -r ",
"LaunchConfig",
" --access-key ",
{
"Ref" : "CFNKeys"
},
" --secret-key ",
{
"Fn::GetAtt" : [
"CFNKeys",
"SecretAccessKey"
]
},
" || error_exit 'Failed to initialize client using cfn-init'"
]
]
},
"",
"",
"",
"result_code=$?",
{
"Fn::Join" : [
"",
[
"/opt/aws/bin/cfn-signal -e $result_code '",
{
"Ref" : "ApplicationWaitHandle"
},
"'"
]
]
}
]
]
}
}
},
"Type" : "AWS::AutoScaling::LaunchConfiguration"
},
"ClusterServerGroup" : {
"Properties" : {
"AvailabilityZones" : {
"Fn::GetAZs" : ""
},
"LaunchConfigurationName" : {
"Ref" : "LaunchConfig"
},
"MaxSize" : {
"Ref" : "ClusterSize"
},
"MinSize" : {
"Ref" : "ClusterSize"
}
},
"Type" : "AWS::AutoScaling::AutoScalingGroup"
},
"RolePolicies" : {
"Properties" : {
"PolicyDocument" : {
"Statement" : [
{
"Action" : "*",
"Effect" : "Allow",
"Resource" : "*"
}
]
},
"PolicyName" : "root",
"Roles" : [
{
"Ref" : "RootRole"
}
]
},
"Type" : "AWS::IAM::Policy"
},
"RootInstanceProfile" : {
"Properties" : {
"Path" : "/",
"Roles" : [
{
"Ref" : "RootRole"
}
]
},
"Type" : "AWS::IAM::InstanceProfile"
},
"RootRole" : {
"Properties" : {
"AssumeRolePolicyDocument" : {
"Statement" : [
{
"Action" : [
"sts:AssumeRole"
],
"Effect" : "Allow",
"Principal" : {
"Service" : [
"ec2.amazonaws.com"
]
}
}
]
},
"Path" : "/"
},
"Type" : "AWS::IAM::Role"
}
}
}

@ -1,29 +0,0 @@
# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class FilterModule(object):
''' Custom filters are loaded by FilterModule objects '''
def filters(self):
''' FilterModule objects return a dict mapping filter names to
filter functions. '''
return {
'generate_answer': self.generate_answer,
}
def generate_answer(self, value):
return '42'
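# Example use in a template (a sketch): "{{ 'anything' | generate_answer }}" always
# renders as 42, since the filter ignores its input value.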

@ -1,16 +0,0 @@
---
- hosts: webservers
vars:
- jquery_directory: /var/www/html/javascript
- person: 'Susie%20Smith'
tasks:
- name: Create directory for jQuery
file: dest={{jquery_directory}} state=directory mode=0755
- name: Grab a bunch of jQuery stuff
get_url: url=http://code.jquery.com/{{item}} dest={{jquery_directory}} mode=0444
with_items:
- jquery.min.js
- mobile/latest/jquery.mobile.min.js
- ui/jquery-ui-git.css
#- name: Pass urlencoded name to CGI
# get_url: url=http://example.com/name.cgi?name='{{person}}' dest=/tmp/test

@ -1,35 +0,0 @@
---
# Example playbook to demonstrate the group_by action plugin.
#
# As we know, the setup module runs automatically in each play and gathers various
# facts. We can then create temporary (in-memory only) groups based on those facts, which
# are useful ways of selecting similar sets of hosts.
#
# Additionally, we can use the 'register' keyword in Ansible to set similar variables
# and use those for grouping. This is not shown in this example.
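# A minimal sketch of that register-based approach (hypothetical, not part of this
# example):
#   - command: uname -r
#     register: kernel
#   - group_by: key=kernel-{{ kernel.stdout }}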
- hosts: all
tasks:
- name: Create a group of all hosts by operating system
group_by: key={{ansible_distribution}}-{{ansible_distribution_version}}
# the following host group does not exist in inventory and was created by the group_by
# module.
- hosts: CentOS-6.2
tasks:
- name: ping all CentOS 6.2 hosts
ping:
- hosts: CentOS-6.3
tasks:
- name: ping all CentOS 6.3 hosts
ping:

@ -1,18 +0,0 @@
---
# This is a demo of how the group module works.
- hosts: all
remote_user: root
sudo: yes
tasks:
# Walk through group creation, modification, and deletion
- name: create a group
group: name=tset
# You can only modify the group's gid
- group: name=tset gid=7777
# And finally remove the group
- group: name=tset state=absent

@ -1,10 +0,0 @@
---
# this is an example to show that handlers can be included from yaml files,
# to promote reuse between different plays or even playbooks. They work
# just like normal handlers.
- name: restart apache
service: name=httpd state=restarted
- name: restart memcached
service: name=memcached state=restarted

@ -1,91 +0,0 @@
---
# see intro_example.yml first!
# This file explains some more advanced features of playbooks.
# because of the comments it's less concise than it normally is. But feel
# free to comment your playbooks if you like.
- hosts: all
# we can define variables the normal way...
vars:
release: 2.0
# but they can also come from other files. This can be a relative
# or absolute path. This is a good way to store 'secret' variable
# files but still keep the playbook in public source control
vars_files:
- vars/external_vars.yml
# as with before, every play has a list of tasks in it
tasks:
# tasks can be written the normal way...
- name: arbitrary command
command: /bin/true
# or we can promote reuse and simplicity by including tasks
# from other files, for instance, to reuse common tasks
- include: tasks/base.yml
# we could also have done something like:
# - include: wordpress.yml user=timmy
# and had access to the template variable $user in the
# included file, if we wanted to. Variables from vars
# and vars_files are also available inside include files
handlers:
# handlers can also be included from files, to promote reuse
# and simpler recipes, you may wish to only have one
# handler file for all your plays and playbooks. This example really
# doesn't notify any handlers, it is just showing you how they would
# be included (see intro_example for usage).
- include: handlers/handlers.yml
# you can mix things that are directly in the file with things
# that are included. They execute in the order written, but only
# handlers that have been notified get executed
- name: restart foo
service: name=foo state=restarted
# ===============================================================
# Here's a second play in the same playbook. This will be run
# after the first playbook completes on all hosts. You may want
# a different play for each class of systems, or may want a different
# play for each stage in a complex multi-node deployment push
# process. How you use them is up to you.
# any play in a playbook can be executed by a user other than root
# if you want. sudo support is coming too.
- hosts: webservers
remote_user: mdehaan
# vars must be specified again for the next play in the playbook
# but can be reused by including from vars_files if you want
# you can use vars, vars_files, or both. vars_files overrides
# those set in vars.
vars:
release: 2.0
vars_files:
- vars/external_vars.yml
# these all run as the user 'mdehaan'. If there were any handlers
# they would run as that user as well.
tasks:
- name: some random command
command: /bin/true

@ -1,76 +0,0 @@
---
# this is an annotated example of some features available in playbooks
# it shows how to make sure packages are updated, how to make sure
# services are running, and how to template files. It also demos
# change handlers that can restart things (or trigger other actions)
# when resources change. For more advanced examples, see example2.yml
# on all hosts, run as the user root...
- name: example play
hosts: all
remote_user: root
# could also have done:
# remote_user: mdehaan
# sudo: yes
# make these variables available inside of templates
# for when we use the 'template' action/module later on...
vars:
http_port: 80
max_clients: 200
# define the tasks that are part of this play...
tasks:
# task #1 is to run an arbitrary command
# we'll simulate a long running task, wait for up to 45 seconds, poll every 5
# obviously this does nothing useful but you get the idea
- name: longrunner
command: /bin/sleep 15
async: 45
poll: 5
# let's demo file operations.
#
# We can 'copy' files or 'template' them instead, using jinja2
# as the templating engine. This is done using the variables
# from the vars section above mixed in with variables bubbled up
# automatically from tools like facter and ohai. 'copy'
# works just like 'template' but does not do variable substitution.
#
# If and only if the file changes, restart apache at the very
# end of the playbook run
- name: write some_random_foo configuration
template: src=templates/foo.j2 dest=/etc/some_random_foo.conf
notify:
- restart apache
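# a hypothetical sketch of templates/foo.j2 (not shown here), using the vars above:
#   Listen {{ http_port }}
#   MaxClients {{ max_clients }}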
# make sure httpd is installed at the latest version
- name: install httpd
yum: pkg=httpd state=latest
# make sure httpd is running
- name: httpd start
service: name=httpd state=running
# handlers are only run when things change, at the very end of each
# play. Let's define some. The names are significant and must
# match the 'notify' sections above
handlers:
# this particular handler is run when some_random_foo.conf
# is changed, and only then
- name: restart apache
service: name=httpd state=restarted

@ -1,24 +0,0 @@
---
# this is a trivial example of how to do a nested loop.
- hosts: all
tasks:
- shell: echo "nested test a={{ item[0] }} b={{ item[1] }} c={{ item[2] }}"
with_nested:
- [ 'red', 'blue', 'green' ]
- [ 1, 2, 3 ]
- [ 'up', 'down', 'strange']
# you can reference a raw variable name without putting it in {{ brackets }}
- hosts: all
vars:
listvar1:
- 'a'
- 'b'
- 'c'
tasks:
- shell: echo "nested test a={{ item[0] }} b={{ item[1] }}"
with_nested:
- listvar1
- [ 1, 2, 3 ]

@ -1,20 +0,0 @@
---
# in addition to loop_with_items, the loop that works over a variable, ansible can do more sophisticated looping.
# developer types: these are powered by 'lookup_plugins' should you ever decide to write your own
# see lib/ansible/runner/lookup_plugins/fileglob.py -- they can do basically anything!
- hosts: all
gather_facts: no
tasks:
# this will copy a bunch of config files over -- dir must be created first
- file: dest=/etc/fooapp state=directory
- copy: src={{ item }} dest=/etc/fooapp/ owner=root mode=600
with_fileglob: /playbooks/files/fooapp/*

@ -1,35 +0,0 @@
---
# this is an example of how to run repeated task elements over lists
# of items, for example, installing multiple packages or configuring
# multiple users
- hosts: all
remote_user: root
tasks:
- name: install packages
yum: name={{ item }} state=installed
with_items:
- cobbler
- httpd
- name: configure users
user: name={{ item }} state=present groups=wheel
with_items:
- testuser1
- testuser2
- name: remove users
user: name={{ item }} state=absent
with_items:
- testuser1
- testuser2
- name: copy templates
template: src={{ item.src }} dest={{ item.dest }}
with_items:
- src: templates/testsource1
dest: /example/dest1/test.conf
- src: templates/testsource2
dest: /example/dest2/test.conf

@ -1,18 +0,0 @@
##
# Example Ansible playbook that uses the MySQL module.
#
---
- hosts: all
remote_user: root
tasks:
- name: Create database user
mysql_user: user=bob password=12345 priv=*.*:ALL state=present
- name: Create database
mysql_db: db=bobdata state=present
- name: Ensure no user named 'sally' exists and delete if found.
mysql_user: user=sally state=absent

@ -1,26 +0,0 @@
---
# it is possible to have top level playbook files import other playbook
# files. For example, a top-level site playbook could include three
# different playbooks, such as webservers, workers, dbservers, etc.
#
# Running the site playbook would run all playbooks, while individual
# playbooks could still be run directly. This is somewhat like
# the tag feature and can be used in conjunction with it for very fine-grained
# control over what you want to target when running ansible.
- name: this is a play at the top level of a file
hosts: all
remote_user: root
tasks:
- name: say hi
tags: foo
shell: echo "hi..."
# and this is how we include another playbook, be careful and
# don't recurse infinitely or anything. Note you can't use
# any variables in the include path here.
- include: intro_example.yml
# and if we wanted, we can continue with more includes here,
# or more plays inline in this file

@ -1,25 +0,0 @@
---
#
# NetScaler module example
#
- hosts: web-pool
serial: 3
vars:
nsc_host: nsc.example.com
nsc_user: admin
nsc_pass: nimda
# type of the netscaler object you want to manipulate
type: service
# netscaler object name
name: "{{facter_fqdn}}:8080"
tasks:
- name: disable service in the lb
netscaler: nsc_host={{nsc_host}} user={{nsc_user}} password={{nsc_pass}} name={{name}} type={{type}} action=disable
- name: deploy new code
shell: yum upgrade -y
- name: enable in the lb
netscaler: nsc_host={{nsc_host}} user={{nsc_user}} password={{nsc_pass}} name={{name}} type={{type}} action=enable

@ -1,41 +0,0 @@
##
# Example Ansible playbook that uses the PostgreSQL module.
#
# This installs PostgreSQL on an Ubuntu system, creates a database called
# "myapp" and a user called "django" with password "mysupersecretpassword"
# with access to the "myapp" database.
#
---
- hosts: webservers
sudo: yes
gather_facts: no
tasks:
- name: ensure apt cache is up to date
apt: update_cache=yes
- name: ensure packages are installed
apt: name={{item}}
with_items:
- postgresql
- libpq-dev
- python-psycopg2
- hosts: webservers
sudo: yes
sudo_user: postgres
gather_facts: no
vars:
dbname: myapp
dbuser: django
dbpassword: mysupersecretpassword
tasks:
- name: ensure database is created
postgresql_db: name={{dbname}}
- name: ensure user has access to database
postgresql_user: db={{dbname}} name={{dbuser}} password={{dbpassword}} priv=ALL
- name: ensure user does not have unnecessary privilege
postgresql_user: name={{dbuser}} role_attr_flags=NOSUPERUSER,NOCREATEDB

Some files were not shown because too many files have changed in this diff Show More