hadoop added

pull/63/head
bennojoy 11 years ago
parent 01f0c6454e
commit c537976273
  1. hadoop/.common_test.yml.retry (+27)
  2. hadoop/.job.yml.retry (+20)
  3. hadoop/.site.yml.retry (+20)
  4. hadoop/README.md (+145)
  5. hadoop/group_vars/all (+1)
  6. hadoop/hadoop_vars/hadoop (+47)
  7. hadoop/hosts (+26)
  8. hadoop/playbooks/inputfile (+19)
  9. hadoop/playbooks/job.yml (+21)
  10. hadoop/roles/common/files/etc/cloudera-CDH4.repo (+5)
  11. hadoop/roles/common/handlers/main.yml (+2)
  12. hadoop/roles/common/tasks/common.yml (+30)
  13. hadoop/roles/common/tasks/main.yml (+5)
  14. hadoop/roles/common/templates/etc/hosts.j2 (+5)
  15. hadoop/roles/common/templates/hadoop_conf/core-site.xml.j2 (+25)
  16. hadoop/roles/common/templates/hadoop_conf/hadoop-metrics.properties.j2 (+75)
  17. hadoop/roles/common/templates/hadoop_conf/hadoop-metrics2.properties.j2 (+44)
  18. hadoop/roles/common/templates/hadoop_conf/hdfs-site.xml.j2 (+57)
  19. hadoop/roles/common/templates/hadoop_conf/log4j.properties.j2 (+219)
  20. hadoop/roles/common/templates/hadoop_conf/mapred-site.xml.j2 (+22)
  21. hadoop/roles/common/templates/hadoop_conf/slaves.j2 (+3)
  22. hadoop/roles/common/templates/hadoop_conf/ssl-client.xml.example.j2 (+80)
  23. hadoop/roles/common/templates/hadoop_conf/ssl-server.xml.example.j2 (+77)
  24. hadoop/roles/common/templates/hadoop_ha_conf/core-site.xml.j2 (+25)
  25. hadoop/roles/common/templates/hadoop_ha_conf/hadoop-metrics.properties.j2 (+75)
  26. hadoop/roles/common/templates/hadoop_ha_conf/hadoop-metrics2.properties.j2 (+44)
  27. hadoop/roles/common/templates/hadoop_ha_conf/hdfs-site.xml.j2 (+103)
  28. hadoop/roles/common/templates/hadoop_ha_conf/log4j.properties.j2 (+219)
  29. hadoop/roles/common/templates/hadoop_ha_conf/mapred-site.xml.j2 (+120)
  30. hadoop/roles/common/templates/hadoop_ha_conf/slaves.j2 (+3)
  31. hadoop/roles/common/templates/hadoop_ha_conf/ssl-client.xml.example.j2 (+80)
  32. hadoop/roles/common/templates/hadoop_ha_conf/ssl-server.xml.example.j2 (+77)
  33. hadoop/roles/common/templates/iptables.j2 (+41)
  34. hadoop/roles/hadoop_primary/handlers/main.yml (+14)
  35. hadoop/roles/hadoop_primary/tasks/hadoop_master.yml (+76)
  36. hadoop/roles/hadoop_primary/tasks/main.yml (+5)
  37. hadoop/roles/hadoop_secondary/handlers/main.yml (+14)
  38. hadoop/roles/hadoop_secondary/tasks/hadoop_secondary.yml (+73)
  39. hadoop/roles/hadoop_secondary/tasks/main.yml (+4)
  40. hadoop/roles/hadoop_slaves/handlers/main.yml (+8)
  41. hadoop/roles/hadoop_slaves/tasks/main.yml (+4)
  42. hadoop/roles/hadoop_slaves/tasks/slaves.yml (+53)
  43. hadoop/roles/qjournal_servers/handlers/main.yml (+5)
  44. hadoop/roles/qjournal_servers/tasks/main.yml (+38)
  45. hadoop/roles/zookeeper_servers/handlers/main.yml (+5)
  46. hadoop/roles/zookeeper_servers/tasks/main.yml (+13)
  47. hadoop/roles/zookeeper_servers/templates/zoo.cfg.j2 (+9)
  48. hadoop/roles/zookeeper_servers/vars/main.yml (+6)
  49. hadoop/site.yml (+41)

@ -0,0 +1,27 @@
# dynamically generated inventory file
# retries previously failed hosts only
[hadoop_masters]
nisserver zoo_id=1 role=active
nisclient1 zoo_id=2 role=standby
[CentOS]
nisclient1 zoo_id=2 role=standby
nisclient2 zoo_id=3
nisserver zoo_id=1 role=active
[qjournal_servers]
nisserver zoo_id=1 role=active
nisclient1 zoo_id=2 role=standby
nisclient2 zoo_id=3
[zookeeper_servers]
nisserver zoo_id=1 role=active
nisclient1 zoo_id=2 role=standby
nisclient2 zoo_id=3
[hadoop_slaves]
nisserver zoo_id=1 role=active
nisclient1 zoo_id=2 role=standby
nisclient2 zoo_id=3

@ -0,0 +1,20 @@
# dynamically generated inventory file
# retries previously failed hosts only
[hadoop_masters:children]
hadoop_master_primary
hadoop_master_secondary
[qjournal_servers]
hadoop1 zoo_id=1
[hadoop_master_primary]
hadoop1 zoo_id=1
[zookeeper_servers]
hadoop1 zoo_id=1
[hadoop_slaves]
hadoop1 zoo_id=1

@ -0,0 +1,20 @@
# dynamically generated inventory file
# retries previously failed hosts only
[hadoop_masters:children]
hadoop_master_primary
hadoop_master_secondary
[qjournal_servers]
hadoop1 zoo_id=1
[hadoop_master_primary]
hadoop1 zoo_id=1
[zookeeper_servers]
hadoop1 zoo_id=1
[hadoop_slaves]
hadoop1 zoo_id=1

@ -0,0 +1,145 @@
### Deploying Hadoop Clusters using Ansible.
##### Preface
The playbooks in this example deploy Hadoop clusters for users; they can be used to:
1) Deploy a fully functional Hadoop cluster with HA and automatic failover.
2) Deploy a fully functional Hadoop cluster with no HA.
3) Deploy additional nodes to scale the cluster.
4) Verify the cluster by deploying MapReduce jobs.
##### Brief introduction to the different components of a Hadoop cluster.
The following diagram depicts a Hadoop cluster with HA and automated failover, as deployed by these Ansible playbooks.
The two major categories of machine roles in a Hadoop cluster are Hadoop masters and Hadoop slaves.
The Hadoop masters consist of:
NameNode: The NameNode is the centerpiece of an HDFS file system. It keeps the directory tree of all files in the file system and tracks where across the cluster the file data is kept. It does not store the data of these files itself. Client applications talk to the NameNode whenever they wish to locate a file, or when they want to add/copy/move/delete a file. The NameNode responds to successful requests by returning a list of relevant DataNode servers where the data lives.
JobTracker: The JobTracker is the service within Hadoop that hands out MapReduce tasks to specific nodes in the cluster. Applications submit jobs to the JobTracker, which talks to the NameNode to determine the location of the data; once located, the JobTracker submits the work to the chosen TaskTracker nodes.
The Hadoop slaves consist of:
DataNode: A DataNode is responsible for storing data in the Hadoop file system (HDFS). A functional HDFS filesystem has more than one DataNode, and data is replicated across them.
TaskTracker: A TaskTracker is a node in the cluster that accepts tasks (Map, Reduce and Shuffle operations) from a JobTracker.
Unlike their slave counterparts (DataNode, TaskTracker), the Hadoop master processes do not have high availability built into them. In order to provide HA for the NameNode and JobTracker, the following processes are used.
Quorum Journal Nodes: The journal nodes are responsible for maintaining a journal of any modifications made to the HDFS namespace. The active NameNode logs any modifications to the journal nodes, and the standby NameNode reads the changes from the journal nodes and applies them to its local namespace. In a production environment the minimum recommended number of journal nodes is 3; these nodes can also be colocated with the NameNode/JobTracker.
ZooKeeper Nodes: The purpose of ZooKeeper is cluster management. Remember that Hadoop HA is an active/passive cluster, so the cluster requires services such as heartbeats, locks, leader election and quorum; these services are provided by ZooKeeper. The recommended number for production use is 3.
zkfc NameNode: zkfc (ZooKeeper Failover Controller) is a ZooKeeper client application that runs on each NameNode server. Its responsibilities include health monitoring, ZooKeeper session management and leader election; i.e. in case of a NameNode failure, the zkfc process running on that machine detects the failure and informs ZooKeeper, as a result of which re-election takes place and a new active NameNode is selected.
zkfc JobTracker: The zkfc JobTracker performs the same functions as the zkfc NameNode, the difference being that the process zkfc is responsible for is the JobTracker.
#### Deploying a Hadoop Cluster with HA
##### Prerequisites
The playbooks have been tested using Ansible v1.2 and CentOS 6.x (64 bit).
Modify group_vars/all to choose the network interface used for Hadoop communication.
Optionally, you can change Hadoop-specific parameters such as ports or directories by editing the hadoop_vars/hadoop file.
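For reference, group_vars/all in this example holds a single variable that selects the interface whose IPv4 address is written into /etc/hosts for every node:
iface: eth1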
Before launching the deployment playbook, make sure the inventory file (hosts) has been set up properly. Here's a sample:
[hadoop_master_primary]
zhadoop1
[hadoop_master_secondary]
zhadoop2
[hadoop_masters:children]
hadoop_master_primary
hadoop_master_secondary
[hadoop_slaves]
hadoop1
hadoop2
hadoop3
[qjournal_servers]
zhadoop1
zhadoop2
zhadoop3
[zookeeper_servers]
zhadoop1 zoo_id=1
zhadoop2 zoo_id=2
zhadoop3 zoo_id=3
Once the inventory is set up, the Hadoop cluster can be deployed using the following command:
ansible-playbook -i hosts site.yml
Once deployed, we can check the cluster's sanity in different ways. To check the status of the HDFS filesystem and get a report on all the DataNodes, log in as the hdfs user on any Hadoop master server and issue the following command:
hadoop dfsadmin -report
To check the sanity of HA, first log in as the hdfs user on any Hadoop master server and get the current active/standby NameNode servers:
-bash-4.1$ hdfs haadmin -getServiceState zhadoop1
active
-bash-4.1$ hdfs haadmin -getServiceState zhadoop2
standby
To get the state of the JobTracker process, log in as the mapred user on any Hadoop master server and issue the following commands:
-bash-4.1$ hadoop mrhaadmin -getServiceState hadoop1
standby
-bash-4.1$ hadoop mrhaadmin -getServiceState hadoop2
active
Once the active and the standby have been identified, kill the NameNode/JobTracker process on the server listed as active and issue the same commands as above; you should see that the standby has been promoted to the active state. Later you can start the killed process again and see it listed as the standby.
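A minimal sketch of such a failover test, assuming zhadoop1 is currently the active NameNode and that the daemons are managed through the CDH4 init scripts (service names may differ in your environment):
service hadoop-hdfs-namenode stop      # run on zhadoop1 to simulate a NameNode failure
hdfs haadmin -getServiceState zhadoop2 # run as the hdfs user; the former standby should now report 'active'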
#### Running a MapReduce job on the cluster.
To run a MapReduce job on the cluster, a sample playbook has been provided. This playbook runs a job which counts the occurrences of the word 'hello' in an input file. A sample input file has been created at playbooks/inputfile; modify the file to match your testing.
To deploy the MapReduce job run the following command (below, -e server=<any of your hadoop master servers>):
ansible-playbook -i hosts playbooks/job.yml -e server=zhadoop1
To verify the result, read the file on your Ansible server located at /tmp/zhadoop1/tmp/outputfile/part-00000, which should give you the count.
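For example, on the Ansible control machine:
cat /tmp/zhadoop1/tmp/outputfile/part-00000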
#### Scale the Cluster
When the Hadoop cluster reaches its maximum capacity, it can be scaled by adding nodes. This is easily accomplished by adding the new node entries to the inventory file (hosts) under the hadoop_slaves group and running the following command:
ansible-playbook -i hosts site.yml --tags=slaves
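For example, assuming a new slave with the hypothetical hostname hadoop4, the hadoop_slaves group in the hosts file would become:
[hadoop_slaves]
hadoop1
hadoop2
hadoop3
hadoop4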
#### Deploy a non-HA Hadoop Cluster
The following diagram illustrates a standalone Hadoop cluster.
To deploy this cluster, fill in the inventory file as follows:
[hadoop_master_primary]
zhadoop1
[hadoop_master_secondary]
[hadoop_masters:children]
hadoop_master_primary
hadoop_master_secondary
[hadoop_slaves]
hadoop1
hadoop2
hadoop3
and issue the following command:
ansible-playbook -i hosts site.yml -e ha_disabled=true --tags=no_ha
The validity of the cluster can be checked by running the same MapReduce job documented above for the HA Hadoop cluster.

@ -0,0 +1 @@
iface: eth1

@ -0,0 +1,47 @@
hadoop:
#Variables for <core-site.xml> - common
fs.default.FS.port: 8020
nameservice.id: mycluster2
#Variables for <hdfs-site.xml>
dfs.permissions.superusergroup: hdfs
dfs_namenode_name_dir:
- /namedir1/
- /namedir2/
dfs.replication: 3
dfs.namenode.handler.count: 50
dfs.blocksize: 67108864
dfs_datanode_data_dir:
- /datadir1/
- /datadir2/
dfs.datanode.address.port: 50010
dfs.datanode.http.address.port: 50075
dfs.datanode.ipc.address.port: 50020
dfs.namenode.http.address.port: 50070
dfs.ha.zkfc.port: 8019
qjournal.port: 8485
qjournal.http.port: 8480
dfs_journalnode_edits_dir: /journaldir/
zookeeper.clientport: 2181
zookeeper.leader_port: 2888
zookeeper.election_port: 3888
#Variables for <mapred-site.xml> - common
mapred.job.tracker.ha.servicename: myjt2
mapred.job.tracker.http.address.port: 50030
mapred.task.tracker.http.address.port: 50060
mapred.job.tracker.port: 8021
mapred.ha.jobtracker.rpc-address.port: 8023
mapred.ha.zkfc.port: 8018
mapred_job_tracker_persist_jobstatus_dir: /jobdir/
mapred_local_dir:
- /mapred1/
- /mapred2/

@ -0,0 +1,26 @@
[hadoop_master_primary]
hadoop1
[hadoop_master_secondary]
hadoop2
[hadoop_masters:children]
hadoop_master_primary
hadoop_master_secondary
[hadoop_slaves]
hadoop1
hadoop2
hadoop3
[qjournal_servers]
hadoop1
hadoop2
hadoop3
[zookeeper_servers]
hadoop1 zoo_id=1
hadoop2 zoo_id=2
hadoop3 zoo_id=3

@ -0,0 +1,19 @@
asdf
sdf
sdf
sd
f
sf
sdf
sd
fsd
hello
asf
sf
sd
fsd
f
sdf
sd
hello

@ -0,0 +1,21 @@
---
# Launch a job to count the occurrences of a word.
- hosts: $server
user: root
tasks:
- name: copy the file
copy: src=inputfile dest=/tmp/inputfile
- name: upload the file
shell: su - hdfs -c "hadoop fs -put /tmp/inputfile /inputfile"
- name: Run the MapReduce job to count the occurrences of the word hello
shell: su - hdfs -c "hadoop jar /usr/lib/hadoop-0.20-mapreduce/hadoop-examples.jar grep /inputfile /outputfile 'hello'"
- name: Fetch the outputfile to local tmp dir
shell: su - hdfs -c "hadoop fs -get /outputfile /tmp/outputfile"
- name: Get the outputfile to ansible server
fetch: dest=/tmp src=/tmp/outputfile/part-00000

@ -0,0 +1,5 @@
[cloudera-cdh4]
name=Cloudera's Distribution for Hadoop, Version 4
baseurl=http://archive.cloudera.com/cdh4/redhat/6/x86_64/cdh/4/
gpgkey = http://archive.cloudera.com/cdh4/redhat/6/x86_64/cdh/RPM-GPG-KEY-cloudera
gpgcheck = 1

@ -0,0 +1,2 @@
- name: restart iptables
service: name=iptables state=restarted

@ -0,0 +1,30 @@
---
# The playbook for common tasks
- name: Deploy the Cloudera Repository
copy: src=etc/cloudera-CDH4.repo dest=/etc/yum.repos.d/cloudera-CDH4.repo
- name: Install the openjdk package
yum: name=java-1.6.0-openjdk state=installed
- name: create a directory for java
file: state=directory path=/usr/java/
tags: link
- name: create a link for java
file: src=/usr/lib/jvm/java-1.6.0-openjdk-1.6.0.0.x86_64/jre state=link path=/usr/java/default
tags: link
- name: Create the hosts file for all machines
template: src=etc/hosts.j2 dest=/etc/hosts
- name: Disable SELinux in conf file
lineinfile: dest=/etc/sysconfig/selinux regexp='^SELINUX=' line='SELINUX=disabled' state=present
- name: Disable SELinux dynamically
shell: creates=/etc/sysconfig/selinux.disabled setenforce 0 ; touch /etc/sysconfig/selinux.disabled
- name: Create the iptables file for all machines
template: src=iptables.j2 dest=/etc/sysconfig/iptables
notify: restart iptables

@ -0,0 +1,5 @@
---
# The playbook for common tasks
- include: common.yml tags=slaves,no_ha

@ -0,0 +1,5 @@
127.0.0.1 localhost
{% for host in groups.all %}
{{ hostvars[host]['ansible_' + iface].ipv4.address }} {{ host }}
{% endfor %}

@ -0,0 +1,25 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://{{ hostvars[groups['hadoop_masters'][0]]['ansible_hostname'] + ':' ~ hadoop['fs.default.FS.port'] }}/</value>
</property>
</configuration>

@ -0,0 +1,75 @@
# Configuration of the "dfs" context for null
dfs.class=org.apache.hadoop.metrics.spi.NullContext
# Configuration of the "dfs" context for file
#dfs.class=org.apache.hadoop.metrics.file.FileContext
#dfs.period=10
#dfs.fileName=/tmp/dfsmetrics.log
# Configuration of the "dfs" context for ganglia
# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
# dfs.period=10
# dfs.servers=localhost:8649
# Configuration of the "mapred" context for null
mapred.class=org.apache.hadoop.metrics.spi.NullContext
# Configuration of the "mapred" context for file
#mapred.class=org.apache.hadoop.metrics.file.FileContext
#mapred.period=10
#mapred.fileName=/tmp/mrmetrics.log
# Configuration of the "mapred" context for ganglia
# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext
# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
# mapred.period=10
# mapred.servers=localhost:8649
# Configuration of the "jvm" context for null
#jvm.class=org.apache.hadoop.metrics.spi.NullContext
# Configuration of the "jvm" context for file
#jvm.class=org.apache.hadoop.metrics.file.FileContext
#jvm.period=10
#jvm.fileName=/tmp/jvmmetrics.log
# Configuration of the "jvm" context for ganglia
# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
# jvm.period=10
# jvm.servers=localhost:8649
# Configuration of the "rpc" context for null
rpc.class=org.apache.hadoop.metrics.spi.NullContext
# Configuration of the "rpc" context for file
#rpc.class=org.apache.hadoop.metrics.file.FileContext
#rpc.period=10
#rpc.fileName=/tmp/rpcmetrics.log
# Configuration of the "rpc" context for ganglia
# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
# rpc.period=10
# rpc.servers=localhost:8649
# Configuration of the "ugi" context for null
ugi.class=org.apache.hadoop.metrics.spi.NullContext
# Configuration of the "ugi" context for file
#ugi.class=org.apache.hadoop.metrics.file.FileContext
#ugi.period=10
#ugi.fileName=/tmp/ugimetrics.log
# Configuration of the "ugi" context for ganglia
# ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext
# ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
# ugi.period=10
# ugi.servers=localhost:8649

@ -0,0 +1,44 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# syntax: [prefix].[source|sink].[instance].[options]
# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
# default sampling period, in seconds
*.period=10
# The namenode-metrics.out will contain metrics from all context
#namenode.sink.file.filename=namenode-metrics.out
# Specifying a special sampling period for namenode:
#namenode.sink.*.period=8
#datanode.sink.file.filename=datanode-metrics.out
# the following example split metrics of different
# context to different sinks (in this case files)
#jobtracker.sink.file_jvm.context=jvm
#jobtracker.sink.file_jvm.filename=jobtracker-jvm-metrics.out
#jobtracker.sink.file_mapred.context=mapred
#jobtracker.sink.file_mapred.filename=jobtracker-mapred-metrics.out
#tasktracker.sink.file.filename=tasktracker-metrics.out
#maptask.sink.file.filename=maptask-metrics.out
#reducetask.sink.file.filename=reducetask-metrics.out

@ -0,0 +1,57 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>dfs.blocksize</name>
<value>{{ hadoop['dfs.blocksize'] }}</value>
</property>
<property>
<name>dfs.permissions.superusergroup</name>
<value>{{ hadoop['dfs.permissions.superusergroup'] }}</value>
</property>
<property>
<name>dfs.namenode.http.address</name>
<value>0.0.0.0:{{ hadoop['dfs.namenode.http.address.port'] }}</value>
</property>
<property>
<name>dfs.datanode.address</name>
<value>0.0.0.0:{{ hadoop['dfs.datanode.address.port'] }}</value>
</property>
<property>
<name>dfs.datanode.http.address</name>
<value>0.0.0.0:{{ hadoop['dfs.datanode.http.address.port'] }}</value>
</property>
<property>
<name>dfs.datanode.ipc.address</name>
<value>0.0.0.0:{{ hadoop['dfs.datanode.ipc.address.port'] }}</value>
</property>
<property>
<name>dfs.replication</name>
<value>{{ hadoop['dfs.replication'] }}</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>{{ hadoop['dfs_namenode_name_dir'] | join(',') }}</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>{{ hadoop['dfs_datanode_data_dir'] | join(',') }}</value>
</property>
</configuration>

@ -0,0 +1,219 @@
# Copyright 2011 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Define some default values that can be overridden by system properties
hadoop.root.logger=INFO,console
hadoop.log.dir=.
hadoop.log.file=hadoop.log
# Define the root logger to the system property "hadoop.root.logger".
log4j.rootLogger=${hadoop.root.logger}, EventCounter
# Logging Threshold
log4j.threshold=ALL
# Null Appender
log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
#
# Rolling File Appender - cap space usage at 5gb.
#
hadoop.log.maxfilesize=256MB
hadoop.log.maxbackupindex=20
log4j.appender.RFA=org.apache.log4j.RollingFileAppender
log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
# Pattern format: Date LogLevel LoggerName LogMessage
log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
# Debugging Pattern format
#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
#
# Daily Rolling File Appender
#
log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
# Rollover at midnight
log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
# 30-day backup
#log4j.appender.DRFA.MaxBackupIndex=30
log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
# Pattern format: Date LogLevel LoggerName LogMessage
log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
# Debugging Pattern format
#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
#
# console
# Add "console" to rootlogger above if you want to use this
#
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
#
# TaskLog Appender
#
#Default values
hadoop.tasklog.taskid=null
hadoop.tasklog.iscleanup=false
hadoop.tasklog.noKeepSplits=4
hadoop.tasklog.totalLogFileSize=100
hadoop.tasklog.purgeLogSplits=true
hadoop.tasklog.logsRetainHours=12
log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
#
# HDFS block state change log from block manager
#
# Uncomment the following to suppress normal block state change
# messages from BlockManager in NameNode.
#log4j.logger.BlockStateChange=WARN
#
#Security appender
#
hadoop.security.logger=INFO,NullAppender
hadoop.security.log.maxfilesize=256MB
hadoop.security.log.maxbackupindex=20
log4j.category.SecurityLogger=${hadoop.security.logger}
hadoop.security.log.file=SecurityAuth-${user.name}.audit
log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
#
# Daily Rolling Security appender
#
log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
#
# hdfs audit logging
#
hdfs.audit.logger=INFO,NullAppender
hdfs.audit.log.maxfilesize=256MB
hdfs.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
#
# mapred audit logging
#
mapred.audit.logger=INFO,NullAppender
mapred.audit.log.maxfilesize=256MB
mapred.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}
# Custom Logging levels
#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
#log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG
# Jets3t library
log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
#
# Event Counter Appender
# Sends counts of logging messages at different severity levels to Hadoop Metrics.
#
log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
#
# Job Summary Appender
#
# Use following logger to send summary to separate file defined by
# hadoop.mapreduce.jobsummary.log.file :
# hadoop.mapreduce.jobsummary.logger=INFO,JSA
#
hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
hadoop.mapreduce.jobsummary.log.maxbackupindex=20
log4j.appender.JSA=org.apache.log4j.RollingFileAppender
log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
#
# Yarn ResourceManager Application Summary Log
#
# Set the ResourceManager summary log filename
#yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
# Set the ResourceManager summary log level and appender
#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
# Appender for ResourceManager Application Summary Log
# Requires the following properties to be set
# - hadoop.log.dir (Hadoop Log directory)
# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
#log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
#log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
#log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
#log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
#log4j.appender.RMSUMMARY.MaxFileSize=256MB
#log4j.appender.RMSUMMARY.MaxBackupIndex=20
#log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
#log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n

@ -0,0 +1,22 @@
<configuration>
<property>
<name>mapred.job.tracker</name>
<value>{{ hostvars[groups['hadoop_masters'][0]]['ansible_hostname'] }}:{{ hadoop['mapred.job.tracker.port'] }}</value>
</property>
<property>
<name>mapred.local.dir</name>
<value>{{ hadoop["mapred_local_dir"] | join(',') }}</value>
</property>
<property>
<name>mapred.task.tracker.http.address</name>
<value>0.0.0.0:{{ hadoop['mapred.task.tracker.http.address.port'] }}</value>
</property>
<property>
<name>mapred.job.tracker.http.address</name>
<value>0.0.0.0:{{ hadoop['mapred.job.tracker.http.address.port'] }}</value>
</property>
</configuration>

@ -0,0 +1,3 @@
{% for host in groups['hadoop_slaves'] %}
{{ host }}
{% endfor %}

@ -0,0 +1,80 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<configuration>
<property>
<name>ssl.client.truststore.location</name>
<value></value>
<description>Truststore to be used by clients like distcp. Must be
specified.
</description>
</property>
<property>
<name>ssl.client.truststore.password</name>
<value></value>
<description>Optional. Default value is "".
</description>
</property>
<property>
<name>ssl.client.truststore.type</name>
<value>jks</value>
<description>Optional. The keystore file format, default value is "jks".
</description>
</property>
<property>
<name>ssl.client.truststore.reload.interval</name>
<value>10000</value>
<description>Truststore reload check interval, in milliseconds.
Default value is 10000 (10 seconds).
</description>
</property>
<property>
<name>ssl.client.keystore.location</name>
<value></value>
<description>Keystore to be used by clients like distcp. Must be
specified.
</description>
</property>
<property>
<name>ssl.client.keystore.password</name>
<value></value>
<description>Optional. Default value is "".
</description>
</property>
<property>
<name>ssl.client.keystore.keypassword</name>
<value></value>
<description>Optional. Default value is "".
</description>
</property>
<property>
<name>ssl.client.keystore.type</name>
<value>jks</value>
<description>Optional. The keystore file format, default value is "jks".
</description>
</property>
</configuration>

@ -0,0 +1,77 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<configuration>
<property>
<name>ssl.server.truststore.location</name>
<value></value>
<description>Truststore to be used by NN and DN. Must be specified.
</description>
</property>
<property>
<name>ssl.server.truststore.password</name>
<value></value>
<description>Optional. Default value is "".
</description>
</property>
<property>
<name>ssl.server.truststore.type</name>
<value>jks</value>
<description>Optional. The keystore file format, default value is "jks".
</description>
</property>
<property>
<name>ssl.server.truststore.reload.interval</name>
<value>10000</value>
<description>Truststore reload check interval, in milliseconds.
Default value is 10000 (10 seconds).
</description>
</property>
<property>
<name>ssl.server.keystore.location</name>
<value></value>
<description>Keystore to be used by NN and DN. Must be specified.
</description>
</property>
<property>
<name>ssl.server.keystore.password</name>
<value></value>
<description>Must be specified.
</description>
</property>
<property>
<name>ssl.server.keystore.keypassword</name>
<value></value>
<description>Must be specified.
</description>
</property>
<property>
<name>ssl.server.keystore.type</name>
<value>jks</value>
<description>Optional. The keystore file format, default value is "jks".
</description>
</property>
</configuration>

@ -0,0 +1,25 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://{{ hadoop['nameservice.id'] }}/</value>
</property>
</configuration>

@ -0,0 +1,75 @@
# Configuration of the "dfs" context for null
dfs.class=org.apache.hadoop.metrics.spi.NullContext
# Configuration of the "dfs" context for file
#dfs.class=org.apache.hadoop.metrics.file.FileContext
#dfs.period=10
#dfs.fileName=/tmp/dfsmetrics.log
# Configuration of the "dfs" context for ganglia
# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
# dfs.period=10
# dfs.servers=localhost:8649
# Configuration of the "mapred" context for null
mapred.class=org.apache.hadoop.metrics.spi.NullContext
# Configuration of the "mapred" context for file
#mapred.class=org.apache.hadoop.metrics.file.FileContext
#mapred.period=10
#mapred.fileName=/tmp/mrmetrics.log
# Configuration of the "mapred" context for ganglia
# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext
# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
# mapred.period=10
# mapred.servers=localhost:8649
# Configuration of the "jvm" context for null
#jvm.class=org.apache.hadoop.metrics.spi.NullContext
# Configuration of the "jvm" context for file
#jvm.class=org.apache.hadoop.metrics.file.FileContext
#jvm.period=10
#jvm.fileName=/tmp/jvmmetrics.log
# Configuration of the "jvm" context for ganglia
# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
# jvm.period=10
# jvm.servers=localhost:8649
# Configuration of the "rpc" context for null
rpc.class=org.apache.hadoop.metrics.spi.NullContext
# Configuration of the "rpc" context for file
#rpc.class=org.apache.hadoop.metrics.file.FileContext
#rpc.period=10
#rpc.fileName=/tmp/rpcmetrics.log
# Configuration of the "rpc" context for ganglia
# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
# rpc.period=10
# rpc.servers=localhost:8649
# Configuration of the "ugi" context for null
ugi.class=org.apache.hadoop.metrics.spi.NullContext
# Configuration of the "ugi" context for file
#ugi.class=org.apache.hadoop.metrics.file.FileContext
#ugi.period=10
#ugi.fileName=/tmp/ugimetrics.log
# Configuration of the "ugi" context for ganglia
# ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext
# ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
# ugi.period=10
# ugi.servers=localhost:8649

@ -0,0 +1,44 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# syntax: [prefix].[source|sink].[instance].[options]
# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
# default sampling period, in seconds
*.period=10
# The namenode-metrics.out will contain metrics from all context
#namenode.sink.file.filename=namenode-metrics.out
# Specifying a special sampling period for namenode:
#namenode.sink.*.period=8
#datanode.sink.file.filename=datanode-metrics.out
# the following example split metrics of different
# context to different sinks (in this case files)
#jobtracker.sink.file_jvm.context=jvm
#jobtracker.sink.file_jvm.filename=jobtracker-jvm-metrics.out
#jobtracker.sink.file_mapred.context=mapred
#jobtracker.sink.file_mapred.filename=jobtracker-mapred-metrics.out
#tasktracker.sink.file.filename=tasktracker-metrics.out
#maptask.sink.file.filename=maptask-metrics.out
#reducetask.sink.file.filename=reducetask-metrics.out

@ -0,0 +1,103 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>dfs.nameservices</name>
<value>{{ hadoop['nameservice.id'] }}</value>
</property>
<property>
<name>dfs.ha.namenodes.{{ hadoop['nameservice.id'] }}</name>
<value>{{ groups.hadoop_masters | join(',') }}</value>
</property>
<property>
<name>dfs.blocksize</name>
<value>{{ hadoop['dfs.blocksize'] }}</value>
</property>
<property>
<name>dfs.permissions.superusergroup</name>
<value>{{ hadoop['dfs.permissions.superusergroup'] }}</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>{{ groups.zookeeper_servers | join(':' ~ hadoop['zookeeper.clientport'] + ',') }}:{{ hadoop['zookeeper.clientport'] }}</value>
</property>
{% for host in groups['hadoop_masters'] %}
<property>
<name>dfs.namenode.rpc-address.{{ hadoop['nameservice.id'] }}.{{ host }}</name>
<value>{{ host }}:{{ hadoop['fs.default.FS.port'] }}</value>
</property>
{% endfor %}
{% for host in groups['hadoop_masters'] %}
<property>
<name>dfs.namenode.http-address.{{ hadoop['nameservice.id'] }}.{{ host }}</name>
<value>{{ host }}:{{ hadoop['dfs.namenode.http.address.port'] }}</value>
</property>
{% endfor %}
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://{{ groups.qjournal_servers | join(':' ~ hadoop['qjournal.port'] + ';') }}:{{ hadoop['qjournal.port'] }}/{{ hadoop['nameservice.id'] }}</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>{{ hadoop['dfs_journalnode_edits_dir'] }}</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.{{ hadoop['nameservice.id'] }}</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.fencing.methods</name>
<value>shell(/bin/true )</value>
</property>
<property>
<name>dfs.ha.zkfc.port</name>
<value>{{ hadoop['dfs.ha.zkfc.port'] }}</value>
</property>
<property>
<name>dfs.datanode.address</name>
<value>0.0.0.0:{{ hadoop['dfs.datanode.address.port'] }}</value>
</property>
<property>
<name>dfs.datanode.http.address</name>
<value>0.0.0.0:{{ hadoop['dfs.datanode.http.address.port'] }}</value>
</property>
<property>
<name>dfs.datanode.ipc.address</name>
<value>0.0.0.0:{{ hadoop['dfs.datanode.ipc.address.port'] }}</value>
</property>
<property>
<name>dfs.replication</name>
<value>{{ hadoop['dfs.replication'] }}</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>{{ hadoop['dfs_namenode_name_dir'] | join(',') }}</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>{{ hadoop['dfs_datanode_data_dir'] | join(',') }}</value>
</property>
</configuration>

@ -0,0 +1,219 @@
# Copyright 2011 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Define some default values that can be overridden by system properties
hadoop.root.logger=INFO,console
hadoop.log.dir=.
hadoop.log.file=hadoop.log
# Define the root logger to the system property "hadoop.root.logger".
log4j.rootLogger=${hadoop.root.logger}, EventCounter
# Logging Threshold
log4j.threshold=ALL
# Null Appender
log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
#
# Rolling File Appender - cap space usage at 5gb.
#
hadoop.log.maxfilesize=256MB
hadoop.log.maxbackupindex=20
log4j.appender.RFA=org.apache.log4j.RollingFileAppender
log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
# Pattern format: Date LogLevel LoggerName LogMessage
log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
# Debugging Pattern format
#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
#
# Daily Rolling File Appender
#
log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
# Rollover at midnight
log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
# 30-day backup
#log4j.appender.DRFA.MaxBackupIndex=30
log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
# Pattern format: Date LogLevel LoggerName LogMessage
log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
# Debugging Pattern format
#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
#
# console
# Add "console" to rootlogger above if you want to use this
#
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
#
# TaskLog Appender
#
#Default values
hadoop.tasklog.taskid=null
hadoop.tasklog.iscleanup=false
hadoop.tasklog.noKeepSplits=4
hadoop.tasklog.totalLogFileSize=100
hadoop.tasklog.purgeLogSplits=true
hadoop.tasklog.logsRetainHours=12
log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
#
# HDFS block state change log from block manager
#
# Uncomment the following to suppress normal block state change
# messages from BlockManager in NameNode.
#log4j.logger.BlockStateChange=WARN
#
#Security appender
#
hadoop.security.logger=INFO,NullAppender
hadoop.security.log.maxfilesize=256MB
hadoop.security.log.maxbackupindex=20
log4j.category.SecurityLogger=${hadoop.security.logger}
hadoop.security.log.file=SecurityAuth-${user.name}.audit
log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
#
# Daily Rolling Security appender
#
log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
#
# hdfs audit logging
#
hdfs.audit.logger=INFO,NullAppender
hdfs.audit.log.maxfilesize=256MB
hdfs.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
#
# mapred audit logging
#
mapred.audit.logger=INFO,NullAppender
mapred.audit.log.maxfilesize=256MB
mapred.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}
# Custom Logging levels
#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
#log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG
# Jets3t library
log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
#
# Event Counter Appender
# Sends counts of logging messages at different severity levels to Hadoop Metrics.
#
log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
#
# Job Summary Appender
#
# Use following logger to send summary to separate file defined by
# hadoop.mapreduce.jobsummary.log.file :
# hadoop.mapreduce.jobsummary.logger=INFO,JSA
#
hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
hadoop.mapreduce.jobsummary.log.maxbackupindex=20
log4j.appender.JSA=org.apache.log4j.RollingFileAppender
log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
#
# Yarn ResourceManager Application Summary Log
#
# Set the ResourceManager summary log filename
#yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
# Set the ResourceManager summary log level and appender
#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
# Appender for ResourceManager Application Summary Log
# Requires the following properties to be set
# - hadoop.log.dir (Hadoop Log directory)
# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
#log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
#log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
#log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
#log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
#log4j.appender.RMSUMMARY.MaxFileSize=256MB
#log4j.appender.RMSUMMARY.MaxBackupIndex=20
#log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
#log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n

@ -0,0 +1,120 @@
<configuration>
<property>
<name>mapred.job.tracker</name>
<value>{{ hadoop['mapred.job.tracker.ha.servicename'] }}</value>
</property>
<property>
<name>mapred.jobtrackers.{{ hadoop['mapred.job.tracker.ha.servicename'] }}</name>
<value>{{ groups['hadoop_masters'] | join(',') }}</value>
<description>Comma-separated list of JobTracker IDs.</description>
</property>
<property>
<name>mapred.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>mapred.ha.zkfc.port</name>
<value>{{ hadoop['mapred.ha.zkfc.port'] }}</value>
</property>
<property>
<name>mapred.ha.fencing.methods</name>
<value>shell(/bin/true)</value>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>{{ groups.zookeeper_servers | join(':' ~ hadoop['zookeeper.clientport'] + ',') }}:{{ hadoop['zookeeper.clientport'] }}</value>
</property>
{% for host in groups['hadoop_masters'] %}
<property>
<name>mapred.jobtracker.rpc-address.{{ hadoop['mapred.job.tracker.ha.servicename'] }}.{{ host }}</name>
<value>{{ host }}:{{ hadoop['mapred.job.tracker.port'] }}</value>
</property>
{% endfor %}
{% for host in groups['hadoop_masters'] %}
<property>
<name>mapred.job.tracker.http.address.{{ hadoop['mapred.job.tracker.ha.servicename'] }}.{{ host }}</name>
<value>0.0.0.0:{{ hadoop['mapred.job.tracker.http.address.port'] }}</value>
</property>
{% endfor %}
{% for host in groups['hadoop_masters'] %}
<property>
<name>mapred.ha.jobtracker.rpc-address.{{ hadoop['mapred.job.tracker.ha.servicename'] }}.{{ host }}</name>
<value>{{ host }}:{{ hadoop['mapred.ha.jobtracker.rpc-address.port'] }}</value>
</property>
{% endfor %}
{% for host in groups['hadoop_masters'] %}
<property>
<name>mapred.ha.jobtracker.http-redirect-address.{{ hadoop['mapred.job.tracker.ha.servicename'] }}.{{ host }}</name>
<value>{{ host }}:{{ hadoop['mapred.job.tracker.http.address.port'] }}</value>
</property>
{% endfor %}
<property>
<name>mapred.jobtracker.restart.recover</name>
<value>true</value>
</property>
<property>
<name>mapred.job.tracker.persist.jobstatus.active</name>
<value>true</value>
</property>
<property>
<name>mapred.job.tracker.persist.jobstatus.hours</name>
<value>1</value>
</property>
<property>
<name>mapred.job.tracker.persist.jobstatus.dir</name>
<value>{{ hadoop['mapred_job_tracker_persist_jobstatus_dir'] }}</value>
</property>
<property>
<name>mapred.client.failover.proxy.provider.{{ hadoop['mapred.job.tracker.ha.servicename'] }}</name>
<value>org.apache.hadoop.mapred.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>mapred.client.failover.max.attempts</name>
<value>15</value>
</property>
<property>
<name>mapred.client.failover.sleep.base.millis</name>
<value>500</value>
</property>
<property>
<name>mapred.client.failover.sleep.max.millis</name>
<value>1500</value>
</property>
<property>
<name>mapred.client.failover.connection.retries</name>
<value>0</value>
</property>
<property>
<name>mapred.client.failover.connection.retries.on.timeouts</name>
<value>0</value>
</property>
<property>
<name>mapred.local.dir</name>
<value>{{ hadoop["mapred_local_dir"] | join(',') }}</value>
</property>
<property>
<name>mapred.task.tracker.http.address</name>
<value>0.0.0.0:{{ hadoop['mapred.task.tracker.http.address.port'] }}</value>
</property>
</configuration>

@ -0,0 +1,3 @@
{% for host in groups['hadoop_slaves'] %}
{{ host }}
{% endfor %}

@ -0,0 +1,80 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<configuration>
<property>
<name>ssl.client.truststore.location</name>
<value></value>
<description>Truststore to be used by clients like distcp. Must be
specified.
</description>
</property>
<property>
<name>ssl.client.truststore.password</name>
<value></value>
<description>Optional. Default value is "".
</description>
</property>
<property>
<name>ssl.client.truststore.type</name>
<value>jks</value>
<description>Optional. The keystore file format, default value is "jks".
</description>
</property>
<property>
<name>ssl.client.truststore.reload.interval</name>
<value>10000</value>
<description>Truststore reload check interval, in milliseconds.
Default value is 10000 (10 seconds).
</description>
</property>
<property>
<name>ssl.client.keystore.location</name>
<value></value>
<description>Keystore to be used by clients like distcp. Must be
specified.
</description>
</property>
<property>
<name>ssl.client.keystore.password</name>
<value></value>
<description>Optional. Default value is "".
</description>
</property>
<property>
<name>ssl.client.keystore.keypassword</name>
<value></value>
<description>Optional. Default value is "".
</description>
</property>
<property>
<name>ssl.client.keystore.type</name>
<value>jks</value>
<description>Optional. The keystore file format, default value is "jks".
</description>
</property>
</configuration>

@ -0,0 +1,77 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<configuration>
<property>
<name>ssl.server.truststore.location</name>
<value></value>
<description>Truststore to be used by NN and DN. Must be specified.
</description>
</property>
<property>
<name>ssl.server.truststore.password</name>
<value></value>
<description>Optional. Default value is "".
</description>
</property>
<property>
<name>ssl.server.truststore.type</name>
<value>jks</value>
<description>Optional. The keystore file format, default value is "jks".
</description>
</property>
<property>
<name>ssl.server.truststore.reload.interval</name>
<value>10000</value>
<description>Truststore reload check interval, in milliseconds.
Default value is 10000 (10 seconds).
</description>
</property>
<property>
<name>ssl.server.keystore.location</name>
<value></value>
<description>Keystore to be used by NN and DN. Must be specified.
</description>
</property>
<property>
<name>ssl.server.keystore.password</name>
<value></value>
<description>Must be specified.
</description>
</property>
<property>
<name>ssl.server.keystore.keypassword</name>
<value></value>
<description>Must be specified.
</description>
</property>
<property>
<name>ssl.server.keystore.type</name>
<value>jks</value>
<description>Optional. The keystore file format, default value is "jks".
</description>
</property>
</configuration>

@ -0,0 +1,41 @@
# Firewall configuration written by system-config-firewall
# Manual customization of this file is not recommended.
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
{% if 'hadoop_masters' in group_names %}
-A INPUT -p tcp --dport {{ hadoop['fs.default.FS.port'] }} -j ACCEPT
-A INPUT -p tcp --dport {{ hadoop['dfs.namenode.http.address.port'] }} -j ACCEPT
-A INPUT -p tcp --dport {{ hadoop['mapred.job.tracker.port'] }} -j ACCEPT
-A INPUT -p tcp --dport {{ hadoop['mapred.job.tracker.http.address.port'] }} -j ACCEPT
-A INPUT -p tcp --dport {{ hadoop['mapred.ha.jobtracker.rpc-address.port'] }} -j ACCEPT
-A INPUT -p tcp --dport {{ hadoop['mapred.ha.zkfc.port'] }} -j ACCEPT
-A INPUT -p tcp --dport {{ hadoop['dfs.ha.zkfc.port'] }} -j ACCEPT
{% endif %}
{% if 'hadoop_slaves' in group_names %}
-A INPUT -p tcp --dport {{ hadoop['dfs.datanode.address.port'] }} -j ACCEPT
-A INPUT -p tcp --dport {{ hadoop['dfs.datanode.http.address.port'] }} -j ACCEPT
-A INPUT -p tcp --dport {{ hadoop['dfs.datanode.ipc.address.port'] }} -j ACCEPT
-A INPUT -p tcp --dport {{ hadoop['mapred.task.tracker.http.address.port'] }} -j ACCEPT
{% endif %}
{% if 'qjournal_servers' in group_names %}
-A INPUT -p tcp --dport {{ hadoop['qjournal.port'] }} -j ACCEPT
-A INPUT -p tcp --dport {{ hadoop['qjournal.http.port'] }} -j ACCEPT
{% endif %}
{% if 'zookeeper_servers' in group_names %}
-A INPUT -p tcp --dport {{ hadoop['zookeeper.clientport'] }} -j ACCEPT
-A INPUT -p tcp --dport {{ hadoop['zookeeper.leader_port'] }} -j ACCEPT
-A INPUT -p tcp --dport {{ hadoop['zookeeper.election_port'] }} -j ACCEPT
{% endif %}
-A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
-A INPUT -p icmp -j ACCEPT
-A INPUT -i lo -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT
-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A FORWARD -j REJECT --reject-with icmp-host-prohibited
COMMIT

@ -0,0 +1,14 @@
---
# Handlers for the hadoop master services
- name: restart hadoop master services
service: name=${item} state=restarted
with_items:
- hadoop-0.20-mapreduce-jobtracker
- hadoop-hdfs-namenode
- name: restart hadoopha master services
service: name=${item} state=restarted
with_items:
- hadoop-0.20-mapreduce-jobtrackerha
- hadoop-hdfs-namenode

@ -0,0 +1,76 @@
---
# Playbook for Hadoop master servers
- name: Install the namenode and jobtracker packages
yum: name=${item} state=installed
with_items:
- hadoop-0.20-mapreduce-jobtracker
- hadoop-hdfs-namenode
when_set: $ha_disabled
- name: Install the namenode and jobtracker packages for HA
yum: name=${item} state=installed
with_items:
- hadoop-0.20-mapreduce-jobtrackerha
- hadoop-hdfs-namenode
- hadoop-hdfs-zkfc
- hadoop-0.20-mapreduce-zkfc
when_unset: $ha_disabled
- name: Copy the hadoop configuration files
template: src=roles/common/templates/hadoop_ha_conf/${item}.j2 dest=/etc/hadoop/conf/${item}
with_items:
- core-site.xml
- hadoop-metrics.properties
- hadoop-metrics2.properties
- hdfs-site.xml
- log4j.properties
- mapred-site.xml
- slaves
- ssl-client.xml.example
- ssl-server.xml.example
when_unset: $ha_disabled
notify: restart hadoopha master services
- name: Copy the hadoop configuration files for non-HA
template: src=roles/common/templates/hadoop_conf/${item}.j2 dest=/etc/hadoop/conf/${item}
with_items:
- core-site.xml
- hadoop-metrics.properties
- hadoop-metrics2.properties
- hdfs-site.xml
- log4j.properties
- mapred-site.xml
- slaves
- ssl-client.xml.example
- ssl-server.xml.example
when_set: $ha_disabled
notify: restart hadoop master services
- name: Create the data directory for the namenode metadata
file: path=${item} owner=hdfs group=hdfs state=directory
with_items: ${hadoop.dfs_namenode_name_dir}
- name: Create the data directory for the jobtracker ha
file: path=${item} owner=mapred group=mapred state=directory
with_items: ${hadoop.mapred_job_tracker_persist_jobstatus_dir}
when_unset: $ha_disabled
- name: Format the namenode
shell: creates=/usr/lib/hadoop/namenode.formatted su - hdfs -c "hadoop namenode -format"; touch /usr/lib/hadoop/namenode.formatted
- name: start hadoop namenode services
service: name=${item} state=started
with_items:
- hadoop-hdfs-namenode
- name: Give permissions for mapred users
shell: creates=/usr/lib/hadoop/fs.initialized su - hdfs -c "hadoop fs -chown hdfs:hadoop /"; su - hdfs -c "hadoop fs -chmod 0774 /"; touch /usr/lib/hadoop/fs.initialized
when_set: $ha_disabled
- name: start hadoop jobtracker services
service: name=${item} state=started
with_items:
- hadoop-0.20-mapreduce-jobtracker
when_set: $ha_disabled

@ -0,0 +1,5 @@
---
# Playbook for Hadoop master primary servers
- include: hadoop_master.yml tags=no_ha

@ -0,0 +1,14 @@
---
# Handlers for the hadoop master services
- name: restart hadoop master services
service: name=${item} state=restarted
with_items:
- hadoop-0.20-mapreduce-jobtracker
- hadoop-hdfs-namenode
- name: restart hadoopha master services
service: name=${item} state=restarted
with_items:
- hadoop-0.20-mapreduce-jobtrackerha
- hadoop-hdfs-namenode

@ -0,0 +1,73 @@
---
# Playbook for Hadoop master secondary server
- name: Install the namenode and jobtracker packages
yum: name=${item} state=installed
with_items:
- hadoop-0.20-mapreduce-jobtrackerha
- hadoop-hdfs-namenode
- hadoop-hdfs-zkfc
- hadoop-0.20-mapreduce-zkfc
- name: Copy the hadoop configuration files
template: src=roles/common/templates/hadoop_ha_conf/${item}.j2 dest=/etc/hadoop/conf/${item}
with_items:
- core-site.xml
- hadoop-metrics.properties
- hadoop-metrics2.properties
- hdfs-site.xml
- log4j.properties
- mapred-site.xml
- slaves
- ssl-client.xml.example
- ssl-server.xml.example
when_unset: $ha_disabled
notify: restart hadoopha master services
- name: Create the data directory for the namenode metadata
file: path=${item} owner=hdfs group=hdfs state=directory
with_items: ${hadoop.dfs_namenode_name_dir}
- name: Create the data directory for the jobtracker ha
file: path=${item} owner=mapred group=mapred state=directory
with_items: ${hadoop.mapred_job_tracker_persist_jobstatus_dir}
- name: Initialize the secondary namenode
shell: creates=/usr/lib/hadoop/namenode.formatted su - hdfs -c "hadoop namenode -bootstrapStandby"; touch /usr/lib/hadoop/namenode.formatted
- name: start hadoop namenode services
service: name=${item} state=started
with_items:
- hadoop-hdfs-namenode
- name: Initialize the zkfc for namenode
shell: creates=/usr/lib/hadoop/zkfc.formatted su - hdfs -c "hdfs zkfc -formatZK"; touch /usr/lib/hadoop/zkfc.formatted
register: nn_result
- name: restart zkfc for namenode
service: name=hadoop-hdfs-zkfc state=restarted
delegate_to: ${item}
with_items: ${groups.hadoop_masters}
when_set: $nn_result and $nn_result.changed
- name: Give permissions for mapred users
shell: creates=/usr/lib/hadoop/fs.initialized su - hdfs -c "hadoop fs -chown hdfs:hadoop /"; su - hdfs -c "hadoop fs -chmod 0774 /"; touch /usr/lib/hadoop/fs.initialized
- name: Initialize the zkfc for jobtracker
shell: creates=/usr/lib/hadoop/zkfcjob.formatted su - mapred -c "hadoop mrzkfc -formatZK"; touch /usr/lib/hadoop/zkfcjob.formatted
register: jt_result
- name: restart zkfc for jobtracker
service: name=hadoop-0.20-mapreduce-zkfc state=restarted
delegate_to: ${item}
with_items: ${groups.hadoop_masters}
when_set: $jt_result and $jt_result.changed
- name: start hadoop Jobtracker services
service: name=hadoop-0.20-mapreduce-jobtrackerha state=started
delegate_to: ${item}
with_items: ${groups.hadoop_masters}
when_set: $jt_result and $jt_result.changed

@ -0,0 +1,4 @@
---
# Playbook for Hadoop master secondary server
- include: hadoop_secondary.yml

@ -0,0 +1,8 @@
---
# Handlers for the hadoop slave services
- name: restart hadoop slave services
service: name=${item} state=restarted
with_items:
- hadoop-0.20-mapreduce-tasktracker
- hadoop-hdfs-datanode

@ -0,0 +1,4 @@
---
# Playbook for Hadoop slave servers
- include: slaves.yml tags=slaves,no_ha

@ -0,0 +1,53 @@
---
# Playbook for Hadoop slave servers
- name: Install the datanode and tasktracker packages
yum: name=${item} state=installed
with_items:
- hadoop-0.20-mapreduce-tasktracker
- hadoop-hdfs-datanode
- name: Copy the hadoop configuration files
template: src=roles/common/templates/hadoop_ha_conf/${item}.j2 dest=/etc/hadoop/conf/${item}
with_items:
- core-site.xml
- hadoop-metrics.properties
- hadoop-metrics2.properties
- hdfs-site.xml
- log4j.properties
- mapred-site.xml
- slaves
- ssl-client.xml.example
- ssl-server.xml.example
when_unset: $ha_disabled
notify: restart hadoop slave services
- name: Copy the hadoop configuration files for non-HA
template: src=roles/common/templates/hadoop_conf/${item}.j2 dest=/etc/hadoop/conf/${item}
with_items:
- core-site.xml
- hadoop-metrics.properties
- hadoop-metrics2.properties
- hdfs-site.xml
- log4j.properties
- mapred-site.xml
- slaves
- ssl-client.xml.example
- ssl-server.xml.example
when_set: $ha_disabled
notify: restart hadoop slave services
- name: Create the data directory for the slave nodes to store the data
file: path=${item} owner=hdfs group=hdfs state=directory
with_items: ${hadoop.dfs_datanode_data_dir}
- name: Create the data directory for the slave nodes for mapreduce
file: path=${item} owner=mapred group=mapred state=directory
with_items: ${hadoop.mapred_local_dir}
- name: start hadoop slave services
service: name=${item} state=restarted
with_items:
- hadoop-0.20-mapreduce-tasktracker
- hadoop-hdfs-datanode

@ -0,0 +1,5 @@
---
# The journal node handlers
- name: restart qjournal services
service: name=hadoop-hdfs-journalnode state=restarted

@ -0,0 +1,38 @@
---
# Playbook for the qjournal nodes
- name: Install the qjournal package
yum: name=hadoop-hdfs-journalnode state=installed
- name: Create the directory for the journal node edits
file: path=${hadoop.dfs_journalnode_edits_dir} state=directory owner=hdfs group=hdfs
- name: Copy the hadoop configuration files
template: src=roles/common/templates/hadoop_ha_conf/${item}.j2 dest=/etc/hadoop/conf/${item}
with_items:
- core-site.xml
- hadoop-metrics.properties
- hadoop-metrics2.properties
- hdfs-site.xml
- log4j.properties
- mapred-site.xml
- slaves
- ssl-client.xml.example
- ssl-server.xml.example
when_unset: $ha_disabled
notify: restart qjournal services
- name: Copy the non-HA hadoop configuration files
template: src=roles/common/templates/hadoop_conf/${item}.j2 dest=/etc/hadoop/conf/${item}
with_items:
- core-site.xml
- hadoop-metrics.properties
- hadoop-metrics2.properties
- hdfs-site.xml
- log4j.properties
- mapred-site.xml
- slaves
- ssl-client.xml.example
- ssl-server.xml.example
when_set: $ha_disabled
notify: restart qjournal services

@ -0,0 +1,5 @@
---
# Handler for the zookeeper services
- name: restart zookeeper
service: name=zookeeper-server state=restarted

@ -0,0 +1,13 @@
---
# The plays for the zookeeper daemons
- name: Install the zookeeper package
yum: name=zookeeper-server state=installed
- name: Copy the configuration file for zookeeper
template: src=zoo.cfg.j2 dest=/etc/zookeeper/conf/zoo.cfg
notify: restart zookeeper
- name: Initialize the zookeeper
shell: creates=/var/lib/zookeeper/myid service zookeeper-server init --myid=${zoo_id}

@ -0,0 +1,9 @@
tickTime=2000
dataDir=/var/lib/zookeeper/
clientPort={{ hadoop['zookeeper.clientport'] }}
initLimit=5
syncLimit=2
{% for host in groups['zookeeper_servers'] %}
server.{{ hostvars[host].zoo_id }}={{ host }}:{{ hadoop['zookeeper.leader_port'] }}:{{ hadoop['zookeeper.election_port'] }}
{% endfor %}
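# A rendering sketch of the server lines produced by the loop above, assuming
# three hypothetical hosts zk1, zk2, zk3 carrying zoo_id 1-3 and leader/election
# ports of 2888/3888:
# server.1=zk1:2888:3888
# server.2=zk2:2888:3888
# server.3=zk3:2888:3888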

@ -0,0 +1,6 @@
---
# Vars for Zookeeper
clientport: 2181
leader_port: 2888
election_port: 3888

@ -0,0 +1,41 @@
---
# The main file to deploy the site
- hosts: all
vars_files:
- hadoop_vars/hadoop
roles:
- common
- hosts: zookeeper_servers
vars_files:
- hadoop_vars/hadoop
roles:
- zookeeper_servers
- hosts: qjournal_servers
vars_files:
- hadoop_vars/hadoop
roles:
- qjournal_servers
- hosts: hadoop_master_primary
vars_files:
- hadoop_vars/hadoop
roles:
- hadoop_primary
- hosts: hadoop_master_secondary
vars_files:
- hadoop_vars/hadoop
roles:
- hadoop_secondary
- hosts: hadoop_slaves
vars_files:
- hadoop_vars/hadoop
roles:
- hadoop_slaves
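# A minimal usage sketch, assuming the inventory for this deployment is the
# "hosts" file kept alongside this playbook:
#   ansible-playbook -i hosts site.yml
# The HA/non-HA branching is driven by the ha_disabled variable checked by the
# when_set/when_unset guards in the roles; one (assumed) way to run a non-HA
# deployment is to supply it as an extra var:
#   ansible-playbook -i hosts site.yml -e "ha_disabled=true"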