2.11核心文件配置
注:这里特别提醒,配置文件中的路径在启动集群之前必须存在(若不存在,请事先创建)。下面给出本篇文章需要创建的路径脚本,命令如下:
-
mkdir -p /home/hadoop/tmp
-
mkdir -p /home/hadoop/data/tmp/journal
-
mkdir -p /home/hadoop/data/dfs/name
-
mkdir -p /home/hadoop/data/dfs/data
-
mkdir -p /home/hadoop/data/yarn/local
-
mkdir -p /home/hadoop/log/yarn
-
<?xml version="1.0" encoding="UTF-8"?>
-
<configuration>
-
<property>
-
<name>fs.defaultFS</name>
-
<value>hdfs://cluster1</value>
-
</property>
-
-
<property>
-
<name>io.file.buffer.size</name>
-
<value>131072</value>
-
</property>
-
<property>
-
<name>hadoop.tmp.dir</name>
-
<value>/home/hadoop/tmp</value>
-
</property>
-
<property>
-
<name>hadoop.proxyuser.hduser.hosts</name>
-
<value>*</value>
-
</property>
-
<property>
-
<name>hadoop.proxyuser.hduser.groups</name>
-
<value>*</value>
-
</property>
-
<property>
-
<name>ha.zookeeper.quorum</name>
-
<value>dn1:2181,dn2:2181,dn3:2181</value>
-
</property>
-
</configuration>
-
<?xml version="1.0" encoding="UTF-8"?>
-
<configuration>
-
<property>
-
<name>dfs.nameservices</name>
-
<value>cluster1</value>
-
</property>
-
<property>
-
<name>dfs.ha.namenodes.cluster1</name>
-
<value>nna,nns</value>
-
</property>
-
<property>
-
<name>dfs.namenode.rpc-address.cluster1.nna</name>
-
<value>nna:9000</value>
-
</property>
-
<property>
-
<name>dfs.namenode.rpc-address.cluster1.nns</name>
-
<value>nns:9000</value>
-
</property>
-
-
<property>
-
<name>dfs.namenode.http-address.cluster1.nna</name>
-
<value>nna:50070</value>
-
</property>
-
-
<property>
-
<name>dfs.namenode.http-address.cluster1.nns</name>
-
<value>nns:50070</value>
-
</property>
-
<property>
-
<name>dfs.namenode.shared.edits.dir</name>
-
<value>qjournal://dn1:8485;dn2:8485;dn3:8485/cluster1</value>
-
</property>
-
-
<property>
-
<name>dfs.client.failover.proxy.provider.cluster1</name>
-
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
-
</property>
-
<property>
-
<name>dfs.ha.fencing.methods</name>
-
<value>sshfence</value>
-
</property>
-
<property>
-
<name>dfs.ha.fencing.ssh.private-key-files</name>
-
<value>/home/hadoop/.ssh/id_rsa</value>
-
</property>
-
<property>
-
<name>dfs.journalnode.edits.dir</name>
-
<value>/home/hadoop/data/tmp/journal</value>
-
</property>
-
<property>
-
<name>dfs.ha.automatic-failover.enabled</name>
-
<value>true</value>
-
</property>
-
<property>
-
<name>dfs.namenode.name.dir</name>
-
<value>/home/hadoop/data/dfs/name</value>
-
</property>
-
<property>
-
<name>dfs.datanode.data.dir</name>
-
<value>/home/hadoop/data/dfs/data</value>
-
</property>
-
<property>
-
<name>dfs.replication</name>
-
<value>3</value>
-
</property>
-
<property>
-
<name>dfs.webhdfs.enabled</name>
-
<value>true</value>
-
</property>
-
-
<property>
-
<name>dfs.journalnode.http-address</name>
-
<value>0.0.0.0:8480</value>
-
</property>
-
<property>
-
<name>dfs.journalnode.rpc-address</name>
-
<value>0.0.0.0:8485</value>
-
</property>
-
<property>
-
<name>ha.zookeeper.quorum</name>
-
<value>dn1:2181,dn2:2181,dn3:2181</value>
-
</property>
-
-
</configuration>
-
<?xml version="1.0" encoding="UTF-8"?>
-
<configuration>
-
<property>
-
<name>mapreduce.framework.name</name>
-
<value>yarn</value>
-
</property>
-
<property>
-
<name>mapreduce.jobhistory.address</name>
-
<value>nna:10020</value>
-
</property>
-
<property>
-
<name>mapreduce.jobhistory.webapp.address</name>
-
<value>nna:19888</value>
-
</property>
-
</configuration>
-
<?xml version="1.0" encoding="UTF-8"?>
-
<configuration>
-
<property>
-
<name>yarn.resourcemanager.connect.retry-interval.ms</name>
-
<value>2000</value>
-
</property>
-
<property>
-
<name>yarn.resourcemanager.ha.enabled</name>
-
<value>true</value>
-
</property>
-
<property>
-
<name>yarn.resourcemanager.ha.rm-ids</name>
-
<value>rm1,rm2</value>
-
</property>
-
<property>
-
<name>ha.zookeeper.quorum</name>
-
<value>dn1:2181,dn2:2181,dn3:2181</value>
-
</property>
-
-
<property>
-
<name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
-
<value>true</value>
-
</property>
-
<property>
-
<name>yarn.resourcemanager.hostname.rm1</name>
-
<value>nna</value>
-
</property>
-
-
<property>
-
<name>yarn.resourcemanager.hostname.rm2</name>
-
<value>nns</value>
-
</property>
-
<!--在namenode1上配置rm1,在namenode2上配置rm2,注意:一般都喜欢把配置好的文件远程复制到其它机器上,但这个在YARN的另一个机器上一定要修改 -->
-
<property>
-
<name>yarn.resourcemanager.ha.id</name>
-
<value>rm1</value>
-
</property>
-
<!--开启自动恢复功能 -->
-
<property>
-
<name>yarn.resourcemanager.recovery.enabled</name>
-
<value>true</value>
-
</property>
-
<!--配置与zookeeper的连接地址 -->
-
<property>
-
<name>yarn.resourcemanager.zk-state-store.address</name>
-
<value>dn1:2181,dn2:2181,dn3:2181</value>
-
</property>
-
<property>
-
<name>yarn.resourcemanager.store.class</name>
-
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
-
</property>
-
<property>
-
<name>yarn.resourcemanager.zk-address</name>
-
<value>dn1:2181,dn2:2181,dn3:2181</value>
-
</property>
-
<property>
-
<name>yarn.resourcemanager.cluster-id</name>
-
<value>cluster1-yarn</value>
-
</property>
-
<!--schelduler失联等待连接时间 -->
-
<property>
-
<name>yarn.app.mapreduce.am.scheduler.connection.wait.interval-ms</name>
-
<value>5000</value>
-
</property>
-
<!--配置rm1 -->
-
<property>
-
<name>yarn.resourcemanager.address.rm1</name>
-
<value>nna:8132</value>
-
</property>
-
<property>
-
<name>yarn.resourcemanager.scheduler.address.rm1</name>
-
<value>nna:8130</value>
-
</property>
-
<property>
-
<name>yarn.resourcemanager.webapp.address.rm1</name>
-
<value>nna:8188</value>
-
</property>
-
<property>
-
<name>yarn.resourcemanager.resource-tracker.address.rm1</name>
-
<value>nna:8131</value>
-
</property>
-
<property>
-
<name>yarn.resourcemanager.admin.address.rm1</name>
-
<value>nna:8033</value>
-
</property>
-
<property>
-
<name>yarn.resourcemanager.ha.admin.address.rm1</name>
-
<value>nna:23142</value>
-
</property>
-
<!--配置rm2 -->
-
<property>
-
<name>yarn.resourcemanager.address.rm2</name>
-
<value>nns:8132</value>
-
</property>
-
<property>
-
<name>yarn.resourcemanager.scheduler.address.rm2</name>
-
<value>nns:8130</value>
-
</property>
-
<property>
-
<name>yarn.resourcemanager.webapp.address.rm2</name>
-
<value>nns:8188</value>
-
</property>
-
<property>
-
<name>yarn.resourcemanager.resource-tracker.address.rm2</name>
-
<value>nns:8131</value>
-
</property>
-
<property>
-
<name>yarn.resourcemanager.admin.address.rm2</name>
-
<value>nns:8033</value>
-
</property>
-
<property>
-
<name>yarn.resourcemanager.ha.admin.address.rm2</name>
-
<value>nns:23142</value>
-
</property>
-
<property>
-
<name>yarn.nodemanager.aux-services</name>
-
<value>mapreduce_shuffle</value>
-
</property>
-
<property>
-
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
-
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
-
</property>
-
<property>
-
<name>yarn.nodemanager.local-dirs</name>
-
<value>/home/hadoop/data/yarn/local</value>
-
</property>
-
<property>
-
<name>yarn.nodemanager.log-dirs</name>
-
<value>/home/hadoop/log/yarn</value>
-
</property>
-
<property>
-
<name>mapreduce.shuffle.port</name>
-
<value>23080</value>
-
</property>
-
<!--故障处理类 -->
-
<property>
-
<name>yarn.client.failover-proxy-provider</name>
-
<value>org.apache.hadoop.yarn.client.ConfiguredRMFailoverProxyProvider</value>
-
</property>
-
<property>
-
<name>yarn.resourcemanager.ha.automatic-failover.zk-base-path</name>
-
<value>/yarn-leader-election</value>
-
</property>
-
</configuration>
-
# The java implementation to use.
-
export JAVA_HOME=/usr/java/jdk1.7
-
# some Java parameters
-
export JAVA_HOME=/usr/java/jdk1.7
修改hadoop安装目录下的slaves文件:
-
dn1
-
dn2
-
dn3
2.13启动命令(hdfs和yarn的相关命令)
由于我们配置了QJM,所以我们需要先启动QJM的服务,启动顺序如下所示:
-
进入到DN节点,启动zk的服务:zkServer.sh start,之后可以输入zkServer.sh status查看启动状态,本次我们配置了三个DN节点,会出现一个leader和两个follower。输入jps,会显示启动进程:QuorumPeerMain
-
在NN节点上(选一台即可,这里我选择的是预选为NNA的节点),启动journalnode服务,命令如下:hadoop-daemons.sh start journalnode。或者单独进入到每个DN输入启动命令:hadoop-daemon.sh start journalnode。输入jps显示启动进程:JournalNode。
-
接着,若是配置完成后首次启动,需要格式化HDFS,命令如下:hadoop namenode -format。
-
之后我们需要格式化ZK,命令如下:hdfs zkfc -formatZK。
-
接着我们启动hdfs和yarn,命令如下:start-dfs.sh和start-yarn.sh,我们在nna输入jps查看进程,显示如下:DFSZKFailoverController,NameNode,ResourceManager。
-
接着我们在NNS输入jps查看,发现只有DFSZKFailoverController进程,这里我们需要手动启动NNS上的namenode和ResourceManager进程,命令如下:hadoop-daemon.sh start namenode和yarn-daemon.sh start resourcemanager。需要注意的是,在NNS上的yarn-site.xml中,需要配置指向NNS,属性配置为rm2,在NNA中配置的是rm1。
-
最后我们需要在NNS节点上同步NNA节点的元数据,命令如下:hdfs namenode -bootstrapStandby,若执行正常,日志最后显示如下信息:
-
15/02/21 10:30:59 INFO common.Storage: Storage directory /home/hadoop/data/dfs/name has been successfully formatted.
-
15/02/21 10:30:59 WARN common.Util: Path /home/hadoop/data/dfs/name should be specified as a URI in configuration files. Please update hdfs configuration.
-
15/02/21 10:30:59 WARN common.Util: Path /home/hadoop/data/dfs/name should be specified as a URI in configuration files. Please update hdfs configuration.
-
15/02/21 10:31:00 INFO namenode.TransferFsImage: Opening connection to http://nna:50070/imagetransfer?getimage=1&txid=0&storageInfo=-60:1079068934:0:CID-1dd0c11e-b27e-4651-aad6-73bc7dd820bd
-
15/02/21 10:31:01 INFO namenode.TransferFsImage: Image Transfer timeout configured to 60000 milliseconds
-
15/02/21 10:31:01 INFO namenode.TransferFsImage: Transfer took 0.01s at 0.00 KB/s
-
15/02/21 10:31:01 INFO namenode.TransferFsImage: Downloaded file fsimage.ckpt_0000000000000000000 size 353 bytes.
-
15/02/21 10:31:01 INFO util.ExitUtil: Exiting with status 0
-
15/02/21 10:31:01 INFO namenode.NameNode: SHUTDOWN_MSG: /************************************************************ SHUTDOWN_MSG: Shutting down NameNode at nns/10.211.55.13 ************************************************************/
2.14HA的切换
由于我配置的是自动切换,若NNA节点宕掉,NNS节点会立即由standby状态切换为active状态。若配置的是手动切换模式,可以输入如下命令进行人工切换:
-
hdfs haadmin -failover --forcefence --forceactive nna nns
这条命令的意思是,将nna变成standby,nns变成active。而且手动状态下需要重启服务。
2.15效果截图
3.总结
这篇文章就叙述到这里,若在配置过程中有什么疑问或问题,可以加入QQ群讨论或发送邮件给我,我会尽我所能为您解答,与君共勉!
(责任编辑:IT) |