当前位置: > Linux集群 > Hadoop >

配置高可用的Hadoop平台(2)

时间:2015-02-25 23:03来源:cnblogs.com 作者:哥不是小萝莉





2.11核心文件配置

  注:这里特别提醒,配置文件中的路径在启动集群之前,得存在(若不存在,请事先创建)。下面给出本篇文章需要创建的路径脚本,命令如下:


  1. mkdir -p /home/hadoop/tmp
  2. mkdir -p /home/hadoop/data/tmp/journal
  3. mkdir -p /home/hadoop/data/dfs/name
  4. mkdir -p /home/hadoop/data/dfs/data
  5. mkdir -p /home/hadoop/data/yarn/local
  6. mkdir -p /home/hadoop/log/yarn
  • core-site.xml

  1. <?xml version="1.0" encoding="UTF-8"?>
  2. <configuration>
  3. <property>
  4. <name>fs.defaultFS</name>
  5. <value>hdfs://cluster1</value>
  6. </property>
  7.  
  8. <property>
  9. <name>io.file.buffer.size</name>
  10. <value>131072</value>
  11. </property>
  12. <property>
  13. <name>hadoop.tmp.dir</name>
  14. <value>/home/hadoop/tmp</value>
  15. </property>
  16. <property>
  17. <name>hadoop.proxyuser.hduser.hosts</name>
  18. <value>*</value>
  19. </property>
  20. <property>
  21. <name>hadoop.proxyuser.hduser.groups</name>
  22. <value>*</value>
  23. </property>
  24. <property>
  25. <name>ha.zookeeper.quorum</name>
  26. <value>dn1:2181,dn2:2181,dn3:2181</value>
  27. </property>
  28. </configuration>
  • hdfs-site.xml

  1. <?xml version="1.0" encoding="UTF-8"?>
  2. <configuration>
  3. <property>
  4. <name>dfs.nameservices</name>
  5. <value>cluster1</value>
  6. </property>
  7. <property>
  8. <name>dfs.ha.namenodes.cluster1</name>
  9. <value>nna,nns</value>
  10. </property>
  11. <property>
  12. <name>dfs.namenode.rpc-address.cluster1.nna</name>
  13. <value>nna:9000</value>
  14. </property>
  15. <property>
  16. <name>dfs.namenode.rpc-address.cluster1.nns</name>
  17. <value>nns:9000</value>
  18. </property>
  19.  
  20. <property>
  21. <name>dfs.namenode.http-address.cluster1.nna</name>
  22. <value>nna:50070</value>
  23. </property>
  24.  
  25. <property>
  26. <name>dfs.namenode.http-address.cluster1.nns</name>
  27. <value>nns:50070</value>
  28. </property>
  29. <property>
  30. <name>dfs.namenode.shared.edits.dir</name>
  31. <value>qjournal://dn1:8485;dn2:8485;dn3:8485/cluster1</value>
  32. </property>
  33.  
  34. <property>
  35. <name>dfs.client.failover.proxy.provider.cluster1</name>
  36. <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  37. </property>
  38. <property>
  39. <name>dfs.ha.fencing.methods</name>
  40. <value>sshfence</value>
  41. </property>
  42. <property>
  43. <name>dfs.ha.fencing.ssh.private-key-files</name>
  44. <value>/home/hadoop/.ssh/id_rsa</value>
  45. </property>
  46. <property>
  47. <name>dfs.journalnode.edits.dir</name>
  48. <value>/home/hadoop/data/tmp/journal</value>
  49. </property>
  50. <property>
  51. <name>dfs.ha.automatic-failover.enabled</name>
  52. <value>true</value>
  53. </property>
  54. <property>
  55. <name>dfs.namenode.name.dir</name>
  56. <value>/home/hadoop/data/dfs/name</value>
  57. </property>
  58. <property>
  59. <name>dfs.datanode.data.dir</name>
  60. <value>/home/hadoop/data/dfs/data</value>
  61. </property>
  62. <property>
  63. <name>dfs.replication</name>
  64. <value>3</value>
  65. </property>
  66. <property>
  67. <name>dfs.webhdfs.enabled</name>
  68. <value>true</value>
  69. </property>
  70.  
  71. <property>
  72. <name>dfs.journalnode.http-address</name>
  73. <value>0.0.0.0:8480</value>
  74. </property>
  75. <property>
  76. <name>dfs.journalnode.rpc-address</name>
  77. <value>0.0.0.0:8485</value>
  78. </property>
  79. <property>
  80. <name>ha.zookeeper.quorum</name>
  81. <value>dn1:2181,dn2:2181,dn3:2181</value>
  82. </property>
  83.  
  84. </configuration>
  • mapred-site.xml

  1. <?xml version="1.0" encoding="UTF-8"?>
  2. <configuration>
  3. <property>
  4. <name>mapreduce.framework.name</name>
  5. <value>yarn</value>
  6. </property>
  7. <property>
  8. <name>mapreduce.jobhistory.address</name>
  9. <value>nna:10020</value>
  10. </property>
  11. <property>
  12. <name>mapreduce.jobhistory.webapp.address</name>
  13. <value>nna:19888</value>
  14. </property>
  15. </configuration>
  • yarn-site.xml

  1. <?xml version="1.0" encoding="UTF-8"?>
  2. <configuration>
  3. <property>
  4. <name>yarn.resourcemanager.connect.retry-interval.ms</name>
  5. <value>2000</value>
  6. </property>
  7. <property>
  8. <name>yarn.resourcemanager.ha.enabled</name>
  9. <value>true</value>
  10. </property>
  11. <property>
  12. <name>yarn.resourcemanager.ha.rm-ids</name>
  13. <value>rm1,rm2</value>
  14. </property>
  15. <property>
  16. <name>ha.zookeeper.quorum</name>
  17. <value>dn1:2181,dn2:2181,dn3:2181</value>
  18. </property>
  19.  
  20. <property>
  21. <name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
  22. <value>true</value>
  23. </property>
  24. <property>
  25. <name>yarn.resourcemanager.hostname.rm1</name>
  26. <value>nna</value>
  27. </property>
  28.  
  29. <property>
  30. <name>yarn.resourcemanager.hostname.rm2</name>
  31. <value>nns</value>
  32. </property>
  33. <!--在namenode1上配置rm1,在namenode2上配置rm2,注意:一般都喜欢把配置好的文件远程复制到其它机器上,但这个在YARN的另一个机器上一定要修改 -->
  34. <property>
  35. <name>yarn.resourcemanager.ha.id</name>
  36. <value>rm1</value>
  37. </property>
  38. <!--开启自动恢复功能 -->
  39. <property>
  40. <name>yarn.resourcemanager.recovery.enabled</name>
  41. <value>true</value>
  42. </property>
  43. <!--配置与zookeeper的连接地址 -->
  44. <property>
  45. <name>yarn.resourcemanager.zk-state-store.address</name>
  46. <value>dn1:2181,dn2:2181,dn3:2181</value>
  47. </property>
  48. <property>
  49. <name>yarn.resourcemanager.store.class</name>
  50. <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
  51. </property>
  52. <property>
  53. <name>yarn.resourcemanager.zk-address</name>
  54. <value>dn1:2181,dn2:2181,dn3:2181</value>
  55. </property>
  56. <property>
  57. <name>yarn.resourcemanager.cluster-id</name>
  58. <value>cluster1-yarn</value>
  59. </property>
  60. <!--schelduler失联等待连接时间 -->
  61. <property>
  62. <name>yarn.app.mapreduce.am.scheduler.connection.wait.interval-ms</name>
  63. <value>5000</value>
  64. </property>
  65. <!--配置rm1 -->
  66. <property>
  67. <name>yarn.resourcemanager.address.rm1</name>
  68. <value>nna:8132</value>
  69. </property>
  70. <property>
  71. <name>yarn.resourcemanager.scheduler.address.rm1</name>
  72. <value>nna:8130</value>
  73. </property>
  74. <property>
  75. <name>yarn.resourcemanager.webapp.address.rm1</name>
  76. <value>nna:8188</value>
  77. </property>
  78. <property>
  79. <name>yarn.resourcemanager.resource-tracker.address.rm1</name>
  80. <value>nna:8131</value>
  81. </property>
  82. <property>
  83. <name>yarn.resourcemanager.admin.address.rm1</name>
  84. <value>nna:8033</value>
  85. </property>
  86. <property>
  87. <name>yarn.resourcemanager.ha.admin.address.rm1</name>
  88. <value>nna:23142</value>
  89. </property>
  90. <!--配置rm2 -->
  91. <property>
  92. <name>yarn.resourcemanager.address.rm2</name>
  93. <value>nns:8132</value>
  94. </property>
  95. <property>
  96. <name>yarn.resourcemanager.scheduler.address.rm2</name>
  97. <value>nns:8130</value>
  98. </property>
  99. <property>
  100. <name>yarn.resourcemanager.webapp.address.rm2</name>
  101. <value>nns:8188</value>
  102. </property>
  103. <property>
  104. <name>yarn.resourcemanager.resource-tracker.address.rm2</name>
  105. <value>nns:8131</value>
  106. </property>
  107. <property>
  108. <name>yarn.resourcemanager.admin.address.rm2</name>
  109. <value>nns:8033</value>
  110. </property>
  111. <property>
  112. <name>yarn.resourcemanager.ha.admin.address.rm2</name>
  113. <value>nns:23142</value>
  114. </property>
  115. <property>
  116. <name>yarn.nodemanager.aux-services</name>
  117. <value>mapreduce_shuffle</value>
  118. </property>
  119. <property>
  120. <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
  121. <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  122. </property>
  123. <property>
  124. <name>yarn.nodemanager.local-dirs</name>
  125. <value>/home/hadoop/data/yarn/local</value>
  126. </property>
  127. <property>
  128. <name>yarn.nodemanager.log-dirs</name>
  129. <value>/home/hadoop/log/yarn</value>
  130. </property>
  131. <property>
  132. <name>mapreduce.shuffle.port</name>
  133. <value>23080</value>
  134. </property>
  135. <!--故障处理类 -->
  136. <property>
  137. <name>yarn.client.failover-proxy-provider</name>
  138. <value>org.apache.hadoop.yarn.client.ConfiguredRMFailoverProxyProvider</value>
  139. </property>
  140. <property>
  141. <name>yarn.resourcemanager.ha.automatic-failover.zk-base-path</name>
  142. <value>/yarn-leader-election</value>
  143. </property>
  144. </configuration>
  • hadoop-env.sh

  1. # The java implementation to use.
  2. export JAVA_HOME=/usr/java/jdk1.7
  • yarn-env.sh

  1. # some Java parameters
  2. export JAVA_HOME=/usr/java/jdk1.7
  • 2.12slaves

  修改hadoop安装目录下的slaves文件:


  1. dn1
  2. dn2
  3. dn3

2.13启动命令(hdfs和yarn的相关命令)

  由于我们配置了QJM,所以我们需要先启动QJM的服务,启动顺序如下所示:

  1. 进入到DN节点,启动zk的服务:zkServer.sh start,之后可以输入zkServer.sh status查看启动状态,本次我们配置了三个DN节点,会出现一个leader和两个follower。输入jps,会显示启动进程:QuorumPeerMain
  2. 在NN节点上(选一台即可,这里我选择的是NNA节点),然后启动journalnode服务,命令如下:hadoop-daemons.sh start journalnode。或者单独进入到每个DN输入启动命令:hadoop-daemon.sh start journalnode。输入jps显示启动进程:JournalNode
  3. 接着,若是配置后首次启动,需要格式化HDFS,命令如下:hadoop namenode -format
  4. 之后我们需要格式化ZK,命令如下:hdfs zkfc -formatZK
  5. 接着我们启动hdfs和yarn,命令如下:start-dfs.sh 和 start-yarn.sh,我们在NNA输入jps查看进程,显示如下:DFSZKFailoverController、NameNode、ResourceManager
  6. 接着我们在NNS输入jps查看,发现只有DFSZKFailoverController进程,这里我们需要手动启动NNS上的NameNode和ResourceManager进程,命令如下:hadoop-daemon.sh start namenode 和 yarn-daemon.sh start resourcemanager。需要注意的是,在NNS上的yarn-site.xml中,需要配置指向NNS,属性yarn.resourcemanager.ha.id配置为rm2,在NNA中配置的是rm1。
  7. 最后我们需要在NNS节点上同步NNA节点的元数据,命令如下:hdfs namenode -bootstrapStandby,若执行正常,日志最后显示如下信息:

  1. 15/02/21 10:30:59 INFO common.Storage: Storage directory /home/hadoop/data/dfs/name has been successfully formatted.
  2. 15/02/21 10:30:59 WARN common.Util: Path /home/hadoop/data/dfs/name should be specified as a URI in configuration files. Please update hdfs configuration.
  3. 15/02/21 10:30:59 WARN common.Util: Path /home/hadoop/data/dfs/name should be specified as a URI in configuration files. Please update hdfs configuration.
  4. 15/02/21 10:31:00 INFO namenode.TransferFsImage: Opening connection to http://nna:50070/imagetransfer?getimage=1&txid=0&storageInfo=-60:1079068934:0:CID-1dd0c11e-b27e-4651-aad6-73bc7dd820bd
  5. 15/02/21 10:31:01 INFO namenode.TransferFsImage: Image Transfer timeout configured to 60000 milliseconds
  6. 15/02/21 10:31:01 INFO namenode.TransferFsImage: Transfer took 0.01s at 0.00 KB/s
  7. 15/02/21 10:31:01 INFO namenode.TransferFsImage: Downloaded file fsimage.ckpt_0000000000000000000 size 353 bytes.
  8. 15/02/21 10:31:01 INFO util.ExitUtil: Exiting with status 0
  9. 15/02/21 10:31:01 INFO namenode.NameNode: SHUTDOWN_MSG: /************************************************************ SHUTDOWN_MSG: Shutting down NameNode at nns/10.211.55.13 ************************************************************/

2.14HA的切换

  由于我配置的是自动切换,若NNA节点宕掉,NNS节点会立即由standby状态切换为active状态。若是配置的手动状态,可以输入如下命令进行人工切换:


  1. hdfs haadmin -failover --forcefence --forceactive nna nns

  这条命令的意思是,将nna变成standby,nns变成active。而且手动状态下需要重启服务。

2.15效果截图

3.总结

  这篇文章就叙述到这里,若在配置过程中有什么疑问或问题,可以加入QQ群讨论或发送邮件给我,我会尽我所能为您解答,与君共勉!



(责任编辑:IT)
------分隔线----------------------------