
Hadoop 2.4.1 Cluster Installation (Part 1)

Date: 2016-12-24 17:34   Source: linux.it.net.cn   Author: IT
For hostname configuration, see "Hadoop 1.2.1 Cluster Installation (Part 1)".

For the JDK environment, see "Hadoop 1.2.1 Cluster Installation (Part 2)".

Configuring Hadoop
A: Download and extract Hadoop

Download from:
http://mirrors.cnnic.cn/apache/hadoop/common/hadoop-2.4.1/hadoop-2.4.1.tar.gz

Create a working directory under /home/it:   mkdir hadoop

Save hadoop-2.4.1.tar.gz to /home/it/hadoop and extract it there:

tar zxf hadoop-2.4.1.tar.gz
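
Putting step A together as plain shell commands, for reference (a sketch; the CNNIC mirror may no longer carry this release, in which case any Apache archive mirror works, and everything is assumed to run as the it user):

mkdir -p /home/it/hadoop
cd /home/it/hadoop
wget http://mirrors.cnnic.cn/apache/hadoop/common/hadoop-2.4.1/hadoop-2.4.1.tar.gz
tar zxf hadoop-2.4.1.tar.gz    # extracts to /home/it/hadoop/hadoop-2.4.1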

B: Configuration

Seven configuration files are involved, all under the hadoop-2.4.1 directory:
etc/hadoop/hadoop-env.sh
etc/hadoop/yarn-env.sh
etc/hadoop/slaves
etc/hadoop/core-site.xml
etc/hadoop/hdfs-site.xml
etc/hadoop/mapred-site.xml
etc/hadoop/yarn-site.xml
Some of these files do not exist by default; they can be created by copying the corresponding .template file, as shown below.
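
In a stock 2.4.1 unpack it is typically mapred-site.xml that only ships as a template; listing the templates and creating the missing file (paths relative to the hadoop-2.4.1 directory):

cd /home/it/hadoop/hadoop-2.4.1
ls etc/hadoop/*.template
cp etc/hadoop/mapred-site.xml.template etc/hadoop/mapred-site.xml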

1:hadoop-env.sh

Set JAVA_HOME:



# Copyright 2011 The Apache Software Foundation 
#  
# Licensed to the Apache Software Foundation (ASF) under one 
# or more contributor license agreements.  See the NOTICE file 
# distributed with this work for additional information 
# regarding copyright ownership.  The ASF licenses this file 
# to you under the Apache License, Version 2.0 (the 
# "License"); you may not use this file except in compliance 
# with the License.  You may obtain a copy of the License at 

#     http://www.apache.org/licenses/LICENSE-2.0 

# Unless required by applicable law or agreed to in writing, software 
# distributed under the License is distributed on an "AS IS" BASIS, 
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and 
# limitations under the License. 
 
# Set Hadoop-specific environment variables here. 
 
# The only required environment variable is JAVA_HOME.  All others are 
# optional.  When running a distributed configuration it is best to 
# set JAVA_HOME in this file, so that it is correctly defined on 
# remote nodes. 
 
# The java implementation to use. 
export JAVA_HOME=/home/it/jdk1.7.0_45 
 
# The jsvc implementation to use. Jsvc is required to run secure datanodes. 
#export JSVC_HOME=${JSVC_HOME} 
 
export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"} 
 
# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler. 
for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do 
  if [ "$HADOOP_CLASSPATH" ]; then 
    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f 
  else 
    export HADOOP_CLASSPATH=$f 
  fi 
done 
 
# The maximum amount of heap to use, in MB. Default is 1000. 
#export HADOOP_HEAPSIZE= 
#export HADOOP_NAMENODE_INIT_HEAPSIZE="" 
 
# Extra Java runtime options.  Empty by default. 
export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true" 
 
# Command specific options appended to HADOOP_OPTS when specified 
export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS" 
export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS" 
 
export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS" 
 
export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS" 
export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS" 
 
# The following applies to multiple commands (fs, dfs, fsck, distcp etc) 
export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS" 
#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS" 
 
# On secure datanodes, user to run the datanode as after dropping privileges 
export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER} 
 
# Where log files are stored.  $HADOOP_HOME/logs by default. 
#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER 
 
# Where log files are stored in the secure data environment. 
export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER} 
 
# The directory where pid files are stored. /tmp by default. 
# NOTE: this should be set to a directory that can only be written to by  
#       the user that will run the hadoop daemons.  Otherwise there is the 
#       potential for a symlink attack. 
export HADOOP_PID_DIR=${HADOOP_PID_DIR} 
export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR} 
 
# A string representing this instance of hadoop. $USER by default. 
export HADOOP_IDENT_STRING=$USER 
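
The only line that has to change in this file is JAVA_HOME. A minimal non-interactive way to make that edit, assuming the JDK from the earlier part is installed at /home/it/jdk1.7.0_45 (adjust to your own path):

cd /home/it/hadoop/hadoop-2.4.1
sed -i 's|^export JAVA_HOME=.*|export JAVA_HOME=/home/it/jdk1.7.0_45|' etc/hadoop/hadoop-env.sh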

2:yarn-env.sh
Set JAVA_HOME:



# Licensed to the Apache Software Foundation (ASF) under one or more 
# contributor license agreements.  See the NOTICE file distributed with 
# this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0 
# (the "License"); you may not use this file except in compliance with 
# the License.  You may obtain a copy of the License at 

#     http://www.apache.org/licenses/LICENSE-2.0 

# Unless required by applicable law or agreed to in writing, software 
# distributed under the License is distributed on an "AS IS" BASIS, 
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and 
# limitations under the License. 
 
# User for YARN daemons 
export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn} 
 
# resolve links - $0 may be a softlink 
export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}" 
 
# some Java parameters 
export JAVA_HOME=/home/it/jdk1.7.0_45 
if [ "$JAVA_HOME" != "" ]; then 
  #echo "run java in $JAVA_HOME" 
  JAVA_HOME=$JAVA_HOME 
fi 
   
if [ "$JAVA_HOME" = "" ]; then 
  echo "Error: JAVA_HOME is not set." 
  exit 1 
fi 
 
JAVA=$JAVA_HOME/bin/java 
JAVA_HEAP_MAX=-Xmx1000m  
 
# For setting YARN specific HEAP sizes please use this 
# Parameter and set appropriately 
# YARN_HEAPSIZE=1000 
 
# check envvars which might override default args 
if [ "$YARN_HEAPSIZE" != "" ]; then 
  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m" 
fi 
 
# Resource Manager specific parameters 
 
# Specify the max Heapsize for the ResourceManager using a numerical value 
# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set 
# the value to 1000. 
# This value will be overridden by an Xmx setting specified in either YARN_OPTS 
# and/or YARN_RESOURCEMANAGER_OPTS. 
# If not specified, the default value will be picked from either YARN_HEAPMAX 
# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. 
#export YARN_RESOURCEMANAGER_HEAPSIZE=1000 
 
# Specify the max Heapsize for the HistoryManager using a numerical value 
# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set 
# the value to 1000. 
# This value will be overridden by an Xmx setting specified in either YARN_OPTS 
# and/or YARN_HISTORYSERVER_OPTS. 
# If not specified, the default value will be picked from either YARN_HEAPMAX 
# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. 
#export YARN_HISTORYSERVER_HEAPSIZE=1000 
 
# Specify the JVM options to be used when starting the ResourceManager. 
# These options will be appended to the options specified as YARN_OPTS 
# and therefore may override any similar flags set in YARN_OPTS 
#export YARN_RESOURCEMANAGER_OPTS= 
 
# Node Manager specific parameters 
 
# Specify the max Heapsize for the NodeManager using a numerical value 
# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set 
# the value to 1000. 
# This value will be overridden by an Xmx setting specified in either YARN_OPTS 
# and/or YARN_NODEMANAGER_OPTS. 
# If not specified, the default value will be picked from either YARN_HEAPMAX 
# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. 
#export YARN_NODEMANAGER_HEAPSIZE=1000 
 
# Specify the JVM options to be used when starting the NodeManager. 
# These options will be appended to the options specified as YARN_OPTS 
# and therefore may override any similar flags set in YARN_OPTS 
#export YARN_NODEMANAGER_OPTS= 
 
# so that filenames w/ spaces are handled correctly in loops below 
IFS= 
 
 
# default log directory & file 
if [ "$YARN_LOG_DIR" = "" ]; then 
  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs" 
fi 
if [ "$YARN_LOGFILE" = "" ]; then 
  YARN_LOGFILE='yarn.log' 
fi 
 
# default policy file for service-level authorization 
if [ "$YARN_POLICYFILE" = "" ]; then 
  YARN_POLICYFILE="hadoop-policy.xml" 
fi 
 
# restore ordinary behaviour 
unset IFS 
 
 
YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR" 
YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR" 
YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE" 
YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE" 
YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME" 
YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING" 
YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" 
YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" 
if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then 
  YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH" 
fi   
YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE" 

3:core-site.xml

<?xml version="1.0" encoding="UTF-8"?> 
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> 
<!-- 
  Licensed under the Apache License, Version 2.0 (the "License"); 
  you may not use this file except in compliance with the License. 
  You may obtain a copy of the License at 
 
    http://www.apache.org/licenses/LICENSE-2.0 
 
  Unless required by applicable law or agreed to in writing, software 
  distributed under the License is distributed on an "AS IS" BASIS, 
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
  See the License for the specific language governing permissions and 
  limitations under the License. See accompanying LICENSE file. 
--> 
 
<!-- Put site-specific property overrides in this file. --> 
 
<configuration> 
<property> 
<name>fs.defaultFS</name> 
<value>hdfs://feng01:9000</value> 
</property> 
<property> 
<name>io.file.buffer.size</name> 
<value>131072</value> 
</property> 
<property> 
<name>hadoop.tmp.dir</name> 
<value>file:/home/it/hadoop/tmp</value> 
<description>Abase for other temporary directories.</description> 
</property> 
<property> 
<name>hadoop.proxyuser.hduser.hosts</name> 
<value>*</value> 
</property> 
<property> 
<name>hadoop.proxyuser.hduser.groups</name> 
<value>*</value> 
</property> 
</configuration> 
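
fs.defaultFS names the NameNode host feng01, so that hostname must resolve on every node (the /etc/hosts entries from the hostname part of the series cover this), and hadoop.tmp.dir points at a local directory worth creating up front. A quick check, using the hostnames and paths from this walkthrough:

ping -c 1 feng01                 # should answer with the NameNode's IP
mkdir -p /home/it/hadoop/tmp     # matches hadoop.tmp.dir above
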
4:hdfs-site.xml

<?xml version="1.0" encoding="UTF-8"?> 
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> 
<!-- 
  Licensed under the Apache License, Version 2.0 (the "License"); 
  you may not use this file except in compliance with the License. 
  You may obtain a copy of the License at 
 
    http://www.apache.org/licenses/LICENSE-2.0 
 
  Unless required by applicable law or agreed to in writing, software 
  distributed under the License is distributed on an "AS IS" BASIS, 
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
  See the License for the specific language governing permissions and 
  limitations under the License. See accompanying LICENSE file. 
--> 
 
<!-- Put site-specific property overrides in this file. --> 
 
<configuration> 
<property> 
<name>dfs.namenode.secondary.http-address</name> 
<value>feng01:9001</value> 
</property> 
<property> 
<name>dfs.namenode.name.dir</name> 
<value>file:/home/it/hadoop/name</value> 
</property> 
<property> 
<name>dfs.datanode.data.dir</name> 
<value>file:/home/it/hadoop/data</value> 
</property> 
<property> 
<name>dfs.replication</name> 
<value>1</value> 
</property> 
<property> 
<name>dfs.webhdfs.enabled</name> 
<value>true</value> 
</property> 
</configuration> 
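
dfs.namenode.name.dir and dfs.datanode.data.dir also point at local paths. The NameNode format (done in a later part) creates the name directory, but creating both ahead of time is harmless and avoids permission surprises; the paths below are the ones used here. Note that with two DataNodes, dfs.replication=1 keeps a single copy of each block; raise it to 2 if you want redundancy.

mkdir -p /home/it/hadoop/name    # used by the NameNode on feng01
mkdir -p /home/it/hadoop/data    # used by the DataNodes on feng02/feng03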

5:mapred-site.xml


<?xml version="1.0"?> 
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> 
<!-- 
  Licensed under the Apache License, Version 2.0 (the "License"); 
  you may not use this file except in compliance with the License. 
  You may obtain a copy of the License at 
 
    http://www.apache.org/licenses/LICENSE-2.0 
 
  Unless required by applicable law or agreed to in writing, software 
  distributed under the License is distributed on an "AS IS" BASIS, 
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
  See the License for the specific language governing permissions and 
  limitations under the License. See accompanying LICENSE file. 
--> 
 
<!-- Put site-specific property overrides in this file. --> 
 
 
<configuration> 
<property> 
<name>mapreduce.framework.name</name> 
<value>yarn</value> 
</property> 
<property> 
<name>mapreduce.jobhistory.address</name> 
<value>feng01:10020</value> 
</property> 
<property> 
<name>mapreduce.jobhistory.webapp.address</name> 
<value>feng01:19888</value> 
</property> 
</configuration> 
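
The two jobhistory addresses only matter once the JobHistory server is running; it is not started by start-yarn.sh. For reference (the start scripts themselves are covered in the next part), it is typically launched on feng01 from the hadoop-2.4.1 directory with:

sbin/mr-jobhistory-daemon.sh start historyserver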

6:yarn-site.xml

<?xml version="1.0"?> 
<!-- 
  Licensed under the Apache License, Version 2.0 (the "License"); 
  you may not use this file except in compliance with the License. 
  You may obtain a copy of the License at 
 
    http://www.apache.org/licenses/LICENSE-2.0 
 
  Unless required by applicable law or agreed to in writing, software 
  distributed under the License is distributed on an "AS IS" BASIS, 
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
  See the License for the specific language governing permissions and 
  limitations under the License. See accompanying LICENSE file. 
--> 
<configuration> 
 
<!-- Site specific YARN configuration properties --> 
<property> 
<name>yarn.nodemanager.aux-services</name> 
<value>mapreduce_shuffle</value> 
</property> 
<property> 
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name> 
<value>org.apache.hadoop.mapred.ShuffleHandler</value> 
</property> 
<property> 
<name>yarn.resourcemanager.address</name> 
<value>feng01:8032</value> 
</property> 
<property> 
<name>yarn.resourcemanager.scheduler.address</name> 
<value>feng01:8030</value> 
</property> 
<property> 
<name>yarn.resourcemanager.resource-tracker.address</name> 
<value>feng01:8031</value> 
</property> 
<property> 
<name>yarn.resourcemanager.admin.address</name> 
<value>feng01:8033</value> 
</property> 
<property> 
<name>yarn.resourcemanager.webapp.address</name> 
<value>feng01:8088</value> 
</property>  
</configuration> 
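
All of the ResourceManager addresses point at feng01 and use the stock ports (8030-8033 for the various ResourceManager RPC interfaces, 8088 for the web UI). Before starting YARN it can be worth confirming on feng01 that nothing else is already listening on them (assumes net-tools is installed):

netstat -tln | grep -E ':(8030|8031|8032|8033|8088) '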

7:slaves

The slaves file lists the slave (DataNode/NodeManager) hosts, one hostname per line:
[it@it.net.cn hadoop]$ cat slaves
feng02 
feng03 
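
Once all seven files are edited on the master, the same configuration has to be present on the slave nodes as well. A minimal sketch, assuming the same /home/it/hadoop path on feng02 and feng03 and that passwordless SSH for the it user is already set up as in the earlier parts:

scp -r /home/it/hadoop/hadoop-2.4.1 it@feng02:/home/it/hadoop/
scp -r /home/it/hadoop/hadoop-2.4.1 it@feng03:/home/it/hadoop/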


Configuration complete.


