CentOS 6.10: Installing Apache Hadoop 2.9.1 on a Three-Node Cluster
Published: 2019-06-07


Hadoop version: Apache Hadoop 2.9.1

JDK version: Oracle JDK 1.8 (8u191)

Cluster plan:
master (hadoop1): NameNode (NN), ResourceManager (RM), DataNode (DN), NodeManager (NM), JobHistoryServer (JHS)
slave1 (hadoop2): DN, NM, plus the SecondaryNameNode (placed there via hdfs-site.xml below)
slave2 (hadoop3): DN, NM

Installation packages:
jdk-8u191-linux-x64.tar.gz
hadoop-2.9.1.tar.gz

1. Environment Initialization

[root@hadoop1 opt]# cat /etc/redhat-release
CentOS release 6.10 (Final)
# service iptables stop
# chkconfig iptables off
# sed -i 's/=enforcing/=disabled/' /etc/selinux/config
# cat >> /etc/profile << EOF
export HISTTIMEFORMAT='%F %T '
EOF
# sed -i '$a vm.swappiness = 0' /etc/sysctl.conf
echo never > /sys/kernel/mm/redhat_transparent_hugepage/defrag
echo never > /sys/kernel/mm/redhat_transparent_hugepage/enabled
# sed -i '$a echo never > /sys/kernel/mm/redhat_transparent_hugepage/defrag' /etc/rc.local
# sed -i '$a echo never > /sys/kernel/mm/redhat_transparent_hugepage/enabled' /etc/rc.local
# vim /etc/ntp.conf    ## comment out every line that starts with "server"
# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
server cn.pool.ntp.org
# service ntpd start
# chkconfig ntpd on
# rpm -qa | grep java-1
java-1.5.0-gcj-1.5.0.0-29.1.el6.x86_64
java-1.6.0-openjdk-1.6.0.41-1.13.13.1.el6_8.x86_64
java-1.7.0-openjdk-1.7.0.181-2.6.14.10.el6.x86_64
# rpm -e --nodeps `rpm -qa | grep java-1`
# tar -zxvf jdk-8u191-linux-x64.tar.gz
# ln -s jdk1.8.0_191 jdk
# alternatives --install /usr/bin/java java /opt/jdk/bin/java 100
# alternatives --install /usr/bin/javac javac /opt/jdk/bin/javac 100
# cat >> /etc/profile << EOF
export JAVA_HOME=/opt/jdk
export PATH=$PATH:$JAVA_HOME/bin
EOF
[root@hadoop1 opt]# source /etc/profile
[root@hadoop1 opt]# java -version
java version "1.8.0_191"
Java(TM) SE Runtime Environment (build 1.8.0_191-b12)
Java HotSpot(TM) 64-Bit Server VM (build 25.191-b12, mixed mode)
[root@hadoop1 opt]# javac -version
javac 1.8.0_191
# cat >> /etc/hosts << EOF
1 hadoop1
2 hadoop2
3 hadoop3
EOF
[root@hadoop1 opt]# id hadoop
id: hadoop: No such user
[root@hadoop1 opt]# useradd hadoop
[root@hadoop1 opt]# vim /etc/sudoers
hadoop ALL=NOPASSWD:ALL

Run all of the steps above on all three nodes. The environment can then be verified with a test_hadoop_env.sh check script; a sketch of it follows, and its sample output is shown after that.
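The original post runs test_hadoop_env.sh but never shows its contents. Below is a minimal sketch reconstructed from the output that follows; the exact checks and section headers are assumptions, not the author's original script.

#!/bin/bash
# test_hadoop_env.sh -- sketch: verify the OS prerequisites configured above
echo "######### 1. iptables: #########"
echo -n "current iptables status: "
service iptables status          # prints "Only usable by root." when run unprivileged
echo -n "chkconfig status: "
chkconfig --list iptables
echo "######### 2. Selinux: #########"
echo "current selinux status: $(getenforce)"
echo "config of selinux: $(grep '^SELINUX=' /etc/selinux/config)"
echo "######### 3. THP: #########"
echo "defrag status: $(cat /sys/kernel/mm/redhat_transparent_hugepage/defrag)"
echo "enabled status: $(cat /sys/kernel/mm/redhat_transparent_hugepage/enabled)"
echo "######### 4. Swappiness => 0: #########"
echo "current swappiness setting: $(cat /proc/sys/vm/swappiness)"
echo "######### 5. ntp: #########"
ntpq -p
echo "######### 6. JDK: #########"
echo "current java version: $(javac -version 2>&1)"   # javac prints its version to stderr on JDK 8
echo "######### 7. hosts: #########"
cat /etc/hosts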

[hadoop@hadoop1 opt]$ ./test_hadoop_env.sh
######### 1. iptables: #########
current iptables status: iptables: Only usable by root.   [WARNING]
chkconfig status: iptables  0:off  1:off  2:off  3:off  4:off  5:off  6:off
######### 2. Selinux: #########
current selinux status: Disabled
config of selinux: SELINUX=disabled
######### 3. THP: #########
defrag status: always madvise [never]
enabled status: always madvise [never]
######### 4. Swappiness => 0: #########
current swappiness setting: 0
######### 5. ntp: #########
     remote           refid      st t when poll reach   delay   offset  jitter
==============================================================================
**   *    3 u  178  256  377  253.202    8.052   2.663
######### 6. JDK: #########
current java version: javac 1.8.0_191
######### 7. hosts: #########
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
1 hadoop1
2 hadoop2
3 hadoop3

2. Installation

[root@hadoop1 opt]# tar -zxvf hadoop-2.9.1.tar.gz
[root@hadoop1 opt]# ln -s hadoop-2.9.1 hadoop
[root@hadoop1 opt]# vim ~/.bash_profile
export HADOOP_HOME=/opt/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
[root@hadoop1 opt]# vim /opt/hadoop/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/opt/jdk
[root@hadoop1 opt]# source ~/.bash_profile
[root@hadoop1 opt]# hadoop version
Hadoop 2.9.1
Subversion https://github.com/apache/hadoop.git -r e30710aea4e6e55e69372929106cf119af06fd0e
Compiled by root on 2018-04-16T09:33Z
Compiled with protoc 2.5.0
From source with checksum 7d6d2b655115c6cc336d662cc2b919bd
This command was run using /opt/hadoop-2.9.1/share/hadoop/common/hadoop-common-2.9.1.jar
[root@hadoop1 opt]# vim /opt/hadoop/etc/hadoop/yarn-env.sh
# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
export JAVA_HOME=/opt/jdk
[root@hadoop1 opt]# vim /opt/hadoop/etc/hadoop/core-site.xml
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://hadoop1:9000</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/opt/hadoopdata/tmp</value>
  </property>
</configuration>
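A note on fs.defaultFS: it is the default filesystem URI against which HDFS clients resolve relative paths. Once the cluster is running (later in this post), the two commands below are equivalent; this is purely illustrative, not from the original post:

[hadoop@hadoop1 opt]$ hdfs dfs -ls /user/hadoop
[hadoop@hadoop1 opt]$ hdfs dfs -ls hdfs://hadoop1:9000/user/hadoop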
[root@hadoop1 opt]# mkdir -p /opt/hadoopdata/tmp
[root@hadoop1 opt]# vim /opt/hadoop/etc/hadoop/hdfs-site.xml
<configuration>
  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>hadoop2:9001</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>/opt/hadoopdata/hdfs/name</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>/opt/hadoopdata/hdfs/data</value>
  </property>
  <property>
    <name>dfs.namenode.checkpoint.dir</name>
    <value>/opt/hadoopdata/hdfs/snn</value>
  </property>
  <property>
    <name>dfs.namenode.checkpoint.period</name>
    <value>3600</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>2</value>
  </property>
  <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
  </property>
</configuration>
mkdir -p /opt/hadoopdata/hdfs/name
mkdir -p /opt/hadoopdata/hdfs/data
mkdir -p /opt/hadoopdata/hdfs/snn
[root@hadoop1 opt]# vim /opt/hadoop/etc/hadoop/yarn-site.xml
<configuration>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address</name>
    <value>hadoop1:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>hadoop1:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>hadoop1:8031</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address</name>
    <value>hadoop1:8033</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>hadoop1:8088</value>
  </property>
</configuration>
[root@hadoop1 opt]# cd /opt/hadoop/etc/hadoop/
[root@hadoop1 hadoop]# cp mapred-site.xml.template mapred-site.xml
[root@hadoop1 hadoop]# vim mapred-site.xml
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>hadoop1:10020</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>hadoop1:19888</value>
  </property>
</configuration>
[root@hadoop1 hadoop]# vim slaves
hadoop1
hadoop2
hadoop3
[root@hadoop2 ~]# useradd -g hadoop hadoop
[root@hadoop3 ~]# useradd -g hadoop hadoop
[root@hadoop1 ~]# passwd hadoop    ## set the hadoop user's password
[root@hadoop1 hadoop]# chown -R hadoop:hadoop /opt/hadoop
[hadoop@hadoop1 ~]$ ssh-keygen
[hadoop@hadoop2 ~]$ ssh-keygen
[hadoop@hadoop3 ~]$ ssh-keygen
[hadoop@hadoop1 ~]$ ssh-copy-id hadoop1
[hadoop@hadoop2 ~]$ ssh-copy-id hadoop1
[hadoop@hadoop3 ~]$ ssh-copy-id hadoop1
[hadoop@hadoop1 ~]$ scp ~/.ssh/authorized_keys hadoop2:~/.ssh/
[hadoop@hadoop1 ~]$ scp ~/.ssh/authorized_keys hadoop3:~/.ssh/
[hadoop@hadoop1 ~]$ ssh hadoop2
[hadoop@hadoop2 ~]$ ssh hadoop3
[hadoop@hadoop3 ~]$ ssh hadoop1
[hadoop@hadoop1 ~]$ ssh hadoop3
Last login: Fri May 31 15:58:20 2019 from hadoop2
[hadoop@hadoop3 ~]$ ssh hadoop2
Last login: Fri May 31 15:58:12 2019 from hadoop1
[hadoop@hadoop2 ~]$ ssh hadoop1
Last login: Fri May 31 15:58:29 2019 from hadoop3
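One step the transcript does not show is getting the JDK, the Hadoop tree, and the data directories onto hadoop2 and hadoop3; the slave nodes need the same /opt layout before start-dfs.sh can launch daemons there. A sketch, assuming root SSH access and the paths used above (this is not from the original post):

# run on hadoop1 as root -- copy the installs to the slaves and recreate the layout
for h in hadoop2 hadoop3; do
    scp -r /opt/jdk1.8.0_191 /opt/hadoop-2.9.1 root@$h:/opt/
    ssh root@$h "ln -s /opt/jdk1.8.0_191 /opt/jdk; \
                 ln -s /opt/hadoop-2.9.1 /opt/hadoop; \
                 mkdir -p /opt/hadoopdata/{tmp,hdfs/{name,data,snn}}; \
                 chown -R hadoop:hadoop /opt/hadoop-2.9.1 /opt/hadoopdata"
done
# as the hadoop user, confirm passwordless SSH works to every node
for h in hadoop1 hadoop2 hadoop3; do ssh $h hostname; done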

3. Start and Test Hadoop

sudo chown -R hadoop:hadoop /opt/hadoop/
[hadoop@hadoop1 bin]$ vim ~/.bash_profile
[hadoop@hadoop1 bin]$ source ~/.bash_profile
[hadoop@hadoop1 bin]$ hdfs namenode -format
hare/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.9.1.jar:/contrib/capacity-scheduler/*.jar
STARTUP_MSG:   build = https://github.com/apache/hadoop.git -r e30710aea4e6e55e69372929106cf119af06fd0e; compiled by 'root' on 2018-04-16T09:33Z
STARTUP_MSG:   java = 1.8.0_191
19/05/31 16:04:24 WARN namenode.NameNode: Encountered exception during format:
java.io.IOException: Cannot create directory /opt/hadoopdata/hdfs/name/current
    at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.clearDirectory(Storage.java:361)
    at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:571)
    at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:592)
    at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:172)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1172)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1614)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1741)
19/05/31 16:04:24 ERROR namenode.NameNode: Failed to start namenode.
java.io.IOException: Cannot create directory /opt/hadoopdata/hdfs/name/current
    at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.clearDirectory(Storage.java:361)
    at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:571)
    at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:592)
    at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:172)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1172)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1614)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1741)
19/05/31 16:04:24 INFO util.ExitUtil: Exiting with status 1: java.io.IOException: Cannot create directory /opt/hadoopdata/hdfs/name/current
19/05/31 16:04:24 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at hadoop1/192.168.19.69

The format fails because /opt/hadoopdata is still owned by root; after handing it over to the hadoop user, formatting succeeds:

[hadoop@hadoop1 opt]$ sudo chown -R hadoop:hadoop /opt/hadoopdata/
[hadoop@hadoop1 opt]$ hdfs namenode -format
19/05/31 16:10:20 INFO namenode.FSImage: Allocated new BlockPoolId: BP-339605524-192.168.19.69-1559290220937
19/05/31 16:10:20 INFO common.Storage: Storage directory /opt/hadoopdata/hdfs/name has been successfully formatted.

Start and test HDFS:

[hadoop@hadoop1 bin]$ start-dfs.sh
19/05/31 16:12:08 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Starting namenodes on [hadoop1]
hadoop1: starting namenode, logging to /opt/hadoop-2.9.1/logs/hadoop-hadoop-namenode-hadoop1.out
hadoop2: starting datanode, logging to /opt/hadoop-2.9.1/logs/hadoop-hadoop-datanode-hadoop2.out
hadoop3: starting datanode, logging to /opt/hadoop-2.9.1/logs/hadoop-hadoop-datanode-hadoop3.out
hadoop1: starting datanode, logging to /opt/hadoop-2.9.1/logs/hadoop-hadoop-datanode-hadoop1.out
Starting secondary namenodes [hadoop2]
hadoop2: starting secondarynamenode, logging to /opt/hadoop-2.9.1/logs/hadoop-hadoop-secondarynamenode-hadoop2.out
19/05/31 16:12:26 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable

(The recurring NativeCodeLoader warning is harmless: the stock Apache tarball does not ship native libraries for this platform, so Hadoop falls back to pure-Java implementations.)

Check that http://hadoop1:50070/ loads normally.

Check that the processes started

[hadoop@hadoop1 opt]$ jps
3347 DataNode
3242 NameNode
3647 Jps
[hadoop@hadoop2 hadoop]$ jps
2880 DataNode
3057 Jps
2990 SecondaryNameNode
[hadoop@hadoop3 ~]$ jps
2929 DataNode
3022 Jps

Upload a file and inspect it in HDFS:

[hadoop@hadoop1 opt]$ hdfs dfs -mkdir -p /user/hadoop
19/05/31 16:20:01 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
[hadoop@hadoop1 opt]$ hdfs dfs -put /etc/hosts .
19/05/31 16:20:26 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
[hadoop@hadoop1 opt]$ hdfs dfs -ls .
19/05/31 16:20:35 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Found 1 items
-rw-r--r--   2 hadoop supergroup        224 2019-05-31 16:20 hosts
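Beyond jps, two quick checks (not in the original post) confirm that all three DataNodes registered with the NameNode and that the uploaded file carries the two replicas that dfs.replication=2 requests:

# list live DataNodes registered with the NameNode
[hadoop@hadoop1 opt]$ hdfs dfsadmin -report | grep -E 'Live datanodes|^Name:'
# show files, blocks, and replication for the uploaded file
[hadoop@hadoop1 opt]$ hdfs fsck /user/hadoop/hosts -files -blocks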

 

Start YARN and check the Web UI

[hadoop@hadoop1 opt]$ start-yarn.sh
starting yarn daemons
starting resourcemanager, logging to /opt/hadoop-2.9.1/logs/yarn-hadoop-resourcemanager-hadoop1.out
hadoop3: starting nodemanager, logging to /opt/hadoop-2.9.1/logs/yarn-hadoop-nodemanager-hadoop3.out
hadoop2: starting nodemanager, logging to /opt/hadoop-2.9.1/logs/yarn-hadoop-nodemanager-hadoop2.out
hadoop1: starting nodemanager, logging to /opt/hadoop-2.9.1/logs/yarn-hadoop-nodemanager-hadoop1.out

Start the JobHistory Server and check the Web UI

[hadoop@hadoop1 opt]$ mr-jobhistory-daemon.sh start historyserver
starting historyserver, logging to /opt/hadoop-2.9.1/logs/mapred-hadoop-historyserver-hadoop1.out

Run a MapReduce job

[hadoop@hadoop1 opt]$ hadoop jar /opt/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.9.1.jar wordcount hosts result
[hadoop@hadoop1 opt]$ hdfs dfs -ls result
19/05/31 16:29:07 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Found 2 items
-rw-r--r--   2 hadoop supergroup          0 2019-05-31 16:28 result/_SUCCESS
-rw-r--r--   2 hadoop supergroup        210 2019-05-31 16:28 result/part-r-00000
[hadoop@hadoop1 opt]$ hdfs dfs -cat result/part-r-00000
19/05/31 16:29:35 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
127.0.0.1    1
1    1
2    1
3    1
::1    1
hadoop1    1
hadoop2    1
hadoop3    1
localhost    2
localhost.localdomain    2
localhost4    1
localhost4.localdomain4    1
localhost6    1
localhost6.localdomain6    1
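The job can also be confirmed from the command line (an extra check, not in the original post): YARN lists finished applications, and the wordcount run should appear with final state SUCCEEDED.

# list applications that have completed on the cluster
[hadoop@hadoop1 opt]$ yarn application -list -appStates FINISHED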

View the result in the HDFS web UI at http://hadoop1:50070/.

View the job in the ResourceManager web UI at http://hadoop1:8088/.

View the job in the JobHistoryServer web UI at http://hadoop1:19888/.

 

Basic testing is complete.
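For completeness, a graceful shutdown runs the stop scripts in roughly the reverse of the start order used above (a sketch; the original post stops here):

[hadoop@hadoop1 opt]$ mr-jobhistory-daemon.sh stop historyserver
[hadoop@hadoop1 opt]$ stop-yarn.sh
[hadoop@hadoop1 opt]$ stop-dfs.sh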

Reposted from: https://www.cnblogs.com/yhq1314/p/10956008.html
