hadoop集群搭建(一)HDFS的namenode的HA搭建

HDFS的namenode的HA搭建,准备好机器

站在用户的角度思考问题,与客户深入沟通,找到云龙网站设计与云龙网站推广的解决方案,凭借多年的经验,让设计与互联网技术结合,创造个性化、用户体验好的作品,建站类型包括:成都做网站、网站建设、企业官网、英文网站、手机端网站、网站推广、域名注册、网络空间、企业邮箱。业务覆盖云龙地区。

hadoop01    IP:192.168.216.203   GATEWAY:192.168.216.2        

hadoop02    IP:192.168.216.204   GATEWAY:192.168.216.2

hadoop03    IP:192.168.216.205   GATEWAY:192.168.216.2

配置网卡

[root@hadoop01 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth0

DEVICE=eth0

TYPE=Ethernet

HWADDR=00:0C:29:6B:CD:B3                             网卡MAC地址

ONBOOT=yes                                                        yes表示开机启动

NM_CONTROLLED=yes

BOOTPROTO=none

IPADDR=192.168.216.203                                   IP地址

PREFIX=24                                                          

GATEWAY=192.168.216.2                                    网关

DNS1=8.8.8.8                                                        域名解析服务器地址一

DNS2=192.168.10.254                                              域名解析服务器地址二

DEFROUTE=yes

IPV4_FAILURE_FATAL=yes

IPV6INIT=no

NAME="System eth0"


安装java JDK 并配置环境变量

[root@hadoop01 jdk1.8.0_152]# vim /etc/profile

#my setting

export JAVA_HOME=/usr/local/jdk1.8.0_152/

export PATH=$PATH:$JAVA_HOME/bin:



配置hadoop01/hadoop02/hadoop03之间互相ssh免密登录



[root@hadoop01 hadoop-2.7.1]# vim ./etc/hadoop/hadoop-env.sh

# The java implementation to use.

export JAVA_HOME=/usr/local/jdk1.8.0_152/


[root@hadoop01 ~]# vim /usr/local/hadoop-2.7.1/etc/hadoop/core-site.xml

       

               fs.defaultFS

               hdfs://qian

       

       

       

               ha.zookeeper.quorum

               hadoop01:2181,hadoop02:2181,hadoop03:2181

       


[root@hadoop01 ~]# vim /usr/local/hadoop-2.7.1/etc/hadoop/hdfs-site.xml

<configuration>
       <property>
               <name>dfs.nameservices</name>
               <value>qian</value>
       </property>

       <property>
               <name>dfs.ha.namenodes.qian</name>
               <value>nn1,nn2</value>
       </property>

       <property>
               <name>dfs.namenode.rpc-address.qian.nn1</name>
               <value>hadoop01:9000</value>
       </property>

       <property>
               <name>dfs.namenode.rpc-address.qian.nn2</name>
               <value>hadoop02:9000</value>
       </property>

       <property>
               <name>dfs.namenode.http-address.qian.nn1</name>
               <value>hadoop01:50070</value>
       </property>

       <property>
               <name>dfs.namenode.http-address.qian.nn2</name>
               <value>hadoop02:50070</value>
       </property>

       <property>
               <name>dfs.namenode.shared.edits.dir</name>
               <value>qjournal://hadoop01:8485;hadoop02:8485;hadoop03:8485/qian</value>
       </property>

       <property>
               <name>dfs.journalnode.edits.dir</name>
               <value>/home/hadata/journalnode/data</value>
       </property>

       <property>
               <name>dfs.ha.automatic-failover.enabled</name>
               <value>true</value>
       </property>

       <property>
               <name>dfs.client.failover.proxy.provider.qian</name>
               <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
       </property>

       <property>
               <name>dfs.ha.fencing.methods</name>
               <value>sshfence</value>
       </property>

       <property>
               <name>dfs.ha.fencing.ssh.private-key-files</name>
               <value>/root/.ssh/id_rsa</value>
       </property>

       <property>
               <name>dfs.ha.fencing.ssh.connect-timeout</name>
               <value>30000</value>
       </property>

       <property>
               <name>dfs.namenode.name.dir</name>
               <value>/home/hadata/dfs/name</value>
       </property>

       <property>
               <name>dfs.datanode.data.dir</name>
               <value>/home/hadata/dfs/data</value>
       </property>

       <property>
               <name>dfs.blocksize</name>
               <value>134217728</value>
       </property>

       <property>
               <name>dfs.permissions.enabled</name>
               <value>false</value>
       </property>

       <property>
               <name>dfs.replication</name>
               <value>3</value>
       </property>
</configuration>


[root@hadoop01 ~]# vim /usr/local/hadoop-2.7.1/etc/hadoop/slaves

hadoop01

hadoop02

hadoop03


安装并配置zookeeper

[root@hadoop01 zookeeper-3.4.10]# tar -zxvf /home/zookeeper-3.4.10.tar.gz -C /usr/local/

[root@hadoop01 zookeeper-3.4.10]# cp ./conf/zoo_sample.cfg ./conf/zoo.cfg

# The number of milliseconds of each tick

tickTime=2000

# The number of ticks that the initial

# synchronization phase can take

initLimit=5

# The number of ticks that can pass between

# sending a request and getting an acknowledgement

syncLimit=2

# the directory where the snapshot is stored.

# do not use /tmp for storage, /tmp here is just

# example sakes.

dataDir=/home/zookeeperdata

# the port at which the clients will connect

clientPort=2181

server.1=hadoop01:2888:3888

server.2=hadoop02:2888:3888

server.3=hadoop03:2888:3888


[root@hadoop01 zookeeper-3.4.10]# scp -r /usr/local/zookeeper-3.4.10 hadoop02:/usr/local/

[root@hadoop01 zookeeper-3.4.10]# scp -r /usr/local/zookeeper-3.4.10 hadoop03:/usr/local/


配置三台机器的环境变量

[root@hadoop01 zookeeper-3.4.10]# vim /etc/profile

#my setting

export JAVA_HOME=/usr/local/jdk1.8.0_152/

export HADOOP_HOME=/usr/local/hadoop-2.7.1/

export ZK_HOME=/usr/local/zookeeper-3.4.10/

export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$ZK_HOME/bin:


[root@hadoop01 zookeeper-3.4.10]# scp -r /etc/profile hadoop02:/etc

profile

[root@hadoop01 zookeeper-3.4.10]# scp -r /etc/profile hadoop03:/etc

profile


[root@hadoop01 ~]# source /etc/profile

[root@hadoop02 ~]# source /etc/profile

[root@hadoop03 ~]# source /etc/profile



[root@hadoop01 zookeeper-3.4.10]# mkdir /home/zookeeperdata

[root@hadoop01 zookeeper-3.4.10]# vim /home/zookeeperdata/myid                         myid文件里输入          1

1

[root@hadoop02 ~]# mkdir /home/zookeeperdata

[root@hadoop02 ~]# vim /home/zookeeperdata/myid                                                   myid文件里输入          2

2

[root@hadoop03 ~]# mkdir /home/zookeeperdata

[root@hadoop03 ~]# vim /home/zookeeperdata/myid                                                    myid文件里输入          3

3


[root@hadoop01 zookeeper-3.4.10]# zkServer.sh status

ZooKeeper JMX enabled by default

Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg

Mode: follower

[root@hadoop02 ~]# zkServer.sh status

ZooKeeper JMX enabled by default

Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg

Mode: follower

[root@hadoop03 ~]# zkServer.sh status

ZooKeeper JMX enabled by default

Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg

Mode: leader

[root@hadoop01 zookeeper-3.4.10]# scp -r /usr/local/hadoop-2.7.1/ hadoop02:/usr/local/

[root@hadoop01 zookeeper-3.4.10]# scp -r /usr/local/hadoop-2.7.1/ hadoop03:/usr/local/


[root@hadoop01 zookeeper-3.4.10]# hadoop-daemon.sh start journalnode

[root@hadoop02 zookeeper-3.4.10]# hadoop-daemon.sh start journalnode

[root@hadoop03 zookeeper-3.4.10]# hadoop-daemon.sh start journalnode


[root@hadoop01 zookeeper-3.4.10]# hadoop namenode -format

[root@hadoop01 zookeeper-3.4.10]# hadoop-daemon.sh start namenode

starting namenode, logging to /usr/local/hadoop-2.7.1/logs/hadoop-root-namenode-hadoop01.out


同步已启动的namenode的元数据到未启动的namenode

[root@hadoop02 ~]# hdfs namenode -bootstrapStandby


确认zookeeper集群是否启动

[root@hadoop01 zookeeper-3.4.10]# zkServer.sh status

ZooKeeper JMX enabled by default

Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg

Mode: follower

[root@hadoop02 ~]# zkServer.sh status

ZooKeeper JMX enabled by default

Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg

Mode: follower

[root@hadoop03 ~]# zkServer.sh status

ZooKeeper JMX enabled by default

Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg

Mode: leader

[root@hadoop01 zookeeper-3.4.10]# hdfs zkfc -formatZK

.

.

.

.

....INFO ha.ActiveStandbyElector: Successfully created /hadoop-ha/qian in ZK.

.

.

.

[root@hadoop03 ~]# zkCli.sh

WatchedEvent state:SyncConnected type:None path:null

[zk: localhost:2181(CONNECTED) 0] ls /

[zookeeper, hadoop-ha]

[zk: localhost:2181(CONNECTED) 1] ls /hadoop-ha

[qian]

[zk: localhost:2181(CONNECTED) 2] ls /hadoop-ha/qian

[]

注意:退出zkCli,输入quit


[root@hadoop01 zookeeper-3.4.10]# start-dfs.sh

[root@hadoop01 zookeeper-3.4.10]# jps

3281 JournalNode

4433 Jps

3475 NameNode

4068 DataNode

3110 QuorumPeerMain

4367 DFSZKFailoverController

[root@hadoop02 ~]# jps

3489 DataNode

3715 Jps

2970 QuorumPeerMain

3162 JournalNode

3646 DFSZKFailoverController

3423 NameNode

[root@hadoop03 ~]# zkCli.sh

zkCli.sh

WATCHER::

WatchedEvent state:SyncConnected type:None path:null

[zk: localhost:2181(CONNECTED) 4] ls /hadoop-ha/qian

[ActiveBreadCrumb, ActiveStandbyElectorLock]

[zk: localhost:2181(CONNECTED) 2] get /hadoop-ha/qian/ActiveBreadCrumb

                                                                                                                       qiannn1hadoop01 �F(�>

cZxid = 0x10000000a

ctime = Sat Jan 13 01:40:21 CST 2018

mZxid = 0x10000000a

mtime = Sat Jan 13 01:40:21 CST 2018

pZxid = 0x10000000a

cversion = 0

dataVersion = 0

aclVersion = 0

ephemeralOwner = 0x0

dataLength = 31

numChildren = 0


[root@hadoop01 hadoop-2.7.1]# hdfs dfs -put ./README.txt hdfs:/

[root@hadoop01 hadoop-2.7.1]# hdfs dfs -ls /

18/01/13 01:58:24 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable

Found 1 items

-rw-r--r--   3 root supergroup       1366 2018-01-13 01:57 /README.txt


测试是否能自动进行故障转移(failover)

[root@hadoop01 hadoop-2.7.1]# jps

3281 JournalNode

3475 NameNode

4644 Jps

4068 DataNode

3110 QuorumPeerMain

4367 DFSZKFailoverController


[root@hadoop01 hadoop-2.7.1]# kill -9 3475

[root@hadoop03 ~]# zkCli.sh

ActiveBreadCrumb           ActiveStandbyElectorLock

[zk: localhost:2181(CONNECTED) 6] get /hadoop-ha/qian/ActiveBreadCrumb

                                                                                                                       qiannn2hadoop02 �F(�>

cZxid = 0x10000000a

ctime = Sat Jan 13 01:40:21 CST 2018

mZxid = 0x100000011

mtime = Sat Jan 13 02:01:57 CST 2018

pZxid = 0x10000000a

cversion = 0

dataVersion = 1

aclVersion = 0

ephemeralOwner = 0x0

dataLength = 31

numChildren = 0


[root@hadoop02 ~]# jps

3489 DataNode

3989 Jps

2970 QuorumPeerMain

3162 JournalNode

3646 DFSZKFailoverController

3423 NameNode


注意:namenode1挂掉后会自动切换到namenode2;若namenode2随后也挂掉,集群就没有可用的namenode了——已挂掉的namenode1不会被自动重新启动,需要手动启动。



配置集群时间同步



HA搭建完毕


本文标题:hadoop集群搭建(一)HDFS的namenode的HA搭建
文章链接:http://pwwzsj.com/article/ppcopg.html