Deploying a Hadoop 3.2.4 + Hive 4.0.1 + Sqoop 1.4.7 Cluster on CentOS 7.6
1. Prerequisites and Planning
Environment requirements:
- Virtual machines:
  - 3 CentOS 7.6 virtual machines, minimal install
  - Memory ≥ 2 GB, ≥ 2 CPU cores, and disk ≥ 20 GB per node
  - Networking in NAT or bridged mode, with all nodes able to reach one another
- Software versions:
  - CentOS Linux release 7.6.1810 (Core)
  - Apache Hadoop 3.2.4
  - Apache Hive 4.0.1
  - OpenJDK 11
  - MariaDB 10.6.x
Cluster node plan:
Node | Hostname | Processes | Installed software |
---|---|---|---|
192.168.37.101 | master | NameNode, SecondaryNameNode, ResourceManager, Hive | JDK, Hadoop, Hive, Sqoop, MariaDB |
192.168.37.102 | slave1 | DataNode, NodeManager | JDK, Hadoop |
192.168.37.103 | slave2 | DataNode, NodeManager | JDK, Hadoop |
2. Base Environment Configuration
2.1. Configure Hostnames and the hosts File
# Run on all nodes (master shown here)
# Set each node's hostname first (use slave1/slave2 on the other nodes)
sudo hostnamectl set-hostname master
sudo vi /etc/hosts
# Add the following entries:
192.168.37.101 master
192.168.37.102 slave1
192.168.37.103 slave2
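To confirm that name resolution and connectivity work, a quick check across the planned hostnames:
# Each hostname should resolve and answer one ping
for h in master slave1 slave2; do ping -c 1 "$h"; done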
2.2. Disable the Firewall and SELinux
# Run on all nodes
sudo systemctl stop firewalld
sudo systemctl disable firewalld
sudo setenforce 0
sudo sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
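To verify both changes took effect, the following standard checks can be run on each node:
systemctl is-active firewalld   # expect: inactive (or unknown)
getenforce                      # expect: Permissive now; Disabled after a reboot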
2.3. Install OpenJDK 11
# Run on all nodes
sudo yum install -y java-11-openjdk-devel
# Configure environment variables
echo 'export JAVA_HOME=/usr/lib/jvm/java-11-openjdk' >> ~/.bashrc
echo 'export PATH=$JAVA_HOME/bin:$PATH' >> ~/.bashrc
source ~/.bashrc
# Verify the installation
java -version # should report OpenJDK 11
If java-11-openjdk-devel cannot be found, try enabling the EPEL repository and retrying:
sudo yum install -y epel-release
sudo yum clean all
sudo yum update
2.4. Configure Passwordless SSH
# Generate a key pair on every node
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
# On the master node, distribute the public key to all nodes (including itself)
ssh-copy-id master
ssh-copy-id slave1
ssh-copy-id slave2
# Test passwordless login
ssh slave1 date # success if no password prompt appears
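To check all three nodes in one pass, a small loop helps; BatchMode makes ssh fail instead of prompting, so any remaining password requirement shows up as an error:
# Each line should print the remote hostname without a password prompt
for h in master slave1 slave2; do ssh -o BatchMode=yes "$h" hostname; done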
2.5. Time Synchronization (NTP)
# Run on all nodes
sudo yum install -y ntp
sudo systemctl start ntpd
sudo systemctl enable ntpd
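After ntpd has run for a few minutes, the peer list should show a selected sync source (the row marked with an asterisk):
# Check that ntpd has picked an upstream server
ntpq -p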
3. Hadoop 3.2.4 Cluster Deployment
3.1. Install Hadoop
# Run on the master node
[root@master ~]# wget https://archive.apache.org/dist/hadoop/core/hadoop-3.2.4/hadoop-3.2.4.tar.gz
[root@master ~]# tar -zxvf hadoop-3.2.4.tar.gz -C /opt/
[root@master ~]# sudo mv /opt/hadoop-3.2.4 /opt/hadoop
If wget is not found, install it with yum install -y wget.
3.2. Edit the Hadoop Configuration Files
hadoop-env.sh
# Configure on the master node
echo "export JAVA_HOME=/usr/lib/jvm/java-11-openjdk" >> /opt/hadoop/etc/hadoop/hadoop-env.sh
echo "export HDFS_NAMENODE_USER=root" >> /opt/hadoop/etc/hadoop/hadoop-env.sh
echo "export HDFS_DATANODE_USER=root" >> /opt/hadoop/etc/hadoop/hadoop-env.sh
echo "export HDFS_SECONDARYNAMENODE_USER=root" >> /opt/hadoop/etc/hadoop/hadoop-env.sh
echo "export YARN_RESOURCEMANAGER_USER=root" >> /opt/hadoop/etc/hadoop/hadoop-env.sh
echo "export YARN_NODEMANAGER_USER=root" >> /opt/hadoop/etc/hadoop/hadoop-env.sh
core-site.xml
[root@master ~]# vim /opt/hadoop/etc/hadoop/core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://master:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/hadoop/data/tmp</value>
</property>
<property>
<name>hadoop.proxyuser.root.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.root.hosts</name>
<value>*</value>
</property>
</configuration>
hdfs-site.xml
[root@master ~]# vim /opt/hadoop/etc/hadoop/hdfs-site.xml
<configuration>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>/opt/hadoop/data/namenode</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/opt/hadoop/data/datanode</value>
</property>
</configuration>
yarn-site.xml
[root@master ~]# vim /opt/hadoop/etc/hadoop/yarn-site.xml
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>master</value>
</property>
</configuration>
mapred-site.xml
[root@master ~]# vim /opt/hadoop/etc/hadoop/mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.application.classpath</name>
<value>/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>master:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>master:19888</value>
</property>
<property>
<name>mapreduce.jobhistory.done-dir</name>
<value>/history/done</value>
</property>
<property>
<name>mapreduce.jobhistory.intermediate-done-dir</name>
<value>/history/intermediate-done</value>
</property>
</configuration>
The workers file
# Configure on the master node
echo "slave1" > /opt/hadoop/etc/hadoop/workers
echo "slave2" >> /opt/hadoop/etc/hadoop/workers
3.3. Sync the Configuration to All Nodes
# Run on the master node
scp -r /opt/hadoop slave1:/opt/
scp -r /opt/hadoop slave2:/opt/
3.4. Configure Environment Variables
# Edit ~/.bashrc on all nodes
echo 'export HADOOP_HOME=/opt/hadoop' >> ~/.bashrc
echo 'export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH' >> ~/.bashrc
source ~/.bashrc
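A quick sanity check that the variables are picked up on each node:
hadoop version # should report Hadoop 3.2.4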
3.5. Initialize and Start the Cluster
# Run on the master node
hdfs namenode -format # format HDFS (first start only)
start-dfs.sh
start-yarn.sh
# Optionally start the JobHistory server; when an MR job fails or its logs cannot
# be found, it helps with viewing the detailed job logs
# mapred --daemon start historyserver
# Verify the processes
jps # master should show NameNode/SecondaryNameNode/ResourceManager; the slaves should show DataNode/NodeManager
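Beyond jps, two cluster-level checks are worth running; the web UI ports mentioned below are the Hadoop 3 defaults:
# Both DataNodes should be reported as live
hdfs dfsadmin -report | grep 'Live datanodes'
# Both NodeManagers should be listed as RUNNING
yarn node -list
# Web UIs: NameNode at http://master:9870, ResourceManager at http://master:8088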
4. MariaDB 10.6.x Installation (master node only)
4.1. Install MariaDB
sudo vi /etc/yum.repos.d/MariaDB.repo
# Add the following:
[mariadb]
name = MariaDB
baseurl = http://yum.mariadb.org/10.6/centos7-amd64
gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
gpgcheck=1
# Install
sudo yum install -y MariaDB-server MariaDB-client
sudo systemctl start mariadb
sudo systemctl enable mariadb
# Check the installed version
mariadb --version # mariadb Ver 15.1 Distrib 10.6.19-MariaDB
4.2. Configure the Database
# Log in first (mysql -u root works on a fresh install), then set the root
# password and grant remote access:
ALTER USER 'root'@'localhost' IDENTIFIED BY 'root';
CREATE USER 'root'@'%' IDENTIFIED BY 'root';
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY 'root' WITH GRANT OPTION;
DELETE FROM mysql.global_priv WHERE User='';
FLUSH PRIVILEGES;
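To confirm that the remote-access grant works, pass a hostname so the client connects over TCP instead of the local socket:
# Should print the server version using the password set above
mysql -h master -uroot -proot -e "SELECT VERSION();"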
# Set the character encoding; add the [client] and [mysql] sections if they are missing
vim /etc/my.cnf.d/server.cnf
[client]
default-character-set=utf8
[mysql]
default-character-set=utf8
[mysqld]
init_connect='SET collation_connection = utf8_unicode_ci'
init_connect='SET NAMES utf8'
character-set-server=utf8
collation-server=utf8_unicode_ci
# Restart the service, then log in and check the character encoding
systemctl restart mariadb
mysql -uroot -proot
show variables like "%char%";
5. Hive 4.0.1 Deployment (master node only)
5.1. Install Hive
wget https://archive.apache.org/dist/hive/hive-4.0.1/apache-hive-4.0.1-bin.tar.gz
tar -zxvf apache-hive-4.0.1-bin.tar.gz -C /opt/
sudo mv /opt/apache-hive-4.0.1-bin /opt/hive
5.2. Configure Environment Variables
echo 'export HIVE_HOME=/opt/hive' >> ~/.bashrc
echo 'export PATH=$HIVE_HOME/bin:$PATH' >> ~/.bashrc
source ~/.bashrc
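A quick check that the Hive binaries resolve on the PATH:
hive --version # should report Hive 4.0.1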
5.3. Edit the Configuration File
hive-site.xml
[root@master ~]# vim /opt/hive/conf/hive-site.xml # not shipped by default; create it
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://localhost:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false</value>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.cj.jdbc.Driver</value>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>root</value>
</property>
<!-- Username and password for Hive client access -->
<property>
<name>hive.server2.thrift.client.user</name>
<value>root</value>
</property>
<property>
<name>hive.server2.thrift.client.password</name>
<value>123456</value>
</property>
<property>
<name>hive.metastore.schema.verification</name>
<value>false</value>
</property>
</configuration>
5.4. Initialize the Metastore
# Download the MySQL JDBC driver
wget https://downloads.mysql.com/archives/get/p/3/file/mysql-connector-j-8.0.33.tar.gz
tar -zxvf mysql-connector-j-8.0.33.tar.gz
cp mysql-connector-j-8.0.33/mysql-connector-j-8.0.33.jar $HIVE_HOME/lib/
# Initialize the metastore database
schematool -dbType mysql -initSchema
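If initialization succeeded, the hive database in MariaDB should now contain the metastore tables; a quick check:
# Lists metastore tables such as TBLS, DBS, COLUMNS_V2
mysql -uroot -proot -e "USE hive; SHOW TABLES;" | head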
5.5. Create the Hive Warehouse Directory and Set Permissions
hadoop fs -mkdir -p /user/hive/warehouse/
hadoop fs -chown root:root /user/hive/warehouse
5.6. Log In to Hive and Test
# Start the HiveServer2 service in the background
hive --service hiveserver2 &
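# HiveServer2 can take a minute or two to come up; before connecting with
# beeline, optionally confirm that port 10000 is listening
ss -tnlp | grep 10000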
# Connection test
[root@master ~]# beeline -u jdbc:hive2://master:10000 -n root
0: jdbc:hive2://master:10000> create database lx_db;
0: jdbc:hive2://master:10000> show databases;
+----------------+
| database_name |
+----------------+
| default |
| lx_db |
+----------------+
2 rows selected (0.264 seconds)
0: jdbc:hive2://master:10000> !quit
6. Sqoop 1.4.7 Installation (master node only)
6.1. Install Sqoop
wget https://archive.apache.org/dist/sqoop/1.4.7/sqoop-1.4.7.bin__hadoop-2.6.0.tar.gz
tar -zxvf sqoop-1.4.7.bin__hadoop-2.6.0.tar.gz -C /opt/
sudo mv /opt/sqoop-1.4.7.bin__hadoop-2.6.0 /opt/sqoop
6.2. Configure Environment Variables
echo 'export SQOOP_HOME=/opt/sqoop' >> ~/.bashrc
echo 'export PATH=$SQOOP_HOME/bin:$PATH' >> ~/.bashrc
source ~/.bashrc
6.3. Add the Database Driver and Hive Execution Dependencies
cd $HIVE_HOME/lib
cp mysql-connector-j-8.0.33.jar hive-exec-4.0.1.jar hive-common-4.0.1.jar $SQOOP_HOME/lib/
6.4. Test Sqoop
sqoop list-databases --connect jdbc:mysql://master:3306 --username root --password root
7. Offline Data Preprocessing in Hive
# 1. Upload the 良信电器售后数据.csv data file to the home directory on the CentOS system
# 2. Create a table in Hive for parsing the uploaded data
use lx_db;
CREATE TABLE IF NOT EXISTS service_records (
work_order_id STRING COMMENT '工单唯一标识,如 XC****0001',
check_in_time TIMESTAMP COMMENT '工程师打卡时间,格式:yyyy-MM-dd HH:mm:ss',
engineer_name STRING COMMENT '售后工程师姓名(脱敏,如 Zou**)',
service_address STRING COMMENT '服务地址(脱敏,如 浙江省杭州市**运达风电)',
service_city STRING COMMENT '服务城市名称',
industry STRING COMMENT '所属行业分类',
region_assigned STRING COMMENT '编制人员所属大区',
region STRING COMMENT '服务所属大区',
office STRING COMMENT '所属办事处',
product_line STRING COMMENT '大产品线分类',
sub_product_line STRING COMMENT '子产品线分类',
longitude DOUBLE COMMENT '经度坐标',
latitude DOUBLE COMMENT '纬度坐标',
customer_name STRING COMMENT '终端客户名称(脱敏)',
check_in_status STRING COMMENT '打卡状态(如 服务中)'
)
COMMENT '良信电器售后数据'
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
STORED AS TEXTFILE;
# 3. Load the data file into the Hive table
# Note: the header row must be deleted first; the plain comma delimiter also
# assumes no field contains embedded commas
sed -i "1d" 良信电器售后数据.csv
# Load the data into Hive
LOAD DATA LOCAL INPATH '/root/良信电器售后数据.csv' INTO TABLE service_records;
# 4. View the parsed results
select * from service_records limit 10;
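As a further sanity check, the table's row count should match the file's line count (the header row having been removed before loading):
# Compare the CSV line count with the Hive row count
wc -l /root/良信电器售后数据.csv
beeline -u jdbc:hive2://master:10000 -n root -e "SELECT COUNT(*) FROM lx_db.service_records;"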
# 5. Preprocess the data according to your own business needs; skip this step if no processing is required
8. Export the Preprocessed Results to MySQL with Sqoop
# In MySQL, create a table whose structure matches the Hive table
CREATE DATABASE lx_db default charset=utf8;
CREATE TABLE lx_db.service_records (
work_order_id VARCHAR(50) COMMENT '工单唯一标识',
check_in_time TIMESTAMP COMMENT '打卡时间',
engineer_name VARCHAR(100) COMMENT '工程师姓名',
service_address VARCHAR(200) COMMENT '服务地址',
service_city VARCHAR(50) COMMENT '服务城市',
industry VARCHAR(50) COMMENT '所属行业',
region_assigned VARCHAR(50) COMMENT '编制所属大区',
region VARCHAR(50) COMMENT '所属大区',
office VARCHAR(50) COMMENT '所属办事处',
product_line VARCHAR(50) COMMENT '大产品线',
sub_product_line VARCHAR(50) COMMENT '子产品线',
longitude DOUBLE COMMENT '经度',
latitude DOUBLE COMMENT '纬度',
customer_name VARCHAR(100) COMMENT '终端客户',
check_in_status VARCHAR(20) COMMENT '打卡状态'
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
# Export the data to MySQL with Sqoop
sqoop export \
--connect "jdbc:mysql://master:3306/lx_db" \
--username root \
--password root \
--table service_records \
--export-dir /user/hive/warehouse/lx_db.db/service_records \
--input-fields-terminated-by ',' \
--input-lines-terminated-by '\n' \
--input-optionally-enclosed-by '\"' \
--input-null-string '\\N' \
--input-null-non-string '\\N'
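If the export finishes without failed map tasks, the row count in MySQL should match the Hive table:
# Verify the exported row count
mysql -uroot -proot -e "SELECT COUNT(*) FROM lx_db.service_records;"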