假设我通过对数脚本发现,这些表对不上。
假设检测出来是这样的(这些表存在于源端,但不存在目标端)
我们需要从源端迁移过去。
diff.txt
ads_xx1 dt=20250219
ads_xx2 dt=20250217
ads_xx2 dt=20250218
ads_xx2 dt=20250219
ads_xx3 dt=20250217
ads_xx4 dt=20250217
bak_xx1 dt=20250109
bak_xx1 dt=20250110
bak_xx1 dt=20250111
bak_xx1 dt=20250112
bak_xx1 dt=20250113
dim_e dt=20250131
dim_j dt=20250216
dim_j dt=20250217
dim_m_df dt=20250216
dim_m_df dt=20250217
dim_m_df dt=20250218
tmp_np
tmp_an dt=20250217
tmp_an dt=20250218
tmp_dhc_allcon_20250131
tmp_hj dt=20250216
tmp_hj dt=20250217
tmp_hj_2 dt=20250216
tmp_hj_2 dt=20250217
脚本
#!/bin/bash
# Purpose: migrate Hive-managed tables/partitions that exist on the source
# cluster but are missing on the target (same database name, same warehouse
# path prefix on both sides — internal tables only).
#
# Input ($1): diff file, one line per missing item:
#     <table_name> [<partition, e.g. dt=20250219>]
#   A line with no partition column means the whole table is missing.
#   NOTE(review): the input is assumed pre-filtered so a table never appears
#   both with and without a partition — mixed entries are silently skipped,
#   matching the original script's behaviour.
#
# Steps: 1) parse diff file into table -> "part part ..." map
#        2) delete stale target paths      3) distcp source -> target
#        4) MSCK REPAIR TABLE on the target to register partitions
#
# Usage: bash bushu.sh diff.txt
set -uo pipefail   # no -e: per-table failures must not abort the whole batch

# Cluster endpoints and warehouse root, hoisted so they are defined once.
readonly SOURCE_NN="hdfs://xx.xx.xx.7:8020"
readonly TARGET_NN="hdfs://xx.xx.xx.104:4007"
readonly WAREHOUSE="/apps/hive/warehouse/bigdata.db"
# Command array (not a string): a string would pass the quote characters
# literally to beeline and corrupt the JDBC URL.
readonly BEELINE_CMD=(beeline -u "jdbc:hive2://xx.xx.xx.104:7001/cfc;principal=hadoop/xx.xx.xx.104@TBDS-09T7KXLE")

# table name -> space-joined partition list ("" = whole table missing)
declare -A table_map

# add_partition <table> <partition>
# Record one diff line in table_map. Appending to an existing entry only
# happens when both the stored list and the new partition are non-empty,
# so table-level and partition-level entries never mix (original behaviour).
add_partition() {
  local table=$1 part=$2
  if [[ -v table_map["$table"] ]]; then
    local cur=${table_map[$table]}
    if [[ -n "$cur" && -n "$part" ]]; then
      table_map["$table"]="$cur $part"
    fi
  else
    table_map["$table"]="$part"
  fi
}

# build_map <diff-file>
# Parse the diff file into table_map. The `|| [[ -n "$table" ]]` keeps the
# final line even when the file has no trailing newline.
build_map() {
  local file=$1 table part
  while IFS=' ' read -r table part || [[ -n "$table" ]]; do
    if [[ -z "${table// }" ]]; then
      echo "skipping blank line" >&2
      continue
    fi
    add_partition "$table" "$part"
  done < "$file"
}

# delete_targets
# Remove each stale table/partition directory on the TARGET cluster so the
# subsequent distcp lands on a clean path. Commands run directly (no eval)
# to avoid shell re-interpretation of input-derived strings.
delete_targets() {
  local table parts part
  local -a plist
  for table in "${!table_map[@]}"; do
    parts=${table_map[$table]}
    echo "$table: $parts"
    if [[ -z "${parts// }" ]]; then
      # no partitions recorded -> the whole table directory is stale
      echo "hdfs dfs -rm -r $TARGET_NN$WAREHOUSE/$table"
      hdfs dfs -rm -r "$TARGET_NN$WAREHOUSE/$table"
    else
      IFS=' ' read -r -a plist <<< "$parts"
      for part in "${plist[@]}"; do
        echo "hdfs dfs -rm -r $TARGET_NN$WAREHOUSE/$table/$part"
        hdfs dfs -rm -r "$TARGET_NN$WAREHOUSE/$table/$part"
      done
    fi
  done
}

# run_distcp
# One distcp job per table: all missing partition dirs (or the whole table
# dir) as sources, the table dir on the target as destination. The command
# is built as an array and executed without eval.
run_distcp() {
  local table parts part src dst
  local -a cmd plist
  for table in "${!table_map[@]}"; do
    parts=${table_map[$table]}
    echo "$table: $parts"
    cmd=(hadoop distcp -skipcrccheck -i -strategy dynamic -bandwidth 30 -m 20)
    src="$SOURCE_NN$WAREHOUSE/$table"
    dst="$TARGET_NN$WAREHOUSE/$table"
    if [[ -z "${parts// }" ]]; then
      cmd+=("$src" "$dst")
    else
      IFS=' ' read -r -a plist <<< "$parts"
      for part in "${plist[@]}"; do
        cmd+=("$src/$part")
      done
      cmd+=("$dst")
    fi
    echo "${cmd[*]}"
    "${cmd[@]}"
    echo "$table 完成" >> rs.txt
  done
}

# repair_partitions
# MSCK REPAIR TABLE on the target so Hive registers the copied partitions.
repair_partitions() {
  local table
  for table in "${!table_map[@]}"; do
    echo "Repairing partitions for table: $table"
    if "${BEELINE_CMD[@]}" -e "MSCK REPAIR TABLE $table;"; then
      echo "Successfully repaired partitions for table: $table"
    else
      echo "Failed to repair partitions for table: $table"
    fi
  done
}

main() {
  local diff_file=${1:-}
  # Validate before truncating rs.txt or running destructive hdfs deletes.
  if [[ -z "$diff_file" || ! -r "$diff_file" ]]; then
    echo "usage: $0 <diff-file>" >&2
    return 1
  fi
  : > rs.txt
  build_map "$diff_file"
  echo "-----------"
  delete_targets
  run_distcp
  repair_partitions
}

main "$@"
使用
nohup bash bushu.sh diff.txt > bushu.log 2>&1 &
（脚本使用了 declare -A、[[ ]] 等 bash 特性,必须用 bash 而不是 sh 执行;重定向日志便于排查 nohup 输出。）