In today's data-driven era, collecting and analyzing web data has become an essential skill in many fields. This article walks through how to use Python to scrape the Douban Movie Top250 list and save the results to an Excel file. The project suits Python beginners learning the basics of web scraping, and it can also help data analysts obtain useful movie data.
I. Project Overview
The Douban Movie Top250 is a highly regarded film list containing the 250 best-reviewed movies from around the world. By scraping this data we can run all sorts of interesting analyses, for example (a small sketch of the first question follows this list):
Which directors have the most films on the list?
Which years produced the highest-rated films?
How are ratings distributed across different genres?
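As a concrete taste, here is a minimal sketch answering the first question, assuming the scraper below has already produced 豆瓣电影top250.xlsx (the director sits in the fifth column of the sheet, per the header row written later):
from collections import Counter
from openpyxl import load_workbook

# Count how often each director appears in the scraped sheet
wb = load_workbook("豆瓣电影top250.xlsx")
ws = wb.active
# Column 5 holds 导演; skip the header row
directors = [row[4] for row in ws.iter_rows(min_row=2, values_only=True) if row[4]]
for name, count in Counter(directors).most_common(5):
    print(name, count)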
II. Preparation
Before starting, install the required Python libraries:
pip install requests beautifulsoup4 openpyxl
Each library plays a distinct role:
requests: sends HTTP requests and fetches page content
beautifulsoup4: parses HTML documents
openpyxl: reads and writes Excel files
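To confirm the installation worked, each of these packages exposes a __version__ attribute you can print:
import requests, bs4, openpyxl
print(requests.__version__, bs4.__version__, openpyxl.__version__)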
III. Code Walkthrough
1. Import the required libraries
import re
import requests
from bs4 import BeautifulSoup
from openpyxl import Workbook
2. Create the Excel workbook
# Create the Excel workbook
wb = Workbook()
ws = wb.active
ws.title = "豆瓣电影top250"
# Write the header row
ws.append(["排名", "电影名称", "英文名称", "其他名称", "导演", "主演", "年份", "国家", "类型", "评分", "评价人数", "简介"])
3. Set the request headers and cookies
headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
    # other headers...
}
cookies = {
    "bid": "DwhFQXCqdEQ",
    # other cookies...
}
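These values can be copied from your browser's developer tools (Network tab). If you would rather not pass them on every call, a requests.Session can carry them for you; a minimal sketch:
session = requests.Session()
session.headers.update(headers)
session.cookies.update(cookies)
response = session.get("https://movie.douban.com/top250")
print(response.status_code)  # 200 means the page came back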
4. Main scraping loop
for i in range(0, 250, 25):
    # Build the page URL (each page lists 25 movies)
    url = f"https://movie.douban.com/top250?start={i}&filter="
    # Send the request
    response = requests.get(url, headers=headers, cookies=cookies)
    soup = BeautifulSoup(response.text, "html.parser")
    for item in soup.find_all("div", class_="item"):
        # Parse the individual data fields...
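Each of the ten pages should yield exactly 25 items, so a quick sanity check inside the loop catches blocked requests or markup changes early (a sketch, not part of the original script):
items = soup.find_all("div", class_="item")
if len(items) != 25:
    print(f"Warning: page start={i} returned {len(items)} items")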
5. Parsing the data fields
Getting the movie rank
rank = item.find("em").text
Getting the movie title
title = item.find("span", class_="title").text
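Most entries also carry a second span of class "title" holding the English name; on current pages its text begins with a non-breaking-space/slash separator (an assumption about the markup worth re-checking). A guarded sketch:
title_spans = item.find_all("span", class_="title")
english_title = title_spans[1].text.replace("\xa0/\xa0", "").strip() if len(title_spans) > 1 else ""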
Getting the director and cast
info = item.find("p").get_text()
info_lines = [line.strip() for line in info.split("\n") if line.strip()]
director = ""
actors = ""
if info_lines:
    director_part = info_lines[0].replace("导演:", "").strip()
    director_parts = director_part.split("主演:")
    director = director_parts[0].strip()
    if len(director_parts) > 1:
        actors = director_parts[1].strip()
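To see what the splitting does, here it is run on an illustrative line (the names are made up; real lines follow the same "导演: ... 主演: ..." shape):
sample = "导演: 张三   主演: 李四 / 王五"  # illustrative only
parts = sample.replace("导演:", "").split("主演:")
print(parts[0].strip())  # 张三
print(parts[1].strip() if len(parts) > 1 else "")  # 李四 / 王五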
Getting the rating and vote count
rating_num = item.find("span", class_="rating_num")
rating = rating_num.text if rating_num else ""
votes_span = item.find("span", string=lambda text: text and "人评价" in text)
votes = votes_span.text.replace("人评价", "") if votes_span else ""
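If that span lookup ever breaks, a regular-expression fallback over the whole item text keeps working as long as the "人评价" suffix remains (a sketch using the already-imported re module):
m = re.search(r"(\d+)人评价", item.get_text())
votes = m.group(1) if m else ""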
6. Save the data to Excel
wb.save("豆瓣电影top250.xlsx")
print("Data saved to 豆瓣电影top250.xlsx")
IV. Complete Code
import re
import requests
from bs4 import BeautifulSoup
from openpyxl import Workbook

# Create the Excel workbook
wb = Workbook()
ws = wb.active
ws.title = "豆瓣电影top250"
# Write the header row
ws.append(["排名", "电影名称", "英文名称", "其他名称", "导演", "主演", "年份", "国家", "类型", "评分", "评价人数", "简介"])
headers = {
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "zh-CN,zh;q=0.9",
    "cache-control": "no-cache",
    "pragma": "no-cache",
    "priority": "u=0, i",
    "referer": "https://movie.douban.com/top250",
    "sec-ch-ua": "\"Google Chrome\";v=\"135\", \"Not-A.Brand\";v=\"8\", \"Chromium\";v=\"135\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Windows\"",
    "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "same-origin",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36"
}
cookies = {
    "bid": "DwhFQXCqdEQ",
    "_pk_id.100001.4cf6": "7f5a4ed7ee20a463.1744699317.",
    "ap_v": "0,6.0",
    "__utmc": "223695111",
    "__utmz": "223695111.1744699317.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)",
    "__yadk_uid": "jb67OdmeCnxkr8xj7kmcnNIblmohGNR6",
    "_pk_ses.100001.4cf6": "1",
    "__utma": "223695111.257683867.1744699317.1744699317.1744703580.2",
    "__utmb": "30149280.8.10.1744703580",
    "dbcl2": "\"288340287:KHPK9ybQcc8\"",
    "ck": "NPUI",
    "push_noty_num": "0",
    "push_doumail_num": "0",
    "__utmt": "1",
    "__utmv": "30149280.28834"
}
for i in range(0, 250, 25):
    # Build the page URL (each page lists 25 movies)
    url = f"https://movie.douban.com/top250?start={i}&filter="
    # Send the request
    response = requests.get(url, headers=headers, cookies=cookies)
    soup = BeautifulSoup(response.text, "html.parser")
    for item in soup.find_all("div", class_="item"):
        # Rank
        rank = item.find("em").text
        # Movie title
        title = item.find("span", class_="title").text
        # English title (second span.title; some entries only have one, so guard against None)
        english_span = item.find("span", class_="title").find_next_sibling("span", class_="title")
        english_title = english_span.text.replace("\xa0/\xa0", "").strip() if english_span else ""
        # Other names (strip the leading separator if present)
        other_names = item.find("span", class_="other").text.replace("\xa0/\xa0", "").strip()
        # Director and cast info
        bd = item.find("div", class_="bd")
        info = bd.find("p").get_text()
        info_lines = [line.strip() for line in info.split("\n") if line.strip()]
        # Split out director and cast
        director = ""
        actors = ""
        if info_lines:
            director_part = info_lines[0].replace("导演:", "").strip()
            director_parts = director_part.split("主演:")
            director = director_parts[0].strip()
            if len(director_parts) > 1:
                actors = director_parts[1].strip()
        # Year, country, genre
        year = country = genre = ""
        if len(info_lines) > 1:
            parts = re.split(r'\s*/\s*', info_lines[1].strip())
            year = parts[0] if len(parts) > 0 else ""
            country = parts[1] if len(parts) > 1 else ""
            genre = parts[2] if len(parts) > 2 else ""
        # Rating
        rating_num = item.find("span", class_="rating_num")
        rating = rating_num.text if rating_num else ""
        # Vote count
        votes_span = item.find("span", string=lambda text: text and "人评价" in text)
        votes = votes_span.text.replace("人评价", "") if votes_span else ""
        # One-line quote
        quote_span = item.find("p", class_="quote")
        quote = quote_span.find("span").text if quote_span and quote_span.find("span") else ""
        # Write the row to Excel
        ws.append([rank, title, english_title, other_names, director, actors, year, country, genre, rating, votes, quote])

# Save the Excel file
wb.save("豆瓣电影top250.xlsx")
print("Data saved to 豆瓣电影top250.xlsx")
V. Potential Problems and Solutions
Anti-scraping measures:
If your IP gets blocked, try routing requests through a proxy (see the sketch after this list)
Add a delay between requests:
import time, random
time.sleep(random.uniform(1, 3))
Page structure changes:
Periodically check that the CSS selectors still match
Fall back to more flexible parsing, such as regular expressions
Missing data:
Add exception handling so a missing field does not abort the whole run
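Here is the proxy sketch referenced above; the address is a placeholder, substitute a proxy you actually control:
proxies = {
    "http": "http://127.0.0.1:8080",  # placeholder address
    "https": "http://127.0.0.1:8080",
}
response = requests.get(url, headers=headers, proxies=proxies, timeout=10)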
VI. Suggested Improvements
Add exception handling:
try:
    response = requests.get(url, headers=headers, cookies=cookies)
    response.raise_for_status()
except requests.exceptions.RequestException as e:
    print(f"Request failed: {e}")
    continue
Add a data-cleaning helper:
def clean_text(text):
    return text.strip().replace("\n", "").replace("\r", "")
Show progress:
print(f"Processing page {i//25 + 1} of 10...")
Support command-line arguments:
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--output", default="豆瓣电影top250.xlsx")
args = parser.parse_args()
wb.save(args.output)
VII. Conclusion
Through this project we learned how to scrape a web page with Python and save the results to Excel. The same skill transfers to many other scenarios, such as collecting product listings or news data. Remember to respect the site's robots.txt rules, keep the request rate reasonable, and avoid putting excessive load on the target site.