python使用selenium,实现简单爬虫功能

发布于:2024-09-05 ⋅ 阅读:(40) ⋅ 点赞:(0)

有个朋友想爬取一些数据,让我帮忙搞下,我也比较菜,不怎么用python就随便搜了点资料尝试下。

环境

idea,python 3.11.0(对应后文的安装目录 C:\Python311)

edge浏览器(谷歌也可以),都需要在python的安装目录下存放驱动。

使用edge浏览器,当浏览器更新时,需要更新edgedriver驱动

https://developer.microsoft.com/en-us/microsoft-edge/tools/webdriver/#downloads

然后在页面搜索版本号,比如127.0.2651.98
下载x86的,放在C:\Python311(python的安装目录) 修改文件名为MicrosoftWebDriver.exe

代码

data_model类,主要做数据的导出用的
import xlwt
# Cell "type" values (see Cell.write):
# FORMULA writes an xlwt formula, NORMAL writes a plain literal value.
FORMULA = 1
NORMAL = 0

class Cell:
    """One spreadsheet cell for an xlwt sheet.

    Knows its 0-based position, an optional merge span, and how to write
    itself either as a plain value (type == NORMAL) or as a formula
    (type == FORMULA).
    """

    def __init__(self, sheet, value, type=0, row_index=0, column_index=0, merge_row=0, merge_column=0):
        self.sheet = sheet
        self.row_index = row_index
        self.column_index = column_index
        # Excel-style coordinates are 1-based ("A1"); indices are 0-based.
        self.row_name = row_index + 1
        self.column_name = self.transfer_column(column_index + 1)
        self.merge_row = merge_row
        self.merge_column = merge_column
        # type == 0 (NORMAL) writes a value, 1 (FORMULA) writes a formula.
        self.type = type
        self.value = value

    def get_cell_location(self):
        """Return this cell's Excel-style address, e.g. "B3"."""
        return self.format_cell_location(self.column_name, self.row_name)

    def format_cell_location(self, col, row):
        """Join a column letter and a 1-based row number into "B3" form."""
        return "{col}{row}".format(col=col, row=row)

    def get_pre_cell_location(self):
        """Return the address of the cell immediately to the left."""
        return self.format_cell_location(self.transfer_column(self.column_index), self.row_name)

    def write(self):
        """Write this cell into the sheet according to its type."""
        if self.type == NORMAL:
            self.write_value(self.value)
        elif self.type == FORMULA:
            self.write_formula(self.value)

    def _merge_bounds(self):
        """Return (last_row, last_col) of the merge range, inclusive.

        merge_row/merge_column hold the merge *size*; a positive size is
        converted to an inclusive end index (size N spans N rows/columns).
        """
        last_row = self.row_index + self.merge_row
        last_col = self.column_index + self.merge_column
        if self.merge_row > 0:
            last_row -= 1
        if self.merge_column > 0:
            last_col -= 1
        return last_row, last_col

    def write_value(self, value):
        """Write *value* (stringified) as a literal, merging cells if asked."""
        print("write_value cell:", self.row_index, "-", self.column_index, "_", self.value, self.row_name)
        if self.merge_row == 0 and self.merge_column == 0:
            self.sheet.write(self.row_index, self.column_index, str(value))
        else:
            last_row, last_col = self._merge_bounds()
            self.sheet.write_merge(self.row_index, last_row, self.column_index, last_col, str(value))

    def write_formula(self, formula):
        """Write *formula* as an xlwt Formula, merging cells if asked."""
        if self.merge_row == 0 and self.merge_column == 0:
            self.sheet.write(self.row_index, self.column_index, xlwt.Formula(formula))
        else:
            last_row, last_col = self._merge_bounds()
            self.sheet.write_merge(self.row_index, last_row, self.column_index, last_col,
                                   xlwt.Formula(formula))

    def transfer_column(self, index):
        """Convert a 1-based column number to its Excel letter name.

        Uses bijective base-26: 1 -> "A", 26 -> "Z", 27 -> "AA", 52 -> "AZ",
        703 -> "AAA". Bug fix: the old loop overwrote the accumulated result
        on multiples of 26 and dropped the leading letter (26 -> "YZ",
        52 -> "Z").
        :param index: 1-based column number
        :return: column letter string ("" for index <= 0)
        """
        result = ""
        while index > 0:
            index, rem = divmod(index - 1, 26)
            result = chr(ord('A') + rem) + result
        return result



class ZhuanLiInfo:
    """All cells for the export sheet: a fixed header row (row 0) followed by
    one row per record in *data*, written out in a single pass."""

    def __init__(self, sheet, data):
        self.data = data
        self.rows = []
        self.init_title(sheet)
        self.init_rows(sheet)

    def init_title(self, sheet):
        # Header row: one column per exported field.
        titles = ["公司名称", "电子邮箱", "联系电话", "机构网址", "邮政编码",
                  "法定代表人", "机构类型", "通讯地址", "代理机构状态",
                  "代理机构成立年限", "信用等级"]
        for col, title in enumerate(titles):
            self.rows.append(Cell(sheet, title, NORMAL, 0, col))

    def init_rows(self, sheet):
        # Data rows start at row 1, directly below the header.
        for row, record in enumerate(self.data, start=1):
            for col, val in enumerate(record):
                self.rows.append(Cell(sheet, val, NORMAL, row, col))

    def write(self):
        # Flush every prepared cell into the sheet.
        for cell in self.rows:
            cell.write()

主类:

ddd.py 
from selenium import webdriver
from selenium.webdriver.common.by import By
import  time
import random
import xlwt
from datetime import datetime
from data_model import ZhuanLiInfo


def _dd_text(driver, label):
    """Return the text of the <dd> following the <dt> whose text equals *label*.

    Raises selenium's NoSuchElementException when the field is absent,
    exactly like the original inline find_element calls.
    """
    return driver.find_element(By.XPATH, "//dt[text()='" + label + "']/following-sibling::dd").text


def _dd_text_optional(driver, label, default=''):
    """Like _dd_text, but matches *label* as a substring and returns *default*
    instead of raising when no such field exists on the page."""
    elements = driver.find_elements(By.XPATH, '//dt[contains(text(), "' + label + '")]/following-sibling::dd')
    return elements[0].text if elements else default


def getData(driver,detail_list):
    """Open every result link on the current listing page, scrape the agency
    detail fields from the popup window, append one row per agency to
    *detail_list*, and return it.

    Fixes over the original: the builtin name ``type`` is no longer shadowed,
    and the detail window is always closed (and focus restored) even when a
    field lookup raises, instead of leaking the window and leaving the driver
    focused on it.
    """
    # All result links (<a class="name">) on the listing page.
    all_a_tags = driver.find_elements(By.CSS_SELECTOR, 'a.name')

    row = 0
    for a_tag in all_a_tags:
        try:
            a_tag.click()
        except Exception as e:
            # Any click failure (e.g. stale element after navigation) aborts the page.
            print(f"查询详情发生了其他异常: {e}")
            break
        row += 1
        print("row:"+str(row))
        # Long randomized pause to reduce the chance of anti-bot detection.
        time.sleep(random.randint(30, 50))
        # The detail opens in a new window; switch to the first non-original handle.
        original_window = driver.current_window_handle
        for handle in driver.window_handles:
            if handle != original_window:
                driver.switch_to.window(handle)
                break
        try:
            companyName = driver.find_element(
                By.XPATH, '//div[contains(@class, "box")]/h5[contains(@class, "name")]').text
            email = _dd_text(driver, '电子邮箱:')
            phone = _dd_text(driver, '联系电话:')
            webUrl = _dd_text(driver, '机构网址:')
            postCode = _dd_text(driver, '邮政编码:')
            # Either "法定代表人" or "执行事务合伙人" may be present; the latter wins.
            faren = _dd_text_optional(driver, '法定代表人:')
            faren = _dd_text_optional(driver, '执行事务合伙人:', faren)
            agency_type = _dd_text(driver, '机构类型:')
            address = _dd_text(driver, '通讯地址:')
            status = _dd_text(driver, '代理机构状态')
            years = _dd_text(driver, '代理机构成立年限')
            level = _dd_text(driver, '信用等级')
            detail_list.append(
                [companyName, email, phone, webUrl, postCode, faren, agency_type,
                 address, status, years, level])
            print(detail_list[-1])
        finally:
            driver.close()  # always close the detail window, even on a failed lookup
            driver.switch_to.window(original_window)
    return detail_list
def queryByProvince(driver):
    """Restrict the search to 广东省 (Guangdong), then submit the query form."""
    # Select the province link whose visible text matches exactly.
    for link in driver.find_elements(By.CSS_SELECTOR, '.localoffice a'):
        if link.text.strip() == '广东省':
            link.click()
            break
    # Fire the search.
    driver.find_element(By.CSS_SELECTOR, '.button-btn').click()


# --- main script: scrape every page, then export to an .xls file ---
import os

# Initialize the Edge WebDriver (requires the matching msedgedriver binary).
driver = webdriver.Edge()
# Open the target site (URL redacted in the original post).
driver.get("http://XXXX")
# Restrict the query to Guangdong province only.
queryByProvince(driver)
# Accumulated detail rows across all pages.
detail_list = []
detail_list = getData(driver, detail_list)

page = 1
while True:
    # Look for the "下一页" (next page) link and click it if present.
    hasnext = 0
    for a_page in driver.find_elements(By.CSS_SELECTOR, '.incpage a'):
        if a_page.text.strip() == '下一页':
            try:
                a_page.click()
                hasnext = 1
            except Exception as e:
                # Click failed (e.g. link disabled) — treat as "no next page".
                print(f"page发生了其他异常: {e}")
            break  # only the first matching link matters
    if not hasnext:
        break
    page += 1
    print("page:"+str(page))
    try:
        detail_list = getData(driver, detail_list)
    except Exception as e:
        # BUG FIX: the original put ``break`` in a ``finally`` clause, which
        # always runs — so the loop stopped after page 2 even on success.
        # Stop paging only when scraping actually fails.
        print(f"getData发生了其他异常: {e}")
        break

# Build a timestamped output filename, e.g. res/data-20230401143045信息1.xls
now = datetime.now()
datetime_str = now.strftime("%Y%m%d%H%M%S")
week = datetime_str + "信息"
wb = xlwt.Workbook(encoding="utf-8")
zhuanlisheet = wb.add_sheet("信息")
zhuanliObj = ZhuanLiInfo(zhuanlisheet, detail_list)
zhuanliObj.write()
os.makedirs("res", exist_ok=True)  # wb.save fails if the directory is missing
wb.save(f"res/data-{week}1.xls")
# Shut the browser down.
driver.quit()



大多数网站都有防爬机制,最好是能换ip,不行就间隔时间久点查询,我这里间隔time.sleep(random.randint(30, 50)) 30-50s查询,还是会被识别需要验证码。大家学习为主,不要随便爬取网站信息哈


网站公告

今日签到

点亮在社区的每一天
去签到