from langchain_community.document_loaders import PyPDFLoader
from pathlib import Path
def load_local_pdf(file_path):
    """Load a local PDF with PyPDFLoader and return its pages.

    Args:
        file_path: Path to a PDF file on disk.

    Returns:
        A list of per-page Document objects (possibly empty), or None
        when the PDF exists but could not be parsed.

    Raises:
        FileNotFoundError: If *file_path* does not exist.
    """
    if not Path(file_path).exists():
        raise FileNotFoundError(f"文件 {file_path} 不存在!")
    loader = PyPDFLoader(file_path)
    try:
        # Keep the try body minimal: only the parse step should be guarded.
        docs = loader.load()
    except Exception as e:
        # Best-effort: report the failure and let the caller handle None.
        print(f"加载失败: {str(e)}")
        return None
    if docs:
        print(f"成功加载 {len(docs)} 页 | 首页内容片段: {docs[0].page_content[:200]}...")
    else:
        # BUG FIX: the original indexed docs[0] inside the try, so an empty
        # (but readable) PDF crashed into the except branch and returned
        # None; an empty page list is now returned as-is.
        print("成功加载 0 页")
    return docs
# For the OpenAI key.
# SECURITY FIX: the original hard-coded a live API key in source control.
# Keys must come from the environment (or a secrets manager); fail fast
# with a clear error when the variable is missing so the later network
# calls do not fail with a confusing auth error.
import os
if not os.environ.get("OPENAI_API_KEY"):
    raise RuntimeError("OPENAI_API_KEY environment variable is not set")

# 1. Initialize the OpenAI chat model.
from langchain_openai.chat_models import ChatOpenAI
llm = ChatOpenAI(model_name="gpt-4o-mini")

# Smoke-test the OpenAI call before building the rest of the pipeline.
response = llm.invoke("奖惩的原则是什么?")
print(response.content)
# 2. Load the PDF document (a local employee reward/discipline policy).
from langchain_community.document_loaders import PyPDFLoader
# Alternatively, point PyPDFLoader at a URL instead of a local file:
# loader = PyPDFLoader("https://arxiv.org/pdf/2402.03216")
# docs = loader.load()
# print(docs[0].metadata)
local_docs = load_local_pdf("C:\\员工奖惩管理办法.pdf")
# 3. Split the pages into overlapping chunks.
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Recursive splitter: try paragraph breaks first, then the Chinese full
# stop, then newlines and spaces, and finally fall back to characters.
splitter = RecursiveCharacterTextSplitter(
    separators=["\n\n", "。", "\n", " ", ""],
    chunk_size=1000,
    chunk_overlap=200,          # generous overlap to keep context across chunks
    length_function=len,
    add_start_index=True,       # record each chunk's start offset in its page
)

# Apply the splitter to the loaded pages.
corpus = splitter.split_documents(local_docs)
print(f"分割后文档数: {len(corpus)} | 首块内容示例:\n{corpus[0].page_content[:200]}...")
# 4. Initialize the embedding model from a local checkpoint.
from langchain_huggingface.embeddings import HuggingFaceEmbeddings

# Local directory holding the BGE Chinese embedding model weights.
model_path = "./models/bge-large-zh-v1.5"
# Remote alternative (downloads from the Hugging Face hub):
# embedding_model = HuggingFaceEmbeddings(model_name="BAAI/bge-large-zh-v1.5", encode_kwargs={"normalize_embeddings": True})
embedding_model = HuggingFaceEmbeddings(
    model_kwargs={"local_files_only": True},        # force loading from disk, never the network
    encode_kwargs={"normalize_embeddings": True},   # unit-norm vectors for cosine similarity
    model_name=model_path,
)
# 5. Build the FAISS vector store from the chunked corpus.
from langchain_community.vectorstores import FAISS

vectordb = FAISS.from_documents(corpus, embedding_model)

# (optional) Persist the index locally — only on the first run, so an
# existing store on disk is never overwritten.
if not os.path.exists("vectorstore.db"):
    vectordb.save_local("vectorstore.db")
    print("向量数据库已保存")
# 6. Assemble the retrieval chain: retriever -> stuffed prompt -> LLM.
from langchain_core.prompts import ChatPromptTemplate
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains import create_retrieval_chain

template = """
You are a Q&A chat bot.
Use the given context only, answer the question.
<context>
{context}
</context>
Question: {input}
"""
# Prompt that stuffs the retrieved chunks into the <context> slot.
prompt = ChatPromptTemplate.from_template(template)
doc_chain = create_stuff_documents_chain(llm, prompt)
# Retriever returning the top-3 most similar chunks for each question.
retriever = vectordb.as_retriever(search_kwargs={"k": 3})
chain = create_retrieval_chain(retriever, doc_chain)
# 7. Run a question through the chain and print only the answer text.
response = chain.invoke({"input": "奖惩的原则是什么?"})
print("\n答案:", response["answer"])
# Model download: see the previous article on downloading models with huggingface-cli.