2.2.读取文档huggingFace.py
import os
# Manage the API token via a .env file
from dotenv import load_dotenv
load_dotenv(override=True)
# os.environ["HUGGINGFACEHUB_API_TOKEN"] = ''
# os.environ["TOKENIZERS_PARALLELISM"] = 'True'
#
# 1. Load: import the document loaders
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import Docx2txtLoader
from langchain.document_loaders import TextLoader

# Load the documents
base_dir = './OneFlower'  # directory that holds the source documents
documents = []
for file in os.listdir(base_dir):
    # Build the full file path
    file_path = os.path.join(base_dir, file)
    if file.endswith('.pdf'):
        loader = PyPDFLoader(file_path)
        documents.extend(loader.load())
    elif file.endswith('.docx') or file.endswith('.doc'):
        # Note: Docx2txtLoader targets .docx; legacy binary .doc files may fail to parse
        loader = Docx2txtLoader(file_path)
        documents.extend(loader.load())
    elif file.endswith('.txt'):
        loader = TextLoader(file_path)
        documents.extend(loader.load())
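
# Optional sanity check (a minimal sketch; uncomment to use): each loaded
# Document exposes the extracted text as .page_content and the source file
# in .metadata, both worth inspecting before splitting.
# for doc in documents[:2]:
#     print(doc.metadata, doc.page_content[:80])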
# 2. Split: break the documents into chunks for later embedding and vector storage
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=10)
chunked_documents = text_splitter.split_documents(documents)
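
# The splitter recursively tries separators ("\n\n", "\n", " ", "" by default)
# so each chunk stays under ~200 characters, with a 10-character overlap to
# preserve context across chunk boundaries. Optional check (uncomment to use):
# print(f"{len(documents)} documents -> {len(chunked_documents)} chunks")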
# 3. Store: embed the chunks and store them in the Qdrant vector database
from langchain.vectorstores import Qdrant
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
# from langchain.embeddings import OpenAIEmbeddings
vectorstore = Qdrant.from_documents(
    documents=chunked_documents,        # the chunked documents
    embedding=HuggingFaceEmbeddings(),  # embed with a HuggingFace embedding model
    location=":memory:",                # in-memory storage
    # path="./vectors",
    collection_name="wby_docs001",      # name of the Qdrant collection
    force_recreate=True,
)
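
# Optional smoke test (the query text is illustrative): the vector store can be
# searched directly, which helps verify the embeddings before wiring up a chain.
# hits = vectorstore.similarity_search("How often should roses be watered?", k=2)
# for hit in hits:
#     print(hit.metadata, hit.page_content[:80])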
# 4. Retrieval: prepare the model and the Retrieval chain
import logging  # logging utilities
from langchain.chat_models import ChatOpenAI  # ChatOpenAI model
from langchain.retrievers.multi_query import MultiQueryRetriever  # MultiQueryRetriever tool
from langchain.chains import RetrievalQA  # RetrievalQA chain
# Configure logging so the queries generated by MultiQueryRetriever are visible
logging.basicConfig()
logging.getLogger('langchain.retrievers.multi_query').setLevel(logging.INFO)
# Instantiate an LLM - OpenAI's GPT-3.5 (commented-out alternative)
# llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
from langchain.llms import HuggingFaceHub

# llm = HuggingFaceHub(repo_id='google/flan-t5-xl')
llm = HuggingFaceHub(
    repo_id="google/flan-t5-xl",
    model_kwargs={"temperature": 0.8, "max_length": 512},
)
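
# HuggingFaceHub calls the hosted Inference API, so the HUGGINGFACEHUB_API_TOKEN
# loaded from .env above must be set and valid. Optional direct call
# (the prompt is illustrative; uncomment to use):
# print(llm("What is a flower?"))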
# Instantiate a MultiQueryRetriever
retriever_from_llm = MultiQueryRetriever.from_llm(retriever=vectorstore.as_retriever(), llm=llm)
# Instantiate a RetrievalQA chain
qa_chain = RetrievalQA.from_chain_type(llm, retriever=retriever_from_llm)
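
# MultiQueryRetriever has the LLM rephrase each question into several variants,
# retrieves chunks for every variant, and keeps the unique union; RetrievalQA
# (default "stuff" chain type) then packs those chunks into the prompt for the
# final answer. Optional end-to-end check (the query is illustrative):
# result = qa_chain({"query": "How should roses be cared for?"})
# print(result["result"])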
# 5. Output: the question-answering system's UI, built with Flask
from flask import Flask, request, render_template

app = Flask(__name__)  # Flask app

@app.route('/', methods=['GET', 'POST'])
def home():
    if request.method == 'POST':
        # Read the user's input as the question
        question = request.form.get('question')
        print(question)
        # RetrievalQA chain - take the question, generate an answer
        result = qa_chain({"query": question})
        # Return the model's answer to the page for rendering
        return render_template('index.html', result=result)
    return render_template('index.html')

if __name__ == "__main__":
    app.run(host='0.0.0.0', debug=True, port=5002)
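
# Running this script serves the app at http://0.0.0.0:5002 in debug mode.
# It expects a templates/index.html next to this file; a minimal sketch
# (the markup is an assumption - only the "question" field name and the
# "result" template variable are fixed by the code above):
# <form method="post">
#   <input name="question"><button type="submit">Ask</button>
# </form>
# {% if result %}<p>{{ result["result"] }}</p>{% endif %}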