本文主要是【RAG】——RAG(检索增强生成)的文章,如果有什么需要改进的地方还请大佬指出🙏
🎬作者简介:大家好,我是听风与他🥇
📖博客首页:CSDN主页听风与他
🌄每日一句:狠狠沉淀,顶峰相见
结合检索系统和生成模型。能利用最新信息,提高答案质量,具有更好的可解释性和适应性。简单来说,就是实时更新检索库。
pip install langchain openai weaviate-client
import os

# BUG FIX: a plain Python assignment (OPENAI_API_KEY = "...") only creates a
# local variable — LangChain's OpenAI integrations read the key from the
# process environment, so it must be set via os.environ.
os.environ["OPENAI_API_KEY"] = "此处添openai的api_key"
# --- Step 1: load the source document and cut it into overlapping chunks ---
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter

# Read the raw text file that will serve as the retrieval corpus.
docs = TextLoader('./a.txt').load()

# 1024-char chunks with 128-char overlap so context is not lost at boundaries.
splitter = CharacterTextSplitter(chunk_size=1024, chunk_overlap=128)
chunks = splitter.split_documents(docs)
# --- Step 2: embed the chunks and index them in an embedded Weaviate ---
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Weaviate
import weaviate
from weaviate.embedded import EmbeddedOptions

# EmbeddedOptions() starts a local, in-process Weaviate — no server needed.
weaviate_client = weaviate.Client(embedded_options=EmbeddedOptions())

# Vectorize every chunk with OpenAI embeddings and store it in Weaviate.
# by_text=False: index by the precomputed embedding, not raw-text search.
vectorstore = Weaviate.from_documents(
    client=weaviate_client,
    documents=chunks,
    embedding=OpenAIEmbeddings(),
    by_text=False,
)

# Expose the index through LangChain's retriever interface.
retriever = vectorstore.as_retriever()
# --- Step 3: define the RAG prompt and the chat model ---
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.output_parser import StrOutputParser

# Prompt shown to the model: answer strictly from the retrieved context,
# and admit ignorance otherwise. (User-facing Chinese text kept verbatim.)
template = """你是一个问答机器人助手,请使用以下检索到的上下文来回答问题,如果你不知道答案,就说你不知道。问题是:{question},上下文: {context},答案是:
"""
prompt = ChatPromptTemplate.from_template(template)

# temperature=0 makes the answers deterministic.
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
# --- Step 4: assemble the pipeline: retrieve -> prompt -> LLM -> plain text ---
# The dict fans the input out: the retriever fills {context}, while the
# question itself passes through untouched into {question}.
rag_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)

question = "萧炎的表妹是谁?"
answer = rag_chain.invoke(question)
print(f'答案:{answer}')