Commit bb822402 by imClumsyPanda

Merge branch 'master' into dev

@@ -227,6 +227,6 @@ The Web UI supports the following features:
 - [x] VUE front end
 ## Project discussion group
-![QR code](img/qr_code_27.jpg)
+![QR code](img/qr_code_28.jpg)
 🎉 langchain-ChatGLM project discussion group. If you are also interested in this project, you are welcome to join the group chat and take part in the discussion.
@@ -278,7 +278,7 @@ class LocalDocQA:
             if not one_content_segmentation:
                 text_splitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
                 docs = text_splitter.split_documents(docs)
-        if os.path.isdir(vs_path):
+        if os.path.isdir(vs_path) and os.path.isfile(vs_path + "/index.faiss"):
             vector_store = load_vector_store(vs_path, self.embeddings)
             vector_store.add_documents(docs)
         else:
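The tightened guard matters because a directory can exist without a saved index (for example after an interrupted run), in which case `load_vector_store` would fail; FAISS persists its data as `index.faiss`/`index.pkl` inside the folder. A minimal sketch of the load-or-create pattern, using langchain's `FAISS` class directly in place of the project's `load_vector_store` helper:

```python
import os
from langchain.vectorstores import FAISS

def add_or_create_store(vs_path, docs, embeddings):
    """Append docs to an existing FAISS store, or build a fresh one.

    Check for index.faiss itself, not just the directory: the folder
    may exist without a persisted index.
    """
    if os.path.isdir(vs_path) and os.path.isfile(os.path.join(vs_path, "index.faiss")):
        vector_store = FAISS.load_local(vs_path, embeddings)
        vector_store.add_documents(docs)
    else:
        vector_store = FAISS.from_documents(docs, embeddings)
    vector_store.save_local(vs_path)
    return vector_store
```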
@@ -298,7 +298,10 @@ class LocalDocQA:
         vector_store.score_threshold = self.score_threshold
         related_docs_with_score = vector_store.similarity_search_with_score(query, k=self.top_k)
         torch_gc()
-        prompt = generate_prompt(related_docs_with_score, query)
+        if len(related_docs_with_score) > 0:
+            prompt = generate_prompt(related_docs_with_score, query)
+        else:
+            prompt = query
         for answer_result in self.llm.generatorAnswer(prompt=prompt, history=chat_history,
                                                       streaming=streaming):
......
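Without this fallback, `generate_prompt` is called with an empty result list whenever the score threshold filters out every retrieved document, wrapping the question in a knowledge-base template that contains no knowledge. A hedged sketch of the intent; `PROMPT_TEMPLATE` and `build_prompt` here are illustrative stand-ins, not the project's actual template, and the `(doc, score)` tuples follow langchain's standard `similarity_search_with_score` return type:

```python
# Illustrative stand-in for the project's prompt template.
PROMPT_TEMPLATE = ("Known information:\n{context}\n\n"
                   "Answer the question based on the known information: {question}")

def build_prompt(related_docs_with_score, query):
    # Fall back to the bare query when retrieval finds nothing,
    # instead of formatting an empty-context template.
    if len(related_docs_with_score) > 0:
        context = "\n".join(doc.page_content for doc, _score in related_docs_with_score)
        return PROMPT_TEMPLATE.format(context=context, question=query)
    return query
```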
@@ -52,7 +52,7 @@ class ChatGLM(BaseAnswer, LLM, ABC):
         for inum, (stream_resp, _) in enumerate(self.checkPoint.model.stream_chat(
                 self.checkPoint.tokenizer,
                 prompt,
-                history=history[-self.history_len:-1] if self.history_len > 0 else [],
+                history=history[-self.history_len:] if self.history_len > 0 else [],
                 max_length=self.max_token,
                 temperature=self.temperature
         )):
......
@@ -55,7 +55,7 @@ class MOSSLLM(BaseAnswer, LLM, ABC):
                         history: List[List[str]] = [],
                         streaming: bool = False):
         if len(history) > 0:
-            history = history[-self.history_len:-1] if self.history_len > 0 else []
+            history = history[-self.history_len:] if self.history_len > 0 else []
             prompt_w_history = str(history)
             prompt_w_history += '<|Human|>: ' + prompt + '<eoh>'
         else:
......
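The ChatGLM and MOSSLLM hunks fix the same off-by-one: slicing with `[-n:-1]` excludes the last element, so the most recent turn was silently dropped from the context window along with the old ones. A quick illustration of the two slices:

```python
history = [["q1", "a1"], ["q2", "a2"], ["q3", "a3"]]
n = 2  # history_len: keep the two most recent turns

# Old slice: the -1 end bound also drops the newest turn.
print(history[-n:-1])  # [['q2', 'a2']]

# Fixed slice: keeps the n most recent turns, newest included.
print(history[-n:])    # [['q2', 'a2'], ['q3', 'a3']]
```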
@@ -87,7 +87,7 @@ def get_answer(query, vs_path, history, mode, score_threshold=VECTOR_SEARCH_SCOR
             yield history + [[query,
                               "请选择知识库后进行测试,当前未选择知识库。"]], ""
         else:
-            for answer_result in local_doc_qa.llm.generatorAnswer(prompt=query, history=history[:-1],
+            for answer_result in local_doc_qa.llm.generatorAnswer(prompt=query, history=history,
                                                                   streaming=streaming):
                 resp = answer_result.llm_output["answer"]
                 history = answer_result.history
......
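With the history now passed through untrimmed, the UI callback just consumes whatever `generatorAnswer` yields. A minimal sketch of that streaming consumer; `generatorAnswer`, `llm_output["answer"]`, and `answer_result.history` follow the interface shown in the hunks above, while the final `history[-1][-1]` update is an assumption about how the chat widget is refreshed:

```python
def stream_chat_answer(local_doc_qa, query, history, streaming=True):
    """Yield (history, status) pairs as the LLM streams its answer."""
    for answer_result in local_doc_qa.llm.generatorAnswer(prompt=query,
                                                          history=history,
                                                          streaming=streaming):
        resp = answer_result.llm_output["answer"]  # partial or full answer text
        history = answer_result.history            # history maintained by the model wrapper
        history[-1][-1] = resp                     # overwrite the last turn's answer in place
        yield history, ""
```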