Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
J
jinchat-server
概览
概览
详情
活动
周期分析
版本库
存储库
文件
提交
分支
标签
贡献者
分支图
比较
统计图
问题
0
议题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程表
图表
维基
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
aigc-pioneer
jinchat-server
Commits
0234a95f
提交
0234a95f
authored
4月 15, 2023
作者:
imClumsyPanda
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
update README.md and README_en.md
上级
abb1bc89
显示空白字符变更
内嵌
并排
正在显示
4 个修改的文件
包含
0 行增加
和
168 行删除
+0
-168
__main__.py
agent/__main__.py
+0
-18
chatglm_with_shared_memory_openai_llm.py
agent/chatglm_with_shared_memory_openai_llm.py
+0
-128
ceshi-dev.txt
ceshi-dev.txt
+0
-0
test_chatglm_with_shared_memory_openai_llm.py.py
test/test_chatglm_with_shared_memory_openai_llm.py.py
+0
-22
没有找到文件。
agent/__main__.py
deleted
100644 → 0
浏览文件 @
abb1bc89
# Runnable entry point: exercises the agent chain with a few example prompts.
# Explicit import instead of the previous wildcard `import *` — this module
# only needs the one class.
from .chatglm_with_shared_memory_openai_llm import ChatglmWithSharedMemoryOpenaiLLM

if __name__ == "__main__":
    # Create an instance of ChatglmWithSharedMemoryOpenaiLLM (loads models,
    # builds the retrieval indexes and the agent chain).
    chatglm_instance = ChatglmWithSharedMemoryOpenaiLLM()

    # Run some sample inputs through the agent chain, in conversation order.
    sample_inputs = (
        "我跟露露聊了什么?",
        "她开心吗?",
        "她有表达意见吗?",
        "根据历史对话总结下?",
        """可以拓展下吗?,比如写个小作文。
大纲:游戏的美好回忆,触不可及的距离,不在乎得失
主题:露露的陪伴无比珍贵
背景:游戏,通话,当下
开篇需要以游戏相识你挑逗的话语讲起
""",
    )
    for prompt in sample_inputs:
        chatglm_instance.agent_chain.run(input=prompt)
agent/chatglm_with_shared_memory_openai_llm.py
deleted
100644 → 0
浏览文件 @
abb1bc89
import
torch
from
langchain.agents
import
ZeroShotAgent
,
Tool
,
AgentExecutor
from
langchain.llms
import
OpenAI
from
langchain.memory
import
ConversationBufferMemory
,
ReadOnlySharedMemory
from
langchain.chains
import
LLMChain
,
RetrievalQA
from
langchain.embeddings.huggingface
import
HuggingFaceEmbeddings
from
langchain.prompts
import
PromptTemplate
from
langchain.text_splitter
import
CharacterTextSplitter
from
langchain.vectorstores
import
Chroma
from
langchain.document_loaders
import
TextLoader
from
models
import
ChatGLM
import
sentence_transformers
import
os
import
readline
from
pathlib
import
Path
class ChatglmWithSharedMemoryOpenaiLLM:
    """Conversational agent combining a local ChatGLM model with an OpenAI LLM.

    Two document collections ("state of search" and "state of history") are
    indexed into Chroma and exposed as RetrievalQA tools backed by ChatGLM,
    alongside a summary chain; a ZeroShotAgent planned by OpenAI dispatches
    between the tools while all of them share one conversation memory.
    """

    def __init__(self, params: dict = None):
        """Build the whole pipeline.

        Recognized ``params`` keys (all optional):
            embedding_model:     short name of the embedding model (default 'text2vec')
            vector_search_top_k: retriever top-k (default 6) — NOTE(review): stored
                                 but never passed to the retrievers below; confirm intent
            llm_model:           short name of the ChatGLM checkpoint (default 'chatglm-6b')
            llm_history_len:     history turns kept by ChatGLM (default 10)
            use_cuda:            run the embedding model on GPU when True (default False)
        """
        params = params or {}
        self.embedding_model = params.get('embedding_model', 'text2vec')
        self.vector_search_top_k = params.get('vector_search_top_k', 6)
        self.llm_model = params.get('llm_model', 'chatglm-6b')
        self.llm_history_len = params.get('llm_history_len', 10)
        self.device = 'cuda' if params.get('use_cuda', False) else 'cpu'

        # Hugging Face model ids keyed by the short names accepted in `params`.
        self._embedding_model_dict = {
            "ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
            "ernie-base": "nghuyong/ernie-3.0-base-zh",
            "text2vec": "GanymedeNil/text2vec-large-chinese",
        }
        self._llm_model_dict = {
            "chatglm-6b-int4-qe": "THUDM/chatglm-6b-int4-qe",
            "chatglm-6b-int4": "THUDM/chatglm-6b-int4",
            "chatglm-6b": "THUDM/chatglm-6b",
        }

        self.init_cfg()
        self.init_docsearch()
        self.init_state_of_history()
        # NOTE: the attribute name `summry_chain` (sic) is kept as-is because
        # other code refers to it by this spelling.
        self.summry_chain, self.memory = self.agents_answer()
        self.agent_chain = self.create_agent_chain()

    def init_cfg(self):
        """Load the ChatGLM checkpoint and the sentence-transformer embeddings."""
        self.chatglm = ChatGLM()
        self.chatglm.load_model(model_name_or_path=self._llm_model_dict[self.llm_model])
        self.chatglm.history_len = self.llm_history_len
        self.embeddings = HuggingFaceEmbeddings(
            model_name=self._embedding_model_dict[self.embedding_model],
        )
        # Replace the default client so the torch device is pinned explicitly.
        self.embeddings.client = sentence_transformers.SentenceTransformer(
            self.embeddings.model_name, device=self.device,
        )

    def _build_doc_qa(self, filename, chunk_size, collection_name):
        """Index ``content/<filename>`` and return a RetrievalQA chain over it.

        Shared implementation for init_docsearch / init_state_of_history:
        load the text file, split it into ``chunk_size``-character chunks,
        embed them into a Chroma collection, and wrap the retriever in a
        "stuff"-type RetrievalQA chain driven by the local ChatGLM model.
        """
        doc_path = str(Path.cwd() / "content" / filename)
        documents = TextLoader(doc_path).load()
        text_splitter = CharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=0)
        texts = text_splitter.split_documents(documents)
        docsearch = Chroma.from_documents(texts, self.embeddings, collection_name=collection_name)
        return RetrievalQA.from_chain_type(
            llm=self.chatglm, chain_type="stuff", retriever=docsearch.as_retriever(),
        )

    def init_docsearch(self):
        """Build the QA chain over the search-notes document."""
        self.state_of_search = self._build_doc_qa("state_of_the_search.txt", 1000, "state-of-search")

    def init_state_of_history(self):
        """Build the QA chain over the conversation-history document."""
        self.state_of_history = self._build_doc_qa("state_of_the_history.txt", 100, "state-of-history")

    def agents_answer(self):
        """Create the summary chain and the shared conversation memory.

        Returns:
            (summry_chain, memory): the chain reads the memory through a
            ReadOnlySharedMemory wrapper, so using it as a tool cannot
            modify the conversation history.
        """
        template = """This is a conversation between a human and a bot:
{chat_history}
Write a summary of the conversation for {input}:
"""
        prompt = PromptTemplate(input_variables=["input", "chat_history"], template=template)
        memory = ConversationBufferMemory(memory_key="chat_history")
        readonlymemory = ReadOnlySharedMemory(memory=memory)
        summry_chain = LLMChain(
            llm=self.chatglm,
            prompt=prompt,
            verbose=True,
            memory=readonlymemory,  # read-only: the tool must not mutate history
        )
        return summry_chain, memory

    def create_agent_chain(self):
        """Wire the QA and summary tools into an OpenAI-planned agent executor."""
        tools = [
            Tool(
                name="State of Search QA System",
                func=self.state_of_search.run,
                description="当您需要搜索有关问题时非常有用。输入应该是一个完整的问题。",
            ),
            Tool(
                name="state-of-history-qa",
                func=self.state_of_history.run,
                description="跟露露的历史对话 - 当提出我们之间发生了什么事请时,这里面的回答是很有用的",
            ),
            Tool(
                name="Summary",
                func=self.summry_chain.run,
                description="useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.",
            ),
        ]

        prefix = """你需要充当一个倾听者,尽量回答人类的问题,你可以使用这里工具,它们非常有用:"""
        suffix = """Begin!
{chat_history}
Question: {input}
{agent_scratchpad}"""
        prompt = ZeroShotAgent.create_prompt(
            tools,
            prefix=prefix,
            suffix=suffix,
            input_variables=["input", "chat_history", "agent_scratchpad"],
        )
        # The planner LLM is OpenAI (temperature=0 for deterministic planning);
        # the tools themselves run on the local ChatGLM model.
        llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
        agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
        return AgentExecutor.from_agent_and_tools(
            agent=agent, tools=tools, verbose=True, memory=self.memory,
        )
ceshi-dev.txt
deleted
100644 → 0
浏览文件 @
abb1bc89
test/test_chatglm_with_shared_memory_openai_llm.py.py
deleted
100644 → 0
浏览文件 @
abb1bc89
import sys
import os

# Make the repository root importable when this file is run directly from the
# test/ directory (os.path.join instead of fragile string concatenation).
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir))

from agent import ChatglmWithSharedMemoryOpenaiLLM

if __name__ == "__main__":
    # Create an instance of ChatglmWithSharedMemoryOpenaiLLM.
    chatglm_instance = ChatglmWithSharedMemoryOpenaiLLM()

    # Run some sample inputs through the agent chain.
    chatglm_instance.agent_chain.run(input="我跟露露聊了什么?")
    chatglm_instance.agent_chain.run(input="她开心吗?")
    chatglm_instance.agent_chain.run(input="她有表达意见吗?")
    chatglm_instance.agent_chain.run(input="根据历史对话总结下?")
    # chatglm_instance.agent_chain.run(input="""可以拓展下吗?,比如写个小作文。
    # 大纲:游戏的美好回忆,触不可及的距离,不在乎得失
    # 主题:露露的陪伴无比珍贵
    # 背景:游戏,通话,当下
    # 开篇需要以游戏相识你挑逗的话语讲起
    # """)
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论