Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
J
jinchat-server
概览
概览
详情
活动
周期分析
版本库
存储库
文件
提交
分支
标签
贡献者
分支图
比较
统计图
问题
0
议题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程表
图表
维基
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
aigc-pioneer
jinchat-server
Commits
5afee735
提交
5afee735
authored
6月 10, 2023
作者:
glide-the
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
bing搜索agent
上级
b657eb24
全部展开
隐藏空白字符变更
内嵌
并排
正在显示
4 个修改的文件
包含
176 行增加
和
2 行删除
+176
-2
agent模式实验.ipynb
agent/agent模式实验.ipynb
+0
-0
custom_agent.py
agent/custom_agent.py
+128
-0
custom_search.py
agent/custom_search.py
+46
-0
model_config.py
configs/model_config.py
+2
-2
没有找到文件。
agent/agent模式
测试
.ipynb
→
agent/agent模式
实验
.ipynb
浏览文件 @
5afee735
差异被折叠。
点击展开。
agent/custom_agent.py
0 → 100644
浏览文件 @
5afee735
from
langchain.agents
import
Tool
from
langchain.tools
import
BaseTool
from
langchain
import
PromptTemplate
,
LLMChain
from
agent.custom_search
import
DeepSearch
from
langchain.agents
import
BaseSingleActionAgent
,
AgentOutputParser
,
LLMSingleActionAgent
,
AgentExecutor
from
typing
import
List
,
Tuple
,
Any
,
Union
,
Optional
,
Type
from
langchain.schema
import
AgentAction
,
AgentFinish
from
langchain.prompts
import
StringPromptTemplate
from
langchain.callbacks.manager
import
CallbackManagerForToolRun
from
langchain.base_language
import
BaseLanguageModel
import
re
# Prompt skeleton for the agent. Placeholders are filled in by
# CustomPromptTemplate.format(): {role}, {related_content},
# {background_infomation} (sic — the misspelling must match the kwargs used
# elsewhere in this module), {question_guide}, {input}, {answer_format}.
agent_template = """
你现在是一个{role}。这里是一些已知信息:
{related_content}
{background_infomation}
{question_guide}:{input}
{answer_format}
"""
class CustomPromptTemplate(StringPromptTemplate):
    """Prompt template that adapts the agent prompt depending on whether
    web-search observations are available yet.

    When no tool has run, the model is told to either answer directly or emit
    a ``DeepSearch('...')`` call; once observations exist, they are injected
    as background information and the model is asked to answer from them.

    NOTE(review): "infomation" is misspelled throughout, but the name must
    match the template text and the ``input_variables`` declared elsewhere in
    this module, so it is kept as-is.
    """

    template: str       # raw prompt skeleton (agent_template)
    tools: List[Tool]   # tools available to the agent (not used by format itself)

    def format(self, **kwargs) -> str:
        """Render the prompt.

        Expects ``intermediate_steps`` in kwargs: a list of
        ``(AgentAction, observation)`` tuples from earlier tool calls.
        All remaining kwargs are substituted into ``self.template``.
        """
        intermediate_steps = kwargs.pop("intermediate_steps")
        if len(intermediate_steps) == 0:
            # No search results yet: ask the model to answer or request a search.
            background_infomation = "\n"
            role = "傻瓜机器人"
            question_guide = "我现在有一个问题"
            answer_format = "如果你知道答案,请直接给出你的回答!如果你不知道答案,请你只回答\"DeepSearch('搜索词')\",并将'搜索词'替换为你认为需要搜索的关键词,除此之外不要回答其他任何内容。\n\n下面请回答我上面提出的问题!"
        else:
            # Include every tool observation as background.
            # (Fix: the original read only intermediate_steps[0], silently
            # dropping any later observations.)
            background_infomation = "\n\n你还有这些已知信息作为参考:\n\n"
            for _action, observation in intermediate_steps:
                background_infomation += f"{observation}\n"
            role = "聪明的 AI 助手"
            question_guide = "请根据这些已知信息回答我的问题"
            answer_format = ""
        kwargs["background_infomation"] = background_infomation
        kwargs["role"] = role
        kwargs["question_guide"] = question_guide
        kwargs["answer_format"] = answer_format
        return self.template.format(**kwargs)
class CustomSearchTool(BaseTool):
    """LangChain tool wrapper that exposes DeepSearch.search synchronously."""

    name: str = "DeepSearch"
    description: str = ""

    def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None):
        """Execute a web search for ``query`` and return the formatted results."""
        search_result = DeepSearch.search(query=query)
        return search_result

    async def _arun(self, query: str):
        """Async execution is not supported by this tool."""
        raise NotImplementedError("DeepSearch does not support async")
class CustomAgent(BaseSingleActionAgent):
    """Agent that unconditionally routes the raw input to the DeepSearch tool.

    NOTE(review): this class is not referenced elsewhere in this file —
    DeepAgent builds an LLMSingleActionAgent instead; confirm whether it is
    still needed.
    NOTE(review): no async counterpart (``aplan``) is implemented — verify
    against this langchain version's BaseSingleActionAgent before
    instantiating this class.
    """

    @property
    def input_keys(self):
        """The single input key this agent consumes."""
        return ["input"]

    def plan(
        self,
        intermedate_steps: List[Tuple[AgentAction, str]],
        **kwargs: Any
    ) -> Union[AgentAction, AgentFinish]:
        """Always return a DeepSearch action carrying the user's raw input.

        The intermediate steps are ignored: every call yields the same action.
        NOTE(review): ``intermedate_steps`` is misspelled (missing an "i");
        kept as-is since this doc-only pass must not rename parameters.
        """
        return AgentAction(tool="DeepSearch", tool_input=kwargs["input"], log="")
class CustomOutputParser(AgentOutputParser):
    """Parse raw LLM output into either a DeepSearch tool call or a final answer."""

    def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
        """Return an AgentAction when the output contains ``DeepSearch(...)``,
        otherwise an AgentFinish wrapping the stripped output as the answer.
        """
        # group(1) = tool name, group(2) = the argument inside the parentheses
        match = re.match(r'^[\s\w]*(DeepSearch)\(([^\)]+)\)', llm_output, re.DOTALL)
        # (Fix: removed a leftover debug `print(match)`.)
        # No DeepSearch(...) call found: treat the whole output as the answer.
        if not match:
            return AgentFinish(
                return_values={"output": llm_output.strip()},
                log=llm_output,
            )
        # Otherwise dispatch the matched tool with its (de-quoted) argument.
        action = match.group(1).strip()
        action_input = match.group(2).strip()
        return AgentAction(
            tool=action,
            tool_input=action_input.strip(" ").strip('"'),
            log=llm_output,
        )
class DeepAgent:
    """Facade that wires the DeepSearch tool, prompt, and LLM into a
    single-action LangChain agent executor."""

    tool_name: str = "DeepSearch"
    # Fix: the annotations below used the builtin function `any` instead of
    # typing.Any (already imported at the top of this module).
    agent_executor: Any   # AgentExecutor built in __init__
    tools: List[Tool]     # the single DeepSearch tool
    llm_chain: Any        # LLMChain built in __init__

    def query(self, related_content: str = "", query: str = ""):
        """Run the agent on ``query`` with optional ``related_content`` and
        return its answer.

        (Fix: removed a dead local ``tool_name`` that was assigned and never
        used.)
        """
        result = self.agent_executor.run(
            related_content=related_content,
            input=query,
            tool_name=self.tool_name,
        )
        return result

    def __init__(self, llm: BaseLanguageModel, **kwargs):
        """Build the tool, prompt, LLM chain, agent, and executor.

        ``kwargs`` is accepted for interface compatibility but unused.
        """
        tools = [
            Tool.from_function(
                func=DeepSearch.search,
                name="DeepSearch",
                description="",
            )
        ]
        self.tools = tools
        tool_names = [tool.name for tool in tools]
        output_parser = CustomOutputParser()
        prompt = CustomPromptTemplate(
            template=agent_template,
            tools=tools,
            input_variables=["related_content", "tool_name", "input", "intermediate_steps"],
        )
        llm_chain = LLMChain(llm=llm, prompt=prompt)
        self.llm_chain = llm_chain
        # Stop generation at "\nObservation:" so the executor can inject the
        # real tool observation itself.
        agent = LLMSingleActionAgent(
            llm_chain=llm_chain,
            output_parser=output_parser,
            stop=["\nObservation:"],
            allowed_tools=tool_names,
        )
        agent_executor = AgentExecutor.from_agent_and_tools(
            agent=agent,
            tools=tools,
            verbose=True,
        )
        self.agent_executor = agent_executor
agent/custom_search.py
0 → 100644
浏览文件 @
5afee735
import
requests
# SECURITY(review): a live RapidAPI key is hard-coded and committed to source
# control. It should be loaded from an environment variable or an untracked
# config file, and this key should be rotated since it has been published.
RapidAPIKey = "90bbe925ebmsh1c015166fc5e12cp14c503jsn6cca55551ae4"
class DeepSearch:
    """Thin client for the RapidAPI-hosted Bing web search endpoint."""

    # Fix: decorated as a staticmethod — the original had no `self` parameter,
    # so calling it on an instance would have passed the instance as `query`.
    # Class-level calls (DeepSearch.search) and Tool.from_function still work.
    @staticmethod
    def search(query: str = ""):
        """Search Bing for ``query`` and return up to six results, one
        ``"title: description"`` per line.

        Returns "" for a blank query or an empty result set, and a Chinese
        hint string when RapidAPIKey is not configured.
        """
        query = query.strip()
        if query == "":
            return ""
        if RapidAPIKey == "":
            return "请配置你的 RapidAPIKey"

        url = "https://bing-web-search1.p.rapidapi.com/search"
        querystring = {
            "q": query,
            "mkt": "zh-cn",
            "textDecorations": "false",
            "setLang": "CN",
            "safeSearch": "Off",
            "textFormat": "Raw",
        }
        headers = {
            "Accept": "application/json",
            "X-BingApis-SDK": "true",
            "X-RapidAPI-Key": RapidAPIKey,
            "X-RapidAPI-Host": "bing-web-search1.p.rapidapi.com",
        }
        # Timeout added so a stalled request cannot hang the agent forever.
        response = requests.get(url, headers=headers, params=querystring, timeout=30)
        data_list = response.json()['value']
        if len(data_list) == 0:
            return ""
        # Fix: the original looped `for i in range(6)` and raised IndexError
        # whenever the API returned fewer than six results; slicing is safe.
        # (Also removed the unused `count_index` and dead `result_str` init.)
        result_arr = [
            f"{item['name']}: {item['description']}" for item in data_list[:6]
        ]
        return "\n".join(result_arr)
configs/model_config.py
浏览文件 @
5afee735
...
...
@@ -47,7 +47,7 @@ llm_model_dict = {
"chatglm-6b"
:
{
"name"
:
"chatglm-6b"
,
"pretrained_model_name"
:
"THUDM/chatglm-6b"
,
"local_model_path"
:
None
,
"local_model_path"
:
"/media/checkpoint/chatglm-6b"
,
"provides"
:
"ChatGLM"
},
...
...
@@ -66,7 +66,7 @@ llm_model_dict = {
"vicuna-13b-hf"
:
{
"name"
:
"vicuna-13b-hf"
,
"pretrained_model_name"
:
"vicuna-13b-hf"
,
"local_model_path"
:
None
,
"local_model_path"
:
"/media/checkpoint/vicuna-13b-hf"
,
"provides"
:
"LLamaLLM"
},
...
...
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论