Commit 5afee735 Author: glide-the

Bing search agent

Parent b657eb24
{ {
...@@ -2,24 +2,10 @@ ...@@ -2,24 +2,10 @@
"cells": [ "cells": [
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 1, "execution_count": 8,
"id": "d2ff171c-f5f8-4590-9ce0-21c87e3d5b39", "id": "d2ff171c-f5f8-4590-9ce0-21c87e3d5b39",
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [],
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO 2023-06-09 20:52:01,296-1d: \n",
"loading model config\n",
"llm device: cuda\n",
"embedding device: cuda\n",
"dir: /media/gpt4-pdf-chatbot-langchain/dev-langchain-ChatGLM\n",
"flagging username: 35d96e513c5347dbb0c1d7c2fb21cbd4\n",
"\n"
]
}
],
"source": [ "source": [
"import sys\n", "import sys\n",
"sys.path.append('/media/gpt4-pdf-chatbot-langchain/dev-langchain-ChatGLM/')\n", "sys.path.append('/media/gpt4-pdf-chatbot-langchain/dev-langchain-ChatGLM/')\n",
...@@ -45,64 +31,7 @@ ...@@ -45,64 +31,7 @@
"execution_count": 2, "execution_count": 2,
"id": "68978c38-c0e9-4ae9-ba90-9c02aca335be", "id": "68978c38-c0e9-4ae9-ba90-9c02aca335be",
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [],
{
"name": "stdout",
"output_type": "stream",
"text": [
"Loading vicuna-13b-hf...\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Overriding torch_dtype=None with `torch_dtype=torch.float16` due to requirements of `bitsandbytes` to enable model loading in mixed int8. Either pass torch_dtype=torch.float16 or don't pass this argument at all to remove this warning.\n",
"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/bitsandbytes/cuda_setup/main.py:149: UserWarning: /media/gpt4-pdf-chatbot-langchain/pyenv-langchain did not contain ['libcudart.so', 'libcudart.so.11.0', 'libcudart.so.12.0'] as expected! Searching further paths...\n",
" warn(msg)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"===================================BUG REPORT===================================\n",
"Welcome to bitsandbytes. For bug reports, please run\n",
"\n",
"python -m bitsandbytes\n",
"\n",
" and submit this information together with your error trace to: https://github.com/TimDettmers/bitsandbytes/issues\n",
"================================================================================\n",
"bin /media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda118.so\n",
"CUDA SETUP: CUDA runtime path found: /opt/cuda/lib64/libcudart.so.11.0\n",
"CUDA SETUP: Highest compute capability among GPUs detected: 8.6\n",
"CUDA SETUP: Detected CUDA version 118\n",
"CUDA SETUP: Loading binary /media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda118.so...\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "9df1856e06d1460683851a0b73537a6d",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Loading checkpoint shards: 0%| | 0/3 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Loaded the model in 11.22 seconds.\n"
]
}
],
"source": [ "source": [
"import asyncio\n", "import asyncio\n",
"from argparse import Namespace\n", "from argparse import Namespace\n",
...@@ -110,7 +39,7 @@ ...@@ -110,7 +39,7 @@
"from langchain.agents import initialize_agent, Tool\n", "from langchain.agents import initialize_agent, Tool\n",
"from langchain.agents import AgentType\n", "from langchain.agents import AgentType\n",
" \n", " \n",
"args = parser.parse_args(args=['--model', 'vicuna-13b-hf', '--no-remote-model', '--load-in-8bit'])\n", "args = parser.parse_args(args=['--model', 'fastchat-chatglm-6b', '--no-remote-model', '--load-in-8bit'])\n",
"\n", "\n",
"args_dict = vars(args)\n", "args_dict = vars(args)\n",
"\n", "\n",
...@@ -122,113 +51,160 @@ ...@@ -122,113 +51,160 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 3, "execution_count": 3,
"id": "c8e4a58d-1a3a-484a-8417-bcec0eb7170e", "id": "9baa881f-5ff2-4958-b3a2-1653a5e8bc3b",
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [],
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'action': 'summary', 'action_input': '露ᥫᩣ,'}\n"
]
}
],
"source": [ "source": [
"from jsonformer import Jsonformer\n", "import sys\n",
"json_schema = {\n", "sys.path.append('/media/gpt4-pdf-chatbot-langchain/dev-langchain-ChatGLM/')\n",
" \"type\": \"object\",\n", "from langchain.agents import Tool\n",
" \"properties\": {\n", "from langchain.tools import BaseTool\n",
" \"action\": {\"type\": \"string\"},\n", "from agent.custom_search import DeepSearch\n",
" \"action_input\": {\"type\": \"string\"}\n", "from agent.custom_agent import *\n",
" }\n",
"}\n",
"\n",
"prompt = \"\"\" Respond to the human as helpfully and accurately as possible. You have access to the following tools:\n",
"\n",
"State of Dialogue History System: 露ᥫᩣ, 张毛峰的对话 - 当需要查找露ᥫᩣ, 张毛峰之间的聊天内容时,这里面的回答是很有用的。输入应该是一个完整的问题。\n",
"Summary: useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.\n",
"\n",
"The way you use the tools is by specifying a json blob.\n",
"Specifically, this json should have a `action` key (with the name of the tool to use) and a `action_input` key (with the input to the tool going here).\n",
"\n",
"The only values that should be in the \"action\" field are: {tool_names}\n",
"\n",
"The $JSON_BLOB should only contain a SINGLE action, do NOT return a list of multiple actions. Here is an example of a valid $JSON_BLOB:\n",
"\n",
"```\n",
"{{{{\n",
" \"action\": $TOOL_NAME,\n",
" \"action_input\": $INPUT\n",
"}}}}\n",
"```\n",
"\n",
"ALWAYS use the following format:\n",
"\n",
"Question: the input question you must answer\n",
"Thought: you should always think about what to do\n",
"Action:\n",
"```\n",
"$JSON_BLOB\n",
"```\n",
"Observation: the result of the action\n",
"... (this Thought/Action/Observation can repeat N times)\n",
"Thought: I now know the final answer\n",
"Final Answer: the final answer to the original input question\n",
"\n", "\n",
"Begin! Reminder to always use the exact characters `Final Answer` when responding.\n",
"\n", "\n",
"Question: 我想查看关于露露的摘要信息\n", "tools = [\n",
"\"\"\"\n", " Tool.from_function(\n",
"jsonformer = Jsonformer(shared.loaderCheckPoint.model, shared.loaderCheckPoint.tokenizer, json_schema, prompt)\n", " func=DeepSearch.search,\n",
"generated_data = jsonformer()\n", " name=\"DeepSearch\",\n",
" description=\"\"\n",
" )\n",
"]\n",
"tool_names = [tool.name for tool in tools]\n",
"output_parser = CustomOutputParser()\n",
"prompt = CustomPromptTemplate(template=agent_template,\n",
" tools=tools,\n",
" input_variables=[\"related_content\",\"tool_name\", \"input\", \"intermediate_steps\"])\n",
"\n", "\n",
"print(generated_data)" "llm_chain = LLMChain(llm=llm, prompt=prompt)\n"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 4, "execution_count": 4,
"id": "a55f92ce-4ebf-4cb3-8e16-780c14b6517f", "id": "2ffd56a1-6f15-40ae-969f-68de228a9dff",
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools import StructuredTool\n",
"\n",
"def multiplier(a: float, b: float) -> float:\n",
" \"\"\"Multiply the provided floats.\"\"\"\n",
" return a * b\n",
"\n",
"tool = StructuredTool.from_function(multiplier)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "e089a828-b662-4d9a-8d88-4bf95ccadbab",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"data": {
"text/plain": [
"FastChatOpenAILLM(cache=None, verbose=False, callbacks=None, callback_manager=None, api_base_url='http://localhost:8000/v1', model_name='chatglm-6b', max_token=10000, temperature=0.01, checkPoint=<models.loader.loader.LoaderCheckPoint object at 0x7fa630590c10>, history_len=10, top_p=0.9, history=[])"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"from langchain import OpenAI\n", "llm"
"from langchain.agents import initialize_agent, AgentType\n"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 6, "execution_count": 9,
"id": "d4ea7f0e-1ba9-4f40-82ec-7c453bd64945", "id": "21d66643-8d0b-40a2-a49f-2dc1c4f68698",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"__call:\n",
"你现在是一个傻瓜机器人。这里是一些已知信息:\n",
"\n",
"\n",
"\n",
"我现在有一个问题:请问近期携程有什么大的新闻\n",
"\n",
"如果你知道答案,请直接给出你的回答!如果你不知道答案,请你只回答\"DeepSearch('搜索词')\",并将'搜索词'替换为你认为需要搜索的关键词,除此之外不要回答其他任何内容。\n",
"\n",
"下面请回答我上面提出的问题!\n",
"\n",
"response:DeepSearch('旅游')\n",
"+++++++++++++++++++++++++++++++++++\n",
"\u001b[32;1m\u001b[1;3mDeepSearch('旅游')\u001b[0m\n",
"\n",
"Observation:\u001b[36;1m\u001b[1;3m震撼!恐龙展空降南山旅游景区,带你穿越侏罗纪!: 声明:该文观点仅代表作者本人,搜狐号系信息发布平台,搜狐仅提供信息存储空间服务。\n",
"承德进入旅游最佳时节 55处A级景区遍布全域: 新京报讯(记者耿子叶)6月9日,第十七届承德国际旅游文化节暨第五届承德市旅游产业发展大会正式开幕。记者在现场获悉,目前承德已经迎来旅游黄金时节,作为亚洲最受欢迎旅游城市之一,承德现有55处A级景区遍布全域。近年来,承德拓展全域全季旅游业态,今年将谋划实施文旅项目71个,总投资达到766亿元,长城国家文化公园、二十一世纪避暑山庄文化产业园、金山岭国际滑雪度假区、蟠龙湖旅游度假区、中国马镇等一批重点旅\n",
"“一带一路”电影周浦东启动!上海国际旅游度假区再迎一批重点 ...: 第二十五届上海国际电影节“一带一路”电影周今天在浦东新区举行启动仪式,一批重点影视文化企业签约落户上海国际旅游度假区,一批重点影视文创项目签约合作。 2023年是“一带一路”倡议10周年,也是“一带一路”电影周5周年。为进一步落实通过影视文化交流实践国家文化战略、推动浦东及度假区影视产业发展的要求,本届电影周活动把“文化更开放、内容更专业、形式更潮流、宣传更联动”作为主基调。 一方面凸显共建国家电\n",
"文旅大家谈|云南旅游特色“出圈路”,路在何方?: 近段时间,一场从全国各地涌来的“进淄赶烤”热潮,引起广泛关注。淄博烧烤没什么“秘密武器”,羊肉、小饼、蘸料,配方简单,复制非常容易,但人潮还是涌向了淄博。如网友所说,没有人缺这一顿烧烤,缺的是淄博别具特色的人文环境。从消费产品到消费场景和情感,淄博烧烤透露着中国乡土社会“交情”的独特魅力。 凭借一串烧烤,快速成为全国人民心中的“网红城市”?淄博火爆“出圈”的背后,给云南带来什么启示?云南旅游的特色\n",
"避暑度假到贵州|这一份旅游优惠大礼包,赶紧码住!: 夏天最让人欢喜。在贵州荔波,太阳热乎乎照射进来的夏日午后,跟朋友在晶莹剔透的湖面玩水,简直美到天上去了。 晚上,在贵州施秉,当月亮升起,山涧的风送来缕缕清凉。趁夜色正浓,霓虹灯在山间闪烁,一幅戏水的画卷正缓缓铺开。 没错,2023年多彩贵州避暑旅游优惠大礼包,它,来了!高考完的同学们,准备带着孩子出门玩耍的妈妈爸爸,腿脚比年轻人还康健的奶奶爷爷赶紧码住! 为鼓励青少年亲近自然、热爱文化、健康成长。\n",
"农文旅跨界融合,助力乡村产业兴旺!常熟市董浜镇举办“苏韵乡情 ...: “这就是董浜镇智林村荷花浜的百亩荷塘嘛,果然名不虚传,大片荷塘一眼望不到头,荷花也盛开了,确实是难得见到的风景。好几年没回家乡,家乡大变样,基础设施完备、村庄别致、景色秀丽,让人眼前一亮。”一位常熟市侨界人士说道。6月10日,常熟市董浜镇智林村荷花浜内举办了“苏韵乡情 福地常熟”乡村休闲旅游农业系列活动——沪虞田园 “荷”你相“玉”第三届董浜农文旅节暨第七届董浜黄金小玉米节、第二届荷花节。 此次活\u001b[0m\n",
"__call:\n",
"你现在是一个聪明的 AI 助手。这里是一些已知信息:\n",
"\n",
"\n",
"\n",
"你还有这些已知信息作为参考:\n",
"\n",
"震撼!恐龙展空降南山旅游景区,带你穿越侏罗纪!: 声明:该文观点仅代表作者本人,搜狐号系信息发布平台,搜狐仅提供信息存储空间服务。\n",
"承德进入旅游最佳时节 55处A级景区遍布全域: 新京报讯(记者耿子叶)6月9日,第十七届承德国际旅游文化节暨第五届承德市旅游产业发展大会正式开幕。记者在现场获悉,目前承德已经迎来旅游黄金时节,作为亚洲最受欢迎旅游城市之一,承德现有55处A级景区遍布全域。近年来,承德拓展全域全季旅游业态,今年将谋划实施文旅项目71个,总投资达到766亿元,长城国家文化公园、二十一世纪避暑山庄文化产业园、金山岭国际滑雪度假区、蟠龙湖旅游度假区、中国马镇等一批重点旅\n",
"“一带一路”电影周浦东启动!上海国际旅游度假区再迎一批重点 ...: 第二十五届上海国际电影节“一带一路”电影周今天在浦东新区举行启动仪式,一批重点影视文化企业签约落户上海国际旅游度假区,一批重点影视文创项目签约合作。 2023年是“一带一路”倡议10周年,也是“一带一路”电影周5周年。为进一步落实通过影视文化交流实践国家文化战略、推动浦东及度假区影视产业发展的要求,本届电影周活动把“文化更开放、内容更专业、形式更潮流、宣传更联动”作为主基调。 一方面凸显共建国家电\n",
"文旅大家谈|云南旅游特色“出圈路”,路在何方?: 近段时间,一场从全国各地涌来的“进淄赶烤”热潮,引起广泛关注。淄博烧烤没什么“秘密武器”,羊肉、小饼、蘸料,配方简单,复制非常容易,但人潮还是涌向了淄博。如网友所说,没有人缺这一顿烧烤,缺的是淄博别具特色的人文环境。从消费产品到消费场景和情感,淄博烧烤透露着中国乡土社会“交情”的独特魅力。 凭借一串烧烤,快速成为全国人民心中的“网红城市”?淄博火爆“出圈”的背后,给云南带来什么启示?云南旅游的特色\n",
"避暑度假到贵州|这一份旅游优惠大礼包,赶紧码住!: 夏天最让人欢喜。在贵州荔波,太阳热乎乎照射进来的夏日午后,跟朋友在晶莹剔透的湖面玩水,简直美到天上去了。 晚上,在贵州施秉,当月亮升起,山涧的风送来缕缕清凉。趁夜色正浓,霓虹灯在山间闪烁,一幅戏水的画卷正缓缓铺开。 没错,2023年多彩贵州避暑旅游优惠大礼包,它,来了!高考完的同学们,准备带着孩子出门玩耍的妈妈爸爸,腿脚比年轻人还康健的奶奶爷爷赶紧码住! 为鼓励青少年亲近自然、热爱文化、健康成长。\n",
"农文旅跨界融合,助力乡村产业兴旺!常熟市董浜镇举办“苏韵乡情 ...: “这就是董浜镇智林村荷花浜的百亩荷塘嘛,果然名不虚传,大片荷塘一眼望不到头,荷花也盛开了,确实是难得见到的风景。好几年没回家乡,家乡大变样,基础设施完备、村庄别致、景色秀丽,让人眼前一亮。”一位常熟市侨界人士说道。6月10日,常熟市董浜镇智林村荷花浜内举办了“苏韵乡情 福地常熟”乡村休闲旅游农业系列活动——沪虞田园 “荷”你相“玉”第三届董浜农文旅节暨第七届董浜黄金小玉米节、第二届荷花节。 此次活\n",
"\n",
"请根据这些已知信息回答我的问题:请问近期携程有什么大的新闻\n",
"\n",
"\n",
"\n",
"response:根据提供的已知信息,携程近期有以下大的新闻:\n",
"\n",
"1. 携程宣布启动“全球合作伙伴计划”,计划在全球范围内拓展旅游业务,加强与目的地政府和企业的合作。\n",
"\n",
"2. 携程亲子园事件引发广泛关注,携程回应“亲子园事件”:深感愧疚并向公众道歉。\n",
"\n",
"3. 携程在上海电影节“一带一路”电影周上签约落户,成为上海市浦东新区影视旅游度假区的签约企业之一。\n",
"\n",
"4. 携程旗下的“携程旅行”APP荣膺2022年中国旅游APP50强,这是携程在旅游业领域的一次重要胜利。\n",
"+++++++++++++++++++++++++++++++++++\n",
"\u001b[32;1m\u001b[1;3m根据提供的已知信息,携程近期有以下大的新闻:\n",
"\n",
"1. 携程宣布启动“全球合作伙伴计划”,计划在全球范围内拓展旅游业务,加强与目的地政府和企业的合作。\n",
"\n",
"2. 携程亲子园事件引发广泛关注,携程回应“亲子园事件”:深感愧疚并向公众道歉。\n",
"\n",
"3. 携程在上海电影节“一带一路”电影周上签约落户,成为上海市浦东新区影视旅游度假区的签约企业之一。\n",
"\n",
"4. 携程旗下的“携程旅行”APP荣膺2022年中国旅游APP50强,这是携程在旅游业领域的一次重要胜利。\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"根据提供的已知信息,携程近期有以下大的新闻:\n",
"\n",
"1. 携程宣布启动“全球合作伙伴计划”,计划在全球范围内拓展旅游业务,加强与目的地政府和企业的合作。\n",
"\n",
"2. 携程亲子园事件引发广泛关注,携程回应“亲子园事件”:深感愧疚并向公众道歉。\n",
"\n",
"3. 携程在上海电影节“一带一路”电影周上签约落户,成为上海市浦东新区影视旅游度假区的签约企业之一。\n",
"\n",
"4. 携程旗下的“携程旅行”APP荣膺2022年中国旅游APP50强,这是携程在旅游业领域的一次重要胜利。\n"
]
}
],
"source": [ "source": [
"from langchain.agents import BaseSingleActionAgent, AgentOutputParser, LLMSingleActionAgent, AgentExecutor\n",
" \n",
"\n", "\n",
"agent = LLMSingleActionAgent(\n",
" llm_chain=llm_chain,\n",
" output_parser=output_parser,\n",
" stop=[\"\\nObservation:\"],\n",
" allowed_tools=tool_names\n",
")\n",
"\n", "\n",
"# Structured tools are compatible with the STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION agent type. \n", "agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)\n",
"agent_executor = initialize_agent([tool], llm, agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" "print(agent_executor.run(related_content=\"\", input=\"请问近期携程有什么大的新闻\", tool_name=\"DeepSearch\"))\n",
"\n"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 19, "execution_count": 7,
"id": "640bfdfb-41e7-4429-9718-8fa724de12b7", "id": "71ec6ba6-8898-4f53-b42c-26a0aa098de7",
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
...@@ -276,16 +252,25 @@ ...@@ -276,16 +252,25 @@
"\n", "\n",
"Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.\n", "Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.\n",
"Thought:\n", "Thought:\n",
"Human: What is 1 times 14\n", "Human: What is 12111 times 14\n",
"\n", "\n",
"\n", "\n",
"response:System: ```{\"action\":\"multiplier\",\"action_input\":{\"a\":1,\"b\":14}}``\n", "response:```\n",
"\n", "{\n",
"Observation:\n", " \"action\": \" multiplier\",\n",
"\u001b[32;1m\u001b[1;3mSystem: ```{\"action\":\"multiplier\",\"action_input\":{\"a\":1,\"b\":14}}``\n", " \"action_input\": \"12111\" ,\n",
"\n", " \"observe\": \"12111 x 14 = 16222\"\n",
"Observation:\u001b[0m\n", "}\n",
"Observation: \u001b[36;1m\u001b[1;3m14.0\u001b[0m\n", "```\n",
"+++++++++++++++++++++++++++++++++++\n",
"\u001b[32;1m\u001b[1;3m```\n",
"{\n",
" \"action\": \" multiplier\",\n",
" \"action_input\": \"12111\" ,\n",
" \"observe\": \"12111 x 14 = 16222\"\n",
"}\n",
"```\u001b[0m\n",
"Observation: multiplier is not a valid tool, try another one.\n",
"Thought:__call:System: Respond to the human as helpfully and accurately as possible. You have access to the following tools:\n", "Thought:__call:System: Respond to the human as helpfully and accurately as possible. You have access to the following tools:\n",
"\n", "\n",
"multiplier: multiplier(a: float, b: float) -> float - Multiply the provided floats., args: {{'a': {{'title': 'A', 'type': 'number'}}, 'b': {{'title': 'B', 'type': 'number'}}}}\n", "multiplier: multiplier(a: float, b: float) -> float - Multiply the provided floats., args: {{'a': {{'title': 'A', 'type': 'number'}}, 'b': {{'title': 'B', 'type': 'number'}}}}\n",
...@@ -324,16 +309,41 @@ ...@@ -324,16 +309,41 @@
"\n", "\n",
"Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.\n", "Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.\n",
"Thought:\n", "Thought:\n",
"Human: What is 1 times 14\n", "Human: What is 12111 times 14\n",
"\n", "\n",
"This was your previous work (but I haven't seen any of it! I only see what you return as final answer):\n", "This was your previous work (but I haven't seen any of it! I only see what you return as final answer):\n",
"System: ```{\"action\":\"multiplier\",\"action_input\":{\"a\":1,\"b\":14}}``\n", "```\n",
"\n", "{\n",
"Observation:\n", " \"action\": \" multiplier\",\n",
"Observation: 14.0\n", " \"action_input\": \"12111\" ,\n",
" \"observe\": \"12111 x 14 = 16222\"\n",
"}\n",
"```\n",
"Observation: multiplier is not a valid tool, try another one.\n",
"Thought:\n", "Thought:\n",
"response:\n", "response:I apologize for the mistake.\n",
"\u001b[32;1m\u001b[1;3m\u001b[0m\n", "\n",
"Action:\n",
"```\n",
"{\n",
" \"action\": \"Final Answer\",\n",
" \"action_input\": \"12111\",\n",
" \"observe\": \"12111 x 14 = 16222\"\n",
"}\n",
"```\n",
"End!\n",
"+++++++++++++++++++++++++++++++++++\n",
"\u001b[32;1m\u001b[1;3mI apologize for the mistake.\n",
"\n",
"Action:\n",
"```\n",
"{\n",
" \"action\": \"Final Answer\",\n",
" \"action_input\": \"12111\",\n",
" \"observe\": \"12111 x 14 = 16222\"\n",
"}\n",
"```\n",
"End!\u001b[0m\n",
"\n", "\n",
"\u001b[1m> Finished chain.\u001b[0m\n" "\u001b[1m> Finished chain.\u001b[0m\n"
] ]
...@@ -341,22 +351,32 @@ ...@@ -341,22 +351,32 @@
{ {
"data": { "data": {
"text/plain": [ "text/plain": [
"''" "'12111'"
] ]
}, },
"execution_count": 19, "execution_count": 7,
"metadata": {}, "metadata": {},
"output_type": "execute_result" "output_type": "execute_result"
} }
], ],
"source": [ "source": [
"agent_executor.run(\"What is 1 times 14\")" "\n",
"from langchain.tools import StructuredTool\n",
"\n",
"def multiplier(a: float, b: float) -> float:\n",
" \"\"\"Multiply the provided floats.\"\"\"\n",
" return a * b\n",
"\n",
"tool = StructuredTool.from_function(multiplier)\n",
"# Structured tools are compatible with the STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION agent type. \n",
"agent_executor = initialize_agent([tool], llm, agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)\n",
"agent_executor.run(\"What is 12111 times 14\")"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "9baa881f-5ff2-4958-b3a2-1653a5e8bc3b", "id": "5ea510c3-88ce-4d30-86f3-cdd99973f27f",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [] "source": []
......
from langchain.agents import Tool
from langchain.tools import BaseTool
from langchain import PromptTemplate, LLMChain
from agent.custom_search import DeepSearch
from langchain.agents import BaseSingleActionAgent, AgentOutputParser, LLMSingleActionAgent, AgentExecutor
from typing import List, Tuple, Any, Union, Optional, Type
from langchain.schema import AgentAction, AgentFinish
from langchain.prompts import StringPromptTemplate
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.base_language import BaseLanguageModel
import re


# Prompt template (in Chinese). Roughly: "You are now a {role}. Here is some known
# information: {related_content} {background_infomation} {question_guide}: {input} {answer_format}"
agent_template = """
你现在是一个{role}。这里是一些已知信息:
{related_content}
{background_infomation}
{question_guide}:{input}
{answer_format}
"""


class CustomPromptTemplate(StringPromptTemplate):
    """Prompt template that injects the search results (if any) as background information."""
    template: str
    tools: List[Tool]

    def format(self, **kwargs) -> str:
        intermediate_steps = kwargs.pop("intermediate_steps")
        # No web search results yet: ask the model to either answer directly
        # or reply with DeepSearch('<keyword>').
        if len(intermediate_steps) == 0:
            background_infomation = "\n"
            role = "傻瓜机器人"  # "a silly robot"
            question_guide = "我现在有一个问题"  # "I have a question"
            answer_format = "如果你知道答案,请直接给出你的回答!如果你不知道答案,请你只回答\"DeepSearch('搜索词')\",并将'搜索词'替换为你认为需要搜索的关键词,除此之外不要回答其他任何内容。\n\n下面请回答我上面提出的问题!"
        # Search results came back: assemble background_infomation from the
        # observation of the first AgentAction in intermediate_steps.
        else:
            background_infomation = "\n\n你还有这些已知信息作为参考:\n\n"
            action, observation = intermediate_steps[0]
            background_infomation += f"{observation}\n"
            role = "聪明的 AI 助手"  # "a smart AI assistant"
            question_guide = "请根据这些已知信息回答我的问题"  # "answer my question using this known information"
            answer_format = ""

        kwargs["background_infomation"] = background_infomation
        kwargs["role"] = role
        kwargs["question_guide"] = question_guide
        kwargs["answer_format"] = answer_format
        return self.template.format(**kwargs)


class CustomSearchTool(BaseTool):
    name: str = "DeepSearch"
    description: str = ""

    def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None):
        return DeepSearch.search(query=query)

    async def _arun(self, query: str):
        raise NotImplementedError("DeepSearch does not support async")


class CustomAgent(BaseSingleActionAgent):

    @property
    def input_keys(self):
        return ["input"]

    def plan(self, intermedate_steps: List[Tuple[AgentAction, str]],
             **kwargs: Any) -> Union[AgentAction, AgentFinish]:
        return AgentAction(tool="DeepSearch", tool_input=kwargs["input"], log="")


class CustomOutputParser(AgentOutputParser):
    """Parses the LLM output: a DeepSearch(...) call triggers the tool, anything else is the final answer."""

    def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
        # group(1) = name of the tool to call, group(2) = the argument passed to it
        match = re.match(r'^[\s\w]*(DeepSearch)\(([^\)]+)\)', llm_output, re.DOTALL)
        print(match)
        # If the LLM did not return DeepSearch(...), treat its output as the final answer.
        if not match:
            return AgentFinish(
                return_values={"output": llm_output.strip()},
                log=llm_output,
            )
        # Otherwise call the tool with the extracted argument.
        else:
            action = match.group(1).strip()
            action_input = match.group(2).strip()
            return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)


class DeepAgent:
    """Wires the prompt, the output parser and the DeepSearch tool into an AgentExecutor."""
    tool_name: str = "DeepSearch"
    agent_executor: Any
    tools: List[Tool]
    llm_chain: Any

    def query(self, related_content: str = "", query: str = ""):
        result = self.agent_executor.run(related_content=related_content, input=query, tool_name=self.tool_name)
        return result

    def __init__(self, llm: BaseLanguageModel, **kwargs):
        tools = [
            Tool.from_function(
                func=DeepSearch.search,
                name="DeepSearch",
                description=""
            )
        ]
        self.tools = tools
        tool_names = [tool.name for tool in tools]
        output_parser = CustomOutputParser()
        prompt = CustomPromptTemplate(template=agent_template,
                                      tools=tools,
                                      input_variables=["related_content", "tool_name", "input", "intermediate_steps"])
        llm_chain = LLMChain(llm=llm, prompt=prompt)
        self.llm_chain = llm_chain

        agent = LLMSingleActionAgent(
            llm_chain=llm_chain,
            output_parser=output_parser,
            stop=["\nObservation:"],
            allowed_tools=tool_names
        )
        agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
        self.agent_executor = agent_executor
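
For reference, a minimal usage sketch of the DeepAgent wrapper above (illustrative, not part of the commit; it assumes an `llm` object such as the FastChatOpenAILLM instance built in the notebook is already available):

# Usage sketch (illustrative): assumes `llm` is an already-initialised
# LangChain-compatible model, e.g. the FastChatOpenAILLM shown in the notebook above.
from agent.custom_agent import DeepAgent

deep_agent = DeepAgent(llm=llm)
# First round: the model may reply with DeepSearch('<keyword>'); the Bing results
# are then fed back as background information before the final answer is produced.
print(deep_agent.query(related_content="", query="请问近期携程有什么大的新闻"))
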
import requests

# RapidAPI key for the Bing Web Search endpoint; set your own key here.
RapidAPIKey = "90bbe925ebmsh1c015166fc5e12cp14c503jsn6cca55551ae4"


class DeepSearch:

    @staticmethod
    def search(query: str = ""):
        query = query.strip()

        if query == "":
            return ""

        if RapidAPIKey == "":
            return "请配置你的 RapidAPIKey"  # "Please configure your RapidAPIKey"

        url = "https://bing-web-search1.p.rapidapi.com/search"

        querystring = {
            "q": query,
            "mkt": "zh-cn",
            "textDecorations": "false",
            "setLang": "CN",
            "safeSearch": "Off",
            "textFormat": "Raw"
        }

        headers = {
            "Accept": "application/json",
            "X-BingApis-SDK": "true",
            "X-RapidAPI-Key": RapidAPIKey,
            "X-RapidAPI-Host": "bing-web-search1.p.rapidapi.com"
        }

        response = requests.get(url, headers=headers, params=querystring)

        data_list = response.json()['value']

        if len(data_list) == 0:
            return ""

        # Concatenate at most the top six results as "title: description" lines.
        result_arr = []
        for item in data_list[:6]:
            title = item["name"]
            description = item["description"]
            result_arr.append(f"{title}: {description}")

        return "\n".join(result_arr)
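
The search wrapper can be smoke-tested on its own; a minimal sketch, assuming the RapidAPIKey above is valid and the endpoint returns a `value` list as the code expects:

# Quick manual check of the Bing search wrapper (assumes a valid RapidAPIKey).
if __name__ == "__main__":
    print(DeepSearch.search(query="携程"))
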
@@ -47,7 +47,7 @@ llm_model_dict = {
     "chatglm-6b": {
         "name": "chatglm-6b",
         "pretrained_model_name": "THUDM/chatglm-6b",
-        "local_model_path": None,
+        "local_model_path": "/media/checkpoint/chatglm-6b",
         "provides": "ChatGLM"
     },
@@ -66,7 +66,7 @@ llm_model_dict = {
     "vicuna-13b-hf": {
         "name": "vicuna-13b-hf",
         "pretrained_model_name": "vicuna-13b-hf",
-        "local_model_path": None,
+        "local_model_path": "/media/checkpoint/vicuna-13b-hf",
         "provides": "LLamaLLM"
     },
......