Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
J
jinchat-server
概览
概览
详情
活动
周期分析
版本库
存储库
文件
提交
分支
标签
贡献者
分支图
比较
统计图
问题
0
议题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程表
图表
维基
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
aigc-pioneer
jinchat-server
Commits
8ae84c6c
提交
8ae84c6c
authored
4月 17, 2023
作者:
imClumsyPanda
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
update local_doc_qa.py
上级
88941d39
显示空白字符变更
内嵌
并排
正在显示
5 个修改的文件
包含
52 行增加
和
11 行删除
+52
-11
local_doc_qa.py
chains/local_doc_qa.py
+23
-10
cli_demo.py
cli_demo.py
+1
-1
1.pdf
content/1.pdf
+0
-0
__init__.py
textsplitter/__init__.py
+3
-0
chinese_text_splitter.py
textsplitter/chinese_text_splitter.py
+25
-0
没有找到文件。
chains/local_doc_qa.py
浏览文件 @
8ae84c6c
...
@@ -9,6 +9,7 @@ import os
...
@@ -9,6 +9,7 @@ import os
from
configs.model_config
import
*
from
configs.model_config
import
*
import
datetime
import
datetime
from
typing
import
List
from
typing
import
List
from
textsplitter
import
ChineseTextSplitter
# return top-k text chunk from vector store
# return top-k text chunk from vector store
VECTOR_SEARCH_TOP_K
=
6
VECTOR_SEARCH_TOP_K
=
6
...
@@ -17,6 +18,18 @@ VECTOR_SEARCH_TOP_K = 6
...
@@ -17,6 +18,18 @@ VECTOR_SEARCH_TOP_K = 6
LLM_HISTORY_LEN
=
3
LLM_HISTORY_LEN
=
3
def load_file(filepath):
    """Load a single document and split it into chunks.

    PDF files get a :class:`ChineseTextSplitter` configured for PDF text
    (which collapses the hard line wrapping PDF extraction produces); all
    other file types are loaded in ``elements`` mode with a non-PDF splitter.

    Args:
        filepath: Path to the document to load.

    Returns:
        The list of split ``Document`` chunks produced by the loader.
    """
    if filepath.lower().endswith(".pdf"):
        loader = UnstructuredFileLoader(filepath)
        textsplitter = ChineseTextSplitter(pdf=True)
        # Use the keyword form consistently in both branches so it is
        # unambiguous which load_and_split parameter receives the splitter.
        docs = loader.load_and_split(text_splitter=textsplitter)
    else:
        loader = UnstructuredFileLoader(filepath, mode="elements")
        textsplitter = ChineseTextSplitter(pdf=False)
        docs = loader.load_and_split(text_splitter=textsplitter)
    return docs
class
LocalDocQA
:
class
LocalDocQA
:
llm
:
object
=
None
llm
:
object
=
None
embeddings
:
object
=
None
embeddings
:
object
=
None
...
@@ -48,10 +61,10 @@ class LocalDocQA:
...
@@ -48,10 +61,10 @@ class LocalDocQA:
elif
os
.
path
.
isfile
(
filepath
):
elif
os
.
path
.
isfile
(
filepath
):
file
=
os
.
path
.
split
(
filepath
)[
-
1
]
file
=
os
.
path
.
split
(
filepath
)[
-
1
]
try
:
try
:
loader
=
UnstructuredFileLoader
(
filepath
,
mode
=
"elements"
)
docs
=
load_file
(
filepath
)
docs
=
loader
.
load
()
print
(
f
"{file} 已成功加载"
)
print
(
f
"{file} 已成功加载"
)
except
:
except
Exception
as
e
:
print
(
e
)
print
(
f
"{file} 未能成功加载"
)
print
(
f
"{file} 未能成功加载"
)
return
None
return
None
elif
os
.
path
.
isdir
(
filepath
):
elif
os
.
path
.
isdir
(
filepath
):
...
@@ -59,25 +72,25 @@ class LocalDocQA:
...
@@ -59,25 +72,25 @@ class LocalDocQA:
for
file
in
os
.
listdir
(
filepath
):
for
file
in
os
.
listdir
(
filepath
):
fullfilepath
=
os
.
path
.
join
(
filepath
,
file
)
fullfilepath
=
os
.
path
.
join
(
filepath
,
file
)
try
:
try
:
loader
=
UnstructuredFileLoader
(
fullfilepath
,
mode
=
"elements"
)
docs
+=
load_file
(
fullfilepath
)
docs
+=
loader
.
load
()
print
(
f
"{file} 已成功加载"
)
print
(
f
"{file} 已成功加载"
)
except
:
except
Exception
as
e
:
print
(
e
)
print
(
f
"{file} 未能成功加载"
)
print
(
f
"{file} 未能成功加载"
)
else
:
else
:
docs
=
[]
docs
=
[]
for
file
in
filepath
:
for
file
in
filepath
:
try
:
try
:
loader
=
UnstructuredFileLoader
(
file
,
mode
=
"elements"
)
docs
+=
load_file
(
file
)
docs
+=
loader
.
load
()
print
(
f
"{file} 已成功加载"
)
print
(
f
"{file} 已成功加载"
)
except
:
except
Exception
as
e
:
print
(
e
)
print
(
f
"{file} 未能成功加载"
)
print
(
f
"{file} 未能成功加载"
)
vector_store
=
FAISS
.
from_documents
(
docs
,
self
.
embeddings
)
vector_store
=
FAISS
.
from_documents
(
docs
,
self
.
embeddings
)
vs_path
=
f
"""./vector_store/{os.path.splitext(file)[0]}_FAISS_{datetime.datetime.now().strftime("
%
Y
%
m
%
d_
%
H
%
M
%
S")}"""
vs_path
=
f
"""./vector_store/{os.path.splitext(file)[0]}_FAISS_{datetime.datetime.now().strftime("
%
Y
%
m
%
d_
%
H
%
M
%
S")}"""
vector_store
.
save_local
(
vs_path
)
vector_store
.
save_local
(
vs_path
)
return
vs_path
if
len
(
docs
)
>
0
else
None
return
vs_path
if
len
(
docs
)
>
0
else
None
def
get_knowledge_based_answer
(
self
,
def
get_knowledge_based_answer
(
self
,
query
,
query
,
...
...
cli_demo.py
浏览文件 @
8ae84c6c
...
@@ -2,7 +2,7 @@ from configs.model_config import *
...
@@ -2,7 +2,7 @@ from configs.model_config import *
from
chains.local_doc_qa
import
LocalDocQA
from
chains.local_doc_qa
import
LocalDocQA
# return top-k text chunk from vector store
# return top-k text chunk from vector store
VECTOR_SEARCH_TOP_K
=
10
VECTOR_SEARCH_TOP_K
=
6
# LLM input history length
# LLM input history length
LLM_HISTORY_LEN
=
3
LLM_HISTORY_LEN
=
3
...
...
content/1.pdf
0 → 100644
浏览文件 @
8ae84c6c
File added
textsplitter/__init__.py
0 → 100644
浏览文件 @
8ae84c6c
from
.chinese_text_splitter
import
*
\ No newline at end of file
textsplitter/chinese_text_splitter.py
0 → 100644
浏览文件 @
8ae84c6c
from
langchain.text_splitter
import
CharacterTextSplitter
import
re
from
typing
import
List
class ChineseTextSplitter(CharacterTextSplitter):
    """Text splitter that cuts Chinese text at sentence boundaries.

    Sentences are ended by fullwidth/halfwidth terminators (。！？ etc.),
    optionally followed by closing quotes. With ``pdf=True`` the input is
    first de-wrapped: runs of newlines are collapsed and all whitespace is
    normalized, to undo the hard line breaks PDF extraction introduces.
    """

    # Sentence separator: a terminator plus up to two closing quotes, or a
    # lookahead position before opening quotes / end of string. Colons and
    # semicolons are deliberately NOT treated as sentence ends.
    # Compiled once at class level so split_text does not recompile per call.
    _SENT_SEP = re.compile(
        '([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))'
    )

    def __init__(self, pdf: bool = False, **kwargs):
        """Create a splitter.

        Args:
            pdf: If True, normalize PDF-style hard line wrapping before
                splitting.
            **kwargs: Forwarded to ``CharacterTextSplitter``.
        """
        super().__init__(**kwargs)
        self.pdf = pdf

    def split_text(self, text: str) -> List[str]:
        """Split *text* into a list of sentence strings."""
        if self.pdf:
            text = re.sub(r"\n{3,}", "\n", text)
            # Raw string: '\s' (non-raw) is an invalid escape sequence —
            # a DeprecationWarning today and a SyntaxError in future Python.
            # r"\s" is the same pattern, warning-free.
            text = re.sub(r"\s", " ", text)
            text = text.replace("\n\n", "")
        sent_list = []
        for ele in self._SENT_SEP.split(text):
            if self._SENT_SEP.match(ele) and sent_list:
                # A separator fragment: glue it onto the preceding sentence.
                sent_list[-1] += ele
            elif ele:
                sent_list.append(ele)
        return sent_list
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论