aigc-pioneer / jinchat-server · Commits

Commit 7f25c9fa
authored May 07, 2023 by imClumsyPanda
Parent: d898c7dd

update textsplitter
Showing 3 changed files with 65 additions and 59 deletions:

- textsplitter/__init__.py (+2, -0)
- textsplitter/ali_text_splitter.py (+26, -0)
- textsplitter/chinese_text_splitter.py (+37, -59)
textsplitter/__init__.py (+2, -0)

from .chinese_text_splitter import ChineseTextSplitter
from .ali_text_splitter import AliTextSplitter
\ No newline at end of file
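
With these two re-exports, both splitter classes can be imported from the package root. A one-line sanity check (hypothetical usage, not part of the commit):

    # Both names resolve via textsplitter/__init__.py after this change.
    from textsplitter import ChineseTextSplitter, AliTextSplitter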
textsplitter/ali_text_splitter.py (new file, mode 100644, +26, -0)

from langchain.text_splitter import CharacterTextSplitter
import re
from typing import List
from modelscope.pipelines import pipeline

# The segmentation pipeline is created at module import time, so the model is
# loaded once and shared by all AliTextSplitter instances.
p = pipeline(
    task="document-segmentation",
    model='damo/nlp_bert_document-segmentation_chinese-base',
    device="cpu")


class AliTextSplitter(CharacterTextSplitter):
    def __init__(self, pdf: bool = False, **kwargs):
        super().__init__(**kwargs)
        self.pdf = pdf

    def split_text(self, text: str) -> List[str]:
        # Documents are split semantically using DAMO Academy's open-source
        # model nlp_bert_document-segmentation_chinese-base
        # (paper: https://arxiv.org/abs/2107.09278).
        # Semantic segmentation requires modelscope[nlp]:
        #   pip install "modelscope[nlp]" -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html
        # Because three models are used in total, this can be heavy on low-end
        # GPUs, so the model is loaded on the CPU here; set `device` to your
        # own GPU id if you need GPU inference.
        if self.pdf:
            text = re.sub(r"\n{3,}", r"\n", text)
            text = re.sub(r"\s", " ", text)
            text = re.sub("\n\n", "", text)
        result = p(documents=text)
        sent_list = [i for i in result["text"].split("\n\t") if i]
        return sent_list
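
For orientation, here is a minimal usage sketch of the new class. It is illustrative only: the sample text is invented, and it assumes modelscope[nlp] and langchain are installed and that the DAMO model can be downloaded on first use. Note that because the pipeline is built at module level, simply importing the package loads the model.

    # Hypothetical usage sketch, not part of the commit.
    # Assumes: pip install langchain "modelscope[nlp]" -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html
    from textsplitter import AliTextSplitter

    splitter = AliTextSplitter(pdf=False)
    sample = "这是第一段,介绍项目背景。这是第二段,介绍模型方法。这是第三段,介绍实验结果。"
    # The pipeline joins semantic segments with "\n\t"; split_text returns one
    # string per segment, with empty entries filtered out.
    for segment in splitter.split_text(sample):
        print(segment)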
textsplitter/chinese_text_splitter.py (+37, -59)

@@ -9,73 +9,51 @@ class ChineseTextSplitter(CharacterTextSplitter):
         super().__init__(**kwargs)
         self.pdf = pdf

-    def split_text1(self, text: str, use_document_segmentation: bool = False) -> List[str]:
-        # use_document_segmentation chooses semantic document splitting; the
-        # segmentation model is DAMO Academy's open-source
-        # nlp_bert_document-segmentation_chinese-base (paper: https://arxiv.org/abs/2107.09278).
-        # Semantic segmentation requires modelscope[nlp]:
-        #   pip install "modelscope[nlp]" -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html
-        # Because three models are used in total, this can be heavy on low-end GPUs,
-        # so the model is loaded on the CPU; set `device` to your own GPU id if needed.
+    def split_text1(self, text: str) -> List[str]:
         if self.pdf:
             text = re.sub(r"\n{3,}", "\n", text)
             text = re.sub('\s', ' ', text)
             text = text.replace("\n\n", "")
-        if use_document_segmentation:
-            from modelscope.pipelines import pipeline
-            p = pipeline(
-                task="document-segmentation",
-                model='damo/nlp_bert_document-segmentation_chinese-base',
-                device="cpu")
-            result = p(documents=text)
-            sent_list = [i for i in result["text"].split("\n\t") if i]
-            return sent_list
-        else:
-            sent_sep_pattern = re.compile('([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))')  # del :;
-            sent_list = []
-            for ele in sent_sep_pattern.split(text):
-                if sent_sep_pattern.match(ele) and sent_list:
-                    sent_list[-1] += ele
-                elif ele:
-                    sent_list.append(ele)
+        sent_sep_pattern = re.compile('([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))')  # del :;
+        sent_list = []
+        for ele in sent_sep_pattern.split(text):
+            if sent_sep_pattern.match(ele) and sent_list:
+                sent_list[-1] += ele
+            elif ele:
+                sent_list.append(ele)
         return sent_list

-    def split_text(self, text: str, use_document_segmentation: bool = False) -> List[str]:
+    def split_text(self, text: str) -> List[str]:
         if self.pdf:
             text = re.sub(r"\n{3,}", r"\n", text)
             text = re.sub('\s', " ", text)
             text = re.sub("\n\n", "", text)
-        if use_document_segmentation:
-            from modelscope.pipelines import pipeline
-            p = pipeline(
-                task="document-segmentation",
-                model='damo/nlp_bert_document-segmentation_chinese-base',
-                device="cpu")
-            result = p(documents=text)
-            sent_list = [i for i in result["text"].split("\n\t") if i]
-            return sent_list
-        else:
-            text = re.sub(r'([;;.!?。!?\?])([^”’])', r"\1\n\2", text)  # single-character sentence terminators
-            text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text)  # English ellipsis
-            text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text)  # Chinese ellipsis
-            text = re.sub(r'([;;!?。!?\?]["’”」』]{0,2})([^;;!?,。!?\?])', r'\1\n\2', text)
-            # A closing quote only ends a sentence when a terminator precedes it, so the
-            # \n separator is placed after the quote; the rules above keep the quotes intact.
-            text = text.rstrip()  # drop any extra trailing \n at the end of the paragraph
-            # Many rule sets also break on the semicolon ; but it is ignored here, as are
-            # dashes and English double quotes; add them back with small tweaks if needed.
-            ls = [i for i in text.split("\n") if i]
-            for ele in ls:
-                if len(ele) > SENTENCE_SIZE:
-                    ele1 = re.sub(r'([,,.]["’”」』]{0,2})([^,,.])', r'\1\n\2', ele)
-                    ele1_ls = ele1.split("\n")
-                    for ele_ele1 in ele1_ls:
-                        if len(ele_ele1) > SENTENCE_SIZE:
-                            ele_ele2 = re.sub(r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r'\1\n\2', ele_ele1)
-                            ele2_ls = ele_ele2.split("\n")
-                            for ele_ele2 in ele2_ls:
-                                if len(ele_ele2) > SENTENCE_SIZE:
-                                    ele_ele3 = re.sub('( ["’”」』]{0,2})([^ ])', r'\1\n\2', ele_ele2)
-                                    ele2_id = ele2_ls.index(ele_ele2)
-                                    ele2_ls = ele2_ls[:ele2_id] + [i for i in ele_ele3.split("\n") if i] + ele2_ls[ele2_id + 1:]
-                            ele_id = ele1_ls.index(ele_ele1)
-                            ele1_ls = ele1_ls[:ele_id] + [i for i in ele2_ls if i] + ele1_ls[ele_id + 1:]
-                    id = ls.index(ele)
-                    ls = ls[:id] + [i for i in ele1_ls if i] + ls[id + 1:]
-            return ls
+        text = re.sub(r'([;;.!?。!?\?])([^”’])', r"\1\n\2", text)  # single-character sentence terminators
+        text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text)  # English ellipsis
+        text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text)  # Chinese ellipsis
+        text = re.sub(r'([;;!?。!?\?]["’”」』]{0,2})([^;;!?,。!?\?])', r'\1\n\2', text)
+        # A closing quote only ends a sentence when a terminator precedes it, so the
+        # \n separator is placed after the quote; the rules above keep the quotes intact.
+        text = text.rstrip()  # drop any extra trailing \n at the end of the paragraph
+        # Many rule sets also break on the semicolon ; but it is ignored here, as are
+        # dashes and English double quotes; add them back with small tweaks if needed.
+        ls = [i for i in text.split("\n") if i]
+        for ele in ls:
+            if len(ele) > SENTENCE_SIZE:
+                ele1 = re.sub(r'([,,.]["’”」』]{0,2})([^,,.])', r'\1\n\2', ele)
+                ele1_ls = ele1.split("\n")
+                for ele_ele1 in ele1_ls:
+                    if len(ele_ele1) > SENTENCE_SIZE:
+                        ele_ele2 = re.sub(r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r'\1\n\2', ele_ele1)
+                        ele2_ls = ele_ele2.split("\n")
+                        for ele_ele2 in ele2_ls:
+                            if len(ele_ele2) > SENTENCE_SIZE:
+                                ele_ele3 = re.sub('( ["’”」』]{0,2})([^ ])', r'\1\n\2', ele_ele2)
+                                ele2_id = ele2_ls.index(ele_ele2)
+                                ele2_ls = ele2_ls[:ele2_id] + [i for i in ele_ele3.split("\n") if i] + ele2_ls[ele2_id + 1:]
+                        ele_id = ele1_ls.index(ele_ele1)
+                        ele1_ls = ele1_ls[:ele_id] + [i for i in ele2_ls if i] + ele1_ls[ele_id + 1:]
+                id = ls.index(ele)
+                ls = ls[:id] + [i for i in ele1_ls if i] + ls[id + 1:]
+        return ls
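
To see what the rule-based splitter now returns, a short hypothetical sketch follows. It assumes SENTENCE_SIZE is the module-level length limit that chinese_text_splitter.py imports elsewhere (not shown in this hunk):

    # Hypothetical usage sketch, not part of the commit.
    from textsplitter import ChineseTextSplitter

    splitter = ChineseTextSplitter(pdf=False)
    text = "天气很好!我们出去走走吧。“好啊。”他说……然后他们就出发了。"
    # split_text first breaks on sentence terminators (。!?… etc.), keeping
    # closing quotes attached to the sentence they end; any piece still longer
    # than SENTENCE_SIZE is split again on commas, then on whitespace.
    for sentence in splitter.split_text(text):
        print(sentence)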