Commit 3eaa25c9 Author: wangxinkai

feat: add api for knowledge_based QA

Parent a231f92d
@@ -164,4 +164,7 @@ cython_debug/
 output/*
 log/*
 .chroma
-vector_store/*
\ No newline at end of file
+vector_store/*
+llm/*
+embedding/*
\ No newline at end of file
@@ -6,10 +6,11 @@ embedding_model_dict = {
     "ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
     "ernie-base": "nghuyong/ernie-3.0-base-zh",
     "text2vec": "GanymedeNil/text2vec-large-chinese",
+    "local-text2vec": "./embedding/text2vec-large-chinese"
 }
 
 # Embedding model name
-EMBEDDING_MODEL = "text2vec"
+EMBEDDING_MODEL = "local-text2vec"
 
 # Embedding running device
 EMBEDDING_DEVICE = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
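
The new "local-text2vec" entry points the existing model lookup at a local checkout instead of a Hugging Face hub ID; together with the .gitignore hunk above, this implies ./embedding/text2vec-large-chinese holds a full local snapshot of the model that is kept out of version control. A minimal sketch of how such an entry would be consumed, assuming the project loads embeddings through LangChain's HuggingFaceEmbeddings wrapper and that the config lives in a configs.model_config module (both assumptions, neither shown in this diff):

# Minimal sketch, assuming LangChain's HuggingFaceEmbeddings wrapper and a
# configs.model_config module path; neither appears in this diff.
from langchain.embeddings import HuggingFaceEmbeddings

from configs.model_config import (EMBEDDING_DEVICE, EMBEDDING_MODEL,
                                  embedding_model_dict)

# For "local-text2vec" this resolves to "./embedding/text2vec-large-chinese",
# which must contain a complete model dump (config, tokenizer, weights).
embeddings = HuggingFaceEmbeddings(
    model_name=embedding_model_dict[EMBEDDING_MODEL],
    model_kwargs={"device": EMBEDDING_DEVICE},
)
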
@@ -19,10 +20,11 @@ llm_model_dict = {
     "chatglm-6b-int4-qe": "THUDM/chatglm-6b-int4-qe",
     "chatglm-6b-int4": "THUDM/chatglm-6b-int4",
     "chatglm-6b": "THUDM/chatglm-6b",
+    "local-chatglm-6b": "./llm/chatglm-6b"
 }
 
 # LLM model name
-LLM_MODEL = "chatglm-6b"
+LLM_MODEL = "local-chatglm-6b"
 
 # Use p-tuning-v2 PrefixEncoder
 USE_PTUNING_V2 = False
...
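
Likewise, switching LLM_MODEL to "local-chatglm-6b" makes the lookup resolve to the local ./llm/chatglm-6b directory. A hedged sketch of loading that directory with the standard transformers API (the loading code itself is not part of this commit, and the configs.model_config import path is an assumption):

# Sketch only: standard transformers loading for ChatGLM-6B; the actual
# loader in this repo is not shown in the diff.
from transformers import AutoModel, AutoTokenizer

from configs.model_config import LLM_MODEL, llm_model_dict  # assumed module path

model_path = llm_model_dict[LLM_MODEL]  # "./llm/chatglm-6b"
# ChatGLM ships custom modeling code, so trust_remote_code=True is required;
# the same call accepts either a hub ID ("THUDM/chatglm-6b") or a local path.
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half().cuda()
model = model.eval()
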