
fix #917 #915 (#946)

### What problem does this PR solve?

#917 
#915

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
tags/v0.7.0
KevinHuSh 1 year ago
parent
commit 7eee193956
5 changed files with 15 additions and 5 deletions
  1. deepdoc/parser/pdf_parser.py (+1, -1)
  2. rag/app/naive.py (+11, -3)
  3. rag/nlp/rag_tokenizer.py (+1, -1)
  4. requirements.txt (+1, -0)
  5. requirements_dev.txt (+1, -0)

deepdoc/parser/pdf_parser.py (+1, -1)

@@ -392,7 +392,7 @@ class RAGFlowPdfParser:
             b["text"].strip()[-1] in ",;:'\",、‘“;:-",
             len(b["text"].strip()) > 1 and b["text"].strip(
             )[-2] in ",;:'\",‘“、;:",
-            b["text"].strip()[0] in "。;?!?”)),,、:",
+            b_["text"].strip()[0] in "。;?!?”)),,、:",
         ]
         # features for not concating
         feats = [
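
The one-character fix above makes the third punctuation feature read the box that follows (`b_`) instead of testing the current box (`b`) twice. As a hedged illustration only, here is a toy version of that feature set; `concat_features` and its simplified structure are not the parser's real interface:

```python
def concat_features(b: dict, b_: dict) -> list:
    """Toy punctuation features for deciding whether two adjacent text boxes
    should be concatenated. `b` is the current box, `b_` the one after it;
    both are assumed to carry a non-empty "text" field."""
    cur = b["text"].strip()
    nxt = b_["text"].strip()
    return [
        cur[-1] in ",;:'\",、‘“;:-",                  # current box ends mid-clause
        len(cur) > 1 and cur[-2] in ",;:'\",‘“、;:",
        nxt[0] in "。;?!?”)),,、:",                   # the fix: look at the NEXT box, not b again
    ]
```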

rag/app/naive.py (+11, -3)

@@ -19,6 +19,8 @@ from deepdoc.parser.pdf_parser import PlainParser
 from rag.nlp import rag_tokenizer, naive_merge, tokenize_table, tokenize_chunks, find_codec
 from deepdoc.parser import PdfParser, ExcelParser, DocxParser
 from rag.settings import cron_logger
+from rag.utils import num_tokens_from_string


 class Docx(DocxParser):
     def __init__(self):
@@ -149,8 +151,14 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
                     if not l:
                         break
                     txt += l
-        sections = txt.split("\n")
-        sections = [(l, "") for l in sections if l]
+        sections = []
+        for sec in txt.split("\n"):
+            if num_tokens_from_string(sec) > 10 * parser_config.get("chunk_token_num", 128):
+                sections.append((sec[:int(len(sec)/2)], ""))
+                sections.append((sec[int(len(sec)/2):], ""))
+            else:
+                sections.append((sec, ""))
         callback(0.8, "Finish parsing.")
     elif re.search(r"\.doc$", filename, re.IGNORECASE):
@@ -163,7 +171,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
     else:
         raise NotImplementedError(
-            "file type not supported yet(doc, docx, pdf, txt supported)")
+            "file type not supported yet(pdf, xlsx, doc, docx, txt supported)")

     st = timer()
     chunks = naive_merge(
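
In the `.txt` branch, the new loop keeps a single very long line from becoming one oversized chunk: any line whose token count exceeds 10 × `chunk_token_num` is cut in half before being handed to `naive_merge`. A self-contained sketch of that rule, using a crude stand-in token counter instead of `rag.utils.num_tokens_from_string`:

```python
def split_long_lines(txt, chunk_token_num=128, count_tokens=lambda s: max(1, len(s) // 4)):
    """Turn raw text into (section, "") pairs, halving any line far longer than
    the configured chunk size. `count_tokens` is a rough stand-in used only in
    this sketch; the PR itself calls num_tokens_from_string."""
    sections = []
    for sec in txt.split("\n"):
        if count_tokens(sec) > 10 * chunk_token_num:
            mid = int(len(sec) / 2)
            sections.append((sec[:mid], ""))   # first half of the oversized line
            sections.append((sec[mid:], ""))   # second half
        else:
            sections.append((sec, ""))
    return sections

# A 20,000-character line is halved, while the short line passes through unchanged.
print(len(split_long_lines("short line\n" + "x" * 20000)))  # 3
```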

rag/nlp/rag_tokenizer.py (+1, -1)

@@ -24,7 +24,7 @@ class RagTokenizer:
     def loadDict_(self, fnm):
         print("[HUQIE]:Build trie", fnm, file=sys.stderr)
         try:
-            of = open(fnm, "r")
+            of = open(fnm, "r", encoding='utf-8')
             while True:
                 line = of.readline()
                 if not line:
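
The tokenizer change pins the dictionary file to UTF-8. Without an explicit `encoding=`, `open()` falls back to the platform's locale encoding (`locale.getpreferredencoding()`), for example GBK/cp936 on many Windows installs, which can fail on a UTF-8 word list. A minimal sketch of the loading pattern, assuming an illustrative file path; this is not `RagTokenizer.loadDict_` itself:

```python
import sys

def load_dict_sketch(fnm):
    """Read a UTF-8 word list line by line regardless of the OS locale."""
    print("[HUQIE]:Build trie", fnm, file=sys.stderr)
    words = []
    with open(fnm, "r", encoding="utf-8") as of:   # explicit encoding is the point of the fix
        for line in of:
            line = line.strip()
            if line:
                words.append(line)   # the real loader inserts entries into a trie instead
    return words

# Illustrative path; RAGFlow ships its dictionary under rag/res/.
# words = load_dict_sketch("rag/res/huqie.txt")
```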

requirements.txt (+1, -0)

@@ -136,3 +136,4 @@ BCEmbedding
 loguru==0.7.2
 umap-learn
 fasttext==0.9.2
+volcengine

requirements_dev.txt (+1, -0)

@@ -124,3 +124,4 @@ ollama==0.1.8
 redis==5.0.4
 fasttext==0.9.2
 umap-learn
+volcengine
