
Keep markdown titles from being split from the following content (#2971)

### What problem does this PR solve?

#2970 
### Type of change

- [ ] Bug Fix (non-breaking change which fixes an issue)
- [x] New Feature (non-breaking change which adds functionality)
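
For context, a minimal sketch of the behavior this change targets (the input lines and section tuples below are illustrative, not taken from issue #2970): when a Markdown file is split line by line into sections, a heading line should stay in the same section as the text that follows it instead of becoming a section of its own.

```python
# Hypothetical input: a heading followed by its body text.
md_lines = ["## Setup", "Run the installer and restart the service."]

# Before this change, each short line became its own section, orphaning the title:
#   [("## Setup", ""), ("Run the installer and restart the service.", "")]
# After this change, a section that is only a heading is merged with the next line:
#   [("## Setup\nRun the installer and restart the service.", "")]
```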
tags/v0.13.0
Kevin Hu, 1 year ago
commit 1fce6caf80
3 changed files with 12 additions and 4 deletions
  1. rag/app/manual.py (+5, -2)
  2. rag/app/naive.py (+6, -2)
  3. rag/app/qa.py (+1, -0)

rag/app/manual.py (+5, -2)

@@ -67,9 +67,11 @@ class Pdf(PdfParser):
        return [(b["text"], b.get("layout_no", ""), self.get_position(b, zoomin))
                for i, b in enumerate(self.boxes)], tbls


class Docx(DocxParser):
    def __init__(self):
        pass

    def get_picture(self, document, paragraph):
        img = paragraph._element.xpath('.//pic:pic')
        if not img:
@@ -80,6 +82,7 @@ class Docx(DocxParser):
        image = related_part.image
        image = Image.open(BytesIO(image.blob))
        return image

    def concat_img(self, img1, img2):
        if img1 and not img2:
            return img1
@@ -160,6 +163,7 @@ class Docx(DocxParser):
            tbls.append(((None, html), ""))
        return ti_list, tbls


def chunk(filename, binary=None, from_page=0, to_page=100000,
          lang="Chinese", callback=None, **kwargs):
    """
@@ -244,6 +248,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
        res = tokenize_table(tbls, doc, eng)
        res.extend(tokenize_chunks(chunks, doc, eng, pdf_parser))
        return res

    if re.search(r"\.docx$", filename, re.IGNORECASE):
        docx_parser = Docx()
        ti_list, tbls = docx_parser(filename, binary,
@@ -259,8 +264,6 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
        raise NotImplementedError("file type not supported yet(pdf and docx supported)")



if __name__ == "__main__":
    import sys

rag/app/naive.py (+6, -2)

@@ -168,8 +168,12 @@ class Markdown(MarkdownParser):
                sections.append((sec[:int(len(sec) / 2)], ""))
                sections.append((sec[int(len(sec) / 2):], ""))
            else:
                sections.append((sec, ""))
        print(tables)
                if sections and sections[-1][0].strip().find("#") == 0:
                    sec_, _ = sections.pop(-1)
                    sections.append((sec_+"\n"+sec, ""))
                else:
                    sections.append((sec, ""))

        for table in tables:
            tbls.append(((None, markdown(table, extensions=['markdown.extensions.tables'])), ""))
        return sections, tbls
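
A standalone sketch of the rule added in this hunk (the helper name `append_section` and the driver loop are illustrative, not part of RAGFlow's API): if the most recently collected section is just a "#" heading, the next line is appended to it rather than starting a new section.

```python
def append_section(sections, sec):
    # If the last section starts with a Markdown heading marker, attach this
    # line to it so the title is not split from its following content.
    if sections and sections[-1][0].strip().find("#") == 0:
        sec_, _ = sections.pop(-1)
        sections.append((sec_ + "\n" + sec, ""))
    else:
        sections.append((sec, ""))


sections = []
for line in ["Intro paragraph.", "# Title", "Body under the title."]:
    append_section(sections, line)
print(sections)
# [('Intro paragraph.', ''), ('# Title\nBody under the title.', '')]
```

Note that after a merge the combined text still begins with "#", so under this rule subsequent short lines continue to accumulate under the same heading.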

rag/app/qa.py (+1, -0)

@@ -393,6 +393,7 @@ def chunk(filename, binary=None, lang="Chinese", callback=None, **kwargs):
        if sum_question:
            res.append(beAdoc(deepcopy(doc), sum_question, markdown(last_answer, extensions=['markdown.extensions.tables']), eng))
        return res

    elif re.search(r"\.docx$", filename, re.IGNORECASE):
        docx_parser = Docx()
        qai_list, tbls = docx_parser(filename, binary,
