### What problem does this PR solve? Add support for HTML files. ### Type of change - [x] New Feature (non-breaking change which adds functionality)
| return FileType.PDF.value | return FileType.PDF.value | ||||
| if re.match( | if re.match( | ||||
| r".*\.(doc|docx|ppt|pptx|yml|xml|htm|json|csv|txt|ini|xls|xlsx|wps|rtf|hlp|pages|numbers|key|md|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt)$", filename): | |||||
| r".*\.(doc|docx|ppt|pptx|yml|xml|htm|json|csv|txt|ini|xls|xlsx|wps|rtf|hlp|pages|numbers|key|md|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt|html)$", filename): | |||||
| return FileType.DOC.value | return FileType.DOC.value | ||||
| if re.match( | if re.match( |
| from .docx_parser import RAGFlowDocxParser as DocxParser | from .docx_parser import RAGFlowDocxParser as DocxParser | ||||
| from .excel_parser import RAGFlowExcelParser as ExcelParser | from .excel_parser import RAGFlowExcelParser as ExcelParser | ||||
| from .ppt_parser import RAGFlowPptParser as PptParser | from .ppt_parser import RAGFlowPptParser as PptParser | ||||
| from .html_parser import RAGFlowHtmlParser as HtmlParser |
| # -*- coding: utf-8 -*- | |||||
| from rag.nlp import find_codec | |||||
| import readability | |||||
| import html_text | |||||
| import chardet | |||||
def get_encoding(file):
    """Detect the character encoding of *file* with chardet.

    Reads the whole file as bytes and runs chardet's statistical
    detector over the content.

    Args:
        file: Path to the file to inspect.

    Returns:
        The detected encoding name, or "utf-8" when detection fails
        (chardet reports ``encoding: None`` for undecidable input such
        as an empty file, which would otherwise make the caller's
        ``open(..., encoding=None)`` fall back to the platform locale).
    """
    with open(file, 'rb') as f:
        detected = chardet.detect(f.read())
    # Fall back to utf-8 so callers get deterministic decoding even
    # when chardet cannot decide.
    return detected['encoding'] or 'utf-8'
class RAGFlowHtmlParser:
    """Extract readable plain text from an HTML document.

    Uses readability-lxml to isolate the title and the main article
    content, then html_text to flatten that content to plain text.
    """

    def __call__(self, fnm, binary=None):
        """Parse HTML supplied either as raw bytes or as a file path.

        Args:
            fnm: Path to the HTML file (used when *binary* is falsy).
            binary: Optional raw bytes of the HTML document.

        Returns:
            A list of text lines: the document title first, followed by
            the extracted main-content lines (empty lines included).
        """
        if binary:
            codec = find_codec(binary)
            raw_html = binary.decode(codec, errors="ignore")
        else:
            with open(fnm, "r", encoding=get_encoding(fnm)) as fp:
                raw_html = fp.read()

        doc = readability.Document(raw_html)
        main_text = html_text.extract_text(doc.summary(html_partial=True))
        combined = "\n".join([doc.title(), main_text])
        return combined.split("\n")
| hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table, add_positions, \ | hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table, add_positions, \ | ||||
| tokenize_chunks, find_codec | tokenize_chunks, find_codec | ||||
| from rag.nlp import rag_tokenizer | from rag.nlp import rag_tokenizer | ||||
| from deepdoc.parser import PdfParser, DocxParser, PlainParser | |||||
| from deepdoc.parser import PdfParser, DocxParser, PlainParser, HtmlParser | |||||
| class Pdf(PdfParser): | class Pdf(PdfParser): | ||||
| random_choices([t for t, _ in sections], k=200))) | random_choices([t for t, _ in sections], k=200))) | ||||
| callback(0.8, "Finish parsing.") | callback(0.8, "Finish parsing.") | ||||
| elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE): | |||||
| callback(0.1, "Start to parse.") | |||||
| sections = HtmlParser()(filename, binary) | |||||
| sections = [(l, "") for l in sections if l] | |||||
| remove_contents_table(sections, eng=is_english( | |||||
| random_choices([t for t, _ in sections], k=200))) | |||||
| callback(0.8, "Finish parsing.") | |||||
| elif re.search(r"\.doc$", filename, re.IGNORECASE): | elif re.search(r"\.doc$", filename, re.IGNORECASE): | ||||
| callback(0.1, "Start to parse.") | callback(0.1, "Start to parse.") | ||||
| binary = BytesIO(binary) | binary = BytesIO(binary) |
| from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, hierarchical_merge, \ | from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, hierarchical_merge, \ | ||||
| make_colon_as_title, add_positions, tokenize_chunks, find_codec | make_colon_as_title, add_positions, tokenize_chunks, find_codec | ||||
| from rag.nlp import rag_tokenizer | from rag.nlp import rag_tokenizer | ||||
| from deepdoc.parser import PdfParser, DocxParser, PlainParser | |||||
| from deepdoc.parser import PdfParser, DocxParser, PlainParser, HtmlParser | |||||
| from rag.settings import cron_logger | from rag.settings import cron_logger | ||||
| sections = [l for l in sections if l] | sections = [l for l in sections if l] | ||||
| callback(0.8, "Finish parsing.") | callback(0.8, "Finish parsing.") | ||||
| elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE): | |||||
| callback(0.1, "Start to parse.") | |||||
| sections = HtmlParser()(filename, binary) | |||||
| sections = [l for l in sections if l] | |||||
| callback(0.8, "Finish parsing.") | |||||
| elif re.search(r"\.doc$", filename, re.IGNORECASE): | elif re.search(r"\.doc$", filename, re.IGNORECASE): | ||||
| callback(0.1, "Start to parse.") | callback(0.1, "Start to parse.") | ||||
| binary = BytesIO(binary) | binary = BytesIO(binary) |
| import re | import re | ||||
| from deepdoc.parser.pdf_parser import PlainParser | from deepdoc.parser.pdf_parser import PlainParser | ||||
| from rag.nlp import rag_tokenizer, naive_merge, tokenize_table, tokenize_chunks, find_codec | from rag.nlp import rag_tokenizer, naive_merge, tokenize_table, tokenize_chunks, find_codec | ||||
| from deepdoc.parser import PdfParser, ExcelParser, DocxParser | |||||
| from deepdoc.parser import PdfParser, ExcelParser, DocxParser, HtmlParser | |||||
| from rag.settings import cron_logger | from rag.settings import cron_logger | ||||
| from rag.utils import num_tokens_from_string | from rag.utils import num_tokens_from_string | ||||
| callback(0.8, "Finish parsing.") | callback(0.8, "Finish parsing.") | ||||
| elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE): | |||||
| callback(0.1, "Start to parse.") | |||||
| sections = HtmlParser()(filename, binary) | |||||
| sections = [(l, "") for l in sections if l] | |||||
| callback(0.8, "Finish parsing.") | |||||
| elif re.search(r"\.doc$", filename, re.IGNORECASE): | elif re.search(r"\.doc$", filename, re.IGNORECASE): | ||||
| callback(0.1, "Start to parse.") | callback(0.1, "Start to parse.") | ||||
| binary = BytesIO(binary) | binary = BytesIO(binary) |
| import re | import re | ||||
| from rag.app import laws | from rag.app import laws | ||||
| from rag.nlp import rag_tokenizer, tokenize, find_codec | from rag.nlp import rag_tokenizer, tokenize, find_codec | ||||
| from deepdoc.parser import PdfParser, ExcelParser, PlainParser | |||||
| from deepdoc.parser import PdfParser, ExcelParser, PlainParser, HtmlParser | |||||
| class Pdf(PdfParser): | class Pdf(PdfParser): | ||||
| sections = [s for s in sections if s] | sections = [s for s in sections if s] | ||||
| callback(0.8, "Finish parsing.") | callback(0.8, "Finish parsing.") | ||||
| elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE): | |||||
| callback(0.1, "Start to parse.") | |||||
| sections = HtmlParser()(filename, binary) | |||||
| sections = [s for s in sections if s] | |||||
| callback(0.8, "Finish parsing.") | |||||
| elif re.search(r"\.doc$", filename, re.IGNORECASE): | elif re.search(r"\.doc$", filename, re.IGNORECASE): | ||||
| callback(0.1, "Start to parse.") | callback(0.1, "Start to parse.") | ||||
| binary = BytesIO(binary) | binary = BytesIO(binary) |
| umap-learn | umap-learn | ||||
| fasttext==0.9.2 | fasttext==0.9.2 | ||||
| volcengine | volcengine | ||||
| readability-lxml==0.8.1 | |||||
| html_text==0.6.2 |
| umap-learn | umap-learn | ||||
| fasttext==0.9.2 | fasttext==0.9.2 | ||||
| volcengine | volcengine | ||||
| opencv-python-headless==4.9.0.80 | |||||
| opencv-python-headless==4.9.0.80 | |||||
| readability-lxml==0.8.1 | |||||
| html_text==0.6.2 |
| fasttext==0.9.2 | fasttext==0.9.2 | ||||
| umap-learn | umap-learn | ||||
| volcengine | volcengine | ||||
| readability-lxml==0.8.1 | |||||
| html_text==0.6.2 |