@@ -51,7 +51,7 @@ def list():
         if not e:
             return get_data_error_result(retmsg="Document not found!")
         query = {
-            "doc_ids": [doc_id], "page": page, "size": size, "question": question
+            "doc_ids": [doc_id], "page": page, "size": size, "question": question, "sort": True
         }
         if "available_int" in req:
             query["available_int"] = int(req["available_int"])
@@ -66,7 +66,12 @@ def list():
                 "important_kwd": sres.field[id].get("important_kwd", []),
                 "img_id": sres.field[id].get("img_id", ""),
                 "available_int": sres.field[id].get("available_int", 1),
+                "positions": sres.field[id].get("position_int", "").split("\t")
             }
+            poss = []
+            for i in range(0, len(d["positions"]), 5):
+                poss.append([float(d["positions"][i]), float(d["positions"][i+1]), float(d["positions"][i+2]), float(d["positions"][i+3]), float(d["positions"][i+4])])
+            d["positions"] = poss
             res["chunks"].append(d)
         return get_json_result(data=res)
     except Exception as e:
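
For reference, the decoding above can be exercised standalone. The sample values below are made up, but the layout — five tab-separated numbers per position (page, left, right, top, bottom) — matches what `add_positions` writes and what `Dealer` flattens later in this patch:

```python
# Hypothetical sample: two positions, five numbers each
# (page, left, right, top, bottom), tab-joined as ES returns them.
flat = "3\t56.0\t322.0\t110.4\t180.9\t3\t56.0\t322.0\t190.2\t240.7"
vals = flat.split("\t")
positions = [[float(v) for v in vals[i:i + 5]] for i in range(0, len(vals), 5)]
assert positions == [[3.0, 56.0, 322.0, 110.4, 180.9],
                     [3.0, 56.0, 322.0, 190.2, 240.7]]
```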
@@ -21,9 +21,14 @@ from api.utils import get_base_config,decrypt_database_config
 from api.utils.file_utils import get_project_base_directory
 from api.utils.log_utils import LoggerFactory, getLogger
+from rag.nlp import search
+from rag.utils import ELASTICSEARCH
+
+# Logger
+LoggerFactory.set_directory(os.path.join(get_project_base_directory(), "logs", "api"))
+# {CRITICAL: 50, FATAL:50, ERROR:40, WARNING:30, WARN:30, INFO:20, DEBUG:10, NOTSET:0}
+LoggerFactory.LEVEL = 10
+stat_logger = getLogger("stat")
+access_logger = getLogger("access")
+database_logger = getLogger("database")

 API_VERSION = "v1"
 RAG_FLOW_SERVICE_NAME = "ragflow"
@@ -133,16 +138,10 @@ AUTHENTICATION_DEFAULT_TIMEOUT = 30 * 24 * 60 * 60  # s
 PRIVILEGE_COMMAND_WHITELIST = []
 CHECK_NODES_IDENTITY = False

-from rag.nlp import search
-from rag.utils import ELASTICSEARCH
 retrievaler = search.Dealer(ELASTICSEARCH)
-
-# Logger
-LoggerFactory.set_directory(os.path.join(get_project_base_directory(), "logs", "api"))
-# {CRITICAL: 50, FATAL:50, ERROR:40, WARNING:30, WARN:30, INFO:20, DEBUG:10, NOTSET:0}
-LoggerFactory.LEVEL = 10
-stat_logger = getLogger("stat")
-access_logger = getLogger("access")
-database_logger = getLogger("database")

 class CustomEnum(Enum):
     @classmethod
@@ -545,7 +545,7 @@ class HuParser:
                 b_["top"] = b["top"]
                 self.boxes.pop(i)

-    def _extract_table_figure(self, need_image, ZM, return_html):
+    def _extract_table_figure(self, need_image, ZM, return_html, need_position):
         tables = {}
         figures = {}
         # extract figure and table boxes
@@ -658,8 +658,9 @@ class HuParser:
             self.boxes.pop(i)

         res = []
+        positions = []

-        def cropout(bxs, ltype):
+        def cropout(bxs, ltype, poss):
             nonlocal ZM
             pn = set([b["page_number"] - 1 for b in bxs])
             if len(pn) < 2:
@@ -682,6 +683,7 @@
                                                    "layoutno", "")))

                 left, top, right, bott = b["x0"], b["top"], b["x1"], b["bottom"]
+                poss.append((pn, left, right, top, bott))
                 return self.page_images[pn] \
                     .crop((left * ZM, top * ZM,
                            right * ZM, bott * ZM))
@@ -692,7 +694,7 @@
                 pn[p] = []
                 pn[p].append(b)
             pn = sorted(pn.items(), key=lambda x: x[0])
-            imgs = [cropout(arr, ltype) for p, arr in pn]
+            imgs = [cropout(arr, ltype, poss) for p, arr in pn]
             pic = Image.new("RGB",
                             (int(np.max([i.size[0] for i in imgs])),
                              int(np.sum([m.size[1] for m in imgs]))),
@@ -714,18 +716,26 @@
             if not txt:
                 continue

+            poss = []
             res.append(
                 (cropout(
                     bxs,
-                    "figure"),
+                    "figure", poss),
                  [txt] if not return_html else [f"<p>{txt}</p>"]))
+            positions.append(poss)

         for k, bxs in tables.items():
             if not bxs:
                 continue
-            res.append((cropout(bxs, "table"),
+            bxs = Recognizer.sort_Y_firstly(bxs, np.mean([(b["bottom"]-b["top"])/2 for b in bxs]))
+            poss = []
+            res.append((cropout(bxs, "table", poss),
                         self.tbl_det.construct_table(bxs, html=return_html, is_english=self.is_english)))
+            positions.append(poss)

+        assert len(positions) == len(res)
+        if need_position: return list(zip(res, positions))
         return res

     def proj_match(self, line):
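
With `need_position=True`, `_extract_table_figure` now returns each table or figure paired with the page regions it was cropped from. A minimal sketch of the assumed shape, with dummy values (the first element would normally be a PIL image):

```python
# Assumed shape: [((image, content), positions), ...], where positions is a
# list of (page_no, left, right, top, bottom) tuples collected by cropout().
tbls = [((None, ["<table>...</table>"]),
         [(0, 56.0, 322.0, 110.4, 180.9)])]
for (img, rows), poss in tbls:  # the same unpacking tokenize_table uses below
    assert len(poss[0]) == 5
```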
@@ -922,13 +932,13 @@ class HuParser:
         self._text_merge()
         self._concat_downward()
         self._filter_forpages()
-        tbls = self._extract_table_figure(need_image, zoomin, return_html)
+        tbls = self._extract_table_figure(need_image, zoomin, return_html, False)

         return self.__filterout_scraps(deepcopy(self.boxes), zoomin), tbls

     def remove_tag(self, txt):
         return re.sub(r"@@[\t0-9.-]+?##", "", txt)

-    def crop(self, text, ZM=3):
+    def crop(self, text, ZM=3, need_position=False):
         imgs = []
         poss = []
         for tag in re.findall(r"@@[0-9-]+\t[0-9.\t]+##", text):
@@ -946,6 +956,7 @@ class HuParser:
             pos = poss[-1]
             poss.append(([pos[0][-1]], pos[1], pos[2], min(self.page_images[pos[0][-1]].size[1]/ZM, pos[4]+GAP), min(self.page_images[pos[0][-1]].size[1]/ZM, pos[4]+120)))

+        positions = []
         for ii, (pns, left, right, top, bottom) in enumerate(poss):
             right = left + max_width
             bottom *= ZM
@@ -958,6 +969,8 @@ class HuParser:
                     bottom, self.page_images[pns[0]].size[1])
                 ))
             )
+            positions.append((pns[0], left, right, top, min(
+                bottom, self.page_images[pns[0]].size[1])/ZM))
             bottom -= self.page_images[pns[0]].size[1]
             for pn in pns[1:]:
                 imgs.append(
@@ -967,9 +980,12 @@ class HuParser:
                         self.page_images[pn].size[1])
                     ))
                 )
+                positions.append((pn, left, right, 0, min(
+                    bottom, self.page_images[pn].size[1]) / ZM))
                 bottom -= self.page_images[pn].size[1]

         if not imgs:
+            if need_position: return None, None
             return
         height = 0
         for img in imgs:
@@ -988,6 +1004,9 @@ class HuParser:
                 img = Image.alpha_composite(img, overlay).convert("RGB")
             pic.paste(img, (0, int(height)))
             height += img.size[1] + GAP
+
+        if need_position:
+            return pic, positions
         return pic
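
The `crop` changes above record one position tuple per cropped strip, splitting regions that spill across a page boundary. A self-contained sketch of that arithmetic with made-up numbers (page height 800 units, zoom factor 3):

```python
# Made-up numbers: a tagged region (top=700, bottom=900 in page units) on an
# 800-unit-tall page rendered at ZM=3. The first strip is clipped to the page,
# the remainder restarts at top=0 on the next page, and bottoms are stored /ZM.
ZM = 3
page_px = 800 * ZM                     # rendered page height in pixels
left, right, top, bottom = 50, 550, 700, 900
bottom *= ZM                           # crop() scales bottom into pixels
positions = [(0, left, right, top, min(bottom, page_px) / ZM)]
bottom -= page_px                      # spill-over continues on the next page
positions.append((1, left, right, 0, min(bottom, page_px) / ZM))
assert positions == [(0, 50, 550, 700, 800.0), (1, 50, 550, 0, 100.0)]
```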
@@ -265,6 +265,7 @@ class Recognizer(object):
             return
         min_dis, min_i = 1000000, None
         for i,b in enumerate(boxes):
+            if box.get("layoutno", "0") != b.get("layoutno", "0"): continue
             dis = min(abs(box["x0"] - b["x0"]), abs(box["x1"] - b["x1"]), abs(box["x0"]+box["x1"] - b["x1"] - b["x0"])/2)
             if dis < min_dis:
                 min_i = i
@@ -13,7 +13,7 @@
 import copy
 import re
 from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, \
-    hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table
+    hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table, add_positions
 from rag.nlp import huqie
 from deepdoc.parser import PdfParser, DocxParser
@@ -21,6 +21,7 @@ from deepdoc.parser import PdfParser, DocxParser
 class Pdf(PdfParser):
     def __call__(self, filename, binary=None, from_page=0,
                  to_page=100000, zoomin=3, callback=None):
+        callback(msg="OCR is running...")
         self.__images__(
             filename if not binary else binary,
             zoomin,
@@ -40,11 +41,11 @@ class Pdf(PdfParser):
         self._filter_forpages()
         self._merge_with_same_bullet()
         callback(0.75, "Text merging finished.")
-        tbls = self._extract_table_figure(True, zoomin, False)
+        tbls = self._extract_table_figure(True, zoomin, False, True)
         callback(0.8, "Text extraction finished")

         return [(b["text"] + self._line_tag(b, zoomin), b.get("layoutno","")) for b in self.boxes], tbls


 def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
@@ -69,7 +70,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
         callback(0.8, "Finish parsing.")
     elif re.search(r"\.pdf$", filename, re.IGNORECASE):
         pdf_parser = Pdf()
-        sections,tbls = pdf_parser(filename if not binary else binary,
+        sections, tbls = pdf_parser(filename if not binary else binary,
                                    from_page=from_page, to_page=to_page, callback=callback)
     elif re.search(r"\.txt$", filename, re.IGNORECASE):
         callback(0.1, "Start to parse.")
@@ -105,7 +106,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
         d = copy.deepcopy(doc)
         ck = "\n".join(ck)
         if pdf_parser:
-            d["image"] = pdf_parser.crop(ck)
+            d["image"], poss = pdf_parser.crop(ck, need_position=True)
+            add_positions(d, poss)
             ck = pdf_parser.remove_tag(ck)
         tokenize(d, ck, eng)
         res.append(d)
@@ -15,7 +15,7 @@ import re
 from io import BytesIO
 from docx import Document
 from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, hierarchical_merge, \
-    make_colon_as_title
+    make_colon_as_title, add_positions
 from rag.nlp import huqie
 from deepdoc.parser import PdfParser, DocxParser
 from rag.settings import cron_logger
@@ -49,6 +49,7 @@ class Docx(DocxParser):
 class Pdf(PdfParser):
     def __call__(self, filename, binary=None, from_page=0,
                  to_page=100000, zoomin=3, callback=None):
+        callback(msg="OCR is running...")
         self.__images__(
             filename if not binary else binary,
             zoomin,
@@ -122,7 +123,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
         ck = "\n".join(ck)
         d = copy.deepcopy(doc)
         if pdf_parser:
-            d["image"] = pdf_parser.crop(ck)
+            d["image"], poss = pdf_parser.crop(ck, need_position=True)
+            add_positions(d, poss)
             ck = pdf_parser.remove_tag(ck)
         tokenize(d, ck, eng)
         res.append(d)
@@ -2,7 +2,7 @@ import copy
 import re
 from api.db import ParserType
-from rag.nlp import huqie, tokenize, tokenize_table
+from rag.nlp import huqie, tokenize, tokenize_table, add_positions
 from deepdoc.parser import PdfParser
 from rag.utils import num_tokens_from_string
@@ -14,6 +14,7 @@ class Pdf(PdfParser):
     def __call__(self, filename, binary=None, from_page=0,
                  to_page=100000, zoomin=3, callback=None):
+        callback(msg="OCR is running...")
         self.__images__(
             filename if not binary else binary,
             zoomin,
@@ -32,7 +33,7 @@ class Pdf(PdfParser):
         self._concat_downward(concat_between_pages=False)
         self._filter_forpages()
         callback(0.77, "Text merging finished")
-        tbls = self._extract_table_figure(True, zoomin, False)
+        tbls = self._extract_table_figure(True, zoomin, False, True)

         # clean mess
         for b in self.boxes:
@@ -91,7 +92,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
             d = copy.deepcopy(doc)
             ck = "\n".join(chunk)
             tokenize(d, pdf_parser.remove_tag(ck), pdf_parser.is_english)
-            d["image"] = pdf_parser.crop(ck)
+            d["image"], poss = pdf_parser.crop(ck, need_position=True)
+            add_positions(d, poss)
             res.append(d)
             chunk = []
             tk_cnt = 0
@@ -13,7 +13,7 @@
 import copy
 import re
 from rag.app import laws
-from rag.nlp import huqie, is_english, tokenize, naive_merge, tokenize_table
+from rag.nlp import huqie, is_english, tokenize, naive_merge, tokenize_table, add_positions
 from deepdoc.parser import PdfParser
 from rag.settings import cron_logger
@@ -21,6 +21,7 @@ from rag.settings import cron_logger
 class Pdf(PdfParser):
     def __call__(self, filename, binary=None, from_page=0,
                  to_page=100000, zoomin=3, callback=None):
+        callback(msg="OCR is running...")
         self.__images__(
             filename if not binary else binary,
             zoomin,
@@ -39,7 +40,7 @@ class Pdf(PdfParser):
         self._concat_downward(concat_between_pages=False)
         self._filter_forpages()
         callback(0.77, "Text merging finished")
-        tbls = self._extract_table_figure(True, zoomin, False)
+        tbls = self._extract_table_figure(True, zoomin, False, True)

         cron_logger.info("paddle layouts:".format((timer() - start) / (self.total_page + 0.1)))
         #self._naive_vertical_merge()
@@ -95,11 +96,12 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
     # wrap up to es documents
     for ck in cks:
         if len(ck.strip()) == 0:continue
         print("--", ck)
         if not ck:continue
         d = copy.deepcopy(doc)
         if pdf_parser:
-            d["image"] = pdf_parser.crop(ck)
+            d["image"], poss = pdf_parser.crop(ck, need_position=True)
+            add_positions(d, poss)
             ck = pdf_parser.remove_tag(ck)
         tokenize(d, ck, eng)
         res.append(d)
@@ -15,7 +15,7 @@ import re
 from collections import Counter
 from api.db import ParserType
-from rag.nlp import huqie, tokenize, tokenize_table
+from rag.nlp import huqie, tokenize, tokenize_table, add_positions
 from deepdoc.parser import PdfParser
 import numpy as np
 from rag.utils import num_tokens_from_string
@@ -28,6 +28,7 @@ class Pdf(PdfParser):
     def __call__(self, filename, binary=None, from_page=0,
                  to_page=100000, zoomin=3, callback=None):
+        callback(msg="OCR is running...")
         self.__images__(
             filename if not binary else binary,
             zoomin,
@@ -47,7 +48,7 @@ class Pdf(PdfParser):
         self._concat_downward(concat_between_pages=False)
         self._filter_forpages()
         callback(0.75, "Text merging finished.")
-        tbls = self._extract_table_figure(True, zoomin, False)
+        tbls = self._extract_table_figure(True, zoomin, False, True)

         # clean mess
         if column_width < self.page_images[0].size[0] / zoomin / 2:
@@ -165,7 +166,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
         txt = pdf_parser.remove_tag(paper["abstract"])
         d["important_kwd"] = ["abstract", "总结", "概括", "summary", "summarize"]
         d["important_tks"] = " ".join(d["important_kwd"])
-        d["image"] = pdf_parser.crop(paper["abstract"])
+        d["image"], poss = pdf_parser.crop(paper["abstract"], need_position=True)
+        add_positions(d, poss)
         tokenize(d, txt, eng)
         res.append(d)
@@ -198,8 +200,9 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
         for p in proj:
             d = copy.deepcopy(doc)
             txt += "\n" + pdf_parser.remove_tag(p)
-            d["image"] = pdf_parser.crop(p)
-            tokenize(d, txt)
+            d["image"], poss = pdf_parser.crop(p, need_position=True)
+            add_positions(d, poss)
+            tokenize(d, txt, eng)
             res.append(d)

     i = 0
@@ -210,7 +213,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
             d = copy.deepcopy(doc)
             ck = "\n".join(chunk)
             tokenize(d, pdf_parser.remove_tag(ck), pdf_parser.is_english)
-            d["image"] = pdf_parser.crop(ck)
+            d["image"], poss = pdf_parser.crop(ck, need_position=True)
+            add_positions(d, poss)
             res.append(d)
             chunk = []
             tk_cnt = 0
@@ -48,6 +48,7 @@ class Pdf(PdfParser):
         return False

     def __call__(self, filename, binary=None, from_page=0, to_page=100000, zoomin=3, callback=None):
+        callback(msg="OCR is running...")
         self.__images__(filename if not binary else binary, zoomin, from_page, to_page)
         callback(0.8, "Page {}~{}: OCR finished".format(from_page, min(to_page, self.total_page)))
         assert len(self.boxes) == len(self.page_images), "{} vs. {}".format(len(self.boxes), len(self.page_images))
@@ -94,9 +95,10 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None, **kwargs):
         return res
     elif re.search(r"\.pdf$", filename, re.IGNORECASE):
         pdf_parser = Pdf()
-        for txt,img in pdf_parser(filename if not binary else binary, from_page=from_page, to_page=to_page, callback=callback):
+        for pn, (txt,img) in enumerate(pdf_parser(filename if not binary else binary, from_page=from_page, to_page=to_page, callback=callback)):
             d = copy.deepcopy(doc)
             d["image"] = img
+            d["page_num_int"] = [pn+1]
             tokenize(d, txt, pdf_parser.is_english)
             res.append(d)
         return res
@@ -83,17 +83,39 @@ def tokenize(d, t, eng):
 def tokenize_table(tbls, doc, eng, batch_size=10):
     res = []
     # add tables
-    for img, rows in tbls:
+    for (img, rows), poss in tbls:
         if not rows:continue
+        if isinstance(rows, str):
+            d = copy.deepcopy(doc)
+            r = re.sub(r"<[^<>]{,12}>", "", rows)
+            tokenize(d, r, eng)
+            d["content_with_weight"] = rows
+            d["image"] = img
+            add_positions(d, poss)
+            res.append(d)
+            continue
         de = "; " if eng else "；"
         for i in range(0, len(rows), batch_size):
             d = copy.deepcopy(doc)
             r = de.join(rows[i:i + batch_size])
             tokenize(d, r, eng)
             d["image"] = img
+            add_positions(d, poss)
             res.append(d)
     return res

+
+def add_positions(d, poss):
+    if not poss:return
+    d["page_num_int"] = []
+    d["position_int"] = []
+    d["top_int"] = []
+    for pn, left, right, top, bottom in poss:
+        d["page_num_int"].append(pn+1)
+        d["top_int"].append(top)
+        d["position_int"].append((pn+1, left, right, top, bottom))

 def remove_contents_table(sections, eng=False):
     i = 0
     while i < len(sections):
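
A worked example of the new helper (using the `add_positions` defined above): 0-based page indices come in, 1-based pages go out, and `page_num_int`/`top_int` are split out as dedicated sort keys:

```python
d = {}
add_positions(d, [(0, 50.0, 550.0, 120.0, 180.0),
                  (1, 50.0, 550.0, 0.0, 90.0)])
assert d["page_num_int"] == [1, 2]
assert d["top_int"] == [120.0, 0.0]
assert d["position_int"] == [(1, 50.0, 550.0, 120.0, 180.0),
                             (2, 50.0, 550.0, 0.0, 90.0)]
```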
@@ -68,17 +68,25 @@ class Dealer:
         pg = int(req.get("page", 1)) - 1
         ps = int(req.get("size", 1000))
         src = req.get("fields", ["docnm_kwd", "content_ltks", "kb_id", "img_id",
-                                 "image_id", "doc_id", "q_512_vec", "q_768_vec",
+                                 "image_id", "doc_id", "q_512_vec", "q_768_vec", "position_int",
                                  "q_1024_vec", "q_1536_vec", "available_int", "content_with_weight"])

         s = s.query(bqry)[pg * ps:(pg + 1) * ps]
         s = s.highlight("content_ltks")
         s = s.highlight("title_ltks")
         if not qst:
-            s = s.sort(
-                {"create_time": {"order": "desc", "unmapped_type": "date"}},
-                {"create_timestamp_flt": {"order": "desc", "unmapped_type": "float"}}
-            )
+            if not req.get("sort"):
+                s = s.sort(
+                    {"create_time": {"order": "desc", "unmapped_type": "date"}},
+                    {"create_timestamp_flt": {"order": "desc", "unmapped_type": "float"}}
+                )
+            else:
+                s = s.sort(
+                    {"page_num_int": {"order": "asc", "unmapped_type": "float"}},
+                    {"top_int": {"order": "asc", "unmapped_type": "float"}},
+                    {"create_time": {"order": "desc", "unmapped_type": "date"}},
+                    {"create_timestamp_flt": {"order": "desc", "unmapped_type": "float"}}
+                )

         if qst:
             s = s.highlight_options(
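
The `unmapped_type` entries matter here: chunks indexed before this change have no `page_num_int`/`top_int` mapping, and without them the new sort would fail on shards holding those documents. elasticsearch-dsl serializes the positional arguments above into a sort array roughly like:

```python
# Approximate "sort" body produced by the else-branch above.
sort_clause = [
    {"page_num_int": {"order": "asc", "unmapped_type": "float"}},
    {"top_int": {"order": "asc", "unmapped_type": "float"}},
    {"create_time": {"order": "desc", "unmapped_type": "date"}},
    {"create_timestamp_flt": {"order": "desc", "unmapped_type": "float"}},
]
```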
@@ -169,7 +177,7 @@ class Dealer:
             m = {n: d.get(n) for n in flds if d.get(n) is not None}
             for n, v in m.items():
                 if isinstance(v, type([])):
-                    m[n] = "\t".join([str(vv) for vv in v])
+                    m[n] = "\t".join([str(vv) if not isinstance(vv, list) else "\t".join([str(vvv) for vvv in vv]) for vv in v])
                     continue
                 if not isinstance(v, type("")):
                     m[n] = str(m[n])
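
With `position_int` added to the default field list, each value comes back from ES as a list of five-element lists; the nested join above flattens it into the tab-separated stream that the chunk-list endpoint (first hunk) splits back into quintuples. A quick check of the new expression:

```python
v = [[1, 50.0, 550.0, 120.0, 180.0], [2, 50.0, 550.0, 0.0, 90.0]]
flat = "\t".join([str(vv) if not isinstance(vv, list)
                  else "\t".join([str(vvv) for vvv in vv]) for vv in v])
assert flat.split("\t") == ['1', '50.0', '550.0', '120.0', '180.0',
                            '2', '50.0', '550.0', '0.0', '90.0']
```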
@@ -48,6 +48,7 @@ from api.utils.file_utils import get_project_base_directory
 BATCH_SIZE = 64

 FACTORY = {
+    "general": naive,
     ParserType.NAIVE.value: naive,
     ParserType.PAPER.value: paper,
     ParserType.BOOK.value: book,
@@ -228,6 +229,8 @@ def main(comm, mod):
         es_r = ELASTICSEARCH.bulk(cks, search.index_name(r["tenant_id"]))
         if es_r:
             callback(-1, "Index failure!")
+            ELASTICSEARCH.deleteByQuery(
+                Q("match", doc_id=r["doc_id"]), idxnm=search.index_name(r["tenant_id"]))
             cron_logger.error(str(es_r))
         else:
             if TaskService.do_cancel(r["id"]):