# Release Notification
**Star us on GitHub, and be notified of new releases instantly!**
# Installation
## System Requirements

        return retrievaler.sql_retrieval(sql, format="json"), sql

    tbl, sql = get_table()
+   if tbl is None:
+       return None, None
    if tbl.get("error") and tried_times <= 2:
        user_promt = """
        表名:{};

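The `if tbl is None` guard above sits inside a retry loop that regenerates the SQL when retrieval reports an error (`tried_times <= 2`). A minimal sketch of that retry shape, with hypothetical `gen_sql`/`run_sql` callables standing in for the chat-model call and `retrievaler.sql_retrieval`:

from typing import Callable, Optional, Tuple

def sql_with_retry(gen_sql: Callable[[str], str],
                   run_sql: Callable[[str], Optional[dict]],
                   question: str,
                   max_tries: int = 3) -> Tuple[Optional[dict], Optional[str]]:
    # Hypothetical sketch, not the project's API: regenerate SQL until the
    # retrieval result has no "error" key or the attempts run out.
    tbl, sql = None, None
    for _ in range(max_tries):
        sql = gen_sql(question)   # e.g. ask the chat model for a SQL statement
        tbl = run_sql(sql)        # e.g. retrievaler.sql_retrieval(sql, format="json")
        if tbl is None:           # nothing usable came back at all
            return None, None
        if not tbl.get("error"):  # success: stop regenerating
            break
    return tbl, sql
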
llms = LLMService.get_all()
llms = [m.to_dict() for m in llms if m.status == StatusEnum.VALID.value]
for m in llms:
-   m["available"] = m["fid"] in facts
+   m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding"

res = {}
for m in llms:

| "model_type": LLMType.CHAT.value | "model_type": LLMType.CHAT.value | ||||
| }, { | }, { | ||||
| "fid": factory_infos[3]["name"], | "fid": factory_infos[3]["name"], | ||||
| "llm_name": "flag-enbedding", | |||||
| "llm_name": "flag-embedding", | |||||
| "tags": "TEXT EMBEDDING,", | "tags": "TEXT EMBEDDING,", | ||||
| "max_tokens": 128 * 1000, | "max_tokens": 128 * 1000, | ||||
| "model_type": LLMType.EMBEDDING.value | "model_type": LLMType.EMBEDDING.value | ||||
| "model_type": LLMType.CHAT.value | "model_type": LLMType.CHAT.value | ||||
| }, { | }, { | ||||
| "fid": factory_infos[4]["name"], | "fid": factory_infos[4]["name"], | ||||
| "llm_name": "flag-enbedding", | |||||
| "llm_name": "flag-embedding", | |||||
| "tags": "TEXT EMBEDDING,", | "tags": "TEXT EMBEDDING,", | ||||
| "max_tokens": 128 * 1000, | "max_tokens": 128 * 1000, | ||||
| "model_type": LLMType.EMBEDDING.value | "model_type": LLMType.EMBEDDING.value |
},
"Local": {
    "chat_model": "qwen-14B-chat",
-   "embedding_model": "flag-enbedding",
+   "embedding_model": "flag-embedding",
    "image2text_model": "",
    "asr_model": "",
},
"Moonshot": {
    "chat_model": "moonshot-v1-8k",
-   "embedding_model": "flag-enbedding",
+   "embedding_model": "",
    "image2text_model": "",
    "asr_model": "",
}

| b["SP"] = ii | b["SP"] = ii | ||||
| def __ocr(self, pagenum, img, chars, ZM=3): | def __ocr(self, pagenum, img, chars, ZM=3): | ||||
| bxs = self.ocr(np.array(img)) | |||||
| bxs = self.ocr.detect(np.array(img)) | |||||
| if not bxs: | if not bxs: | ||||
| self.boxes.append([]) | self.boxes.append([]) | ||||
| return | return | ||||
| for b in bxs: | for b in bxs: | ||||
| if not b["text"]: | if not b["text"]: | ||||
| b["text"] = b["txt"] | |||||
| left, right, top, bott = b["x0"]*ZM, b["x1"]*ZM, b["top"]*ZM, b["bottom"]*ZM | |||||
| b["text"] = self.ocr.recognize(np.array(img), np.array([[left, top], [right, top], [right, bott], [left, bott]], dtype=np.float32)) | |||||
| del b["txt"] | del b["txt"] | ||||
| bxs = [b for b in bxs if b["text"]] | |||||
| if self.mean_height[-1] == 0: | if self.mean_height[-1] == 0: | ||||
| self.mean_height[-1] = np.median([b["bottom"] - b["top"] | self.mean_height[-1] = np.median([b["bottom"] - b["top"] | ||||
| for b in bxs]) | for b in bxs]) |
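For orientation, the change above splits OCR into two stages: text boxes are detected once per page image, recognition runs only for boxes whose text is still empty, and boxes that end up with no text are dropped. A self-contained sketch of that flow; the `detect_boxes` and `recognize_crop` callables are stand-ins for the detect/recognize methods shown further below:

import numpy as np

def ocr_page(img, detect_boxes, recognize_crop, zoom=3):
    # Sketch of the detect-then-recognize flow used by __ocr above.
    # detect_boxes(img) -> iterable of dicts with x0/x1/top/bottom and optionally "text";
    # recognize_crop(img, quad) -> recognized string for one quadrilateral region.
    boxes = list(detect_boxes(img))
    for b in boxes:
        if b.get("text"):   # text already filled in (e.g. from the PDF text layer)
            continue
        left, right, top, bott = b["x0"] * zoom, b["x1"] * zoom, b["top"] * zoom, b["bottom"] * zoom
        quad = np.array([[left, top], [right, top], [right, bott], [left, bott]],
                        dtype=np.float32)
        b["text"] = recognize_crop(img, quad)
    # keep only boxes that ended up with text, mirroring the new filter above
    return [b for b in boxes if b.get("text")]
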
options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
options.intra_op_num_threads = 2
options.inter_op_num_threads = 2
- if ort.get_device() == "GPU":
+ if False and ort.get_device() == "GPU":
    sess = ort.InferenceSession(model_file_path, options=options, providers=['CUDAExecutionProvider'])
else:
    sess = ort.InferenceSession(model_file_path, options=options, providers=['CPUExecutionProvider'])

        'keep_keys': ['image', 'shape']
    }
}]
- postprocess_params = {"name": "DBPostProcess", "thresh": 0.3, "box_thresh": 0.6, "max_candidates": 1000,
+ postprocess_params = {"name": "DBPostProcess", "thresh": 0.3, "box_thresh": 0.5, "max_candidates": 1000,
                        "unclip_ratio": 1.5, "use_dilation": False, "score_mode": "fast", "box_type": "quad"}
self.postprocess_op = build_post_process(postprocess_params)

            break
    return _boxes

+ def detect(self, img):
+     time_dict = {'det': 0, 'rec': 0, 'cls': 0, 'all': 0}
+     if img is None:
+         return None, None, time_dict
+     start = time.time()
+     dt_boxes, elapse = self.text_detector(img)
+     time_dict['det'] = elapse
+     if dt_boxes is None:
+         end = time.time()
+         time_dict['all'] = end - start
+         return None, None, time_dict
+     else:
+         cron_logger.debug("dt_boxes num : {}, elapsed : {}".format(
+             len(dt_boxes), elapse))
+     return zip(self.sorted_boxes(dt_boxes), [("", 0) for _ in range(len(dt_boxes))])

+ def recognize(self, ori_im, box):
+     img_crop = self.get_rotate_crop_image(ori_im, box)
+     rec_res, elapse = self.text_recognizer([img_crop])
+     text, score = rec_res[0]
+     if score < self.drop_score:
+         return ""
+     return text

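Putting the two new methods together, a caller gets detection-only boxes from `detect()` and then recognizes individual quadrilaterals with `recognize()`. A rough usage sketch; the `ocr` object stands for an instance of the enclosing text-system class, whose constructor is not shown in this hunk:

import numpy as np
from PIL import Image

def ocr_image(ocr, path):
    # `ocr` is an instance of the class defining detect()/recognize() above.
    # Assumes detection succeeds; detect() returns a (None, None, time_dict)
    # failure tuple when the image is empty or no boxes are found.
    img = np.array(Image.open(path).convert("RGB"))
    texts = []
    for box, _ in ocr.detect(img):           # yields (box, ("", 0)) pairs, detection only
        quad = np.array(box, dtype=np.float32)
        txt = ocr.recognize(img, quad)       # returns "" when the score is below drop_score
        if txt:
            texts.append(txt)
    return texts
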
def __call__(self, img, cls=True):
    time_dict = {'det': 0, 'rec': 0, 'cls': 0, 'all': 0}

        img_crop_list.append(img_crop)
    rec_res, elapse = self.text_recognizer(img_crop_list)
    time_dict['rec'] = elapse
    cron_logger.debug("rec_res num : {}, elapsed : {}".format(
        len(rec_res), elapse))
    end = time.time()
    time_dict['all'] = end - start
    #for bno in range(len(img_crop_list)):
    #    print(f"{bno}, {rec_res[bno]}")

    if not os.path.exists(model_file_path):
        raise ValueError("not find model file path {}".format(
            model_file_path))
-   if ort.get_device() == "GPU":
+   if False and ort.get_device() == "GPU":
        options = ort.SessionOptions()
        options.enable_cpu_mem_arena = False
        self.ort_sess = ort.InferenceSession(model_file_path, options=options, providers=[('CUDAExecutionProvider')])

import re

from api.db import ParserType
- from rag.nlp import huqie, tokenize, tokenize_table, add_positions
+ from rag.nlp import huqie, tokenize, tokenize_table, add_positions, bullets_category, title_frequency
from deepdoc.parser import PdfParser
from rag.utils import num_tokens_from_string

def __call__(self, filename, binary=None, from_page=0,
             to_page=100000, zoomin=3, callback=None):
+   from timeit import default_timer as timer
+   start = timer()
    callback(msg="OCR is running...")
    self.__images__(
        filename if not binary else binary,
        callback
    )
    callback(msg="OCR finished.")
+   #for bb in self.boxes:
+   #    for b in bb:
+   #        print(b)
+   print("OCR:", timer() - start)

+   def get_position(bx):
+       poss = []
+       pn = bx["page_number"]
+       top = bx["top"] - self.page_cum_height[pn - 1]
+       bott = bx["bottom"] - self.page_cum_height[pn - 1]
+       poss.append((pn, bx["x0"], bx["x1"], top, min(bott, self.page_images[pn - 1].size[1] / zoomin)))
+       while bott * zoomin > self.page_images[pn - 1].size[1]:
+           bott -= self.page_images[pn - 1].size[1] / zoomin
+           top = 0
+           pn += 1
+           poss.append((pn, bx["x0"], bx["x1"], top, min(bott, self.page_images[pn - 1].size[1] / zoomin)))
+       return poss

+   def tag(pn, left, right, top, bottom):
+       return "@@{}\t{:.1f}\t{:.1f}\t{:.1f}\t{:.1f}##" \
+           .format(pn, left, right, top, bottom)

-   from timeit import default_timer as timer
-   start = timer()
    self._layouts_rec(zoomin)
    callback(0.65, "Layout analysis finished.")
    print("paddle layouts:", timer() - start)
    self._table_transformer_job(zoomin)
    callback(0.67, "Table analysis finished.")
    self._text_merge()
-   self._concat_downward(concat_between_pages=False)
+   tbls = self._extract_table_figure(True, zoomin, True, True)
+   self._naive_vertical_merge()
    self._filter_forpages()
    callback(0.68, "Text merging finished")
-   tbls = self._extract_table_figure(True, zoomin, True, True)
    # clean mess
    for b in self.boxes:

    # merge chunks with the same bullets
    self._merge_with_same_bullet()

-   # merge title with decent chunk
-   i = 0
-   while i + 1 < len(self.boxes):
-       b = self.boxes[i]
-       if b.get("layoutno", "").find("title") < 0:
-           i += 1
-           continue
-       b_ = self.boxes[i + 1]
-       b_["text"] = b["text"] + "\n" + b_["text"]
-       b_["x0"] = min(b["x0"], b_["x0"])
-       b_["x1"] = max(b["x1"], b_["x1"])
-       b_["top"] = b["top"]
-       self.boxes.pop(i)
-   callback(0.8, "Parsing finished")
-   for b in self.boxes: print(b["text"], b.get("layoutno"))
-   print(tbls)
-   return [b["text"] + self._line_tag(b, zoomin) for b in self.boxes], tbls
+   # set pivot using the most frequent type of title,
+   # then merge between 2 pivot
+   bull = bullets_category([b["text"] for b in self.boxes])
+   most_level, levels = title_frequency(bull, [(b["text"], b.get("layout_no", "")) for b in self.boxes])
+   assert len(self.boxes) == len(levels)
+   sec_ids = []
+   sid = 0
+   for i, lvl in enumerate(levels):
+       if lvl <= most_level: sid += 1
+       sec_ids.append(sid)
+       #print(lvl, self.boxes[i]["text"], most_level)
+   sections = [(b["text"], sec_ids[i], get_position(b)) for i, b in enumerate(self.boxes)]
+   for (img, rows), poss in tbls:
+       sections.append((rows[0], -1, [(p[0] + 1, p[1], p[2], p[3], p[4]) for p in poss]))
+   chunks = []
+   last_sid = -2
+   for txt, sec_id, poss in sorted(sections, key=lambda x: (x[-1][0][0], x[-1][0][3], x[-1][0][1])):
+       poss = "\t".join([tag(*pos) for pos in poss])
+       if sec_id == last_sid or sec_id == -1:
+           if chunks:
+               chunks[-1] += "\n" + txt + poss
+               continue
+       chunks.append(txt + poss)
+       if sec_id > -1: last_sid = sec_id
+   return chunks

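The chunking above hinges on the `sec_ids` assignment: every box whose title level is at or above the most frequent title level (`most_level`) starts a new section id, the boxes that follow inherit the current id, and table rows get id -1 so they attach to whatever chunk they land next to. A toy, self-contained illustration of just that id assignment:

# Toy illustration of the sec_id logic above. Smaller level = higher-ranking
# title; most_level is the most frequent recognized title level.
levels = [1, 3, 3, 1, 2, 3]   # e.g. two chapter titles (1), one subtitle (2), body text (3)
most_level = 1

sec_ids, sid = [], 0
for lvl in levels:
    if lvl <= most_level:     # a title at or above the pivot starts a new section
        sid += 1
    sec_ids.append(sid)

print(sec_ids)                # [1, 1, 1, 2, 2, 2] -> the boxes collapse into two chunks
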
def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
    if re.search(r"\.pdf$", filename, re.IGNORECASE):
        pdf_parser = Pdf()
-       cks, tbls = pdf_parser(filename if not binary else binary,
+       cks = pdf_parser(filename if not binary else binary,
                         from_page=from_page, to_page=to_page, callback=callback)
    else: raise NotImplementedError("file type not supported yet(pdf supported)")
    doc = {

    # is it English
    eng = lang.lower() == "english"  # pdf_parser.is_english
-   res = tokenize_table(tbls, doc, eng)

    i = 0
    chunk = []
    tk_cnt = 0
+   res = []
    def add_chunk():
        nonlocal chunk, res, doc, pdf_parser, tk_cnt
        d = copy.deepcopy(doc)
        ck = "\n".join(chunk)
-       tokenize(d, pdf_parser.remove_tag(ck), pdf_parser.is_english)
+       tokenize(d, pdf_parser.remove_tag(ck), eng)
        d["image"], poss = pdf_parser.crop(ck, need_position=True)
        add_positions(d, poss)
        res.append(d)
        tk_cnt = 0

    while i < len(cks):
-       if tk_cnt > 128: add_chunk()
+       if tk_cnt > 256: add_chunk()
        txt = cks[i]
        txt_ = pdf_parser.remove_tag(txt)
        i += 1
        chunk.append(txt)
        tk_cnt += cnt
    if chunk: add_chunk()

    for i, d in enumerate(res):
        print(d)
        # d["image"].save(f"./logs/{i}.jpg")

if __name__ == "__main__":
    import sys

-   def dummy(a, b):
+   def dummy(prog=None, msg=""):
        pass
    chunk(sys.argv[1], callback=dummy)

| print("--", ck) | print("--", ck) | ||||
| d = copy.deepcopy(doc) | d = copy.deepcopy(doc) | ||||
| if pdf_parser: | if pdf_parser: | ||||
| d["image"], poss = pdf_parser.crop(ck, need_position=True) | |||||
| try: | |||||
| d["image"], poss = pdf_parser.crop(ck, need_position=True) | |||||
| except Exception as e: | |||||
| continue | |||||
| add_positions(d, poss) | add_positions(d, poss) | ||||
| ck = pdf_parser.remove_tag(ck) | ck = pdf_parser.remove_tag(ck) | ||||
| tokenize(d, ck, eng) | tokenize(d, ck, eng) |
import random
+ from collections import Counter

from rag.utils import num_tokens_from_string
from . import huqie
from nltk import word_tokenize

        i += 1

+ def title_frequency(bull, sections):
+     bullets_size = len(BULLET_PATTERN[bull])
+     levels = [bullets_size + 1 for _ in range(len(sections))]
+     if not sections or bull < 0:
+         return bullets_size + 1, levels
+     for i, (txt, layout) in enumerate(sections):
+         for j, p in enumerate(BULLET_PATTERN[bull]):
+             if re.match(p, txt.strip()):
+                 levels[i] = j
+                 break
+         else:
+             if re.search(r"(title|head)", layout) and not not_title(txt.split("@")[0]):
+                 levels[i] = bullets_size
+     most_level = bullets_size + 1
+     for l, c in sorted(Counter(levels).items(), key=lambda x: x[1] * -1):
+         if l <= bullets_size:
+             most_level = l
+             break
+     return most_level, levels

+ def not_title(txt):
+     if re.match(r"第[零一二三四五六七八九十百0-9]+条", txt):
+         return False
+     if len(txt.split(" ")) > 12 or (txt.find(" ") < 0 and len(txt) >= 32):
+         return True
+     return re.search(r"[,;,。;!!]", txt)

def hierarchical_merge(bull, sections, depth):
    if not sections or bull < 0:
        return []
    bullets_size = len(BULLET_PATTERN[bull])
    levels = [[] for _ in range(bullets_size + 2)]

-   def not_title(txt):
-       if re.match(r"第[零一二三四五六七八九十百0-9]+条", txt):
-           return False
-       if len(txt.split(" ")) > 12 or (txt.find(" ") < 0 and len(txt) >= 32):
-           return True
-       return re.search(r"[,;,。;!!]", txt)

    for i, (txt, layout) in enumerate(sections):
        for j, p in enumerate(BULLET_PATTERN[bull]):

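To see what the new `title_frequency` computes, the interesting part is its last loop: after each line is assigned a level (the matching bullet-pattern index, `bullets_size` for layout-detected titles, `bullets_size + 1` for body text), the most frequent level that still counts as a title becomes `most_level`. A small standalone illustration of that selection, using made-up level values:

from collections import Counter

# Standalone illustration of the most_level selection in title_frequency above.
bullets_size = 4
levels = [0, 5, 5, 0, 1, 5, 0, 5]   # 5 == bullets_size + 1 means "not a title"

most_level = bullets_size + 1
for lvl, cnt in sorted(Counter(levels).items(), key=lambda x: -x[1]):
    if lvl <= bullets_size:          # ignore the "not a title" bucket
        most_level = lvl
        break

print(most_level)                    # 0 -- the most common real title level
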
| "", | "", | ||||
| txt) | txt) | ||||
| return re.sub( | return re.sub( | ||||
| r"(what|who|how|which|where|why|(is|are|were|was) there) (is|are|were|was)*", "", txt, re.IGNORECASE) | |||||
| r"(what|who|how|which|where|why|(is|are|were|was) there) (is|are|were|was|to)*", "", txt, re.IGNORECASE) | |||||
| def question(self, txt, tbl="qa", min_match="60%"): | def question(self, txt, tbl="qa", min_match="60%"): | ||||
| txt = re.sub( | txt = re.sub( | ||||
| txt = EsQueryer.rmWWW(txt) | txt = EsQueryer.rmWWW(txt) | ||||
| if not self.isChinese(txt): | if not self.isChinese(txt): | ||||
| tks = txt.split(" ") | |||||
| q = [] | |||||
| tks = [t for t in txt.split(" ") if t.strip()] | |||||
| q = tks | |||||
| for i in range(1, len(tks)): | for i in range(1, len(tks)): | ||||
| q.append("\"%s %s\"~2" % (tks[i - 1], tks[i])) | |||||
| q.append("\"%s %s\"^2" % (tks[i - 1], tks[i])) | |||||
| if not q: | if not q: | ||||
| q.append(txt) | q.append(txt) | ||||
| return Q("bool", | return Q("bool", | ||||
| must=Q("query_string", fields=self.flds, | must=Q("query_string", fields=self.flds, | ||||
| type="best_fields", query=" OR ".join(q), | type="best_fields", query=" OR ".join(q), | ||||
| boost=1, minimum_should_match="60%") | |||||
| boost=1, minimum_should_match=min_match) | |||||
| ), txt.split(" ") | ), txt.split(" ") | ||||
| def needQieqie(tk): | def needQieqie(tk): | ||||
| atks = toDict(atks) | atks = toDict(atks) | ||||
| btkss = [toDict(tks) for tks in btkss] | btkss = [toDict(tks) for tks in btkss] | ||||
| tksim = [self.similarity(atks, btks) for btks in btkss] | tksim = [self.similarity(atks, btks) for btks in btkss] | ||||
| return np.array(sims[0]) * vtweight + np.array(tksim) * tkweight, sims[0], tksim | |||||
| return np.array(sims[0]) * vtweight + np.array(tksim) * tkweight, tksim, sims[0] | |||||
| def similarity(self, qtwt, dtwt): | def similarity(self, qtwt, dtwt): | ||||
| if isinstance(dtwt, type("")): | if isinstance(dtwt, type("")): |
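The English branch of `question()` now keeps the individual tokens and adds each adjacent pair as a phrase boosted with `^2` (previously the pairs were proximity phrases `~2` and the bare tokens were not kept), and the caller-supplied `min_match` finally reaches the query instead of a hard-coded "60%". A short sketch of the resulting OR-joined query string:

# Sketch of the query terms produced by the new English branch above.
txt = "deep learning models"
tks = [t for t in txt.split(" ") if t.strip()]
q = list(tks)                                     # keep the single tokens
for i in range(1, len(tks)):
    q.append('"%s %s"^2' % (tks[i - 1], tks[i]))  # boosted adjacent-pair phrases

print(" OR ".join(q))
# deep OR learning OR models OR "deep learning"^2 OR "learning models"^2
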
| s["knn"]["filter"] = bqry.to_dict() | s["knn"]["filter"] = bqry.to_dict() | ||||
| s["knn"]["similarity"] = 0.17 | s["knn"]["similarity"] = 0.17 | ||||
| res = self.es.search(s, idxnm=idxnm, timeout="600s", src=src) | res = self.es.search(s, idxnm=idxnm, timeout="600s", src=src) | ||||
| es_logger.info("【Q】: {}".format(json.dumps(s))) | |||||
| kwds = set([]) | kwds = set([]) | ||||
| for k in keywords: | for k in keywords: |