### What problem does this PR solve?

Rename page_num_list, top_list, position_list to page_num_int, top_int, position_int

### Type of change

- [x] Refactoring
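Beyond the rename, the diff below changes how these fields are stored on each chunk: the old `*_list` fields held JSON-encoded strings, while the new `*_int` fields hold native lists of integers, with positions as 5-value rows. A minimal before/after sketch (the `old_chunk`/`new_chunk` names and values are illustrative, not from the PR):

```python
import json

# Before: positions serialized as JSON strings under the old field names.
old_chunk = {
    "page_num_list": json.dumps([3]),
    "top_list": json.dumps([120]),
    "position_list": json.dumps([(3, 40, 560, 120, 380)]),
}

# After: native integer lists under the renamed fields;
# each position row is (page, left, right, top, bottom).
new_chunk = {
    "page_num_int": [3],
    "top_int": [120],
    "position_int": [(3, 40, 560, 120, 380)],
}
```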
@@ -34,9 +34,15 @@ RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/,target=/deps
    cp /deps/cl100k_base.tiktoken /ragflow/9b5ad71b2ce5302211f9c61530b329a4922fc6a4
ENV TIKA_SERVER_JAR="file:///ragflow/tika-server-standard-3.0.0.jar"
ENV DEBIAN_FRONTEND=noninteractive
# Setup apt
# cv2 requires libGL.so.1
# Python package and implicit dependencies:
# opencv-python: libglib2.0-0 libglx-mesa0 libgl1
# aspose-slides: pkg-config libicu-dev libgdiplus libssl1.1_1.1.1f-1ubuntu2_amd64.deb
# python-pptx: default-jdk tika-server-standard-3.0.0.jar
# selenium: libatk-bridge2.0-0 chrome-linux64-121-0-6167-85
# Building C extensions: libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev
RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
    if [ "$NEED_MIRROR" == "1" ]; then \
        sed -i 's|http://archive.ubuntu.com|https://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list; \
@@ -47,8 +53,12 @@ RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
    apt update && \
    apt --no-install-recommends install -y ca-certificates && \
    apt update && \
    DEBIAN_FRONTEND=noninteractive apt install -y curl libpython3-dev nginx libglib2.0-0 libglx-mesa0 pkg-config libicu-dev libgdiplus default-jdk python3-pip pipx \
        libatk-bridge2.0-0 libgtk-4-1 libnss3 xdg-utils unzip libgbm-dev wget git nginx libgl1 vim less
    apt install -y libglib2.0-0 libglx-mesa0 libgl1 && \
    apt install -y pkg-config libicu-dev libgdiplus && \
    apt install -y default-jdk && \
    apt install -y libatk-bridge2.0-0 && \
    apt install -y libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev && \
    apt install -y python3-pip pipx nginx unzip curl wget git vim less
RUN if [ "$NEED_MIRROR" == "1" ]; then \
    pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple && \
@@ -71,7 +71,7 @@ def list_chunk():
                "question_kwd": sres.field[id].get("question_kwd", []),
                "image_id": sres.field[id].get("img_id", ""),
                "available_int": int(sres.field[id].get("available_int", 1)),
                "positions": json.loads(sres.field[id].get("position_list", "[]")),
                "positions": sres.field[id].get("position_int", []),
            }
            assert isinstance(d["positions"], list)
            assert len(d["positions"]) == 0 or (isinstance(d["positions"][0], list) and len(d["positions"][0]) == 5)
@@ -846,7 +846,7 @@ def list_chunks(tenant_id, dataset_id, document_id):
            "question_kwd": sres.field[id].get("question_kwd", []),
            "img_id": sres.field[id].get("img_id", ""),
            "available_int": sres.field[id].get("available_int", 1),
            "positions": sres.field[id].get("position_int", "").split("\t"),
            "positions": sres.field[id].get("position_int", []),
        }
        if len(d["positions"]) % 5 == 0:
            poss = []
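The hunk is truncated after `poss = []`, but the `% 5 == 0` guard suggests the remaining lines regroup a flat list into rows of five, one (page, left, right, top, bottom) box per row. A hedged, self-contained sketch of that regrouping (the helper name is illustrative, not from the PR):

```python
def group_positions(flat):
    # Split a flat list of numbers into 5-value position rows.
    assert len(flat) % 5 == 0
    return [flat[i:i + 5] for i in range(0, len(flat), 5)]

print(group_positions([3, 40, 560, 120, 380]))  # [[3, 40, 560, 120, 380]]
```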
@@ -16,9 +16,9 @@
    "content_with_weight": {"type": "varchar", "default": ""},
    "content_ltks": {"type": "varchar", "default": ""},
    "content_sm_ltks": {"type": "varchar", "default": ""},
    "page_num_list": {"type": "varchar", "default": ""},
    "top_list": {"type": "varchar", "default": ""},
    "position_list": {"type": "varchar", "default": ""},
    "page_num_int": {"type": "varchar", "default": ""},
    "top_int": {"type": "varchar", "default": ""},
    "position_int": {"type": "varchar", "default": ""},
    "weight_int": {"type": "integer", "default": 0},
    "weight_flt": {"type": "float", "default": 0.0},
    "rank_int": {"type": "integer", "default": 0},
@@ -58,7 +58,7 @@ class KGSearch(Dealer):
        matchDense = self.get_vector(qst, emb_mdl, 1024, req.get("similarity", 0.1))
        q_vec = matchDense.embedding_data
        src = req.get("fields", ["docnm_kwd", "content_ltks", "kb_id", "img_id", "title_tks", "important_kwd",
                                 "doc_id", f"q_{len(q_vec)}_vec", "position_list", "name_kwd",
                                 "doc_id", f"q_{len(q_vec)}_vec", "position_int", "name_kwd",
                                 "available_int", "content_with_weight",
                                 "weight_int", "weight_flt"
                                 ])
@@ -20,7 +20,6 @@ from rag.nlp import tokenize, is_english
from rag.nlp import rag_tokenizer
from deepdoc.parser import PdfParser, PptParser, PlainParser
from PyPDF2 import PdfReader as pdf2_read
import json
class Ppt(PptParser):
@@ -109,9 +108,9 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
        d = copy.deepcopy(doc)
        pn += from_page
        d["image"] = img
        d["page_num_list"] = json.dumps([pn + 1])
        d["top_list"] = json.dumps([0])
        d["position_list"] = json.dumps([(pn + 1, 0, img.size[0], 0, img.size[1])])
        d["page_num_int"] = [pn + 1]
        d["top_int"] = [0]
        d["position_int"] = [(pn + 1, 0, img.size[0], 0, img.size[1])]
        tokenize(d, txt, eng)
        res.append(d)
    return res
@@ -125,10 +124,9 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
        pn += from_page
        if img:
            d["image"] = img
        d["page_num_list"] = json.dumps([pn + 1])
        d["top_list"] = json.dumps([0])
        d["position_list"] = json.dumps([
            (pn + 1, 0, img.size[0] if img else 0, 0, img.size[1] if img else 0)])
        d["page_num_int"] = [pn + 1]
        d["top_int"] = [0]
        d["position_int"] = [(pn + 1, 0, img.size[0] if img else 0, 0, img.size[1] if img else 0)]
        tokenize(d, txt, eng)
        res.append(d)
    return res
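For slide and page images the whole page is treated as a single box, so `position_int` spans the full image. A small illustration of the values the chunker produces, with a hypothetical PIL-style `img.size` of (width, height):

```python
# Hypothetical slide image size and zero-based page index.
width, height = 960, 720
pn = 4

d = {}
d["page_num_int"] = [pn + 1]
d["top_int"] = [0]
# One (page, left, right, top, bottom) row covering the whole slide.
d["position_int"] = [(pn + 1, 0, width, 0, height)]
print(d["position_int"])  # [(5, 0, 960, 0, 720)]
```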
@@ -22,7 +22,6 @@ from rag.utils import num_tokens_from_string
from . import rag_tokenizer
import re
import copy
import json
import roman_numbers as r
from word2number import w2n
from cn2an import cn2an
@@ -311,16 +310,16 @@ def tokenize_table(tbls, doc, eng, batch_size=10):
def add_positions(d, poss):
    if not poss:
        return
    page_num_list = []
    position_list = []
    top_list = []
    page_num_int = []
    position_int = []
    top_int = []
    for pn, left, right, top, bottom in poss:
        page_num_list.append(int(pn + 1))
        top_list.append(int(top))
        position_list.append((int(pn + 1), int(left), int(right), int(top), int(bottom)))
    d["page_num_list"] = json.dumps(page_num_list)
    d["position_list"] = json.dumps(position_list)
    d["top_list"] = json.dumps(top_list)
        page_num_int.append(int(pn + 1))
        top_int.append(int(top))
        position_int.append((int(pn + 1), int(left), int(right), int(top), int(bottom)))
    d["page_num_int"] = page_num_int
    d["position_int"] = position_int
    d["top_int"] = top_int
def remove_contents_table(sections, eng=False):
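A quick usage sketch of the updated `add_positions` above, assuming the RAGFlow package is importable and that `poss` carries zero-based page indices and pixel coordinates, as elsewhere in this diff:

```python
from rag.nlp import add_positions  # defined in the hunk above

d = {}
# Each entry: (page_index, left, right, top, bottom), page_index is zero-based.
poss = [(0, 40, 560, 120, 380), (1, 40, 560, 90, 300)]
add_positions(d, poss)

print(d["page_num_int"])  # [1, 2]
print(d["top_int"])       # [120, 90]
print(d["position_int"])  # [(1, 40, 560, 120, 380), (2, 40, 560, 90, 300)]
```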
@@ -16,7 +16,6 @@
import logging
import re
import json
from dataclasses import dataclass
from rag.utils import rmSpace
@@ -74,7 +73,7 @@ class Dealer:
        offset, limit = pg * ps, (pg + 1) * ps
        src = req.get("fields", ["docnm_kwd", "content_ltks", "kb_id", "img_id", "title_tks", "important_kwd",
                                 "doc_id", "position_list", "knowledge_graph_kwd", "question_kwd", "question_tks",
                                 "doc_id", "page_num_int", "top_int", "create_timestamp_flt", "knowledge_graph_kwd", "question_kwd", "question_tks",
                                 "available_int", "content_with_weight", "pagerank_fea"])
        kwds = set([])
@@ -82,6 +81,8 @@ class Dealer:
        q_vec = []
        if not qst:
            if req.get("sort"):
                orderBy.asc("page_num_int")
                orderBy.asc("top_int")
                orderBy.desc("create_timestamp_flt")
            res = self.dataStore.search(src, [], filters, [], orderBy, offset, limit, idx_names, kb_ids)
            total=self.dataStore.getTotal(res)
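With no query string and `sort` requested, chunks come back in reading order: ascending page, then ascending top offset, with newer documents first as a tiebreaker. A plain-Python sketch of the intended ordering, using hypothetical records and the first value of each list for simplicity (the real sort runs inside the doc store):

```python
chunks = [
    {"id": "c1", "page_num_int": [2], "top_int": [300], "create_timestamp_flt": 1.0},
    {"id": "c2", "page_num_int": [1], "top_int": [500], "create_timestamp_flt": 1.0},
    {"id": "c3", "page_num_int": [1], "top_int": [100], "create_timestamp_flt": 2.0},
]

# asc(page_num_int), asc(top_int), desc(create_timestamp_flt)
ordered = sorted(
    chunks,
    key=lambda c: (c["page_num_int"][0], c["top_int"][0], -c["create_timestamp_flt"]),
)
print([c["id"] for c in ordered])  # ['c3', 'c2', 'c1']
```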
@@ -340,7 +341,7 @@ class Dealer:
            chunk = sres.field[id]
            dnm = chunk["docnm_kwd"]
            did = chunk["doc_id"]
            position_list = chunk.get("position_list", "[]")
            position_int = chunk.get("position_int", [])
            d = {
                "chunk_id": id,
                "content_ltks": chunk["content_ltks"],
@@ -354,7 +355,7 @@ class Dealer:
                "vector_similarity": vsim[i],
                "term_similarity": tsim[i],
                "vector": chunk.get(vector_column, zero_vector),
                "positions": json.loads(position_list)
                "positions": position_int,
            }
            if highlight and sres.highlight:
                if id in sres.highlight:
@@ -211,9 +211,9 @@ def build_chunks(task, progress_callback):
        if not d.get("image"):
            _ = d.pop("image", None)
            d["img_id"] = ""
            d["page_num_list"] = json.dumps([])
            d["position_list"] = json.dumps([])
            d["top_list"] = json.dumps([])
            d["page_num_int"] = []
            d["position_int"] = []
            d["top_int"] = []
            docs.append(d)
            continue
@@ -185,8 +185,14 @@ class ESConnection(DocStoreConnection):
            orders = list()
            for field, order in orderBy.fields:
                order = "asc" if order == 0 else "desc"
                orders.append({field: {"order": order, "unmapped_type": "float",
                                       "mode": "avg", "numeric_type": "double"}})
                if field in ["page_num_int", "top_int"]:
                    order_info = {"order": order, "unmapped_type": "float",
                                  "mode": "avg", "numeric_type": "double"}
                elif field.endswith("_int") or field.endswith("_flt"):
                    order_info = {"order": order, "unmapped_type": "float"}
                else:
                    order_info = {"order": order, "unmapped_type": "text"}
                orders.append({field: order_info})
            s = s.sort(*orders)
        if limit > 0:
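Under these branches, `page_num_int` and `top_int` keep the avg-mode numeric sort, other `_int`/`_flt` fields fall back to a plain float `unmapped_type`, and everything else sorts as text. A standalone sketch of the resulting sort clauses (dicts only, no live elasticsearch-dsl call):

```python
def order_clause(field, order):
    # Mirrors the branch added in ESConnection.search: position fields first,
    # then other numeric suffixes, then text.
    if field in ["page_num_int", "top_int"]:
        info = {"order": order, "unmapped_type": "float", "mode": "avg", "numeric_type": "double"}
    elif field.endswith("_int") or field.endswith("_flt"):
        info = {"order": order, "unmapped_type": "float"}
    else:
        info = {"order": order, "unmapped_type": "text"}
    return {field: info}

print(order_clause("page_num_int", "asc"))
print(order_clause("weight_flt", "desc"))
print(order_clause("docnm_kwd", "asc"))
```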
@@ -297,7 +297,7 @@ class InfinityConnection(DocStoreConnection):
            df_list.append(kb_res)
        self.connPool.release_conn(inf_conn)
        res = concat_dataframes(df_list, selectFields)
        logger.debug("INFINITY search tables: " + str(table_list))
        logger.debug(f"INFINITY search tables: {str(table_list)}, result: {str(res)}")
        return res
    def get(
@@ -307,8 +307,10 @@ class InfinityConnection(DocStoreConnection):
        db_instance = inf_conn.get_database(self.dbName)
        df_list = list()
        assert isinstance(knowledgebaseIds, list)
        table_list = list()
        for knowledgebaseId in knowledgebaseIds:
            table_name = f"{indexName}_{knowledgebaseId}"
            table_list.append(table_name)
            table_instance = db_instance.get_table(table_name)
            kb_res = table_instance.output(["*"]).filter(f"id = '{chunkId}'").to_pl()
            if len(kb_res) != 0 and kb_res.shape[0] > 0:
@@ -316,6 +318,7 @@ class InfinityConnection(DocStoreConnection):
        self.connPool.release_conn(inf_conn)
        res = concat_dataframes(df_list, ["id"])
        logger.debug(f"INFINITY get tables: {str(table_list)}, result: {str(res)}")
        res_fields = self.getFields(res, res.columns)
        return res_fields.get(chunkId, None)
@@ -349,15 +352,22 @@ class InfinityConnection(DocStoreConnection):
            for k, v in d.items():
                if k.endswith("_kwd") and isinstance(v, list):
                    d[k] = " ".join(v)
                if k == 'kb_id':
                elif k == 'kb_id':
                    if isinstance(d[k], list):
                        d[k] = d[k][0] # since d[k] is a list, but we need a str
                elif k == "position_int":
                    assert isinstance(v, list)
                    arr = [num for row in v for num in row]
                    d[k] = "_".join(f"{num:08x}" for num in arr)
                elif k in ["page_num_int", "top_int", "position_int"]:
                    assert isinstance(v, list)
                    d[k] = "_".join(f"{num:08x}" for num in v)
        ids = ["'{}'".format(d["id"]) for d in documents]
        str_ids = ", ".join(ids)
        str_filter = f"id IN ({str_ids})"
        table_instance.delete(str_filter)
        # for doc in documents:
        # logger.info(f"insert position_list: {doc['position_list']}")
        # logger.info(f"insert position_int: {doc['position_int']}")
        # logger.info(f"InfinityConnection.insert {json.dumps(documents)}")
        table_instance.insert(documents)
        self.connPool.release_conn(inf_conn)
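Infinity stores these fields as varchar, so the insert path flattens `position_int` rows and hex-encodes every integer as a fixed-width, underscore-joined token. A standalone sketch of the same encoding, with hypothetical values:

```python
def encode_ints(values):
    # Fixed-width 8-digit hex so tokens split cleanly on "_" and round-trip exactly.
    return "_".join(f"{num:08x}" for num in values)

position_int = [(1, 40, 560, 120, 380), (2, 40, 560, 90, 300)]
page_num_int = [1, 2]

# position_int is flattened row by row before encoding, as in insert().
flat = [num for row in position_int for num in row]
print(encode_ints(flat))          # 00000001_00000028_00000230_00000078_0000017c_...
print(encode_ints(page_num_int))  # 00000001_00000002
```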
@@ -367,8 +377,8 @@ class InfinityConnection(DocStoreConnection):
    def update(
        self, condition: dict, newValue: dict, indexName: str, knowledgebaseId: str
    ) -> bool:
        # if 'position_list' in newValue:
        # logger.info(f"upsert position_list: {newValue['position_list']}")
        # if 'position_int' in newValue:
        # logger.info(f"update position_int: {newValue['position_int']}")
        inf_conn = self.connPool.get_conn()
        db_instance = inf_conn.get_database(self.dbName)
        table_name = f"{indexName}_{knowledgebaseId}"
@@ -377,6 +387,16 @@ class InfinityConnection(DocStoreConnection):
        for k, v in newValue.items():
            if k.endswith("_kwd") and isinstance(v, list):
                newValue[k] = " ".join(v)
            elif k == 'kb_id':
                if isinstance(newValue[k], list):
                    newValue[k] = newValue[k][0] # since d[k] is a list, but we need a str
            elif k == "position_int":
                assert isinstance(v, list)
                arr = [num for row in v for num in row]
                newValue[k] = "_".join(f"{num:08x}" for num in arr)
            elif k in ["page_num_int", "top_int"]:
                assert isinstance(v, list)
                newValue[k] = "_".join(f"{num:08x}" for num in v)
        table_instance.update(filter, newValue)
        self.connPool.release_conn(inf_conn)
        return True
@@ -423,9 +443,22 @@ class InfinityConnection(DocStoreConnection):
                v = res[fieldnm][i]
                if isinstance(v, Series):
                    v = list(v)
                elif fieldnm == "important_kwd":
                elif fieldnm.endswith("_kwd"):
                    assert isinstance(v, str)
                    v = v.split()
                elif fieldnm == "position_int":
                    assert isinstance(v, str)
                    if v:
                        arr = [int(hex_val, 16) for hex_val in v.split('_')]
                        v = [arr[i:i + 5] for i in range(0, len(arr), 5)]
                    else:
                        v = []
                elif fieldnm in ["page_num_int", "top_int"]:
                    assert isinstance(v, str)
                    if v:
                        v = [int(hex_val, 16) for hex_val in v.split('_')]
                    else:
                        v = []
                else:
                    if not isinstance(v, str):
                        v = str(v)
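`getFields` reverses the encoding: split on underscores, parse each token as hex, and for `position_int` regroup the flat numbers into five-value boxes, matching the 5-tuples produced by `add_positions`. A small round-trip check using the same hypothetical values as the insert sketch above (helper names are illustrative):

```python
def decode_ints(s):
    # Empty string means no positions were stored.
    return [int(tok, 16) for tok in s.split("_")] if s else []

def decode_positions(s):
    flat = decode_ints(s)
    # Regroup into (page, left, right, top, bottom) rows of five.
    return [flat[i:i + 5] for i in range(0, len(flat), 5)]

encoded = "00000001_00000028_00000230_00000078_0000017c"
print(decode_positions(encoded))          # [[1, 40, 560, 120, 380]]
print(decode_ints("00000001_00000002"))   # [1, 2]
```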