### What problem does this PR solve?

Rename `page_num_list`, `top_list`, `position_list` to `page_num_int`, `top_int`, `position_int`.

### Type of change

- [x] Refactoring
```diff
 cp /deps/cl100k_base.tiktoken /ragflow/9b5ad71b2ce5302211f9c61530b329a4922fc6a4

 ENV TIKA_SERVER_JAR="file:///ragflow/tika-server-standard-3.0.0.jar"
+ENV DEBIAN_FRONTEND=noninteractive

 # Setup apt
+# cv2 requires libGL.so.1
+# Python package and implicit dependencies:
+# opencv-python: libglib2.0-0 libglx-mesa0 libgl1
+# aspose-slides: pkg-config libicu-dev libgdiplus libssl1.1_1.1.1f-1ubuntu2_amd64.deb
+# python-pptx: default-jdk tika-server-standard-3.0.0.jar
+# selenium: libatk-bridge2.0-0 chrome-linux64-121-0-6167-85
+# Building C extensions: libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev
 RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
     if [ "$NEED_MIRROR" == "1" ]; then \
         sed -i 's|http://archive.ubuntu.com|https://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list; \
     fi; \
     apt update && \
     apt --no-install-recommends install -y ca-certificates && \
     apt update && \
-    DEBIAN_FRONTEND=noninteractive apt install -y curl libpython3-dev nginx libglib2.0-0 libglx-mesa0 pkg-config libicu-dev libgdiplus default-jdk python3-pip pipx \
-        libatk-bridge2.0-0 libgtk-4-1 libnss3 xdg-utils unzip libgbm-dev wget git nginx libgl1 vim less
+    apt install -y libglib2.0-0 libglx-mesa0 libgl1 && \
+    apt install -y pkg-config libicu-dev libgdiplus && \
+    apt install -y default-jdk && \
+    apt install -y libatk-bridge2.0-0 && \
+    apt install -y libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev && \
+    apt install -y python3-pip pipx nginx unzip curl wget git vim less

 RUN if [ "$NEED_MIRROR" == "1" ]; then \
     pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple && \
```
| "question_kwd": sres.field[id].get("question_kwd", []), | "question_kwd": sres.field[id].get("question_kwd", []), | ||||
| "image_id": sres.field[id].get("img_id", ""), | "image_id": sres.field[id].get("img_id", ""), | ||||
| "available_int": int(sres.field[id].get("available_int", 1)), | "available_int": int(sres.field[id].get("available_int", 1)), | ||||
| "positions": json.loads(sres.field[id].get("position_list", "[]")), | |||||
| "positions": sres.field[id].get("position_int", []), | |||||
| } | } | ||||
| assert isinstance(d["positions"], list) | assert isinstance(d["positions"], list) | ||||
| assert len(d["positions"]) == 0 or (isinstance(d["positions"][0], list) and len(d["positions"][0]) == 5) | assert len(d["positions"]) == 0 or (isinstance(d["positions"][0], list) and len(d["positions"][0]) == 5) | 
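After this change, `positions` is a native list of 5-element lists rather than a JSON-encoded string, so `json.loads` disappears from the call sites. A minimal sketch of the expected shape, with hypothetical sample values:

```python
# Each position is [page, left, right, top, bottom] (hypothetical values).
positions = [[1, 0, 960, 0, 720], [2, 40, 900, 100, 180]]

assert isinstance(positions, list)
assert all(isinstance(p, list) and len(p) == 5 for p in positions)
```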
| "question_kwd": sres.field[id].get("question_kwd", []), | "question_kwd": sres.field[id].get("question_kwd", []), | ||||
| "img_id": sres.field[id].get("img_id", ""), | "img_id": sres.field[id].get("img_id", ""), | ||||
| "available_int": sres.field[id].get("available_int", 1), | "available_int": sres.field[id].get("available_int", 1), | ||||
| "positions": sres.field[id].get("position_int", "").split("\t"), | |||||
| "positions": sres.field[id].get("position_int", []), | |||||
| } | } | ||||
| if len(d["positions"]) % 5 == 0: | if len(d["positions"]) % 5 == 0: | ||||
| poss = [] | poss = [] | 
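The `% 5 == 0` branch kept in the hunk above suggests this endpoint may still receive a flattened sequence of numbers; a hedged sketch of the regrouping that branch implies, again with hypothetical values:

```python
# Hedged sketch: if "positions" arrives as a flat list whose length is a
# multiple of 5, regroup it into 5-element positions.
flat = [1, 0, 960, 0, 720, 2, 40, 900, 100, 180]  # hypothetical
assert len(flat) % 5 == 0
poss = [flat[i:i + 5] for i in range(0, len(flat), 5)]
print(poss)  # [[1, 0, 960, 0, 720], [2, 40, 900, 100, 180]]
```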
| "content_with_weight": {"type": "varchar", "default": ""}, | "content_with_weight": {"type": "varchar", "default": ""}, | ||||
| "content_ltks": {"type": "varchar", "default": ""}, | "content_ltks": {"type": "varchar", "default": ""}, | ||||
| "content_sm_ltks": {"type": "varchar", "default": ""}, | "content_sm_ltks": {"type": "varchar", "default": ""}, | ||||
| "page_num_list": {"type": "varchar", "default": ""}, | |||||
| "top_list": {"type": "varchar", "default": ""}, | |||||
| "position_list": {"type": "varchar", "default": ""}, | |||||
| "page_num_int": {"type": "varchar", "default": ""}, | |||||
| "top_int": {"type": "varchar", "default": ""}, | |||||
| "position_int": {"type": "varchar", "default": ""}, | |||||
| "weight_int": {"type": "integer", "default": 0}, | "weight_int": {"type": "integer", "default": 0}, | ||||
| "weight_flt": {"type": "float", "default": 0.0}, | "weight_flt": {"type": "float", "default": 0.0}, | ||||
| "rank_int": {"type": "integer", "default": 0}, | "rank_int": {"type": "integer", "default": 0}, | 
```diff
         matchDense = self.get_vector(qst, emb_mdl, 1024, req.get("similarity", 0.1))
         q_vec = matchDense.embedding_data
         src = req.get("fields", ["docnm_kwd", "content_ltks", "kb_id", "img_id", "title_tks", "important_kwd",
-                                 "doc_id", f"q_{len(q_vec)}_vec", "position_list", "name_kwd",
+                                 "doc_id", f"q_{len(q_vec)}_vec", "position_int", "name_kwd",
                                  "available_int", "content_with_weight",
                                  "weight_int", "weight_flt"
                                  ])
```
```diff
 from rag.nlp import rag_tokenizer
 from deepdoc.parser import PdfParser, PptParser, PlainParser
 from PyPDF2 import PdfReader as pdf2_read
-import json

 class Ppt(PptParser):

             d = copy.deepcopy(doc)
             pn += from_page
             d["image"] = img
-            d["page_num_list"] = json.dumps([pn + 1])
-            d["top_list"] = json.dumps([0])
-            d["position_list"] = json.dumps([(pn + 1, 0, img.size[0], 0, img.size[1])])
+            d["page_num_int"] = [pn + 1]
+            d["top_int"] = [0]
+            d["position_int"] = [(pn + 1, 0, img.size[0], 0, img.size[1])]
             tokenize(d, txt, eng)
             res.append(d)
         return res

             pn += from_page
             if img:
                 d["image"] = img
-            d["page_num_list"] = json.dumps([pn + 1])
-            d["top_list"] = json.dumps([0])
-            d["position_list"] = json.dumps([
-                (pn + 1, 0, img.size[0] if img else 0, 0, img.size[1] if img else 0)])
+            d["page_num_int"] = [pn + 1]
+            d["top_int"] = [0]
+            d["position_int"] = [(pn + 1, 0, img.size[0] if img else 0, 0, img.size[1] if img else 0)]
             tokenize(d, txt, eng)
             res.append(d)
         return res
```
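Both branches above attach a whole-page position to each slide or page image. A minimal sketch of the resulting fields, assuming a Pillow image whose `.size` is `(width, height)`:

```python
# Hedged sketch of the per-page position; pn is 0-based here.
from PIL import Image

img = Image.new("RGB", (960, 720))  # hypothetical rendered page
pn = 0
d = {
    "page_num_int": [pn + 1],
    "top_int": [0],
    # (page, left, right, top, bottom) spanning the whole page
    "position_int": [(pn + 1, 0, img.size[0], 0, img.size[1])],
}
```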
```diff
 from . import rag_tokenizer
 import re
 import copy
-import json
 import roman_numbers as r
 from word2number import w2n
 from cn2an import cn2an

 def add_positions(d, poss):
     if not poss:
         return
-    page_num_list = []
-    position_list = []
-    top_list = []
+    page_num_int = []
+    position_int = []
+    top_int = []
     for pn, left, right, top, bottom in poss:
-        page_num_list.append(int(pn + 1))
-        top_list.append(int(top))
-        position_list.append((int(pn + 1), int(left), int(right), int(top), int(bottom)))
-    d["page_num_list"] = json.dumps(page_num_list)
-    d["position_list"] = json.dumps(position_list)
-    d["top_list"] = json.dumps(top_list)
+        page_num_int.append(int(pn + 1))
+        top_int.append(int(top))
+        position_int.append((int(pn + 1), int(left), int(right), int(top), int(bottom)))
+    d["page_num_int"] = page_num_int
+    d["position_int"] = position_int
+    d["top_int"] = top_int

 def remove_contents_table(sections, eng=False):
```
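A usage sketch for the renamed helper, assuming the `add_positions` definition above: `poss` carries 0-based page numbers plus pixel coordinates, and the stored values are now plain Python lists instead of JSON strings:

```python
# Hypothetical positions: (page_0_based, left, right, top, bottom).
d = {}
poss = [(0, 10, 500, 20, 60), (1, 10, 500, 700, 740)]
add_positions(d, poss)
print(d["page_num_int"])  # [1, 2]
print(d["top_int"])       # [20, 700]
print(d["position_int"])  # [(1, 10, 500, 20, 60), (2, 10, 500, 700, 740)]
```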
```diff
 import logging
 import re
-import json
 from dataclasses import dataclass
 from rag.utils import rmSpace

         offset, limit = pg * ps, (pg + 1) * ps
         src = req.get("fields", ["docnm_kwd", "content_ltks", "kb_id", "img_id", "title_tks", "important_kwd",
-                                 "doc_id", "position_list", "knowledge_graph_kwd", "question_kwd", "question_tks",
+                                 "doc_id", "page_num_int", "top_int", "create_timestamp_flt", "knowledge_graph_kwd", "question_kwd", "question_tks",
                                  "available_int", "content_with_weight", "pagerank_fea"])
         kwds = set([])
         q_vec = []
         if not qst:
             if req.get("sort"):
+                orderBy.asc("page_num_int")
+                orderBy.asc("top_int")
                 orderBy.desc("create_timestamp_flt")
             res = self.dataStore.search(src, [], filters, [], orderBy, offset, limit, idx_names, kb_ids)
             total = self.dataStore.getTotal(res)

             chunk = sres.field[id]
             dnm = chunk["docnm_kwd"]
             did = chunk["doc_id"]
-            position_list = chunk.get("position_list", "[]")
+            position_int = chunk.get("position_int", [])
             d = {
                 "chunk_id": id,
                 "content_ltks": chunk["content_ltks"],
                 "vector_similarity": vsim[i],
                 "term_similarity": tsim[i],
                 "vector": chunk.get(vector_column, zero_vector),
-                "positions": json.loads(position_list)
+                "positions": position_int,
             }
             if highlight and sres.highlight:
                 if id in sres.highlight:
```
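With these changes, a sorted, query-less chunk listing is ordered by ascending `page_num_int`, then ascending `top_int`, with `create_timestamp_flt` descending as the final tie-breaker; in effect, chunks come back in reading order. The default `fields` list now requests `page_num_int`, `top_int`, and `create_timestamp_flt` in place of `position_list`, presumably so the sort keys are also present in the returned records.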
| if not d.get("image"): | if not d.get("image"): | ||||
| _ = d.pop("image", None) | _ = d.pop("image", None) | ||||
| d["img_id"] = "" | d["img_id"] = "" | ||||
| d["page_num_list"] = json.dumps([]) | |||||
| d["position_list"] = json.dumps([]) | |||||
| d["top_list"] = json.dumps([]) | |||||
| d["page_num_int"] = [] | |||||
| d["position_int"] = [] | |||||
| d["top_int"] = [] | |||||
| docs.append(d) | docs.append(d) | ||||
| continue | continue | ||||
| orders = list() | orders = list() | ||||
| for field, order in orderBy.fields: | for field, order in orderBy.fields: | ||||
| order = "asc" if order == 0 else "desc" | order = "asc" if order == 0 else "desc" | ||||
| orders.append({field: {"order": order, "unmapped_type": "float", | |||||
| "mode": "avg", "numeric_type": "double"}}) | |||||
| if field in ["page_num_int", "top_int"]: | |||||
| order_info = {"order": order, "unmapped_type": "float", | |||||
| "mode": "avg", "numeric_type": "double"} | |||||
| elif field.endswith("_int") or field.endswith("_flt"): | |||||
| order_info = {"order": order, "unmapped_type": "float"} | |||||
| else: | |||||
| order_info = {"order": order, "unmapped_type": "text"} | |||||
| orders.append({field: order_info}) | |||||
| s = s.sort(*orders) | s = s.sort(*orders) | ||||
| if limit > 0: | if limit > 0: | 
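In the Elasticsearch connector, the one-size-fits-all sort body is replaced with per-field handling: the multi-valued `page_num_int`/`top_int` keep the numeric avg-mode sort, other `_int`/`_flt` fields get a plain float sort, and everything else falls back to text. A standalone sketch of what the loop produces for the default ordering (the `(field, 0|1)` pairs are hypothetical input, 0 meaning ascending):

```python
# Reproduces the branch logic from the hunk above on sample input.
order_by = [("page_num_int", 0), ("top_int", 0), ("create_timestamp_flt", 1)]

orders = []
for field, order in order_by:
    order = "asc" if order == 0 else "desc"
    if field in ["page_num_int", "top_int"]:
        info = {"order": order, "unmapped_type": "float",
                "mode": "avg", "numeric_type": "double"}
    elif field.endswith("_int") or field.endswith("_flt"):
        info = {"order": order, "unmapped_type": "float"}
    else:
        info = {"order": order, "unmapped_type": "text"}
    orders.append({field: info})

print(orders[0])  # avg-mode numeric sort for the list-valued page_num_int
print(orders[2])  # plain float sort for create_timestamp_flt
```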
```diff
             df_list.append(kb_res)
         self.connPool.release_conn(inf_conn)
         res = concat_dataframes(df_list, selectFields)
-        logger.debug("INFINITY search tables: " + str(table_list))
+        logger.debug(f"INFINITY search tables: {str(table_list)}, result: {str(res)}")
         return res

     def get(
         db_instance = inf_conn.get_database(self.dbName)
         df_list = list()
         assert isinstance(knowledgebaseIds, list)
+        table_list = list()
         for knowledgebaseId in knowledgebaseIds:
             table_name = f"{indexName}_{knowledgebaseId}"
+            table_list.append(table_name)
             table_instance = db_instance.get_table(table_name)
             kb_res = table_instance.output(["*"]).filter(f"id = '{chunkId}'").to_pl()
             if len(kb_res) != 0 and kb_res.shape[0] > 0:
         self.connPool.release_conn(inf_conn)
         res = concat_dataframes(df_list, ["id"])
+        logger.debug(f"INFINITY get tables: {str(table_list)}, result: {str(res)}")
         res_fields = self.getFields(res, res.columns)
         return res_fields.get(chunkId, None)
```
```diff
         for k, v in d.items():
             if k.endswith("_kwd") and isinstance(v, list):
                 d[k] = " ".join(v)
-            if k == 'kb_id':
+            elif k == 'kb_id':
                 if isinstance(d[k], list):
                     d[k] = d[k][0]  # since d[k] is a list, but we need a str
+            elif k == "position_int":
+                assert isinstance(v, list)
+                arr = [num for row in v for num in row]
+                d[k] = "_".join(f"{num:08x}" for num in arr)
+            elif k in ["page_num_int", "top_int", "position_int"]:
+                assert isinstance(v, list)
+                d[k] = "_".join(f"{num:08x}" for num in v)

         ids = ["'{}'".format(d["id"]) for d in documents]
         str_ids = ", ".join(ids)
         str_filter = f"id IN ({str_ids})"
         table_instance.delete(str_filter)
         # for doc in documents:
-        #     logger.info(f"insert position_list: {doc['position_list']}")
+        #     logger.info(f"insert position_int: {doc['position_int']}")
         # logger.info(f"InfinityConnection.insert {json.dumps(documents)}")
         table_instance.insert(documents)
         self.connPool.release_conn(inf_conn)
```
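Since Infinity stores these columns as `varchar` (see the mapping hunk earlier), `insert()` flattens each 5-tuple and hex-encodes every number. A self-contained sketch of the encoding with hypothetical coordinates:

```python
# Each (page, left, right, top, bottom) 5-tuple is flattened, then every
# number is written as fixed-width lowercase hex joined by "_".
position_int = [(1, 0, 960, 0, 720), (2, 40, 900, 100, 180)]  # hypothetical

arr = [num for row in position_int for num in row]
encoded = "_".join(f"{num:08x}" for num in arr)
print(encoded[:26])  # 00000001_00000000_000003c0
```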
```diff
     def update(
             self, condition: dict, newValue: dict, indexName: str, knowledgebaseId: str
     ) -> bool:
-        # if 'position_list' in newValue:
-        #     logger.info(f"upsert position_list: {newValue['position_list']}")
+        # if 'position_int' in newValue:
+        #     logger.info(f"update position_int: {newValue['position_int']}")
         inf_conn = self.connPool.get_conn()
         db_instance = inf_conn.get_database(self.dbName)
         table_name = f"{indexName}_{knowledgebaseId}"
         for k, v in newValue.items():
             if k.endswith("_kwd") and isinstance(v, list):
                 newValue[k] = " ".join(v)
+            elif k == 'kb_id':
+                if isinstance(newValue[k], list):
+                    newValue[k] = newValue[k][0]  # since d[k] is a list, but we need a str
+            elif k == "position_int":
+                assert isinstance(v, list)
+                arr = [num for row in v for num in row]
+                newValue[k] = "_".join(f"{num:08x}" for num in arr)
+            elif k in ["page_num_int", "top_int"]:
+                assert isinstance(v, list)
+                newValue[k] = "_".join(f"{num:08x}" for num in v)
         table_instance.update(filter, newValue)
         self.connPool.release_conn(inf_conn)
         return True
```
```diff
                 v = res[fieldnm][i]
                 if isinstance(v, Series):
                     v = list(v)
-                elif fieldnm == "important_kwd":
+                elif fieldnm.endswith("_kwd"):
                     assert isinstance(v, str)
                     v = v.split()
+                elif fieldnm == "position_int":
+                    assert isinstance(v, str)
+                    if v:
+                        arr = [int(hex_val, 16) for hex_val in v.split('_')]
+                        v = [arr[i:i + 5] for i in range(0, len(arr), 5)]
+                    else:
+                        v = []
+                elif fieldnm in ["page_num_int", "top_int"]:
+                    assert isinstance(v, str)
+                    if v:
+                        v = [int(hex_val, 16) for hex_val in v.split('_')]
+                    else:
+                        v = []
                 else:
                     if not isinstance(v, str):
                         v = str(v)
```
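And the inverse in `getFields()`: note that the regrouping width must be 5 so it matches the `(page, left, right, top, bottom)` tuples written by `insert()` and the length-5 assertion in the API test near the top of this diff. A hedged round-trip sketch:

```python
# Decoding must regroup in fives to match the 5-tuple layout written by
# insert(); the encoded string below is a hypothetical sample.
encoded = "00000001_00000000_000003c0_00000000_000002d0"

arr = [int(hex_val, 16) for hex_val in encoded.split("_")]
positions = [arr[i:i + 5] for i in range(0, len(arr), 5)]
print(positions)  # [[1, 0, 960, 0, 720]]
```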