### What problem does this PR solve?

Refactor Document API

### Type of change

- [x] Refactoring

Co-authored-by: liuhua <10215101452@stu.ecun.edu.cn>
```diff
 page_number = int(request.args.get("page", 1))
 items_per_page = int(request.args.get("page_size", 1024))
 orderby = request.args.get("orderby", "create_time")
-if request.args.get("desc") == "False":
+if request.args.get("desc") == "False" or request.args.get("desc") == "false":
     desc = False
 else:
     desc = True
```
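For reference, a minimal sketch of the flag parsing this hunk implements; `parse_desc` is a hypothetical helper, not code from this PR:

```python
# Hypothetical helper mirroring the accepted values above; not part of the PR.
def parse_desc(value, default=True):
    """Interpret a query-string flag, treating 'False'/'false' as False."""
    if value is None:
        return default
    return value not in ("False", "false")

assert parse_desc("False") is False
assert parse_desc("false") is False
assert parse_desc(None) is True
assert parse_desc("true") is True
```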
| if req["tenant_id"] != tenant_id: | if req["tenant_id"] != tenant_id: | ||||
| return get_error_data_result( | return get_error_data_result( | ||||
| retmsg="Can't change tenant_id.") | retmsg="Can't change tenant_id.") | ||||
| if "embedding_model" in req: | |||||
| if req["embedding_model"] != t.embd_id: | |||||
| return get_error_data_result( | |||||
| retmsg="Can't change embedding_model.") | |||||
| req.pop("embedding_model") | |||||
| e, kb = KnowledgebaseService.get_by_id(dataset_id) | e, kb = KnowledgebaseService.get_by_id(dataset_id) | ||||
| if "chunk_count" in req: | if "chunk_count" in req: | ||||
| if req["chunk_count"] != kb.chunk_num: | if req["chunk_count"] != kb.chunk_num: | ||||
| return get_error_data_result( | return get_error_data_result( | ||||
| retmsg="If chunk count is not 0, parse method is not changable.") | retmsg="If chunk count is not 0, parse method is not changable.") | ||||
| req['parser_id'] = req.pop('parse_method') | req['parser_id'] = req.pop('parse_method') | ||||
| if "embedding_model" in req: | |||||
| if kb.chunk_num != 0 and req['parse_method'] != kb.parser_id: | |||||
| return get_error_data_result( | |||||
| retmsg="If chunk count is not 0, parse method is not changable.") | |||||
| req['embd_id'] = req.pop('embedding_model') | |||||
| if "name" in req: | if "name" in req: | ||||
| req["name"] = req["name"].strip() | req["name"] = req["name"].strip() | ||||
| if req["name"].lower() != kb.name.lower() \ | if req["name"].lower() != kb.name.lower() \ | ||||
```diff
 page_number = int(request.args.get("page", 1))
 items_per_page = int(request.args.get("page_size", 1024))
 orderby = request.args.get("orderby", "create_time")
-if request.args.get("desc") == "False":
+if request.args.get("desc") == "False" or request.args.get("desc") == "false":
     desc = False
 else:
     desc = True
```
```diff
 from flask import request
 from flask_login import login_required, current_user
 from elasticsearch_dsl import Q
-from pygments import highlight
 from sphinx.addnodes import document
 from rag.app.qa import rmPrefix, beAdoc
```
```diff
         return get_error_data_result(retmsg=f'You do not own the dataset {dataset_id}.')
     doc = DocumentService.query(kb_id=dataset_id, id=document_id)
     if not doc:
-        return get_error_data_result(retmsg=f'The dataset not own the document {doc.id}.')
+        return get_error_data_result(retmsg=f'The dataset not own the document {document_id}.')
     # The process of downloading
     doc_id, doc_location = File2DocumentService.get_storage_address(doc_id=document_id)  # minio address
     file_stream = STORAGE_IMPL.get(doc_id, doc_location)
     return get_result()
```
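A hedged client-side sketch of the download flow this hunk serves; the URL shape follows the routes elsewhere in this PR, and the host, IDs, and token are placeholders:

```python
# Hypothetical download call; the response body is the raw file bytes
# fetched from storage by the handler above.
import requests

url = "http://127.0.0.1:9380/api/v1/dataset/<DATASET_ID>/document/<DOCUMENT_ID>"
resp = requests.get(url, headers={"Authorization": "Bearer <YOUR_ACCESS_TOKEN>"})
with open("downloaded.txt", "wb") as f:
    f.write(resp.content)
```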
```diff
-@manager.route('/dataset/{dataset_id}/document/{document_id}/chunk', methods=['GET'])
+@manager.route('/dataset/<dataset_id>/document/<document_id>/chunk', methods=['GET'])
 @token_required
 def list_chunk(tenant_id,dataset_id,document_id):
     if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
         return server_error_response(e)
```
```diff
-@manager.route('/dataset/{dataset_id}/document/{document_id}/chunk', methods=['POST'])
+@manager.route('/dataset/<dataset_id>/document/<document_id>/chunk', methods=['POST'])
 @token_required
 def create(tenant_id,dataset_id,document_id):
     if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
     doc = DocumentService.query(id=document_id, kb_id=dataset_id)
     if not doc:
         return get_error_data_result(retmsg=f"You don't own the document {document_id}.")
+    doc = doc[0]
     req = request.json
     if not req.get("content"):
         return get_error_data_result(retmsg="`content` is required")
     # return get_result(data={"chunk_id": chunk_id})
```
```diff
-@manager.route('dataset/{dataset_id}/document/{document_id}/chunk', methods=['DELETE'])
+@manager.route('dataset/<dataset_id>/document/<document_id>/chunk', methods=['DELETE'])
 @token_required
 def rm_chunk(tenant_id,dataset_id,document_id):
     if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
     doc = DocumentService.query(id=document_id, kb_id=dataset_id)
     if not doc:
         return get_error_data_result(retmsg=f"You don't own the document {document_id}.")
+    doc = doc[0]
     req = request.json
     if not req.get("chunk_ids"):
         return get_error_data_result("`chunk_ids` is required")
+    for chunk_id in req.get("chunk_ids"):
+        res = ELASTICSEARCH.get(
+            chunk_id, search.index_name(
+                tenant_id))
+        if not res.get("found"):
+            return server_error_response(f"Chunk {chunk_id} not found")
     if not ELASTICSEARCH.deleteByQuery(
             Q("ids", values=req["chunk_ids"]), search.index_name(tenant_id)):
         return get_error_data_result(retmsg="Index updating failure")
```
```diff
-@manager.route('/dataset/{dataset_id}/document/{document_id}/chunk/{chunk_id}', methods=['PUT'])
+@manager.route('/dataset/<dataset_id>/document/<document_id>/chunk/<chunk_id>', methods=['PUT'])
 @token_required
 def set(tenant_id,dataset_id,document_id,chunk_id):
+    res = ELASTICSEARCH.get(
+        chunk_id, search.index_name(
+            tenant_id))
+    if not res.get("found"):
+        return get_error_data_result(f"Chunk {chunk_id} not found")
     if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
         return get_error_data_result(retmsg=f"You don't own the dataset {dataset_id}.")
     doc = DocumentService.query(id=document_id, kb_id=dataset_id)
     if not doc:
         return get_error_data_result(retmsg=f"You don't own the document {document_id}.")
     req = request.json
-    if not req.get("content"):
-        return get_error_data_result("`content` is required")
-    if not req.get("important_keywords"):
-        return get_error_data_result("`important_keywords` is required")
     d = {
         "id": chunk_id,
-        "content_with_weight": req["content"]}
+        "content_with_weight": req.get("content", res.get("content_with_weight"))}
     d["content_ltks"] = rag_tokenizer.tokenize(req["content"])
     d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
-    d["important_kwd"] = req["important_keywords"]
+    d["important_kwd"] = req.get("important_keywords", [])
     d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_keywords"]))
     if "available" in req:
         d["available_int"] = req["available"]
```
```diff
 @token_required
 def retrieval_test(tenant_id):
     req = request.args
-    if not req.get("datasets"):
+    req_json = request.json
+    if not req_json.get("datasets"):
         return get_error_data_result("`datasets` is required.")
-    for id in req.get("datasets"):
+    for id in req_json.get("datasets"):
         if not KnowledgebaseService.query(id=id,tenant_id=tenant_id):
             return get_error_data_result(f"You don't own the dataset {id}.")
-    if not req.get("question"):
+    if "question" not in req_json:
         return get_error_data_result("`question` is required.")
     page = int(req.get("offset", 1))
     size = int(req.get("limit", 30))
-    question = req["question"]
-    kb_id = req["datasets"]
+    question = req_json["question"]
+    kb_id = req_json["datasets"]
     if isinstance(kb_id, str): kb_id = [kb_id]
-    doc_ids = req.get("documents", [])
-    similarity_threshold = float(req.get("similarity_threshold", 0.2))
+    doc_ids = req_json.get("documents", [])
+    similarity_threshold = float(req.get("similarity_threshold", 0.0))
     vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
     top = int(req.get("top_k", 1024))
+    if req.get("highlight")=="False" or req.get("highlight")=="false":
+        highlight = False
+    else:
+        highlight = True
     try:
         e, kb = KnowledgebaseService.get_by_id(kb_id[0])
         if not e:
         retr = retrievaler if kb.parser_id != ParserType.KG else kg_retrievaler
         ranks = retr.retrieval(question, embd_mdl, kb.tenant_id, kb_id, page, size,
                                similarity_threshold, vector_similarity_weight, top,
-                               doc_ids, rerank_mdl=rerank_mdl, highlight=req.get("highlight"))
+                               doc_ids, rerank_mdl=rerank_mdl, highlight=highlight)
         for c in ranks["chunks"]:
             if "vector" in c:
                 del c["vector"]
             for key, value in chunk.items():
                 new_key = key_mapping.get(key, key)
                 rename_chunk[new_key] = value
-                renamed_chunks.append(rename_chunk)
+            renamed_chunks.append(rename_chunk)
         ranks["chunks"] = renamed_chunks
         return get_result(data=ranks)
     except Exception as e:
         if str(e).find("not_found") > 0:
             return get_result(retmsg=f'No chunk found! Check the chunk status please!',
                               retcode=RetCode.DATA_ERROR)
         return server_error_response(e)
```
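After this change, `datasets`, `question`, and `documents` are read from the JSON body, while paging and `highlight` stay in the query string. A hedged sketch of a matching request (the endpoint path for `retrieval_test` is assumed, since the route decorator is not shown in this hunk; host, IDs, and token are placeholders):

```python
import requests

resp = requests.get(
    "http://127.0.0.1:9380/api/v1/retrieval",  # assumed path for retrieval_test
    params={"offset": 1, "limit": 30, "highlight": "false"},
    headers={"Authorization": "Bearer <YOUR_ACCESS_TOKEN>"},
    json={"datasets": ["<DATASET_ID>"], "question": "What is RAGFlow?"},
)
print(resp.json())
```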
```diff
 page_number = int(request.args.get("page", 1))
 items_per_page = int(request.args.get("page_size", 1024))
 orderby = request.args.get("orderby", "create_time")
-if request.args.get("desc") == "False":
+if request.args.get("desc") == "False" or request.args.get("desc") == "false":
     desc = False
 else:
     desc = True
```
**POST** `/api/v1/dataset`

Creates a dataset.

### Request

#### Request example

```bash
# "id": id must not be provided.
# "name": name is required and cannot be duplicated.
# "tenant_id": tenant_id must not be provided.
# "embedding_model": embedding_model must not be provided.
# "naive" means general.
curl --request POST \
  --url http://{address}/api/v1/dataset \
  --header 'Content-Type: application/json' \
```
#### Request parameters

- `"id"`: (*Body parameter*)
  The ID of the created dataset, used to uniquely identify different datasets.
  - If creating a dataset, `id` must not be provided.
- `"name"`: (*Body parameter*)
  The name of the dataset, which must adhere to the following requirements:
  - Required when creating a dataset and must be unique.
  - If updating a dataset, `name` must still be unique.
- `"avatar"`: (*Body parameter*)
  Base64 encoding of the avatar.
- `"tenant_id"`: (*Body parameter*)
  The ID of the tenant associated with the dataset, used to link it with specific users.
  - If creating a dataset, `tenant_id` must not be provided.
  - If updating a dataset, `tenant_id` cannot be changed.
- `"description"`: (*Body parameter*)
  The description of the dataset.
- `"language"`: (*Body parameter*)
  The language setting for the dataset.
- `"embedding_model"`: (*Body parameter*)
  Embedding model used in the dataset to generate vector embeddings.
  - If creating a dataset, `embedding_model` must not be provided.
  - If updating a dataset, `embedding_model` cannot be changed.
- `"permission"`: (*Body parameter*)
  Specifies who can manipulate the dataset.
- `"document_count"`: (*Body parameter*)
  Document count of the dataset.
  - If updating a dataset, `document_count` cannot be changed.
- `"chunk_count"`: (*Body parameter*)
  Chunk count of the dataset.
  - If updating a dataset, `chunk_count` cannot be changed.
- `"parse_method"`: (*Body parameter*)
  Parsing method of the dataset.
  - If updating `parse_method`, `chunk_count` must be 0.
- `"parser_config"`: (*Body parameter*)
  The configuration settings for the dataset parser.
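A hedged Python equivalent of the curl example above, assuming the same endpoint and token; only `name` is sent, since `id`, `tenant_id`, and `embedding_model` must not be provided:

```python
import requests

resp = requests.post(
    "http://127.0.0.1:9380/api/v1/dataset",
    headers={"Content-Type": "application/json",
             "Authorization": "Bearer <YOUR_ACCESS_TOKEN>"},
    json={"name": "test"},  # dataset name is required and must be unique
)
print(resp.json())  # expect {"code": 0, ...} on success
```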
### Response

The successful response includes a JSON object like the following:

```json
{
```

- `"error_code"`: `integer`
  `0`: The operation succeeds.

The error response includes a JSON object like the following:

```json
{
```
**DELETE** `/api/v1/dataset`

Deletes datasets by their IDs.

### Request

#### Request example

```bash
# Either "ids" or "names" must be provided, but not both.
curl --request DELETE \
  --url http://{address}/api/v1/dataset \
  --header 'Content-Type: application/json' \
```

#### Request parameters

- `"ids"`: (*Body parameter*)
  The IDs of the datasets to delete.

### Response

The successful response includes a JSON object like the following:

```json
{
```

`0`: The operation succeeds.

The error response includes a JSON object like the following:

```json
{
```
**PUT** `/api/v1/dataset/{dataset_id}`

Updates a dataset by its ID.

### Request

#### Request example

```bash
# "id": id is required.
# "name": If you update name, it cannot be duplicated.
# "tenant_id": If you update tenant_id, it cannot be changed.
# "embedding_model": If you update embedding_model, it cannot be changed.
# "chunk_count": If you update chunk_count, it cannot be changed.
# "document_count": If you update document_count, it cannot be changed.
# "parse_method": If you update parse_method, chunk_count must be 0.
# "naive" means general.
curl --request PUT \
  --url http://{address}/api/v1/dataset/{dataset_id} \
  --header 'Content-Type: application/json' \
  --data '{
  "embedding_model": "BAAI/bge-zh-v1.5",
  "chunk_count": 0,
  "document_count": 0,
  "parse_method": "naive"
}'
```

#### Request parameters

See the "Create Dataset" section for the complete structure of the request parameters.

### Response

The successful response includes a JSON object like the following:

```json
{
```

`0`: The operation succeeds.

The error response includes a JSON object like the following:

```json
{
```
### Response

The successful response includes a JSON object like the following:

```json
{
```

The error response includes a JSON object like the following:

```json
{
```
#### Request example

```bash
curl --request POST \
  --url http://{address}/api/v1/dataset/{dataset_id}/document \
  --header 'Content-Type: multipart/form-data' \
  --header 'Authorization: Bearer {YOUR_ACCESS_TOKEN}' \
  --form 'file=@./test.txt'
```

#### Request parameters

### Response
The successful response includes a JSON object like the following:

```json
{
    "code": 0
}
```

`0`: The operation succeeds.
The error response includes a JSON object like the following:

```json
{
    "code": 101,
    "message": "No file part!"
}
```

- '{FILE_NAME}'
#### Request example

```bash
curl --request GET \
  --url http://{address}/api/v1/dataset/{dataset_id}/document/{documents_id} \
  --header 'Content-Type: application/json' \
```

### Response
The successful response includes a text object like the following:

```text
test_2.
```

- `"error_code"`: `integer`
  `0`: The operation succeeds.
The error response includes a JSON object like the following:

```json
{
    "code": 102,
    "message": "You do not own the dataset 7898da028a0511efbf750242ac1220005."
}
```
## List files of a dataset

**GET** `/api/v1/dataset/{dataset_id}/info?offset={offset}&limit={limit}&orderby={orderby}&desc={desc}&keywords={keywords}&id={document_id}`

Lists files in a dataset.

#### Request example

```bash
curl --request GET \
  --url http://{address}/api/v1/dataset/{dataset_id}/info?offset={offset}&limit={limit}&orderby={orderby}&desc={desc}&keywords={keywords}&id={document_id} \
  --header 'Authorization: Bearer {YOUR_ACCESS_TOKEN}'
```

#### Request parameters

- `"dataset_id"`: (*PATH parameter*)
  The ID of the dataset.
- `offset`: (*Filter parameter*)
  The beginning number of records for paging.
- `keywords`: (*Filter parameter*)
  The keywords used to match document titles.
- `limit`: (*Filter parameter*)
  Records number to return.
- `orderby`: (*Filter parameter*)
  The field by which the records should be sorted. This specifies the attribute or column used to order the results.
- `desc`: (*Filter parameter*)
  A boolean flag indicating whether the sorting should be in descending order.
- `id`: (*Filter parameter*)
  The ID of the document to retrieve.
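A hedged Python version of the listing call, exercising the filter parameters above (host, dataset ID, and token are placeholders):

```python
import requests

resp = requests.get(
    "http://127.0.0.1:9380/api/v1/dataset/<DATASET_ID>/info",
    params={"offset": 0, "limit": 10, "orderby": "create_time",
            "desc": "true", "keywords": "test"},
    headers={"Authorization": "Bearer <YOUR_ACCESS_TOKEN>"},
)
for doc in resp.json()["data"]["docs"]:
    print(doc["id"], doc["name"], doc["size"])
```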
### Response

The successful response includes a JSON object like the following:

```json
{
    "code": 0,
    "data": {
        "docs": [
            {
                "chunk_count": 0,
                "create_date": "Mon, 14 Oct 2024 09:11:01 GMT",
                "create_time": 1728897061948,
                "created_by": "69736c5e723611efb51b0242ac120007",
                "id": "3bcfbf8a8a0c11ef8aba0242ac120006",
                "knowledgebase_id": "7898da028a0511efbf750242ac120005",
                "location": "Test_2.txt",
                "name": "Test_2.txt",
                "parser_config": {
                    "chunk_token_count": 128,
                    "delimiter": "\n!?。;!?",
                    "layout_recognize": true,
                    "task_page_size": 12
                },
                "parser_method": "naive",
                "process_begin_at": null,
                "process_duation": 0.0,
                "progress": 0.0,
                "progress_msg": "",
                "run": "0",
                "size": 7,
                "source_type": "local",
                "status": "1",
                "thumbnail": null,
                "token_count": 0,
                "type": "doc",
                "update_date": "Mon, 14 Oct 2024 09:11:01 GMT",
                "update_time": 1728897061948
            }
        ],
        "total": 1
    }
}
```
`0`: The operation succeeds.

The error response includes a JSON object like the following:

```json
{
    "code": 102,
    "message": "You don't own the dataset 7898da028a0511efbf750242ac1220005."
}
```
### Request

- Method: PUT
- URL: `http://{address}/api/v1/dataset/{dataset_id}/document/{document_id}`
- Headers:
  - `Content-Type: application/json`
  - `Authorization: Bearer {YOUR_ACCESS_TOKEN}`

#### Request example
```bash
curl --request PUT \
  --url http://{address}/api/v1/dataset/{dataset_id}/document/{document_id} \
  --header 'Authorization: Bearer {YOUR_ACCESS_TOKEN}' \
  --header 'Content-Type: application/json' \
  --data '{
    "name": "manual.txt",
    "thumbnail": null,
    "knowledgebase_id": "779333c0758611ef910f0242ac120004",
    "parser_method": "manual",
    "parser_config": {"chunk_token_count": 128, "delimiter": "\n!?。;!?", "layout_recognize": true, "task_page_size": 12},
    "source_type": "local", "type": "doc",
    "created_by": "134408906b6811efbcd20242ac120005",
    "size": 0, "token_count": 0, "chunk_count": 0,
    "progress": 0.0,
    "progress_msg": "",
    "process_begin_at": null,
    "process_duration": 0.0
  }'
```
#### Request parameters

- `"thumbnail"`: (*Body parameter*)
  Thumbnail image of the document.
  - `""`
- `"knowledgebase_id"`: (*Body parameter*)
  Knowledge base ID related to the document.
  - `""`
- `"parser_method"`: (*Body parameter*)
  Method used to parse the document.
  - `""`
- `"parser_config"`: (*Body parameter*)
  Configuration object for the parser.
  - If the value is `None`, a dictionary with default values will be generated.
- `"source_type"`: (*Body parameter*)
  Source type of the document.
  - `""`
- `"type"`: (*Body parameter*)
  Type or category of the document.
  - `""`
- `"created_by"`: (*Body parameter*)
  Creator of the document.
  - `""`
- `"name"`: (*Body parameter*)
  Name or title of the document.
  - `""`
- `"size"`: (*Body parameter*)
  Size of the document in bytes.
  - `0`
- `"token_count"`: (*Body parameter*)
  Number of tokens in the document.
  - `0`
- `"chunk_count"`: (*Body parameter*)
  Number of chunks the document is split into.
  - `0`
- `"progress"`: (*Body parameter*)
  Current processing progress as a percentage.
  - `0.0`
- `"progress_msg"`: (*Body parameter*)
  Message indicating current progress status.
  - `""`
- `"process_begin_at"`: (*Body parameter*)
  Start time of the document processing.
  - `None`
- `"process_duration"`: (*Body parameter*)
  Duration of the processing in seconds.
  - `0.0`
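A hedged Python version of the update call above; only a subset of the body fields is sent, mirroring the curl example (host, IDs, and token are placeholders):

```python
import requests

resp = requests.put(
    "http://127.0.0.1:9380/api/v1/dataset/<DATASET_ID>/document/<DOCUMENT_ID>",
    headers={"Authorization": "Bearer <YOUR_ACCESS_TOKEN>"},
    json={"name": "manual.txt", "parser_method": "manual"},
)
print(resp.json())
```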
### Response

The successful response includes a JSON object like the following:

```json
{
    "code": 0
}
```

The error response includes a JSON object like the following:

```json
{
    "code": 102,
    "message": "The dataset not own the document."
}
```
### Response

The successful response includes a JSON object like the following:

```shell
{
}
```

The error response includes a JSON object like the following:

```shell
{
```

### Response

The successful response includes a JSON object like the following:

```shell
{
}
```

The error response includes a JSON object like the following:

```shell
{
```

### Response

The successful response includes a JSON object like the following:

```shell
{
}
```

The error response includes a JSON object like the following:

```shell
{
```
### Parameters

#### name: `str`, *Required*

The unique name of the dataset to create. It must adhere to the following requirements:

- Maximum 65,535 characters.
- Case-insensitive.

#### avatar: `str`

Base64 encoding of the avatar. Defaults to `""`.

#### tenant_id: `str`

The ID of the tenant associated with the created dataset, used to identify different users. Defaults to `None`.

- If creating a dataset, `tenant_id` must not be provided.
- If updating a dataset, `tenant_id` cannot be changed.

#### description: `str`

The description of the created dataset. Defaults to `""`.
#### language: `str`

The language setting of the created dataset. Defaults to `"English"`.

#### embedding_model: `str`

The specific model used by the dataset to generate vector embeddings. Defaults to `""`.

- If creating a dataset, `embedding_model` must not be provided.
- If updating a dataset, `embedding_model` cannot be changed.

#### permission: `str`

Specifies who can operate on the dataset. Defaults to `"me"`.

#### document_count: `int`

The number of documents associated with the dataset. Defaults to `0`.

- If updating a dataset, `document_count` cannot be changed.

#### chunk_count: `int`

The number of data chunks generated or processed by the created dataset. Defaults to `0`.
- If updating a dataset, `chunk_count` cannot be changed.

#### parse_method: `str`

The method used by the dataset to parse and process data. Defaults to `"naive"`.

- If updating `parse_method`, `chunk_count` must be 0.

#### parser_config: `Dataset.ParserConfig`

The configuration settings for the parser used by the dataset. A `ParserConfig` object contains the following attributes:

- `chunk_token_count`: Defaults to `128`.
- `layout_recognize`: Defaults to `True`.
- `delimiter`: Defaults to `'\n!?。;!?'`.
- `task_page_size`: Defaults to `12`.
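A hedged sketch of passing these parser settings at creation time, assuming `create_dataset` accepts a `parser_config` mapping with the attributes listed above (an assumption, not confirmed by this PR):

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
# Hypothetical: parser_config passed as a dict of the documented attributes.
ds = rag.create_dataset(
    name="kb_with_config",
    parser_config={"chunk_token_count": 128, "layout_recognize": True,
                   "delimiter": "\n!?。;!?", "task_page_size": 12},
)
```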
### Returns

- Success: A `DataSet` object.
- Failure: `Exception`

### Examples

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
ds = rag.create_dataset(name="kb_1")
```
---

## Delete knowledge bases

```python
RAGFlow.delete_datasets(ids: List[str] = None)
```

Deletes knowledge bases by ID.

### Parameters

#### ids: `List[str]`

The IDs of the knowledge bases to delete.

### Returns

- Success: No value is returned.
- Failure: `Exception`

### Examples

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
rag.delete_datasets(ids=["id_1","id_2"])
```
    desc: bool = True,
    id: str = None,
    name: str = None
) -> List[DataSet]
```

Lists all knowledge bases in the RAGFlow system.

### Parameters

#### page: `int`

The current page number to retrieve from the paginated data. This parameter determines which set of records will be fetched. Defaults to `1`.

#### page_size: `int`

The number of records to retrieve per page. This controls how many records will be included in each page. Defaults to `1024`.

#### order_by: `str`

The field by which the records should be sorted. This specifies the attribute or column used to order the results. Defaults to `"create_time"`.

#### desc: `bool`

Whether the sorting should be in descending order. Defaults to `True`.

#### id: `str`

The ID of the dataset to retrieve. Defaults to `None`.

#### name: `str`

The name of the dataset to retrieve. Defaults to `None`.
### Returns

- Success: A list of `DataSet` objects.
- Failure: `Exception`.

### Examples

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
for ds in rag.list_datasets():
    print(ds)
```
---

## Update knowledge base

```python
DataSet.update(update_message: dict)
```

Updates the current knowledge base.

### Parameters

#### update_message: `dict`

### Returns

- Success: No value is returned.
- Failure: `Exception`

### Examples

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
ds = rag.get_dataset(name="kb_1")
ds.update({"parse_method":"manual", ...})
```
### Parameters

#### name

#### blob

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
doc = rag.get_document(id="wdfxb5t547d",name='testdocument.txt')
print(doc)
```
```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
doc = rag.get_document(id="wdfxb5t547d")
doc.parser_method = "manual"
doc.save()
```

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
doc = rag.get_document(id="wdfxb5t547d")
open("~/ragflow.txt", "w+").write(doc.download())
print(doc)
```
## List documents

```python
Dataset.list_docs(keywords: str = None, offset: int = 0, limit: int = -1) -> List[Document]
```

### Parameters

#### limit: `int`

Records number to return, `-1` means all of them.

### Returns

List[Document]
### Examples

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
ds = rag.create_dataset(name="kb_1")
filename1 = "~/ragflow.txt"
```

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
ds = rag.create_dataset(name="kb_1")
filename1 = "~/ragflow.txt"
```

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
doc = rag.get_document(id="wdfxb5t547d")
chunk = doc.add_chunk(content="xxxxxxx")
```

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
doc = rag.get_document(id="wdfxb5t547d")
chunk = doc.add_chunk(content="xxxxxxx")
chunk.delete()
```

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
doc = rag.get_document(id="wdfxb5t547d")
chunk = doc.add_chunk(content="xxxxxxx")
chunk.content = "sdfx"
```
## Retrieval

```python
RAGFlow.retrieval(question: str, datasets: List[Dataset], document: List[Document] = None, offset: int = 0, limit: int = 6, similarity_threshold: float = 0.1, vector_similarity_weight: float = 0.3, top_k: int = 1024) -> List[Chunk]
```

### Parameters

The user query or query keywords. Defaults to `""`.

#### datasets: `List[Dataset]`, *Required*

The scope of datasets.

#### document: `List[Document]`

The scope of document. `None` means no limitation. Defaults to `None`.

### Returns

List[Chunk]
### Examples

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
ds = rag.get_dataset(name="ragflow")
name = 'ragflow_test.txt'
path = 'test_data/ragflow_test.txt'
```
RAGFlow.create_chat(
    name: str = "assistant",
    avatar: str = "path",
    knowledgebases: List[DataSet] = ["kb1"],
    llm: Chat.LLM = None,
    prompt: Chat.Prompt = None
) -> Chat

The icon of the created chat. Defaults to `"path"`.

#### knowledgebases: `List[DataSet]`

Select knowledgebases associated. Defaults to `["kb1"]`.
```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
kb = rag.get_dataset(name="kb_1")
assi = rag.create_chat("Miss R", knowledgebases=[kb])
```

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
kb = rag.get_knowledgebase(name="kb_1")
assi = rag.create_chat("Miss R", knowledgebases=[kb])
assi.update({"temperature":0.8})
```
## Delete chats

```python
RAGFlow.delete_chats(ids: List[str] = None)
```

### Parameters

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380")
rag.delete_chats(ids=["id_1","id_2"])
```
| desc: bool = True, | desc: bool = True, | ||||
| id: str = None, | id: str = None, | ||||
| name: str = None | name: str = None | ||||
| ) -> list[Chat] | |||||
| ) -> List[Chat] | |||||
| ``` | ``` | ||||
| ### Parameters | ### Parameters | ||||
| ```python | ```python | ||||
| from ragflow import RAGFlow | from ragflow import RAGFlow | ||||
| rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380") | |||||
| rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380") | |||||
| for assi in rag.list_chats(): | for assi in rag.list_chats(): | ||||
| print(assi) | print(assi) | ||||
| ``` | ``` | ||||
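The documented `name`, `id`, and `desc` parameters can narrow and order the listing. A short sketch (the exact matching semantics of `name` are an assumption):

```python
# Filter chats by name and sort ascending instead of the default descending.
for assi in rag.list_chats(name="Miss R", desc=False):
    print(assi.name)
```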
| The name of the created session. Defaults to `"New session"`. | The name of the created session. Defaults to `"New session"`. | ||||
| #### messages: `list[Message]` | |||||
| #### messages: `List[Message]` | |||||
| The messages of the created session. | The messages of the created session. | ||||
| - `messages` cannot be provided when creating a session; it is generated automatically. | - `messages` cannot be provided when creating a session; it is generated automatically. | ||||
| ```python | ```python | ||||
| from ragflow import RAGFlow | from ragflow import RAGFlow | ||||
| rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380") | |||||
| rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380") | |||||
| assi = rag.list_chats(name="Miss R") | assi = rag.list_chats(name="Miss R") | ||||
| assi = assi[0] | assi = assi[0] | ||||
| sess = assi.create_session() | sess = assi.create_session() | ||||
| ``` | ``` | ||||
| ```python | ```python | ||||
| from ragflow import RAGFlow | from ragflow import RAGFlow | ||||
| rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380") | |||||
| rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380") | |||||
| assi = rag.list_chats(name="Miss R") | assi = rag.list_chats(name="Miss R") | ||||
| assi = assi[0] | assi = assi[0] | ||||
| sess = assi.create_session("new_session") | sess = assi.create_session("new_session") | ||||
| #### content: `str` | #### content: `str` | ||||
| The content of the message. Defaults to `"Hi! I am your assistant, can I help you?"`. | The content of the message. Defaults to `"Hi! I am your assistant, can I help you?"`. | ||||
| #### reference: `list[Chunk]` | |||||
| #### reference: `List[Chunk]` | |||||
| The auto-generated reference of the message. Each `chunk` object includes the following attributes: | The auto-generated reference of the message. Each `chunk` object includes the following attributes: | ||||
| - **vector_similarity**: `float` | - **vector_similarity**: `float` | ||||
| A similarity score based on vector representations. This score is obtained by converting texts, words, or objects into vectors and then calculating the cosine similarity or other distance measures between these vectors to determine the similarity in vector space. A higher value indicates greater similarity in the vector space. Defaults to `None`. | A similarity score based on vector representations. This score is obtained by converting texts, words, or objects into vectors and then calculating the cosine similarity or other distance measures between these vectors to determine the similarity in vector space. A higher value indicates greater similarity in the vector space. Defaults to `None`. | ||||
| - **term_similarity**: `float` | - **term_similarity**: `float` | ||||
| The similarity score based on terms or keywords. This score is calculated by comparing the similarity of key terms between texts or datasets, typically measuring how similar two words or phrases are in meaning or context. A higher value indicates a stronger similarity between terms. Defaults to `None`. | The similarity score based on terms or keywords. This score is calculated by comparing the similarity of key terms between texts or datasets, typically measuring how similar two words or phrases are in meaning or context. A higher value indicates a stronger similarity between terms. Defaults to `None`. | ||||
| - **position**: `list[string]` | |||||
| - **position**: `List[string]` | |||||
| Indicates the position or index of keywords or specific terms within the text. An array is typically used to mark the location of keywords or specific elements, facilitating precise operations or analysis of the text. Defaults to `None`. | Indicates the position or index of keywords or specific terms within the text. An array is typically used to mark the location of keywords or specific elements, facilitating precise operations or analysis of the text. Defaults to `None`. | ||||
| ### Examples | ### Examples | ||||
| ```python | ```python | ||||
| from ragflow import RAGFlow | from ragflow import RAGFlow | ||||
| rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380") | |||||
| rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380") | |||||
| assi = rag.list_chats(name="Miss R") | assi = rag.list_chats(name="Miss R") | ||||
| assi = assi[0] | assi = assi[0] | ||||
| sess = assi.create_session() | sess = assi.create_session() | ||||
| ``` | ``` | ||||
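The example stops at session creation. Below is a sketch of consuming the `content` and `reference` attributes documented above; the `ask` call and its streaming behavior are assumptions, not confirmed by this extract:

```python
# Hypothetical continuation: stream a reply, then inspect the final
# message's reference chunks (attributes documented above).
answer = None
for message in sess.ask("What is RAGFlow?", stream=True):
    print(message.content)
    answer = message
for chunk in (answer.reference or []):
    print(chunk.vector_similarity, chunk.term_similarity, chunk.position)
```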
| desc: bool = True, | desc: bool = True, | ||||
| id: str = None, | id: str = None, | ||||
| name: str = None | name: str = None | ||||
| ) -> list[Session] | |||||
| ) -> List[Session] | |||||
| ``` | ``` | ||||
| ### Returns | ### Returns | ||||
| list[Session] | |||||
| List[Session] | |||||
| description: the list contains information about multiple session objects, with each entry describing one session. | description: the list contains information about multiple session objects, with each entry describing one session. | ||||
| ### Examples | ### Examples | ||||
| ```python | ```python | ||||
| from ragflow import RAGFlow | from ragflow import RAGFlow | ||||
| rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380") | |||||
| rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380") | |||||
| assi = rag.list_chats(name="Miss R") | assi = rag.list_chats(name="Miss R") | ||||
| assi = assi[0] | assi = assi[0] | ||||
| for sess in assi.list_sessions(): | for sess in assi.list_sessions(): | ||||
| print(sess) | print(sess) | ||||
| ``` | ``` | ||||
| ## Delete session | ## Delete session | ||||
| ```python | ```python | ||||
| Chat.delete_sessions(ids:list[str] = None) | |||||
| Chat.delete_sessions(ids:List[str] = None) | |||||
| ``` | ``` | ||||
| ### Returns | ### Returns | ||||
| ```python | ```python | ||||
| from ragflow import RAGFlow | from ragflow import RAGFlow | ||||
| rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380") | |||||
| rag = RAGFlow(api_key="xxxxxx", base_url="http://xxx.xx.xx.xxx:9380") | |||||
| assi = rag.list_chats(name="Miss R") | assi = rag.list_chats(name="Miss R") | ||||
| assi = assi[0] | assi = assi[0] | ||||
| assi.delete_sessions(ids=["id_1","id_2"]) | assi.delete_sessions(ids=["id_1","id_2"]) | ||||
| ``` | ``` | ||||
| ### Parameters | ### Parameters | ||||
| #### ids: `list[string]` | |||||
| #### ids: `List[string]` | |||||
| IDs of the sessions to be deleted. | IDs of the sessions to be deleted. | ||||
| - `None` | - `None` | ||||
| pr[name] = value | pr[name] = value | ||||
| return pr | return pr | ||||
| def post(self, path, json, stream=False): | |||||
| res = self.rag.post(path, json, stream=stream) | |||||
| def post(self, path, json=None, stream=False, files=None): | |||||
| res = self.rag.post(path, json, stream=stream, files=files) | |||||
| return res | return res | ||||
| def get(self, path, params=None): | def get(self, path, params=None): | ||||
| from typing import Optional, List | from typing import Optional, List | ||||
| from .document import Document | from .document import Document | ||||
| from .base import Base | from .base import Base | ||||
| if res.get("code") != 0: | if res.get("code") != 0: | ||||
| raise Exception(res["message"]) | raise Exception(res["message"]) | ||||
| def upload_documents(self, document_list: List[dict]): | |||||
| url = f"/dataset/{self.id}/document" | |||||
| files = [("file", (ele["name"], ele["blob"])) for ele in document_list] | |||||
| res = self.post(path=url, json=None, files=files) | |||||
| res = res.json() | |||||
| if res.get("code") != 0: | |||||
| raise Exception(res.get("message")) | |||||
| def list_docs(self, keywords: Optional[str] = None, offset: int = 0, limit: int = -1) -> List[Document]: | |||||
| """ | |||||
| List the documents in the dataset, optionally filtering by keywords, with pagination support. | |||||
| Args: | |||||
| keywords (Optional[str]): A string of keywords to filter the documents. Defaults to None. | |||||
| offset (int): The starting point for pagination. Defaults to 0. | |||||
| limit (int): The maximum number of documents to return. Defaults to -1 (no limit). | |||||
| Returns: | |||||
| List[Document]: A list of Document objects. | |||||
| """ | |||||
| # Construct the request payload for listing documents | |||||
| payload = { | |||||
| "knowledgebase_id": self.id, | |||||
| "keywords": keywords, | |||||
| "offset": offset, | |||||
| "limit": limit | |||||
| } | |||||
| # Send the request to the server to list documents | |||||
| res = self.get(f'/doc/dataset/{self.id}/documents', payload) | |||||
| res_json = res.json() | |||||
| # Handle response and error checking | |||||
| if res_json.get("retmsg") != "success": | |||||
| raise Exception(res_json.get("retmsg")) | |||||
| # Parse the document data from the response | |||||
| def list_documents(self, id: str = None, keywords: str = None, offset: int = 1, limit: int = 1024, orderby: str = "create_time", desc: bool = True): | |||||
| res = self.get(f"/dataset/{self.id}/info", params={"id": id, "keywords": keywords, "offset": offset, "limit": limit, "orderby": orderby, "desc": desc}) | |||||
| res = res.json() | |||||
| documents = [] | documents = [] | ||||
| for doc_data in res_json["data"].get("docs", []): | |||||
| doc = Document(self.rag, doc_data) | |||||
| documents.append(doc) | |||||
| if res.get("code") == 0: | |||||
| for document in res["data"].get("docs"): | |||||
| documents.append(Document(self.rag,document)) | |||||
| return documents | |||||
| raise Exception(res["message"]) | |||||
| def delete_documents(self, ids: List[str] = None): | |||||
| res = self.rm(f"/dataset/{self.id}/document", {"ids": ids}) | |||||
| res = res.json() | |||||
| if res.get("code") != 0: | |||||
| raise Exception(res["message"]) | |||||
| res_dict.pop(k) | res_dict.pop(k) | ||||
| super().__init__(rag, res_dict) | super().__init__(rag, res_dict) | ||||
| def save(self) -> bool: | |||||
| def update(self, update_message: dict): | |||||
| """ | """ | ||||
| Update the document details on the server. | Update the document details on the server. | ||||
| """ | """ | ||||
| res = self.post('/doc/save', | |||||
| {"id": self.id, "name": self.name, "thumbnail": self.thumbnail, "knowledgebase_id": self.knowledgebase_id, | |||||
| "parser_method": self.parser_method, "parser_config": self.parser_config.to_json(), | |||||
| }) | |||||
| res = self.post(f'/dataset/{self.knowledgebase_id}/info/{self.id}',update_message) | |||||
| res = res.json() | res = res.json() | ||||
| if res.get("retmsg") == "success": | |||||
| return True | |||||
| raise Exception(res["retmsg"]) | |||||
| if res.get("code") != 0: | |||||
| raise Exception(res["message"]) | |||||
| def delete(self) -> bool: | def delete(self) -> bool: | ||||
| """ | """ | ||||
| :return: The downloaded document content in bytes. | :return: The downloaded document content in bytes. | ||||
| """ | """ | ||||
| # Construct the URL for the API request using the document ID and knowledge base ID | # Construct the URL for the API request using the document ID and knowledge base ID | ||||
| res = self.get(f"/doc/{self.id}", | |||||
| {"headers": self.rag.authorization_header, "id": self.id, "name": self.name, "stream": True}) | |||||
| res = self.get(f"/dataset/{self.knowledgebase_id}/document/{self.id}") | |||||
| # Check the response status code to ensure the request was successful | # Check the response status code to ensure the request was successful | ||||
| if res.status_code == 200: | if res.status_code == 200: |
| self.api_url = f"{base_url}/api/{version}" | self.api_url = f"{base_url}/api/{version}" | ||||
| self.authorization_header = {"Authorization": "{} {}".format("Bearer", self.user_key)} | self.authorization_header = {"Authorization": "{} {}".format("Bearer", self.user_key)} | ||||
| def post(self, path, json, stream=False): | |||||
| res = requests.post(url=self.api_url + path, json=json, headers=self.authorization_header, stream=stream) | |||||
| def post(self, path, json=None, stream=False, files=None): | |||||
| res = requests.post(url=self.api_url + path, json=json, headers=self.authorization_header, stream=stream, files=files) | |||||
| return res | return res | ||||
| def get(self, path, params=None): | |||||
| res = requests.get(url=self.api_url + path, params=params, headers=self.authorization_header) | |||||
| def get(self, path, params=None, json=None): | |||||
| res = requests.get(url=self.api_url + path, params=params, headers=self.authorization_header, json=json) | |||||
| return res | return res | ||||
| def delete(self, path, json): | def delete(self, path, json): | ||||
| return result_list | return result_list | ||||
| raise Exception(res["message"]) | raise Exception(res["message"]) | ||||
| def create_document(self, ds: DataSet, name: str, blob: bytes) -> bool: | |||||
| url = f"/doc/dataset/{ds.id}/documents/upload" | |||||
| files = { | |||||
| 'file': (name, blob) | |||||
| } | |||||
| headers = { | |||||
| 'Authorization': f"Bearer {ds.rag.user_key}" | |||||
| } | |||||
| response = requests.post(self.api_url + url, files=files, | |||||
| headers=headers) | |||||
| if response.status_code == 200 and response.json().get('retmsg') == 'success': | |||||
| return True | |||||
| else: | |||||
| raise Exception(f"Upload failed: {response.json().get('retmsg')}") | |||||
| return False | |||||
| def get_document(self, id: str = None, name: str = None) -> Document: | |||||
| res = self.get("/doc/infos", {"id": id, "name": name}) | |||||
| res = res.json() | |||||
| if res.get("retmsg") == "success": | |||||
| return Document(self, res['data']) | |||||
| raise Exception(res["retmsg"]) | |||||
| def async_parse_documents(self, doc_ids): | def async_parse_documents(self, doc_ids): | ||||
| """ | """ |
| # Step 2: Create a new document | # Step 2: Create a new document | ||||
| # The blob is the actual file content or a placeholder in this case | # The blob is the actual file content or a placeholder in this case | ||||
| name = "TestDocument.txt" | |||||
| blob = b"Sample document content for ingestion test." | blob = b"Sample document content for ingestion test." | ||||
| res = rag.create_document(ds, name=name, blob=blob) | |||||
| blob_2 = b"test_2." | |||||
| list_1 = [] | |||||
| list_1.append({"name":"Test_1.txt", | |||||
| "blob":blob}) | |||||
| list_1.append({"name":"Test_2.txt", | |||||
| "blob":blob_2}) | |||||
| res = ds.upload_documents(list_1) | |||||
| # Ensure document ingestion was successful | # Ensure document ingestion was successful | ||||
| assert res is True, f"Failed to create document, error: {res}" | |||||
| def test_get_detail_document_with_success(self): | |||||
| """ | |||||
| Test getting a document's detail with success | |||||
| """ | |||||
| rag = RAGFlow(API_KEY, HOST_ADDRESS) | |||||
| doc = rag.get_document(name="TestDocument.txt") | |||||
| assert isinstance(doc, Document), f"Failed to get dataset, error: {doc}." | |||||
| assert doc.name == "TestDocument.txt", "Name does not match" | |||||
| assert res is None, f"Failed to create document, error: {res}" | |||||
| def test_update_document_with_success(self): | def test_update_document_with_success(self): | ||||
| """ | """ | ||||
| Updating name or parser_method is supported | Updating name or parser_method is supported | ||||
| """ | """ | ||||
| rag = RAGFlow(API_KEY, HOST_ADDRESS) | rag = RAGFlow(API_KEY, HOST_ADDRESS) | ||||
| doc = rag.get_document(name="TestDocument.txt") | |||||
| ds = rag.list_datasets(name="God") | |||||
| ds = ds[0] | |||||
| doc = ds.list_documents() | |||||
| doc = doc[0] | |||||
| if isinstance(doc, Document): | if isinstance(doc, Document): | ||||
| doc.parser_method = "manual" | |||||
| doc.name = "manual.txt" | |||||
| res = doc.save() | |||||
| assert res is True, f"Failed to update document, error: {res}" | |||||
| res = doc.update({"parser_method":"manual","name":"manual.txt"}) | |||||
| assert res is None, f"Failed to update document, error: {res}" | |||||
| else: | else: | ||||
| assert False, f"Failed to get document, error: {doc}" | assert False, f"Failed to get document, error: {doc}" | ||||
| rag = RAGFlow(API_KEY, HOST_ADDRESS) | rag = RAGFlow(API_KEY, HOST_ADDRESS) | ||||
| # Retrieve a document | # Retrieve a document | ||||
| doc = rag.get_document(name="manual.txt") | |||||
| ds = rag.list_datasets(name="God") | |||||
| ds = ds[0] | |||||
| doc = ds.list_documents(name="manual.txt") | |||||
| doc = doc[0] | |||||
| # Check if the retrieved document is of type Document | # Check if the retrieved document is of type Document | ||||
| if isinstance(doc, Document): | if isinstance(doc, Document): | ||||
| # Download the document content and save it to a file | # Download the document content and save it to a file | ||||
| # If the document retrieval fails, assert failure | # If the document retrieval fails, assert failure | ||||
| assert False, f"Failed to get document, error: {doc}" | assert False, f"Failed to get document, error: {doc}" | ||||
| def test_list_all_documents_in_dataset_with_success(self): | |||||
| def test_list_documents_in_dataset_with_success(self): | |||||
| """ | """ | ||||
| Test listing all documents in a dataset with success. | Test listing all documents in a dataset with success. | ||||
| """ | """ | ||||
| blob1 = b"Sample document content for ingestion test111." | blob1 = b"Sample document content for ingestion test111." | ||||
| name2 = "Test Document222.txt" | name2 = "Test Document222.txt" | ||||
| blob2 = b"Sample document content for ingestion test222." | blob2 = b"Sample document content for ingestion test222." | ||||
| rag.create_document(ds, name=name1, blob=blob1) | |||||
| rag.create_document(ds, name=name2, blob=blob2) | |||||
| list_1 = [{"name":name1,"blob":blob1},{"name":name2,"blob":blob2}] | |||||
| ds.upload_documents(list_1) | |||||
| for d in ds.list_docs(keywords="test", offset=0, limit=12): | for d in ds.list_docs(keywords="test", offset=0, limit=12): | ||||
| assert isinstance(d, Document) | |||||
| print(d) | |||||
| assert isinstance(d, Document), "Failed to upload documents" | |||||
| def test_delete_documents_in_dataset_with_success(self): | def test_delete_documents_in_dataset_with_success(self): | ||||
| """ | """ |