
common.py 4.3KB

#
#  Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import os
from pathlib import Path

import requests
from requests_toolbelt import MultipartEncoder

from libs.utils.file_utils import create_txt_file

HEADERS = {"Content-Type": "application/json"}
HOST_ADDRESS = os.getenv("HOST_ADDRESS", "http://127.0.0.1:9380")
DATASETS_API_URL = "/api/v1/datasets"
FILE_API_URL = "/api/v1/datasets/{dataset_id}/documents"
INVALID_API_TOKEN = "invalid_key_123"
DATASET_NAME_LIMIT = 128
DOCUMENT_NAME_LIMIT = 128

# DATASET MANAGEMENT
def create_dataset(auth, payload):
    res = requests.post(
        url=f"{HOST_ADDRESS}{DATASETS_API_URL}",
        headers=HEADERS,
        auth=auth,
        json=payload,
    )
    return res.json()


def list_dataset(auth, params=None):
    res = requests.get(
        url=f"{HOST_ADDRESS}{DATASETS_API_URL}",
        headers=HEADERS,
        auth=auth,
        params=params,
    )
    return res.json()


def update_dataset(auth, dataset_id, payload):
    res = requests.put(
        url=f"{HOST_ADDRESS}{DATASETS_API_URL}/{dataset_id}",
        headers=HEADERS,
        auth=auth,
        json=payload,
    )
    return res.json()


def delete_dataset(auth, payload=None):
    res = requests.delete(
        url=f"{HOST_ADDRESS}{DATASETS_API_URL}",
        headers=HEADERS,
        auth=auth,
        json=payload,
    )
    return res.json()


def create_datasets(auth, num):
    # Create `num` datasets named dataset_0 ... dataset_{num-1} and return their ids.
    ids = []
    for i in range(num):
        res = create_dataset(auth, {"name": f"dataset_{i}"})
        ids.append(res["data"]["id"])
    return ids

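
# Example (sketch, not part of the original module): the dataset helpers above take
# any requests-compatible auth object plus plain dict payloads. The exact payload
# shapes (e.g. {"ids": [...]} for delete) are assumptions for illustration:
#
#     dataset_id = create_dataset(auth, {"name": "demo"})["data"]["id"]
#     update_dataset(auth, dataset_id, {"name": "demo_renamed"})
#     delete_dataset(auth, {"ids": [dataset_id]})
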
# FILE MANAGEMENT WITHIN DATASET
def upload_documnets(auth, dataset_id, files_path=None):
    # Upload the files at `files_path` to the dataset in a single multipart request;
    # file handles are always closed, even if the request fails.
    url = f"{HOST_ADDRESS}{FILE_API_URL}".format(dataset_id=dataset_id)
    if files_path is None:
        files_path = []

    fields = []
    file_objects = []
    try:
        for fp in files_path:
            p = Path(fp)
            f = p.open("rb")
            fields.append(("file", (p.name, f)))
            file_objects.append(f)
        m = MultipartEncoder(fields=fields)
        res = requests.post(
            url=url,
            headers={"Content-Type": m.content_type},
            auth=auth,
            data=m,
        )
        return res.json()
    finally:
        for f in file_objects:
            f.close()


def batch_upload_documents(auth, dataset_id, num, tmp_path):
    # Create `num` temporary .txt files under `tmp_path`, upload them, and return
    # the ids of the created documents.
    fps = []
    for i in range(num):
        fp = create_txt_file(tmp_path / f"ragflow_test_upload_{i}.txt")
        fps.append(fp)
    res = upload_documnets(auth, dataset_id, fps)
    document_ids = []
    for document in res["data"]:
        document_ids.append(document["id"])
    return document_ids


def download_document(auth, dataset_id, document_id, save_path):
    # Stream the document to `save_path`; return the (closed) response object so
    # callers can inspect the status code and headers.
    url = f"{HOST_ADDRESS}{FILE_API_URL}/{document_id}".format(dataset_id=dataset_id)
    res = requests.get(url=url, auth=auth, stream=True)
    try:
        if res.status_code == 200:
            with open(save_path, "wb") as f:
                for chunk in res.iter_content(chunk_size=8192):
                    f.write(chunk)
    finally:
        res.close()
    return res


def list_documnet(auth, dataset_id, params=None):
    url = f"{HOST_ADDRESS}{FILE_API_URL}".format(dataset_id=dataset_id)
    res = requests.get(
        url=url,
        headers=HEADERS,
        auth=auth,
        params=params,
    )
    return res.json()


def update_documnet(auth, dataset_id, document_id, payload):
    url = f"{HOST_ADDRESS}{FILE_API_URL}/{document_id}".format(dataset_id=dataset_id)
    res = requests.put(url=url, headers=HEADERS, auth=auth, json=payload)
    return res.json()


def delete_documnet(auth, dataset_id, payload=None):
    url = f"{HOST_ADDRESS}{FILE_API_URL}".format(dataset_id=dataset_id)
    res = requests.delete(url=url, headers=HEADERS, auth=auth, json=payload)
    return res.json()
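

if __name__ == "__main__":
    # Minimal end-to-end sketch (not part of the original module): exercises the
    # helpers above against a running server at HOST_ADDRESS. The bearer-token auth
    # class, the RAGFLOW_API_KEY variable, and the {"ids": [...]} delete payloads
    # are assumptions for illustration only.
    import tempfile

    class _BearerAuth(requests.auth.AuthBase):
        """Hypothetical auth helper: adds an Authorization: Bearer header."""

        def __init__(self, key):
            self.key = key

        def __call__(self, r):
            r.headers["Authorization"] = f"Bearer {self.key}"
            return r

    auth = _BearerAuth(os.getenv("RAGFLOW_API_KEY", ""))
    dataset_id = create_datasets(auth, 1)[0]
    try:
        with tempfile.TemporaryDirectory() as tmp:
            document_ids = batch_upload_documents(auth, dataset_id, 2, Path(tmp))
            print(list_documnet(auth, dataset_id))
            delete_documnet(auth, dataset_id, {"ids": document_ids})
    finally:
        delete_dataset(auth, {"ids": [dataset_id]})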