#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from pathlib import Path

import requests
from requests_toolbelt import MultipartEncoder

from libs.utils.file_utils import create_txt_file

HEADERS = {"Content-Type": "application/json"}
HOST_ADDRESS = os.getenv("HOST_ADDRESS", "http://127.0.0.1:9380")
DATASETS_API_URL = "/api/v1/datasets"
FILE_API_URL = "/api/v1/datasets/{dataset_id}/documents"
FILE_CHUNK_API_URL = "/api/v1/datasets/{dataset_id}/chunks"
INVALID_API_TOKEN = "invalid_key_123"
DATASET_NAME_LIMIT = 128
DOCUMENT_NAME_LIMIT = 128

# DATASET MANAGEMENT


def create_dataset(auth, payload=None):
    res = requests.post(
        url=f"{HOST_ADDRESS}{DATASETS_API_URL}",
        headers=HEADERS,
        auth=auth,
        json=payload,
    )
    return res.json()


def list_dataset(auth, params=None):
    res = requests.get(
        url=f"{HOST_ADDRESS}{DATASETS_API_URL}",
        headers=HEADERS,
        auth=auth,
        params=params,
    )
    return res.json()


def update_dataset(auth, dataset_id, payload=None):
    res = requests.put(
        url=f"{HOST_ADDRESS}{DATASETS_API_URL}/{dataset_id}",
        headers=HEADERS,
        auth=auth,
        json=payload,
    )
    return res.json()


def delete_dataset(auth, payload=None):
    # Deletion targets the collection endpoint; the IDs to delete are
    # carried in the JSON body rather than the URL.
    res = requests.delete(
        url=f"{HOST_ADDRESS}{DATASETS_API_URL}",
        headers=HEADERS,
        auth=auth,
        json=payload,
    )
    return res.json()


def create_datasets(auth, num):
    # Create `num` datasets named dataset_0..dataset_{num-1} and return their IDs.
    ids = []
    for i in range(num):
        res = create_dataset(auth, {"name": f"dataset_{i}"})
        ids.append(res["data"]["id"])
    return ids
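

# A minimal usage sketch for the dataset helpers above (illustrative only:
# `auth` is assumed to be any requests-compatible auth object, and the
# {"ids": [...]} delete payload shape is an assumption, not confirmed here):
#
#   dataset_ids = create_datasets(auth, 3)
#   res = list_dataset(auth, params={"page": 1, "page_size": 30})
#   assert res["code"] == 0
#   delete_dataset(auth, {"ids": dataset_ids})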

# FILE MANAGEMENT WITHIN DATASET


def upload_documents(auth, dataset_id, files_path=None):
    url = f"{HOST_ADDRESS}{FILE_API_URL}".format(dataset_id=dataset_id)
    if files_path is None:
        files_path = []

    fields = []
    file_objects = []
    try:
        # Open every file up front so they can all go into one
        # multipart/form-data request, then close them in `finally`.
        for fp in files_path:
            p = Path(fp)
            f = p.open("rb")
            fields.append(("file", (p.name, f)))
            file_objects.append(f)
        m = MultipartEncoder(fields=fields)
        res = requests.post(
            url=url,
            headers={"Content-Type": m.content_type},
            auth=auth,
            data=m,
        )
        return res.json()
    finally:
        for f in file_objects:
            f.close()


def batch_upload_documents(auth, dataset_id, num, tmp_path):
    # Generate `num` throwaway .txt files under tmp_path, upload them,
    # and return the IDs of the created documents.
    fps = []
    for i in range(num):
        fp = create_txt_file(tmp_path / f"ragflow_test_upload_{i}.txt")
        fps.append(fp)
    res = upload_documents(auth, dataset_id, fps)
    document_ids = []
    for document in res["data"]:
        document_ids.append(document["id"])
    return document_ids


def download_document(auth, dataset_id, document_id, save_path):
    url = f"{HOST_ADDRESS}{FILE_API_URL}/{document_id}".format(dataset_id=dataset_id)
    res = requests.get(url=url, auth=auth, stream=True)
    try:
        if res.status_code == 200:
            with open(save_path, "wb") as f:
                for chunk in res.iter_content(chunk_size=8192):
                    f.write(chunk)
    finally:
        res.close()
    return res


def list_document(auth, dataset_id, params=None):
    url = f"{HOST_ADDRESS}{FILE_API_URL}".format(dataset_id=dataset_id)
    res = requests.get(
        url=url,
        headers=HEADERS,
        auth=auth,
        params=params,
    )
    return res.json()


def update_document(auth, dataset_id, document_id, payload=None):
    url = f"{HOST_ADDRESS}{FILE_API_URL}/{document_id}".format(dataset_id=dataset_id)
    res = requests.put(url=url, headers=HEADERS, auth=auth, json=payload)
    return res.json()


def delete_document(auth, dataset_id, payload=None):
    url = f"{HOST_ADDRESS}{FILE_API_URL}".format(dataset_id=dataset_id)
    res = requests.delete(url=url, headers=HEADERS, auth=auth, json=payload)
    return res.json()


def parse_document(auth, dataset_id, payload=None):
    # Parsing is triggered by POSTing document IDs to the chunks endpoint.
    url = f"{HOST_ADDRESS}{FILE_CHUNK_API_URL}".format(dataset_id=dataset_id)
    res = requests.post(url=url, headers=HEADERS, auth=auth, json=payload)
    return res.json()


def stop_parse_document(auth, dataset_id, payload=None):
    # Stopping an in-progress parse is a DELETE against the same chunks endpoint.
    url = f"{HOST_ADDRESS}{FILE_CHUNK_API_URL}".format(dataset_id=dataset_id)
    res = requests.delete(url=url, headers=HEADERS, auth=auth, json=payload)
    return res.json()
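

if __name__ == "__main__":
    # End-to-end smoke sketch, not part of the test suite. It assumes an
    # API key in the RAGFLOW_API_KEY environment variable, and the payload
    # shapes shown ({"ids": [...]}, {"document_ids": [...]}) are assumptions
    # about the API, not confirmed by this module. The Bearer auth class
    # below is illustrative, not an import from this project.
    import tempfile

    class BearerAuth(requests.auth.AuthBase):
        def __init__(self, token):
            self.token = token

        def __call__(self, r):
            r.headers["Authorization"] = f"Bearer {self.token}"
            return r

    auth = BearerAuth(os.environ["RAGFLOW_API_KEY"])
    dataset_id = create_dataset(auth, {"name": "smoke_test"})["data"]["id"]
    with tempfile.TemporaryDirectory() as tmp:
        document_ids = batch_upload_documents(auth, dataset_id, 2, Path(tmp))
        parse_document(auth, dataset_id, {"document_ids": document_ids})
    delete_dataset(auth, {"ids": [dataset_id]})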