### What problem does this PR solve?

Configure test case priorities to reduce CI execution time.

### Type of change

- [x] Test cases update
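The CI jobs below drop the old `-m "not slow"` filter and run `pytest --level=p2` instead. As a rough sketch (assuming the `MARKER_EXPRESSIONS` mapping added to `conftest.py` later in this diff), each `--level` value expands to a pytest marker expression as follows:

```python
# Illustrative sketch only -- mirrors the MARKER_EXPRESSIONS mapping added in conftest.py.
MARKER_EXPRESSIONS = {
    "p1": "p1",              # smoke: only p1-marked tests
    "p2": "p1 or p2",        # core: p1 and p2 tests (the new CI default)
    "p3": "p1 or p2 or p3",  # full: every prioritized test
}

for level, expr in MARKER_EXPRESSIONS.items():
    # e.g. `pytest --level=p2` is expected to behave like `pytest -m "p1 or p2"`
    print(f"--level={level}  ->  -m {expr!r}")
```

With a marker expression active, tests that carry no priority marker should be deselected, and the former `slow` tests (re-marked as `p3` in this diff) only run at `--level=p3`.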
| @@ -106,7 +106,7 @@ jobs: | |||
| echo "Waiting for service to be available..." | |||
| sleep 5 | |||
| done | |||
| cd sdk/python && uv sync --python 3.10 --group test --frozen && source .venv/bin/activate && cd test/test_http_api && pytest -s --tb=short -m "not slow" | |||
| cd sdk/python && uv sync --python 3.10 --group test --frozen && source .venv/bin/activate && cd test/test_http_api && pytest -s --tb=short --level=p2 | |||
| - name: Stop ragflow:nightly | |||
| if: always() # always run this step even if previous steps failed | |||
| @@ -145,7 +145,7 @@ jobs: | |||
| echo "Waiting for service to be available..." | |||
| sleep 5 | |||
| done | |||
| cd sdk/python && uv sync --python 3.10 --group test --frozen && source .venv/bin/activate && cd test/test_http_api && DOC_ENGINE=infinity pytest -s --tb=short -m "not slow" | |||
| cd sdk/python && uv sync --python 3.10 --group test --frozen && source .venv/bin/activate && cd test/test_http_api && DOC_ENGINE=infinity pytest -s --tb=short --level=p2 | |||
| - name: Stop ragflow:nightly | |||
| if: always() # always run this step even if previous steps failed | |||
| @@ -24,4 +24,8 @@ test = [ | |||
| [tool.pytest.ini_options] | |||
| markers = ["slow: marks tests as slow (deselect with '-m \"not slow\"')"] | |||
| markers = [ | |||
| "p1: high priority test cases", | |||
| "p2: medium priority test cases", | |||
| "p3: low priority test cases", | |||
| ] | |||
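For reference, a minimal test module using the newly registered markers might look like the sketch below (hypothetical test names; it mirrors the marking patterns applied throughout this PR: function-level marks plus per-case marks via `pytest.param`):

```python
import pytest


class TestPriorityExample:
    @pytest.mark.p1  # smoke-level test, selected at every --level
    def test_smoke(self):
        assert 1 + 1 == 2

    @pytest.mark.p2  # core-level test, selected at --level=p2 and --level=p3
    def test_core(self):
        assert "ragflow".upper() == "RAGFLOW"

    @pytest.mark.parametrize(
        "value, expected",
        [
            pytest.param(0, 0, marks=pytest.mark.p1),   # happy path stays in smoke runs
            pytest.param(-1, -1, marks=pytest.mark.p3),  # edge case only in full runs
        ],
    )
    def test_identity(self, value, expected):
        assert value == expected
```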
| @@ -40,6 +40,29 @@ from libs.utils.file_utils import ( | |||
| create_txt_file, | |||
| ) | |||
| MARKER_EXPRESSIONS = { | |||
| "p1": "p1", | |||
| "p2": "p1 or p2", | |||
| "p3": "p1 or p2 or p3", | |||
| } | |||
| def pytest_addoption(parser: pytest.Parser) -> None: | |||
| parser.addoption( | |||
| "--level", | |||
| action="store", | |||
| default="p2", | |||
| choices=list(MARKER_EXPRESSIONS.keys()), | |||
| help=f"Test level ({'/'.join(MARKER_EXPRESSIONS)}): p1=smoke, p2=core, p3=full", | |||
| ) | |||
| def pytest_configure(config: pytest.Config) -> None: | |||
| level = config.getoption("--level") | |||
| config.option.markexpr = MARKER_EXPRESSIONS[level] | |||
| if config.option.verbose > 0: | |||
| print(f"\n[CONFIG] Active test level: {level}") | |||
| @wait_for(30, 1, "Document parsing timeout") | |||
| def condition(_auth, _dataset_id): | |||
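With the hooks above in place, `--level` is effectively shorthand for pytest's `-m` expression. A quick way to sanity-check the equivalence (hypothetical snippet, not part of this PR) is to drive pytest programmatically with the same expression the `p2` level resolves to:

```python
# Hypothetical check, not part of this PR: collecting with -m "p1 or p2" should
# match what `pytest --level=p2` collects once conftest.py sets markexpr.
import pytest

if __name__ == "__main__":
    raise SystemExit(pytest.main(["--collect-only", "-q", "-m", "p1 or p2"]))
```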
| @@ -21,7 +21,7 @@ from libs.utils import encode_avatar | |||
| from libs.utils.file_utils import create_image_file | |||
| @pytest.mark.usefixtures("clear_chat_assistants") | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -42,6 +42,7 @@ class TestAuthorization: | |||
| @pytest.mark.usefixtures("clear_chat_assistants") | |||
| class TestChatAssistantCreate: | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_message", | |||
| [ | |||
| @@ -67,6 +68,7 @@ class TestChatAssistantCreate: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "dataset_ids, expected_code, expected_message", | |||
| [ | |||
| @@ -91,12 +93,14 @@ class TestChatAssistantCreate: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| def test_avatar(self, get_http_api_auth, tmp_path): | |||
| fn = create_image_file(tmp_path / "ragflow_test.png") | |||
| payload = {"name": "avatar_test", "avatar": encode_avatar(fn), "dataset_ids": []} | |||
| res = create_chat_assistant(get_http_api_auth, payload) | |||
| assert res["code"] == 0 | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "llm, expected_code, expected_message", | |||
| [ | |||
| @@ -150,6 +154,7 @@ class TestChatAssistantCreate: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "prompt, expected_code, expected_message", | |||
| [ | |||
| @@ -226,8 +231,8 @@ class TestChatAssistantCreate: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.usefixtures("clear_chat_assistants") | |||
| class TestChatAssistantCreate2: | |||
| @pytest.mark.p2 | |||
| def test_unparsed_document(self, get_http_api_auth, add_document): | |||
| dataset_id, _ = add_document | |||
| payload = {"name": "prompt_test", "dataset_ids": [dataset_id]} | |||
| @@ -20,6 +20,7 @@ from common import INVALID_API_TOKEN, batch_create_chat_assistants, delete_chat_ | |||
| from libs.auth import RAGFlowHttpApiAuth | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -42,13 +43,13 @@ class TestChatAssistantsDelete: | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_message, remaining", | |||
| [ | |||
| (None, 0, "", 0), | |||
| ({"ids": []}, 0, "", 0), | |||
| ({"ids": ["invalid_id"]}, 102, "Assistant(invalid_id) not found.", 5), | |||
| ({"ids": ["\n!?。;!?\"'"]}, 102, """Assistant(\n!?。;!?"\') not found.""", 5), | |||
| ("not json", 100, "AttributeError(\"'str' object has no attribute 'get'\")", 5), | |||
| (lambda r: {"ids": r[:1]}, 0, "", 4), | |||
| (lambda r: {"ids": r}, 0, "", 0), | |||
| pytest.param(None, 0, "", 0, marks=pytest.mark.p3), | |||
| pytest.param({"ids": []}, 0, "", 0, marks=pytest.mark.p3), | |||
| pytest.param({"ids": ["invalid_id"]}, 102, "Assistant(invalid_id) not found.", 5, marks=pytest.mark.p3), | |||
| pytest.param({"ids": ["\n!?。;!?\"'"]}, 102, """Assistant(\n!?。;!?"\') not found.""", 5, marks=pytest.mark.p3), | |||
| pytest.param("not json", 100, "AttributeError(\"'str' object has no attribute 'get'\")", 5, marks=pytest.mark.p3), | |||
| pytest.param(lambda r: {"ids": r[:1]}, 0, "", 4, marks=pytest.mark.p3), | |||
| pytest.param(lambda r: {"ids": r}, 0, "", 0, marks=pytest.mark.p1), | |||
| ], | |||
| ) | |||
| def test_basic_scenarios(self, get_http_api_auth, add_chat_assistants_func, payload, expected_code, expected_message, remaining): | |||
| @@ -66,9 +67,9 @@ class TestChatAssistantsDelete: | |||
| @pytest.mark.parametrize( | |||
| "payload", | |||
| [ | |||
| lambda r: {"ids": ["invalid_id"] + r}, | |||
| lambda r: {"ids": r[:1] + ["invalid_id"] + r[1:5]}, | |||
| lambda r: {"ids": r + ["invalid_id"]}, | |||
| pytest.param(lambda r: {"ids": ["invalid_id"] + r}, marks=pytest.mark.p3), | |||
| pytest.param(lambda r: {"ids": r[:1] + ["invalid_id"] + r[1:5]}, marks=pytest.mark.p1), | |||
| pytest.param(lambda r: {"ids": r + ["invalid_id"]}, marks=pytest.mark.p3), | |||
| ], | |||
| ) | |||
| def test_delete_partial_invalid_id(self, get_http_api_auth, add_chat_assistants_func, payload): | |||
| @@ -83,6 +84,7 @@ class TestChatAssistantsDelete: | |||
| res = list_chat_assistants(get_http_api_auth) | |||
| assert len(res["data"]) == 0 | |||
| @pytest.mark.p3 | |||
| def test_repeated_deletion(self, get_http_api_auth, add_chat_assistants_func): | |||
| _, _, chat_assistant_ids = add_chat_assistants_func | |||
| res = delete_chat_assistants(get_http_api_auth, {"ids": chat_assistant_ids}) | |||
| @@ -92,6 +94,7 @@ class TestChatAssistantsDelete: | |||
| assert res["code"] == 102 | |||
| assert "not found" in res["message"] | |||
| @pytest.mark.p3 | |||
| def test_duplicate_deletion(self, get_http_api_auth, add_chat_assistants_func): | |||
| _, _, chat_assistant_ids = add_chat_assistants_func | |||
| res = delete_chat_assistants(get_http_api_auth, {"ids": chat_assistant_ids + chat_assistant_ids}) | |||
| @@ -102,7 +105,7 @@ class TestChatAssistantsDelete: | |||
| res = list_chat_assistants(get_http_api_auth) | |||
| assert res["code"] == 0 | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_concurrent_deletion(self, get_http_api_auth): | |||
| ids = batch_create_chat_assistants(get_http_api_auth, 100) | |||
| @@ -111,7 +114,7 @@ class TestChatAssistantsDelete: | |||
| responses = [f.result() for f in futures] | |||
| assert all(r["code"] == 0 for r in responses) | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_delete_10k(self, get_http_api_auth): | |||
| ids = batch_create_chat_assistants(get_http_api_auth, 10_000) | |||
| res = delete_chat_assistants(get_http_api_auth, {"ids": ids}) | |||
| @@ -21,6 +21,7 @@ from libs.auth import RAGFlowHttpApiAuth | |||
| from libs.utils import is_sorted | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -41,11 +42,13 @@ class TestAuthorization: | |||
| @pytest.mark.usefixtures("add_chat_assistants") | |||
| class TestChatAssistantsList: | |||
| @pytest.mark.p1 | |||
| def test_default(self, get_http_api_auth): | |||
| res = list_chat_assistants(get_http_api_auth) | |||
| assert res["code"] == 0 | |||
| assert len(res["data"]) == 5 | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, expected_page_size, expected_message", | |||
| [ | |||
| @@ -78,6 +81,7 @@ class TestChatAssistantsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, expected_page_size, expected_message", | |||
| [ | |||
| @@ -117,6 +121,7 @@ class TestChatAssistantsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, assertions, expected_message", | |||
| [ | |||
| @@ -155,6 +160,7 @@ class TestChatAssistantsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, assertions, expected_message", | |||
| [ | |||
| @@ -191,6 +197,7 @@ class TestChatAssistantsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, expected_num, expected_message", | |||
| [ | |||
| @@ -211,6 +218,7 @@ class TestChatAssistantsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "chat_assistant_id, expected_code, expected_num, expected_message", | |||
| [ | |||
| @@ -245,6 +253,7 @@ class TestChatAssistantsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "chat_assistant_id, name, expected_code, expected_num, expected_message", | |||
| [ | |||
| @@ -277,19 +286,21 @@ class TestChatAssistantsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_concurrent_list(self, get_http_api_auth): | |||
| with ThreadPoolExecutor(max_workers=5) as executor: | |||
| futures = [executor.submit(list_chat_assistants, get_http_api_auth) for i in range(100)] | |||
| responses = [f.result() for f in futures] | |||
| assert all(r["code"] == 0 for r in responses) | |||
| @pytest.mark.p3 | |||
| def test_invalid_params(self, get_http_api_auth): | |||
| params = {"a": "b"} | |||
| res = list_chat_assistants(get_http_api_auth, params=params) | |||
| assert res["code"] == 0 | |||
| assert len(res["data"]) == 5 | |||
| @pytest.mark.p2 | |||
| def test_list_chats_after_deleting_associated_dataset(self, get_http_api_auth, add_chat_assistants): | |||
| dataset_id, _, _ = add_chat_assistants | |||
| res = delete_datasets(get_http_api_auth, {"ids": [dataset_id]}) | |||
| @@ -20,7 +20,7 @@ from libs.utils import encode_avatar | |||
| from libs.utils.file_utils import create_image_file | |||
| @pytest.mark.usefixtures("clear_chat_assistants") | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -43,12 +43,12 @@ class TestChatAssistantUpdate: | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_message", | |||
| [ | |||
| ({"name": "valid_name"}, 0, ""), | |||
| pytest.param({"name": "valid_name"}, 0, "", marks=pytest.mark.p1), | |||
| pytest.param({"name": "a" * (CHAT_ASSISTANT_NAME_LIMIT + 1)}, 102, "", marks=pytest.mark.skip(reason="issues/")), | |||
| pytest.param({"name": 1}, 100, "", marks=pytest.mark.skip(reason="issues/")), | |||
| ({"name": ""}, 102, "`name` cannot be empty."), | |||
| ({"name": "test_chat_assistant_1"}, 102, "Duplicated chat name in updating chat."), | |||
| ({"name": "TEST_CHAT_ASSISTANT_1"}, 102, "Duplicated chat name in updating chat."), | |||
| pytest.param({"name": ""}, 102, "`name` cannot be empty.", marks=pytest.mark.p3), | |||
| pytest.param({"name": "test_chat_assistant_1"}, 102, "Duplicated chat name in updating chat.", marks=pytest.mark.p3), | |||
| pytest.param({"name": "TEST_CHAT_ASSISTANT_1"}, 102, "Duplicated chat name in updating chat.", marks=pytest.mark.p3), | |||
| ], | |||
| ) | |||
| def test_name(self, get_http_api_auth, add_chat_assistants_func, payload, expected_code, expected_message): | |||
| @@ -66,9 +66,9 @@ class TestChatAssistantUpdate: | |||
| "dataset_ids, expected_code, expected_message", | |||
| [ | |||
| pytest.param([], 0, "", marks=pytest.mark.skip(reason="issues/")), | |||
| (lambda r: [r], 0, ""), | |||
| (["invalid_dataset_id"], 102, "You don't own the dataset invalid_dataset_id"), | |||
| ("invalid_dataset_id", 102, "You don't own the dataset i"), | |||
| pytest.param(lambda r: [r], 0, "", marks=pytest.mark.p1), | |||
| pytest.param(["invalid_dataset_id"], 102, "You don't own the dataset invalid_dataset_id", marks=pytest.mark.p3), | |||
| pytest.param("invalid_dataset_id", 102, "You don't own the dataset i", marks=pytest.mark.p3), | |||
| ], | |||
| ) | |||
| def test_dataset_ids(self, get_http_api_auth, add_chat_assistants_func, dataset_ids, expected_code, expected_message): | |||
| @@ -87,6 +87,7 @@ class TestChatAssistantUpdate: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| def test_avatar(self, get_http_api_auth, add_chat_assistants_func, tmp_path): | |||
| dataset_id, _, chat_assistant_ids = add_chat_assistants_func | |||
| fn = create_image_file(tmp_path / "ragflow_test.png") | |||
| @@ -94,6 +95,7 @@ class TestChatAssistantUpdate: | |||
| res = update_chat_assistant(get_http_api_auth, chat_assistant_ids[0], payload) | |||
| assert res["code"] == 0 | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "llm, expected_code, expected_message", | |||
| [ | |||
| @@ -148,6 +150,7 @@ class TestChatAssistantUpdate: | |||
| else: | |||
| assert expected_message in res["message"] | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "prompt, expected_code, expected_message", | |||
| [ | |||
| @@ -31,6 +31,7 @@ def validate_chunk_details(dataset_id, document_id, payload, res): | |||
| assert chunk["questions"] == [str(q).strip() for q in payload.get("questions", []) if str(q).strip()] | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -50,6 +51,7 @@ class TestAuthorization: | |||
| class TestAddChunk: | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_message", | |||
| [ | |||
| @@ -83,6 +85,7 @@ class TestAddChunk: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_message", | |||
| [ | |||
| @@ -115,6 +118,7 @@ class TestAddChunk: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_message", | |||
| [ | |||
| @@ -143,6 +147,7 @@ class TestAddChunk: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "dataset_id, expected_code, expected_message", | |||
| [ | |||
| @@ -167,6 +172,7 @@ class TestAddChunk: | |||
| assert res["code"] == expected_code | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "document_id, expected_code, expected_message", | |||
| [ | |||
| @@ -184,6 +190,7 @@ class TestAddChunk: | |||
| assert res["code"] == expected_code | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| def test_repeated_add_chunk(self, get_http_api_auth, add_document): | |||
| payload = {"content": "chunk test"} | |||
| dataset_id, document_id = add_document | |||
| @@ -207,6 +214,7 @@ class TestAddChunk: | |||
| assert False, res | |||
| assert res["data"]["doc"]["chunk_count"] == chunks_count + 2 | |||
| @pytest.mark.p2 | |||
| def test_add_chunk_to_deleted_document(self, get_http_api_auth, add_document): | |||
| dataset_id, document_id = add_document | |||
| delete_documnets(get_http_api_auth, dataset_id, {"ids": [document_id]}) | |||
| @@ -214,7 +222,6 @@ class TestAddChunk: | |||
| assert res["code"] == 102 | |||
| assert res["message"] == f"You don't own the document {document_id}." | |||
| @pytest.mark.slow | |||
| @pytest.mark.skip(reason="issues/6411") | |||
| def test_concurrent_add_chunk(self, get_http_api_auth, add_document): | |||
| chunk_num = 50 | |||
| @@ -20,6 +20,7 @@ from common import INVALID_API_TOKEN, batch_add_chunks, delete_chunks, list_chun | |||
| from libs.auth import RAGFlowHttpApiAuth | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -39,6 +40,7 @@ class TestAuthorization: | |||
| class TestChunksDeletion: | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "dataset_id, expected_code, expected_message", | |||
| [ | |||
| @@ -56,6 +58,7 @@ class TestChunksDeletion: | |||
| assert res["code"] == expected_code | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "document_id, expected_code, expected_message", | |||
| [ | |||
| @@ -72,9 +75,9 @@ class TestChunksDeletion: | |||
| @pytest.mark.parametrize( | |||
| "payload", | |||
| [ | |||
| lambda r: {"chunk_ids": ["invalid_id"] + r}, | |||
| lambda r: {"chunk_ids": r[:1] + ["invalid_id"] + r[1:4]}, | |||
| lambda r: {"chunk_ids": r + ["invalid_id"]}, | |||
| pytest.param(lambda r: {"chunk_ids": ["invalid_id"] + r}, marks=pytest.mark.p3), | |||
| pytest.param(lambda r: {"chunk_ids": r[:1] + ["invalid_id"] + r[1:4]}, marks=pytest.mark.p1), | |||
| pytest.param(lambda r: {"chunk_ids": r + ["invalid_id"]}, marks=pytest.mark.p3), | |||
| ], | |||
| ) | |||
| def test_delete_partial_invalid_id(self, get_http_api_auth, add_chunks_func, payload): | |||
| @@ -91,6 +94,7 @@ class TestChunksDeletion: | |||
| assert len(res["data"]["chunks"]) == 1 | |||
| assert res["data"]["total"] == 1 | |||
| @pytest.mark.p3 | |||
| def test_repeated_deletion(self, get_http_api_auth, add_chunks_func): | |||
| dataset_id, document_id, chunk_ids = add_chunks_func | |||
| payload = {"chunk_ids": chunk_ids} | |||
| @@ -101,6 +105,7 @@ class TestChunksDeletion: | |||
| assert res["code"] == 102 | |||
| assert res["message"] == "rm_chunk deleted chunks 0, expect 4" | |||
| @pytest.mark.p3 | |||
| def test_duplicate_deletion(self, get_http_api_auth, add_chunks_func): | |||
| dataset_id, document_id, chunk_ids = add_chunks_func | |||
| res = delete_chunks(get_http_api_auth, dataset_id, document_id, {"chunk_ids": chunk_ids * 2}) | |||
| @@ -114,7 +119,7 @@ class TestChunksDeletion: | |||
| assert len(res["data"]["chunks"]) == 1 | |||
| assert res["data"]["total"] == 1 | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_concurrent_deletion(self, get_http_api_auth, add_document): | |||
| chunks_num = 100 | |||
| dataset_id, document_id = add_document | |||
| @@ -134,7 +139,7 @@ class TestChunksDeletion: | |||
| responses = [f.result() for f in futures] | |||
| assert all(r["code"] == 0 for r in responses) | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_delete_1k(self, get_http_api_auth, add_document): | |||
| chunks_num = 1_000 | |||
| dataset_id, document_id = add_document | |||
| @@ -158,17 +163,11 @@ class TestChunksDeletion: | |||
| "payload, expected_code, expected_message, remaining", | |||
| [ | |||
| pytest.param(None, 100, """TypeError("argument of type \'NoneType\' is not iterable")""", 5, marks=pytest.mark.skip), | |||
| ({"chunk_ids": ["invalid_id"]}, 102, "rm_chunk deleted chunks 0, expect 1", 5), | |||
| pytest.param( | |||
| "not json", | |||
| 100, | |||
| """UnboundLocalError("local variable \'duplicate_messages\' referenced before assignment")""", | |||
| 5, | |||
| marks=pytest.mark.skip(reason="pull/6376"), | |||
| ), | |||
| (lambda r: {"chunk_ids": r[:1]}, 0, "", 4), | |||
| (lambda r: {"chunk_ids": r}, 0, "", 1), | |||
| ({"chunk_ids": []}, 0, "", 0), | |||
| pytest.param({"chunk_ids": ["invalid_id"]}, 102, "rm_chunk deleted chunks 0, expect 1", 5, marks=pytest.mark.p3), | |||
| pytest.param("not json", 100, """UnboundLocalError("local variable \'duplicate_messages\' referenced before assignment")""", 5, marks=pytest.mark.skip(reason="pull/6376")), | |||
| pytest.param(lambda r: {"chunk_ids": r[:1]}, 0, "", 4, marks=pytest.mark.p3), | |||
| pytest.param(lambda r: {"chunk_ids": r}, 0, "", 1, marks=pytest.mark.p1), | |||
| pytest.param({"chunk_ids": []}, 0, "", 0, marks=pytest.mark.p3), | |||
| ], | |||
| ) | |||
| def test_basic_scenarios( | |||
| @@ -21,6 +21,7 @@ from common import INVALID_API_TOKEN, batch_add_chunks, list_chunks | |||
| from libs.auth import RAGFlowHttpApiAuth | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -40,6 +41,7 @@ class TestAuthorization: | |||
| class TestChunksList: | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, expected_page_size, expected_message", | |||
| [ | |||
| @@ -61,6 +63,7 @@ class TestChunksList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, expected_page_size, expected_message", | |||
| [ | |||
| @@ -83,6 +86,7 @@ class TestChunksList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_page_size", | |||
| [ | |||
| @@ -100,6 +104,7 @@ class TestChunksList: | |||
| assert res["code"] == 0 | |||
| assert len(res["data"]["chunks"]) == expected_page_size | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "chunk_id, expected_code, expected_page_size, expected_message", | |||
| [ | |||
| @@ -133,6 +138,7 @@ class TestChunksList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| def test_invalid_params(self, get_http_api_auth, add_chunks): | |||
| dataset_id, document_id, _ = add_chunks | |||
| params = {"a": "b"} | |||
| @@ -140,6 +146,7 @@ class TestChunksList: | |||
| assert res["code"] == 0 | |||
| assert len(res["data"]["chunks"]) == 5 | |||
| @pytest.mark.p3 | |||
| def test_concurrent_list(self, get_http_api_auth, add_chunks): | |||
| dataset_id, document_id, _ = add_chunks | |||
| @@ -149,6 +156,7 @@ class TestChunksList: | |||
| assert all(r["code"] == 0 for r in responses) | |||
| assert all(len(r["data"]["chunks"]) == 5 for r in responses) | |||
| @pytest.mark.p1 | |||
| def test_default(self, get_http_api_auth, add_document): | |||
| dataset_id, document_id = add_document | |||
| @@ -164,6 +172,7 @@ class TestChunksList: | |||
| assert len(res["data"]["chunks"]) == 30 | |||
| assert res["data"]["doc"]["chunk_count"] == chunks_count + 31 | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "dataset_id, expected_code, expected_message", | |||
| [ | |||
| @@ -181,6 +190,7 @@ class TestChunksList: | |||
| assert res["code"] == expected_code | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "document_id, expected_code, expected_message", | |||
| [ | |||
| @@ -23,6 +23,7 @@ from common import ( | |||
| from libs.auth import RAGFlowHttpApiAuth | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -42,6 +43,7 @@ class TestAuthorization: | |||
| class TestChunksRetrieval: | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_page_size, expected_message", | |||
| [ | |||
| @@ -64,6 +66,7 @@ class TestChunksRetrieval: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_page_size, expected_message", | |||
| [ | |||
| @@ -110,6 +113,7 @@ class TestChunksRetrieval: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_page_size, expected_message", | |||
| [ | |||
| @@ -145,6 +149,7 @@ class TestChunksRetrieval: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_page_size, expected_message", | |||
| [ | |||
| @@ -170,6 +175,7 @@ class TestChunksRetrieval: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_page_size, expected_message", | |||
| [ | |||
| @@ -260,6 +266,7 @@ class TestChunksRetrieval: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_highlight, expected_message", | |||
| [ | |||
| @@ -285,6 +292,7 @@ class TestChunksRetrieval: | |||
| if expected_code != 0: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| def test_invalid_params(self, get_http_api_auth, add_chunks): | |||
| dataset_id, _, _ = add_chunks | |||
| payload = {"question": "chunk", "dataset_ids": [dataset_id], "a": "b"} | |||
| @@ -292,7 +300,7 @@ class TestChunksRetrieval: | |||
| assert res["code"] == 0 | |||
| assert len(res["data"]["chunks"]) == 4 | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_concurrent_retrieval(self, get_http_api_auth, add_chunks): | |||
| from concurrent.futures import ThreadPoolExecutor | |||
| @@ -22,6 +22,7 @@ from common import INVALID_API_TOKEN, delete_documnets, update_chunk | |||
| from libs.auth import RAGFlowHttpApiAuth | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -41,6 +42,7 @@ class TestAuthorization: | |||
| class TestUpdatedChunk: | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_message", | |||
| [ | |||
| @@ -74,6 +76,7 @@ class TestUpdatedChunk: | |||
| if expected_code != 0: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_message", | |||
| [ | |||
| @@ -92,6 +95,7 @@ class TestUpdatedChunk: | |||
| if expected_code != 0: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_message", | |||
| [ | |||
| @@ -110,6 +114,7 @@ class TestUpdatedChunk: | |||
| if expected_code != 0: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_message", | |||
| [ | |||
| @@ -135,6 +140,7 @@ class TestUpdatedChunk: | |||
| if expected_code != 0: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "dataset_id, expected_code, expected_message", | |||
| [ | |||
| @@ -149,6 +155,7 @@ class TestUpdatedChunk: | |||
| assert res["code"] == expected_code | |||
| assert expected_message in res["message"] | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "document_id, expected_code, expected_message", | |||
| [ | |||
| @@ -166,6 +173,7 @@ class TestUpdatedChunk: | |||
| assert res["code"] == expected_code | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "chunk_id, expected_code, expected_message", | |||
| [ | |||
| @@ -183,6 +191,7 @@ class TestUpdatedChunk: | |||
| assert res["code"] == expected_code | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| def test_repeated_update_chunk(self, get_http_api_auth, add_chunks): | |||
| dataset_id, document_id, chunk_ids = add_chunks | |||
| res = update_chunk(get_http_api_auth, dataset_id, document_id, chunk_ids[0], {"content": "chunk test 1"}) | |||
| @@ -191,6 +200,7 @@ class TestUpdatedChunk: | |||
| res = update_chunk(get_http_api_auth, dataset_id, document_id, chunk_ids[0], {"content": "chunk test 2"}) | |||
| assert res["code"] == 0 | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_message", | |||
| [ | |||
| @@ -206,7 +216,7 @@ class TestUpdatedChunk: | |||
| if expected_code != 0: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| @pytest.mark.skipif(os.getenv("DOC_ENGINE") == "infinity", reason="issues/6554") | |||
| def test_concurrent_update_chunk(self, get_http_api_auth, add_chunks): | |||
| chunk_num = 50 | |||
| @@ -227,6 +237,7 @@ class TestUpdatedChunk: | |||
| responses = [f.result() for f in futures] | |||
| assert all(r["code"] == 0 for r in responses) | |||
| @pytest.mark.p3 | |||
| def test_update_chunk_to_deleted_document(self, get_http_api_auth, add_chunks): | |||
| dataset_id, document_id, chunk_ids = add_chunks | |||
| delete_documnets(get_http_api_auth, dataset_id, {"ids": [document_id]}) | |||
| @@ -34,6 +34,7 @@ def valid_names(draw): | |||
| return name.encode("utf-8").decode("utf-8") | |||
| @pytest.mark.p1 | |||
| @pytest.mark.usefixtures("clear_datasets") | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| @@ -56,6 +57,7 @@ class TestAuthorization: | |||
| @pytest.mark.usefixtures("clear_datasets") | |||
| class TestDatasetCreation: | |||
| @pytest.mark.p1 | |||
| @given(name=valid_names()) | |||
| @example("a" * 128) | |||
| @settings(max_examples=20) | |||
| @@ -64,6 +66,7 @@ class TestDatasetCreation: | |||
| assert res["code"] == 0, res | |||
| assert res["data"]["name"] == name, res | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "name, expected_message", | |||
| [ | |||
| @@ -79,6 +82,7 @@ class TestDatasetCreation: | |||
| assert res["code"] == 101, res | |||
| assert expected_message in res["message"], res | |||
| @pytest.mark.p2 | |||
| def test_duplicated_name(self, get_http_api_auth): | |||
| name = "duplicated_name" | |||
| payload = {"name": name} | |||
| @@ -89,6 +93,7 @@ class TestDatasetCreation: | |||
| assert res["code"] == 101, res | |||
| assert res["message"] == f"Dataset name '{name}' already exists", res | |||
| @pytest.mark.p2 | |||
| def test_case_insensitive(self, get_http_api_auth): | |||
| name = "CaseInsensitive" | |||
| res = create_dataset(get_http_api_auth, {"name": name.upper()}) | |||
| @@ -98,12 +103,14 @@ class TestDatasetCreation: | |||
| assert res["code"] == 101, res | |||
| assert res["message"] == f"Dataset name '{name.lower()}' already exists", res | |||
| @pytest.mark.p3 | |||
| def test_bad_content_type(self, get_http_api_auth): | |||
| BAD_CONTENT_TYPE = "text/xml" | |||
| res = create_dataset(get_http_api_auth, {"name": "name"}, {"Content-Type": BAD_CONTENT_TYPE}) | |||
| assert res["code"] == 101, res | |||
| assert res["message"] == f"Unsupported content type: Expected application/json, got {BAD_CONTENT_TYPE}", res | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_message", | |||
| [ | |||
| @@ -117,6 +124,7 @@ class TestDatasetCreation: | |||
| assert res["code"] == 101, res | |||
| assert expected_message in res["message"], res | |||
| @pytest.mark.p2 | |||
| def test_avatar(self, get_http_api_auth, tmp_path): | |||
| fn = create_image_file(tmp_path / "ragflow_test.png") | |||
| payload = { | |||
| @@ -126,17 +134,20 @@ class TestDatasetCreation: | |||
| res = create_dataset(get_http_api_auth, payload) | |||
| assert res["code"] == 0, res | |||
| @pytest.mark.p3 | |||
| def test_avatar_none(self, get_http_api_auth, tmp_path): | |||
| payload = {"name": "test_avatar_none", "avatar": None} | |||
| res = create_dataset(get_http_api_auth, payload) | |||
| assert res["code"] == 0, res | |||
| assert res["data"]["avatar"] is None, res | |||
| @pytest.mark.p2 | |||
| def test_avatar_exceeds_limit_length(self, get_http_api_auth): | |||
| res = create_dataset(get_http_api_auth, {"name": "exceeds_limit_length_avatar", "avatar": "a" * 65536}) | |||
| assert res["code"] == 101, res | |||
| assert "String should have at most 65535 characters" in res["message"], res | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "name, avatar_prefix, expected_message", | |||
| [ | |||
| @@ -157,18 +168,21 @@ class TestDatasetCreation: | |||
| assert res["code"] == 101, res | |||
| assert expected_message in res["message"], res | |||
| @pytest.mark.p3 | |||
| def test_description_none(self, get_http_api_auth): | |||
| payload = {"name": "test_description_none", "description": None} | |||
| res = create_dataset(get_http_api_auth, payload) | |||
| assert res["code"] == 0, res | |||
| assert res["data"]["description"] is None, res | |||
| @pytest.mark.p2 | |||
| def test_description_exceeds_limit_length(self, get_http_api_auth): | |||
| payload = {"name": "exceeds_limit_length_description", "description": "a" * 65536} | |||
| res = create_dataset(get_http_api_auth, payload) | |||
| assert res["code"] == 101, res | |||
| assert "String should have at most 65535 characters" in res["message"], res | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "name, embedding_model", | |||
| [ | |||
| @@ -192,6 +206,7 @@ class TestDatasetCreation: | |||
| else: | |||
| assert res["data"]["embedding_model"] == embedding_model, res | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "name, embedding_model", | |||
| [ | |||
| @@ -211,6 +226,7 @@ class TestDatasetCreation: | |||
| else: | |||
| assert res["message"] == f"Unsupported model: <{embedding_model}>", res | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "name, embedding_model", | |||
| [ | |||
| @@ -231,6 +247,7 @@ class TestDatasetCreation: | |||
| else: | |||
| assert "Both model_name and provider must be non-empty strings" in res["message"], res | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "name, permission", | |||
| [ | |||
| @@ -254,6 +271,7 @@ class TestDatasetCreation: | |||
| else: | |||
| assert res["data"]["permission"] == permission.lower(), res | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "name, permission", | |||
| [ | |||
| @@ -268,6 +286,7 @@ class TestDatasetCreation: | |||
| assert res["code"] == 101 | |||
| assert "Input should be 'me' or 'team'" in res["message"] | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "name, chunk_method", | |||
| [ | |||
| @@ -298,6 +317,7 @@ class TestDatasetCreation: | |||
| else: | |||
| assert res["data"]["chunk_method"] == chunk_method, res | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "name, chunk_method", | |||
| [ | |||
| @@ -312,6 +332,7 @@ class TestDatasetCreation: | |||
| assert res["code"] == 101, res | |||
| assert "Input should be 'naive', 'book', 'email', 'laws', 'manual', 'one', 'paper', 'picture', 'presentation', 'qa', 'table' or 'tag'" in res["message"], res | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "name, parser_config", | |||
| [ | |||
| @@ -458,6 +479,7 @@ class TestDatasetCreation: | |||
| else: | |||
| assert res["data"]["parser_config"][k] == v | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "name, parser_config, expected_message", | |||
| [ | |||
| @@ -579,7 +601,7 @@ class TestDatasetCreation: | |||
| assert res["code"] == 101, res | |||
| assert expected_message in res["message"], res | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_dataset_10k(self, get_http_api_auth): | |||
| for i in range(10_000): | |||
| payload = {"name": f"dataset_{i}"} | |||
| @@ -25,6 +25,7 @@ from common import ( | |||
| from libs.auth import RAGFlowHttpApiAuth | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -44,6 +45,7 @@ class TestAuthorization: | |||
| class TestDatasetsDeletion: | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_message, remaining", | |||
| [ | |||
| @@ -78,6 +80,7 @@ class TestDatasetsDeletion: | |||
| res = list_datasets(get_http_api_auth) | |||
| assert len(res["data"]) == remaining | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "payload", | |||
| [ | |||
| @@ -98,6 +101,7 @@ class TestDatasetsDeletion: | |||
| res = list_datasets(get_http_api_auth) | |||
| assert len(res["data"]) == 0 | |||
| @pytest.mark.p2 | |||
| def test_repeated_deletion(self, get_http_api_auth, add_datasets_func): | |||
| dataset_ids = add_datasets_func | |||
| res = delete_datasets(get_http_api_auth, {"ids": dataset_ids}) | |||
| @@ -107,6 +111,7 @@ class TestDatasetsDeletion: | |||
| assert res["code"] == 102 | |||
| assert "You don't own the dataset" in res["message"] | |||
| @pytest.mark.p2 | |||
| def test_duplicate_deletion(self, get_http_api_auth, add_datasets_func): | |||
| dataset_ids = add_datasets_func | |||
| res = delete_datasets(get_http_api_auth, {"ids": dataset_ids + dataset_ids}) | |||
| @@ -117,7 +122,7 @@ class TestDatasetsDeletion: | |||
| res = list_datasets(get_http_api_auth) | |||
| assert len(res["data"]) == 0 | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_concurrent_deletion(self, get_http_api_auth): | |||
| ids = batch_create_datasets(get_http_api_auth, 100) | |||
| @@ -126,7 +131,7 @@ class TestDatasetsDeletion: | |||
| responses = [f.result() for f in futures] | |||
| assert all(r["code"] == 0 for r in responses) | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_delete_10k(self, get_http_api_auth): | |||
| ids = batch_create_datasets(get_http_api_auth, 10_000) | |||
| res = delete_datasets(get_http_api_auth, {"ids": ids}) | |||
| @@ -21,6 +21,7 @@ from libs.auth import RAGFlowHttpApiAuth | |||
| from libs.utils import is_sorted | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -41,12 +42,14 @@ class TestAuthorization: | |||
| @pytest.mark.usefixtures("add_datasets") | |||
| class TestDatasetsList: | |||
| @pytest.mark.p1 | |||
| def test_default(self, get_http_api_auth): | |||
| res = list_datasets(get_http_api_auth, params={}) | |||
| assert res["code"] == 0 | |||
| assert len(res["data"]) == 5 | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, expected_page_size, expected_message", | |||
| [ | |||
| @@ -79,6 +82,7 @@ class TestDatasetsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, expected_page_size, expected_message", | |||
| [ | |||
| @@ -118,6 +122,7 @@ class TestDatasetsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, assertions, expected_message", | |||
| [ | |||
| @@ -156,6 +161,7 @@ class TestDatasetsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, assertions, expected_message", | |||
| [ | |||
| @@ -192,6 +198,7 @@ class TestDatasetsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, expected_num, expected_message", | |||
| [ | |||
| @@ -212,6 +219,7 @@ class TestDatasetsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "dataset_id, expected_code, expected_num, expected_message", | |||
| [ | |||
| @@ -246,6 +254,7 @@ class TestDatasetsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "dataset_id, name, expected_code, expected_num, expected_message", | |||
| [ | |||
| @@ -278,13 +287,14 @@ class TestDatasetsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_concurrent_list(self, get_http_api_auth): | |||
| with ThreadPoolExecutor(max_workers=5) as executor: | |||
| futures = [executor.submit(list_datasets, get_http_api_auth) for i in range(100)] | |||
| responses = [f.result() for f in futures] | |||
| assert all(r["code"] == 0 for r in responses) | |||
| @pytest.mark.p3 | |||
| def test_invalid_params(self, get_http_api_auth): | |||
| params = {"a": "b"} | |||
| res = list_datasets(get_http_api_auth, params=params) | |||
| @@ -29,6 +29,7 @@ from libs.utils.file_utils import create_image_file | |||
| # TODO: Missing scenario for updating embedding_model with chunk_count != 0 | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -47,6 +48,7 @@ class TestAuthorization: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p1 | |||
| class TestDatasetUpdate: | |||
| @pytest.mark.parametrize( | |||
| "name, expected_code, expected_message", | |||
| @@ -235,7 +237,7 @@ class TestDatasetUpdate: | |||
| res = update_dataset(get_http_api_auth, dataset_id, {"unknown_field": 0}) | |||
| assert res["code"] == 100 | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_concurrent_update(self, get_http_api_auth, add_dataset_func): | |||
| dataset_id = add_dataset_func | |||
| @@ -20,6 +20,7 @@ from common import INVALID_API_TOKEN, bulk_upload_documents, delete_documnets, l | |||
| from libs.auth import RAGFlowHttpApiAuth | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -39,6 +40,7 @@ class TestAuthorization: | |||
| class TestDocumentsDeletion: | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_message, remaining", | |||
| [ | |||
| @@ -82,6 +84,7 @@ class TestDocumentsDeletion: | |||
| assert len(res["data"]["docs"]) == remaining | |||
| assert res["data"]["total"] == remaining | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "dataset_id, expected_code, expected_message", | |||
| [ | |||
| @@ -99,6 +102,7 @@ class TestDocumentsDeletion: | |||
| assert res["code"] == expected_code | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "payload", | |||
| [ | |||
| @@ -119,6 +123,7 @@ class TestDocumentsDeletion: | |||
| assert len(res["data"]["docs"]) == 0 | |||
| assert res["data"]["total"] == 0 | |||
| @pytest.mark.p2 | |||
| def test_repeated_deletion(self, get_http_api_auth, add_documents_func): | |||
| dataset_id, document_ids = add_documents_func | |||
| res = delete_documnets(get_http_api_auth, dataset_id, {"ids": document_ids}) | |||
| @@ -128,6 +133,7 @@ class TestDocumentsDeletion: | |||
| assert res["code"] == 102 | |||
| assert "Documents not found" in res["message"] | |||
| @pytest.mark.p2 | |||
| def test_duplicate_deletion(self, get_http_api_auth, add_documents_func): | |||
| dataset_id, document_ids = add_documents_func | |||
| res = delete_documnets(get_http_api_auth, dataset_id, {"ids": document_ids + document_ids}) | |||
| @@ -140,7 +146,7 @@ class TestDocumentsDeletion: | |||
| assert res["data"]["total"] == 0 | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_concurrent_deletion(get_http_api_auth, add_dataset, tmp_path): | |||
| documnets_num = 100 | |||
| dataset_id = add_dataset | |||
| @@ -160,7 +166,7 @@ def test_concurrent_deletion(get_http_api_auth, add_dataset, tmp_path): | |||
| assert all(r["code"] == 0 for r in responses) | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_delete_1k(get_http_api_auth, add_dataset, tmp_path): | |||
| documnets_num = 1_000 | |||
| dataset_id = add_dataset | |||
| @@ -24,6 +24,7 @@ from libs.utils import compare_by_hash | |||
| from requests import codes | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -45,6 +46,7 @@ class TestAuthorization: | |||
| assert response_json["message"] == expected_message | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "generate_test_files", | |||
| [ | |||
| @@ -81,6 +83,7 @@ def test_file_type_validation(get_http_api_auth, add_dataset, generate_test_file | |||
| class TestDocumentDownload: | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "document_id, expected_code, expected_message", | |||
| [ | |||
| @@ -105,6 +108,7 @@ class TestDocumentDownload: | |||
| assert response_json["code"] == expected_code | |||
| assert response_json["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "dataset_id, expected_code, expected_message", | |||
| [ | |||
| @@ -130,6 +134,7 @@ class TestDocumentDownload: | |||
| assert response_json["code"] == expected_code | |||
| assert response_json["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| def test_same_file_repeat(self, get_http_api_auth, add_documents, tmp_path, ragflow_tmp_dir): | |||
| num = 5 | |||
| dataset_id, document_ids = add_documents | |||
| @@ -147,7 +152,7 @@ class TestDocumentDownload: | |||
| ) | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_concurrent_download(get_http_api_auth, add_dataset, tmp_path): | |||
| document_count = 20 | |||
| dataset_id = add_dataset | |||
| @@ -21,6 +21,7 @@ from libs.auth import RAGFlowHttpApiAuth | |||
| from libs.utils import is_sorted | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -40,6 +41,7 @@ class TestAuthorization: | |||
| class TestDocumentsList: | |||
| @pytest.mark.p1 | |||
| def test_default(self, get_http_api_auth, add_documents): | |||
| dataset_id, _ = add_documents | |||
| res = list_documnets(get_http_api_auth, dataset_id) | |||
| @@ -47,6 +49,7 @@ class TestDocumentsList: | |||
| assert len(res["data"]["docs"]) == 5 | |||
| assert res["data"]["total"] == 5 | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "dataset_id, expected_code, expected_message", | |||
| [ | |||
| @@ -63,6 +66,7 @@ class TestDocumentsList: | |||
| assert res["code"] == expected_code | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, expected_page_size, expected_message", | |||
| [ | |||
| @@ -105,6 +109,7 @@ class TestDocumentsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, expected_page_size, expected_message", | |||
| [ | |||
| @@ -146,6 +151,7 @@ class TestDocumentsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, assertions, expected_message", | |||
| [ | |||
| @@ -174,6 +180,7 @@ class TestDocumentsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, assertions, expected_message", | |||
| [ | |||
| @@ -206,6 +213,7 @@ class TestDocumentsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_num", | |||
| [ | |||
| @@ -223,6 +231,7 @@ class TestDocumentsList: | |||
| assert len(res["data"]["docs"]) == expected_num | |||
| assert res["data"]["total"] == expected_num | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, expected_num, expected_message", | |||
| [ | |||
| @@ -257,6 +266,7 @@ class TestDocumentsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "document_id, expected_code, expected_num, expected_message", | |||
| [ | |||
| @@ -291,6 +301,7 @@ class TestDocumentsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "document_id, name, expected_code, expected_num, expected_message", | |||
| [ | |||
| @@ -328,7 +339,7 @@ class TestDocumentsList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_concurrent_list(self, get_http_api_auth, add_documents): | |||
| dataset_id, _ = add_documents | |||
| @@ -337,6 +348,7 @@ class TestDocumentsList: | |||
| responses = [f.result() for f in futures] | |||
| assert all(r["code"] == 0 for r in responses) | |||
| @pytest.mark.p3 | |||
| def test_invalid_params(self, get_http_api_auth, add_documents): | |||
| dataset_id, _ = add_documents | |||
| params = {"a": "b"} | |||
| @@ -51,6 +51,7 @@ def validate_document_details(auth, dataset_id, document_ids): | |||
| assert "Task done" in doc["progress_msg"] | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -73,31 +74,13 @@ class TestDocumentsParse: | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_message", | |||
| [ | |||
| pytest.param( | |||
| None, | |||
| 102, | |||
| """AttributeError("\'NoneType\' object has no attribute \'get\'")""", | |||
| marks=pytest.mark.skip, | |||
| ), | |||
| ({"document_ids": []}, 102, "`document_ids` is required"), | |||
| ( | |||
| {"document_ids": ["invalid_id"]}, | |||
| 102, | |||
| "Documents not found: ['invalid_id']", | |||
| ), | |||
| ( | |||
| {"document_ids": ["\n!?。;!?\"'"]}, | |||
| 102, | |||
| """Documents not found: [\'\\n!?。;!?"\\\'\']""", | |||
| ), | |||
| pytest.param( | |||
| "not json", | |||
| 102, | |||
| "AttributeError(\"'str' object has no attribute 'get'\")", | |||
| marks=pytest.mark.skip, | |||
| ), | |||
| (lambda r: {"document_ids": r[:1]}, 0, ""), | |||
| (lambda r: {"document_ids": r}, 0, ""), | |||
| pytest.param(None, 102, """AttributeError("\'NoneType\' object has no attribute \'get\'")""", marks=pytest.mark.skip), | |||
| pytest.param({"document_ids": []}, 102, "`document_ids` is required", marks=pytest.mark.p1), | |||
| pytest.param({"document_ids": ["invalid_id"]}, 102, "Documents not found: ['invalid_id']", marks=pytest.mark.p3), | |||
| pytest.param({"document_ids": ["\n!?。;!?\"'"]}, 102, """Documents not found: [\'\\n!?。;!?"\\\'\']""", marks=pytest.mark.p3), | |||
| pytest.param("not json", 102, "AttributeError(\"'str' object has no attribute 'get'\")", marks=pytest.mark.skip), | |||
| pytest.param(lambda r: {"document_ids": r[:1]}, 0, "", marks=pytest.mark.p1), | |||
| pytest.param(lambda r: {"document_ids": r}, 0, "", marks=pytest.mark.p1), | |||
| ], | |||
| ) | |||
| def test_basic_scenarios(self, get_http_api_auth, add_documents_func, payload, expected_code, expected_message): | |||
| @@ -112,6 +95,7 @@ class TestDocumentsParse: | |||
| condition(get_http_api_auth, dataset_id, payload["document_ids"]) | |||
| validate_document_details(get_http_api_auth, dataset_id, payload["document_ids"]) | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "dataset_id, expected_code, expected_message", | |||
| [ | |||
| @@ -139,9 +123,9 @@ class TestDocumentsParse: | |||
| @pytest.mark.parametrize( | |||
| "payload", | |||
| [ | |||
| lambda r: {"document_ids": ["invalid_id"] + r}, | |||
| lambda r: {"document_ids": r[:1] + ["invalid_id"] + r[1:3]}, | |||
| lambda r: {"document_ids": r + ["invalid_id"]}, | |||
| pytest.param(lambda r: {"document_ids": ["invalid_id"] + r}, marks=pytest.mark.p3), | |||
| pytest.param(lambda r: {"document_ids": r[:1] + ["invalid_id"] + r[1:3]}, marks=pytest.mark.p1), | |||
| pytest.param(lambda r: {"document_ids": r + ["invalid_id"]}, marks=pytest.mark.p3), | |||
| ], | |||
| ) | |||
| def test_parse_partial_invalid_document_id(self, get_http_api_auth, add_documents_func, payload): | |||
| @@ -156,6 +140,7 @@ class TestDocumentsParse: | |||
| validate_document_details(get_http_api_auth, dataset_id, document_ids) | |||
| @pytest.mark.p3 | |||
| def test_repeated_parse(self, get_http_api_auth, add_documents_func): | |||
| dataset_id, document_ids = add_documents_func | |||
| res = parse_documnets(get_http_api_auth, dataset_id, {"document_ids": document_ids}) | |||
| @@ -166,6 +151,7 @@ class TestDocumentsParse: | |||
| res = parse_documnets(get_http_api_auth, dataset_id, {"document_ids": document_ids}) | |||
| assert res["code"] == 0 | |||
| @pytest.mark.p3 | |||
| def test_duplicate_parse(self, get_http_api_auth, add_documents_func): | |||
| dataset_id, document_ids = add_documents_func | |||
| res = parse_documnets(get_http_api_auth, dataset_id, {"document_ids": document_ids + document_ids}) | |||
| @@ -178,8 +164,8 @@ class TestDocumentsParse: | |||
| validate_document_details(get_http_api_auth, dataset_id, document_ids) | |||
| @pytest.mark.slow | |||
| def test_parse_100_files(get_http_api_auth, add_datase_func, tmp_path): | |||
| @pytest.mark.p3 | |||
| def test_parse_100_files(get_http_api_auth, add_dataset_func, tmp_path): | |||
| @wait_for(100, 1, "Document parsing timeout") | |||
| def condition(_auth, _dataset_id, _document_num): | |||
| res = list_documnets(_auth, _dataset_id, {"page_size": _document_num}) | |||
| @@ -189,7 +175,7 @@ def test_parse_100_files(get_http_api_auth, add_datase_func, tmp_path): | |||
| return True | |||
| document_num = 100 | |||
| dataset_id = add_datase_func | |||
| dataset_id = add_dataset_func | |||
| document_ids = bulk_upload_documents(get_http_api_auth, dataset_id, document_num, tmp_path) | |||
| res = parse_documnets(get_http_api_auth, dataset_id, {"document_ids": document_ids}) | |||
| assert res["code"] == 0 | |||
| @@ -199,8 +185,8 @@ def test_parse_100_files(get_http_api_auth, add_datase_func, tmp_path): | |||
| validate_document_details(get_http_api_auth, dataset_id, document_ids) | |||
| @pytest.mark.slow | |||
| def test_concurrent_parse(get_http_api_auth, add_datase_func, tmp_path): | |||
| @pytest.mark.p3 | |||
| def test_concurrent_parse(get_http_api_auth, add_dataset_func, tmp_path): | |||
| @wait_for(120, 1, "Document parsing timeout") | |||
| def condition(_auth, _dataset_id, _document_num): | |||
| res = list_documnets(_auth, _dataset_id, {"page_size": _document_num}) | |||
| @@ -210,7 +196,7 @@ def test_concurrent_parse(get_http_api_auth, add_datase_func, tmp_path): | |||
| return True | |||
| document_num = 100 | |||
| dataset_id = add_datase_func | |||
| dataset_id = add_dataset_func | |||
| document_ids = bulk_upload_documents(get_http_api_auth, dataset_id, document_num, tmp_path) | |||
| with ThreadPoolExecutor(max_workers=5) as executor: | |||
| @@ -41,6 +41,7 @@ def validate_document_parse_cancel(auth, dataset_id, document_ids): | |||
| assert doc["progress"] == 0.0 | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -64,31 +65,13 @@ class TestDocumentsParseStop: | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_message", | |||
| [ | |||
| pytest.param( | |||
| None, | |||
| 102, | |||
| """AttributeError("\'NoneType\' object has no attribute \'get\'")""", | |||
| marks=pytest.mark.skip, | |||
| ), | |||
| ({"document_ids": []}, 102, "`document_ids` is required"), | |||
| ( | |||
| {"document_ids": ["invalid_id"]}, | |||
| 102, | |||
| "You don't own the document invalid_id.", | |||
| ), | |||
| ( | |||
| {"document_ids": ["\n!?。;!?\"'"]}, | |||
| 102, | |||
| """You don\'t own the document \n!?。;!?"\'.""", | |||
| ), | |||
| pytest.param( | |||
| "not json", | |||
| 102, | |||
| "AttributeError(\"'str' object has no attribute 'get'\")", | |||
| marks=pytest.mark.skip, | |||
| ), | |||
| (lambda r: {"document_ids": r[:1]}, 0, ""), | |||
| (lambda r: {"document_ids": r}, 0, ""), | |||
| pytest.param(None, 102, """AttributeError("\'NoneType\' object has no attribute \'get\'")""", marks=pytest.mark.skip), | |||
| pytest.param({"document_ids": []}, 102, "`document_ids` is required", marks=pytest.mark.p1), | |||
| pytest.param({"document_ids": ["invalid_id"]}, 102, "You don't own the document invalid_id.", marks=pytest.mark.p3), | |||
| pytest.param({"document_ids": ["\n!?。;!?\"'"]}, 102, """You don\'t own the document \n!?。;!?"\'.""", marks=pytest.mark.p3), | |||
| pytest.param("not json", 102, "AttributeError(\"'str' object has no attribute 'get'\")", marks=pytest.mark.skip), | |||
| pytest.param(lambda r: {"document_ids": r[:1]}, 0, "", marks=pytest.mark.p1), | |||
| pytest.param(lambda r: {"document_ids": r}, 0, "", marks=pytest.mark.p1), | |||
| ], | |||
| ) | |||
| def test_basic_scenarios(self, get_http_api_auth, add_documents_func, payload, expected_code, expected_message): | |||
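Rewriting the plain parameter tuples as `pytest.param(...)` entries is what allows one parametrized test to contribute cases at different priorities: each case carries its own `marks=`, and marker selection then keeps or drops cases individually rather than whole tests. A condensed sketch of the mechanism; the test name and payloads are illustrative, not lifted from the suite:

```python
import pytest


@pytest.mark.parametrize(
    "payload, expected_code",
    [
        # Happy path: selected at every level.
        pytest.param({"document_ids": ["id-1"]}, 0, marks=pytest.mark.p1),
        # Edge case: collected only when p3-marked cases are selected.
        pytest.param({"document_ids": ["invalid_id"]}, 102, marks=pytest.mark.p3),
        # Known issue: kept in the table but never executed.
        pytest.param(None, 102, marks=pytest.mark.skip),
    ],
)
def test_stop_parse_example(payload, expected_code):  # illustrative name
    ...
```

Under `-m "p1"` only the first case runs; the p3 case is deselected, and the skip-marked case never executes at any level.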
| @@ -116,6 +99,7 @@ class TestDocumentsParseStop: | |||
| validate_document_parse_cancel(get_http_api_auth, dataset_id, payload["document_ids"]) | |||
| validate_document_parse_done(get_http_api_auth, dataset_id, completed_document_ids) | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "invalid_dataset_id, expected_code, expected_message", | |||
| [ | |||
| @@ -162,6 +146,7 @@ class TestDocumentsParseStop: | |||
| validate_document_parse_cancel(get_http_api_auth, dataset_id, document_ids) | |||
| @pytest.mark.p3 | |||
| def test_repeated_stop_parse(self, get_http_api_auth, add_documents_func): | |||
| dataset_id, document_ids = add_documents_func | |||
| parse_documnets(get_http_api_auth, dataset_id, {"document_ids": document_ids}) | |||
| @@ -172,6 +157,7 @@ class TestDocumentsParseStop: | |||
| assert res["code"] == 102 | |||
| assert res["message"] == "Can't stop parsing document with progress at 0 or 1" | |||
| @pytest.mark.p3 | |||
| def test_duplicate_stop_parse(self, get_http_api_auth, add_documents_func): | |||
| dataset_id, document_ids = add_documents_func | |||
| parse_documnets(get_http_api_auth, dataset_id, {"document_ids": document_ids}) | |||
| @@ -181,10 +167,10 @@ class TestDocumentsParseStop: | |||
| assert f"Duplicate document ids: {document_ids[0]}" in res["data"]["errors"] | |||
| @pytest.mark.slow | |||
| def test_stop_parse_100_files(get_http_api_auth, add_datase_func, tmp_path): | |||
| @pytest.mark.p3 | |||
| def test_stop_parse_100_files(get_http_api_auth, add_dataset_func, tmp_path): | |||
| document_num = 100 | |||
| dataset_id = add_datase_func | |||
| dataset_id = add_dataset_func | |||
| document_ids = bulk_upload_documents(get_http_api_auth, dataset_id, document_num, tmp_path) | |||
| parse_documnets(get_http_api_auth, dataset_id, {"document_ids": document_ids}) | |||
| res = stop_parse_documnets(get_http_api_auth, dataset_id, {"document_ids": document_ids}) | |||
| @@ -192,10 +178,10 @@ def test_stop_parse_100_files(get_http_api_auth, add_datase_func, tmp_path): | |||
| validate_document_parse_cancel(get_http_api_auth, dataset_id, document_ids) | |||
| @pytest.mark.slow | |||
| def test_concurrent_parse(get_http_api_auth, add_datase_func, tmp_path): | |||
| @pytest.mark.p3 | |||
| def test_concurrent_parse(get_http_api_auth, add_dataset_func, tmp_path): | |||
| document_num = 50 | |||
| dataset_id = add_datase_func | |||
| dataset_id = add_dataset_func | |||
| document_ids = bulk_upload_documents(get_http_api_auth, dataset_id, document_num, tmp_path) | |||
| parse_documnets(get_http_api_auth, dataset_id, {"document_ids": document_ids}) | |||
| @@ -20,6 +20,7 @@ from common import DOCUMENT_NAME_LIMIT, INVALID_API_TOKEN, list_documnets, updat | |||
| from libs.auth import RAGFlowHttpApiAuth | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -39,6 +40,7 @@ class TestAuthorization: | |||
| class TestDocumentsUpdated: | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "name, expected_code, expected_message", | |||
| [ | |||
| @@ -90,6 +92,7 @@ class TestDocumentsUpdated: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "document_id, expected_code, expected_message", | |||
| [ | |||
| @@ -107,6 +110,7 @@ class TestDocumentsUpdated: | |||
| assert res["code"] == expected_code | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "dataset_id, expected_code, expected_message", | |||
| [ | |||
| @@ -124,6 +128,7 @@ class TestDocumentsUpdated: | |||
| assert res["code"] == expected_code | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "meta_fields, expected_code, expected_message", | |||
| [({"test": "test"}, 0, ""), ("test", 102, "meta_fields must be a dictionary")], | |||
| @@ -137,6 +142,7 @@ class TestDocumentsUpdated: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "chunk_method, expected_code, expected_message", | |||
| [ | |||
| @@ -174,6 +180,7 @@ class TestDocumentsUpdated: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_message", | |||
| [ | |||
| @@ -293,6 +300,7 @@ class TestDocumentsUpdated: | |||
| class TestUpdateDocumentParserConfig: | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "chunk_method, parser_config, expected_code, expected_message", | |||
| [ | |||
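Two granularities of marking appear in the hunks above: a class-level `@pytest.mark.p1` on the small `TestAuthorization` classes, which applies to every test inside, and per-method marks inside the larger classes such as `TestDocumentsUpdated`. Class-level and method-level marks are additive in pytest, so if both were used on the same class a method would simply carry both; a small sketch with hypothetical names:

```python
import pytest


@pytest.mark.p1                      # applies to every test method in the class
class TestAuthExample:               # hypothetical class mirroring the pattern above
    def test_valid_token(self):
        ...

    @pytest.mark.p3                  # stacked on top of the inherited p1 mark
    def test_malformed_token(self):
        ...                          # carries both p1 and p3, so every level selects it
```

In this PR the two forms never actually overlap: the class-level mark is reserved for the authorization classes, while everything else is marked per method or per parameter.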
| @@ -25,6 +25,7 @@ from libs.utils.file_utils import create_txt_file | |||
| from requests_toolbelt import MultipartEncoder | |||
| @pytest.mark.p1 | |||
| @pytest.mark.usefixtures("clear_datasets") | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| @@ -45,6 +46,7 @@ class TestAuthorization: | |||
| class TestDocumentsUpload: | |||
| @pytest.mark.p1 | |||
| def test_valid_single_upload(self, get_http_api_auth, add_dataset_func, tmp_path): | |||
| dataset_id = add_dataset_func | |||
| fp = create_txt_file(tmp_path / "ragflow_test.txt") | |||
| @@ -53,6 +55,7 @@ class TestDocumentsUpload: | |||
| assert res["data"][0]["dataset_id"] == dataset_id | |||
| assert res["data"][0]["name"] == fp.name | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "generate_test_files", | |||
| [ | |||
| @@ -77,6 +80,7 @@ class TestDocumentsUpload: | |||
| assert res["data"][0]["dataset_id"] == dataset_id | |||
| assert res["data"][0]["name"] == fp.name | |||
| @pytest.mark.p2 | |||
| @pytest.mark.parametrize( | |||
| "file_type", | |||
| ["exe", "unknown"], | |||
| @@ -89,12 +93,14 @@ class TestDocumentsUpload: | |||
| assert res["code"] == 500 | |||
| assert res["message"] == f"ragflow_test.{file_type}: This type of file has not been supported yet!" | |||
| @pytest.mark.p2 | |||
| def test_missing_file(self, get_http_api_auth, add_dataset_func): | |||
| dataset_id = add_dataset_func | |||
| res = upload_documnets(get_http_api_auth, dataset_id) | |||
| assert res["code"] == 101 | |||
| assert res["message"] == "No file part!" | |||
| @pytest.mark.p3 | |||
| def test_empty_file(self, get_http_api_auth, add_dataset_func, tmp_path): | |||
| dataset_id = add_dataset_func | |||
| fp = tmp_path / "empty.txt" | |||
| @@ -104,6 +110,7 @@ class TestDocumentsUpload: | |||
| assert res["code"] == 0 | |||
| assert res["data"][0]["size"] == 0 | |||
| @pytest.mark.p3 | |||
| def test_filename_empty(self, get_http_api_auth, add_dataset_func, tmp_path): | |||
| dataset_id = add_dataset_func | |||
| fp = create_txt_file(tmp_path / "ragflow_test.txt") | |||
| @@ -119,6 +126,7 @@ class TestDocumentsUpload: | |||
| assert res.json()["code"] == 101 | |||
| assert res.json()["message"] == "No file selected!" | |||
| @pytest.mark.p2 | |||
| def test_filename_exceeds_max_length(self, get_http_api_auth, add_dataset_func, tmp_path): | |||
| dataset_id = add_dataset_func | |||
| # filename_length = 129 | |||
| @@ -127,12 +135,14 @@ class TestDocumentsUpload: | |||
| assert res["code"] == 101 | |||
| assert res["message"] == "File name should be less than 128 bytes." | |||
| @pytest.mark.p2 | |||
| def test_invalid_dataset_id(self, get_http_api_auth, tmp_path): | |||
| fp = create_txt_file(tmp_path / "ragflow_test.txt") | |||
| res = upload_documnets(get_http_api_auth, "invalid_dataset_id", [fp]) | |||
| assert res["code"] == 100 | |||
| assert res["message"] == """LookupError("Can\'t find the dataset with ID invalid_dataset_id!")""" | |||
| @pytest.mark.p2 | |||
| def test_duplicate_files(self, get_http_api_auth, add_dataset_func, tmp_path): | |||
| dataset_id = add_dataset_func | |||
| fp = create_txt_file(tmp_path / "ragflow_test.txt") | |||
| @@ -146,6 +156,7 @@ class TestDocumentsUpload: | |||
| expected_name = f"{fp.stem}({i}){fp.suffix}" | |||
| assert res["data"][i]["name"] == expected_name | |||
| @pytest.mark.p2 | |||
| def test_same_file_repeat(self, get_http_api_auth, add_dataset_func, tmp_path): | |||
| dataset_id = add_dataset_func | |||
| fp = create_txt_file(tmp_path / "ragflow_test.txt") | |||
| @@ -159,6 +170,7 @@ class TestDocumentsUpload: | |||
| expected_name = f"{fp.stem}({i}){fp.suffix}" | |||
| assert res["data"][0]["name"] == expected_name | |||
| @pytest.mark.p3 | |||
| def test_filename_special_characters(self, get_http_api_auth, add_dataset_func, tmp_path): | |||
| dataset_id = add_dataset_func | |||
| illegal_chars = '<>:"/\\|?*' | |||
| @@ -173,6 +185,7 @@ class TestDocumentsUpload: | |||
| assert res["data"][0]["dataset_id"] == dataset_id | |||
| assert res["data"][0]["name"] == fp.name | |||
| @pytest.mark.p1 | |||
| def test_multiple_files(self, get_http_api_auth, add_dataset_func, tmp_path): | |||
| dataset_id = add_dataset_func | |||
| expected_document_count = 20 | |||
| @@ -186,7 +199,7 @@ class TestDocumentsUpload: | |||
| res = list_datasets(get_http_api_auth, {"id": dataset_id}) | |||
| assert res["data"][0]["document_count"] == expected_document_count | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_concurrent_upload(self, get_http_api_auth, add_dataset_func, tmp_path): | |||
| dataset_id = add_dataset_func | |||
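The concurrency tests that previously carried `@pytest.mark.slow` (such as `test_concurrent_upload` above) are now `p3`, so they only run when low-priority cases are selected. They all follow the same shape: submit the same API call many times through a `ThreadPoolExecutor` and assert that every response succeeded. A stripped-down sketch of that pattern; `upload_one` and `payloads` are placeholders, not names from the suite:

```python
from concurrent.futures import ThreadPoolExecutor, as_completed


def run_concurrently(call, payloads, max_workers=5):
    """Submit `call` once per payload and gather every result."""
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(call, payload) for payload in payloads]
        return [future.result() for future in as_completed(futures)]


# Usage inside a p3 test (illustrative):
# responses = run_concurrently(upload_one, payloads)
# assert all(r["code"] == 0 for r in responses)
```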
| @@ -20,6 +20,7 @@ from common import INVALID_API_TOKEN, SESSION_WITH_CHAT_NAME_LIMIT, create_sessi | |||
| from libs.auth import RAGFlowHttpApiAuth | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -40,6 +41,7 @@ class TestAuthorization: | |||
| @pytest.mark.usefixtures("clear_session_with_chat_assistants") | |||
| class TestSessionWithChatAssistantCreate: | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_message", | |||
| [ | |||
| @@ -66,6 +68,7 @@ class TestSessionWithChatAssistantCreate: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "chat_assistant_id, expected_code, expected_message", | |||
| [ | |||
| @@ -78,7 +81,7 @@ class TestSessionWithChatAssistantCreate: | |||
| assert res["code"] == expected_code | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_concurrent_create_session(self, get_http_api_auth, add_chat_assistants): | |||
| chunk_num = 1000 | |||
| _, _, chat_assistant_ids = add_chat_assistants | |||
| @@ -104,6 +107,7 @@ class TestSessionWithChatAssistantCreate: | |||
| assert False, res | |||
| assert len(res["data"]) == chunks_count + chunk_num | |||
| @pytest.mark.p3 | |||
| def test_add_session_to_deleted_chat_assistant(self, get_http_api_auth, add_chat_assistants): | |||
| _, _, chat_assistant_ids = add_chat_assistants | |||
| res = delete_chat_assistants(get_http_api_auth, {"ids": [chat_assistant_ids[0]]}) | |||
| @@ -20,6 +20,7 @@ from common import INVALID_API_TOKEN, batch_add_sessions_with_chat_assistant, de | |||
| from libs.auth import RAGFlowHttpApiAuth | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -39,6 +40,7 @@ class TestAuthorization: | |||
| class TestSessionWithChatAssistantDelete: | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "chat_assistant_id, expected_code, expected_message", | |||
| [ | |||
| @@ -59,9 +61,9 @@ class TestSessionWithChatAssistantDelete: | |||
| @pytest.mark.parametrize( | |||
| "payload", | |||
| [ | |||
| lambda r: {"ids": ["invalid_id"] + r}, | |||
| lambda r: {"ids": r[:1] + ["invalid_id"] + r[1:5]}, | |||
| lambda r: {"ids": r + ["invalid_id"]}, | |||
| pytest.param(lambda r: {"ids": ["invalid_id"] + r}, marks=pytest.mark.p3), | |||
| pytest.param(lambda r: {"ids": r[:1] + ["invalid_id"] + r[1:5]}, marks=pytest.mark.p1), | |||
| pytest.param(lambda r: {"ids": r + ["invalid_id"]}, marks=pytest.mark.p3), | |||
| ], | |||
| ) | |||
| def test_delete_partial_invalid_id(self, get_http_api_auth, add_sessions_with_chat_assistant_func, payload): | |||
| @@ -77,6 +79,7 @@ class TestSessionWithChatAssistantDelete: | |||
| assert False, res | |||
| assert len(res["data"]) == 0 | |||
| @pytest.mark.p3 | |||
| def test_repeated_deletion(self, get_http_api_auth, add_sessions_with_chat_assistant_func): | |||
| chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func | |||
| payload = {"ids": session_ids} | |||
| @@ -87,6 +90,7 @@ class TestSessionWithChatAssistantDelete: | |||
| assert res["code"] == 102 | |||
| assert "The chat doesn't own the session" in res["message"] | |||
| @pytest.mark.p3 | |||
| def test_duplicate_deletion(self, get_http_api_auth, add_sessions_with_chat_assistant_func): | |||
| chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func | |||
| res = delete_session_with_chat_assistants(get_http_api_auth, chat_assistant_id, {"ids": session_ids * 2}) | |||
| @@ -99,7 +103,7 @@ class TestSessionWithChatAssistantDelete: | |||
| assert False, res | |||
| assert len(res["data"]) == 0 | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_concurrent_deletion(self, get_http_api_auth, add_chat_assistants): | |||
| sessions_num = 100 | |||
| _, _, chat_assistant_ids = add_chat_assistants | |||
| @@ -118,7 +122,7 @@ class TestSessionWithChatAssistantDelete: | |||
| responses = [f.result() for f in futures] | |||
| assert all(r["code"] == 0 for r in responses) | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_delete_1k(self, get_http_api_auth, add_chat_assistants): | |||
| sessions_num = 1_000 | |||
| _, _, chat_assistant_ids = add_chat_assistants | |||
| @@ -136,17 +140,11 @@ class TestSessionWithChatAssistantDelete: | |||
| "payload, expected_code, expected_message, remaining", | |||
| [ | |||
| pytest.param(None, 0, """TypeError("argument of type \'NoneType\' is not iterable")""", 0, marks=pytest.mark.skip), | |||
| ({"ids": ["invalid_id"]}, 102, "The chat doesn't own the session invalid_id", 5), | |||
| pytest.param( | |||
| "not json", | |||
| 100, | |||
| """AttributeError("\'str\' object has no attribute \'get\'")""", | |||
| 5, | |||
| marks=pytest.mark.skip, | |||
| ), | |||
| (lambda r: {"ids": r[:1]}, 0, "", 4), | |||
| (lambda r: {"ids": r}, 0, "", 0), | |||
| ({"ids": []}, 0, "", 0), | |||
| pytest.param({"ids": ["invalid_id"]}, 102, "The chat doesn't own the session invalid_id", 5, marks=pytest.mark.p3), | |||
| pytest.param("not json", 100, """AttributeError("\'str\' object has no attribute \'get\'")""", 5, marks=pytest.mark.skip), | |||
| pytest.param(lambda r: {"ids": r[:1]}, 0, "", 4, marks=pytest.mark.p3), | |||
| pytest.param(lambda r: {"ids": r}, 0, "", 0, marks=pytest.mark.p1), | |||
| pytest.param({"ids": []}, 0, "", 0, marks=pytest.mark.p3), | |||
| ], | |||
| ) | |||
| def test_basic_scenarios( | |||
| @@ -21,6 +21,7 @@ from libs.auth import RAGFlowHttpApiAuth | |||
| from libs.utils import is_sorted | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -40,6 +41,7 @@ class TestAuthorization: | |||
| class TestSessionsWithChatAssistantList: | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, expected_page_size, expected_message", | |||
| [ | |||
| @@ -61,6 +63,7 @@ class TestSessionsWithChatAssistantList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, expected_page_size, expected_message", | |||
| [ | |||
| @@ -82,6 +85,7 @@ class TestSessionsWithChatAssistantList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, assertions, expected_message", | |||
| [ | |||
| @@ -110,6 +114,7 @@ class TestSessionsWithChatAssistantList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, assertions, expected_message", | |||
| [ | |||
| @@ -142,6 +147,7 @@ class TestSessionsWithChatAssistantList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "params, expected_code, expected_num, expected_message", | |||
| [ | |||
| @@ -163,6 +169,7 @@ class TestSessionsWithChatAssistantList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p1 | |||
| @pytest.mark.parametrize( | |||
| "session_id, expected_code, expected_num, expected_message", | |||
| [ | |||
| @@ -189,6 +196,7 @@ class TestSessionsWithChatAssistantList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "session_id, name, expected_code, expected_num, expected_message", | |||
| [ | |||
| @@ -212,7 +220,7 @@ class TestSessionsWithChatAssistantList: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_concurrent_list(self, get_http_api_auth, add_sessions_with_chat_assistant): | |||
| chat_assistant_id, _ = add_sessions_with_chat_assistant | |||
| with ThreadPoolExecutor(max_workers=5) as executor: | |||
| @@ -220,6 +228,7 @@ class TestSessionsWithChatAssistantList: | |||
| responses = [f.result() for f in futures] | |||
| assert all(r["code"] == 0 for r in responses) | |||
| @pytest.mark.p3 | |||
| def test_invalid_params(self, get_http_api_auth, add_sessions_with_chat_assistant): | |||
| chat_assistant_id, _ = add_sessions_with_chat_assistant | |||
| params = {"a": "b"} | |||
| @@ -227,6 +236,7 @@ class TestSessionsWithChatAssistantList: | |||
| assert res["code"] == 0 | |||
| assert len(res["data"]) == 5 | |||
| @pytest.mark.p3 | |||
| def test_list_chats_after_deleting_associated_chat_assistant(self, get_http_api_auth, add_sessions_with_chat_assistant): | |||
| chat_assistant_id, _ = add_sessions_with_chat_assistant | |||
| res = delete_chat_assistants(get_http_api_auth, {"ids": [chat_assistant_id]}) | |||
| @@ -21,6 +21,7 @@ from common import INVALID_API_TOKEN, SESSION_WITH_CHAT_NAME_LIMIT, delete_chat_ | |||
| from libs.auth import RAGFlowHttpApiAuth | |||
| @pytest.mark.p1 | |||
| class TestAuthorization: | |||
| @pytest.mark.parametrize( | |||
| "auth, expected_code, expected_message", | |||
| @@ -43,12 +44,12 @@ class TestSessionWithChatAssistantUpdate: | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_message", | |||
| [ | |||
| ({"name": "valid_name"}, 0, ""), | |||
| pytest.param({"name": "valid_name"}, 0, "", marks=pytest.mark.p1), | |||
| pytest.param({"name": "a" * (SESSION_WITH_CHAT_NAME_LIMIT + 1)}, 102, "", marks=pytest.mark.skip(reason="issues/")), | |||
| pytest.param({"name": 1}, 100, "", marks=pytest.mark.skip(reason="issues/")), | |||
| ({"name": ""}, 102, "`name` can not be empty."), | |||
| ({"name": "duplicated_name"}, 0, ""), | |||
| ({"name": "case insensitive"}, 0, ""), | |||
| pytest.param({"name": ""}, 102, "`name` can not be empty.", marks=pytest.mark.p3), | |||
| pytest.param({"name": "duplicated_name"}, 0, "", marks=pytest.mark.p3), | |||
| pytest.param({"name": "case insensitive"}, 0, "", marks=pytest.mark.p3), | |||
| ], | |||
| ) | |||
| def test_name(self, get_http_api_auth, add_sessions_with_chat_assistant_func, payload, expected_code, expected_message): | |||
| @@ -66,6 +67,7 @@ class TestSessionWithChatAssistantUpdate: | |||
| else: | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "chat_assistant_id, expected_code, expected_message", | |||
| [ | |||
| @@ -79,6 +81,7 @@ class TestSessionWithChatAssistantUpdate: | |||
| assert res["code"] == expected_code | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "session_id, expected_code, expected_message", | |||
| [ | |||
| @@ -92,6 +95,7 @@ class TestSessionWithChatAssistantUpdate: | |||
| assert res["code"] == expected_code | |||
| assert res["message"] == expected_message | |||
| @pytest.mark.p3 | |||
| def test_repeated_update_session(self, get_http_api_auth, add_sessions_with_chat_assistant_func): | |||
| chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func | |||
| res = update_session_with_chat_assistant(get_http_api_auth, chat_assistant_id, session_ids[0], {"name": "valid_name_1"}) | |||
| @@ -100,6 +104,7 @@ class TestSessionWithChatAssistantUpdate: | |||
| res = update_session_with_chat_assistant(get_http_api_auth, chat_assistant_id, session_ids[0], {"name": "valid_name_2"}) | |||
| assert res["code"] == 0 | |||
| @pytest.mark.p3 | |||
| @pytest.mark.parametrize( | |||
| "payload, expected_code, expected_message", | |||
| [ | |||
| @@ -115,7 +120,7 @@ class TestSessionWithChatAssistantUpdate: | |||
| if expected_code != 0: | |||
| assert expected_message in res["message"] | |||
| @pytest.mark.slow | |||
| @pytest.mark.p3 | |||
| def test_concurrent_update_session(self, get_http_api_auth, add_sessions_with_chat_assistant_func): | |||
| chunk_num = 50 | |||
| chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func | |||
| @@ -134,6 +139,7 @@ class TestSessionWithChatAssistantUpdate: | |||
| responses = [f.result() for f in futures] | |||
| assert all(r["code"] == 0 for r in responses) | |||
| @pytest.mark.p3 | |||
| def test_update_session_to_deleted_chat_assistant(self, get_http_api_auth, add_sessions_with_chat_assistant_func): | |||
| chat_assistant_id, session_ids = add_sessions_with_chat_assistant_func | |||
| delete_chat_assistants(get_http_api_auth, {"ids": [chat_assistant_id]}) | |||
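Taken together, the markers turn run scope into a selection problem: p1 lands on the authorization and basic happy-path cases, p2 on the broader functional cases, and p3 on the edge, repetition, and former `@pytest.mark.slow` concurrency and scale tests. Independently of how the suite's own CLI wires this up, the same selection can always be expressed as a plain marker expression; a minimal programmatic sketch, with the arguments purely illustrative:

```python
import pytest

# Narrow scope: collect only p1-marked tests and parametrized cases.
exit_code = pytest.main(["-s", "--tb=short", "-m", "p1"])

# Widening the expression widens the scope, e.g. "-m", "p1 or p2" for the
# medium-priority set, or "p1 or p2 or p3" for the full suite including
# the former slow tests.
```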