### What problem does this PR solve?

Speeds up knowledge graph extraction in `build_knowlege_graph_chunks`: the chunk batch size is raised from 1 to 4 and the `ThreadPoolExecutor` worker count from 12 to 50, so more LLM extraction calls run concurrently.

### Type of change

- [x] Performance Improvement
```diff
@@ -61,11 +61,11 @@ def build_knowlege_graph_chunks(tenant_id: str, chunks: List[str], callback, ent
     assert left_token_count > 0, f"The LLM context length({llm_bdl.max_length}) is smaller than prompt({ext.prompt_token_count})"
-    BATCH_SIZE=1
+    BATCH_SIZE=4
     texts, graphs = [], []
     cnt = 0
     threads = []
-    exe = ThreadPoolExecutor(max_workers=12)
+    exe = ThreadPoolExecutor(max_workers=50)
     for i in range(len(chunks)):
         tkn_cnt = num_tokens_from_string(chunks[i])
         if cnt+tkn_cnt >= left_token_count and texts:
```
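For context, the hunk above sits inside a loop that packs chunks into token-bounded batches and submits each batch to the thread pool. Below is a minimal, self-contained sketch of that pattern, not RAGFlow's actual implementation: `count_tokens`, `extract_graph`, and `batch_and_dispatch` are hypothetical stand-ins (the real code uses `num_tokens_from_string` and an LLM-backed extractor), and the exact way `BATCH_SIZE` caps a batch is an assumption.

```python
from concurrent.futures import ThreadPoolExecutor
from typing import List

def count_tokens(text: str) -> int:
    # Stand-in for num_tokens_from_string(); rough heuristic of ~4 chars/token.
    return max(1, len(text) // 4)

def extract_graph(texts: List[str]) -> dict:
    # Placeholder for the per-batch LLM extraction call.
    return {"chunks_in_batch": len(texts)}

def batch_and_dispatch(chunks: List[str], left_token_count: int,
                       batch_size: int = 4, max_workers: int = 50) -> List[dict]:
    # Pack chunks into batches bounded by the remaining LLM context window
    # and (assumed) by batch_size, then run one extraction call per batch.
    batches: List[List[str]] = []
    texts: List[str] = []
    cnt = 0
    for chunk in chunks:
        tkn_cnt = count_tokens(chunk)
        # Flush when the batch would exceed the token budget or the size cap.
        if texts and (cnt + tkn_cnt >= left_token_count or len(texts) >= batch_size):
            batches.append(texts)
            texts, cnt = [], 0
        texts.append(chunk)
        cnt += tkn_cnt
    if texts:  # flush the final partial batch
        batches.append(texts)
    # Dispatch all batches concurrently and gather results in submission order.
    with ThreadPoolExecutor(max_workers=max_workers) as exe:
        futures = [exe.submit(extract_graph, batch) for batch in batches]
        return [f.result() for f in futures]

if __name__ == "__main__":
    docs = [f"chunk {i} " * 50 for i in range(10)]
    print(batch_and_dispatch(docs, left_token_count=200))
```

The direction of the change is consistent with I/O-bound work: each extraction call mostly waits on the LLM service, so larger batches amortize per-request prompt overhead and more workers raise concurrency, at the cost of heavier simultaneous load on the LLM endpoint.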