
website_service.py

import datetime
import json
from dataclasses import dataclass
from typing import Any

import httpx
from flask_login import current_user

from core.helper import encrypter
from core.rag.extractor.firecrawl.firecrawl_app import FirecrawlApp
from core.rag.extractor.watercrawl.provider import WaterCrawlProvider
from extensions.ext_redis import redis_client
from extensions.ext_storage import storage
from services.datasource_provider_service import DatasourceProviderService


@dataclass
class CrawlOptions:
    """Options for crawling operations."""

    limit: int = 1
    crawl_sub_pages: bool = False
    only_main_content: bool = False
    includes: str | None = None
    excludes: str | None = None
    max_depth: int | None = None
    use_sitemap: bool = True

    def get_include_paths(self) -> list[str]:
        """Get list of include paths from comma-separated string."""
        return self.includes.split(",") if self.includes else []

    def get_exclude_paths(self) -> list[str]:
        """Get list of exclude paths from comma-separated string."""
        return self.excludes.split(",") if self.excludes else []
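
# Illustrative example (not part of the original module): the comma-separated
# path filters above are split into lists, e.g.
#   CrawlOptions(includes="/docs/*,/blog/*").get_include_paths()  -> ["/docs/*", "/blog/*"]
#   CrawlOptions().get_exclude_paths()                            -> []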


@dataclass
class CrawlRequest:
    """Request container for crawling operations."""

    url: str
    provider: str
    options: CrawlOptions


@dataclass
class ScrapeRequest:
    """Request container for scraping operations."""

    provider: str
    url: str
    tenant_id: str
    only_main_content: bool


@dataclass
class WebsiteCrawlApiRequest:
    """Request container for website crawl API arguments."""

    provider: str
    url: str
    options: dict[str, Any]

    def to_crawl_request(self) -> CrawlRequest:
        """Convert API request to internal CrawlRequest."""
        options = CrawlOptions(
            limit=self.options.get("limit", 1),
            crawl_sub_pages=self.options.get("crawl_sub_pages", False),
            only_main_content=self.options.get("only_main_content", False),
            includes=self.options.get("includes"),
            excludes=self.options.get("excludes"),
            max_depth=self.options.get("max_depth"),
            use_sitemap=self.options.get("use_sitemap", True),
        )
        return CrawlRequest(url=self.url, provider=self.provider, options=options)

    @classmethod
    def from_args(cls, args: dict) -> "WebsiteCrawlApiRequest":
        """Create from Flask-RESTful parsed arguments."""
        provider = args.get("provider")
        url = args.get("url")
        options = args.get("options", {})

        if not provider:
            raise ValueError("Provider is required")
        if not url:
            raise ValueError("URL is required")
        if not options:
            raise ValueError("Options are required")

        return cls(provider=provider, url=url, options=options)


@dataclass
class WebsiteCrawlStatusApiRequest:
    """Request container for website crawl status API arguments."""

    provider: str
    job_id: str

    @classmethod
    def from_args(cls, args: dict, job_id: str) -> "WebsiteCrawlStatusApiRequest":
        """Create from Flask-RESTful parsed arguments."""
        provider = args.get("provider")

        if not provider:
            raise ValueError("Provider is required")
        if not job_id:
            raise ValueError("Job ID is required")

        return cls(provider=provider, job_id=job_id)


class WebsiteService:
    """Service class for website crawling operations using different providers."""

    @classmethod
    def _get_credentials_and_config(cls, tenant_id: str, provider: str) -> tuple[Any, Any]:
        """Get and validate credentials for a provider."""
        if provider == "firecrawl":
            plugin_id = "langgenius/firecrawl_datasource"
        elif provider == "watercrawl":
            plugin_id = "langgenius/watercrawl_datasource"
        elif provider == "jinareader":
            plugin_id = "langgenius/jina_datasource"
        else:
            raise ValueError("Invalid provider")

        datasource_provider_service = DatasourceProviderService()
        credential = datasource_provider_service.get_datasource_credentials(
            tenant_id=tenant_id,
            provider=provider,
            plugin_id=plugin_id,
        )

        if provider == "firecrawl":
            return credential.get("firecrawl_api_key"), credential
        elif provider in {"watercrawl", "jinareader"}:
            return credential.get("api_key"), credential
        else:
            raise ValueError("Invalid provider")

    @classmethod
    def _get_decrypted_api_key(cls, tenant_id: str, config: dict) -> str:
        """Decrypt and return the API key from config."""
        api_key = config.get("api_key")
        if not api_key:
            raise ValueError("API key not found in configuration")
        return encrypter.decrypt_token(tenant_id=tenant_id, token=api_key)

    @classmethod
    def document_create_args_validate(cls, args: dict):
        """Validate arguments for document creation."""
        try:
            WebsiteCrawlApiRequest.from_args(args)
        except ValueError as e:
            raise ValueError(f"Invalid arguments: {e}")

    @classmethod
    def crawl_url(cls, api_request: WebsiteCrawlApiRequest) -> dict[str, Any]:
        """Crawl a URL using the specified provider with typed request."""
        request = api_request.to_crawl_request()
        api_key, config = cls._get_credentials_and_config(current_user.current_tenant_id, request.provider)

        if request.provider == "firecrawl":
            return cls._crawl_with_firecrawl(request=request, api_key=api_key, config=config)
        elif request.provider == "watercrawl":
            return cls._crawl_with_watercrawl(request=request, api_key=api_key, config=config)
        elif request.provider == "jinareader":
            return cls._crawl_with_jinareader(request=request, api_key=api_key)
        else:
            raise ValueError("Invalid provider")

    @classmethod
    def _crawl_with_firecrawl(cls, request: CrawlRequest, api_key: str, config: dict) -> dict[str, Any]:
        firecrawl_app = FirecrawlApp(api_key=api_key, base_url=config.get("base_url"))

        if not request.options.crawl_sub_pages:
            # Single-page crawl: ignore path filters and force a limit of 1.
            params = {
                "includePaths": [],
                "excludePaths": [],
                "limit": 1,
                "scrapeOptions": {"onlyMainContent": request.options.only_main_content},
            }
        else:
            params = {
                "includePaths": request.options.get_include_paths(),
                "excludePaths": request.options.get_exclude_paths(),
                "limit": request.options.limit,
                "scrapeOptions": {"onlyMainContent": request.options.only_main_content},
            }
            if request.options.max_depth:
                params["maxDepth"] = request.options.max_depth

        job_id = firecrawl_app.crawl_url(request.url, params)
        # Cache the crawl start time so the status endpoint can report elapsed time.
        website_crawl_time_cache_key = f"website_crawl_{job_id}"
        time = str(datetime.datetime.now().timestamp())
        redis_client.setex(website_crawl_time_cache_key, 3600, time)
        return {"status": "active", "job_id": job_id}

    @classmethod
    def _crawl_with_watercrawl(cls, request: CrawlRequest, api_key: str, config: dict) -> dict[str, Any]:
        # Convert CrawlOptions back to dict format for WaterCrawlProvider
        options = {
            "limit": request.options.limit,
            "crawl_sub_pages": request.options.crawl_sub_pages,
            "only_main_content": request.options.only_main_content,
            "includes": request.options.includes,
            "excludes": request.options.excludes,
            "max_depth": request.options.max_depth,
            "use_sitemap": request.options.use_sitemap,
        }
        return WaterCrawlProvider(api_key=api_key, base_url=config.get("base_url")).crawl_url(
            url=request.url, options=options
        )

    @classmethod
    def _crawl_with_jinareader(cls, request: CrawlRequest, api_key: str) -> dict[str, Any]:
        if not request.options.crawl_sub_pages:
            # Single page: Jina Reader returns the scraped content directly.
            response = httpx.get(
                f"https://r.jina.ai/{request.url}",
                headers={"Accept": "application/json", "Authorization": f"Bearer {api_key}"},
            )
            if response.json().get("code") != 200:
                raise ValueError("Failed to crawl")
            return {"status": "active", "data": response.json().get("data")}
        else:
            # Sub-page crawl: submit an asynchronous task and return its job ID.
            response = httpx.post(
                "https://adaptivecrawl-kir3wx7b3a-uc.a.run.app",
                json={
                    "url": request.url,
                    "maxPages": request.options.limit,
                    "useSitemap": request.options.use_sitemap,
                },
                headers={
                    "Content-Type": "application/json",
                    "Authorization": f"Bearer {api_key}",
                },
            )
            if response.json().get("code") != 200:
                raise ValueError("Failed to crawl")
            return {"status": "active", "job_id": response.json().get("data", {}).get("taskId")}

    @classmethod
    def get_crawl_status(cls, job_id: str, provider: str) -> dict[str, Any]:
        """Get crawl status using string parameters."""
        api_request = WebsiteCrawlStatusApiRequest(provider=provider, job_id=job_id)
        return cls.get_crawl_status_typed(api_request)

    @classmethod
    def get_crawl_status_typed(cls, api_request: WebsiteCrawlStatusApiRequest) -> dict[str, Any]:
        """Get crawl status using typed request."""
        api_key, config = cls._get_credentials_and_config(current_user.current_tenant_id, api_request.provider)

        if api_request.provider == "firecrawl":
            return cls._get_firecrawl_status(api_request.job_id, api_key, config)
        elif api_request.provider == "watercrawl":
            return cls._get_watercrawl_status(api_request.job_id, api_key, config)
        elif api_request.provider == "jinareader":
            return cls._get_jinareader_status(api_request.job_id, api_key)
        else:
            raise ValueError("Invalid provider")

    @classmethod
    def _get_firecrawl_status(cls, job_id: str, api_key: str, config: dict) -> dict[str, Any]:
        firecrawl_app = FirecrawlApp(api_key=api_key, base_url=config.get("base_url"))
        result = firecrawl_app.check_crawl_status(job_id)
        crawl_status_data = {
            "status": result.get("status", "active"),
            "job_id": job_id,
            "total": result.get("total", 0),
            "current": result.get("current", 0),
            "data": result.get("data", []),
        }

        if crawl_status_data["status"] == "completed":
            # Report elapsed time based on the start timestamp cached when the crawl began.
            website_crawl_time_cache_key = f"website_crawl_{job_id}"
            start_time = redis_client.get(website_crawl_time_cache_key)
            if start_time:
                end_time = datetime.datetime.now().timestamp()
                time_consuming = abs(end_time - float(start_time))
                crawl_status_data["time_consuming"] = f"{time_consuming:.2f}"
                redis_client.delete(website_crawl_time_cache_key)

        return crawl_status_data

    @classmethod
    def _get_watercrawl_status(cls, job_id: str, api_key: str, config: dict) -> dict[str, Any]:
        return WaterCrawlProvider(api_key, config.get("base_url")).get_crawl_status(job_id)

    @classmethod
    def _get_jinareader_status(cls, job_id: str, api_key: str) -> dict[str, Any]:
        response = httpx.post(
            "https://adaptivecrawlstatus-kir3wx7b3a-uc.a.run.app",
            headers={"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"},
            json={"taskId": job_id},
        )
        data = response.json().get("data", {})
        crawl_status_data = {
            "status": data.get("status", "active"),
            "job_id": job_id,
            "total": len(data.get("urls", [])),
            "current": len(data.get("processed", [])) + len(data.get("failed", [])),
            "data": [],
            "time_consuming": data.get("duration", 0) / 1000,
        }

        if crawl_status_data["status"] == "completed":
            # Once completed, fetch the processed page contents and normalize them.
            response = httpx.post(
                "https://adaptivecrawlstatus-kir3wx7b3a-uc.a.run.app",
                headers={"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"},
                json={"taskId": job_id, "urls": list(data.get("processed", {}).keys())},
            )
            data = response.json().get("data", {})
            formatted_data = [
                {
                    "title": item.get("data", {}).get("title"),
                    "source_url": item.get("data", {}).get("url"),
                    "description": item.get("data", {}).get("description"),
                    "markdown": item.get("data", {}).get("content"),
                }
                for item in data.get("processed", {}).values()
            ]
            crawl_status_data["data"] = formatted_data

        return crawl_status_data

    @classmethod
    def get_crawl_url_data(cls, job_id: str, provider: str, url: str, tenant_id: str) -> dict[str, Any] | None:
        api_key, config = cls._get_credentials_and_config(tenant_id, provider)

        if provider == "firecrawl":
            return cls._get_firecrawl_url_data(job_id, url, api_key, config)
        elif provider == "watercrawl":
            return cls._get_watercrawl_url_data(job_id, url, api_key, config)
        elif provider == "jinareader":
            return cls._get_jinareader_url_data(job_id, url, api_key)
        else:
            raise ValueError("Invalid provider")

    @classmethod
    def _get_firecrawl_url_data(cls, job_id: str, url: str, api_key: str, config: dict) -> dict[str, Any] | None:
        crawl_data: list[dict[str, Any]] | None = None
        file_key = "website_files/" + job_id + ".txt"

        if storage.exists(file_key):
            # Prefer crawl results already persisted to storage.
            stored_data = storage.load_once(file_key)
            if stored_data:
                crawl_data = json.loads(stored_data.decode("utf-8"))
        else:
            # Otherwise query Firecrawl directly for the completed job.
            firecrawl_app = FirecrawlApp(api_key=api_key, base_url=config.get("base_url"))
            result = firecrawl_app.check_crawl_status(job_id)
            if result.get("status") != "completed":
                raise ValueError("Crawl job is not completed")
            crawl_data = result.get("data")

        if crawl_data:
            for item in crawl_data:
                if item.get("source_url") == url:
                    return dict(item)
        return None

    @classmethod
    def _get_watercrawl_url_data(cls, job_id: str, url: str, api_key: str, config: dict) -> dict[str, Any] | None:
        return WaterCrawlProvider(api_key, config.get("base_url")).get_crawl_url_data(job_id, url)

    @classmethod
    def _get_jinareader_url_data(cls, job_id: str, url: str, api_key: str) -> dict[str, Any] | None:
        if not job_id:
            response = httpx.get(
                f"https://r.jina.ai/{url}",
                headers={"Accept": "application/json", "Authorization": f"Bearer {api_key}"},
            )
            if response.json().get("code") != 200:
                raise ValueError("Failed to crawl")
            return dict(response.json().get("data", {}))
        else:
            # Get crawl status first
            status_response = httpx.post(
                "https://adaptivecrawlstatus-kir3wx7b3a-uc.a.run.app",
                headers={"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"},
                json={"taskId": job_id},
            )
            status_data = status_response.json().get("data", {})
            if status_data.get("status") != "completed":
                raise ValueError("Crawl job is not completed")

            # Get processed data
            data_response = httpx.post(
                "https://adaptivecrawlstatus-kir3wx7b3a-uc.a.run.app",
                headers={"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"},
                json={"taskId": job_id, "urls": list(status_data.get("processed", {}).keys())},
            )
            processed_data = data_response.json().get("data", {})

            for item in processed_data.get("processed", {}).values():
                if item.get("data", {}).get("url") == url:
                    return dict(item.get("data", {}))
            return None

    @classmethod
    def get_scrape_url_data(cls, provider: str, url: str, tenant_id: str, only_main_content: bool) -> dict[str, Any]:
        request = ScrapeRequest(provider=provider, url=url, tenant_id=tenant_id, only_main_content=only_main_content)
        api_key, config = cls._get_credentials_and_config(tenant_id=request.tenant_id, provider=request.provider)

        if request.provider == "firecrawl":
            return cls._scrape_with_firecrawl(request=request, api_key=api_key, config=config)
        elif request.provider == "watercrawl":
            return cls._scrape_with_watercrawl(request=request, api_key=api_key, config=config)
        else:
            raise ValueError("Invalid provider")

    @classmethod
    def _scrape_with_firecrawl(cls, request: ScrapeRequest, api_key: str, config: dict) -> dict[str, Any]:
        firecrawl_app = FirecrawlApp(api_key=api_key, base_url=config.get("base_url"))
        params = {"onlyMainContent": request.only_main_content}
        return firecrawl_app.scrape_url(url=request.url, params=params)

    @classmethod
    def _scrape_with_watercrawl(cls, request: ScrapeRequest, api_key: str, config: dict) -> dict[str, Any]:
        return WaterCrawlProvider(api_key=api_key, base_url=config.get("base_url")).scrape_url(request.url)
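

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical example of how a controller might drive the crawl flow
# above, assuming Flask-RESTful has already parsed the request into an args dict
# and a Firecrawl datasource credential is configured for the current tenant:
#
#   args = {
#       "provider": "firecrawl",
#       "url": "https://example.com",
#       "options": {"limit": 5, "crawl_sub_pages": True, "only_main_content": True},
#   }
#   api_request = WebsiteCrawlApiRequest.from_args(args)
#   result = WebsiteService.crawl_url(api_request)   # -> {"status": "active", "job_id": ...}
#   status = WebsiteService.get_crawl_status(result["job_id"], "firecrawl")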