
website_service.py

import datetime
import json
from dataclasses import dataclass
from typing import Any, Optional

import requests
from flask_login import current_user

from core.helper import encrypter
from core.rag.extractor.firecrawl.firecrawl_app import FirecrawlApp
from core.rag.extractor.watercrawl.provider import WaterCrawlProvider
from extensions.ext_redis import redis_client
from extensions.ext_storage import storage
from services.datasource_provider_service import DatasourceProviderService


@dataclass
class CrawlOptions:
    """Options for crawling operations."""

    limit: int = 1
    crawl_sub_pages: bool = False
    only_main_content: bool = False
    includes: Optional[str] = None
    excludes: Optional[str] = None
    max_depth: Optional[int] = None
    use_sitemap: bool = True

    def get_include_paths(self) -> list[str]:
        """Get list of include paths from comma-separated string."""
        return self.includes.split(",") if self.includes else []

    def get_exclude_paths(self) -> list[str]:
        """Get list of exclude paths from comma-separated string."""
        return self.excludes.split(",") if self.excludes else []
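
# Example usage (hypothetical values): comma-separated include/exclude strings
# are split into path lists, and an unset field yields an empty list.
#
#     opts = CrawlOptions(includes="blog/*,docs/*")
#     opts.get_include_paths()  # -> ["blog/*", "docs/*"]
#     opts.get_exclude_paths()  # -> []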


@dataclass
class CrawlRequest:
    """Request container for crawling operations."""

    url: str
    provider: str
    options: CrawlOptions


@dataclass
class ScrapeRequest:
    """Request container for scraping operations."""

    provider: str
    url: str
    tenant_id: str
    only_main_content: bool


@dataclass
class WebsiteCrawlApiRequest:
    """Request container for website crawl API arguments."""

    provider: str
    url: str
    options: dict[str, Any]

    def to_crawl_request(self) -> CrawlRequest:
        """Convert API request to internal CrawlRequest."""
        options = CrawlOptions(
            limit=self.options.get("limit", 1),
            crawl_sub_pages=self.options.get("crawl_sub_pages", False),
            only_main_content=self.options.get("only_main_content", False),
            includes=self.options.get("includes"),
            excludes=self.options.get("excludes"),
            max_depth=self.options.get("max_depth"),
            use_sitemap=self.options.get("use_sitemap", True),
        )
        return CrawlRequest(url=self.url, provider=self.provider, options=options)

    @classmethod
    def from_args(cls, args: dict) -> "WebsiteCrawlApiRequest":
        """Create from Flask-RESTful parsed arguments."""
        provider = args.get("provider")
        url = args.get("url")
        options = args.get("options", {})
        if not provider:
            raise ValueError("Provider is required")
        if not url:
            raise ValueError("URL is required")
        if not options:
            raise ValueError("Options are required")
        return cls(provider=provider, url=url, options=options)
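
# Example usage (hypothetical args): Flask-RESTful parsed arguments are
# validated and converted into the internal CrawlRequest consumed by
# WebsiteService.crawl_url.
#
#     api_req = WebsiteCrawlApiRequest.from_args(
#         {"provider": "firecrawl", "url": "https://example.com", "options": {"limit": 3}}
#     )
#     crawl_req = api_req.to_crawl_request()
#     crawl_req.options.limit  # -> 3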


@dataclass
class WebsiteCrawlStatusApiRequest:
    """Request container for website crawl status API arguments."""

    provider: str
    job_id: str

    @classmethod
    def from_args(cls, args: dict, job_id: str) -> "WebsiteCrawlStatusApiRequest":
        """Create from Flask-RESTful parsed arguments."""
        provider = args.get("provider")
        if not provider:
            raise ValueError("Provider is required")
        if not job_id:
            raise ValueError("Job ID is required")
        return cls(provider=provider, job_id=job_id)


class WebsiteService:
    """Service class for website crawling operations using different providers."""

    @classmethod
    def _get_credentials_and_config(cls, tenant_id: str, provider: str) -> tuple[Any, Any]:
        """Get and validate credentials for a provider."""
        if provider == "firecrawl":
            plugin_id = "langgenius/firecrawl_datasource"
        elif provider == "watercrawl":
            plugin_id = "langgenius/watercrawl_datasource"
        elif provider == "jinareader":
            plugin_id = "langgenius/jina_datasource"
        else:
            # Reject unknown providers up front; otherwise plugin_id would be
            # unbound when used below.
            raise ValueError("Invalid provider")
        datasource_provider_service = DatasourceProviderService()
        credential = datasource_provider_service.get_datasource_credentials(
            tenant_id=tenant_id,
            provider=provider,
            plugin_id=plugin_id,
        )
        if provider == "firecrawl":
            return credential.get("firecrawl_api_key"), credential
        # watercrawl and jinareader both store the key under "api_key".
        return credential.get("api_key"), credential
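
    # Assumed credential shapes (inferred from the keys read above; the exact
    # payload is defined by DatasourceProviderService, not by this module):
    #
    #     firecrawl:   {"firecrawl_api_key": "...", "base_url": "..."}
    #     watercrawl:  {"api_key": "...", "base_url": "..."}
    #     jinareader:  {"api_key": "..."}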

    @classmethod
    def _get_decrypted_api_key(cls, tenant_id: str, config: dict) -> str:
        """Decrypt and return the API key from config."""
        api_key = config.get("api_key")
        if not api_key:
            raise ValueError("API key not found in configuration")
        return encrypter.decrypt_token(tenant_id=tenant_id, token=api_key)

    @classmethod
    def document_create_args_validate(cls, args: dict) -> None:
        """Validate arguments for document creation."""
        try:
            WebsiteCrawlApiRequest.from_args(args)
        except ValueError as e:
            raise ValueError(f"Invalid arguments: {e}") from e

    @classmethod
    def crawl_url(cls, api_request: WebsiteCrawlApiRequest) -> dict[str, Any]:
        """Crawl a URL using the specified provider with typed request."""
        request = api_request.to_crawl_request()
        api_key, config = cls._get_credentials_and_config(current_user.current_tenant_id, request.provider)
        if request.provider == "firecrawl":
            return cls._crawl_with_firecrawl(request=request, api_key=api_key, config=config)
        elif request.provider == "watercrawl":
            return cls._crawl_with_watercrawl(request=request, api_key=api_key, config=config)
        elif request.provider == "jinareader":
            return cls._crawl_with_jinareader(request=request, api_key=api_key)
        else:
            raise ValueError("Invalid provider")

    @classmethod
    def _crawl_with_firecrawl(cls, request: CrawlRequest, api_key: str, config: dict) -> dict[str, Any]:
        firecrawl_app = FirecrawlApp(api_key=api_key, base_url=config.get("base_url"))
        if not request.options.crawl_sub_pages:
            # Single-page crawl: no include/exclude filtering, hard limit of 1.
            params = {
                "includePaths": [],
                "excludePaths": [],
                "limit": 1,
                "scrapeOptions": {"onlyMainContent": request.options.only_main_content},
            }
        else:
            params = {
                "includePaths": request.options.get_include_paths(),
                "excludePaths": request.options.get_exclude_paths(),
                "limit": request.options.limit,
                "scrapeOptions": {"onlyMainContent": request.options.only_main_content},
            }
            if request.options.max_depth:
                params["maxDepth"] = request.options.max_depth
        job_id = firecrawl_app.crawl_url(request.url, params)
        # Cache the start time for one hour so the status endpoint can report
        # how long the crawl took.
        website_crawl_time_cache_key = f"website_crawl_{job_id}"
        time = str(datetime.datetime.now().timestamp())
        redis_client.setex(website_crawl_time_cache_key, 3600, time)
        return {"status": "active", "job_id": job_id}
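
    # Example params (hypothetical values) passed to FirecrawlApp.crawl_url
    # when crawl_sub_pages=True, limit=10, max_depth=2:
    #
    #     {
    #         "includePaths": [],
    #         "excludePaths": [],
    #         "limit": 10,
    #         "scrapeOptions": {"onlyMainContent": True},
    #         "maxDepth": 2,
    #     }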

    @classmethod
    def _crawl_with_watercrawl(cls, request: CrawlRequest, api_key: str, config: dict) -> dict[str, Any]:
        # Convert CrawlOptions back to dict format for WaterCrawlProvider.
        options = {
            "limit": request.options.limit,
            "crawl_sub_pages": request.options.crawl_sub_pages,
            "only_main_content": request.options.only_main_content,
            "includes": request.options.includes,
            "excludes": request.options.excludes,
            "max_depth": request.options.max_depth,
            "use_sitemap": request.options.use_sitemap,
        }
        return WaterCrawlProvider(api_key=api_key, base_url=config.get("base_url")).crawl_url(
            url=request.url, options=options
        )

    @classmethod
    def _crawl_with_jinareader(cls, request: CrawlRequest, api_key: str) -> dict[str, Any]:
        if not request.options.crawl_sub_pages:
            # Single-page scrape via the Jina Reader endpoint.
            response = requests.get(
                f"https://r.jina.ai/{request.url}",
                headers={"Accept": "application/json", "Authorization": f"Bearer {api_key}"},
            )
            if response.json().get("code") != 200:
                raise ValueError("Failed to crawl")
            return {"status": "active", "data": response.json().get("data")}
        else:
            # Multi-page crawl via the adaptive crawl task endpoint.
            response = requests.post(
                "https://adaptivecrawl-kir3wx7b3a-uc.a.run.app",
                json={
                    "url": request.url,
                    "maxPages": request.options.limit,
                    "useSitemap": request.options.use_sitemap,
                },
                headers={
                    "Content-Type": "application/json",
                    "Authorization": f"Bearer {api_key}",
                },
            )
            if response.json().get("code") != 200:
                raise ValueError("Failed to crawl")
            return {"status": "active", "job_id": response.json().get("data", {}).get("taskId")}
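
    # Note: single-page mode returns the scraped content inline, while
    # sub-page mode submits an adaptive crawl task and returns only a job_id
    # to be polled later via get_crawl_status.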

    @classmethod
    def get_crawl_status(cls, job_id: str, provider: str) -> dict[str, Any]:
        """Get crawl status using string parameters."""
        api_request = WebsiteCrawlStatusApiRequest(provider=provider, job_id=job_id)
        return cls.get_crawl_status_typed(api_request)

    @classmethod
    def get_crawl_status_typed(cls, api_request: WebsiteCrawlStatusApiRequest) -> dict[str, Any]:
        """Get crawl status using typed request."""
        api_key, config = cls._get_credentials_and_config(current_user.current_tenant_id, api_request.provider)
        if api_request.provider == "firecrawl":
            return cls._get_firecrawl_status(api_request.job_id, api_key, config)
        elif api_request.provider == "watercrawl":
            return cls._get_watercrawl_status(api_request.job_id, api_key, config)
        elif api_request.provider == "jinareader":
            return cls._get_jinareader_status(api_request.job_id, api_key)
        else:
            raise ValueError("Invalid provider")

    @classmethod
    def _get_firecrawl_status(cls, job_id: str, api_key: str, config: dict) -> dict[str, Any]:
        firecrawl_app = FirecrawlApp(api_key=api_key, base_url=config.get("base_url"))
        result = firecrawl_app.check_crawl_status(job_id)
        crawl_status_data = {
            "status": result.get("status", "active"),
            "job_id": job_id,
            "total": result.get("total", 0),
            "current": result.get("current", 0),
            "data": result.get("data", []),
        }
        if crawl_status_data["status"] == "completed":
            # Compute the elapsed time from the start timestamp cached when the
            # crawl was launched, then drop the cache entry.
            website_crawl_time_cache_key = f"website_crawl_{job_id}"
            start_time = redis_client.get(website_crawl_time_cache_key)
            if start_time:
                end_time = datetime.datetime.now().timestamp()
                time_consuming = abs(end_time - float(start_time))
                crawl_status_data["time_consuming"] = f"{time_consuming:.2f}"
                redis_client.delete(website_crawl_time_cache_key)
        return crawl_status_data
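
    # Example return value (hypothetical): once Firecrawl reports completion,
    # the cached start time yields a duration string, e.g.
    #
    #     {"status": "completed", "job_id": "abc123", "total": 12,
    #      "current": 12, "data": [...], "time_consuming": "34.56"}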

    @classmethod
    def _get_watercrawl_status(cls, job_id: str, api_key: str, config: dict) -> dict[str, Any]:
        return WaterCrawlProvider(api_key, config.get("base_url")).get_crawl_status(job_id)

    @classmethod
    def _get_jinareader_status(cls, job_id: str, api_key: str) -> dict[str, Any]:
        response = requests.post(
            "https://adaptivecrawlstatus-kir3wx7b3a-uc.a.run.app",
            headers={"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"},
            json={"taskId": job_id},
        )
        data = response.json().get("data", {})
        crawl_status_data = {
            "status": data.get("status", "active"),
            "job_id": job_id,
            "total": len(data.get("urls", [])),
            "current": len(data.get("processed", [])) + len(data.get("failed", [])),
            "data": [],
            "time_consuming": data.get("duration", 0) / 1000,
        }
        if crawl_status_data["status"] == "completed":
            # Once the task is done, fetch the full content of every processed
            # URL and normalize it into the shared result shape.
            response = requests.post(
                "https://adaptivecrawlstatus-kir3wx7b3a-uc.a.run.app",
                headers={"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"},
                json={"taskId": job_id, "urls": list(data.get("processed", {}).keys())},
            )
            data = response.json().get("data", {})
            formatted_data = [
                {
                    "title": item.get("data", {}).get("title"),
                    "source_url": item.get("data", {}).get("url"),
                    "description": item.get("data", {}).get("description"),
                    "markdown": item.get("data", {}).get("content"),
                }
                for item in data.get("processed", {}).values()
            ]
            crawl_status_data["data"] = formatted_data
        return crawl_status_data

    @classmethod
    def get_crawl_url_data(cls, job_id: str, provider: str, url: str, tenant_id: str) -> dict[str, Any] | None:
        api_key, config = cls._get_credentials_and_config(tenant_id, provider)
        if provider == "firecrawl":
            return cls._get_firecrawl_url_data(job_id, url, api_key, config)
        elif provider == "watercrawl":
            return cls._get_watercrawl_url_data(job_id, url, api_key, config)
        elif provider == "jinareader":
            return cls._get_jinareader_url_data(job_id, url, api_key)
        else:
            raise ValueError("Invalid provider")

    @classmethod
    def _get_firecrawl_url_data(cls, job_id: str, url: str, api_key: str, config: dict) -> dict[str, Any] | None:
        # Prefer crawl results already persisted to storage; fall back to the
        # live Firecrawl API when no stored file exists.
        crawl_data: list[dict[str, Any]] | None = None
        file_key = "website_files/" + job_id + ".txt"
        if storage.exists(file_key):
            stored_data = storage.load_once(file_key)
            if stored_data:
                crawl_data = json.loads(stored_data.decode("utf-8"))
        else:
            firecrawl_app = FirecrawlApp(api_key=api_key, base_url=config.get("base_url"))
            result = firecrawl_app.check_crawl_status(job_id)
            if result.get("status") != "completed":
                raise ValueError("Crawl job is not completed")
            crawl_data = result.get("data")
        if crawl_data:
            for item in crawl_data:
                if item.get("source_url") == url:
                    return dict(item)
        return None

    @classmethod
    def _get_watercrawl_url_data(cls, job_id: str, url: str, api_key: str, config: dict) -> dict[str, Any] | None:
        return WaterCrawlProvider(api_key, config.get("base_url")).get_crawl_url_data(job_id, url)

    @classmethod
    def _get_jinareader_url_data(cls, job_id: str, url: str, api_key: str) -> dict[str, Any] | None:
        if not job_id:
            response = requests.get(
                f"https://r.jina.ai/{url}",
                headers={"Accept": "application/json", "Authorization": f"Bearer {api_key}"},
            )
            if response.json().get("code") != 200:
                raise ValueError("Failed to crawl")
            return dict(response.json().get("data", {}))
        else:
            # Get crawl status first.
            status_response = requests.post(
                "https://adaptivecrawlstatus-kir3wx7b3a-uc.a.run.app",
                headers={"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"},
                json={"taskId": job_id},
            )
            status_data = status_response.json().get("data", {})
            if status_data.get("status") != "completed":
                raise ValueError("Crawl job is not completed")
            # Get processed data.
            data_response = requests.post(
                "https://adaptivecrawlstatus-kir3wx7b3a-uc.a.run.app",
                headers={"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"},
                json={"taskId": job_id, "urls": list(status_data.get("processed", {}).keys())},
            )
            processed_data = data_response.json().get("data", {})
            for item in processed_data.get("processed", {}).values():
                if item.get("data", {}).get("url") == url:
                    return dict(item.get("data", {}))
            return None

    @classmethod
    def get_scrape_url_data(cls, provider: str, url: str, tenant_id: str, only_main_content: bool) -> dict[str, Any]:
        request = ScrapeRequest(provider=provider, url=url, tenant_id=tenant_id, only_main_content=only_main_content)
        api_key, config = cls._get_credentials_and_config(tenant_id=request.tenant_id, provider=request.provider)
        if request.provider == "firecrawl":
            return cls._scrape_with_firecrawl(request=request, api_key=api_key, config=config)
        elif request.provider == "watercrawl":
            return cls._scrape_with_watercrawl(request=request, api_key=api_key, config=config)
        else:
            raise ValueError("Invalid provider")

    @classmethod
    def _scrape_with_firecrawl(cls, request: ScrapeRequest, api_key: str, config: dict) -> dict[str, Any]:
        firecrawl_app = FirecrawlApp(api_key=api_key, base_url=config.get("base_url"))
        params = {"onlyMainContent": request.only_main_content}
        return firecrawl_app.scrape_url(url=request.url, params=params)

    @classmethod
    def _scrape_with_watercrawl(cls, request: ScrapeRequest, api_key: str, config: dict) -> dict[str, Any]:
        return WaterCrawlProvider(api_key=api_key, base_url=config.get("base_url")).scrape_url(request.url)
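
# Usage sketch (hypothetical, for illustration only): these calls assume a
# Flask request context with an authenticated current_user and configured
# datasource credentials; all argument values are made up.
#
#     args = {
#         "provider": "firecrawl",
#         "url": "https://example.com",
#         "options": {"limit": 5, "crawl_sub_pages": True, "max_depth": 2},
#     }
#     api_request = WebsiteCrawlApiRequest.from_args(args)
#     result = WebsiteService.crawl_url(api_request)   # {"status": "active", "job_id": "..."}
#     status = WebsiteService.get_crawl_status(result["job_id"], "firecrawl")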