# website_service.py
import datetime
import json
from dataclasses import dataclass
from typing import Any, Optional

import requests
from flask_login import current_user

from core.helper import encrypter
from core.rag.extractor.firecrawl.firecrawl_app import FirecrawlApp
from core.rag.extractor.watercrawl.provider import WaterCrawlProvider
from extensions.ext_redis import redis_client
from extensions.ext_storage import storage
from services.auth.api_key_auth_service import ApiKeyAuthService


@dataclass
class CrawlOptions:
    """Options for crawling operations."""

    limit: int = 1
    crawl_sub_pages: bool = False
    only_main_content: bool = False
    includes: Optional[str] = None
    excludes: Optional[str] = None
    max_depth: Optional[int] = None
    use_sitemap: bool = True

    def get_include_paths(self) -> list[str]:
        """Get list of include paths from comma-separated string."""
        return self.includes.split(",") if self.includes else []

    def get_exclude_paths(self) -> list[str]:
        """Get list of exclude paths from comma-separated string."""
        return self.excludes.split(",") if self.excludes else []
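
# Illustrative note (not in the original module): `includes`/`excludes` are plain
# comma-separated strings, so for example
#     CrawlOptions(includes="blog/*,docs/*").get_include_paths()
# returns ["blog/*", "docs/*"], while a None or empty value yields [].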


@dataclass
class CrawlRequest:
    """Request container for crawling operations."""

    url: str
    provider: str
    options: CrawlOptions


@dataclass
class ScrapeRequest:
    """Request container for scraping operations."""

    provider: str
    url: str
    tenant_id: str
    only_main_content: bool


@dataclass
class WebsiteCrawlApiRequest:
    """Request container for website crawl API arguments."""

    provider: str
    url: str
    options: dict[str, Any]

    def to_crawl_request(self) -> CrawlRequest:
        """Convert API request to internal CrawlRequest."""
        options = CrawlOptions(
            limit=self.options.get("limit", 1),
            crawl_sub_pages=self.options.get("crawl_sub_pages", False),
            only_main_content=self.options.get("only_main_content", False),
            includes=self.options.get("includes"),
            excludes=self.options.get("excludes"),
            max_depth=self.options.get("max_depth"),
            use_sitemap=self.options.get("use_sitemap", True),
        )
        return CrawlRequest(url=self.url, provider=self.provider, options=options)

    @classmethod
    def from_args(cls, args: dict) -> "WebsiteCrawlApiRequest":
        """Create from Flask-RESTful parsed arguments."""
        provider = args.get("provider")
        url = args.get("url")
        options = args.get("options", {})
        if not provider:
            raise ValueError("Provider is required")
        if not url:
            raise ValueError("URL is required")
        if not options:
            raise ValueError("Options are required")
        return cls(provider=provider, url=url, options=options)


@dataclass
class WebsiteCrawlStatusApiRequest:
    """Request container for website crawl status API arguments."""

    provider: str
    job_id: str

    @classmethod
    def from_args(cls, args: dict, job_id: str) -> "WebsiteCrawlStatusApiRequest":
        """Create from Flask-RESTful parsed arguments."""
        provider = args.get("provider")
        if not provider:
            raise ValueError("Provider is required")
        if not job_id:
            raise ValueError("Job ID is required")
        return cls(provider=provider, job_id=job_id)
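
# Usage sketch (illustrative, not part of the original module): raw Flask-RESTful
# arguments are validated by from_args() and then normalized into the internal
# CrawlRequest consumed by WebsiteService, e.g.:
#
#     api_request = WebsiteCrawlApiRequest.from_args(
#         {"provider": "firecrawl", "url": "https://example.com", "options": {"limit": 5}}
#     )
#     crawl_request = api_request.to_crawl_request()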


class WebsiteService:
    """Service class for website crawling operations using different providers."""

    @classmethod
    def _get_credentials_and_config(cls, tenant_id: str, provider: str) -> tuple[dict, dict]:
        """Get and validate credentials for a provider."""
        credentials = ApiKeyAuthService.get_auth_credentials(tenant_id, "website", provider)
        if not credentials or "config" not in credentials:
            raise ValueError("No valid credentials found for the provider")
        return credentials, credentials["config"]

    @classmethod
    def _get_decrypted_api_key(cls, tenant_id: str, config: dict) -> str:
        """Decrypt and return the API key from config."""
        api_key = config.get("api_key")
        if not api_key:
            raise ValueError("API key not found in configuration")
        return encrypter.decrypt_token(tenant_id=tenant_id, token=api_key)

    @classmethod
    def document_create_args_validate(cls, args: dict) -> None:
        """Validate arguments for document creation."""
        try:
            WebsiteCrawlApiRequest.from_args(args)
        except ValueError as e:
            raise ValueError(f"Invalid arguments: {e}") from e

    @classmethod
    def crawl_url(cls, api_request: WebsiteCrawlApiRequest) -> dict[str, Any]:
        """Crawl a URL using the specified provider with typed request."""
        request = api_request.to_crawl_request()
        _, config = cls._get_credentials_and_config(current_user.current_tenant_id, request.provider)
        api_key = cls._get_decrypted_api_key(current_user.current_tenant_id, config)
        if request.provider == "firecrawl":
            return cls._crawl_with_firecrawl(request=request, api_key=api_key, config=config)
        elif request.provider == "watercrawl":
            return cls._crawl_with_watercrawl(request=request, api_key=api_key, config=config)
        elif request.provider == "jinareader":
            return cls._crawl_with_jinareader(request=request, api_key=api_key)
        else:
            raise ValueError("Invalid provider")

    @classmethod
    def _crawl_with_firecrawl(cls, request: CrawlRequest, api_key: str, config: dict) -> dict[str, Any]:
        firecrawl_app = FirecrawlApp(api_key=api_key, base_url=config.get("base_url"))
        if not request.options.crawl_sub_pages:
            # Single-page crawl: ignore include/exclude paths and force a limit of 1.
            params = {
                "includePaths": [],
                "excludePaths": [],
                "limit": 1,
                "scrapeOptions": {"onlyMainContent": request.options.only_main_content},
            }
        else:
            params = {
                "includePaths": request.options.get_include_paths(),
                "excludePaths": request.options.get_exclude_paths(),
                "limit": request.options.limit,
                "scrapeOptions": {"onlyMainContent": request.options.only_main_content},
            }
            if request.options.max_depth:
                params["maxDepth"] = request.options.max_depth
        job_id = firecrawl_app.crawl_url(request.url, params)
        # Cache the crawl start time so _get_firecrawl_status can report elapsed time.
        website_crawl_time_cache_key = f"website_crawl_{job_id}"
        time = str(datetime.datetime.now().timestamp())
        redis_client.setex(website_crawl_time_cache_key, 3600, time)
        return {"status": "active", "job_id": job_id}

    @classmethod
    def _crawl_with_watercrawl(cls, request: CrawlRequest, api_key: str, config: dict) -> dict[str, Any]:
        # Convert CrawlOptions back to dict format for WaterCrawlProvider
        options = {
            "limit": request.options.limit,
            "crawl_sub_pages": request.options.crawl_sub_pages,
            "only_main_content": request.options.only_main_content,
            "includes": request.options.includes,
            "excludes": request.options.excludes,
            "max_depth": request.options.max_depth,
            "use_sitemap": request.options.use_sitemap,
        }
        return WaterCrawlProvider(api_key=api_key, base_url=config.get("base_url")).crawl_url(
            url=request.url, options=options
        )

    @classmethod
    def _crawl_with_jinareader(cls, request: CrawlRequest, api_key: str) -> dict[str, Any]:
        if not request.options.crawl_sub_pages:
            # Single page: fetch the URL synchronously through the Jina Reader endpoint.
            response = requests.get(
                f"https://r.jina.ai/{request.url}",
                headers={"Accept": "application/json", "Authorization": f"Bearer {api_key}"},
            )
            if response.json().get("code") != 200:
                raise ValueError("Failed to crawl")
            return {"status": "active", "data": response.json().get("data")}
        else:
            # Multiple pages: submit an asynchronous crawl job and return its task ID.
            response = requests.post(
                "https://adaptivecrawl-kir3wx7b3a-uc.a.run.app",
                json={
                    "url": request.url,
                    "maxPages": request.options.limit,
                    "useSitemap": request.options.use_sitemap,
                },
                headers={
                    "Content-Type": "application/json",
                    "Authorization": f"Bearer {api_key}",
                },
            )
            if response.json().get("code") != 200:
                raise ValueError("Failed to crawl")
            return {"status": "active", "job_id": response.json().get("data", {}).get("taskId")}

    @classmethod
    def get_crawl_status(cls, job_id: str, provider: str) -> dict[str, Any]:
        """Get crawl status using string parameters."""
        api_request = WebsiteCrawlStatusApiRequest(provider=provider, job_id=job_id)
        return cls.get_crawl_status_typed(api_request)

    @classmethod
    def get_crawl_status_typed(cls, api_request: WebsiteCrawlStatusApiRequest) -> dict[str, Any]:
        """Get crawl status using typed request."""
        _, config = cls._get_credentials_and_config(current_user.current_tenant_id, api_request.provider)
        api_key = cls._get_decrypted_api_key(current_user.current_tenant_id, config)
        if api_request.provider == "firecrawl":
            return cls._get_firecrawl_status(api_request.job_id, api_key, config)
        elif api_request.provider == "watercrawl":
            return cls._get_watercrawl_status(api_request.job_id, api_key, config)
        elif api_request.provider == "jinareader":
            return cls._get_jinareader_status(api_request.job_id, api_key)
        else:
            raise ValueError("Invalid provider")

    @classmethod
    def _get_firecrawl_status(cls, job_id: str, api_key: str, config: dict) -> dict[str, Any]:
        firecrawl_app = FirecrawlApp(api_key=api_key, base_url=config.get("base_url"))
        result = firecrawl_app.check_crawl_status(job_id)
        crawl_status_data = {
            "status": result.get("status", "active"),
            "job_id": job_id,
            "total": result.get("total", 0),
            "current": result.get("current", 0),
            "data": result.get("data", []),
        }
        if crawl_status_data["status"] == "completed":
            # Compute elapsed time from the start timestamp cached by _crawl_with_firecrawl.
            website_crawl_time_cache_key = f"website_crawl_{job_id}"
            start_time = redis_client.get(website_crawl_time_cache_key)
            if start_time:
                end_time = datetime.datetime.now().timestamp()
                time_consuming = abs(end_time - float(start_time))
                crawl_status_data["time_consuming"] = f"{time_consuming:.2f}"
                redis_client.delete(website_crawl_time_cache_key)
        return crawl_status_data

    @classmethod
    def _get_watercrawl_status(cls, job_id: str, api_key: str, config: dict) -> dict[str, Any]:
        return WaterCrawlProvider(api_key, config.get("base_url")).get_crawl_status(job_id)

    @classmethod
    def _get_jinareader_status(cls, job_id: str, api_key: str) -> dict[str, Any]:
        response = requests.post(
            "https://adaptivecrawlstatus-kir3wx7b3a-uc.a.run.app",
            headers={"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"},
            json={"taskId": job_id},
        )
        data = response.json().get("data", {})
        crawl_status_data = {
            "status": data.get("status", "active"),
            "job_id": job_id,
            "total": len(data.get("urls", [])),
            "current": len(data.get("processed", [])) + len(data.get("failed", [])),
            "data": [],
            "time_consuming": data.get("duration", 0) / 1000,
        }
        if crawl_status_data["status"] == "completed":
            # Fetch the content of the processed URLs and normalize it for the caller.
            response = requests.post(
                "https://adaptivecrawlstatus-kir3wx7b3a-uc.a.run.app",
                headers={"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"},
                json={"taskId": job_id, "urls": list(data.get("processed", {}).keys())},
            )
            data = response.json().get("data", {})
            formatted_data = [
                {
                    "title": item.get("data", {}).get("title"),
                    "source_url": item.get("data", {}).get("url"),
                    "description": item.get("data", {}).get("description"),
                    "markdown": item.get("data", {}).get("content"),
                }
                for item in data.get("processed", {}).values()
            ]
            crawl_status_data["data"] = formatted_data
        return crawl_status_data

    @classmethod
    def get_crawl_url_data(cls, job_id: str, provider: str, url: str, tenant_id: str) -> dict[str, Any] | None:
        _, config = cls._get_credentials_and_config(tenant_id, provider)
        api_key = cls._get_decrypted_api_key(tenant_id, config)
        if provider == "firecrawl":
            return cls._get_firecrawl_url_data(job_id, url, api_key, config)
        elif provider == "watercrawl":
            return cls._get_watercrawl_url_data(job_id, url, api_key, config)
        elif provider == "jinareader":
            return cls._get_jinareader_url_data(job_id, url, api_key)
        else:
            raise ValueError("Invalid provider")

    @classmethod
    def _get_firecrawl_url_data(cls, job_id: str, url: str, api_key: str, config: dict) -> dict[str, Any] | None:
        crawl_data: list[dict[str, Any]] | None = None
        # Prefer crawl results already persisted to storage; otherwise ask the Firecrawl API.
        file_key = "website_files/" + job_id + ".txt"
        if storage.exists(file_key):
            stored_data = storage.load_once(file_key)
            if stored_data:
                crawl_data = json.loads(stored_data.decode("utf-8"))
        else:
            firecrawl_app = FirecrawlApp(api_key=api_key, base_url=config.get("base_url"))
            result = firecrawl_app.check_crawl_status(job_id)
            if result.get("status") != "completed":
                raise ValueError("Crawl job is not completed")
            crawl_data = result.get("data")
        if crawl_data:
            for item in crawl_data:
                if item.get("source_url") == url:
                    return dict(item)
        return None

    @classmethod
    def _get_watercrawl_url_data(cls, job_id: str, url: str, api_key: str, config: dict) -> dict[str, Any] | None:
        return WaterCrawlProvider(api_key, config.get("base_url")).get_crawl_url_data(job_id, url)

    @classmethod
    def _get_jinareader_url_data(cls, job_id: str, url: str, api_key: str) -> dict[str, Any] | None:
        if not job_id:
            response = requests.get(
                f"https://r.jina.ai/{url}",
                headers={"Accept": "application/json", "Authorization": f"Bearer {api_key}"},
            )
            if response.json().get("code") != 200:
                raise ValueError("Failed to crawl")
            return dict(response.json().get("data", {}))
        else:
            # Get crawl status first
            status_response = requests.post(
                "https://adaptivecrawlstatus-kir3wx7b3a-uc.a.run.app",
                headers={"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"},
                json={"taskId": job_id},
            )
            status_data = status_response.json().get("data", {})
            if status_data.get("status") != "completed":
                raise ValueError("Crawl job is not completed")
            # Get processed data
            data_response = requests.post(
                "https://adaptivecrawlstatus-kir3wx7b3a-uc.a.run.app",
                headers={"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"},
                json={"taskId": job_id, "urls": list(status_data.get("processed", {}).keys())},
            )
            processed_data = data_response.json().get("data", {})
            for item in processed_data.get("processed", {}).values():
                if item.get("data", {}).get("url") == url:
                    return dict(item.get("data", {}))
            return None

    @classmethod
    def get_scrape_url_data(cls, provider: str, url: str, tenant_id: str, only_main_content: bool) -> dict[str, Any]:
        request = ScrapeRequest(provider=provider, url=url, tenant_id=tenant_id, only_main_content=only_main_content)
        _, config = cls._get_credentials_and_config(tenant_id=request.tenant_id, provider=request.provider)
        api_key = cls._get_decrypted_api_key(tenant_id=request.tenant_id, config=config)
        if request.provider == "firecrawl":
            return cls._scrape_with_firecrawl(request=request, api_key=api_key, config=config)
        elif request.provider == "watercrawl":
            return cls._scrape_with_watercrawl(request=request, api_key=api_key, config=config)
        else:
            raise ValueError("Invalid provider")

    @classmethod
    def _scrape_with_firecrawl(cls, request: ScrapeRequest, api_key: str, config: dict) -> dict[str, Any]:
        firecrawl_app = FirecrawlApp(api_key=api_key, base_url=config.get("base_url"))
        params = {"onlyMainContent": request.only_main_content}
        return firecrawl_app.scrape_url(url=request.url, params=params)

    @classmethod
    def _scrape_with_watercrawl(cls, request: ScrapeRequest, api_key: str, config: dict) -> dict[str, Any]:
        return WaterCrawlProvider(api_key=api_key, base_url=config.get("base_url")).scrape_url(request.url)
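

# Minimal end-to-end sketch (illustrative, not part of the original module). It
# assumes a Flask request context with an authenticated `current_user` and
# provider credentials already stored via ApiKeyAuthService; neither holds when
# this file is run standalone, so treat this as documentation of the call flow.
if __name__ == "__main__":
    args = {
        "provider": "firecrawl",
        "url": "https://example.com",
        "options": {"limit": 5, "crawl_sub_pages": True, "only_main_content": True},
    }
    api_request = WebsiteCrawlApiRequest.from_args(args)
    result = WebsiteService.crawl_url(api_request)  # {"status": "active", "job_id": ...}
    status = WebsiteService.get_crawl_status(result["job_id"], "firecrawl")
    # "time_consuming" is only present once the crawl has completed.
    print(status["status"], status.get("time_consuming"))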