
crawler.py 2.3KB

#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from abc import ABC
import asyncio
from crawl4ai import AsyncWebCrawler
from agent.component.base import ComponentBase, ComponentParamBase
from api.utils.web_utils import is_valid_url


class CrawlerParam(ComponentParamBase):
    """
    Define the Crawler component parameters.
    """

    def __init__(self):
        super().__init__()
        self.proxy = None
        self.extract_type = "markdown"

    def check(self):
        self.check_valid_value(self.extract_type, "Type of content from the crawler", ['html', 'markdown', 'content'])


class Crawler(ComponentBase, ABC):
    component_name = "Crawler"

    def _run(self, history, **kwargs):
        # Collect the upstream input and validate it as a URL before crawling.
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not is_valid_url(ans):
            return Crawler.be_output("")
        try:
            result = asyncio.run(self.get_web(ans))
            return Crawler.be_output(result)
        except Exception as e:
            return Crawler.be_output(f"An unexpected error occurred: {str(e)}")

    async def get_web(self, url):
        proxy = self._param.proxy if self._param.proxy else None
        async with AsyncWebCrawler(verbose=True, proxy=proxy) as crawler:
            # Fetch the page fresh (skip the cache) and return it in the
            # format configured by extract_type, defaulting to markdown.
            result = await crawler.arun(
                url=url,
                bypass_cache=True
            )

            if self._param.extract_type == 'html':
                return result.cleaned_html
            elif self._param.extract_type == 'markdown':
                return result.markdown
            elif self._param.extract_type == 'content':
                return result.extracted_content
            return result.markdown
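
For reference, the crawl4ai flow that get_web wraps can be exercised on its own. The following is a minimal standalone sketch, assuming crawl4ai is installed; the URL and the fetch_markdown helper name are placeholders for illustration, while verbose, proxy, and bypass_cache mirror the component above.

import asyncio
from crawl4ai import AsyncWebCrawler


async def fetch_markdown(url: str, proxy: str | None = None) -> str:
    # Same crawler setup as Crawler.get_web: verbose logging, an optional
    # proxy, and a fresh fetch that bypasses the cache.
    async with AsyncWebCrawler(verbose=True, proxy=proxy) as crawler:
        result = await crawler.arun(url=url, bypass_cache=True)
        return result.markdown


if __name__ == "__main__":
    # Placeholder URL; substitute any page you want to crawl.
    print(asyncio.run(fetch_markdown("https://example.com")))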