#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io

import numpy as np
from PIL import Image

from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from deepdoc.vision import OCR
from rag.nlp import tokenize
from rag.utils import clean_markdown_block

ocr = OCR()


def chunk(filename, binary, tenant_id, lang, callback=None, **kwargs):
    callback = callback or (lambda prog, msg: None)

    # Decode the raw bytes into an RGB image and keep it on the chunk record.
    img = Image.open(io.BytesIO(binary)).convert('RGB')
    doc = {
        "docnm_kwd": filename,
        "image": img
    }

    # Run OCR first; each detection yields (box, (text, confidence)).
    bxs = ocr(np.array(img))
    txt = "\n".join([t[0] for _, t in bxs if t[0]])
    eng = lang.lower() == "english"
    callback(0.4, "Finish OCR: (%s ...)" % txt[:12])

    # If OCR already produced enough text (more than 32 English words, or more
    # than 32 characters), index it directly and skip the CV LLM.
    if (eng and len(txt.split()) > 32) or len(txt) > 32:
        tokenize(doc, txt, eng)
        callback(0.8, "OCR result is long enough; skip the CV LLM.")
        return [doc]

    try:
        # Otherwise ask an image-to-text LLM to describe the picture and index
        # the description together with whatever OCR found.
        callback(0.4, "Use CV LLM to describe the picture.")
        cv_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, lang=lang)
        img_binary = io.BytesIO()
        img.save(img_binary, format='JPEG')
        img_binary.seek(0)
        ans = cv_mdl.describe(img_binary.read())
        callback(0.8, "CV LLM respond: %s ..." % ans[:32])
        txt += "\n" + ans
        tokenize(doc, txt, eng)
        return [doc]
    except Exception as e:
        callback(prog=-1, msg=str(e))

    return []
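
# Illustrative call sketch (not part of the module): the file name, tenant id,
# and callback below are hypothetical, and running it requires a configured
# RAGFlow backend with an IMAGE2TEXT model, so it is shown as a comment only.
#
#     with open("photo.jpg", "rb") as f:
#         docs = chunk("photo.jpg", f.read(), tenant_id="tenant-001",
#                      lang="English",
#                      callback=lambda prog, msg: print(prog, msg))
#     # docs is [] on failure, otherwise a one-element list carrying the
#     # tokenized text plus the PIL image under "image".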


def vision_llm_chunk(binary, vision_model, prompt=None, callback=None):
    """
    A simple wrapper that turns an image into markdown text via a VLM.

    Args:
        binary: a PIL.Image.Image instance (despite the name, not raw bytes).
        vision_model: model object exposing describe_with_prompt(binary, prompt).
        prompt: optional prompt passed through to the vision model.
        callback: optional progress callback taking (prog, msg).

    Returns:
        Markdown text generated by the VLM, or "" on failure.
    """
    callback = callback or (lambda prog, msg: None)

    img = binary  # the argument is already a PIL image
    txt = ""

    try:
        # Re-encode the image as JPEG bytes for the vision model.
        img_binary = io.BytesIO()
        img.save(img_binary, format='JPEG')
        img_binary.seek(0)

        # Ask the model for a description and clean up the returned markdown.
        ans = clean_markdown_block(vision_model.describe_with_prompt(img_binary.read(), prompt))
        txt += "\n" + ans
        return txt
    except Exception as e:
        callback(-1, str(e))

    return ""
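

# ---------------------------------------------------------------------------
# Illustrative usage sketch for vision_llm_chunk. The stub below only mimics
# the expected interface (describe_with_prompt(binary, prompt) -> str); it is
# a hypothetical stand-in, not a real RAGFlow vision model.
if __name__ == "__main__":
    class _StubVisionModel:
        def describe_with_prompt(self, binary, prompt):
            # Return a fixed markdown answer regardless of the image bytes.
            return "# Example image\n\nA plain white square."

    demo_img = Image.new("RGB", (64, 64), color="white")
    print(vision_llm_chunk(demo_img, _StubVisionModel(),
                           prompt="Transcribe this image to markdown.",
                           callback=lambda prog, msg: print(prog, msg)))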