
picture.py 2.7KB

#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import re

import numpy as np
from PIL import Image

from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from deepdoc.vision import OCR
from rag.nlp import rag_tokenizer, tokenize
from rag.utils import clean_markdown_block

ocr = OCR()


def chunk(filename, binary, tenant_id, lang, callback=None, **kwargs):
    img = Image.open(io.BytesIO(binary)).convert('RGB')
    doc = {
        "docnm_kwd": filename,
        "title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", filename)),
        "image": img,
        "doc_type_kwd": "image"
    }
    bxs = ocr(np.array(img))
    txt = "\n".join([t[0] for _, t in bxs if t[0]])
    eng = lang.lower() == "english"
    callback(0.4, "Finish OCR: (%s ...)" % txt[:12])

    if (eng and len(txt.split()) > 32) or len(txt) > 32:
        tokenize(doc, txt, eng)
        callback(0.8, "OCR results is too long to use CV LLM.")
        return [doc]

    try:
        callback(0.4, "Use CV LLM to describe the picture.")
        cv_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, lang=lang)
        img_binary = io.BytesIO()
        img.save(img_binary, format='JPEG')
        img_binary.seek(0)
        ans = cv_mdl.describe(img_binary.read())
        callback(0.8, "CV LLM respond: %s ..." % ans[:32])
        txt += "\n" + ans
        tokenize(doc, txt, eng)
        return [doc]
    except Exception as e:
        callback(prog=-1, msg=str(e))

    return []


def vision_llm_chunk(binary, vision_model, prompt=None, callback=None):
    """
    A simple wrapper to process image to markdown texts via VLM.

    Returns:
        Simple markdown texts generated by VLM.
    """
    callback = callback or (lambda prog, msg: None)

    img = binary
    txt = ""

    try:
        img_binary = io.BytesIO()
        img.save(img_binary, format='JPEG')
        img_binary.seek(0)

        ans = clean_markdown_block(vision_model.describe_with_prompt(img_binary.read(), prompt))

        txt += "\n" + ans
        return txt
    except Exception as e:
        callback(-1, str(e))

    return ""
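
A minimal usage sketch for chunk(), assuming a working RAGFlow environment (OCR model weights available and, for the CV-LLM fallback, a tenant with an IMAGE2TEXT model configured). The file path and tenant id below are placeholders, not values from the file; the callback signature simply mirrors how chunk() invokes it above.

# --- Hypothetical usage sketch; not part of picture.py ---
# Requires a RAGFlow deployment: "example.png" and "tenant-0" are placeholder
# values, and the CV-LLM path only works if the tenant has an IMAGE2TEXT model.
if __name__ == "__main__":
    def progress(prog, msg=""):
        # chunk() calls this both positionally, e.g. callback(0.4, "..."),
        # and with keywords, e.g. callback(prog=-1, msg=...), so accept both.
        print(f"[{prog:+.1f}] {msg}")

    with open("example.png", "rb") as f:
        data = f.read()

    docs = chunk("example.png", data, tenant_id="tenant-0", lang="English",
                 callback=progress)
    for d in docs:
        # Inspect which fields the chunker produced (docnm_kwd, title_tks,
        # image, doc_type_kwd, plus whatever tokenize() adds).
        print(sorted(d.keys()))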