Du kannst nicht mehr als 25 Themen auswählen. Themen müssen mit entweder einem Buchstaben oder einer Ziffer beginnen. Sie können Bindestriche („-“) enthalten und bis zu 35 Zeichen lang sein.

picture.py 1.8KB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556
  1. # Licensed under the Apache License, Version 2.0 (the "License");
  2. # you may not use this file except in compliance with the License.
  3. # You may obtain a copy of the License at
  4. #
  5. # http://www.apache.org/licenses/LICENSE-2.0
  6. #
  7. # Unless required by applicable law or agreed to in writing, software
  8. # distributed under the License is distributed on an "AS IS" BASIS,
  9. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. # See the License for the specific language governing permissions and
  11. # limitations under the License.
  12. #
  13. import io
  14. import numpy as np
  15. from PIL import Image
  16. from api.db import LLMType
  17. from api.db.services.llm_service import LLMBundle
  18. from rag.nlp import tokenize
  19. from deepdoc.vision import OCR
# Module-level OCR engine singleton, built once at import time and shared
# by every chunk() call (avoids re-loading OCR models per document).
ocr = OCR()
  21. def chunk(filename, binary, tenant_id, lang, callback=None, **kwargs):
  22. try:
  23. cv_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, lang=lang)
  24. except Exception as e:
  25. callback(prog=-1, msg=str(e))
  26. return []
  27. img = Image.open(io.BytesIO(binary)).convert('RGB')
  28. doc = {
  29. "docnm_kwd": filename,
  30. "image": img
  31. }
  32. bxs = ocr(np.array(img))
  33. txt = "\n".join([t[0] for _, t in bxs if t[0]])
  34. eng = lang.lower() == "english"
  35. callback(0.4, "Finish OCR: (%s ...)" % txt[:12])
  36. if (eng and len(txt.split(" ")) > 32) or len(txt) > 32:
  37. tokenize(doc, txt, eng)
  38. callback(0.8, "OCR results is too long to use CV LLM.")
  39. return [doc]
  40. try:
  41. callback(0.4, "Use CV LLM to describe the picture.")
  42. ans = cv_mdl.describe(binary)
  43. callback(0.8, "CV LLM respoond: %s ..." % ans[:32])
  44. txt += "\n" + ans
  45. tokenize(doc, txt, eng)
  46. return [doc]
  47. except Exception as e:
  48. callback(prog=-1, msg=str(e))
  49. return []