
presentation.py

import copy
import re
from io import BytesIO

from pptx import Presentation

from rag.nlp import huqie
from rag.parser import tokenize, is_english
from rag.parser.pdf_parser import HuParser


class Ppt(object):
    def __init__(self):
        super().__init__()

    def __extract(self, shape):
        # Table shape (MSO shape type 19): render each data row as
        # "column header: cell value" pairs joined with "; ".
        if shape.shape_type == 19:
            tb = shape.table
            rows = []
            for i in range(1, len(tb.rows)):
                rows.append("; ".join(
                    [tb.cell(0, j).text + ": " + tb.cell(i, j).text
                     for j in range(len(tb.columns)) if tb.cell(i, j)]))
            return "\n".join(rows)

        if shape.has_text_frame:
            return shape.text_frame.text

        # Group shape (type 6): recurse into the children and join their texts.
        if shape.shape_type == 6:
            texts = []
            for p in shape.shapes:
                t = self.__extract(p)
                if t:
                    texts.append(t)
            return "\n".join(texts)
    def __call__(self, fnm, from_page, to_page, callback=None):
        ppt = Presentation(fnm) if isinstance(
            fnm, str) else Presentation(BytesIO(fnm))
        txts = []
        self.total_page = len(ppt.slides)
        # Collect the text of every shape on the slides in the requested range.
        # (python-pptx slide collections are indexed, not sliceable, so bound
        # the loop explicitly.)
        for i, slide in enumerate(ppt.slides):
            if i < from_page:
                continue
            if i >= to_page:
                break
            texts = []
            for shape in slide.shapes:
                txt = self.__extract(shape)
                if txt:
                    texts.append(txt)
            txts.append("\n".join(texts))
        callback(0.5, "Text extraction finished.")

        import aspose.slides as slides
        import aspose.pydrawing as drawing
        # Render a JPEG thumbnail of each slide so every chunk keeps an image.
        imgs = []
        with slides.Presentation(
                fnm if isinstance(fnm, str) else BytesIO(fnm)) as presentation:
            for i, slide in enumerate(presentation.slides[from_page: to_page]):
                buffered = BytesIO()
                slide.get_thumbnail(0.5, 0.5).save(
                    buffered, drawing.imaging.ImageFormat.jpeg)
                imgs.append(buffered.getvalue())
        assert len(imgs) == len(txts), \
            "Slides text and image do not match: {} vs. {}".format(
                len(imgs), len(txts))
        callback(0.9, "Image extraction finished.")
        self.is_english = is_english(txts)
        return [(txts[i], imgs[i]) for i in range(len(txts))]


class Pdf(HuParser):
    def __init__(self):
        super().__init__()

    def __garbage(self, txt):
        # Drop OCR boxes that are only digits/punctuation (page numbers,
        # figure labels) or too short to carry meaning.
        txt = txt.lower().strip()
        if re.match(r"[0-9\.,%/-]+$", txt):
            return True
        if len(txt) < 3:
            return True
        return False
    def __call__(self, filename, binary=None, from_page=0,
                 to_page=100000, zoomin=3, callback=None):
        self.__images__(filename if not binary else binary,
                        zoomin, from_page, to_page)
        callback(0.8, "Page {}~{}: OCR finished".format(
            from_page, min(to_page, self.total_page)))
        assert len(self.boxes) == len(self.page_images), "{} vs. {}".format(
            len(self.boxes), len(self.page_images))
        res = []
        #################### More precise (slower) alternative ####################
        # self._layouts_paddle(zoomin)
        # self._text_merge()
        # pages = {}
        # for b in self.boxes:
        #     if self.__garbage(b["text"]):
        #         continue
        #     if b["page_number"] not in pages:
        #         pages[b["page_number"]] = []
        #     pages[b["page_number"]].append(b["text"])
        # for i, lines in pages.items():
        #     res.append(("\n".join(lines), self.page_images[i - 1]))
        # return res
        ############################################################################
        # One chunk per page: join every OCR box on the page that is not garbage.
        for i in range(len(self.boxes)):
            lines = "\n".join([b["text"] for b in self.boxes[i]
                               if not self.__garbage(b["text"])])
            res.append((lines, self.page_images[i]))
        callback(0.9, "Page {}~{}: Parsing finished".format(
            from_page, min(to_page, self.total_page)))
        return res


def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None, **kwargs):
    """Split a pptx/pdf into one chunk per slide/page, each carrying its
    tokenized text and an image of that slide/page."""
    doc = {
        "docnm_kwd": filename,
        "title_tks": huqie.qie(re.sub(r"\.[a-zA-Z]+$", "", filename))
    }
    doc["title_sm_tks"] = huqie.qieqie(doc["title_tks"])
    res = []
    if re.search(r"\.pptx?$", filename, re.IGNORECASE):
        ppt_parser = Ppt()
        for txt, img in ppt_parser(
                filename if not binary else binary, from_page, 1000000, callback):
            d = copy.deepcopy(doc)
            d["image"] = img
            tokenize(d, txt, ppt_parser.is_english)
            res.append(d)
        return res
    elif re.search(r"\.pdf$", filename, re.IGNORECASE):
        pdf_parser = Pdf()
        for txt, img in pdf_parser(
                filename if not binary else binary,
                from_page=from_page, to_page=to_page, callback=callback):
            d = copy.deepcopy(doc)
            d["image"] = img
            tokenize(d, txt, pdf_parser.is_english)
            res.append(d)
        return res

    raise NotImplementedError(
        "File type not supported yet (only pptx and pdf are supported).")


if __name__ == "__main__":
    import sys

    def dummy(a, b):
        pass

    chunk(sys.argv[1], callback=dummy)
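    # A minimal usage sketch, assuming this module is run inside the repository
    # so the rag.* imports resolve: chunk() also accepts the raw file bytes via
    # `binary` (e.g. for an uploaded document), and the callback receives
    # (progress, message) just like dummy() above.
    #
    #     with open(sys.argv[1], "rb") as f:
    #         parts = chunk(sys.argv[1], binary=f.read(), callback=dummy)
    #     print("chunks:", len(parts))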