Mirror of https://github.com/DS4SD/docling.git, synced 2025-07-31 22:44:27 +00:00
Merge remote-tracking branch 'origin/main' into shubham/docitem-images
Signed-off-by: Shubham Gupta <26436285+sh-gupta@users.noreply.github.com>
This commit is contained in: commit 6c518bcdf4
docling/backend/msexcel_backend.py (new file, 374 lines)
@@ -0,0 +1,374 @@
import logging
from io import BytesIO
from pathlib import Path
from typing import Dict, Set, Tuple, Union

from docling_core.types.doc import (
    DoclingDocument,
    DocumentOrigin,
    GroupLabel,
    ImageRef,
    TableCell,
    TableData,
)

# from lxml import etree
from openpyxl import Workbook, load_workbook
from openpyxl.cell.cell import Cell
from openpyxl.drawing.image import Image
from openpyxl.worksheet.worksheet import Worksheet

from docling.backend.abstract_backend import DeclarativeDocumentBackend
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import InputDocument

_log = logging.getLogger(__name__)

from typing import Any, List

from pydantic import BaseModel


class ExcelCell(BaseModel):
    row: int
    col: int
    text: str
    row_span: int
    col_span: int


class ExcelTable(BaseModel):
    num_rows: int
    num_cols: int
    data: List[ExcelCell]


class MsExcelDocumentBackend(DeclarativeDocumentBackend):

    def __init__(self, in_doc: "InputDocument", path_or_stream: Union[BytesIO, Path]):
        super().__init__(in_doc, path_or_stream)

        # Initialise the parents for the hierarchy
        self.max_levels = 10

        self.parents: Dict[int, Any] = {}
        for i in range(-1, self.max_levels):
            self.parents[i] = None

        self.workbook = None
        try:
            if isinstance(self.path_or_stream, BytesIO):
                self.workbook = load_workbook(filename=self.path_or_stream)

            elif isinstance(self.path_or_stream, Path):
                self.workbook = load_workbook(filename=str(self.path_or_stream))

            self.valid = True
        except Exception as e:
            self.valid = False

            raise RuntimeError(
                f"MsExcelDocumentBackend could not load document with hash {self.document_hash}"
            ) from e

    def is_valid(self) -> bool:
        _log.info(f"valid: {self.valid}")
        return self.valid

    @classmethod
    def supports_pagination(cls) -> bool:
        return True

    def unload(self):
        if isinstance(self.path_or_stream, BytesIO):
            self.path_or_stream.close()

        self.path_or_stream = None

    @classmethod
    def supported_formats(cls) -> Set[InputFormat]:
        return {InputFormat.XLSX}

    def convert(self) -> DoclingDocument:
        # Parses the XLSX into a structured document model.

        origin = DocumentOrigin(
            filename=self.file.name or "file.xlsx",
            mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
            binary_hash=self.document_hash,
        )

        doc = DoclingDocument(name=self.file.stem or "file.xlsx", origin=origin)

        if self.is_valid():
            doc = self._convert_workbook(doc)
        else:
            raise RuntimeError(
                f"Cannot convert doc with {self.document_hash} because the backend failed to init."
            )

        return doc

    def _convert_workbook(self, doc: DoclingDocument) -> DoclingDocument:

        if self.workbook is not None:

            # Iterate over all sheets
            for sheet_name in self.workbook.sheetnames:
                _log.info(f"Processing sheet: {sheet_name}")

                # Access the sheet by name
                sheet = self.workbook[sheet_name]

                self.parents[0] = doc.add_group(
                    parent=None,
                    label=GroupLabel.SECTION,
                    name=f"sheet: {sheet_name}",
                )

                doc = self._convert_sheet(doc, sheet)
        else:
            _log.error("Workbook is not initialized.")

        return doc

    def _convert_sheet(self, doc: DoclingDocument, sheet: Worksheet):

        doc = self._find_tables_in_sheet(doc, sheet)

        doc = self._find_images_in_sheet(doc, sheet)

        return doc

    def _find_tables_in_sheet(self, doc: DoclingDocument, sheet: Worksheet):

        tables = self._find_data_tables(sheet)

        for excel_table in tables:
            num_rows = excel_table.num_rows
            num_cols = excel_table.num_cols

            table_data = TableData(
                num_rows=num_rows,
                num_cols=num_cols,
                table_cells=[],
            )

            for excel_cell in excel_table.data:

                cell = TableCell(
                    text=excel_cell.text,
                    row_span=excel_cell.row_span,
                    col_span=excel_cell.col_span,
                    start_row_offset_idx=excel_cell.row,
                    end_row_offset_idx=excel_cell.row + excel_cell.row_span,
                    start_col_offset_idx=excel_cell.col,
                    end_col_offset_idx=excel_cell.col + excel_cell.col_span,
                    col_header=False,
                    row_header=False,
                )
                table_data.table_cells.append(cell)

            doc.add_table(data=table_data, parent=self.parents[0])

        return doc

    def _find_data_tables(self, sheet: Worksheet):
        """
        Find all compact rectangular data tables in a sheet.
        """
        # _log.info("find_data_tables")

        tables = []  # List to store found tables
        visited: set[Tuple[int, int]] = set()  # Track already visited cells

        # Iterate over all cells in the sheet
        for ri, row in enumerate(sheet.iter_rows(values_only=False)):
            for rj, cell in enumerate(row):

                # Skip empty or already visited cells
                if cell.value is None or (ri, rj) in visited:
                    continue

                # If the cell starts a new table, find its bounds
                table_bounds, visited_cells = self._find_table_bounds(
                    sheet, ri, rj, visited
                )

                visited.update(visited_cells)  # Mark these cells as visited
                tables.append(table_bounds)

        return tables

    def _find_table_bounds(
        self,
        sheet: Worksheet,
        start_row: int,
        start_col: int,
        visited: set[Tuple[int, int]],
    ):
        """
        Determine the bounds of a compact rectangular table.
        Returns:
        - A dictionary with the bounds and data.
        - A set of visited cell coordinates.
        """
        _log.info("find_table_bounds")

        max_row = self._find_table_bottom(sheet, start_row, start_col)
        max_col = self._find_table_right(sheet, start_row, start_col)

        # Collect the data within the bounds
        data = []
        visited_cells = set()
        for ri in range(start_row, max_row + 1):
            for rj in range(start_col, max_col + 1):

                cell = sheet.cell(row=ri + 1, column=rj + 1)  # 1-based indexing

                # Check if the cell belongs to a merged range
                row_span = 1
                col_span = 1

                # _log.info(sheet.merged_cells.ranges)
                for merged_range in sheet.merged_cells.ranges:

                    if (
                        merged_range.min_row <= ri + 1
                        and ri + 1 <= merged_range.max_row
                        and merged_range.min_col <= rj + 1
                        and rj + 1 <= merged_range.max_col
                    ):

                        row_span = merged_range.max_row - merged_range.min_row + 1
                        col_span = merged_range.max_col - merged_range.min_col + 1
                        break

                if (ri, rj) not in visited_cells:
                    data.append(
                        ExcelCell(
                            row=ri - start_row,
                            col=rj - start_col,
                            text=str(cell.value),
                            row_span=row_span,
                            col_span=col_span,
                        )
                    )
                    # _log.info(f"cell: {ri}, {rj} -> {ri - start_row}, {rj - start_col}, {row_span}, {col_span}: {str(cell.value)}")

                # Mark all cells in the span as visited
                for span_row in range(ri, ri + row_span):
                    for span_col in range(rj, rj + col_span):
                        visited_cells.add((span_row, span_col))

        return (
            ExcelTable(
                num_rows=max_row + 1 - start_row,
                num_cols=max_col + 1 - start_col,
                data=data,
            ),
            visited_cells,
        )

    def _find_table_bottom(self, sheet: Worksheet, start_row: int, start_col: int):
        """Function to find the bottom boundary of the table"""

        max_row = start_row

        while max_row < sheet.max_row - 1:
            # Get the cell value or check if it is part of a merged cell
            cell = sheet.cell(row=max_row + 2, column=start_col + 1)

            # Check if the cell is part of a merged range
            merged_range = next(
                (mr for mr in sheet.merged_cells.ranges if cell.coordinate in mr),
                None,
            )

            if cell.value is None and not merged_range:
                break  # Stop if the cell is empty and not merged

            # Expand max_row to include the merged range if applicable
            if merged_range:
                max_row = max(max_row, merged_range.max_row - 1)
            else:
                max_row += 1

        return max_row

    def _find_table_right(self, sheet: Worksheet, start_row: int, start_col: int):
        """Function to find the right boundary of the table"""

        max_col = start_col

        while max_col < sheet.max_column - 1:
            # Get the cell value or check if it is part of a merged cell
            cell = sheet.cell(row=start_row + 1, column=max_col + 2)

            # Check if the cell is part of a merged range
            merged_range = next(
                (mr for mr in sheet.merged_cells.ranges if cell.coordinate in mr),
                None,
            )

            if cell.value is None and not merged_range:
                break  # Stop if the cell is empty and not merged

            # Expand max_col to include the merged range if applicable
            if merged_range:
                max_col = max(max_col, merged_range.max_col - 1)
            else:
                max_col += 1

        return max_col

    def _find_images_in_sheet(
        self, doc: DoclingDocument, sheet: Worksheet
    ) -> DoclingDocument:

        # FIXME: mypy does not agree with _images ...
        """
        # Iterate over images in the sheet
        for idx, image in enumerate(sheet._images):  # Access embedded images

            image_bytes = BytesIO(image.ref.blob)
            pil_image = Image.open(image_bytes)

            doc.add_picture(
                parent=self.parents[0],
                image=ImageRef.from_pil(image=pil_image, dpi=72),
                caption=None,
            )
        """

        # FIXME: mypy does not agree with _charts ...
        """
        for idx, chart in enumerate(sheet._charts):  # Access embedded charts
            chart_path = f"chart_{idx + 1}.png"
            _log.info(
                f"Chart found, but dynamic rendering is required for: {chart_path}"
            )

            _log.info(f"Chart {idx + 1}:")

            # Chart type
            _log.info(f"Type: {type(chart).__name__}")

            # Title
            if chart.title:
                _log.info(f"Title: {chart.title}")
            else:
                _log.info("No title")

            # Data series
            for series in chart.series:
                _log.info(" => series ...")
                _log.info(f"Data Series: {series.title}")
                _log.info(f"Values: {series.values}")
                _log.info(f"Categories: {series.categories}")

            # Position
            # _log.info(f"Anchor Cell: {chart.anchor}")
        """

        return doc
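The merged-cell handling in `_find_table_bounds` reads spans directly from openpyxl's `merged_cells.ranges`. A minimal standalone sketch of that mapping (openpyxl only; the workbook and the 2x2 merge are invented for illustration):

from openpyxl import Workbook

wb = Workbook()
ws = wb.active
ws["A1"] = "header"
ws.merge_cells("A1:B2")  # a 2x2 merged block anchored at A1

ri, rj = 0, 0  # zero-based coordinates of the anchor cell
row_span = col_span = 1
for merged_range in ws.merged_cells.ranges:
    if (
        merged_range.min_row <= ri + 1 <= merged_range.max_row
        and merged_range.min_col <= rj + 1 <= merged_range.max_col
    ):
        row_span = merged_range.max_row - merged_range.min_row + 1
        col_span = merged_range.max_col - merged_range.min_col + 1
        break

print(row_span, col_span)  # expected: 2 2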
docling/backend/mspowerpoint_backend.py
@@ -10,11 +10,13 @@ from docling_core.types.doc import (
     DoclingDocument,
     DocumentOrigin,
     GroupLabel,
+    ImageRef,
     ProvenanceItem,
     Size,
     TableCell,
     TableData,
 )
+from PIL import Image
 from pptx import Presentation
 from pptx.enum.shapes import MSO_SHAPE_TYPE, PP_PLACEHOLDER
@@ -268,9 +270,22 @@ class MsPowerpointDocumentBackend(DeclarativeDocumentBackend, PaginatedDocumentB
         return
 
     def handle_pictures(self, shape, parent_slide, slide_ind, doc):
+        # Get the image bytes
+        image = shape.image
+        image_bytes = image.blob
+        im_dpi, _ = image.dpi
+
+        # Open it with PIL
+        pil_image = Image.open(BytesIO(image_bytes))
+
         # shape has picture
         prov = self.generate_prov(shape, slide_ind, "")
-        doc.add_picture(parent=parent_slide, caption=None, prov=prov)
+        doc.add_picture(
+            parent=parent_slide,
+            image=ImageRef.from_pil(image=pil_image, dpi=im_dpi),
+            caption=None,
+            prov=prov,
+        )
         return
 
     def handle_tables(self, shape, parent_slide, slide_ind, doc):
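The added lines pull the picture bytes and DPI out of python-pptx and wrap them with PIL before calling `doc.add_picture`. A minimal standalone sketch of that extraction step (python-pptx and Pillow only, using the sample deck added later in this commit):

from io import BytesIO

from PIL import Image
from pptx import Presentation
from pptx.enum.shapes import MSO_SHAPE_TYPE

prs = Presentation("tests/data/pptx/powerpoint_with_image.pptx")
for slide in prs.slides:
    for shape in slide.shapes:
        if shape.shape_type == MSO_SHAPE_TYPE.PICTURE:
            image = shape.image  # the embedded image part
            pil_image = Image.open(BytesIO(image.blob))
            print(pil_image.size, image.dpi)  # pixel size and (x, y) DPI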
docling/datamodel/base_models.py
@@ -32,6 +32,7 @@ class InputFormat(str, Enum):
     PDF = "pdf"
     ASCIIDOC = "asciidoc"
     MD = "md"
+    XLSX = "xlsx"
 
 
 class OutputFormat(str, Enum):
@@ -49,6 +50,7 @@ FormatToExtensions: Dict[InputFormat, List[str]] = {
     InputFormat.HTML: ["html", "htm", "xhtml"],
     InputFormat.IMAGE: ["jpg", "jpeg", "png", "tif", "tiff", "bmp"],
     InputFormat.ASCIIDOC: ["adoc", "asciidoc", "asc"],
+    InputFormat.XLSX: ["xlsx"],
 }
 
 FormatToMimeType: Dict[InputFormat, List[str]] = {
@@ -72,7 +74,11 @@ FormatToMimeType: Dict[InputFormat, List[str]] = {
     InputFormat.PDF: ["application/pdf"],
     InputFormat.ASCIIDOC: ["text/asciidoc"],
     InputFormat.MD: ["text/markdown", "text/x-markdown"],
+    InputFormat.XLSX: [
+        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
+    ],
 }
 
 MimeTypeToFormat = {
     mime: fmt for fmt, mimes in FormatToMimeType.items() for mime in mimes
 }
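With `XLSX` registered above, the extension and MIME maps resolve spreadsheets to the new format. A quick sanity-check sketch (assuming these dictionaries are importable from `docling.datamodel.base_models`, as the surrounding diff context suggests):

from docling.datamodel.base_models import (
    FormatToExtensions,
    InputFormat,
    MimeTypeToFormat,
)

assert "xlsx" in FormatToExtensions[InputFormat.XLSX]
assert (
    MimeTypeToFormat[
        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
    ]
    is InputFormat.XLSX
)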
docling/document_converter.py
@@ -12,6 +12,7 @@ from docling.backend.asciidoc_backend import AsciiDocBackend
 from docling.backend.docling_parse_backend import DoclingParseDocumentBackend
 from docling.backend.html_backend import HTMLDocumentBackend
 from docling.backend.md_backend import MarkdownDocumentBackend
+from docling.backend.msexcel_backend import MsExcelDocumentBackend
 from docling.backend.mspowerpoint_backend import MsPowerpointDocumentBackend
 from docling.backend.msword_backend import MsWordDocumentBackend
 from docling.datamodel.base_models import ConversionStatus, DocumentStream, InputFormat
@@ -44,6 +45,11 @@ class FormatOption(BaseModel):
         return self
 
 
+class ExcelFormatOption(FormatOption):
+    pipeline_cls: Type = SimplePipeline
+    backend: Type[AbstractDocumentBackend] = MsExcelDocumentBackend
+
+
 class WordFormatOption(FormatOption):
     pipeline_cls: Type = SimplePipeline
     backend: Type[AbstractDocumentBackend] = MsWordDocumentBackend
@@ -80,6 +86,9 @@ class ImageFormatOption(FormatOption):
 
 
 _format_to_default_options = {
+    InputFormat.XLSX: FormatOption(
+        pipeline_cls=SimplePipeline, backend=MsExcelDocumentBackend
+    ),
     InputFormat.DOCX: FormatOption(
         pipeline_cls=SimplePipeline, backend=MsWordDocumentBackend
     ),
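With `MsExcelDocumentBackend` registered as the default XLSX option, conversion goes through the same path the new test exercises further below. A minimal usage sketch (the workbook path is the sample added under tests/data/xlsx in this commit):

from pathlib import Path

from docling.datamodel.base_models import InputFormat
from docling.document_converter import DocumentConverter

converter = DocumentConverter(allowed_formats=[InputFormat.XLSX])
result = converter.convert(Path("tests/data/xlsx/test-01.xlsx"))
print(result.document.export_to_markdown())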
poetry.lock (generated, 1913 lines changed; diff suppressed because it is too large)
pyproject.toml
@@ -47,6 +47,7 @@ python-pptx = "^1.0.2"
 beautifulsoup4 = "^4.12.3"
 pandas = "^2.1.4"
 marko = "^2.1.2"
+openpyxl = "^3.1.5"
 
 [tool.poetry.group.dev.dependencies]
 black = {extras = ["jupyter"], version = "^24.4.2"}
@@ -65,6 +66,7 @@ pandas-stubs = "^2.1.4.231227"
 ipykernel = "^6.29.5"
 ipywidgets = "^8.1.5"
 nbqa = "^1.9.0"
+types-openpyxl = "^3.1.5.20241114"
 
 [tool.poetry.group.docs.dependencies]
 mkdocs-material = "^9.5.40"
tests/data/groundtruth/docling_v2/powerpoint_sample.pptx.itxt (new file, 35 lines)
@@ -0,0 +1,35 @@
item-0 at level 0: unspecified: group _root_
item-1 at level 1: chapter: group slide-0
item-2 at level 2: title: Test Table Slide
item-3 at level 2: paragraph: With footnote
item-4 at level 2: table with [9x7]
item-5 at level 1: chapter: group slide-1
item-6 at level 2: title: Second slide title
item-7 at level 2: paragraph: Let’s introduce a list
item-8 at level 2: paragraph: With foo
item-9 at level 2: paragraph: Bar
item-10 at level 2: paragraph: And baz things
item-11 at level 2: paragraph: A rectangle shape with this text inside.
item-12 at level 1: chapter: group slide-2
item-13 at level 2: ordered_list: group list
item-14 at level 3: list_item: List item4
item-15 at level 3: list_item: List item5
item-16 at level 3: list_item: List item6
item-17 at level 2: list: group list
item-18 at level 3: list_item: I1
item-19 at level 3: list_item: I2
item-20 at level 3: list_item: I3
item-21 at level 3: list_item: I4
item-22 at level 2: paragraph: Some info:
item-23 at level 2: list: group list
item-24 at level 3: list_item: Item A
item-25 at level 3: list_item: Item B
item-26 at level 2: paragraph: Maybe a list?
item-27 at level 2: ordered_list: group list
item-28 at level 3: list_item: List1
item-29 at level 3: list_item: List2
item-30 at level 3: list_item: List3
item-31 at level 2: list: group list
item-32 at level 3: list_item: l1
item-33 at level 3: list_item: l2
item-34 at level 3: list_item: l3
tests/data/groundtruth/docling_v2/powerpoint_sample.pptx.json (new file, 2133 lines; diff suppressed because it is too large)
tests/data/groundtruth/docling_v2/powerpoint_sample.pptx.md (new file, 50 lines)
@@ -0,0 +1,50 @@
# Test Table Slide

With footnote

|    | Class1          | Class1          | Class1   | Class2   | Class2   | Class2   |
|----|-----------------|-----------------|----------|----------|----------|----------|
|    | A merged with B | A merged with B | C        | A        | B        | C        |
| R1 | True            | False           |          | False    | True     | True     |
| R2 |                 |                 | True     | False    |          |          |
| R3 | False           |                 |          |          | False    |          |
| R3 |                 | True            |          | True     |          |          |
| R4 |                 |                 | False    |          | False    |          |
| R4 |                 | True            |          | True     | False    | False    |
| R4 | True            | False           | True     | False    | True     | False    |

# Second slide title

Let’s introduce a list

With foo

Bar

And baz things

A rectangle shape with this text inside.

1. List item4
2. List item5
3. List item6

- I1
- I2
- I3
- I4

Some info:

- Item A
- Item B

Maybe a list?

1. List1
2. List2
3. List3

- l1
- l2
- l3
tests/data/groundtruth/docling_v2/powerpoint_with_image.pptx.itxt (new file, 5 lines)
@@ -0,0 +1,5 @@
item-0 at level 0: unspecified: group _root_
item-1 at level 1: chapter: group slide-0
item-2 at level 2: title: Docling
item-3 at level 2: paragraph: Image test
item-4 at level 2: picture
tests/data/groundtruth/docling_v2/powerpoint_with_image.pptx.json (new file; diff suppressed because one or more lines are too long)
tests/data/groundtruth/docling_v2/powerpoint_with_image.pptx.md (new file, 5 lines)
@@ -0,0 +1,5 @@
# Docling

Image test

<!-- image -->
tests/data/groundtruth/docling_v2/test-01.xlsx.itxt (new file, 10 lines)
@@ -0,0 +1,10 @@
item-0 at level 0: unspecified: group _root_
item-1 at level 1: section: group sheet: Sheet1
item-2 at level 2: table with [7x3]
item-3 at level 1: section: group sheet: Sheet2
item-4 at level 2: table with [9x4]
item-5 at level 2: table with [5x3]
item-6 at level 2: table with [5x3]
item-7 at level 1: section: group sheet: Sheet3
item-8 at level 2: table with [7x3]
item-9 at level 2: table with [7x3]
tests/data/groundtruth/docling_v2/test-01.xlsx.json (new file, 3240 lines; diff suppressed because it is too large)
tests/data/groundtruth/docling_v2/test-01.xlsx.md (new file, 51 lines)
@@ -0,0 +1,51 @@
| first    | second    | third   |
|----------|-----------|---------|
| 1        | 5         | 9       |
| 2        | 4         | 6       |
| 3        | 3         | 3       |
| 4        | 2         | 0       |
| 5        | 1         | -3      |
| 6        | 0         | -6      |

| col-1   | col-2   | col-3   | col-4   |
|---------|---------|---------|---------|
| 1       | 2       | 3       | 4       |
| 2       | 4       | 6       | 8       |
| 3       | 6       | 9       | 12      |
| 4       | 8       | 12      | 16      |
| 5       | 10      | 15      | 20      |
| 6       | 12      | 18      | 24      |
| 7       | 14      | 21      | 28      |
| 8       | 16      | 24      | 32      |

| col-1   | col-2   | col-3   |
|---------|---------|---------|
| 1       | 2       | 3       |
| 2       | 4       | 6       |
| 3       | 6       | 9       |
| 4       | 8       | 12      |

| col-1   | col-2   | col-3   |
|---------|---------|---------|
| 1       | 2       | 3       |
| 2       | 4       | 6       |
| 3       | 6       | 9       |
| 4       | 8       | 12      |

| first    | header   | header   |
|----------|----------|----------|
| first    | second   | third    |
| 1        | 2        | 3        |
| 3        | 4        | 5        |
| 3        | 6        | 7        |
| 8        | 9        | 9        |
| 10       | 9        | 9        |

| first (f)   | header (f)   | header (f)   |
|-------------|--------------|--------------|
| first (f)   | second       | third        |
| 1           | 2            | 3            |
| 3           | 4            | 5            |
| 3           | 6            | 7            |
| 8           | 9            | 9            |
| 10          | 9            | 9            |
tests/data/pptx/powerpoint_with_image.pptx (new binary file, not shown)
tests/data/xlsx/test-01.xlsx (new binary file, not shown)
tests/test_backend_msexcel.py (new file, 77 lines)
@@ -0,0 +1,77 @@
import json
import os
from pathlib import Path

from docling.backend.msword_backend import MsWordDocumentBackend
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import (
    ConversionResult,
    InputDocument,
    SectionHeaderItem,
)
from docling.document_converter import DocumentConverter

GENERATE = False


def get_xlsx_paths():

    # Define the directory you want to search
    directory = Path("./tests/data/xlsx/")

    # List all XLSX files in the directory and its subdirectories
    xlsx_files = sorted(directory.rglob("*.xlsx"))
    return xlsx_files


def get_converter():

    converter = DocumentConverter(allowed_formats=[InputFormat.XLSX])

    return converter


def verify_export(pred_text: str, gtfile: str):

    if not os.path.exists(gtfile) or GENERATE:
        with open(gtfile, "w") as fw:
            fw.write(pred_text)

        return True

    else:
        with open(gtfile, "r") as fr:
            true_text = fr.read()

        assert pred_text == true_text, "pred_itxt==true_itxt"
        return pred_text == true_text


def test_e2e_xlsx_conversions():

    xlsx_paths = get_xlsx_paths()
    converter = get_converter()

    for xlsx_path in xlsx_paths:
        # print(f"converting {xlsx_path}")

        gt_path = (
            xlsx_path.parent.parent / "groundtruth" / "docling_v2" / xlsx_path.name
        )

        conv_result: ConversionResult = converter.convert(xlsx_path)

        doc: DoclingDocument = conv_result.document

        pred_md: str = doc.export_to_markdown()
        assert verify_export(pred_md, str(gt_path) + ".md"), "export to md"

        pred_itxt: str = doc._export_to_indented_text(
            max_text_len=70, explicit_tables=False
        )
        assert verify_export(
            pred_itxt, str(gt_path) + ".itxt"
        ), "export to indented-text"

        pred_json: str = json.dumps(doc.export_to_dict(), indent=2)
        assert verify_export(pred_json, str(gt_path) + ".json"), "export to json"
tests/test_backend_pptx.py (new file, 72 lines)
@@ -0,0 +1,72 @@
import json
import os
from pathlib import Path

from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import ConversionResult
from docling.document_converter import DocumentConverter

GENERATE = False


def get_pptx_paths():

    # Define the directory you want to search
    directory = Path("./tests/data/pptx/")

    # List all PPTX files in the directory and its subdirectories
    pptx_files = sorted(directory.rglob("*.pptx"))
    return pptx_files


def get_converter():

    converter = DocumentConverter(allowed_formats=[InputFormat.PPTX])

    return converter


def verify_export(pred_text: str, gtfile: str):

    if not os.path.exists(gtfile) or GENERATE:
        with open(gtfile, "w") as fw:
            fw.write(pred_text)

        return True

    else:
        with open(gtfile, "r") as fr:
            true_text = fr.read()

        assert pred_text == true_text, "pred_itxt==true_itxt"
        return pred_text == true_text


def test_e2e_pptx_conversions():

    pptx_paths = get_pptx_paths()
    converter = get_converter()

    for pptx_path in pptx_paths:
        # print(f"converting {pptx_path}")

        gt_path = (
            pptx_path.parent.parent / "groundtruth" / "docling_v2" / pptx_path.name
        )

        conv_result: ConversionResult = converter.convert(pptx_path)

        doc: DoclingDocument = conv_result.document

        pred_md: str = doc.export_to_markdown()
        assert verify_export(pred_md, str(gt_path) + ".md"), "export to md"

        pred_itxt: str = doc._export_to_indented_text(
            max_text_len=70, explicit_tables=False
        )
        assert verify_export(
            pred_itxt, str(gt_path) + ".itxt"
        ), "export to indented-text"

        pred_json: str = json.dumps(doc.export_to_dict(), indent=2)
        assert verify_export(pred_json, str(gt_path) + ".json"), "export to json"