Mirror of https://github.com/DS4SD/docling.git (synced 2025-12-08 20:58:11 +00:00)
fix: refine conversion result (#52)
- fields `output` & `assembled` need not be optional
- introduced "synonym" `ConversionResult` for `ConvertedDocument` & deprecated the latter

Signed-off-by: Panos Vagenas <35837085+vagenas@users.noreply.github.com>
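The hunks below only migrate the bundled examples to the new name; the synonym itself lives in `docling.datamodel.document` and is not part of this diff. As a rough, hypothetical sketch of one common way to keep such a deprecated name importable (a module-level `__getattr__` per PEP 562 — not necessarily what docling actually does):

# Hypothetical sketch only; the real definition in docling.datamodel.document
# is not shown in this commit.
import warnings


class ConversionResult:
    """New, preferred name for the conversion result (per the commit message)."""


def __getattr__(name: str):
    # PEP 562 module-level __getattr__: keep `ConvertedDocument` importable,
    # but emit a DeprecationWarning pointing users to ConversionResult.
    if name == "ConvertedDocument":
        warnings.warn(
            "ConvertedDocument is deprecated, use ConversionResult instead",
            DeprecationWarning,
            stacklevel=2,
        )
        return ConversionResult
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")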
@@ -5,14 +5,14 @@ from pathlib import Path
 from typing import Iterable
 
 from docling.datamodel.base_models import ConversionStatus, PipelineOptions
-from docling.datamodel.document import ConvertedDocument, DocumentConversionInput
+from docling.datamodel.document import ConversionResult, DocumentConversionInput
 from docling.document_converter import DocumentConverter
 
 _log = logging.getLogger(__name__)
 
 
 def export_documents(
-    converted_docs: Iterable[ConvertedDocument],
+    conv_results: Iterable[ConversionResult],
     output_dir: Path,
 ):
     output_dir.mkdir(parents=True, exist_ok=True)
@@ -21,27 +21,27 @@ def export_documents(
     failure_count = 0
     partial_success_count = 0
 
-    for doc in converted_docs:
-        if doc.status == ConversionStatus.SUCCESS:
+    for conv_res in conv_results:
+        if conv_res.status == ConversionStatus.SUCCESS:
             success_count += 1
-            doc_filename = doc.input.file.stem
+            doc_filename = conv_res.input.file.stem
 
             # Export Deep Search document JSON format:
             with (output_dir / f"{doc_filename}.json").open("w") as fp:
-                fp.write(json.dumps(doc.render_as_dict()))
+                fp.write(json.dumps(conv_res.render_as_dict()))
 
             # Export Markdown format:
             with (output_dir / f"{doc_filename}.md").open("w") as fp:
-                fp.write(doc.render_as_markdown())
-        elif doc.status == ConversionStatus.PARTIAL_SUCCESS:
+                fp.write(conv_res.render_as_markdown())
+        elif conv_res.status == ConversionStatus.PARTIAL_SUCCESS:
             _log.info(
-                f"Document {doc.input.file} was partially converted with the following errors:"
+                f"Document {conv_res.input.file} was partially converted with the following errors:"
             )
-            for item in doc.errors:
+            for item in conv_res.errors:
                 _log.info(f"\t{item.error_message}")
             partial_success_count += 1
         else:
-            _log.info(f"Document {doc.input.file} failed to convert.")
+            _log.info(f"Document {conv_res.input.file} failed to convert.")
             failure_count += 1
 
     _log.info(
@@ -72,8 +72,8 @@ def main():
 
     start_time = time.time()
 
-    converted_docs = doc_converter.convert(input)
-    export_documents(converted_docs, output_dir=Path("./scratch"))
+    conv_results = doc_converter.convert(input)
+    export_documents(conv_results, output_dir=Path("./scratch"))
 
     end_time = time.time() - start_time
 
@@ -7,14 +7,14 @@ from typing import Iterable
 from docling.backend.docling_parse_backend import DoclingParseDocumentBackend
 from docling.backend.pypdfium2_backend import PyPdfiumDocumentBackend
 from docling.datamodel.base_models import ConversionStatus, PipelineOptions
-from docling.datamodel.document import ConvertedDocument, DocumentConversionInput
+from docling.datamodel.document import ConversionResult, DocumentConversionInput
 from docling.document_converter import DocumentConverter
 
 _log = logging.getLogger(__name__)
 
 
 def export_documents(
-    converted_docs: Iterable[ConvertedDocument],
+    conv_results: Iterable[ConversionResult],
     output_dir: Path,
 ):
     output_dir.mkdir(parents=True, exist_ok=True)
@@ -22,20 +22,20 @@ def export_documents(
     success_count = 0
     failure_count = 0
 
-    for doc in converted_docs:
-        if doc.status == ConversionStatus.SUCCESS:
+    for conv_res in conv_results:
+        if conv_res.status == ConversionStatus.SUCCESS:
             success_count += 1
-            doc_filename = doc.input.file.stem
+            doc_filename = conv_res.input.file.stem
 
             # Export Deep Search document JSON format:
             with (output_dir / f"{doc_filename}.json").open("w") as fp:
-                fp.write(json.dumps(doc.render_as_dict()))
+                fp.write(json.dumps(conv_res.render_as_dict()))
 
             # Export Markdown format:
             with (output_dir / f"{doc_filename}.md").open("w") as fp:
-                fp.write(doc.render_as_markdown())
+                fp.write(conv_res.render_as_markdown())
         else:
-            _log.info(f"Document {doc.input.file} failed to convert.")
+            _log.info(f"Document {conv_res.input.file} failed to convert.")
             failure_count += 1
 
     _log.info(
@@ -113,8 +113,8 @@ def main():
 
     start_time = time.time()
 
-    converted_docs = doc_converter.convert(input)
-    export_documents(converted_docs, output_dir=Path("./scratch"))
+    conv_results = doc_converter.convert(input)
+    export_documents(conv_results, output_dir=Path("./scratch"))
 
     end_time = time.time() - start_time
 
@@ -10,7 +10,7 @@ from docling.datamodel.base_models import (
     PageElement,
     TableElement,
 )
-from docling.datamodel.document import ConvertedDocument, DocumentConversionInput
+from docling.datamodel.document import DocumentConversionInput
 from docling.document_converter import DocumentConverter
 
 _log = logging.getLogger(__name__)
@@ -39,25 +39,25 @@ def main():
 
     start_time = time.time()
 
-    converted_docs = doc_converter.convert(input_files)
+    conv_results = doc_converter.convert(input_files)
 
     output_dir.mkdir(parents=True, exist_ok=True)
-    for doc in converted_docs:
-        if doc.status != ConversionStatus.SUCCESS:
-            _log.info(f"Document {doc.input.file} failed to convert.")
+    for conv_res in conv_results:
+        if conv_res.status != ConversionStatus.SUCCESS:
+            _log.info(f"Document {conv_res.input.file} failed to convert.")
             continue
 
-        doc_filename = doc.input.file.stem
+        doc_filename = conv_res.input.file.stem
 
         # Export page images
-        for page in doc.pages:
+        for page in conv_res.pages:
             page_no = page.page_no + 1
             page_image_filename = output_dir / f"{doc_filename}-{page_no}.png"
             with page_image_filename.open("wb") as fp:
                 page.image.save(fp, format="PNG")
 
         # Export figures and tables
-        for element, image in doc.render_element_images(
+        for element, image in conv_res.render_element_images(
             element_types=(FigureElement, TableElement)
         ):
             element_image_filename = (
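Taken together, the updated examples all follow the same calling pattern. The condensed sketch below restates it; the input path is a placeholder, and `DocumentConversionInput.from_paths` plus the default `DocumentConverter()` constructor are assumed from the examples' surrounding code, which these hunks do not show.

import json
from pathlib import Path

from docling.datamodel.base_models import ConversionStatus
from docling.datamodel.document import DocumentConversionInput
from docling.document_converter import DocumentConverter

# Placeholder input path; the examples pass a list of PDF paths.
# The name `input` mirrors the examples, shadowing the builtin.
input = DocumentConversionInput.from_paths([Path("./input/sample.pdf")])

doc_converter = DocumentConverter()
output_dir = Path("./scratch")
output_dir.mkdir(parents=True, exist_ok=True)

# convert() yields ConversionResult objects (formerly ConvertedDocument).
for conv_res in doc_converter.convert(input):
    if conv_res.status != ConversionStatus.SUCCESS:
        continue
    doc_filename = conv_res.input.file.stem
    # Deep Search JSON and Markdown exports, as in the hunks above.
    (output_dir / f"{doc_filename}.json").write_text(json.dumps(conv_res.render_as_dict()))
    (output_dir / f"{doc_filename}.md").write_text(conv_res.render_as_markdown())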