Mirror of https://github.com/DS4SD/docling.git (synced 2025-12-08 12:48:28 +00:00)
ci: add coverage and ruff (#1383)
* add coverage calculation and push
* new codecov version and usage of token
* enable ruff formatter instead of black and isort
* apply ruff lint fixes
* apply ruff unsafe fixes
* add removed imports
* runs 1 on linter issues
* finalize linter fixes
* Update pyproject.toml

Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
Signed-off-by: Michele Dolfi <97102151+dolfim-ibm@users.noreply.github.com>
Co-authored-by: Cesar Berrospi Ramis <75900930+ceberam@users.noreply.github.com>
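The linter bullets above surface as a handful of mechanical rewrites that repeat through the whole diff below. A minimal illustrative sketch, not taken from the PR (paths and values are made up); the recurring removal of the blank line right after each def appears to be the ruff formatter's blank-line handling once it replaces black and isort:

from pathlib import Path

# F541: an f-string without placeholders is just a string literal
directory = Path(f"./tests/data/csv/")  # flagged by ruff
directory = Path("./tests/data/csv/")   # fixed

# PIE808: the 0 start in range(0, n) is the default and gets dropped
for page_index in range(3):
    pass

# UP015: "r" is already the default mode for open()
# before: with open("gt.md", "r") as fr:    after: with open("gt.md") as fr: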
@@ -19,7 +19,6 @@ def _get_backend(fname):


 def test_asciidocs_examples():
-
     fnames = sorted(glob.glob("./tests/data/asciidoc/*.asciidoc"))

     for fname in fnames:
@@ -38,8 +37,8 @@ def test_asciidocs_examples():
         print("\n\n", pred_mddoc)

         if os.path.exists(gname):
-            with open(gname, "r") as fr:
-                true_mddoc = fr.read()
+            with open(gname) as fr:
+                fr.read()

             # assert pred_mddoc == true_mddoc, "pred_mddoc!=true_mddoc for asciidoc"
         else:
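The dropped true_mddoc assignment above is one of the "unsafe fixes" named in the commit message: rule F841 removes a binding that is never read, while keeping the fr.read() call for its side effect. A self-contained sketch of the same rewrite (the in-memory stream is made up):

import io

fr = io.StringIO("ground-truth markdown")
fr.read()  # was: true_mddoc = fr.read(); the name was never used afterwards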
@@ -1,5 +1,3 @@
-import json
-import os
 from pathlib import Path

 from pytest import warns
@@ -15,22 +13,19 @@ GENERATE = GEN_TEST_DATA


 def get_csv_paths():
-
     # Define the directory you want to search
-    directory = Path(f"./tests/data/csv/")
+    directory = Path("./tests/data/csv/")

     # List all CSV files in the directory and its subdirectories
     return sorted(directory.rglob("*.csv"))


 def get_csv_path(name: str):
-
     # Return the matching CSV file path
     return Path(f"./tests/data/csv/{name}.csv")


 def get_converter():
-
     converter = DocumentConverter(allowed_formats=[InputFormat.CSV])

     return converter
@@ -55,9 +50,9 @@ def test_e2e_valid_csv_conversions():
         pred_itxt: str = doc._export_to_indented_text(
             max_text_len=70, explicit_tables=False
         )
-        assert verify_export(
-            pred_itxt, str(gt_path) + ".itxt"
-        ), "export to indented-text"
+        assert verify_export(pred_itxt, str(gt_path) + ".itxt"), (
+            "export to indented-text"
+        )

         assert verify_document(
             pred_doc=doc,
@@ -32,7 +32,7 @@ def test_text_cell_counts():

     doc_backend = _get_backend(pdf_doc)

-    for page_index in range(0, doc_backend.page_count()):
+    for page_index in range(doc_backend.page_count()):
         last_cell_count = None
         for i in range(10):
             page_backend: DoclingParsePageBackend = doc_backend.load_page(0)
@@ -42,9 +42,9 @@ def test_text_cell_counts():
                 last_cell_count = len(cells)

             if len(cells) != last_cell_count:
-                assert (
-                    False
-                ), "Loading page multiple times yielded non-identical text cell counts"
+                assert False, (
+                    "Loading page multiple times yielded non-identical text cell counts"
+                )
             last_cell_count = len(cells)
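This assert rewrite recurs in every backend and e2e test below, and it reflects the formatter switch declared in the commit message: black breaks a long assert by parenthesizing the condition, while the ruff formatter keeps the condition on the assert line and parenthesizes the message. A hedged side-by-side sketch with made-up values; both layouts are equivalent Python:

cells_now, cells_before = 120, 120

# black-era layout
assert (
    cells_now == cells_before
), "Loading page multiple times yielded non-identical text cell counts"

# ruff formatter layout
assert cells_now == cells_before, (
    "Loading page multiple times yielded non-identical text cell counts"
)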
@@ -66,7 +66,7 @@ def test_crop_page_image(test_doc_path):
     page_backend: DoclingParsePageBackend = doc_backend.load_page(0)

     # Crop out "Figure 1" from the DocLayNet paper
-    im = page_backend.get_page_image(
+    page_backend.get_page_image(
         scale=2, cropbox=BoundingBox(l=317, t=246, r=574, b=527)
     )
     # im.show()
@@ -31,7 +31,7 @@ def test_text_cell_counts():

     doc_backend = _get_backend(pdf_doc)

-    for page_index in range(0, doc_backend.page_count()):
+    for page_index in range(doc_backend.page_count()):
         last_cell_count = None
         for i in range(10):
             page_backend: DoclingParseV2PageBackend = doc_backend.load_page(0)
@@ -41,9 +41,9 @@ def test_text_cell_counts():
                 last_cell_count = len(cells)

             if len(cells) != last_cell_count:
-                assert (
-                    False
-                ), "Loading page multiple times yielded non-identical text cell counts"
+                assert False, (
+                    "Loading page multiple times yielded non-identical text cell counts"
+                )
             last_cell_count = len(cells)
@@ -65,7 +65,7 @@ def test_crop_page_image(test_doc_path):
     page_backend: DoclingParseV2PageBackend = doc_backend.load_page(0)

     # Crop out "Figure 1" from the DocLayNet paper
-    im = page_backend.get_page_image(
+    page_backend.get_page_image(
         scale=2, cropbox=BoundingBox(l=317, t=246, r=574, b=527)
     )
     # im.show()
@@ -31,7 +31,7 @@ def test_text_cell_counts():

     doc_backend = _get_backend(pdf_doc)

-    for page_index in range(0, doc_backend.page_count()):
+    for page_index in range(doc_backend.page_count()):
         last_cell_count = None
         for i in range(10):
             page_backend: DoclingParseV4PageBackend = doc_backend.load_page(0)
@@ -41,9 +41,9 @@ def test_text_cell_counts():
                 last_cell_count = len(cells)

             if len(cells) != last_cell_count:
-                assert (
-                    False
-                ), "Loading page multiple times yielded non-identical text cell counts"
+                assert False, (
+                    "Loading page multiple times yielded non-identical text cell counts"
+                )
             last_cell_count = len(cells)
@@ -65,7 +65,7 @@ def test_crop_page_image(test_doc_path):
     page_backend: DoclingParseV4PageBackend = doc_backend.load_page(0)

     # Crop out "Figure 1" from the DocLayNet paper
-    im = page_backend.get_page_image(
+    page_backend.get_page_image(
         scale=2, cropbox=BoundingBox(l=317, t=246, r=574, b=527)
     )
     # im.show()
@@ -105,7 +105,6 @@ def test_ordered_lists():


 def get_html_paths():
-
     # Define the directory you want to search
     directory = Path("./tests/data/html/")
@@ -115,14 +114,12 @@ def get_html_paths():


 def get_converter():
-
     converter = DocumentConverter(allowed_formats=[InputFormat.HTML])

     return converter


 def test_e2e_html_conversions():
-
     html_paths = get_html_paths()
     converter = get_converter()
@@ -138,15 +135,15 @@ def test_e2e_html_conversions():
         doc: DoclingDocument = conv_result.document

         pred_md: str = doc.export_to_markdown()
-        assert verify_export(
-            pred_md, str(gt_path) + ".md", generate=GENERATE
-        ), "export to md"
+        assert verify_export(pred_md, str(gt_path) + ".md", generate=GENERATE), (
+            "export to md"
+        )

         pred_itxt: str = doc._export_to_indented_text(
             max_text_len=70, explicit_tables=False
         )
-        assert verify_export(
-            pred_itxt, str(gt_path) + ".itxt", generate=GENERATE
-        ), "export to indented-text"
+        assert verify_export(pred_itxt, str(gt_path) + ".itxt", generate=GENERATE), (
+            "export to indented-text"
+        )

         assert verify_document(doc, str(gt_path) + ".json", GENERATE)
@@ -15,7 +15,7 @@ GENERATE = GEN_TEST_DATA


 def get_pubmed_paths():
-    directory = Path(os.path.dirname(__file__) + f"/data/pubmed/")
+    directory = Path(os.path.dirname(__file__) + "/data/pubmed/")
     xml_files = sorted(directory.rglob("*.xml"))
     return xml_files
@@ -47,9 +47,9 @@ def test_e2e_pubmed_conversions(use_stream=False):
         pred_itxt: str = doc._export_to_indented_text(
             max_text_len=70, explicit_tables=False
         )
-        assert verify_export(
-            pred_itxt, str(gt_path) + ".itxt"
-        ), "export to indented-text"
+        assert verify_export(pred_itxt, str(gt_path) + ".itxt"), (
+            "export to indented-text"
+        )

         assert verify_document(doc, str(gt_path) + ".json", GENERATE), "export to json"
@@ -17,7 +17,6 @@ GENERATE = GEN_TEST_DATA


 def get_xlsx_paths():
-
     # Define the directory you want to search
     directory = Path("./tests/data/xlsx/")
@@ -27,7 +26,6 @@ def get_xlsx_paths():


 def get_converter():
-
     converter = DocumentConverter(allowed_formats=[InputFormat.XLSX])

     return converter
@@ -65,13 +63,13 @@ def test_e2e_xlsx_conversions(documents) -> None:
         pred_itxt: str = doc._export_to_indented_text(
             max_text_len=70, explicit_tables=False
         )
-        assert verify_export(
-            pred_itxt, str(gt_path) + ".itxt"
-        ), "export to indented-text"
+        assert verify_export(pred_itxt, str(gt_path) + ".itxt"), (
+            "export to indented-text"
+        )

-        assert verify_document(
-            doc, str(gt_path) + ".json", GENERATE
-        ), "document document"
+        assert verify_document(doc, str(gt_path) + ".json", GENERATE), (
+            "document document"
+        )


 def test_pages(documents) -> None:
@@ -81,7 +79,7 @@ def test_pages(documents) -> None:
         documents: The paths and converted documents.
     """
     # number of pages from the backend method
-    path = [item for item in get_xlsx_paths() if item.stem == "test-01"][0]
+    path = next(item for item in get_xlsx_paths() if item.stem == "test-01")
     in_doc = InputDocument(
         path_or_stream=path,
         format=InputFormat.XLSX,
@@ -92,7 +90,7 @@ def test_pages(documents) -> None:
     assert backend.page_count() == 3

     # number of pages from the converted document
-    doc = [item for path, item in documents if path.stem == "test-01"][0]
+    doc = next(item for path, item in documents if path.stem == "test-01")
     assert len(doc.pages) == 3

     # page sizes as number of cells
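The [...][0] to next(...) change here (and in the USPTO tests further down) is ruff's RUF015 fix: the generator stops scanning at the first hit instead of materializing every match. It also changes the failure mode when nothing matches, which matters if a fixture file goes missing. A small sketch with made-up names:

items = ["test-00", "test-01", "test-02"]

# was: first = [item for item in items if item == "test-01"][0]  -> IndexError if absent
first = next(item for item in items if item == "test-01")  # StopIteration if absent

# next(..., default) makes the absent case explicit instead of raising
missing = next((item for item in items if item == "nope"), None)
assert first == "test-01" and missing is None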
@@ -1,4 +1,3 @@
-import os
 from pathlib import Path

 from docling.backend.msword_backend import MsWordDocumentBackend
@@ -43,7 +42,6 @@ def test_heading_levels():


 def get_docx_paths():
-
     # Define the directory you want to search
     directory = Path("./tests/data/docx/")
@@ -53,14 +51,12 @@ def get_docx_paths():


 def get_converter():
-
     converter = DocumentConverter(allowed_formats=[InputFormat.DOCX])

     return converter


 def test_e2e_docx_conversions():
-
     docx_paths = get_docx_paths()
     converter = get_converter()
@@ -76,20 +72,20 @@ def test_e2e_docx_conversions():
         doc: DoclingDocument = conv_result.document

         pred_md: str = doc.export_to_markdown()
-        assert verify_export(
-            pred_md, str(gt_path) + ".md", generate=GENERATE
-        ), "export to md"
+        assert verify_export(pred_md, str(gt_path) + ".md", generate=GENERATE), (
+            "export to md"
+        )

         pred_itxt: str = doc._export_to_indented_text(
             max_text_len=70, explicit_tables=False
         )
-        assert verify_export(
-            pred_itxt, str(gt_path) + ".itxt", generate=GENERATE
-        ), "export to indented-text"
+        assert verify_export(pred_itxt, str(gt_path) + ".itxt", generate=GENERATE), (
+            "export to indented-text"
+        )

-        assert verify_document(
-            doc, str(gt_path) + ".json", generate=GENERATE
-        ), "document document"
+        assert verify_document(doc, str(gt_path) + ".json", generate=GENERATE), (
+            "document document"
+        )

         if docx_path.name == "word_tables.docx":
             pred_html: str = doc.export_to_html()
@@ -109,27 +109,27 @@ def test_patent_groundtruth(patents, groundtruth):
         md_name = path.stem + ".md"
         if md_name in gt_names:
             pred_md = doc.export_to_markdown()
-            assert (
-                pred_md == gt_names[md_name]
-            ), f"Markdown file mismatch against groundtruth {md_name}"
+            assert pred_md == gt_names[md_name], (
+                f"Markdown file mismatch against groundtruth {md_name}"
+            )
         json_path = path.with_suffix(".json")
         if json_path.stem in gt_names:
-            assert verify_document(
-                doc, str(json_path), GENERATE
-            ), f"JSON file mismatch against groundtruth {json_path}"
+            assert verify_document(doc, str(json_path), GENERATE), (
+                f"JSON file mismatch against groundtruth {json_path}"
+            )
         itxt_name = path.stem + ".itxt"
         if itxt_name in gt_names:
             pred_itxt = doc._export_to_indented_text()
-            assert (
-                pred_itxt == gt_names[itxt_name]
-            ), f"Indented text file mismatch against groundtruth {itxt_name}"
+            assert pred_itxt == gt_names[itxt_name], (
+                f"Indented text file mismatch against groundtruth {itxt_name}"
+            )


 def test_tables(tables):
     """Test the table parser."""
     # CHECK table in file tables_20180000016.xml
     file_name = "tables_ipa20180000016.xml"
-    file_table = [item[1] for item in tables if item[0].name == file_name][0]
+    file_table = next(item[1] for item in tables if item[0].name == file_name)
     assert file_table.num_rows == 13
     assert file_table.num_cols == 10
     assert len(file_table.table_cells) == 130
@@ -140,7 +140,7 @@ def test_patent_uspto_ice(patents):

     # CHECK application doc number 20200022300
     file_name = "ipa20200022300.xml"
-    doc = [item[1] for item in patents if item[0].name == file_name][0]
+    doc = next(item[1] for item in patents if item[0].name == file_name)
     if GENERATE:
         _generate_groundtruth(doc, Path(file_name).stem)
@@ -278,7 +278,7 @@ def test_patent_uspto_ice(patents):

     # CHECK application doc number 20180000016 for HTML entities, level 2 headings, tables
     file_name = "ipa20180000016.xml"
-    doc = [item[1] for item in patents if item[0].name == file_name][0]
+    doc = next(item[1] for item in patents if item[0].name == file_name)
     if GENERATE:
         _generate_groundtruth(doc, Path(file_name).stem)
@@ -348,7 +348,7 @@ def test_patent_uspto_ice(patents):

     # CHECK application doc number 20110039701 for complex long tables
     file_name = "ipa20110039701.xml"
-    doc = [item[1] for item in patents if item[0].name == file_name][0]
+    doc = next(item[1] for item in patents if item[0].name == file_name)
     assert doc.name == file_name
     assert len(doc.tables) == 17
@@ -358,7 +358,7 @@ def test_patent_uspto_grant_v2(patents):

     # CHECK application doc number 06442728
     file_name = "pg06442728.xml"
-    doc = [item[1] for item in patents if item[0].name == file_name][0]
+    doc = next(item[1] for item in patents if item[0].name == file_name)
     if GENERATE:
         _generate_groundtruth(doc, Path(file_name).stem)
@@ -376,12 +376,12 @@ def test_patent_uspto_grant_v2(patents):
     assert isinstance(texts[2], TextItem)
     assert texts[2].text == (
         "An interleaver receives incoming data frames of size N. The interleaver "
-        "indexes the elements of the frame with an N₁×N₂ index array. The interleaver "
+        "indexes the elements of the frame with an N₁×N₂ index array. The interleaver "  # noqa: RUF001
         "then effectively rearranges (permutes) the data by permuting the rows of the "
-        "index array. The interleaver employs the equation I(j,k)=I(j,αjk+βj)modP) to "
+        "index array. The interleaver employs the equation I(j,k)=I(j,αjk+βj)modP) to "  # noqa: RUF001
         "permute the columns (indexed by k) of each row (indexed by j). P is at least "
         "equal to N₂, βj is a constant which may be different for each row, and each "
-        "αj is a relative prime number relative to P. After permuting, the "
+        "αj is a relative prime number relative to P. After permuting, the "  # noqa: RUF001
         "interleaver outputs the data in a different order than received (e.g., "
         "receives sequentially row by row, outputs sequentially each column by column)."
     )
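The # noqa: RUF001 markers above opt individual lines out of ruff's ambiguous-unicode check: RUF001 flags characters that visually resemble ASCII (here the Greek α and the multiplication sign × in the quoted patent abstract), which usually indicates a typo but in this case is legitimate ground truth. A minimal sketch:

# α (U+03B1) and × (U+00D7) look like "a" and "x" but are distinct characters
equation = "I(j,k)=I(j,αjk+βj)modP"  # noqa: RUF001
assert "α" in equation and "a" not in equation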
@@ -402,7 +402,7 @@ def test_patent_uspto_app_v1(patents):

     # CHECK application doc number 20010031492
     file_name = "pa20010031492.xml"
-    doc = [item[1] for item in patents if item[0].name == file_name][0]
+    doc = next(item[1] for item in patents if item[0].name == file_name)
     if GENERATE:
         _generate_groundtruth(doc, Path(file_name).stem)
@@ -432,7 +432,7 @@ def test_patent_uspto_grant_aps(patents):

     # CHECK application doc number 057006474
     file_name = "pftaps057006474.txt"
-    doc = [item[1] for item in patents if item[0].name == file_name][0]
+    doc = next(item[1] for item in patents if item[0].name == file_name)
     if GENERATE:
         _generate_groundtruth(doc, Path(file_name).stem)
@@ -32,7 +32,7 @@ def test_text_cell_counts():

     doc_backend = _get_backend(pdf_doc)

-    for page_index in range(0, doc_backend.page_count()):
+    for page_index in range(doc_backend.page_count()):
         last_cell_count = None
         for i in range(10):
             page_backend: PyPdfiumPageBackend = doc_backend.load_page(0)
@@ -42,9 +42,9 @@ def test_text_cell_counts():
                 last_cell_count = len(cells)

             if len(cells) != last_cell_count:
-                assert (
-                    False
-                ), "Loading page multiple times yielded non-identical text cell counts"
+                assert False, (
+                    "Loading page multiple times yielded non-identical text cell counts"
+                )
             last_cell_count = len(cells)
@@ -66,7 +66,7 @@ def test_crop_page_image(test_doc_path):
     page_backend: PyPdfiumPageBackend = doc_backend.load_page(0)

     # Crop out "Figure 1" from the DocLayNet paper
-    im = page_backend.get_page_image(
+    page_backend.get_page_image(
         scale=2, cropbox=BoundingBox(l=317, t=246, r=574, b=527)
     )
     # im.show()
@@ -1,4 +1,3 @@
-import os
 from pathlib import Path

 from docling.datamodel.base_models import InputFormat
@@ -12,7 +11,6 @@ GENERATE = GEN_TEST_DATA


 def get_pptx_paths():
-
     # Define the directory you want to search
     directory = Path("./tests/data/pptx/")
@@ -22,14 +20,12 @@ def get_pptx_paths():


 def get_converter():
-
     converter = DocumentConverter(allowed_formats=[InputFormat.PPTX])

     return converter


 def test_e2e_pptx_conversions():
-
     pptx_paths = get_pptx_paths()
     converter = get_converter()
@@ -50,10 +46,10 @@ def test_e2e_pptx_conversions():
         pred_itxt: str = doc._export_to_indented_text(
             max_text_len=70, explicit_tables=False
         )
-        assert verify_export(
-            pred_itxt, str(gt_path) + ".itxt"
-        ), "export to indented-text"
+        assert verify_export(pred_itxt, str(gt_path) + ".itxt"), (
+            "export to indented-text"
+        )

-        assert verify_document(
-            doc, str(gt_path) + ".json", GENERATE
-        ), "document document"
+        assert verify_document(doc, str(gt_path) + ".json", GENERATE), (
+            "document document"
+        )
@@ -3,7 +3,6 @@ from pathlib import Path
 from docling_core.types.doc import CodeItem, TextItem
 from docling_core.types.doc.labels import CodeLanguageLabel, DocItemLabel

-from docling.backend.docling_parse_backend import DoclingParseDocumentBackend
 from docling.datamodel.base_models import InputFormat
 from docling.datamodel.document import ConversionResult
 from docling.datamodel.pipeline_options import PdfPipelineOptions
@@ -12,7 +11,6 @@ from docling.pipeline.standard_pdf_pipeline import StandardPdfPipeline


 def get_converter():
-
     pipeline_options = PdfPipelineOptions()
     pipeline_options.generate_page_images = True
@@ -2,7 +2,6 @@ from pathlib import Path

 from docling_core.types.doc import PictureClassificationData

-from docling.backend.docling_parse_backend import DoclingParseDocumentBackend
 from docling.datamodel.base_models import InputFormat
 from docling.datamodel.document import ConversionResult
 from docling.datamodel.pipeline_options import PdfPipelineOptions
@@ -11,7 +10,6 @@ from docling.pipeline.standard_pdf_pipeline import StandardPdfPipeline


 def get_converter():
-
     pipeline_options = PdfPipelineOptions()
     pipeline_options.generate_page_images = True
@@ -49,32 +47,32 @@ def test_picture_classifier():

     res = results[0]
     assert len(res.annotations) == 1
-    assert type(res.annotations[0]) == PictureClassificationData
+    assert isinstance(res.annotations[0], PictureClassificationData)
     classification_data = res.annotations[0]
     assert classification_data.provenance == "DocumentPictureClassifier"
-    assert (
-        len(classification_data.predicted_classes) == 16
-    ), "Number of predicted classes is not equal to 16"
+    assert len(classification_data.predicted_classes) == 16, (
+        "Number of predicted classes is not equal to 16"
+    )
     confidences = [pred.confidence for pred in classification_data.predicted_classes]
-    assert confidences == sorted(
-        confidences, reverse=True
-    ), "Predictions are not sorted in descending order of confidence"
-    assert (
-        classification_data.predicted_classes[0].class_name == "bar_chart"
-    ), "The prediction is wrong for the bar chart image."
+    assert confidences == sorted(confidences, reverse=True), (
+        "Predictions are not sorted in descending order of confidence"
+    )
+    assert classification_data.predicted_classes[0].class_name == "bar_chart", (
+        "The prediction is wrong for the bar chart image."
+    )

     res = results[1]
     assert len(res.annotations) == 1
-    assert type(res.annotations[0]) == PictureClassificationData
+    assert isinstance(res.annotations[0], PictureClassificationData)
     classification_data = res.annotations[0]
     assert classification_data.provenance == "DocumentPictureClassifier"
-    assert (
-        len(classification_data.predicted_classes) == 16
-    ), "Number of predicted classes is not equal to 16"
+    assert len(classification_data.predicted_classes) == 16, (
+        "Number of predicted classes is not equal to 16"
+    )
     confidences = [pred.confidence for pred in classification_data.predicted_classes]
-    assert confidences == sorted(
-        confidences, reverse=True
-    ), "Predictions are not sorted in descending order of confidence"
-    assert (
-        classification_data.predicted_classes[0].class_name == "map"
-    ), "The prediction is wrong for the bar chart image."
+    assert confidences == sorted(confidences, reverse=True), (
+        "Predictions are not sorted in descending order of confidence"
+    )
+    assert classification_data.predicted_classes[0].class_name == "map", (
+        "The prediction is wrong for the bar chart image."
+    )
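The type(...) == ... to isinstance(...) change above is ruff's E721 fix: an exact type comparison rejects subclasses, whereas isinstance follows the inheritance chain, which is what these annotation checks intend. A sketch with hypothetical classes (not docling's real ones):

class Annotation: ...
class PictureAnnotation(Annotation): ...

ann = PictureAnnotation()
assert type(ann) != Annotation      # exact-type comparison rejects the subclass
assert isinstance(ann, Annotation)  # isinstance accepts it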
@@ -1,7 +1,6 @@
 from pathlib import Path

-from docling.backend.docling_parse_backend import DoclingParseDocumentBackend
 from docling.backend.docling_parse_v4_backend import DoclingParseV4DocumentBackend
 from docling.datamodel.base_models import InputFormat
 from docling.datamodel.document import ConversionResult
 from docling.datamodel.pipeline_options import AcceleratorDevice, PdfPipelineOptions
@@ -15,7 +14,6 @@ GENERATE_V2 = GEN_TEST_DATA


 def get_pdf_paths():
-
     # Define the directory you want to search
     directory = Path("./tests/data/pdf/")
@@ -25,7 +23,6 @@ def get_pdf_paths():


 def get_converter():
-
     pipeline_options = PdfPipelineOptions()
     pipeline_options.do_ocr = False
     pipeline_options.do_table_structure = True
@@ -45,7 +42,6 @@ def get_converter():


 def test_e2e_pdfs_conversions():
-
     pdf_paths = get_pdf_paths()
     converter = get_converter()
@@ -3,7 +3,6 @@ from pathlib import Path
 from typing import List

-from docling.backend.docling_parse_backend import DoclingParseDocumentBackend
 from docling.backend.docling_parse_v4_backend import DoclingParseV4DocumentBackend
 from docling.datamodel.base_models import InputFormat
 from docling.datamodel.document import ConversionResult
 from docling.datamodel.pipeline_options import (
@@ -12,10 +12,9 @@ from docling.document_converter import PdfFormatOption


 def test_in_doc_from_valid_path():
-
     test_doc_path = Path("./tests/data/pdf/2206.01062.pdf")
     doc = _make_input_doc(test_doc_path)
-    assert doc.valid == True
+    assert doc.valid is True


 def test_in_doc_from_invalid_path():
@@ -23,29 +22,26 @@ def test_in_doc_from_invalid_path():

     doc = _make_input_doc(test_doc_path)

-    assert doc.valid == False
+    assert doc.valid is False


 def test_in_doc_from_valid_buf():
-
     buf = BytesIO(Path("./tests/data/pdf/2206.01062.pdf").open("rb").read())
     stream = DocumentStream(name="my_doc.pdf", stream=buf)

     doc = _make_input_doc_from_stream(stream)
-    assert doc.valid == True
+    assert doc.valid is True


 def test_in_doc_from_invalid_buf():
-
     buf = BytesIO(b"")
     stream = DocumentStream(name="my_doc.pdf", stream=buf)

     doc = _make_input_doc_from_stream(stream)
-    assert doc.valid == False
+    assert doc.valid is False


 def test_image_in_pdf_backend():
-
     in_doc = InputDocument(
         path_or_stream=Path("tests/data/2305.03393v1-pg9-img.png"),
         format=InputFormat.IMAGE,
@@ -76,7 +72,6 @@ def test_image_in_pdf_backend():


 def test_in_doc_with_page_range():
-
     test_doc_path = Path("./tests/data/pdf/2206.01062.pdf")
     limits = DocumentLimits()
     limits.page_range = (1, 10)
@@ -87,7 +82,7 @@ def test_in_doc_with_page_range():
         backend=PyPdfiumDocumentBackend,
         limits=limits,
     )
-    assert doc.valid == True
+    assert doc.valid is True

     limits.page_range = (9, 9)
@@ -97,7 +92,7 @@ def test_in_doc_with_page_range():
         backend=PyPdfiumDocumentBackend,
         limits=limits,
     )
-    assert doc.valid == True
+    assert doc.valid is True

     limits.page_range = (11, 12)
@@ -107,7 +102,7 @@ def test_in_doc_with_page_range():
         backend=PyPdfiumDocumentBackend,
         limits=limits,
     )
-    assert doc.valid == False
+    assert doc.valid is False


 def test_guess_format(tmp_path):
@@ -192,17 +187,17 @@ def test_guess_format(tmp_path):
     )
     doc_path = temp_dir / "docling_test.xml"
     doc_path.write_text(xml_content, encoding="utf-8")
-    assert dci._guess_format(doc_path) == None
+    assert dci._guess_format(doc_path) is None
     buf = BytesIO(Path(doc_path).open("rb").read())
     stream = DocumentStream(name="docling_test.xml", stream=buf)
-    assert dci._guess_format(stream) == None
+    assert dci._guess_format(stream) is None

     # Invalid USPTO patent (as plain text)
     stream = DocumentStream(name="pftaps057006474.txt", stream=BytesIO(b"xyz"))
-    assert dci._guess_format(stream) == None
+    assert dci._guess_format(stream) is None
     doc_path = temp_dir / "pftaps_wrong.txt"
     doc_path.write_text("xyz", encoding="utf-8")
-    assert dci._guess_format(doc_path) == None
+    assert dci._guess_format(doc_path) is None

     # Valid Docling JSON
     test_str = '{"name": ""}'
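All the == True, == False and == None comparisons rewritten throughout this file become identity checks, per E712 and E711: True, False and None are singletons, so `is` states the intent exactly, while `==` goes through __eq__ and can be satisfied by an unrelated object. A short sketch:

class AlwaysEqual:
    def __eq__(self, other):
        return True

valid = AlwaysEqual()
assert valid == True        # passes, although valid is not the bool True
assert not (valid is True)  # the identity check is not fooled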
@@ -4,7 +4,6 @@ from pathlib import Path
 import pytest

-from docling.backend.docling_parse_backend import DoclingParseDocumentBackend
 from docling.backend.docling_parse_v4_backend import DoclingParseV4DocumentBackend
 from docling.datamodel.base_models import DocumentStream, InputFormat
 from docling.datamodel.pipeline_options import PdfPipelineOptions
 from docling.document_converter import DocumentConverter, PdfFormatOption
@@ -16,14 +15,12 @@ GENERATE = GEN_TEST_DATA


 def get_pdf_path():
-
     pdf_path = Path("./tests/data/pdf/2305.03393v1-pg9.pdf")
     return pdf_path


 @pytest.fixture
 def converter():
-
     pipeline_options = PdfPipelineOptions()
     pipeline_options.do_ocr = False
     pipeline_options.do_table_structure = True
@@ -42,7 +39,6 @@ def converter():


 def test_convert_path(converter: DocumentConverter):
-
     pdf_path = get_pdf_path()
     print(f"converting {pdf_path}")
@@ -56,7 +52,6 @@ def test_convert_path(converter: DocumentConverter):


 def test_convert_stream(converter: DocumentConverter):
-
     pdf_path = get_pdf_path()
     print(f"converting {pdf_path}")
@@ -8,7 +8,6 @@ from docling.document_converter import ConversionError, DocumentConverter


 def get_pdf_path():
-
     pdf_path = Path("./tests/data/pdf/2305.03393v1-pg9.pdf")
     return pdf_path
@@ -3,8 +3,6 @@ from pathlib import Path

 import pytest

-from docling.backend.docling_parse_backend import DoclingParseDocumentBackend
-from docling.backend.docling_parse_v4_backend import DoclingParseV4DocumentBackend
 from docling.datamodel.base_models import InputFormat
 from docling.datamodel.pipeline_options import PdfPipelineOptions
 from docling.document_converter import DocumentConverter, PdfFormatOption
@@ -23,7 +21,6 @@ def test_doc_paths():


 def get_converter():
-
     pipeline_options = PdfPipelineOptions()
     pipeline_options.do_ocr = False
@@ -21,7 +21,6 @@ from docling.datamodel.document import ConversionResult


 def levenshtein(str1: str, str2: str) -> int:
-
     # Ensure str1 is the shorter string to optimize memory usage
     if len(str1) > len(str2):
         str1, str2 = str2, str1
@@ -46,7 +45,6 @@ def levenshtein(str1: str, str2: str) -> int:


 def verify_text(gt: str, pred: str, fuzzy: bool, fuzzy_threshold: float = 0.4):
-
     if len(gt) == 0 or not fuzzy:
         assert gt == pred, f"{gt}!={pred}"
     else:
@@ -57,22 +55,19 @@ def verify_text(gt: str, pred: str, fuzzy: bool, fuzzy_threshold: float = 0.4):


 def verify_cells(doc_pred_pages: List[Page], doc_true_pages: List[Page]):
-
-    assert len(doc_pred_pages) == len(
-        doc_true_pages
-    ), "pred- and true-doc do not have the same number of pages"
+    assert len(doc_pred_pages) == len(doc_true_pages), (
+        "pred- and true-doc do not have the same number of pages"
+    )

     for pid, page_true_item in enumerate(doc_true_pages):
-
         num_true_cells = len(page_true_item.cells)
         num_pred_cells = len(doc_pred_pages[pid].cells)

-        assert (
-            num_true_cells == num_pred_cells
-        ), f"num_true_cells!=num_pred_cells {num_true_cells}!={num_pred_cells}"
+        assert num_true_cells == num_pred_cells, (
+            f"num_true_cells!=num_pred_cells {num_true_cells}!={num_pred_cells}"
+        )

         for cid, cell_true_item in enumerate(page_true_item.cells):
-
             cell_pred_item = doc_pred_pages[pid].cells[cid]

             true_text = cell_true_item.text
@@ -81,9 +76,9 @@ def verify_cells(doc_pred_pages: List[Page], doc_true_pages: List[Page]):

             true_bbox = cell_true_item.rect.to_bounding_box().as_tuple()
             pred_bbox = cell_pred_item.rect.to_bounding_box().as_tuple()
-            assert (
-                true_bbox == pred_bbox
-            ), f"bbox is not the same: {true_bbox} != {pred_bbox}"
+            assert true_bbox == pred_bbox, (
+                f"bbox is not the same: {true_bbox} != {pred_bbox}"
+            )

     return True
@@ -123,19 +118,19 @@ def verify_tables_v1(doc_pred: DsDocument, doc_true: DsDocument, fuzzy: bool):

     # print("Expected number of tables: {}, result: {}".format(len(doc_true.tables), len(doc_pred.tables)))

-    assert len(doc_true.tables) == len(
-        doc_pred.tables
-    ), "document has different count of tables than expected."
+    assert len(doc_true.tables) == len(doc_pred.tables), (
+        "document has different count of tables than expected."
+    )

-    for l, true_item in enumerate(doc_true.tables):
-        pred_item = doc_pred.tables[l]
+    for ix, true_item in enumerate(doc_true.tables):
+        pred_item = doc_pred.tables[ix]

-        assert (
-            true_item.num_rows == pred_item.num_rows
-        ), "table does not have the same #-rows"
-        assert (
-            true_item.num_cols == pred_item.num_cols
-        ), "table does not have the same #-cols"
+        assert true_item.num_rows == pred_item.num_rows, (
+            "table does not have the same #-rows"
+        )
+        assert true_item.num_cols == pred_item.num_cols, (
+            "table does not have the same #-cols"
+        )

         assert true_item.data is not None, "documents are expected to have table data"
         assert pred_item.data is not None, "documents are expected to have table data"
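Renaming the loop variable l to ix above is ruff's E741 fix: l is disallowed because it is easily confused with 1 (and with I) in many fonts. Any descriptive name satisfies the rule; a one-line sketch with made-up data:

tables = ["t0", "t1"]
for ix, table in enumerate(tables):  # was: for l, true_item in enumerate(...)
    assert tables[ix] == table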
@@ -145,7 +140,6 @@ def verify_tables_v1(doc_pred: DsDocument, doc_true: DsDocument, fuzzy: bool):

         for i, row in enumerate(true_item.data):
             for j, col in enumerate(true_item.data[i]):
-
                 # print("true: ", true_item.data[i][j].text)
                 # print("pred: ", pred_item.data[i][j].text)
                 # print("")
@@ -154,20 +148,20 @@ def verify_tables_v1(doc_pred: DsDocument, doc_true: DsDocument, fuzzy: bool):
                     true_item.data[i][j].text, pred_item.data[i][j].text, fuzzy=fuzzy
                 )

-                assert (
-                    true_item.data[i][j].obj_type == pred_item.data[i][j].obj_type
-                ), "table-cell does not have the same type"
+                assert true_item.data[i][j].obj_type == pred_item.data[i][j].obj_type, (
+                    "table-cell does not have the same type"
+                )

     return True


 def verify_table_v2(true_item: TableItem, pred_item: TableItem, fuzzy: bool):
-    assert (
-        true_item.data.num_rows == pred_item.data.num_rows
-    ), "table does not have the same #-rows"
-    assert (
-        true_item.data.num_cols == pred_item.data.num_cols
-    ), "table does not have the same #-cols"
+    assert true_item.data.num_rows == pred_item.data.num_rows, (
+        "table does not have the same #-rows"
+    )
+    assert true_item.data.num_cols == pred_item.data.num_cols, (
+        "table does not have the same #-cols"
+    )

     assert true_item.data is not None, "documents are expected to have table data"
     assert pred_item.data is not None, "documents are expected to have table data"
@@ -177,7 +171,6 @@ def verify_table_v2(true_item: TableItem, pred_item: TableItem, fuzzy: bool):

     for i, row in enumerate(true_item.data.grid):
         for j, col in enumerate(true_item.data.grid[i]):
-
             # print("true: ", true_item.data[i][j].text)
             # print("pred: ", pred_item.data[i][j].text)
             # print("")
@@ -223,11 +216,11 @@ def verify_picture_image_v2(


 def verify_docitems(doc_pred: DoclingDocument, doc_true: DoclingDocument, fuzzy: bool):
-    assert len(doc_pred.texts) == len(doc_true.texts), f"Text lengths do not match."
+    assert len(doc_pred.texts) == len(doc_true.texts), "Text lengths do not match."

-    assert len(doc_true.tables) == len(
-        doc_pred.tables
-    ), "document has different count of tables than expected."
+    assert len(doc_true.tables) == len(doc_pred.tables), (
+        "document has different count of tables than expected."
+    )

     for (true_item, _true_level), (pred_item, _pred_level) in zip(
         doc_true.iterate_items(), doc_pred.iterate_items()
@@ -237,7 +230,7 @@ def verify_docitems(doc_pred: DoclingDocument, doc_true: DoclingDocument, fuzzy:
         assert isinstance(pred_item, DocItem), "Test item is not a DocItem"

         # Validate type
-        assert true_item.label == pred_item.label, f"Object label does not match."
+        assert true_item.label == pred_item.label, "Object label does not match."

         # Validate provenance
         assert len(true_item.prov) == len(pred_item.prov), "Length of prov mismatch"
@@ -261,25 +254,25 @@ def verify_docitems(doc_pred: DoclingDocument, doc_true: DoclingDocument, fuzzy:

         # Validate table content
         if isinstance(true_item, TableItem):
-            assert isinstance(
-                pred_item, TableItem
-            ), "Test item is not a TableItem as the expected one"
-            assert verify_table_v2(
-                true_item, pred_item, fuzzy=fuzzy
-            ), "Tables not matching"
+            assert isinstance(pred_item, TableItem), (
+                "Test item is not a TableItem as the expected one"
+            )
+            assert verify_table_v2(true_item, pred_item, fuzzy=fuzzy), (
+                "Tables not matching"
+            )

         # Validate picture content
         if isinstance(true_item, PictureItem):
-            assert isinstance(
-                pred_item, PictureItem
-            ), "Test item is not a PictureItem as the expected one"
+            assert isinstance(pred_item, PictureItem), (
+                "Test item is not a PictureItem as the expected one"
+            )

             true_image = true_item.get_image(doc=doc_true)
             pred_image = true_item.get_image(doc=doc_pred)
             if true_image is not None:
-                assert verify_picture_image_v2(
-                    true_image, pred_image
-                ), "Picture image mismatch"
+                assert verify_picture_image_v2(true_image, pred_image), (
+                    "Picture image mismatch"
+                )

         # TODO: check picture annotations
@@ -298,14 +291,14 @@ def verify_conversion_result_v1(
     input_path: Path,
     doc_result: ConversionResult,
     generate: bool = False,
-    ocr_engine: str = None,
+    ocr_engine: Optional[str] = None,
     fuzzy: bool = False,
 ):
     PageList = TypeAdapter(List[Page])

-    assert (
-        doc_result.status == ConversionStatus.SUCCESS
-    ), f"Doc {input_path} did not convert successfully."
+    assert doc_result.status == ConversionStatus.SUCCESS, (
+        f"Doc {input_path} did not convert successfully."
+    )

     doc_pred_pages: List[Page] = doc_result.pages
     doc_pred: DsDocument = doc_result.legacy_document
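The ocr_engine: str = None to Optional[str] change here (and in verify_conversion_result_v2 below) is ruff's RUF013 fix for implicit Optional: a None default contradicts a bare str annotation under the current reading of PEP 484, so the optionality is spelled out. A hedged sketch with a made-up function, not docling's API:

from typing import Optional

def convert(input_path: str, ocr_engine: Optional[str] = None) -> str:
    # falls back to a default engine name when none is given
    return ocr_engine or "default-ocr"

assert convert("doc.pdf") == "default-ocr"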
@@ -344,52 +337,52 @@ def verify_conversion_result_v1(
         with open(dt_path, "w") as fw:
             fw.write(doc_pred_dt)
     else:  # default branch in test
-        with open(pages_path, "r") as fr:
+        with open(pages_path) as fr:
             doc_true_pages = PageList.validate_json(fr.read())

-        with open(json_path, "r") as fr:
+        with open(json_path) as fr:
             doc_true: DsDocument = DsDocument.model_validate_json(fr.read())

-        with open(md_path, "r") as fr:
+        with open(md_path) as fr:
             doc_true_md = fr.read()

-        with open(dt_path, "r") as fr:
+        with open(dt_path) as fr:
             doc_true_dt = fr.read()

         if not fuzzy:
-            assert verify_cells(
-                doc_pred_pages, doc_true_pages
-            ), f"Mismatch in PDF cell prediction for {input_path}"
+            assert verify_cells(doc_pred_pages, doc_true_pages), (
+                f"Mismatch in PDF cell prediction for {input_path}"
+            )

         # assert verify_output(
         #     doc_pred, doc_true
         # ), f"Mismatch in JSON prediction for {input_path}"

-        assert verify_tables_v1(
-            doc_pred, doc_true, fuzzy=fuzzy
-        ), f"verify_tables(doc_pred, doc_true) mismatch for {input_path}"
+        assert verify_tables_v1(doc_pred, doc_true, fuzzy=fuzzy), (
+            f"verify_tables(doc_pred, doc_true) mismatch for {input_path}"
+        )

-        assert verify_md(
-            doc_pred_md, doc_true_md, fuzzy=fuzzy
-        ), f"Mismatch in Markdown prediction for {input_path}"
+        assert verify_md(doc_pred_md, doc_true_md, fuzzy=fuzzy), (
+            f"Mismatch in Markdown prediction for {input_path}"
+        )

-        assert verify_dt(
-            doc_pred_dt, doc_true_dt, fuzzy=fuzzy
-        ), f"Mismatch in DocTags prediction for {input_path}"
+        assert verify_dt(doc_pred_dt, doc_true_dt, fuzzy=fuzzy), (
+            f"Mismatch in DocTags prediction for {input_path}"
+        )


 def verify_conversion_result_v2(
     input_path: Path,
     doc_result: ConversionResult,
     generate: bool = False,
-    ocr_engine: str = None,
+    ocr_engine: Optional[str] = None,
     fuzzy: bool = False,
 ):
     PageList = TypeAdapter(List[Page])

-    assert (
-        doc_result.status == ConversionStatus.SUCCESS
-    ), f"Doc {input_path} did not convert successfully."
+    assert doc_result.status == ConversionStatus.SUCCESS, (
+        f"Doc {input_path} did not convert successfully."
+    )

     doc_pred_pages: List[Page] = doc_result.pages
     doc_pred: DoclingDocument = doc_result.document
@@ -426,42 +419,41 @@ def verify_conversion_result_v2(
         with open(dt_path, "w") as fw:
             fw.write(doc_pred_dt)
     else:  # default branch in test
-        with open(pages_path, "r") as fr:
+        with open(pages_path) as fr:
             doc_true_pages = PageList.validate_json(fr.read())

-        with open(json_path, "r") as fr:
+        with open(json_path) as fr:
             doc_true: DoclingDocument = DoclingDocument.model_validate_json(fr.read())

-        with open(md_path, "r") as fr:
+        with open(md_path) as fr:
             doc_true_md = fr.read()

-        with open(dt_path, "r") as fr:
+        with open(dt_path) as fr:
             doc_true_dt = fr.read()

         if not fuzzy:
-            assert verify_cells(
-                doc_pred_pages, doc_true_pages
-            ), f"Mismatch in PDF cell prediction for {input_path}"
+            assert verify_cells(doc_pred_pages, doc_true_pages), (
+                f"Mismatch in PDF cell prediction for {input_path}"
+            )

         # assert verify_output(
         #     doc_pred, doc_true
         # ), f"Mismatch in JSON prediction for {input_path}"

-        assert verify_docitems(
-            doc_pred, doc_true, fuzzy=fuzzy
-        ), f"verify_docling_document(doc_pred, doc_true) mismatch for {input_path}"
+        assert verify_docitems(doc_pred, doc_true, fuzzy=fuzzy), (
+            f"verify_docling_document(doc_pred, doc_true) mismatch for {input_path}"
+        )

-        assert verify_md(
-            doc_pred_md, doc_true_md, fuzzy=fuzzy
-        ), f"Mismatch in Markdown prediction for {input_path}"
+        assert verify_md(doc_pred_md, doc_true_md, fuzzy=fuzzy), (
+            f"Mismatch in Markdown prediction for {input_path}"
+        )

-        assert verify_dt(
-            doc_pred_dt, doc_true_dt, fuzzy=fuzzy
-        ), f"Mismatch in DocTags prediction for {input_path}"
+        assert verify_dt(doc_pred_dt, doc_true_dt, fuzzy=fuzzy), (
+            f"Mismatch in DocTags prediction for {input_path}"
+        )


 def verify_document(pred_doc: DoclingDocument, gtfile: str, generate: bool = False):
-
     if not os.path.exists(gtfile) or generate:
         with open(gtfile, "w") as fw:
             json.dump(pred_doc.export_to_dict(), fw, indent=2)