From 0df19cadc91d7ca2a34f9d54560899f58518873f Mon Sep 17 00:00:00 2001 From: Michele Dolfi Date: Wed, 19 Feb 2025 09:32:57 +0100 Subject: [PATCH] Improvements for visualization example (#1017) * fix colab install, use granite and improve viz of description Signed-off-by: Michele Dolfi * switch docs to notbook Signed-off-by: Michele Dolfi * show results with all models Signed-off-by: Michele Dolfi * show other vlm Signed-off-by: Michele Dolfi --------- Signed-off-by: Michele Dolfi --- docs/examples/pictures_description.ipynb | 253 ++++++++++++++++++++--- mkdocs.yml | 2 +- 2 files changed, 231 insertions(+), 24 deletions(-) diff --git a/docs/examples/pictures_description.ipynb b/docs/examples/pictures_description.ipynb index f906a7aa..f50860db 100644 --- a/docs/examples/pictures_description.ipynb +++ b/docs/examples/pictures_description.ipynb @@ -9,7 +9,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -21,7 +21,18 @@ } ], "source": [ - "%pip install -q docling ipython" + "%pip install -q docling[vlm] ipython" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from docling.datamodel.base_models import InputFormat\n", + "from docling.datamodel.pipeline_options import PdfPipelineOptions\n", + "from docling.document_converter import DocumentConverter, PdfFormatOption" ] }, { @@ -30,26 +41,61 @@ "metadata": {}, "outputs": [], "source": [ - "from docling.datamodel.base_models import InputFormat\n", - "from docling.datamodel.pipeline_options import ( # granite_picture_description,\n", - " PdfPipelineOptions,\n", - " smolvlm_picture_description,\n", - ")\n", - "from docling.document_converter import DocumentConverter, PdfFormatOption" + "# The source document\n", + "DOC_SOURCE = \"https://arxiv.org/pdf/2501.17887\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Describe pictures with Granite Vision\n", + "\n", + "This section will run locally the [ibm-granite/granite-vision-3.1-2b-preview](https://huggingface.co/ibm-granite/granite-vision-3.1-2b-preview) model to describe the pictures of the document." ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.48, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "93a634699bf1434c9bc8e384d6db1a28", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Loading checkpoint shards: 0%| | 0/2 [00:00Picture #/pictures/0

Caption

Figure 1: Four examples of complex page layouts across different document categories

Annotations

[PictureDescriptionData(kind='description', text='An advertisement with a blue background, an image of a building, and text about the 175 years of looking forward.', provenance='HuggingFaceTB/SmolVLM-256M-Instruct')]\n", - "

Picture #/pictures/1


Caption

Figure 2: Distribution of DocLayNet pages across document categories.

Annotations

[PictureDescriptionData(kind='description', text='The image is a pie chart that represents the distribution of various categories. The chart is divided into four sections, each representing a different category. The categories are: Financial, Tenders, Laws, and Manuals. \\n\\n### Description of the Pie Chart:\\n1. **Financial Categories:**\\n - **Financial:** 32%\\n - **Tenders:** 6%\\n - **Laws:** 16%\\n - **Manuals:** 21%\\n\\n2. **Tenders:**\\n - **Tenders:** 16%\\n - **Laws:** 16%\\n - **Manuals:** 16%\\n\\n3. **Laws:**\\n - **Laws:** 16%\\n - **Manuals:** 16%\\n\\n4. **Manuals:**\\n - **Manuals:** 21%\\n\\n### Analysis:\\nThe pie chart is a visual representation of the distribution of', provenance='HuggingFaceTB/SmolVLM-256M-Instruct')]\n", - "

Picture #/pictures/2


Caption

Figure 3: Corpus Conversion Service annotation user interface. The PDF page is shown in the background, with overlaid text-cells (in darker shades). The annotation boxes can be drawn by dragging a rectangle over each segment with the respective label from the palette on the right.

Annotations

[PictureDescriptionData(kind='description', text='The image is a table that contains field labels and a list of fields. The table is titled \"Field Labels.\" The table has five columns and five rows. The first column is labeled \"Clusters,\" the second column is labeled \"Clusters,\" the third column is labeled \"Clusters,\" the fourth column is labeled \"Clusters,\" and the fifth column is labeled \"Clusters.\"\\n\\nThe table is structured in a way that it is easy to understand. The first row of the table contains the following fields:\\n\\n- \"Clusters\"\\n- \"Clusters\"\\n- \"Clusters\"\\n- \"Clusters\"\\n- \"Clusters\"\\n- \"Clusters\"\\n\\nThe second row of the table contains the following fields:\\n\\n- \"Clusters\"\\n- \"Clusters\"\\n- \"Clusters\"\\n- \"Clusters\"\\n- \"Clusters\"\\n- \"Clusters\"\\n\\nThe third row of the', provenance='HuggingFaceTB/SmolVLM-256M-Instruct')]\n", - "

Picture #/pictures/3


Caption

Figure 4: Examples of plausible annotation alternatives for the same page. Criteria in our annotation guideline can resolve cases A to C, while the case D remains ambiguous.

Annotations

[PictureDescriptionData(kind='description', text='Figure 1.', provenance='HuggingFaceTB/SmolVLM-256M-Instruct')]\n", - "

Picture #/pictures/4


Caption

Figure 5: Prediction performance (mAP@0.5-0.95) of a Mask R-CNN network with ResNet50 backbone trained on increasing fractions of the DocLayNet dataset. The learning curve flattens around the 80% mark, indicating that increasing the size of the DocLayNet dataset with similar data will not yield significantly better predictions.

Annotations

[PictureDescriptionData(kind='description', text='The image is a line graph that shows the percentage of DocLayNet training set as a percentage of the total training set. The x-axis represents the percentage of training set, ranging from 0 to 100. The y-axis represents the percentage of training set, ranging from 0 to 100. The graph shows a continuous trend of increasing training set percentage over time.\\n\\n### Description of the Graph:\\n1. **X-Axis (Percentage of Training Set):**\\n - The x-axis is labeled \"Percentage of DocLayNet training set.\"\\n - The range of the x-axis is from 0 to 100.\\n\\n2. **Y-Axis (Percentage of Training Set):**\\n - The y-axis is labeled \"MAP:0.500-0.95.\"\\n - The range of the y-axis is from 0 to 100.\\n\\n3.', provenance='HuggingFaceTB/SmolVLM-256M-Instruct')]\n" + "

Picture #/pictures/0


Caption

Figure 1: Sketch of Docling's pipelines and usage model. Both PDF pipeline and simple pipeline build up a DoclingDocument representation, which can be further enriched. Downstream applications can utilize Docling's API to inspect, export, or chunk the document for various purposes.

Annotations (ibm-granite/granite-vision-3.1-2b-preview)

In this image we can see a poster with some text and images.
\n", + "

Picture #/pictures/1


Caption

Figure 2: Dataset categories and sample counts for documents and pages.

Annotations (ibm-granite/granite-vision-3.1-2b-preview)

In this image we can see a pie chart. In the pie chart we can see the categories and the number of documents in each category.
\n", + "

Picture #/pictures/2


Caption

Figure 3: Distribution of conversion times for all documents, ordered by number of pages in a document, on all system configurations. Every dot represents one document. Log/log scale is used to even the spacing, since both number of pages and conversion times have long-tail distributions.

Annotations (ibm-granite/granite-vision-3.1-2b-preview)

In this image we can see a graph. On the x-axis we can see the number of pages. On the y-axis we can see the seconds.
\n", + "

Picture #/pictures/3


Caption

Figure 4: Contributions of PDF backend and AI models to the conversion time of a page (in seconds per page). Lower is better. Left: Ranges of time contributions for each model to pages it was applied on (i.e., OCR was applied only on pages with bitmaps, table structure was applied only on pages with tables). Right: Average time contribution to a page in the benchmark dataset (factoring in zero-time contribution for OCR and table structure models on pages without bitmaps or tables) .

Annotations (ibm-granite/granite-vision-3.1-2b-preview)

In this image we can see a bar chart and a line chart. In the bar chart we can see the values of Pdf Parse, OCR, Layout, Table Structure, Page Total and Page. In the line chart we can see the values of Pdf Parse, OCR, Layout, Table Structure, Page Total and Page.
\n", + "

Picture #/pictures/4


Caption

Figure 5: Conversion time in seconds per page on our dataset in three scenarios, across all assets and system configurations. Lower bars are better. The configuration includes OCR and table structure recognition ( fast table option on Docling and MinerU, hi res in unstructured, as shown in table 1).

Annotations (ibm-granite/granite-vision-3.1-2b-preview)

In this image we can see a bar chart. In the chart we can see the CPU, Max, GPU, and sec/page.
\n" ], "text/plain": [ "" @@ -90,20 +136,181 @@ } ], "source": [ + "from docling_core.types.doc.document import PictureDescriptionData\n", "from IPython import display\n", "\n", "html_buffer = []\n", "# display the first 5 pictures and their captions and annotations:\n", "for pic in doc.pictures[:5]:\n", - " html_buffer.append(\n", - " f\"

Picture {pic.self_ref}

\"\n", + " html_item = (\n", + " f\"

Picture {pic.self_ref}

\"\n", " f'
'\n", - " f\"

Caption

{pic.caption_text(doc=doc)}
\"\n", - " f\"

Annotations

{pic.annotations}\\n\"\n", + " f\"

Caption

{pic.caption_text(doc=doc)}
\"\n", " )\n", + " for annotation in pic.annotations:\n", + " if not isinstance(annotation, PictureDescriptionData):\n", + " continue\n", + " html_item += (\n", + " f\"

Annotations ({annotation.provenance})

{annotation.text}
\\n\"\n", + " )\n", + " html_buffer.append(html_item)\n", "display.HTML(\"
\".join(html_buffer))" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Describe pictures with SmolVLM\n", + "\n", + "This section will run locally the [HuggingFaceTB/SmolVLM-256M-Instruct](https://huggingface.co/HuggingFaceTB/SmolVLM-256M-Instruct) model to describe the pictures of the document." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "from docling.datamodel.pipeline_options import smolvlm_picture_description\n", + "\n", + "pipeline_options = PdfPipelineOptions()\n", + "pipeline_options.do_picture_description = True\n", + "pipeline_options.picture_description_options = (\n", + " smolvlm_picture_description # <-- the model choice\n", + ")\n", + "pipeline_options.picture_description_options.prompt = (\n", + " \"Describe the image in three sentences. Be consise and accurate.\"\n", + ")\n", + "pipeline_options.images_scale = 2.0\n", + "pipeline_options.generate_picture_images = True\n", + "\n", + "converter = DocumentConverter(\n", + " format_options={\n", + " InputFormat.PDF: PdfFormatOption(\n", + " pipeline_options=pipeline_options,\n", + " )\n", + " }\n", + ")\n", + "doc = converter.convert(DOC_SOURCE).document" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "

Picture #/pictures/0


Caption

Figure 1: Sketch of Docling's pipelines and usage model. Both PDF pipeline and simple pipeline build up a DoclingDocument representation, which can be further enriched. Downstream applications can utilize Docling's API to inspect, export, or chunk the document for various purposes.

Annotations (HuggingFaceTB/SmolVLM-256M-Instruct)

This is a page that has different types of documents on it.
\n", + "

Picture #/pictures/1


Caption

Figure 2: Dataset categories and sample counts for documents and pages.

Annotations (HuggingFaceTB/SmolVLM-256M-Instruct)

Here is a page-by-page list of documents per category:\n", + "- Science\n", + "- Articles\n", + "- Law and Regulations\n", + "- Articles\n", + "- Misc.
\n", + "

Picture #/pictures/2


Caption

Figure 3: Distribution of conversion times for all documents, ordered by number of pages in a document, on all system configurations. Every dot represents one document. Log/log scale is used to even the spacing, since both number of pages and conversion times have long-tail distributions.

Annotations (HuggingFaceTB/SmolVLM-256M-Instruct)

The image is a bar chart that shows the number of pages of a website as a function of the number of pages of the website. The x-axis represents the number of pages, ranging from 100 to 10,000. The y-axis represents the number of pages, ranging from 100 to 10,000. The chart is labeled \"Number of pages\" and has a legend at the top of the chart that indicates the number of pages.\n", + "\n", + "The chart shows a clear trend: as the number of pages increases, the number of pages decreases. This is evident from the following points:\n", + "\n", + "- The number of pages increases from 100 to 1000.\n", + "- The number of pages decreases from 1000 to 10,000.\n", + "- The number of pages increases from 10,000 to 10,000.
\n", + "

Picture #/pictures/3


Caption

Figure 4: Contributions of PDF backend and AI models to the conversion time of a page (in seconds per page). Lower is better. Left: Ranges of time contributions for each model to pages it was applied on (i.e., OCR was applied only on pages with bitmaps, table structure was applied only on pages with tables). Right: Average time contribution to a page in the benchmark dataset (factoring in zero-time contribution for OCR and table structure models on pages without bitmaps or tables) .

Annotations (HuggingFaceTB/SmolVLM-256M-Instruct)

bar chart with different colored bars representing different data points.
\n", + "

Picture #/pictures/4


Caption

Figure 5: Conversion time in seconds per page on our dataset in three scenarios, across all assets and system configurations. Lower bars are better. The configuration includes OCR and table structure recognition ( fast table option on Docling and MinerU, hi res in unstructured, as shown in table 1).

Annotations (HuggingFaceTB/SmolVLM-256M-Instruct)

A bar chart with the following information:\n", + "\n", + "- The x-axis represents the number of pages, ranging from 0 to 14.\n", + "- The y-axis represents the page count, ranging from 0 to 14.\n", + "- The chart has three categories: Marker, Unstructured, and Detailed.\n", + "- The x-axis is labeled \"see/page.\"\n", + "- The y-axis is labeled \"Page Count.\"\n", + "- The chart shows that the Marker category has the highest number of pages, followed by the Unstructured category, and then the Detailed category.
\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from docling_core.types.doc.document import PictureDescriptionData\n", + "from IPython import display\n", + "\n", + "html_buffer = []\n", + "# display the first 5 pictures and their captions and annotations:\n", + "for pic in doc.pictures[:5]:\n", + " html_item = (\n", + " f\"

Picture {pic.self_ref}

\"\n", + " f'
'\n", + " f\"

Caption

{pic.caption_text(doc=doc)}
\"\n", + " )\n", + " for annotation in pic.annotations:\n", + " if not isinstance(annotation, PictureDescriptionData):\n", + " continue\n", + " html_item += (\n", + " f\"

Annotations ({annotation.provenance})

{annotation.text}
\\n\"\n", + " )\n", + " html_buffer.append(html_item)\n", + "display.HTML(\"
\".join(html_buffer))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Use other vision models\n", + "\n", + "The examples above can also be reproduced using other vision model.\n", + "The Docling options `PictureDescriptionVlmOptions` allows to speficy your favorite vision model from the Hugging Face Hub." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "from docling.datamodel.pipeline_options import PictureDescriptionVlmOptions\n", + "\n", + "pipeline_options = PdfPipelineOptions()\n", + "pipeline_options.do_picture_description = True\n", + "pipeline_options.picture_description_options = PictureDescriptionVlmOptions(\n", + " repo_id=\"\", # <-- add here the Hugging Face repo_id of your favorite VLM\n", + " prompt=\"Describe the image in three sentences. Be consise and accurate.\",\n", + ")\n", + "pipeline_options.images_scale = 2.0\n", + "pipeline_options.generate_picture_images = True\n", + "\n", + "converter = DocumentConverter(\n", + " format_options={\n", + " InputFormat.PDF: PdfFormatOption(\n", + " pipeline_options=pipeline_options,\n", + " )\n", + " }\n", + ")\n", + "\n", + "# Uncomment to run:\n", + "# doc = converter.convert(DOC_SOURCE).document" + ] + }, { "cell_type": "code", "execution_count": null, @@ -114,7 +321,7 @@ ], "metadata": { "kernelspec": { - "display_name": ".venv", + "display_name": "docling-aMWN2FRM-py3.12", "language": "python", "name": "python3" }, @@ -128,7 +335,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.8" + "version": "3.12.7" } }, "nbformat": 4, diff --git a/mkdocs.yml b/mkdocs.yml index 2b7127a5..36bcacb5 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -75,7 +75,7 @@ nav: - "Figure enrichment": examples/develop_picture_enrichment.py - "Table export": examples/export_tables.py - "Multimodal export": examples/export_multimodal.py - - "Annotate picture with local vlm": examples/pictures_description.py + - "Annotate picture with local vlm": examples/pictures_description.ipynb - "Annotate picture with remote vlm": examples/pictures_description_api.py - "Force full page OCR": examples/full_page_ocr.py - "Automatic OCR language detection with tesseract": examples/tesseract_lang_detection.py