From 753c12b29ed37241a34c32fb6b0aedb9948e1b5e Mon Sep 17 00:00:00 2001 From: Michele Dolfi Date: Wed, 19 Feb 2025 08:21:42 +0100 Subject: [PATCH] show results with all models Signed-off-by: Michele Dolfi --- docs/examples/pictures_description.ipynb | 180 ++++++++++++++++++++--- 1 file changed, 160 insertions(+), 20 deletions(-) diff --git a/docs/examples/pictures_description.ipynb b/docs/examples/pictures_description.ipynb index 025eef0c..33e94e01 100644 --- a/docs/examples/pictures_description.ipynb +++ b/docs/examples/pictures_description.ipynb @@ -31,23 +31,52 @@ "outputs": [], "source": [ "from docling.datamodel.base_models import InputFormat\n", - "from docling.datamodel.pipeline_options import (\n", - " PdfPipelineOptions,\n", - " granite_picture_description,\n", - " smolvlm_picture_description,\n", - ")\n", + "from docling.datamodel.pipeline_options import PdfPipelineOptions\n", "from docling.document_converter import DocumentConverter, PdfFormatOption" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "# The source document\n", + "DOC_SOURCE = \"https://arxiv.org/pdf/2501.17887\"" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Describe pictures with Granite Vision\n", + "\n", + "This section runs the [ibm-granite/granite-vision-3.1-2b-preview](https://huggingface.co/ibm-granite/granite-vision-3.1-2b-preview) model locally to describe the pictures in the document." ] + }, + { + "cell_type": "code", + "execution_count": 3, "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.48, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.\n" ] + }, { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "9d3bb7b3b4fd4640af40289dd7bf50d7", + "model_id": "93a634699bf1434c9bc8e384d6db1a28", "version_major": 2, "version_minor": 0 }, @@ -60,12 +89,13 @@ } ], "source": [ - "DOC_SOURCE = \"https://arxiv.org/pdf/2501.17887\"\n", + "from docling.datamodel.pipeline_options import granite_picture_description\n", "\n", "pipeline_options = PdfPipelineOptions()\n", "pipeline_options.do_picture_description = True\n", - "# pipeline_options.picture_description_options = smolvlm_picture_description\n", - "pipeline_options.picture_description_options = granite_picture_description\n", + "pipeline_options.picture_description_options = (\n", + " granite_picture_description # <-- the model choice\n", + ")\n", "pipeline_options.picture_description_options.prompt = (\n", " \"Describe the image in three sentences. Be concise and accurate.\"\n", ")\n", "pipeline_options.images_scale = 2.0\n", "pipeline_options.generate_picture_images = True\n", "\n", "converter = DocumentConverter(\n", " format_options={\n", " InputFormat.PDF: PdfFormatOption(\n", " pipeline_options=pipeline_options,\n", " )\n", " }\n", ")\n", "doc = converter.convert(DOC_SOURCE).document" ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 4, "metadata": {}, "outputs": [ { "data": { "text/html": [ "

Picture #/pictures/0


Caption

Figure 1: Sketch of Docling's pipelines and usage model. Both PDF pipeline and simple pipeline build up a DoclingDocument representation, which can be further enriched. Downstream applications can utilize Docling's API to inspect, export, or chunk the document for various purposes.

Annotations

In this image we can see a poster with some text and images.
\n", - "

Picture #/pictures/1


Caption

Figure 2: Dataset categories and sample counts for documents and pages.

Annotations

In this image we can see a pie chart. In the pie chart we can see the categories and the number of documents in each category.
\n", - "

Picture #/pictures/2


Caption

Figure 3: Distribution of conversion times for all documents, ordered by number of pages in a document, on all system configurations. Every dot represents one document. Log/log scale is used to even the spacing, since both number of pages and conversion times have long-tail distributions.

Annotations

In this image we can see a graph. On the x-axis we can see the number of pages. On the y-axis we can see the seconds.
\n", - "

Picture #/pictures/3


Caption

Figure 4: Contributions of PDF backend and AI models to the conversion time of a page (in seconds per page). Lower is better. Left: Ranges of time contributions for each model to pages it was applied on (i.e., OCR was applied only on pages with bitmaps, table structure was applied only on pages with tables). Right: Average time contribution to a page in the benchmark dataset (factoring in zero-time contribution for OCR and table structure models on pages without bitmaps or tables) .

Annotations

In this image we can see a bar chart and a line chart. In the bar chart we can see the values of Pdf Parse, OCR, Layout, Table Structure, Page Total and Page. In the line chart we can see the values of Pdf Parse, OCR, Layout, Table Structure, Page Total and Page.
\n", - "

Picture #/pictures/4


Caption

Figure 5: Conversion time in seconds per page on our dataset in three scenarios, across all assets and system configurations. Lower bars are better. The configuration includes OCR and table structure recognition ( fast table option on Docling and MinerU, hi res in unstructured, as shown in table 1).

Annotations

In this image we can see a bar chart. In the chart we can see the CPU, Max, GPU, and sec/page.
\n" + "

Picture #/pictures/0


Caption

Figure 1: Sketch of Docling's pipelines and usage model. Both PDF pipeline and simple pipeline build up a DoclingDocument representation, which can be further enriched. Downstream applications can utilize Docling's API to inspect, export, or chunk the document for various purposes.

Annotations (ibm-granite/granite-vision-3.1-2b-preview)

In this image we can see a poster with some text and images.
\n", + "

Picture #/pictures/1


Caption

Figure 2: Dataset categories and sample counts for documents and pages.

Annotations (ibm-granite/granite-vision-3.1-2b-preview)

In this image we can see a pie chart. In the pie chart we can see the categories and the number of documents in each category.
\n", + "

Picture #/pictures/2


Caption

Figure 3: Distribution of conversion times for all documents, ordered by number of pages in a document, on all system configurations. Every dot represents one document. Log/log scale is used to even the spacing, since both number of pages and conversion times have long-tail distributions.

Annotations (ibm-granite/granite-vision-3.1-2b-preview)

In this image we can see a graph. On the x-axis we can see the number of pages. On the y-axis we can see the seconds.
\n", + "

Picture #/pictures/3


Caption

Figure 4: Contributions of PDF backend and AI models to the conversion time of a page (in seconds per page). Lower is better. Left: Ranges of time contributions for each model to pages it was applied on (i.e., OCR was applied only on pages with bitmaps, table structure was applied only on pages with tables). Right: Average time contribution to a page in the benchmark dataset (factoring in zero-time contribution for OCR and table structure models on pages without bitmaps or tables) .

Annotations (ibm-granite/granite-vision-3.1-2b-preview)

In this image we can see a bar chart and a line chart. In the bar chart we can see the values of Pdf Parse, OCR, Layout, Table Structure, Page Total and Page. In the line chart we can see the values of Pdf Parse, OCR, Layout, Table Structure, Page Total and Page.
\n", + "

Picture #/pictures/4


Caption

Figure 5: Conversion time in seconds per page on our dataset in three scenarios, across all assets and system configurations. Lower bars are better. The configuration includes OCR and table structure recognition ( fast table option on Docling and MinerU, hi res in unstructured, as shown in table 1).

Annotations (ibm-granite/granite-vision-3.1-2b-preview)

In this image we can see a bar chart. In the chart we can see the CPU, Max, GPU, and sec/page.
\n" ], "text/plain": [ "" ] }, - "execution_count": 10, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } @@ -113,14 +143,124 @@ "# display the first 5 pictures and their captions and annotations:\n", "for pic in doc.pictures[:5]:\n", " html_item = (\n", - " f\"

Picture {pic.self_ref}

\"\n", + " f\"

Picture {pic.self_ref}

\"\n", " f'
'\n", - " f\"

Caption

{pic.caption_text(doc=doc)}
\"\n", + " f\"

Caption

{pic.caption_text(doc=doc)}
\"\n", " )\n", " for annotation in pic.annotations:\n", " if not isinstance(annotation, PictureDescriptionData):\n", " continue\n", - " html_item += f\"

Annotations

{annotation.text}
\\n\"\n", + " html_item += (\n", + " f\"

Annotations ({annotation.provenance})

{annotation.text}
\\n\"\n", + " )\n", + " html_buffer.append(html_item)\n", + "display.HTML(\"
\".join(html_buffer))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Describe pictures with SmolVLM\n", + "\n", + "This section will run locally the [HuggingFaceTB/SmolVLM-256M-Instruct](https://huggingface.co/HuggingFaceTB/SmolVLM-256M-Instruct) model to describe the pictures of the document." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "from docling.datamodel.pipeline_options import smolvlm_picture_description\n", + "\n", + "pipeline_options = PdfPipelineOptions()\n", + "pipeline_options.do_picture_description = True\n", + "pipeline_options.picture_description_options = (\n", + " smolvlm_picture_description # <-- the model choice\n", + ")\n", + "pipeline_options.picture_description_options.prompt = (\n", + " \"Describe the image in three sentences. Be consise and accurate.\"\n", + ")\n", + "pipeline_options.images_scale = 2.0\n", + "pipeline_options.generate_picture_images = True\n", + "\n", + "converter = DocumentConverter(\n", + " format_options={\n", + " InputFormat.PDF: PdfFormatOption(\n", + " pipeline_options=pipeline_options,\n", + " )\n", + " }\n", + ")\n", + "doc = converter.convert(DOC_SOURCE).document" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "

Picture #/pictures/0


Caption

Figure 1: Sketch of Docling's pipelines and usage model. Both PDF pipeline and simple pipeline build up a DoclingDocument representation, which can be further enriched. Downstream applications can utilize Docling's API to inspect, export, or chunk the document for various purposes.

Annotations (HuggingFaceTB/SmolVLM-256M-Instruct)

This is a page that has different types of documents on it.
\n", + "

Picture #/pictures/1


Caption

Figure 2: Dataset categories and sample counts for documents and pages.

Annotations (HuggingFaceTB/SmolVLM-256M-Instruct)

Here is a page-by-page list of documents per category:\n", + "- Science\n", + "- Articles\n", + "- Law and Regulations\n", + "- Articles\n", + "- Misc.
\n", + "

Picture #/pictures/2


Caption

Figure 3: Distribution of conversion times for all documents, ordered by number of pages in a document, on all system configurations. Every dot represents one document. Log/log scale is used to even the spacing, since both number of pages and conversion times have long-tail distributions.

Annotations (HuggingFaceTB/SmolVLM-256M-Instruct)

The image is a bar chart that shows the number of pages of a website as a function of the number of pages of the website. The x-axis represents the number of pages, ranging from 100 to 10,000. The y-axis represents the number of pages, ranging from 100 to 10,000. The chart is labeled \"Number of pages\" and has a legend at the top of the chart that indicates the number of pages.\n", + "\n", + "The chart shows a clear trend: as the number of pages increases, the number of pages decreases. This is evident from the following points:\n", + "\n", + "- The number of pages increases from 100 to 1000.\n", + "- The number of pages decreases from 1000 to 10,000.\n", + "- The number of pages increases from 10,000 to 10,000.
\n", + "

Picture #/pictures/3


Caption

Figure 4: Contributions of PDF backend and AI models to the conversion time of a page (in seconds per page). Lower is better. Left: Ranges of time contributions for each model to pages it was applied on (i.e., OCR was applied only on pages with bitmaps, table structure was applied only on pages with tables). Right: Average time contribution to a page in the benchmark dataset (factoring in zero-time contribution for OCR and table structure models on pages without bitmaps or tables) .

Annotations (HuggingFaceTB/SmolVLM-256M-Instruct)

bar chart with different colored bars representing different data points.
\n", + "

Picture #/pictures/4


Caption

Figure 5: Conversion time in seconds per page on our dataset in three scenarios, across all assets and system configurations. Lower bars are better. The configuration includes OCR and table structure recognition ( fast table option on Docling and MinerU, hi res in unstructured, as shown in table 1).

Annotations (HuggingFaceTB/SmolVLM-256M-Instruct)

A bar chart with the following information:\n", + "\n", + "- The x-axis represents the number of pages, ranging from 0 to 14.\n", + "- The y-axis represents the page count, ranging from 0 to 14.\n", + "- The chart has three categories: Marker, Unstructured, and Detailed.\n", + "- The x-axis is labeled \"see/page.\"\n", + "- The y-axis is labeled \"Page Count.\"\n", + "- The chart shows that the Marker category has the highest number of pages, followed by the Unstructured category, and then the Detailed category.
\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from docling_core.types.doc.document import PictureDescriptionData\n", + "from IPython import display\n", + "\n", + "html_buffer = []\n", + "# display the first 5 pictures and their captions and annotations:\n", + "for pic in doc.pictures[:5]:\n", + " html_item = (\n", + " f\"

Picture {pic.self_ref}

\"\n", + " f'
'\n", + " f\"

Caption

{pic.caption_text(doc=doc)}
\"\n", + " )\n", + " for annotation in pic.annotations:\n", + " if not isinstance(annotation, PictureDescriptionData):\n", + " continue\n", + " html_item += (\n", + " f\"

Annotations ({annotation.provenance})

{annotation.text}
\\n\"\n", + " )\n", " html_buffer.append(html_item)\n", "display.HTML(\"
\".join(html_buffer))" ]