From 17afb664d005168b5a6f12a2df4432076a9329bb Mon Sep 17 00:00:00 2001
From: Christoph Auer <60343111+cau-git@users.noreply.github.com>
Date: Wed, 17 Sep 2025 15:15:49 +0200
Subject: [PATCH] feat: Add granite-docling model (#2272)

* adding granite-docling preview

Signed-off-by: Peter Staar

* updated the model specs

Signed-off-by: Peter Staar

* typo

Signed-off-by: Michele Dolfi

* use granite-docling and add to the model downloader

Signed-off-by: Michele Dolfi

* update docs and README

Signed-off-by: Michele Dolfi

* Update final repo_ids for GraniteDocling

Signed-off-by: Christoph Auer

* Update final repo_ids for GraniteDocling

Signed-off-by: Christoph Auer

* Fix model name in CLI usage example

Signed-off-by: Christoph Auer <60343111+cau-git@users.noreply.github.com>

* Fix VLM model name in README.md

Signed-off-by: Christoph Auer <60343111+cau-git@users.noreply.github.com>

---------

Signed-off-by: Peter Staar
Signed-off-by: Michele Dolfi
Signed-off-by: Christoph Auer
Signed-off-by: Christoph Auer <60343111+cau-git@users.noreply.github.com>
Co-authored-by: Peter Staar
Co-authored-by: Michele Dolfi
---
 README.md                             |  6 +++---
 docling/cli/main.py                   | 16 +++++++++++++-
 docling/cli/models.py                 |  4 ++++
 docling/datamodel/pipeline_options.py |  4 ++--
 docling/datamodel/vlm_model_specs.py  | 30 +++++++++++++++++++++++++++
 docling/utils/model_downloader.py     | 22 ++++++++++++++++++++
 docs/examples/minimal_vlm_pipeline.py |  4 ++--
 docs/index.md                         |  2 +-
 docs/usage/index.md                   |  4 ++--
 docs/usage/vision_models.md           |  2 ++
 mkdocs.yml                            |  2 +-
 11 files changed, 84 insertions(+), 12 deletions(-)

diff --git a/README.md b/README.md
index 5c921b5f..d3cd4935 100644
--- a/README.md
+++ b/README.md
@@ -36,7 +36,7 @@ Docling simplifies document processing, parsing diverse formats — including ad
 * 🔒 Local execution capabilities for sensitive data and air-gapped environments
 * 🤖 Plug-and-play [integrations][integrations] incl. LangChain, LlamaIndex, Crew AI & Haystack for agentic AI
 * 🔍 Extensive OCR support for scanned PDFs and images
-* 👓 Support of several Visual Language Models ([SmolDocling](https://huggingface.co/ds4sd/SmolDocling-256M-preview))
+* 👓 Support of several Visual Language Models ([GraniteDocling](https://huggingface.co/ibm-granite/granite-docling-258M))
 * 🎙️ Audio support with Automatic Speech Recognition (ASR) models
 * 🔌 Connect to any agent using the [MCP server](https://docling-project.github.io/docling/usage/mcp/)
 * 💻 Simple and convenient CLI
@@ -88,9 +88,9 @@ Docling has a built-in CLI to run conversions.
 docling https://arxiv.org/pdf/2206.01062
 ```
 
-You can also use 🥚[SmolDocling](https://huggingface.co/ds4sd/SmolDocling-256M-preview) and other VLMs via Docling CLI:
+You can also use 🥚[GraniteDocling](https://huggingface.co/ibm-granite/granite-docling-258M) and other VLMs via Docling CLI:
 ```bash
-docling --pipeline vlm --vlm-model smoldocling https://arxiv.org/pdf/2206.01062
+docling --pipeline vlm --vlm-model granite_docling https://arxiv.org/pdf/2206.01062
 ```
 
 This will use MLX acceleration on supported Apple Silicon hardware.
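For reference, the CLI call in the README hunk above maps roughly to the following Python usage. This is a minimal sketch that reuses names this patch introduces or changes (`GRANITEDOCLING_TRANSFORMERS`, `VlmPipelineOptions`, `VlmPipeline`); the `DocumentConverter`/`PdfFormatOption`/`InputFormat` wiring is the pre-existing Docling API rather than something changed by this diff.

```python
from docling.datamodel import vlm_model_specs
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import VlmPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.vlm_pipeline import VlmPipeline

# GraniteDocling (transformers variant) becomes the default VLM with this patch,
# so vlm_options could be omitted; it is spelled out here for clarity.
pipeline_options = VlmPipelineOptions(
    vlm_options=vlm_model_specs.GRANITEDOCLING_TRANSFORMERS,
)

converter = DocumentConverter(
    format_options={
        InputFormat.PDF: PdfFormatOption(
            pipeline_cls=VlmPipeline,
            pipeline_options=pipeline_options,
        )
    }
)

doc = converter.convert("https://arxiv.org/pdf/2206.01062").document
print(doc.export_to_markdown())
```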
diff --git a/docling/cli/main.py b/docling/cli/main.py
index 692efc30..2177b788 100644
--- a/docling/cli/main.py
+++ b/docling/cli/main.py
@@ -64,6 +64,8 @@ from docling.datamodel.vlm_model_specs import (
     GOT2_TRANSFORMERS,
     GRANITE_VISION_OLLAMA,
     GRANITE_VISION_TRANSFORMERS,
+    GRANITEDOCLING_MLX,
+    GRANITEDOCLING_TRANSFORMERS,
     SMOLDOCLING_MLX,
     SMOLDOCLING_TRANSFORMERS,
     SMOLDOCLING_VLLM,
@@ -334,7 +336,7 @@ def convert( # noqa: C901
     vlm_model: Annotated[
         VlmModelType,
         typer.Option(..., help="Choose the VLM model to use with PDF or image files."),
-    ] = VlmModelType.SMOLDOCLING,
+    ] = VlmModelType.GRANITEDOCLING,
     asr_model: Annotated[
         AsrModelType,
         typer.Option(..., help="Choose the ASR model to use with audio/video files."),
@@ -684,6 +686,18 @@ def convert( # noqa: C901
                         "To run SmolDocling faster, please install mlx-vlm:\n"
                         "pip install mlx-vlm"
                     )
+        elif vlm_model == VlmModelType.GRANITEDOCLING:
+            pipeline_options.vlm_options = GRANITEDOCLING_TRANSFORMERS
+            if sys.platform == "darwin":
+                try:
+                    import mlx_vlm
+
+                    pipeline_options.vlm_options = GRANITEDOCLING_MLX
+                except ImportError:
+                    _log.warning(
+                        "To run GraniteDocling faster, please install mlx-vlm:\n"
+                        "pip install mlx-vlm"
+                    )
 
         elif vlm_model == VlmModelType.SMOLDOCLING_VLLM:
             pipeline_options.vlm_options = SMOLDOCLING_VLLM
diff --git a/docling/cli/models.py b/docling/cli/models.py
index ff0eed52..be80add0 100644
--- a/docling/cli/models.py
+++ b/docling/cli/models.py
@@ -33,6 +33,8 @@ class _AvailableModels(str, Enum):
     CODE_FORMULA = "code_formula"
     PICTURE_CLASSIFIER = "picture_classifier"
     SMOLVLM = "smolvlm"
+    GRANITEDOCLING = "granitedocling"
+    GRANITEDOCLING_MLX = "granitedocling_mlx"
     SMOLDOCLING = "smoldocling"
     SMOLDOCLING_MLX = "smoldocling_mlx"
     GRANITE_VISION = "granite_vision"
@@ -108,6 +110,8 @@ def download(
         with_code_formula=_AvailableModels.CODE_FORMULA in to_download,
         with_picture_classifier=_AvailableModels.PICTURE_CLASSIFIER in to_download,
         with_smolvlm=_AvailableModels.SMOLVLM in to_download,
+        with_granitedocling=_AvailableModels.GRANITEDOCLING in to_download,
+        with_granitedocling_mlx=_AvailableModels.GRANITEDOCLING_MLX in to_download,
         with_smoldocling=_AvailableModels.SMOLDOCLING in to_download,
         with_smoldocling_mlx=_AvailableModels.SMOLDOCLING_MLX in to_download,
         with_granite_vision=_AvailableModels.GRANITE_VISION in to_download,
diff --git a/docling/datamodel/pipeline_options.py b/docling/datamodel/pipeline_options.py
index 842c1625..ca8324e5 100644
--- a/docling/datamodel/pipeline_options.py
+++ b/docling/datamodel/pipeline_options.py
@@ -12,7 +12,7 @@ from pydantic import (
 )
 from typing_extensions import deprecated
 
-from docling.datamodel import asr_model_specs
+from docling.datamodel import asr_model_specs, vlm_model_specs
 
 # Import the following for backwards compatibility
 from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
@@ -290,7 +290,7 @@ class VlmPipelineOptions(PaginatedPipelineOptions):
     )
     # If True, text from backend will be used instead of generated text
 
     vlm_options: Union[InlineVlmOptions, ApiVlmOptions] = (
-        smoldocling_vlm_conversion_options
+        vlm_model_specs.GRANITEDOCLING_TRANSFORMERS
     )
 
diff --git a/docling/datamodel/vlm_model_specs.py b/docling/datamodel/vlm_model_specs.py
index 54d81978..652e0afd 100644
--- a/docling/datamodel/vlm_model_specs.py
+++ b/docling/datamodel/vlm_model_specs.py
@@ -18,6 +18,35 @@ from docling.datamodel.pipeline_options_vlm_model import (
 
 _log = logging.getLogger(__name__)
 
+# Granite-Docling
+GRANITEDOCLING_TRANSFORMERS = InlineVlmOptions(
+    repo_id="ibm-granite/granite-docling-258M",
+    prompt="Convert this page to docling.",
+    response_format=ResponseFormat.DOCTAGS,
+    inference_framework=InferenceFramework.TRANSFORMERS,
+    transformers_model_type=TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT,
+    supported_devices=[
+        AcceleratorDevice.CPU,
+        AcceleratorDevice.CUDA,
+    ],
+    scale=2.0,
+    temperature=0.0,
+    max_new_tokens=8192,
+    stop_strings=["</doctag>", "<|end_of_text|>"],
+)
+
+GRANITEDOCLING_MLX = InlineVlmOptions(
+    repo_id="ibm-granite/granite-docling-258M-mlx",
+    prompt="Convert this page to docling.",
+    response_format=ResponseFormat.DOCTAGS,
+    inference_framework=InferenceFramework.MLX,
+    supported_devices=[AcceleratorDevice.MPS],
+    scale=2.0,
+    temperature=0.0,
+    max_new_tokens=8192,
+    stop_strings=["</doctag>", "<|end_of_text|>"],
+)
+
 # SmolDocling
 SMOLDOCLING_MLX = InlineVlmOptions(
     repo_id="ds4sd/SmolDocling-256M-preview-mlx-bf16",
@@ -272,3 +301,4 @@ class VlmModelType(str, Enum):
     GRANITE_VISION_VLLM = "granite_vision_vllm"
     GRANITE_VISION_OLLAMA = "granite_vision_ollama"
     GOT_OCR_2 = "got_ocr_2"
+    GRANITEDOCLING = "granite_docling"
diff --git a/docling/utils/model_downloader.py b/docling/utils/model_downloader.py
index 28c3918c..1894dccd 100644
--- a/docling/utils/model_downloader.py
+++ b/docling/utils/model_downloader.py
@@ -10,6 +10,8 @@ from docling.datamodel.pipeline_options import (
 )
 from docling.datamodel.settings import settings
 from docling.datamodel.vlm_model_specs import (
+    GRANITEDOCLING_MLX,
+    GRANITEDOCLING_TRANSFORMERS,
     SMOLDOCLING_MLX,
     SMOLDOCLING_TRANSFORMERS,
 )
@@ -34,6 +36,8 @@ def download_models(
     with_code_formula: bool = True,
     with_picture_classifier: bool = True,
     with_smolvlm: bool = False,
+    with_granitedocling: bool = False,
+    with_granitedocling_mlx: bool = False,
     with_smoldocling: bool = False,
     with_smoldocling_mlx: bool = False,
     with_granite_vision: bool = False,
@@ -86,6 +90,24 @@ def download_models(
             progress=progress,
         )
 
+    if with_granitedocling:
+        _log.info("Downloading GraniteDocling model...")
+        download_hf_model(
+            repo_id=GRANITEDOCLING_TRANSFORMERS.repo_id,
+            local_dir=output_dir / GRANITEDOCLING_TRANSFORMERS.repo_cache_folder,
+            force=force,
+            progress=progress,
+        )
+
+    if with_granitedocling_mlx:
+        _log.info("Downloading GraniteDocling MLX model...")
+        download_hf_model(
+            repo_id=GRANITEDOCLING_MLX.repo_id,
+            local_dir=output_dir / GRANITEDOCLING_MLX.repo_cache_folder,
+            force=force,
+            progress=progress,
+        )
+
     if with_smoldocling:
         _log.info("Downloading SmolDocling model...")
         download_hf_model(
diff --git a/docs/examples/minimal_vlm_pipeline.py b/docs/examples/minimal_vlm_pipeline.py
index 3a25ce43..ba2f809f 100644
--- a/docs/examples/minimal_vlm_pipeline.py
+++ b/docs/examples/minimal_vlm_pipeline.py
@@ -32,7 +32,7 @@ from docling.pipeline.vlm_pipeline import VlmPipeline
 source = "https://arxiv.org/pdf/2501.17887"
 
 ###### USING SIMPLE DEFAULT VALUES
-# - SmolDocling model
+# - GraniteDocling model
 # - Using the transformers framework
 
 converter = DocumentConverter(
@@ -53,7 +53,7 @@ print(doc.export_to_markdown())
 # For more options see the `compare_vlm_models.py` example.
 
 pipeline_options = VlmPipelineOptions(
-    vlm_options=vlm_model_specs.SMOLDOCLING_MLX,
+    vlm_options=vlm_model_specs.GRANITEDOCLING_MLX,
 )
 
 converter = DocumentConverter(
diff --git a/docs/index.md b/docs/index.md
index bfde7059..a41b1303 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -28,7 +28,7 @@ Docling simplifies document processing, parsing diverse formats — including ad
 * 🔒 Local execution capabilities for sensitive data and air-gapped environments
 * 🤖 Plug-and-play [integrations][integrations] incl. LangChain, LlamaIndex, Crew AI & Haystack for agentic AI
 * 🔍 Extensive OCR support for scanned PDFs and images
-* 👓 Support of several Visual Language Models ([SmolDocling](https://huggingface.co/ds4sd/SmolDocling-256M-preview))
+* 👓 Support of several Visual Language Models ([GraniteDocling](https://huggingface.co/ibm-granite/granite-docling-258M))
 * 🎙️ Support for Audio with Automatic Speech Recognition (ASR) models
 * 🔌 Connect to any agent using the [Docling MCP](https://docling-project.github.io/docling/usage/mcp/) server
 * 💻 Simple and convenient CLI
diff --git a/docs/usage/index.md b/docs/usage/index.md
index 0eeb94a9..a336eb93 100644
--- a/docs/usage/index.md
+++ b/docs/usage/index.md
@@ -31,9 +31,9 @@ You can additionally use Docling directly from your terminal, for instance:
 docling https://arxiv.org/pdf/2206.01062
 ```
 
-The CLI provides various options, such as 🥚[SmolDocling](https://huggingface.co/ds4sd/SmolDocling-256M-preview) (incl. MLX acceleration) & other VLMs:
+The CLI provides various options, such as 🥚[GraniteDocling](https://huggingface.co/ibm-granite/granite-docling-258M) (incl. MLX acceleration) & other VLMs:
 ```bash
-docling --pipeline vlm --vlm-model smoldocling https://arxiv.org/pdf/2206.01062
+docling --pipeline vlm --vlm-model granite_docling https://arxiv.org/pdf/2206.01062
 ```
 
 For all available options, run `docling --help` or check the [CLI reference](../reference/cli.md).
diff --git a/docs/usage/vision_models.md b/docs/usage/vision_models.md
index 6e701a3b..d181ca95 100644
--- a/docs/usage/vision_models.md
+++ b/docs/usage/vision_models.md
@@ -45,6 +45,8 @@ The following table reports the models currently available out-of-the-box.
 
 | Model instance | Model | Framework | Device | Num pages | Inference time (sec) |
 | ---------------|------ | --------- | ------ | --------- | ---------------------|
+| `vlm_model_specs.GRANITEDOCLING_TRANSFORMERS` | [ibm-granite/granite-docling-258M](https://huggingface.co/ibm-granite/granite-docling-258M) | `Transformers/AutoModelForVision2Seq` | MPS | 1 | - |
+| `vlm_model_specs.GRANITEDOCLING_MLX` | [ibm-granite/granite-docling-258M-mlx](https://huggingface.co/ibm-granite/granite-docling-258M-mlx) | `MLX`| MPS | 1 | - |
 | `vlm_model_specs.SMOLDOCLING_TRANSFORMERS` | [ds4sd/SmolDocling-256M-preview](https://huggingface.co/ds4sd/SmolDocling-256M-preview) | `Transformers/AutoModelForVision2Seq` | MPS | 1 | 102.212 |
 | `vlm_model_specs.SMOLDOCLING_MLX` | [ds4sd/SmolDocling-256M-preview-mlx-bf16](https://huggingface.co/ds4sd/SmolDocling-256M-preview-mlx-bf16) | `MLX`| MPS | 1 | 6.15453 |
 | `vlm_model_specs.QWEN25_VL_3B_MLX` | [mlx-community/Qwen2.5-VL-3B-Instruct-bf16](https://huggingface.co/mlx-community/Qwen2.5-VL-3B-Instruct-bf16) | `MLX`| MPS | 1 | 23.4951 |
diff --git a/mkdocs.yml b/mkdocs.yml
index e239f680..4d375b7c 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -83,7 +83,7 @@ nav:
       - "Custom conversion": examples/custom_convert.py
       - "Batch conversion": examples/batch_convert.py
       - "Multi-format conversion": examples/run_with_formats.py
-      - "VLM pipeline with SmolDocling": examples/minimal_vlm_pipeline.py
+      - "VLM pipeline with GraniteDocling": examples/minimal_vlm_pipeline.py
       - "VLM pipeline with remote model": examples/vlm_pipeline_api_model.py
       - "VLM comparison": examples/compare_vlm_models.py
       - "ASR pipeline with Whisper": examples/minimal_asr_pipeline.py
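For air-gapped or prefetch scenarios, the `with_granitedocling` and `with_granitedocling_mlx` flags added to `download_models` in this patch can cache the weights ahead of time. A minimal sketch, assuming the existing `output_dir` parameter of `download_models`; the `./models` path is chosen purely for illustration.

```python
from pathlib import Path

from docling.utils.model_downloader import download_models

# Prefetch the GraniteDocling weights into a local cache directory.
# Set with_granitedocling_mlx=True on Apple Silicon when mlx-vlm is installed.
download_models(
    output_dir=Path("./models"),
    with_granitedocling=True,
    with_granitedocling_mlx=False,
    progress=True,
)
```

The CLI equivalent would presumably be `docling-tools models download granitedocling granitedocling_mlx`, matching the enum values added in `docling/cli/models.py`.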