diff --git a/docling/datamodel/pipeline_options.py b/docling/datamodel/pipeline_options.py
index 5b870550..fa280958 100644
--- a/docling/datamodel/pipeline_options.py
+++ b/docling/datamodel/pipeline_options.py
@@ -19,8 +19,8 @@ from typing_extensions import deprecated
 # Import the following for backwards compatibility
 from docling.datamodel.pipeline_options_vlm_model import (
     ApiVlmOptions,
-    HuggingFaceVlmOptions,
     InferenceFramework,
+    InlineVlmOptions,
     ResponseFormat,
 )
 from docling.datamodel.vlm_model_spec import (
@@ -317,7 +317,7 @@ class VlmPipelineOptions(PaginatedPipelineOptions):
         False  # (To be used with vlms, or other generative models)
     )
     # If True, text from backend will be used instead of generated text
-    vlm_options: Union[HuggingFaceVlmOptions, ApiVlmOptions] = (
+    vlm_options: Union[InlineVlmOptions, ApiVlmOptions] = (
         smoldocling_vlm_conversion_options
     )
 
diff --git a/docling/datamodel/pipeline_options_vlm_model.py b/docling/datamodel/pipeline_options_vlm_model.py
index 544e6fd7..7276531e 100644
--- a/docling/datamodel/pipeline_options_vlm_model.py
+++ b/docling/datamodel/pipeline_options_vlm_model.py
@@ -2,6 +2,7 @@ from enum import Enum
 from typing import Any, Dict, Literal
 
 from pydantic import AnyUrl, BaseModel
+from typing_extensions import deprecated
 
 
 class BaseVlmOptions(BaseModel):
@@ -17,15 +18,16 @@ class ResponseFormat(str, Enum):
 class InferenceFramework(str, Enum):
     MLX = "mlx"
-    TRANSFORMERS = "transformers"
+    TRANSFORMERS = "transformers"  # TODO: how to flag this as outdated?
     TRANSFORMERS_VISION2SEQ = "transformers-vision2seq"
     TRANSFORMERS_CAUSALLM = "transformers-causallm"
 
 
-class HuggingFaceVlmOptions(BaseVlmOptions):
-    kind: Literal["hf_model_options"] = "hf_model_options"
+class InlineVlmOptions(BaseVlmOptions):
+    kind: Literal["inline_model_options"] = "inline_model_options"
 
     repo_id: str
+    trust_remote_code: bool = False
     load_in_8bit: bool = True
     llm_int8_threshold: float = 6.0
     quantized: bool = False
@@ -46,6 +48,11 @@ class HuggingFaceVlmOptions(BaseVlmOptions):
         return self.repo_id.replace("/", "--")
 
 
+@deprecated("Use InlineVlmOptions instead.")
+class HuggingFaceVlmOptions(InlineVlmOptions):
+    pass
+
+
 class ApiVlmOptions(BaseVlmOptions):
     kind: Literal["api_model_options"] = "api_model_options"
 
diff --git a/docling/datamodel/vlm_model_spec.py b/docling/datamodel/vlm_model_spec.py
index 36cbc2d0..08a093c8 100644
--- a/docling/datamodel/vlm_model_spec.py
+++ b/docling/datamodel/vlm_model_spec.py
@@ -7,8 +7,8 @@ from pydantic import (
 
 from docling.datamodel.pipeline_options_vlm_model import (
     ApiVlmOptions,
-    HuggingFaceVlmOptions,
     InferenceFramework,
+    InlineVlmOptions,
     ResponseFormat,
 )
 
@@ -16,7 +16,7 @@ _log = logging.getLogger(__name__)
 
 # SmolDocling
-SMOLDOCLING_MLX = HuggingFaceVlmOptions(
+SMOLDOCLING_MLX = InlineVlmOptions(
     repo_id="ds4sd/SmolDocling-256M-preview-mlx-bf16",
     prompt="Convert this page to docling.",
     response_format=ResponseFormat.DOCTAGS,
@@ -25,7 +25,7 @@ SMOLDOCLING_MLX = HuggingFaceVlmOptions(
     temperature=0.0,
 )
 
-SMOLDOCLING_TRANSFORMERS = HuggingFaceVlmOptions(
+SMOLDOCLING_TRANSFORMERS = InlineVlmOptions(
     repo_id="ds4sd/SmolDocling-256M-preview",
     prompt="Convert this page to docling.",
     response_format=ResponseFormat.DOCTAGS,
@@ -35,7 +35,7 @@ SMOLDOCLING_TRANSFORMERS = HuggingFaceVlmOptions(
 )
 
 # GraniteVision
-GRANITE_VISION_TRANSFORMERS = HuggingFaceVlmOptions(
+GRANITE_VISION_TRANSFORMERS = InlineVlmOptions(
     repo_id="ibm-granite/granite-vision-3.2-2b",
     prompt="Convert this page to markdown. Do not miss any text and only output the bare MarkDown!",
     response_format=ResponseFormat.MARKDOWN,
@@ -55,7 +55,7 @@ GRANITE_VISION_OLLAMA = ApiVlmOptions(
 )
 
 # Pixtral
-PIXTRAL_12B_TRANSFORMERS = HuggingFaceVlmOptions(
+PIXTRAL_12B_TRANSFORMERS = InlineVlmOptions(
     repo_id="mistral-community/pixtral-12b",
     prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
     response_format=ResponseFormat.MARKDOWN,
@@ -64,7 +64,7 @@ PIXTRAL_12B_TRANSFORMERS = HuggingFaceVlmOptions(
     temperature=0.0,
 )
 
-PIXTRAL_12B_MLX = HuggingFaceVlmOptions(
+PIXTRAL_12B_MLX = InlineVlmOptions(
     repo_id="mlx-community/pixtral-12b-bf16",
     prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
     response_format=ResponseFormat.MARKDOWN,
@@ -74,7 +74,7 @@ PIXTRAL_12B_MLX = HuggingFaceVlmOptions(
 )
 
 # Phi4
-PHI4_TRANSFORMERS = HuggingFaceVlmOptions(
+PHI4_TRANSFORMERS = InlineVlmOptions(
     repo_id="microsoft/Phi-4-multimodal-instruct",
     prompt="Convert this page to MarkDown. Do not miss any text and only output the bare markdown",
     response_format=ResponseFormat.MARKDOWN,
@@ -84,7 +84,7 @@ PHI4_TRANSFORMERS = HuggingFaceVlmOptions(
 )
 
 # Qwen
-QWEN25_VL_3B_MLX = HuggingFaceVlmOptions(
+QWEN25_VL_3B_MLX = InlineVlmOptions(
     repo_id="mlx-community/Qwen2.5-VL-3B-Instruct-bf16",
     prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
     response_format=ResponseFormat.MARKDOWN,
@@ -94,7 +94,7 @@ QWEN25_VL_3B_MLX = HuggingFaceVlmOptions(
 )
 
 # Gemma-3
-GEMMA3_12B_MLX = HuggingFaceVlmOptions(
+GEMMA3_12B_MLX = InlineVlmOptions(
     repo_id="mlx-community/gemma-3-12b-it-bf16",
     prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
     response_format=ResponseFormat.MARKDOWN,
@@ -103,7 +103,7 @@ GEMMA3_12B_MLX = HuggingFaceVlmOptions(
     temperature=0.0,
 )
 
-GEMMA3_27B_MLX = HuggingFaceVlmOptions(
+GEMMA3_27B_MLX = InlineVlmOptions(
     repo_id="mlx-community/gemma-3-27b-it-bf16",
     prompt="Convert this page to markdown. Do not miss any text and only output the bare markdown!",
     response_format=ResponseFormat.MARKDOWN,
diff --git a/docling/models/vlm_models_inline/hf_transformers_causallm_model.py b/docling/models/vlm_models_inline/hf_transformers_causallm_model.py
index 4f3a3d3f..76c32b2b 100644
--- a/docling/models/vlm_models_inline/hf_transformers_causallm_model.py
+++ b/docling/models/vlm_models_inline/hf_transformers_causallm_model.py
@@ -9,7 +9,7 @@ from docling.datamodel.document import ConversionResult
 from docling.datamodel.pipeline_options import (
     AcceleratorOptions,
 )
-from docling.datamodel.pipeline_options_vlm_model import HuggingFaceVlmOptions
+from docling.datamodel.pipeline_options_vlm_model import InlineVlmOptions
 from docling.models.base_model import BasePageModel
 from docling.models.hf_vlm_model import HuggingFaceVlmModel
 from docling.utils.accelerator_utils import decide_device
@@ -24,12 +24,10 @@ class HuggingFaceVlmModel_AutoModelForCausalLM(BasePageModel):
         enabled: bool,
         artifacts_path: Optional[Path],
         accelerator_options: AcceleratorOptions,
-        vlm_options: HuggingFaceVlmOptions,
+        vlm_options: InlineVlmOptions,
     ):
         self.enabled = enabled
 
-        self.trust_remote_code = True
-
         self.vlm_options = vlm_options
 
         if self.enabled:
@@ -58,51 +56,33 @@ class HuggingFaceVlmModel_AutoModelForCausalLM(BasePageModel):
             elif (artifacts_path / repo_cache_folder).exists():
                 artifacts_path = artifacts_path / repo_cache_folder
 
-        self.param_question = vlm_options.prompt  # "Perform Layout Analysis."
-        self.param_quantization_config = BitsAndBytesConfig(
-            load_in_8bit=vlm_options.load_in_8bit,  # True,
-            llm_int8_threshold=vlm_options.llm_int8_threshold,  # 6.0
-        )
-        self.param_quantized = vlm_options.quantized  # False
+        self.param_quantization_config: Optional[BitsAndBytesConfig] = None
+        if vlm_options.quantized:
+            self.param_quantization_config = BitsAndBytesConfig(
+                load_in_8bit=vlm_options.load_in_8bit,
+                llm_int8_threshold=vlm_options.llm_int8_threshold,
+            )
 
         self.processor = AutoProcessor.from_pretrained(
             artifacts_path,
-            trust_remote_code=self.trust_remote_code,
+            trust_remote_code=vlm_options.trust_remote_code,
+        )
+        self.vlm_model = AutoModelForCausalLM.from_pretrained(
+            artifacts_path,
+            device_map=self.device,
+            torch_dtype="auto",
+            quantization_config=self.param_quantization_config,
+            _attn_implementation=(
+                "flash_attention_2"
+                if self.device.startswith("cuda")
+                and accelerator_options.cuda_use_flash_attention2
+                else "eager"
+            ),
+            trust_remote_code=vlm_options.trust_remote_code,
         )
 
-        if self.param_quantized:
-            print("using quantized")
-            self.vlm_model = AutoModelForCausalLM.from_pretrained(
-                artifacts_path,
-                device_map=self.device,
-                torch_dtype="auto",
-                quantization_config=self.param_quantization_config,
-                _attn_implementation=(
-                    "flash_attention_2"
-                    if self.device.startswith("cuda")
-                    and accelerator_options.cuda_use_flash_attention2
-                    else "eager"
-                ),
-                trust_remote_code=self.trust_remote_code,
-            )  # .to(self.device)
-        else:
-            print("using original")
-            self.vlm_model = AutoModelForCausalLM.from_pretrained(
-                artifacts_path,
-                device_map=self.device,
-                torch_dtype="auto",  # torch.bfloat16,
-                _attn_implementation=(
-                    "flash_attention_2"
-                    if self.device.startswith("cuda")
-                    and accelerator_options.cuda_use_flash_attention2
-                    else "eager"
-                ),
-                trust_remote_code=self.trust_remote_code,
-            )  # .to(self.device)
-
-        model_path = artifacts_path
 
         # Load generation config
-        self.generation_config = GenerationConfig.from_pretrained(model_path)
+        self.generation_config = GenerationConfig.from_pretrained(artifacts_path)
 
     def __call__(
         self, conv_res: ConversionResult, page_batch: Iterable[Page]
@@ -161,6 +141,7 @@ class HuggingFaceVlmModel_AutoModelForCausalLM(BasePageModel):
     def formulate_prompt(self) -> str:
         """Formulate a prompt for the VLM."""
         if self.vlm_options.repo_id == "microsoft/Phi-4-multimodal-instruct":
+            _log.debug("Using specialized prompt for Phi-4")
             # more info here: https://huggingface.co/microsoft/Phi-4-multimodal-instruct#loading-the-model-locally
 
             user_prompt = "<|user|>"
@@ -171,7 +152,22 @@ class HuggingFaceVlmModel_AutoModelForCausalLM(BasePageModel):
 
             _log.debug(f"prompt for {self.vlm_options.repo_id}: {prompt}")
             return prompt
-        else:
-            raise ValueError(f"No prompt template for {self.vlm_options.repo_id}")
 
-        return ""
+        _log.debug("Using default prompt for CausalLM using apply_chat_template")
+        messages = [
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "text",
+                        "text": "This is a page from a document.",
+                    },
+                    {"type": "image"},
+                    {"type": "text", "text": self.vlm_options.prompt},
+                ],
+            }
+        ]
+        prompt = self.processor.apply_chat_template(
+            messages, add_generation_prompt=False
+        )
+        return prompt
diff --git a/docling/models/vlm_models_inline/hf_transformers_vision2seq_model.py b/docling/models/vlm_models_inline/hf_transformers_vision2seq_model.py
index 91e04087..95578b47 100644
--- a/docling/models/vlm_models_inline/hf_transformers_vision2seq_model.py
+++ b/docling/models/vlm_models_inline/hf_transformers_vision2seq_model.py
@@ -9,7 +9,7 @@ from docling.datamodel.document import ConversionResult
 from docling.datamodel.pipeline_options import (
     AcceleratorOptions,
 )
-from docling.datamodel.pipeline_options_vlm_model import HuggingFaceVlmOptions
+from docling.datamodel.pipeline_options_vlm_model import InlineVlmOptions
 from docling.models.base_model import BasePageModel
 from docling.models.hf_vlm_model import HuggingFaceVlmModel
 from docling.utils.accelerator_utils import decide_device
@@ -24,7 +24,7 @@ class HuggingFaceVlmModel_AutoModelForVision2Seq(BasePageModel):
         enabled: bool,
         artifacts_path: Optional[Path],
         accelerator_options: AcceleratorOptions,
-        vlm_options: HuggingFaceVlmOptions,
+        vlm_options: InlineVlmOptions,
     ):
         self.enabled = enabled
 
@@ -57,45 +57,29 @@ class HuggingFaceVlmModel_AutoModelForVision2Seq(BasePageModel):
             elif (artifacts_path / repo_cache_folder).exists():
                 artifacts_path = artifacts_path / repo_cache_folder
 
-        # self.param_question = vlm_options.prompt  # "Perform Layout Analysis."
-        self.param_quantization_config = BitsAndBytesConfig(
-            load_in_8bit=vlm_options.load_in_8bit,  # True,
-            llm_int8_threshold=vlm_options.llm_int8_threshold,  # 6.0
-        )
-        self.param_quantized = vlm_options.quantized  # False
+        self.param_quantization_config: Optional[BitsAndBytesConfig] = None
+        if vlm_options.quantized:
+            self.param_quantization_config = BitsAndBytesConfig(
+                load_in_8bit=vlm_options.load_in_8bit,
+                llm_int8_threshold=vlm_options.llm_int8_threshold,
+            )
 
         self.processor = AutoProcessor.from_pretrained(
             artifacts_path,
-            # trust_remote_code=True,
+            trust_remote_code=vlm_options.trust_remote_code,
+        )
+        self.vlm_model = AutoModelForVision2Seq.from_pretrained(
+            artifacts_path,
+            device_map=self.device,
+            # torch_dtype=torch.bfloat16,
+            _attn_implementation=(
+                "flash_attention_2"
+                if self.device.startswith("cuda")
+                and accelerator_options.cuda_use_flash_attention2
+                else "eager"
+            ),
+            trust_remote_code=vlm_options.trust_remote_code,
         )
 
-        if not self.param_quantized:
-            self.vlm_model = AutoModelForVision2Seq.from_pretrained(
-                artifacts_path,
-                device_map=self.device,
-                # torch_dtype=torch.bfloat16,
-                _attn_implementation=(
-                    "flash_attention_2"
-                    if self.device.startswith("cuda")
-                    and accelerator_options.cuda_use_flash_attention2
-                    else "eager"
-                ),
-                # trust_remote_code=True,
-            )  # .to(self.device)
-
-        else:
-            self.vlm_model = AutoModelForVision2Seq.from_pretrained(
-                artifacts_path,
-                device_map=self.device,
-                torch_dtype="auto",
-                quantization_config=self.param_quantization_config,
-                _attn_implementation=(
-                    "flash_attention_2"
-                    if self.device.startswith("cuda")
-                    and accelerator_options.cuda_use_flash_attention2
-                    else "eager"
-                ),
-                # trust_remote_code=True,
-            )  # .to(self.device)
 
     def __call__(
         self, conv_res: ConversionResult, page_batch: Iterable[Page]
diff --git a/docling/models/vlm_models_inline/mlx_model.py b/docling/models/vlm_models_inline/mlx_model.py
index 099bb42c..1cc43c7c 100644
--- a/docling/models/vlm_models_inline/mlx_model.py
+++ b/docling/models/vlm_models_inline/mlx_model.py
@@ -9,7 +9,7 @@ from docling.datamodel.document import ConversionResult
 from docling.datamodel.pipeline_options import (
     AcceleratorOptions,
 )
-from docling.datamodel.pipeline_options_vlm_model import HuggingFaceVlmOptions
+from docling.datamodel.pipeline_options_vlm_model import InlineVlmOptions
 from docling.models.base_model import BasePageModel
 from docling.models.hf_vlm_model import HuggingFaceVlmModel
 from docling.utils.profiling import TimeRecorder
@@ -23,7 +23,7 @@ class HuggingFaceMlxModel(BasePageModel):
         enabled: bool,
         artifacts_path: Optional[Path],
         accelerator_options: AcceleratorOptions,
-        vlm_options: HuggingFaceVlmOptions,
+        vlm_options: InlineVlmOptions,
     ):
         self.enabled = enabled
 
diff --git a/docling/pipeline/vlm_pipeline.py b/docling/pipeline/vlm_pipeline.py
index fd4a4375..8f21b9d4 100644
--- a/docling/pipeline/vlm_pipeline.py
+++ b/docling/pipeline/vlm_pipeline.py
@@ -31,8 +31,8 @@ from docling.datamodel.pipeline_options import (
 )
 from docling.datamodel.pipeline_options_vlm_model import (
     ApiVlmOptions,
-    HuggingFaceVlmOptions,
     InferenceFramework,
+    InlineVlmOptions,
     ResponseFormat,
 )
 from docling.datamodel.settings import settings
@@ -86,8 +86,8 @@ class VlmPipeline(PaginatedPipeline):
                     vlm_options=cast(ApiVlmOptions, self.pipeline_options.vlm_options),
                 ),
             ]
-        elif isinstance(self.pipeline_options.vlm_options, HuggingFaceVlmOptions):
-            vlm_options = cast(HuggingFaceVlmOptions, self.pipeline_options.vlm_options)
+        elif isinstance(self.pipeline_options.vlm_options, InlineVlmOptions):
+            vlm_options = cast(InlineVlmOptions, self.pipeline_options.vlm_options)
             if vlm_options.inference_framework == InferenceFramework.MLX:
                 self.build_pipe = [
                     HuggingFaceMlxModel(
@@ -100,6 +100,7 @@ class VlmPipeline(PaginatedPipeline):
             elif (
                 vlm_options.inference_framework
                 == InferenceFramework.TRANSFORMERS_VISION2SEQ
+                or vlm_options.inference_framework == InferenceFramework.TRANSFORMERS
             ):
                 self.build_pipe = [
                     HuggingFaceVlmModel_AutoModelForVision2Seq(
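
For downstream users, the practical effect of this diff is that locally executed models are configured through InlineVlmOptions (HuggingFaceVlmOptions remains only as a deprecated alias), and trust_remote_code is now an explicit opt-in field instead of being hard-coded to True inside the model wrappers. The following is a minimal usage sketch, not part of the diff: it assumes docling's standard DocumentConverter/PdfFormatOption entry points and a VISION2SEQ framework choice for SmolDocling, and the input file name is hypothetical.

```python
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import VlmPipelineOptions
from docling.datamodel.pipeline_options_vlm_model import (
    InferenceFramework,
    InlineVlmOptions,
    ResponseFormat,
)
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.vlm_pipeline import VlmPipeline

# Inline model options: same fields as the old HuggingFaceVlmOptions, plus the
# new explicit trust_remote_code switch (defaults to False).
vlm_options = InlineVlmOptions(
    repo_id="ds4sd/SmolDocling-256M-preview",
    prompt="Convert this page to docling.",
    response_format=ResponseFormat.DOCTAGS,
    inference_framework=InferenceFramework.TRANSFORMERS_VISION2SEQ,
    trust_remote_code=False,
)

pipeline_options = VlmPipelineOptions(vlm_options=vlm_options)

# Route PDF conversion through the VLM pipeline with the options above.
converter = DocumentConverter(
    format_options={
        InputFormat.PDF: PdfFormatOption(
            pipeline_cls=VlmPipeline,
            pipeline_options=pipeline_options,
        )
    }
)

result = converter.convert("example.pdf")  # hypothetical input document
print(result.document.export_to_markdown())
```

Code that still instantiates HuggingFaceVlmOptions keeps working, since it now subclasses InlineVlmOptions, but typing_extensions.deprecated marks it so type checkers warn and a DeprecationWarning is raised at instantiation; callers should migrate to the new name.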