mirror of https://github.com/DS4SD/docling.git

commit 0c7c7c11c2 (parent d5b6c871cf)

reformatted the code

Signed-off-by: Peter Staar <taa@zurich.ibm.com>
@@ -186,9 +186,9 @@ class DocumentConverter:
         Tuple[Type[BasePipeline], str], BasePipeline
     ] = {}

-    def _get_initialized_pipelines(self) -> dict[
-        tuple[Type[BasePipeline], str], BasePipeline
-    ]:
+    def _get_initialized_pipelines(
+        self,
+    ) -> dict[tuple[Type[BasePipeline], str], BasePipeline]:
         return self.initialized_pipelines

     def _get_pipeline_options_hash(self, pipeline_options: PipelineOptions) -> str:
@@ -6,7 +6,6 @@ _log = logging.getLogger(__name__)


 class HuggingFaceVlmModel:
-
     @staticmethod
     def map_device_to_cpu_if_mlx(device: str) -> str:
         if device == "mps":
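Note: the helper in the hunk above exists because MLX drives Apple-silicon acceleration itself, so a Torch "mps" device request has to fall back to CPU. A minimal standalone sketch of the method, assuming the body simply returns "cpu" for "mps" (only the signature and the "mps" check are visible in this diff):

    class HuggingFaceVlmModel:
        @staticmethod
        def map_device_to_cpu_if_mlx(device: str) -> str:
            # Assumed body: MLX manages the Apple GPU directly, so a Torch
            # "mps" device is mapped back to plain "cpu"; any other device
            # string passes through unchanged.
            if device == "mps":
                return "cpu"
            return device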
@@ -76,8 +76,6 @@ class HuggingFaceMlxModel(BasePageModel):
                 assert page.size is not None

                 hi_res_image = page.get_image(scale=self.vlm_options.scale)
-                hi_res_image.save("./scratch/page.png")
-
                 if hi_res_image is not None:
                     im_width, im_height = hi_res_image.size

@@ -128,7 +126,9 @@ class HuggingFaceMlxModel(BasePageModel):
                             )
                         )
                     else:
-                        _log.warning(f"incompatible shape for logprobs: {token.logprobs.shape}")
+                        _log.warning(
+                            f"incompatible shape for logprobs: {token.logprobs.shape}"
+                        )

                     output += token.text
                     if "</doctag>" in token.text:
@@ -42,7 +42,7 @@ class HuggingFaceVlmModel_AutoModelForCausalLM(BasePageModel):
             )

             self.device = decide_device(accelerator_options.device)
-            self.device = HuggingFaceVlmMode.map_device_to_cpu_if_mlx(self.device)
+            self.device = HuggingFaceVlmModel.map_device_to_cpu_if_mlx(self.device)
             _log.debug(f"Available device for VLM: {self.device}")

             self.use_cache = vlm_options.use_kv_cache
@@ -127,7 +127,7 @@ class HuggingFaceVlmModel_AutoModelForCausalLM(BasePageModel):

                     inputs = self.processor(
                         text=prompt, images=hi_res_image, return_tensors="pt"
-                    ) #.to(self.device)
+                    )  # .to(self.device)

                     # Generate response
                     start_time = time.time()
@@ -153,7 +153,9 @@ class HuggingFaceVlmModel_AutoModelForCausalLM(BasePageModel):
                     _log.debug(
                         f"Generated {num_tokens} tokens in time {generation_time:.2f} seconds."
                     )
-                    page.predictions.vlm_response = VlmPrediction(text=response, generation_time=generation_time)
+                    page.predictions.vlm_response = VlmPrediction(
+                        text=response, generation_time=generation_time
+                    )

                 yield page

@@ -39,7 +39,7 @@ class HuggingFaceVlmModel_AutoModelForVision2Seq(BasePageModel):
             )

             self.device = decide_device(accelerator_options.device)
-            self.device = HuggingFaceVlmMode.map_device_to_cpu_if_mlx(self.device)
+            self.device = HuggingFaceVlmModel.map_device_to_cpu_if_mlx(self.device)

             _log.debug(f"Available device for HuggingFace VLM: {self.device}")

@@ -39,7 +39,7 @@ class HuggingFaceVlmModel_LlavaForConditionalGeneration(BasePageModel):
             )

             self.device = decide_device(accelerator_options.device)
-            self.device = HuggingFaceVlmMode.map_device_to_cpu_if_mlx(self.device)
+            self.device = HuggingFaceVlmModel.map_device_to_cpu_if_mlx(self.device)

             self.use_cache = vlm_options.use_kv_cache
             self.max_new_tokens = vlm_options.max_new_tokens
@@ -113,7 +113,7 @@ class HuggingFaceVlmModel_LlavaForConditionalGeneration(BasePageModel):
                         temperature=self.temperature,
                     )

-                    #num_tokens = len(generate_ids[0])
+                    # num_tokens = len(generate_ids[0])
                     generation_time = time.time() - start_time

                     response = self.processor.batch_decode(
@@ -124,7 +124,7 @@ class HuggingFaceVlmModel_LlavaForConditionalGeneration(BasePageModel):

                     page.predictions.vlm_response = VlmPrediction(
                         text=response,
-                        #generated_tokens=num_tokens,
+                        # generated_tokens=num_tokens,
                         generation_time=generation_time,
                     )

@@ -1,11 +1,23 @@
-import re
 import logging
+import re
 from io import BytesIO
 from pathlib import Path
 from typing import List, Optional, Union, cast

 # from docling_core.types import DoclingDocument
-from docling_core.types.doc import BoundingBox, DocItem, ImageRef, PictureItem, TextItem
+from docling_core.types.doc import (
+    BoundingBox,
+    DocItem,
+    DoclingDocument,
+    ImageRef,
+    PictureItem,
+    ProvenanceItem,
+    TextItem,
+)
+from docling_core.types.doc.base import (
+    BoundingBox,
+    Size,
+)
 from docling_core.types.doc.document import DocTagsDocument
 from PIL import Image as PILImage

@@ -20,14 +32,6 @@ from docling.datamodel.pipeline_model_specializations import (
     InferenceFramework,
     ResponseFormat,
 )
-from docling_core.types.doc.base import (
-    Size,
-    BoundingBox,
-)
-from docling_core.types.doc import (
-    ProvenanceItem,
-    DoclingDocument
-)
 from docling.datamodel.pipeline_options import (
     VlmPipelineOptions,
 )
@@ -168,6 +172,7 @@ class VlmPipeline(PaginatedPipeline):
                 self.pipeline_options.vlm_options.response_format
                 == ResponseFormat.DOCTAGS
             ):
+                """
                 doctags_list = []
                 image_list = []
                 for page in conv_res.pages:
@@ -207,6 +212,9 @@ class VlmPipeline(PaginatedPipeline):
                         txt = self.extract_text_from_backend(page, crop_bbox)
                         element.text = txt
                         element.orig = txt
+                """
+                conv_res.document = self._turn_dt_into_doc(conv_res)
+
             elif (
                 self.pipeline_options.vlm_options.response_format
                 == ResponseFormat.MARKDOWN
@@ -271,21 +279,18 @@ class VlmPipeline(PaginatedPipeline):
         if self.force_backend_text:
             scale = self.pipeline_options.images_scale
             for element, _level in conv_res.document.iterate_items():
-                if (not isinstance(element, TextItem)
-                    or len(element.prov) == 0
-                ):
+                if not isinstance(element, TextItem) or len(element.prov) == 0:
                     continue
                 crop_bbox = (
                     element.prov[0]
                     .bbox.scaled(scale=scale)
-                    .to_top_left_origin(
-                        page_height=page.size.height * scale
-                    )
+                    .to_top_left_origin(page_height=page.size.height * scale)
                 )
                 txt = self.extract_text_from_backend(page, crop_bbox)
                 element.text = txt
                 element.orig = txt

         return conv_res.document

     """
     def _turn_md_into_doc(self, conv_res):
@@ -308,7 +313,6 @@ class VlmPipeline(PaginatedPipeline):
     """

     def _turn_md_into_doc(self, conv_res):
-
         def _extract_markdown_code(text):
             """
             Extracts text from markdown code blocks (enclosed in triple backticks).
@@ -322,10 +326,7 @@ class VlmPipeline(PaginatedPipeline):
             """
             # Regex pattern to match content between triple backticks
             # This handles multiline content and optional language specifier
-            pattern = r'^```(?:\w*\n)?(.*?)```(\n)*$'
-
-            # Search for matches with DOTALL flag to match across multiple lines
-            matches = re.findall(pattern, text, re.DOTALL)
+            pattern = r"^```(?:\w*\n)?(.*?)```(\n)*$"
+
+            # Search with DOTALL flag to match across multiple lines
+            mtch = re.search(pattern, text, re.DOTALL)
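Note: besides the quote-style change, the hunk above swaps re.findall for re.search, so _extract_markdown_code now works with a single match object rather than a list of matches. A standalone sketch of how the new pattern behaves (the sample string is invented for illustration):

    import re

    pattern = r"^```(?:\w*\n)?(.*?)```(\n)*$"

    # A fenced block with an optional language specifier.
    text = "```markdown\n# Title\n\nSome body text.\n```\n"

    # DOTALL lets (.*?) span multiple lines.
    mtch = re.search(pattern, text, re.DOTALL)
    if mtch:
        print(mtch.group(1))  # -> "# Title\n\nSome body text.\n"
    else:
        print(text)  # no fence found: keep the raw text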
@@ -338,8 +339,7 @@ class VlmPipeline(PaginatedPipeline):
             return text

         for pg_idx, page in enumerate(conv_res.pages):
-
-            page_no = pg_idx+1  # FIXME: might be incorrect
+            page_no = pg_idx + 1  # FIXME: might be incorrect

             predicted_text = ""
             if page.predictions.vlm_response:
@@ -370,14 +370,18 @@ class VlmPipeline(PaginatedPipeline):
             conv_res.document.add_page(
                 page_no=page_no,
                 size=Size(width=pg_width, height=pg_height),
-                image=ImageRef.from_pil(image=page.image, dpi=72) if page.image else None,
+                image=ImageRef.from_pil(image=page.image, dpi=72)
+                if page.image
+                else None,
             )

             for item, level in page_doc.iterate_items():
                 item.prov = [
-                    ProvenanceItem(page_no=pg_idx+1,
-                                   bbox=BoundingBox(t=0.0, b=0.0, l=0.0, r=0.0),
-                                   charspan=[0,0])
+                    ProvenanceItem(
+                        page_no=pg_idx + 1,
+                        bbox=BoundingBox(t=0.0, b=0.0, l=0.0, r=0.0),
+                        charspan=[0, 0],
+                    )
                 ]
                 conv_res.document.append_child_item(child=item)
                 print(item)
@@ -4,6 +4,7 @@ from pathlib import Path

 from docling_core.types.doc import DocItemLabel, ImageRefMode
 from docling_core.types.doc.document import DEFAULT_EXPORT_LABELS
+from tabulate import tabulate

 from docling.datamodel.base_models import InputFormat
 from docling.datamodel.pipeline_model_specializations import (
@@ -25,8 +26,6 @@ from docling.datamodel.pipeline_options import (
 from docling.document_converter import DocumentConverter, PdfFormatOption
 from docling.pipeline.vlm_pipeline import VlmPipeline

-from tabulate import tabulate
-
 ## Use experimental VlmPipeline
 pipeline_options = VlmPipelineOptions()
 # If force_backend_text = True, text from backend will be used instead of generated text
@@ -101,9 +100,10 @@ qwen_vlm_conversion_options = HuggingFaceVlmOptions(
 pipeline_options.vlm_options = qwen_vlm_conversion_options
 """


+
 def convert(sources: list[Path], converter):
     for source in sources:
-        #start_time = time.time()
+        # start_time = time.time()
         print("================================================")
         print(f"Processing... {source}")
         print("================================================")
@@ -161,10 +161,16 @@ def convert(sources: list[Path], converter):
         print("====================================================")

         # return [source, f"{out_path / fname}.html", model_id, framework, inference_time, ]
-        return [source, model_id, framework, pg_num, inference_time, ]
+        return [
+            source,
+            model_id,
+            framework,
+            pg_num,
+            inference_time,
+        ]


 if __name__ == "__main__":
     sources = [
         # "tests/data/2305.03393v1-pg9-img.png",
         "tests/data/pdf/2305.03393v1-pg9.pdf",
@@ -186,13 +192,13 @@ if __name__ == "__main__":

     rows = []
     for vlm_options in [
-        # smoldocling_vlm_conversion_options, \
-        smoldocling_vlm_mlx_conversion_options, \
-        # granite_vision_vlm_conversion_options, \
-        # phi_vlm_conversion_options, \
-        # qwen25_vl_3b_vlm_mlx_conversion_options, \
-        # pixtral_12b_vlm_mlx_conversion_options,
-        # pixtral_12b_vlm_conversion_options,
+        # smoldocling_vlm_conversion_options, \
+        smoldocling_vlm_mlx_conversion_options,
+        # granite_vision_vlm_conversion_options, \
+        # phi_vlm_conversion_options, \
+        # qwen25_vl_3b_vlm_mlx_conversion_options, \
+        # pixtral_12b_vlm_mlx_conversion_options,
+        # pixtral_12b_vlm_conversion_options,
     ]:
         pipeline_options.vlm_options = vlm_options

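Note: the vlm_options selected in the loop above are wired into a converter roughly as follows. This is a sketch consistent with the imports shown in this script, assuming the pipeline_cls and pipeline_options keywords of PdfFormatOption as used in docling's VLM examples:

    # Build a converter that routes PDFs through the experimental VlmPipeline
    # with the currently selected vlm_options.
    converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_cls=VlmPipeline,
                pipeline_options=pipeline_options,
            ),
        }
    )

    # Collect one summary row per run and print them as a table.
    row = convert(sources=sources, converter=converter)
    rows.append(row)

    print(tabulate(rows))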