add mlx-whisper support

Ken Steele
2025-10-02 03:58:54 -07:00
parent 5be856fbc0
commit c60e72d2b5
6 changed files with 349 additions and 52 deletions

View File

@@ -11,6 +11,7 @@ from docling.datamodel.pipeline_options_asr_model import (
    # ApiAsrOptions,
    InferenceAsrFramework,
    InlineAsrNativeWhisperOptions,
    InlineAsrMlxWhisperOptions,
    TransformersModelType,
)
@@ -27,16 +28,54 @@ WHISPER_TINY = InlineAsrNativeWhisperOptions(
    max_time_chunk=30.0,
)

WHISPER_SMALL = InlineAsrNativeWhisperOptions(
    repo_id="small",
    inference_framework=InferenceAsrFramework.WHISPER,
    verbose=True,
    timestamps=True,
    word_timestamps=True,
    temperature=0.0,
    max_new_tokens=256,
    max_time_chunk=30.0,
)


def _get_whisper_small_model():
    """
    Get the best Whisper Small model for the current hardware.

    Automatically selects MLX Whisper Small for Apple Silicon (MPS) if available,
    otherwise falls back to native Whisper Small.
    """
    # Check if MPS is available (Apple Silicon)
    try:
        import torch

        has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
    except ImportError:
        has_mps = False

    # Check if mlx-whisper is available
    try:
        import mlx_whisper  # type: ignore

        has_mlx_whisper = True
    except ImportError:
        has_mlx_whisper = False

    # Use MLX Whisper if both MPS and mlx-whisper are available
    if has_mps and has_mlx_whisper:
        return InlineAsrMlxWhisperOptions(
            repo_id="mlx-community/whisper-small-mlx",
            inference_framework=InferenceAsrFramework.MLX,
            language="en",
            task="transcribe",
            word_timestamps=True,
            no_speech_threshold=0.6,
            logprob_threshold=-1.0,
            compression_ratio_threshold=2.4,
        )
    else:
        return InlineAsrNativeWhisperOptions(
            repo_id="small",
            inference_framework=InferenceAsrFramework.WHISPER,
            verbose=True,
            timestamps=True,
            word_timestamps=True,
            temperature=0.0,
            max_new_tokens=256,
            max_time_chunk=30.0,
        )


# Create the model instance
WHISPER_SMALL = _get_whisper_small_model()

WHISPER_MEDIUM = InlineAsrNativeWhisperOptions(
    repo_id="medium",
@@ -49,16 +88,54 @@ WHISPER_MEDIUM = InlineAsrNativeWhisperOptions(
    max_time_chunk=30.0,
)

WHISPER_BASE = InlineAsrNativeWhisperOptions(
    repo_id="base",
    inference_framework=InferenceAsrFramework.WHISPER,
    verbose=True,
    timestamps=True,
    word_timestamps=True,
    temperature=0.0,
    max_new_tokens=256,
    max_time_chunk=30.0,
)


def _get_whisper_base_model():
    """
    Get the best Whisper Base model for the current hardware.

    Automatically selects MLX Whisper Base for Apple Silicon (MPS) if available,
    otherwise falls back to native Whisper Base.
    """
    # Check if MPS is available (Apple Silicon)
    try:
        import torch

        has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
    except ImportError:
        has_mps = False

    # Check if mlx-whisper is available
    try:
        import mlx_whisper  # type: ignore

        has_mlx_whisper = True
    except ImportError:
        has_mlx_whisper = False

    # Use MLX Whisper if both MPS and mlx-whisper are available
    if has_mps and has_mlx_whisper:
        return InlineAsrMlxWhisperOptions(
            repo_id="mlx-community/whisper-base-mlx",
            inference_framework=InferenceAsrFramework.MLX,
            language="en",
            task="transcribe",
            word_timestamps=True,
            no_speech_threshold=0.6,
            logprob_threshold=-1.0,
            compression_ratio_threshold=2.4,
        )
    else:
        return InlineAsrNativeWhisperOptions(
            repo_id="base",
            inference_framework=InferenceAsrFramework.WHISPER,
            verbose=True,
            timestamps=True,
            word_timestamps=True,
            temperature=0.0,
            max_new_tokens=256,
            max_time_chunk=30.0,
        )


# Create the model instance
WHISPER_BASE = _get_whisper_base_model()

WHISPER_LARGE = InlineAsrNativeWhisperOptions(
    repo_id="large",
@@ -71,16 +148,58 @@ WHISPER_LARGE = InlineAsrNativeWhisperOptions(
    max_time_chunk=30.0,
)

WHISPER_TURBO = InlineAsrNativeWhisperOptions(
    repo_id="turbo",
    inference_framework=InferenceAsrFramework.WHISPER,
    verbose=True,
    timestamps=True,
    word_timestamps=True,
    temperature=0.0,
    max_new_tokens=256,
    max_time_chunk=30.0,
)


def _get_whisper_turbo_model():
    """
    Get the best Whisper Turbo model for the current hardware.

    Automatically selects MLX Whisper Turbo for Apple Silicon (MPS) if available,
    otherwise falls back to native Whisper Turbo.
    """
    # Check if MPS is available (Apple Silicon)
    try:
        import torch

        has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
    except ImportError:
        has_mps = False

    # Check if mlx-whisper is available
    try:
        import mlx_whisper  # type: ignore

        has_mlx_whisper = True
    except ImportError:
        has_mlx_whisper = False

    # Use MLX Whisper if both MPS and mlx-whisper are available
    if has_mps and has_mlx_whisper:
        return InlineAsrMlxWhisperOptions(
            repo_id="mlx-community/whisper-turbo",
            inference_framework=InferenceAsrFramework.MLX,
            language="en",
            task="transcribe",
            word_timestamps=True,
            no_speech_threshold=0.6,
            logprob_threshold=-1.0,
            compression_ratio_threshold=2.4,
        )
    else:
        return InlineAsrNativeWhisperOptions(
            repo_id="turbo",
            inference_framework=InferenceAsrFramework.WHISPER,
            verbose=True,
            timestamps=True,
            word_timestamps=True,
            temperature=0.0,
            max_new_tokens=256,
            max_time_chunk=30.0,
        )


# Create the model instance
WHISPER_TURBO = _get_whisper_turbo_model()

# Note: MLX Whisper models are now automatically selected when using
# WHISPER_TURBO, WHISPER_BASE, WHISPER_SMALL, etc. on Apple Silicon systems
# with mlx-whisper installed. No need for separate MLX-specific model specs.

class AsrModelType(str, Enum):
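
A minimal sketch of how the auto-selection above behaves at import time (assumes docling is installed with the `asr` extra; the printed type name is illustrative):

from docling.datamodel import asr_model_specs

# WHISPER_SMALL, WHISPER_BASE, and WHISPER_TURBO are resolved once, at import
# time: on Apple Silicon with mlx-whisper installed they are
# InlineAsrMlxWhisperOptions instances, otherwise InlineAsrNativeWhisperOptions.
print(type(asr_model_specs.WHISPER_TURBO).__name__)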

View File

@@ -17,7 +17,7 @@ class BaseAsrOptions(BaseModel):
class InferenceAsrFramework(str, Enum):
    # MLX = "mlx" # disabled for now
    MLX = "mlx"
    # TRANSFORMERS = "transformers" # disabled for now
    WHISPER = "whisper"
@@ -55,3 +55,22 @@ class InlineAsrNativeWhisperOptions(InlineAsrOptions):
        AcceleratorDevice.CUDA,
    ]
    word_timestamps: bool = True


class InlineAsrMlxWhisperOptions(InlineAsrOptions):
    """
    MLX Whisper options for Apple Silicon optimization.

    Uses the mlx-whisper library for efficient inference on Apple Silicon devices.
    """

    inference_framework: InferenceAsrFramework = InferenceAsrFramework.MLX
    language: str = "en"
    task: str = "transcribe"  # "transcribe" or "translate"
    supported_devices: List[AcceleratorDevice] = [
        AcceleratorDevice.MPS,  # MLX is optimized for Apple Silicon
    ]
    word_timestamps: bool = True
    no_speech_threshold: float = 0.6  # Threshold for detecting speech
    logprob_threshold: float = -1.0  # Log probability threshold
    compression_ratio_threshold: float = 2.4  # Compression ratio threshold
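
For callers that want the MLX backend unconditionally rather than via auto-selection, the new options class can be constructed directly; a sketch (field defaults as declared above, repo id taken from the specs in this commit):

from docling.datamodel.pipeline_options_asr_model import (
    InferenceAsrFramework,
    InlineAsrMlxWhisperOptions,
)

# Explicit MLX options; any field omitted here keeps the default declared above.
mlx_opts = InlineAsrMlxWhisperOptions(
    repo_id="mlx-community/whisper-turbo",
    inference_framework=InferenceAsrFramework.MLX,
    language="en",
    task="transcribe",
)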

View File

@@ -33,6 +33,7 @@ from docling.datamodel.pipeline_options import (
)
from docling.datamodel.pipeline_options_asr_model import (
    InlineAsrNativeWhisperOptions,
    InlineAsrMlxWhisperOptions,
    # AsrResponseFormat,
    InlineAsrOptions,
)
@@ -228,6 +229,130 @@ class _NativeWhisperModel:
        return convo


class _MlxWhisperModel:
    def __init__(
        self,
        enabled: bool,
        artifacts_path: Optional[Path],
        accelerator_options: AcceleratorOptions,
        asr_options: InlineAsrMlxWhisperOptions,
    ):
        """
        Transcriber using MLX Whisper for Apple Silicon optimization.
        """
        self.enabled = enabled

        _log.info(f"artifacts-path: {artifacts_path}")
        _log.info(f"accelerator_options: {accelerator_options}")

        if self.enabled:
            try:
                import mlx_whisper  # type: ignore
            except ImportError:
                raise ImportError(
                    "mlx-whisper is not installed. Please install it via `pip install mlx-whisper` or do `uv sync --extra asr`."
                )

            self.asr_options = asr_options
            self.mlx_whisper = mlx_whisper

            self.device = decide_device(
                accelerator_options.device,
                supported_devices=asr_options.supported_devices,
            )
            _log.info(f"Available device for MLX Whisper: {self.device}")

            self.model_name = asr_options.repo_id
            _log.info(f"loading _MlxWhisperModel({self.model_name})")

            # MLX Whisper models are loaded differently - they use HuggingFace repos
            self.model_path = self.model_name

            # Store MLX-specific options
            self.language = asr_options.language
            self.task = asr_options.task
            self.word_timestamps = asr_options.word_timestamps
            self.no_speech_threshold = asr_options.no_speech_threshold
            self.logprob_threshold = asr_options.logprob_threshold
            self.compression_ratio_threshold = asr_options.compression_ratio_threshold

    def run(self, conv_res: ConversionResult) -> ConversionResult:
        audio_path: Path = Path(conv_res.input.file).resolve()
        try:
            conversation = self.transcribe(audio_path)

            # Ensure we have a proper DoclingDocument
            origin = DocumentOrigin(
                filename=conv_res.input.file.name or "audio.wav",
                mimetype="audio/x-wav",
                binary_hash=conv_res.input.document_hash,
            )
            conv_res.document = DoclingDocument(
                name=conv_res.input.file.stem or "audio.wav", origin=origin
            )

            for citem in conversation:
                conv_res.document.add_text(
                    label=DocItemLabel.TEXT, text=citem.to_string()
                )

            conv_res.status = ConversionStatus.SUCCESS
            return conv_res
        except Exception as exc:
            _log.error(f"MLX Audio transcription has an error: {exc}")
            conv_res.status = ConversionStatus.FAILURE
            return conv_res

    def transcribe(self, fpath: Path) -> list[_ConversationItem]:
        """
        Transcribe audio using MLX Whisper.

        Args:
            fpath: Path to audio file

        Returns:
            List of conversation items with timestamps
        """
        result = self.mlx_whisper.transcribe(
            str(fpath),
            path_or_hf_repo=self.model_path,
            language=self.language,
            task=self.task,
            word_timestamps=self.word_timestamps,
            no_speech_threshold=self.no_speech_threshold,
            logprob_threshold=self.logprob_threshold,
            compression_ratio_threshold=self.compression_ratio_threshold,
        )

        convo: list[_ConversationItem] = []

        # MLX Whisper returns segments similar to native Whisper
        for segment in result.get("segments", []):
            item = _ConversationItem(
                start_time=segment.get("start"),
                end_time=segment.get("end"),
                text=segment.get("text", "").strip(),
                words=[],
            )

            # Add word-level timestamps if available
            if self.word_timestamps and "words" in segment:
                item.words = []
                for word_data in segment["words"]:
                    item.words.append(
                        _ConversationWord(
                            start_time=word_data.get("start"),
                            end_time=word_data.get("end"),
                            text=word_data.get("word", ""),
                        )
                    )

            convo.append(item)

        return convo

class AsrPipeline(BasePipeline):
    def __init__(self, pipeline_options: AsrPipelineOptions):
        super().__init__(pipeline_options)
@@ -245,6 +370,16 @@ class AsrPipeline(BasePipeline):
                accelerator_options=pipeline_options.accelerator_options,
                asr_options=asr_options,
            )
        elif isinstance(
            self.pipeline_options.asr_options, InlineAsrMlxWhisperOptions
        ):
            asr_options: InlineAsrMlxWhisperOptions = (
                self.pipeline_options.asr_options
            )
            self._model = _MlxWhisperModel(
                enabled=True,  # must always be enabled for this pipeline to make sense.
                artifacts_path=self.artifacts_path,
                accelerator_options=pipeline_options.accelerator_options,
                asr_options=asr_options,
            )
        else:
            _log.error(f"No model support for {self.pipeline_options.asr_options}")
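
The pipeline wires `_MlxWhisperModel` in automatically, but the underlying library call in `transcribe()` can also be exercised on its own; a minimal sketch (assumes mlx-whisper is installed on Apple Silicon; the `audio.wav` path is a hypothetical input):

import mlx_whisper

# Mirrors the keyword arguments passed in _MlxWhisperModel.transcribe() above.
result = mlx_whisper.transcribe(
    "audio.wav",
    path_or_hf_repo="mlx-community/whisper-turbo",
    word_timestamps=True,
    no_speech_threshold=0.6,
)
for segment in result.get("segments", []):
    print(f"[{segment['start']:.2f}s - {segment['end']:.2f}s] {segment['text'].strip()}")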

View File

@@ -15,7 +15,8 @@
# - The script prints the transcription to stdout.
#
# Customizing the model
# - Edit `get_asr_converter()` to switch `asr_model_specs` (e.g., language or model size).
# - The script automatically selects the best model for your hardware (MLX Whisper for Apple Silicon, native Whisper otherwise).
# - Edit `get_asr_converter()` to manually override `pipeline_options.asr_options` with any model from `asr_model_specs`.
# - Keep `InputFormat.AUDIO` and `AsrPipeline` unchanged for a minimal setup.
#
# Input audio
@@ -36,10 +37,15 @@ from docling.pipeline.asr_pipeline import AsrPipeline
def get_asr_converter():
    """Create a DocumentConverter configured for ASR with a default model.
    """Create a DocumentConverter configured for ASR with automatic model selection.

    Uses `asr_model_specs.WHISPER_TURBO` by default. You can swap in another
    model spec from `docling.datamodel.asr_model_specs` to experiment.

    Uses `asr_model_specs.WHISPER_TURBO`, which automatically selects the best
    implementation for your hardware:

    - MLX Whisper Turbo for Apple Silicon (M1/M2/M3) with mlx-whisper installed
    - Native Whisper Turbo as fallback

    You can swap in another model spec from `docling.datamodel.asr_model_specs`
    to experiment with different model sizes.
    """
    pipeline_options = AsrPipelineOptions()
    pipeline_options.asr_options = asr_model_specs.WHISPER_TURBO
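
A condensed sketch of the full flow around `get_asr_converter()` (converter wiring as in docling's ASR example; `AudioFormatOption` and the input path are assumptions worth double-checking):

from docling.datamodel import asr_model_specs
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import AsrPipelineOptions
from docling.document_converter import AudioFormatOption, DocumentConverter
from docling.pipeline.asr_pipeline import AsrPipeline

pipeline_options = AsrPipelineOptions()
pipeline_options.asr_options = asr_model_specs.WHISPER_TURBO  # auto-selected spec

converter = DocumentConverter(
    format_options={
        InputFormat.AUDIO: AudioFormatOption(
            pipeline_cls=AsrPipeline, pipeline_options=pipeline_options
        )
    }
)
result = converter.convert("input.wav")  # hypothetical input path
print(result.document.export_to_markdown())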

View File

@@ -106,6 +106,7 @@ rapidocr = [
    # 'onnxruntime (>=1.7.0,<1.20.0) ; python_version < "3.10"',
]
asr = [
    'mlx-whisper>=0.4.3 ; python_version >= "3.10" and sys_platform == "darwin" and platform_machine == "arm64"',
    "openai-whisper>=20250625",
]
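
The environment marker limits the new dependency to CPython 3.10+ on Apple Silicon macOS; a small stdlib-only sketch of the equivalent runtime check:

import platform
import sys

# Mirrors the pyproject marker:
#   python_version >= "3.10" and sys_platform == "darwin" and platform_machine == "arm64"
eligible = (
    sys.version_info >= (3, 10)
    and sys.platform == "darwin"
    and platform.machine() == "arm64"
)
print("mlx-whisper installs with the asr extra:", eligible)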

uv.lock generated
View File

@@ -1267,6 +1267,7 @@ dependencies = [
[package.optional-dependencies]
asr = [
    { name = "mlx-whisper", marker = "python_full_version >= '3.10' and platform_machine == 'arm64' and sys_platform == 'darwin'" },
    { name = "openai-whisper" },
]
easyocr = [
@@ -1350,6 +1351,7 @@ requires-dist = [
    { name = "lxml", specifier = ">=4.0.0,<6.0.0" },
    { name = "marko", specifier = ">=2.1.2,<3.0.0" },
    { name = "mlx-vlm", marker = "python_full_version >= '3.10' and platform_machine == 'arm64' and sys_platform == 'darwin' and extra == 'vlm'", specifier = ">=0.3.0,<1.0.0" },
    { name = "mlx-whisper", marker = "python_full_version >= '3.10' and platform_machine == 'arm64' and sys_platform == 'darwin' and extra == 'asr'", specifier = ">=0.4.3" },
    { name = "ocrmac", marker = "sys_platform == 'darwin'", specifier = ">=1.0.0,<2.0.0" },
    { name = "ocrmac", marker = "sys_platform == 'darwin' and extra == 'ocrmac'", specifier = ">=1.0.0,<2.0.0" },
    { name = "onnxruntime", marker = "extra == 'rapidocr'", specifier = ">=1.7.0,<2.0.0" },
@@ -3497,6 +3499,26 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/21/28/5f8bf24989a21d022fbb7c5126f31764eda9e85abed30d7bc1916fc3bc0a/mlx_vlm-0.3.4-py3-none-any.whl", hash = "sha256:1ec7264ea7d9febfb0fd284ce81d2bdea241da647ee54d8b484362bfd2660df6", size = 332608, upload-time = "2025-10-14T08:01:10.392Z" },
]

[[package]]
name = "mlx-whisper"
version = "0.4.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "huggingface-hub", marker = "python_full_version >= '3.10' and platform_machine == 'arm64' and sys_platform == 'darwin'" },
    { name = "mlx", marker = "python_full_version >= '3.10' and platform_machine == 'arm64' and sys_platform == 'darwin'" },
    { name = "more-itertools", marker = "python_full_version >= '3.10' and platform_machine == 'arm64' and sys_platform == 'darwin'" },
    { name = "numba", version = "0.62.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' and platform_machine == 'arm64' and sys_platform == 'darwin'" },
    { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' and platform_machine == 'arm64' and sys_platform == 'darwin'" },
    { name = "scipy", version = "1.15.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*' and platform_machine == 'arm64' and sys_platform == 'darwin'" },
    { name = "scipy", version = "1.16.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11' and platform_machine == 'arm64' and sys_platform == 'darwin'" },
    { name = "tiktoken", marker = "python_full_version >= '3.10' and platform_machine == 'arm64' and sys_platform == 'darwin'" },
    { name = "torch", marker = "python_full_version >= '3.10' and platform_machine == 'arm64' and sys_platform == 'darwin'" },
    { name = "tqdm", marker = "python_full_version >= '3.10' and platform_machine == 'arm64' and sys_platform == 'darwin'" },
]
wheels = [
    { url = "https://files.pythonhosted.org/packages/22/b7/a35232812a2ccfffcb7614ba96a91338551a660a0e9815cee668bf5743f0/mlx_whisper-0.4.3-py3-none-any.whl", hash = "sha256:6b82b6597a994643a3e5496c7bc229a672e5ca308458455bfe276e76ae024489", size = 890544, upload-time = "2025-08-29T14:56:13.815Z" },
]

[[package]]
name = "modelscope"
version = "1.31.0"
@@ -4377,9 +4399,9 @@ version = "1.0.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "click", version = "8.1.8", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" },
    { name = "click", version = "8.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" },
    { name = "pillow" },
    { name = "pyobjc-framework-vision" },
    { name = "click", version = "8.3.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.10' and platform_machine != 'x86_64') or (python_full_version >= '3.10' and sys_platform != 'linux')" },
    { name = "pillow", marker = "python_full_version < '3.10' or platform_machine != 'x86_64' or sys_platform != 'linux'" },
    { name = "pyobjc-framework-vision", marker = "python_full_version < '3.10' or platform_machine != 'x86_64' or sys_platform != 'linux'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/dd/dc/de3e9635774b97d9766f6815bbb3f5ec9bce347115f10d9abbf2733a9316/ocrmac-1.0.0.tar.gz", hash = "sha256:5b299e9030c973d1f60f82db000d6c2e5ff271601878c7db0885e850597d1d2e", size = 1463997, upload-time = "2024-11-07T12:00:00.197Z" }
wheels = [
@@ -5364,9 +5386,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/20/8a/b35a615ae6f04550d696bb179c414538b3b477999435fdd4ad75b76139e4/pybase64-1.4.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:a370dea7b1cee2a36a4d5445d4e09cc243816c5bc8def61f602db5a6f5438e52", size = 54320, upload-time = "2025-07-27T13:03:27.495Z" },
    { url = "https://files.pythonhosted.org/packages/d3/a9/8bd4f9bcc53689f1b457ecefed1eaa080e4949d65a62c31a38b7253d5226/pybase64-1.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9aa4de83f02e462a6f4e066811c71d6af31b52d7484de635582d0e3ec3d6cc3e", size = 56482, upload-time = "2025-07-27T13:03:28.942Z" },
    { url = "https://files.pythonhosted.org/packages/75/e5/4a7735b54a1191f61c3f5c2952212c85c2d6b06eb5fb3671c7603395f70c/pybase64-1.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:83a1c2f9ed00fee8f064d548c8654a480741131f280e5750bb32475b7ec8ee38", size = 70959, upload-time = "2025-07-27T13:03:30.171Z" },
    { url = "https://files.pythonhosted.org/packages/f4/56/5337f27a8b8d2d6693f46f7b36bae47895e5820bfa259b0072574a4e1057/pybase64-1.4.2-cp313-cp313-android_21_arm64_v8a.whl", hash = "sha256:0f331aa59549de21f690b6ccc79360ffed1155c3cfbc852eb5c097c0b8565a2b", size = 33888, upload-time = "2025-07-27T13:03:35.698Z" },
    { url = "https://files.pythonhosted.org/packages/e3/ff/470768f0fe6de0aa302a8cb1bdf2f9f5cffc3f69e60466153be68bc953aa/pybase64-1.4.2-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:69d3f0445b0faeef7bb7f93bf8c18d850785e2a77f12835f49e524cc54af04e7", size = 30914, upload-time = "2025-07-27T13:03:38.475Z" },
    { url = "https://files.pythonhosted.org/packages/75/6b/d328736662665e0892409dc410353ebef175b1be5eb6bab1dad579efa6df/pybase64-1.4.2-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:2372b257b1f4dd512f317fb27e77d313afd137334de64c87de8374027aacd88a", size = 31380, upload-time = "2025-07-27T13:03:39.7Z" },
    { url = "https://files.pythonhosted.org/packages/ca/96/7ff718f87c67f4147c181b73d0928897cefa17dc75d7abc6e37730d5908f/pybase64-1.4.2-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:fb794502b4b1ec91c4ca5d283ae71aef65e3de7721057bd9e2b3ec79f7a62d7d", size = 38230, upload-time = "2025-07-27T13:03:41.637Z" },
    { url = "https://files.pythonhosted.org/packages/71/ab/db4dbdfccb9ca874d6ce34a0784761471885d96730de85cee3d300381529/pybase64-1.4.2-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d377d48acf53abf4b926c2a7a24a19deb092f366a04ffd856bf4b3aa330b025d", size = 71608, upload-time = "2025-07-27T13:03:47.01Z" },
    { url = "https://files.pythonhosted.org/packages/f2/58/7f2cef1ceccc682088958448d56727369de83fa6b29148478f4d2acd107a/pybase64-1.4.2-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.whl", hash = "sha256:ab9cdb6a8176a5cb967f53e6ad60e40c83caaa1ae31c5e1b29e5c8f507f17538", size = 56413, upload-time = "2025-07-27T13:03:49.908Z" },
@@ -5388,8 +5407,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/95/f0/c392c4ac8ccb7a34b28377c21faa2395313e3c676d76c382642e19a20703/pybase64-1.4.2-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:ad59362fc267bf15498a318c9e076686e4beeb0dfe09b457fabbc2b32468b97a", size = 58103, upload-time = "2025-07-27T13:04:29.996Z" },
    { url = "https://files.pythonhosted.org/packages/32/30/00ab21316e7df8f526aa3e3dc06f74de6711d51c65b020575d0105a025b2/pybase64-1.4.2-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:01593bd064e7dcd6c86d04e94e44acfe364049500c20ac68ca1e708fbb2ca970", size = 60779, upload-time = "2025-07-27T13:04:31.549Z" },
    { url = "https://files.pythonhosted.org/packages/a6/65/114ca81839b1805ce4a2b7d58bc16e95634734a2059991f6382fc71caf3e/pybase64-1.4.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5b81547ad8ea271c79fdf10da89a1e9313cb15edcba2a17adf8871735e9c02a0", size = 74684, upload-time = "2025-07-27T13:04:32.976Z" },
    { url = "https://files.pythonhosted.org/packages/99/bf/00a87d951473ce96c8c08af22b6983e681bfabdb78dd2dcf7ee58eac0932/pybase64-1.4.2-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:4157ad277a32cf4f02a975dffc62a3c67d73dfa4609b2c1978ef47e722b18b8e", size = 30924, upload-time = "2025-07-27T13:04:39.189Z" },
    { url = "https://files.pythonhosted.org/packages/ae/43/dee58c9d60e60e6fb32dc6da722d84592e22f13c277297eb4ce6baf99a99/pybase64-1.4.2-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:e113267dc349cf624eb4f4fbf53fd77835e1aa048ac6877399af426aab435757", size = 31390, upload-time = "2025-07-27T13:04:40.995Z" },
    { url = "https://files.pythonhosted.org/packages/e1/11/b28906fc2e330b8b1ab4bc845a7bef808b8506734e90ed79c6062b095112/pybase64-1.4.2-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:cea5aaf218fd9c5c23afacfe86fd4464dfedc1a0316dd3b5b4075b068cc67df0", size = 38212, upload-time = "2025-07-27T13:04:42.729Z" },
    { url = "https://files.pythonhosted.org/packages/e4/2e/851eb51284b97354ee5dfa1309624ab90920696e91a33cd85b13d20cc5c1/pybase64-1.4.2-cp314-cp314-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:a3e54dcf0d0305ec88473c9d0009f698cabf86f88a8a10090efeff2879c421bb", size = 71674, upload-time = "2025-07-27T13:04:49.294Z" },
    { url = "https://files.pythonhosted.org/packages/a4/8e/3479266bc0e65f6cc48b3938d4a83bff045330649869d950a378f2ddece0/pybase64-1.4.2-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.whl", hash = "sha256:753da25d4fd20be7bda2746f545935773beea12d5cb5ec56ec2d2960796477b1", size = 56461, upload-time = "2025-07-27T13:04:52.37Z" },
@@ -5739,7 +5756,7 @@ name = "pyobjc-framework-cocoa"
version = "11.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "pyobjc-core" },
    { name = "pyobjc-core", marker = "python_full_version < '3.10' or platform_machine != 'x86_64' or sys_platform != 'linux'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/4b/c5/7a866d24bc026f79239b74d05e2cf3088b03263da66d53d1b4cf5207f5ae/pyobjc_framework_cocoa-11.1.tar.gz", hash = "sha256:87df76b9b73e7ca699a828ff112564b59251bb9bbe72e610e670a4dc9940d038", size = 5565335, upload-time = "2025-06-14T20:56:59.683Z" }
wheels = [
@@ -5758,8 +5775,8 @@ name = "pyobjc-framework-coreml"
version = "11.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "pyobjc-core" },
    { name = "pyobjc-framework-cocoa" },
    { name = "pyobjc-core", marker = "python_full_version < '3.10' or platform_machine != 'x86_64' or sys_platform != 'linux'" },
    { name = "pyobjc-framework-cocoa", marker = "python_full_version < '3.10' or platform_machine != 'x86_64' or sys_platform != 'linux'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/0d/5d/4309f220981d769b1a2f0dcb2c5c104490d31389a8ebea67e5595ce1cb74/pyobjc_framework_coreml-11.1.tar.gz", hash = "sha256:775923eefb9eac2e389c0821b10564372de8057cea89f1ea1cdaf04996c970a7", size = 82005, upload-time = "2025-06-14T20:57:12.004Z" }
wheels = [
@@ -5778,8 +5795,8 @@ name = "pyobjc-framework-quartz"
version = "11.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "pyobjc-core" },
    { name = "pyobjc-framework-cocoa" },
    { name = "pyobjc-core", marker = "python_full_version < '3.10' or platform_machine != 'x86_64' or sys_platform != 'linux'" },
    { name = "pyobjc-framework-cocoa", marker = "python_full_version < '3.10' or platform_machine != 'x86_64' or sys_platform != 'linux'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/c7/ac/6308fec6c9ffeda9942fef72724f4094c6df4933560f512e63eac37ebd30/pyobjc_framework_quartz-11.1.tar.gz", hash = "sha256:a57f35ccfc22ad48c87c5932818e583777ff7276605fef6afad0ac0741169f75", size = 3953275, upload-time = "2025-06-14T20:58:17.924Z" }
wheels = [
@@ -5798,10 +5815,10 @@ name = "pyobjc-framework-vision"
version = "11.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "pyobjc-core" },
    { name = "pyobjc-framework-cocoa" },
    { name = "pyobjc-framework-coreml" },
    { name = "pyobjc-framework-quartz" },
    { name = "pyobjc-core", marker = "python_full_version < '3.10' or platform_machine != 'x86_64' or sys_platform != 'linux'" },
    { name = "pyobjc-framework-cocoa", marker = "python_full_version < '3.10' or platform_machine != 'x86_64' or sys_platform != 'linux'" },
    { name = "pyobjc-framework-coreml", marker = "python_full_version < '3.10' or platform_machine != 'x86_64' or sys_platform != 'linux'" },
    { name = "pyobjc-framework-quartz", marker = "python_full_version < '3.10' or platform_machine != 'x86_64' or sys_platform != 'linux'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/40/a8/7128da4d0a0103cabe58910a7233e2f98d18c590b1d36d4b3efaaedba6b9/pyobjc_framework_vision-11.1.tar.gz", hash = "sha256:26590512ee7758da3056499062a344b8a351b178be66d4b719327884dde4216b", size = 133721, upload-time = "2025-06-14T20:58:46.095Z" }
wheels = [