Update vllm version for Colab notebook
The new version of vllm (0.10.1) breaks hosting on Colab.
```error_log
(EngineCore_0 pid=4952) File "/usr/local/lib/python3.12/dist-packages/vllm/model_executor/models/qwen2_vl.py", line 769, in get_image_processor
(EngineCore_0 pid=4952) return self.get_hf_processor(**kwargs).image_processor
(EngineCore_0 pid=4952) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
(EngineCore_0 pid=4952) File "/content/dots.ocr/weights/DotsOCR/modeling_dots_ocr_vllm.py", line 113, in get_hf_processor
(EngineCore_0 pid=4952) self.get_tokenizer().image_token = "<|imgpad|>" # Ensure image token is set
(EngineCore_0 pid=4952) ^^^^^^^^^^^^^^^^^^^^
(EngineCore_0 pid=4952) RecursionError: maximum recursion depth exceeded
[rank0]:[W820 01:47:10.202570866 ProcessGroupNCCL.cpp:1479] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
(APIServer pid=4825) Traceback (most recent call last):
(APIServer pid=4825) File "/usr/local/bin/vllm", line 9, in <module>
(APIServer pid=4825) sys.exit(main())
(APIServer pid=4825) ^^^^^^
(APIServer pid=4825) File "/usr/local/lib/python3.12/dist-packages/vllm/entrypoints/cli/main.py", line 54, in main
(APIServer pid=4825) args.dispatch_function(args)
(APIServer pid=4825) File "/usr/local/lib/python3.12/dist-packages/vllm/entrypoints/cli/serve.py", line 50, in cmd
(APIServer pid=4825) uvloop.run(run_server(args))
(APIServer pid=4825) File "/usr/local/lib/python3.12/dist-packages/uvloop/__init__.py", line 109, in run
(APIServer pid=4825) return __asyncio.run(
(APIServer pid=4825) ^^^^^^^^^^^^^^
(APIServer pid=4825) File "/usr/lib/python3.12/asyncio/runners.py", line 195, in run
(APIServer pid=4825) return runner.run(main)
(APIServer pid=4825) ^^^^^^^^^^^^^^^^
(APIServer pid=4825) File "/usr/lib/python3.12/asyncio/runners.py", line 118, in run
(APIServer pid=4825) return self._loop.run_until_complete(task)
(APIServer pid=4825) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
(APIServer pid=4825) File "uvloop/loop.pyx", line 1518, in uvloop.loop.Loop.run_until_complete
(APIServer pid=4825) File "/usr/local/lib/python3.12/dist-packages/uvloop/__init__.py", line 61, in wrapper
(APIServer pid=4825) return await main
(APIServer pid=4825) ^^^^^^^^^^
(APIServer pid=4825) File "/usr/local/lib/python3.12/dist-packages/vllm/entrypoints/openai/api_server.py", line 1850, in run_server
(APIServer pid=4825) await run_server_worker(listen_address, sock, args, **uvicorn_kwargs)
(APIServer pid=4825) File "/usr/local/lib/python3.12/dist-packages/vllm/entrypoints/openai/api_server.py", line 1870, in run_server_worker
(APIServer pid=4825) async with build_async_engine_client(
(APIServer pid=4825) ^^^^^^^^^^^^^^^^^^^^^^^^^^
(APIServer pid=4825) File "/usr/lib/python3.12/contextlib.py", line 210, in __aenter__
(APIServer pid=4825) return await anext(self.gen)
(APIServer pid=4825) ^^^^^^^^^^^^^^^^^^^^^
(APIServer pid=4825) File "/usr/local/lib/python3.12/dist-packages/vllm/entrypoints/openai/api_server.py", line 178, in build_async_engine_client
(APIServer pid=4825) async with build_async_engine_client_from_engine_args(
(APIServer pid=4825) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
(APIServer pid=4825) File "/usr/lib/python3.12/contextlib.py", line 210, in __aenter__
(APIServer pid=4825) return await anext(self.gen)
(APIServer pid=4825) ^^^^^^^^^^^^^^^^^^^^^
(APIServer pid=4825) File "/usr/local/lib/python3.12/dist-packages/vllm/entrypoints/openai/api_server.py", line 220, in build_async_engine_client_from_engine_args
(APIServer pid=4825) async_llm = AsyncLLM.from_vllm_config(
(APIServer pid=4825) ^^^^^^^^^^^^^^^^^^^^^^^^^^
(APIServer pid=4825) File "/usr/local/lib/python3.12/dist-packages/vllm/utils/__init__.py", line 1557, in inner
(APIServer pid=4825) return fn(*args, **kwargs)
(APIServer pid=4825) ^^^^^^^^^^^^^^^^^^^
(APIServer pid=4825) File "/usr/local/lib/python3.12/dist-packages/vllm/v1/engine/async_llm.py", line 174, in from_vllm_config
(APIServer pid=4825) return cls(
(APIServer pid=4825) ^^^^
(APIServer pid=4825) File "/usr/local/lib/python3.12/dist-packages/vllm/v1/engine/async_llm.py", line 120, in __init__
(APIServer pid=4825) self.engine_core = EngineCoreClient.make_async_mp_client(
(APIServer pid=4825) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
(APIServer pid=4825) File "/usr/local/lib/python3.12/dist-packages/vllm/v1/engine/core_client.py", line 102, in make_async_mp_client
(APIServer pid=4825) return AsyncMPClient(*client_args)
(APIServer pid=4825) ^^^^^^^^^^^^^^^^^^^^^^^^^^^
(APIServer pid=4825) File "/usr/local/lib/python3.12/dist-packages/vllm/v1/engine/core_client.py", line 767, in __init__
(APIServer pid=4825) super().__init__(
(APIServer pid=4825) File "/usr/local/lib/python3.12/dist-packages/vllm/v1/engine/core_client.py", line 446, in __init__
(APIServer pid=4825) with launch_core_engines(vllm_config, executor_class,
(APIServer pid=4825) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
(APIServer pid=4825) File "/usr/lib/python3.12/contextlib.py", line 144, in __exit__
(APIServer pid=4825) next(self.gen)
(APIServer pid=4825) File "/usr/local/lib/python3.12/dist-packages/vllm/v1/engine/utils.py", line 706, in launch_core_engines
(APIServer pid=4825) wait_for_engine_startup(
(APIServer pid=4825) File "/usr/local/lib/python3.12/dist-packages/vllm/v1/engine/utils.py", line 759, in wait_for_engine_startup
(APIServer pid=4825) raise RuntimeError("Engine core initialization failed. "
(APIServer pid=4825) RuntimeError: Engine core initialization failed. See root cause above. Failed core proc(s): {}
```