Errors out in vLLM

#7
by rmadupu - opened
(vllm) rakesh@instance-20250720-112712:~$ sudo vllm serve "LiquidAI/LFM2-350M"
Traceback (most recent call last):
  File "/usr/local/bin/vllm", line 5, in <module>
    from vllm.entrypoints.cli.main import main
  File "/usr/local/lib/python3.9/dist-packages/vllm/entrypoints/cli/__init__.py", line 3, in <module>
    from vllm.entrypoints.cli.benchmark.latency import BenchmarkLatencySubcommand
  File "/usr/local/lib/python3.9/dist-packages/vllm/entrypoints/cli/benchmark/latency.py", line 5, in <module>
    from vllm.benchmarks.latency import add_cli_args, main
  File "/usr/local/lib/python3.9/dist-packages/vllm/benchmarks/latency.py", line 16, in <module>
    from vllm import LLM, SamplingParams
  File "<frozen importlib._bootstrap>", line 1055, in _handle_fromlist
  File "/usr/local/lib/python3.9/dist-packages/vllm/__init__.py", line 64, in __getattr__
    module = import_module(module_name, __package__)
  File "/usr/lib/python3.9/importlib/__init__.py", line 127, in import_module
    return _bootstrap._gcd_import(name[level:], package, level)
  File "/usr/local/lib/python3.9/dist-packages/vllm/entrypoints/llm.py", line 20, in <module>
    from vllm.config import (CompilationConfig, ModelDType, TokenizerMode,
  File "/usr/local/lib/python3.9/dist-packages/vllm/config.py", line 36, in <module>
    from vllm.platforms import current_platform
  File "<frozen importlib._bootstrap>", line 1055, in _handle_fromlist
  File "/usr/local/lib/python3.9/dist-packages/vllm/platforms/__init__.py", line 275, in __getattr__
    platform_cls_qualname = resolve_current_platform_cls_qualname()
  File "/usr/local/lib/python3.9/dist-packages/vllm/platforms/__init__.py", line 210, in resolve_current_platform_cls_qualname
    platform_plugins = load_plugins_by_group('vllm.platform_plugins')
  File "/usr/local/lib/python3.9/dist-packages/vllm/plugins/__init__.py", line 29, in load_plugins_by_group
    discovered_plugins = entry_points(group=group)
TypeError: entry_points() got an unexpected keyword argument 'group'
(vllm) rakesh@instance-20250720-112712:~$ 

Do I need a higher Python version? The traceback shows `entry_points() got an unexpected keyword argument 'group'` on Python 3.9 — the `group=` keyword for `importlib.metadata.entry_points()` was only added in Python 3.10.

Sign up or log in to comment