vllm/attention/backends/flashinfer.py (6 changes: 3 additions & 3 deletions)
```diff
@@ -37,7 +37,7 @@
                                             is_block_tables_empty)
 from vllm.attention.layer import Attention
 from vllm.attention.ops.paged_attn import PagedAttention
-from vllm.config import VllmConfig, get_current_vllm_config
+from vllm.config import VllmConfig
 from vllm.logger import init_logger
 from vllm.utils import (async_tensor_h2d, get_kv_cache_torch_dtype,
                         make_tensor_with_pad)
@@ -187,7 +187,7 @@ def __init__(self, runner):
         # Global hyperparameters shared by all attention layers
         self.global_hyperparameters: Optional[PerLayerParameters] = None

-        self.vllm_config = get_current_vllm_config()
+        self.vllm_config = self.runner.vllm_config

     def _get_workspace_buffer(self):
         if self._workspace_buffer is None:
@@ -613,7 +613,7 @@ def __init__(self, input_builder: "ModelInputForGPUBuilder"):
         # Global hyperparameters shared by all attention layers
         self.global_hyperparameters: Optional[PerLayerParameters] = None

-        self.vllm_config = get_current_vllm_config()
+        self.vllm_config = self.runner.vllm_config

     def prepare(self):
         self.slot_mapping: List[int] = []
```
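All three hunks make the same change: instead of reading the config through the module-level get_current_vllm_config() accessor, the FlashInfer state and metadata-builder objects now take it from the runner they are constructed around, which already holds a vllm_config. A minimal sketch of the pattern, using hypothetical Runner and MetadataBuilder stand-ins rather than vLLM's actual classes:

```python
# Minimal sketch of the pattern in this diff, not vLLM's real code:
# VllmConfig, Runner, and MetadataBuilder here are hypothetical
# stand-ins. Instead of reaching for a process-global accessor, the
# builder reads the config from the runner it was constructed with.

class VllmConfig:
    """Placeholder for vllm.config.VllmConfig."""


class Runner:
    def __init__(self, vllm_config: VllmConfig) -> None:
        # The runner owns the config for its whole lifetime.
        self.vllm_config = vllm_config


class MetadataBuilder:
    def __init__(self, runner: Runner) -> None:
        # Before: self.vllm_config = get_current_vllm_config()
        # After:  pull the config off the runner, so construction no
        # longer depends on ambient global state being set up first.
        self.runner = runner
        self.vllm_config = self.runner.vllm_config


runner = Runner(VllmConfig())
builder = MetadataBuilder(runner)
assert builder.vllm_config is runner.vllm_config
```

Reading the config off the owning runner makes the dependency explicit and avoids relying on a global "current config" context being active at construction time, which is presumably the motivation for the change.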