Skip to content

Commit bd9ba97

Browse files
[None][chore] Remove two unused parameters in create_py_executor (#7458)
Signed-off-by: leslie-fang25 <[email protected]>
1 parent 5ff3a65 commit bd9ba97

File tree

2 files changed

+2
-15
lines changed

2 files changed

+2
-15
lines changed

tensorrt_llm/_torch/pyexecutor/py_executor_creator.py

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -14,9 +14,7 @@
1414
from tensorrt_llm._utils import get_sm_version
1515
from tensorrt_llm.bindings.executor import (CapacitySchedulerPolicy,
1616
ContextChunkingPolicy,
17-
ExecutorConfig,
18-
LogitsPostProcessorConfig,
19-
ParallelConfig)
17+
ExecutorConfig)
2018
from tensorrt_llm.bindings.internal.batch_manager import ContextChunkingConfig
2119
from tensorrt_llm.llmapi.llm_args import KvCacheConnectorConfig, TorchLlmArgs
2220
from tensorrt_llm.llmapi.tokenizer import TokenizerBase
@@ -217,14 +215,9 @@ def create_py_executor(
217215
tokenizer: Optional[TokenizerBase] = None,
218216
lora_config: Optional[LoraConfig] = None,
219217
kv_connector_config: Optional[KvCacheConnectorConfig] = None,
220-
logits_post_processor_config: Optional[LogitsPostProcessorConfig] = None,
221-
parallel_config: Optional[ParallelConfig] = None,
222218
) -> PyExecutor:
223219

224220
executor_config = llm_args.get_executor_config(checkpoint_dir, tokenizer)
225-
executor_config.logits_post_processor_config = logits_post_processor_config
226-
executor_config.parallel_config = parallel_config
227-
228221
garbage_collection_gen0_threshold = llm_args.garbage_collection_gen0_threshold
229222

230223
_mangle_executor_config(executor_config)

tensorrt_llm/executor/worker.py

Lines changed: 1 addition & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -113,6 +113,7 @@ def _create_py_executor():
113113
assert hasattr(
114114
self.llm_args, "backend"
115115
), "llm_args should be with backend in _create_py_executor"
116+
_ = _get_comm_ranks_device_id()
116117
if self.llm_args.backend == "pytorch":
117118
from tensorrt_llm._torch.pyexecutor.py_executor_creator import \
118119
create_py_executor
@@ -122,13 +123,6 @@ def _create_py_executor():
122123
args["tokenizer"] = tokenizer
123124
args["lora_config"] = lora_config
124125
args["kv_connector_config"] = kv_connector_config
125-
args[
126-
"logits_post_processor_config"] = tllm.LogitsPostProcessorConfig(
127-
processor_batched=batched_logits_processor,
128-
replicate=False)
129-
comm_ranks, device_ids = _get_comm_ranks_device_id()
130-
args["parallel_config"] = tllm.ParallelConfig(
131-
participant_ids=comm_ranks, device_ids=device_ids)
132126
elif self.llm_args.backend == "_autodeploy":
133127
from tensorrt_llm._torch.auto_deploy.llm_args import \
134128
LlmArgs as ADLlmArgs

0 commit comments

Comments (0)