Commit

[Minor] Fix type annotations (vllm-project#1238)
WoosukKwon authored Oct 2, 2023
1 parent a60b353 commit 84e4e37
Showing 2 changed files with 3 additions and 4 deletions.

vllm/model_executor/layers/sampler.py (1 addition, 1 deletion)

@@ -290,7 +290,7 @@ def _get_topk_logprobs(
 def _build_sequence_outputs(
     parent_ids: List[int],
     next_token_ids: List[int],
-    selected_token_logprobs: torch.Tensor,
+    selected_token_logprobs: List[float],
     parent_seq_ids: List[int],
     parent_logprobs: torch.Tensor,
     num_output_logprobs: Optional[int],
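
The commit message gives no rationale for this hunk, but a plausible reading is that the per-token logprobs are extracted from a tensor into plain Python floats before this helper is called, so `List[float]` matches the runtime type. A minimal illustrative sketch of that pattern (toy data, not vLLM's actual sampler code):

    import torch

    # Toy logprob matrix: one row per sequence, one column per vocab token.
    logprobs = torch.log_softmax(torch.randn(4, 10), dim=-1)
    next_token_ids = [1, 3, 5, 7]

    # Extracting each chosen token's logprob with .item() yields plain
    # Python floats, so what gets passed on is a List[float], not a
    # torch.Tensor -- matching the corrected annotation.
    selected_token_logprobs = [
        logprobs[i, t].item() for i, t in enumerate(next_token_ids)
    ]
    print(selected_token_logprobs)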

vllm/sampling_params.py (2 additions, 3 deletions)

@@ -61,7 +61,6 @@ class SamplingParams:
         max_tokens: Maximum number of tokens to generate per output sequence.
         logprobs: Number of log probabilities to return per output token.
         skip_special_tokens: Whether to skip special tokens in the output.
-            Defaults to true.
     """

@@ -76,8 +75,8 @@ def __init__(
         use_beam_search: bool = False,
         length_penalty: float = 1.0,
         early_stopping: Union[bool, str] = False,
-        stop: Union[None, str, List[str]] = None,
-        stop_token_ids: List[int] = None,
+        stop: Optional[Union[str, List[str]]] = None,
+        stop_token_ids: Optional[List[int]] = None,
         ignore_eos: bool = False,
         max_tokens: int = 16,
         logprobs: Optional[int] = None,
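
Hedged aside (not part of the commit): the `stop_token_ids` line illustrates a common annotation pitfall, since the annotation promised `List[int]` while the default was `None`; strict type checkers such as mypy (with implicit Optional disabled) flag exactly this. The `stop` change is purely stylistic, as `Optional[Union[str, List[str]]]` is equivalent to `Union[None, str, List[str]]`. A small sketch with hypothetical function names:

    from typing import List, Optional

    # Before: annotation says List[int], but the default is None.
    # mypy reports an incompatible default under no_implicit_optional.
    def before(stop_token_ids: List[int] = None):  # type error under mypy
        ...

    # After: Optional[...] makes the None default explicit and type-safe.
    def after(stop_token_ids: Optional[List[int]] = None) -> List[int]:
        return [] if stop_token_ids is None else stop_token_ids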
