Bump up transformers version & Remove MistralConfig (vllm-project#1254)
WoosukKwon authored Oct 13, 2023
1 parent ec3b5ce commit e7c8555
Showing 6 changed files with 4 additions and 81 deletions.
4 changes: 2 additions & 2 deletions requirements.txt
@@ -6,8 +6,8 @@ pyarrow # Required for Ray data.
 sentencepiece # Required for LLaMA tokenizer.
 numpy
 torch == 2.0.1
-transformers >= 4.33.1 # Required for Code Llama.
-xformers == 0.0.22
+transformers >= 4.34.0 # Required for Mistral.
+xformers == 0.0.22 # Required for Mistral.
 fastapi
 uvicorn[standard]
 pydantic < 2 # Required for OpenAI server.
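
The floor moves from 4.33.1 to 4.34.0 because 4.34.0 is the first transformers release that ships Mistral support natively. A minimal post-install sanity check (hypothetical, not part of this commit):

```python
# Hypothetical check that the installed transformers release is new
# enough for Mistral; `packaging` ships as a transformers dependency.
import transformers
from packaging import version

assert version.parse(transformers.__version__) >= version.parse("4.34.0"), \
    "Mistral support requires transformers >= 4.34.0"

# Since 4.34.0 the config class is importable from the top-level namespace.
from transformers import MistralConfig
print(MistralConfig().model_type)  # "mistral"
```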
2 changes: 1 addition & 1 deletion vllm/model_executor/models/__init__.py
@@ -9,10 +9,10 @@
 from vllm.model_executor.models.gpt_neox import GPTNeoXForCausalLM
 from vllm.model_executor.models.internlm import InternLMForCausalLM
 from vllm.model_executor.models.llama import LlamaForCausalLM
+from vllm.model_executor.models.mistral import MistralForCausalLM
 from vllm.model_executor.models.mpt import MPTForCausalLM
 from vllm.model_executor.models.opt import OPTForCausalLM
 from vllm.model_executor.models.qwen import QWenLMHeadModel
-from vllm.model_executor.models.mistral import MistralForCausalLM

 __all__ = [
     "AquilaForCausalLM",
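
The import line simply moves up so the list stays alphabetized; the class is still re-exported from the package root. A hypothetical usage sketch (it assumes `MistralForCausalLM` appears in the `__all__` list, which is truncated in this diff):

```python
# Hypothetical sketch: the re-export lets callers import the model class
# from the package root without knowing the module layout.
from vllm.model_executor.models import MistralForCausalLM

print(MistralForCausalLM.__module__)  # vllm.model_executor.models.mistral
```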
2 changes: 1 addition & 1 deletion vllm/model_executor/models/mistral.py
@@ -29,6 +29,7 @@

 import torch
 from torch import nn
+from transformers import MistralConfig

 from vllm.model_executor.input_metadata import InputMetadata
 from vllm.model_executor.layers.activation import SiluAndMul
@@ -44,7 +45,6 @@
     convert_pyslice_to_tensor, hf_model_weights_iterator,
     load_tensor_parallel_weights, load_padded_tensor_parallel_vocab)
 from vllm.sequence import SamplerOutput
-from vllm.transformers_utils.configs.mistral import MistralConfig

 KVCache = Tuple[torch.Tensor, torch.Tensor]
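
The only functional change in this file is the import source: `MistralConfig` now comes from upstream transformers rather than vLLM's vendored copy (deleted below). A brief illustration of the upstream class (attribute values are the Mistral-7B defaults in transformers 4.34, shown for orientation only):

```python
# Illustrative only: default values as shipped in transformers 4.34.
from transformers import MistralConfig

cfg = MistralConfig()
print(cfg.hidden_size)           # 4096
print(cfg.num_key_value_heads)   # 8  (grouped-query attention)
print(cfg.sliding_window)        # 4096  (sliding-window attention)
```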
9 changes: 0 additions & 9 deletions vllm/transformers_utils/config.py
@@ -17,15 +17,6 @@
 def get_config(model: str,
                trust_remote_code: bool,
                revision: Optional[str] = None) -> PretrainedConfig:
-    # NOTE: Because the Mistral model in HF hub does not have
-    # `configuration_mistral.py`, we cannot use `AutoConfig` to load the
-    # config. Instead, we use `MistralConfig` directly.
-    # NOTE: This is a hack. This does not work for local models.
-    # FIXME: Remove this once the Mistral model is available in the stable
-    # version of HF transformers.
-    if "mistral" in model.lower():
-        return MistralConfig.from_pretrained(model, revision=revision)
-
     try:
         config = AutoConfig.from_pretrained(
             model, trust_remote_code=trust_remote_code, revision=revision)
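
With the special case gone, Mistral checkpoints go through the same `AutoConfig` path as every other architecture, which also fixes the local-model limitation the old NOTE admitted to. A sketch of the now-uniform behavior (the repo id is illustrative):

```python
# Sketch: on transformers >= 4.34.0, AutoConfig resolves Mistral
# checkpoints directly; no model-name sniffing is needed.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("mistralai/Mistral-7B-v0.1")
print(type(config).__name__)  # MistralConfig
```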
2 changes: 0 additions & 2 deletions vllm/transformers_utils/configs/__init__.py
@@ -6,13 +6,11 @@
 # tiiuae/falcon-7b(-instruct) models. Newer Falcon models will use the
 # `FalconConfig` class from the official HuggingFace transformers library.
 from vllm.transformers_utils.configs.falcon import RWConfig
-from vllm.transformers_utils.configs.mistral import MistralConfig

 __all__ = [
     "MPTConfig",
     "BaiChuanConfig",
     "AquilaConfig",
     "QWenConfig",
     "RWConfig",
-    "MistralConfig",
 ]
66 changes: 0 additions & 66 deletions vllm/transformers_utils/configs/mistral.py

This file was deleted.
