From 45d1e3b26fa4bd000ac4602b4640dd7cc3a8821b Mon Sep 17 00:00:00 2001
From: Shawn Tan
Date: Wed, 28 Aug 2024 20:16:42 +0000
Subject: [PATCH] HF PR accepted.

- Removed granite config.
- Updated test with official model.

---
 tests/models/test_granite.py               |   2 +-
 vllm/model_executor/models/granite.py      |   4 +-
 vllm/transformers_utils/configs/granite.py | 199 ---------------------
 3 files changed, 3 insertions(+), 202 deletions(-)
 delete mode 100644 vllm/transformers_utils/configs/granite.py

diff --git a/tests/models/test_granite.py b/tests/models/test_granite.py
index 812d411bbad8e..24f26eca25075 100644
--- a/tests/models/test_granite.py
+++ b/tests/models/test_granite.py
@@ -7,7 +7,7 @@
 from .utils import check_logprobs_close
 
 MODELS = [
-    "mayank-mishra/granite-3b-mup",
+    "ibm/PowerLM-3b",
 ]
 
diff --git a/vllm/model_executor/models/granite.py b/vllm/model_executor/models/granite.py
index 892ad8a58b8f6..0c7a027e356be 100644
--- a/vllm/model_executor/models/granite.py
+++ b/vllm/model_executor/models/granite.py
@@ -48,8 +48,7 @@
     default_weight_loader, kv_cache_scales_loader, maybe_remap_kv_scale_name)
 from vllm.model_executor.sampling_metadata import SamplingMetadata
 from vllm.sequence import IntermediateTensors, SamplerOutput
-# from transformers import GraniteConfig
-from vllm.transformers_utils.configs.granite import GraniteConfig
+from transformers import GraniteConfig
 from vllm.utils import is_hip
 
 from .interfaces import SupportsLoRA
@@ -216,6 +215,7 @@ def __init__(
             cache_config=cache_config,
             prefix=f"{prefix}.self_attn",
         )
+
         self.mlp = GraniteMLP(
             hidden_size=self.hidden_size,
             intermediate_size=config.intermediate_size,
diff --git a/vllm/transformers_utils/configs/granite.py b/vllm/transformers_utils/configs/granite.py
deleted file mode 100644
index c12838be5d385..0000000000000
--- a/vllm/transformers_utils/configs/granite.py
+++ /dev/null
@@ -1,199 +0,0 @@
-# coding=utf-8
-# Copyright 2024 EleutherAI and the HuggingFace Inc. team. All rights reserved.
-#
-# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
-# and OPT implementations in this library. It has been modified from its
-# original forms to accommodate minor architectural differences compared
-# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Granite model configuration"""
-
-from transformers.configuration_utils import PretrainedConfig
-from transformers.modeling_rope_utils import rope_config_validation
-from transformers.utils import logging
-
-logger = logging.get_logger(__name__)
-
-
-class GraniteConfig(PretrainedConfig):
-    r"""
-    This is the configuration class to store the configuration of
-    a [`GraniteModel`]. It is used to instantiate an Granite
-    model according to the specified arguments, defining the model architecture.
-    Instantiating a configuration with the defaults will yield a similar
-    configuration to that of the Granite-3B.
-
-    Configuration objects inherit from [`PretrainedConfig`] and can be used to
-    control the model outputs. Read the documentation from [`PretrainedConfig`]
-    for more information.
-
-
-    Args:
-        vocab_size (`int`, *optional*, defaults to 32000):
-            Vocabulary size of the Granite model. Defines the number of
-            different tokens that can be represented by the `inputs_ids`
-            passed when calling [`GraniteModel`]
-        hidden_size (`int`, *optional*, defaults to 4096):
-            Dimension of the hidden representations.
-        intermediate_size (`int`, *optional*, defaults to 11008):
-            Dimension of the MLP representations.
-        num_hidden_layers (`int`, *optional*, defaults to 32):
-            Number of hidden layers in the Transformer decoder.
-        num_attention_heads (`int`, *optional*, defaults to 32):
-            Number of attention heads for each attention layer in the
-            Transformer decoder.
-        num_key_value_heads (`int`, *optional*):
-            This is the number of key_value heads that should be used to
-            implement Grouped Query Attention. If
-            `num_key_value_heads=num_attention_heads`, the model will use Multi
-            Head Attention (MHA), if `num_key_value_heads=1` the model will use
-            Multi Query Attention (MQA) otherwise GQA is used. When converting
-            a multi-head checkpoint to a GQA checkpoint, each group key and
-            value head should be constructed by meanpooling all the original
-            heads within that group. For more details checkout
-            [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not
-            specified, will default to `num_attention_heads`.
-        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
-            The non-linear activation function (function or string) in the
-            decoder.
-        max_position_embeddings (`int`, *optional*, defaults to 2048):
-            The maximum sequence length that this model might ever be used with.
-        initializer_range (`float`, *optional*, defaults to 0.02):
-            The standard deviation of the truncated_normal_initializer for
-            initializing all weight matrices.
-        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
-            The epsilon used by the rms normalization layers.
-        use_cache (`bool`, *optional*, defaults to `True`):
-            Whether or not the model should return the last key/values
-            attentions (not used by all models). Only relevant if
-            `config.is_decoder=True`.
-        pad_token_id (`int`, *optional*):
-            Padding token id.
-        bos_token_id (`int`, *optional*, defaults to 1):
-            Beginning of stream token id.
-        eos_token_id (`int`, *optional*, defaults to 2):
-            End of stream token id.
-        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
-            Whether to tie weight embeddings
-        rope_theta (`float`, *optional*, defaults to 10000.0):
-            The base period of the RoPE embeddings.
-        rope_scaling (`Dict`, *optional*):
-            Dictionary containing the scaling configuration for the RoPE
-            embeddings. Currently supports two scaling strategies: linear and
-            dynamic. Their scaling factor must be a float greater than 1. The
-            expected format is
-            `{"type": strategy name, "factor": scaling factor}`.
-            When using this flag, don't update `max_position_embeddings` to
-            the expected new maximum. See the following thread for more
-            information on how these scaling strategies behave:
-            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/.
-            This is an experimental feature, subject to breaking API changes
-            in future versions.
-        attention_bias (`bool`, *optional*, defaults to `False`):
-            Whether to use a bias in the query, key, value and output
-            projection layers during self-attention.
- attention_dropout (`float`, *optional*, defaults to 0.0): - The dropout ratio for the attention probabilities. - mlp_bias (`bool`, *optional*, defaults to `False`): - Whether to use a bias in up_proj, down_proj and gate_proj layers - in the MLP layers. - embedding_multiplier (`float`, *optional*, defaults to 1.0): - embedding multiplier - logits_scaling (`float`, *optional*, defaults to 1.0): - divisor for output logits - residual_multiplier (`float`, *optional*, defaults to 1.0): - residual multiplier - attention_multiplier (`float`, *optional*, defaults to 1.0): - attention multiplier - - ```python - >>> from transformers import GraniteModel, GraniteConfig - - >>> # Initializing a Granite granite-3b style configuration - >>> configuration = GraniteConfig() - - >>> # Initializing a model from the granite-7b style configuration - >>> model = GraniteModel(configuration) - - >>> # Accessing the model configuration - >>> configuration = model.config - ```""" - - model_type = "granite" - keys_to_ignore_at_inference = ["past_key_values"] - - def __init__( - self, - vocab_size=32000, - hidden_size=4096, - intermediate_size=11008, - num_hidden_layers=32, - num_attention_heads=32, - num_key_value_heads=None, - hidden_act="silu", - max_position_embeddings=2048, - initializer_range=0.02, - rms_norm_eps=1e-6, - use_cache=True, - pad_token_id=None, - bos_token_id=1, - eos_token_id=2, - tie_word_embeddings=False, - rope_theta=10000.0, - rope_scaling=None, - attention_bias=False, - attention_dropout=0.0, - mlp_bias=False, - embedding_multiplier=1.0, - logits_scaling=1.0, - residual_multiplier=1.0, - attention_multiplier=1.0, - **kwargs, - ): - self.vocab_size = vocab_size - self.max_position_embeddings = max_position_embeddings - self.hidden_size = hidden_size - self.intermediate_size = intermediate_size - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - - # for backward compatibility - if num_key_value_heads is None: - num_key_value_heads = num_attention_heads - - self.num_key_value_heads = num_key_value_heads - self.hidden_act = hidden_act - self.initializer_range = initializer_range - self.rms_norm_eps = rms_norm_eps - self.use_cache = use_cache - self.rope_theta = rope_theta - self.rope_scaling = rope_scaling - self.attention_bias = attention_bias - self.attention_dropout = attention_dropout - self.mlp_bias = mlp_bias - - self.embedding_multiplier = embedding_multiplier - self.logits_scaling = logits_scaling - self.residual_multiplier = residual_multiplier - self.attention_multiplier = attention_multiplier - - super().__init__( - pad_token_id=pad_token_id, - bos_token_id=bos_token_id, - eos_token_id=eos_token_id, - tie_word_embeddings=tie_word_embeddings, - **kwargs, - ) - - rope_config_validation(self)
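
Note: with the vendored copy deleted, the class now comes straight from
transformers, as the import hunk above shows. Below is a minimal sketch of
instantiating it, assuming a transformers release that includes the merged
Granite support; the non-default multiplier values are purely illustrative
and not taken from any released checkpoint.

    from transformers import GraniteConfig

    # Granite keeps the Llama-style core arguments and adds four scaling
    # knobs, all of which defaulted to 1.0 in the config deleted above.
    config = GraniteConfig(
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=32,      # equal to num_attention_heads -> MHA
        embedding_multiplier=12.0,   # illustrative: scales token embeddings
        logits_scaling=8.0,          # illustrative: divisor for output logits
        residual_multiplier=0.22,    # illustrative: scales residual branches
        attention_multiplier=0.015,  # illustrative: attention scaling factor
    )
    print(config.model_type)  # -> "granite"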
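A quick way to exercise the official checkpoint the test now points at,
mirroring tests/models/test_granite.py outside the test harness. This is a
sketch, not the project's test code: it assumes a vLLM build that includes
this patch, network access to download ibm/PowerLM-3b, and a GPU with room
for a 3B-parameter model.

    from vllm import LLM, SamplingParams

    llm = LLM(model="ibm/PowerLM-3b")

    # Greedy decoding keeps the output deterministic, the property the
    # logprob-comparison test relies on.
    params = SamplingParams(temperature=0.0, max_tokens=32)
    outputs = llm.generate(["The Granite architecture is"], params)
    print(outputs[0].outputs[0].text)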