From 161982b7b189a364557700a9c6458af5f8133f43 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Tue, 19 Nov 2024 04:57:16 +0000 Subject: [PATCH 01/17] Init Signed-off-by: Jee Jee Li --- tests/lora/test_chatglm3_tp.py | 113 ++++++++++++++++++++++++++++++ vllm/lora/fully_sharded_layers.py | 25 +++++-- vllm/lora/layers.py | 26 +++++-- 3 files changed, 155 insertions(+), 9 deletions(-) create mode 100644 tests/lora/test_chatglm3_tp.py diff --git a/tests/lora/test_chatglm3_tp.py b/tests/lora/test_chatglm3_tp.py new file mode 100644 index 0000000000000..4b3ecfa8a7e33 --- /dev/null +++ b/tests/lora/test_chatglm3_tp.py @@ -0,0 +1,113 @@ +from typing import List + +import vllm +from vllm.lora.request import LoRARequest + +from ..utils import multi_gpu_test + +MODEL_PATH = "THUDM/chatglm3-6b" + +PROMPT_TEMPLATE = """I want you to act as a SQL terminal in front of an example database, you need only to return the sql command to me.Below is an instruction that describes a task, Write a response that appropriately completes the request.\n"\n##Instruction:\nconcert_singer contains tables such as stadium, singer, concert, singer_in_concert. Table stadium has columns such as Stadium_ID, Location, Name, Capacity, Highest, Lowest, Average. Stadium_ID is the primary key.\nTable singer has columns such as Singer_ID, Name, Country, Song_Name, Song_release_year, Age, Is_male. Singer_ID is the primary key.\nTable concert has columns such as concert_ID, concert_Name, Theme, Stadium_ID, Year. concert_ID is the primary key.\nTable singer_in_concert has columns such as concert_ID, Singer_ID. concert_ID is the primary key.\nThe Stadium_ID of concert is the foreign key of Stadium_ID of stadium.\nThe Singer_ID of singer_in_concert is the foreign key of Singer_ID of singer.\nThe concert_ID of singer_in_concert is the foreign key of concert_ID of concert.\n\n###Input:\n{query}\n\n###Response:""" # noqa: E501 + + +def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: + prompts = [ + PROMPT_TEMPLATE.format(query="How many singers do we have?"), + PROMPT_TEMPLATE.format( + query= + "What is the average, minimum, and maximum age of all singers from France?" # noqa: E501 + ), + PROMPT_TEMPLATE.format( + query= + "Show name, country, age for all singers ordered by age from the oldest to the youngest." # noqa: E501 + ), + ] + print(prompts) + sampling_params = vllm.SamplingParams(temperature=0, max_tokens=32) + outputs = llm.generate( + prompts, + sampling_params, + lora_request=LoRARequest(str(lora_id), lora_id, lora_path) + if lora_id else None) + # Print the outputs. 
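+    # Strip and collect the generated text for each prompt so the tests
+    # below can compare it against the expected SQL statements.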
+ generated_texts: List[str] = [] + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text.strip() + generated_texts.append(generated_text) + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + return generated_texts + + +def test_chatglm3_lora_tp1(chatglm3_lora_files): + llm = vllm.LLM(MODEL_PATH, + max_model_len=1024, + enable_lora=True, + max_loras=4, + max_lora_rank=64, + tensor_parallel_size=1, + trust_remote_code=True + ) + + expected_lora_output = [ + "SELECT count(*) FROM singer", + "SELECT avg(age) , min(age) , max(age) FROM singer WHERE country = 'France'", # noqa: E501 + "SELECT name , country , age FROM singer ORDER BY age", + ] + + output1 = do_sample(llm, chatglm3_lora_files, lora_id=1) + for i in range(len(expected_lora_output)): + assert output1[i] == expected_lora_output[i] + output2 = do_sample(llm, chatglm3_lora_files, lora_id=2) + for i in range(len(expected_lora_output)): + assert output2[i] == expected_lora_output[i] + + +@multi_gpu_test(num_gpus=2) +def test_chatglm3_lora_tp2(chatglm3_lora_files): + llm = vllm.LLM(MODEL_PATH, + max_model_len=1024, + enable_lora=True, + max_loras=4, + max_lora_rank=64, + tensor_parallel_size=2, + trust_remote_code=True, + # fully_sharded_loras=True, + ) + + expected_lora_output = [ + "SELECT count(*) FROM singer", + "SELECT avg(age) , min(age) , max(age) FROM singer WHERE country = 'France'", # noqa: E501 + "SELECT name , country , age FROM singer ORDER BY age", + ] + + output1 = do_sample(llm, chatglm3_lora_files, lora_id=1) + for i in range(len(expected_lora_output)): + assert output1[i] == expected_lora_output[i] + output2 = do_sample(llm, chatglm3_lora_files, lora_id=2) + for i in range(len(expected_lora_output)): + assert output2[i] == expected_lora_output[i] + +@multi_gpu_test(num_gpus=4) +def test_chatglm3_lora_tp4(chatglm3_lora_files): + llm = vllm.LLM(MODEL_PATH, + max_model_len=1024, + enable_lora=True, + max_loras=4, + max_lora_rank=64, + tensor_parallel_size=4, + trust_remote_code=True + ) + + expected_lora_output = [ + "SELECT count(*) FROM singer", + "SELECT avg(age) , min(age) , max(age) FROM singer WHERE country = 'France'", # noqa: E501 + "SELECT name , country , age FROM singer ORDER BY age", + ] + + output1 = do_sample(llm, chatglm3_lora_files, lora_id=1) + for i in range(len(expected_lora_output)): + assert output1[i] == expected_lora_output[i] + output2 = do_sample(llm, chatglm3_lora_files, lora_id=2) + for i in range(len(expected_lora_output)): + assert output2[i] == expected_lora_output[i] diff --git a/vllm/lora/fully_sharded_layers.py b/vllm/lora/fully_sharded_layers.py index 3443c3feb4d2a..392d08901ac94 100644 --- a/vllm/lora/fully_sharded_layers.py +++ b/vllm/lora/fully_sharded_layers.py @@ -45,10 +45,27 @@ class ColumnParallelLinearWithShardedLoRA(ColumnParallelLinearWithLoRA): """ def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: - tp_rank = get_tensor_model_parallel_rank() - shard_size = self.lora_a_stacked.shape[2] - start_idx = tp_rank * shard_size - lora_a = lora_a[:, start_idx:start_idx + shard_size] + if self.is_merged_col_linear: + tp_rank = get_tensor_model_parallel_rank() + shard_size = self.lora_a_stacked.shape[2] + offset = lora_a.shape[-1] // 2 + + left_weight = lora_a[:, tp_rank * shard_size:(tp_rank + 1) * + shard_size] + right_weigt = lora_a[:, offset + tp_rank * shard_size:offset + + (tp_rank + 1) * shard_size] + lora_a = torch.cat([left_weight, right_weigt], dim=1) + else: + # tensor_model_parallel_rank = 
get_tensor_model_parallel_rank() + # shard_size = self.output_dim + # start_idx = tensor_model_parallel_rank * shard_size + # end_idx = (tensor_model_parallel_rank + 1) * shard_size + # lora_b = lora_b[:, start_idx:end_idx] + # return lora_b + tp_rank = get_tensor_model_parallel_rank() + shard_size = self.lora_a_stacked.shape[2] + start_idx = tp_rank * shard_size + lora_a = lora_a[:, start_idx:start_idx + shard_size] return lora_a def apply(self, x: torch.Tensor, diff --git a/vllm/lora/layers.py b/vllm/lora/layers.py index 6afe80219fe07..efcbd438e6c56 100644 --- a/vllm/lora/layers.py +++ b/vllm/lora/layers.py @@ -451,6 +451,10 @@ class ColumnParallelLinearWithLoRA(BaseLayerWithLoRA): def __init__(self, base_layer: ColumnParallelLinear) -> None: super().__init__() + + self.is_merged_col_linear = type( + base_layer) is MergedColumnParallelLinear + self.base_layer = base_layer self.tp_size = get_tensor_model_parallel_world_size() self.input_size = self.base_layer.input_size @@ -508,11 +512,23 @@ def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: return lora_a def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor: - tensor_model_parallel_rank = get_tensor_model_parallel_rank() - shard_size = self.output_dim - start_idx = tensor_model_parallel_rank * shard_size - end_idx = (tensor_model_parallel_rank + 1) * shard_size - lora_b = lora_b[:, start_idx:end_idx] + # mlp weight + if self.is_merged_col_linear: + tp_rank = get_tensor_model_parallel_rank() + shard_size = self.output_size // 2 + offset = lora_b.shape[-1] // 2 + + left_weight = lora_b[:, tp_rank * shard_size:(tp_rank + 1) * + shard_size] + right_weigt = lora_b[:, offset + tp_rank * shard_size:offset + + (tp_rank + 1) * shard_size] + lora_b = torch.cat([left_weight, right_weigt], dim=1) + else: + tensor_model_parallel_rank = get_tensor_model_parallel_rank() + shard_size = self.output_dim + start_idx = tensor_model_parallel_rank * shard_size + end_idx = (tensor_model_parallel_rank + 1) * shard_size + lora_b = lora_b[:, start_idx:end_idx] return lora_b def slice_bias(self, bias: torch.Tensor) -> torch.Tensor: From 74767cb696567d11832ee1fe87e890cf485ddd57 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Tue, 19 Nov 2024 14:08:59 +0000 Subject: [PATCH 02/17] Complete weight shard logic Signed-off-by: Jee Jee Li --- tests/lora/test_chatglm3_tp.py | 36 +++++++++++++++++-------------- vllm/lora/fully_sharded_layers.py | 13 +++++------ vllm/lora/layers.py | 9 ++++++-- 3 files changed, 32 insertions(+), 26 deletions(-) diff --git a/tests/lora/test_chatglm3_tp.py b/tests/lora/test_chatglm3_tp.py index 4b3ecfa8a7e33..4b3bdbedefb2e 100644 --- a/tests/lora/test_chatglm3_tp.py +++ b/tests/lora/test_chatglm3_tp.py @@ -1,5 +1,7 @@ from typing import List +import pytest + import vllm from vllm.lora.request import LoRARequest @@ -22,7 +24,6 @@ def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: "Show name, country, age for all singers ordered by age from the oldest to the youngest." 
# noqa: E501 ), ] - print(prompts) sampling_params = vllm.SamplingParams(temperature=0, max_tokens=32) outputs = llm.generate( prompts, @@ -46,8 +47,7 @@ def test_chatglm3_lora_tp1(chatglm3_lora_files): max_loras=4, max_lora_rank=64, tensor_parallel_size=1, - trust_remote_code=True - ) + trust_remote_code=True) expected_lora_output = [ "SELECT count(*) FROM singer", @@ -64,16 +64,18 @@ def test_chatglm3_lora_tp1(chatglm3_lora_files): @multi_gpu_test(num_gpus=2) -def test_chatglm3_lora_tp2(chatglm3_lora_files): - llm = vllm.LLM(MODEL_PATH, - max_model_len=1024, - enable_lora=True, - max_loras=4, - max_lora_rank=64, - tensor_parallel_size=2, - trust_remote_code=True, - # fully_sharded_loras=True, - ) +@pytest.mark.parametrize("fully_sharded", [True, False]) +def test_chatglm3_lora_tp2(chatglm3_lora_files, fully_sharded): + llm = vllm.LLM( + MODEL_PATH, + max_model_len=1024, + enable_lora=True, + max_loras=4, + max_lora_rank=64, + tensor_parallel_size=2, + trust_remote_code=True, + fully_sharded_loras=fully_sharded, + ) expected_lora_output = [ "SELECT count(*) FROM singer", @@ -88,16 +90,18 @@ def test_chatglm3_lora_tp2(chatglm3_lora_files): for i in range(len(expected_lora_output)): assert output2[i] == expected_lora_output[i] + @multi_gpu_test(num_gpus=4) -def test_chatglm3_lora_tp4(chatglm3_lora_files): +@pytest.mark.parametrize("fully_sharded", [True, False]) +def test_chatglm3_lora_tp4(chatglm3_lora_files, fully_sharded): llm = vllm.LLM(MODEL_PATH, max_model_len=1024, enable_lora=True, max_loras=4, max_lora_rank=64, tensor_parallel_size=4, - trust_remote_code=True - ) + trust_remote_code=True, + fully_sharded_loras=fully_sharded) expected_lora_output = [ "SELECT count(*) FROM singer", diff --git a/vllm/lora/fully_sharded_layers.py b/vllm/lora/fully_sharded_layers.py index 392d08901ac94..6a8dd31702e6e 100644 --- a/vllm/lora/fully_sharded_layers.py +++ b/vllm/lora/fully_sharded_layers.py @@ -45,23 +45,20 @@ class ColumnParallelLinearWithShardedLoRA(ColumnParallelLinearWithLoRA): """ def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: + # Applicable to cases where the base_layer is + # MergedColumnParallelLinear. if self.is_merged_col_linear: tp_rank = get_tensor_model_parallel_rank() - shard_size = self.lora_a_stacked.shape[2] + shard_size = self.lora_a_stacked.shape[2] // 2 offset = lora_a.shape[-1] // 2 - left_weight = lora_a[:, tp_rank * shard_size:(tp_rank + 1) * shard_size] right_weigt = lora_a[:, offset + tp_rank * shard_size:offset + (tp_rank + 1) * shard_size] lora_a = torch.cat([left_weight, right_weigt], dim=1) + # Applicable to cases where the base_layer is + # ColumnParallelLinear. else: - # tensor_model_parallel_rank = get_tensor_model_parallel_rank() - # shard_size = self.output_dim - # start_idx = tensor_model_parallel_rank * shard_size - # end_idx = (tensor_model_parallel_rank + 1) * shard_size - # lora_b = lora_b[:, start_idx:end_idx] - # return lora_b tp_rank = get_tensor_model_parallel_rank() shard_size = self.lora_a_stacked.shape[2] start_idx = tp_rank * shard_size diff --git a/vllm/lora/layers.py b/vllm/lora/layers.py index efcbd438e6c56..3f1f4524086ef 100644 --- a/vllm/lora/layers.py +++ b/vllm/lora/layers.py @@ -451,7 +451,9 @@ class ColumnParallelLinearWithLoRA(BaseLayerWithLoRA): def __init__(self, base_layer: ColumnParallelLinear) -> None: super().__init__() - + # The base_layer type is ColumnParallelLinear or + # MergedColumnParallelLinear, their weight sharding logic is + # inconsistent when TP is greater than 1. 
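+        # For example, with TP size 2 and a merged gate_up weight whose two
+        # stacked sub-matrices each have n output columns, rank 0 must take
+        # columns [0 : n // 2] and [n : n + n // 2] and concatenate them,
+        # whereas rank 0 of a plain ColumnParallelLinear with n output
+        # columns simply takes columns [0 : n // 2].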
self.is_merged_col_linear = type( base_layer) is MergedColumnParallelLinear @@ -512,7 +514,8 @@ def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: return lora_a def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor: - # mlp weight + # Applicable to cases where the base_layer is + # MergedColumnParallelLinear. if self.is_merged_col_linear: tp_rank = get_tensor_model_parallel_rank() shard_size = self.output_size // 2 @@ -523,6 +526,8 @@ def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor: right_weigt = lora_b[:, offset + tp_rank * shard_size:offset + (tp_rank + 1) * shard_size] lora_b = torch.cat([left_weight, right_weigt], dim=1) + # Applicable to cases where the base_layer is + # ColumnParallelLinear. else: tensor_model_parallel_rank = get_tensor_model_parallel_rank() shard_size = self.output_dim From f0e8f31a5876e0f486a8f7dbc3222ceefbde6b7c Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Thu, 21 Nov 2024 04:00:57 +0000 Subject: [PATCH 03/17] Add todo for bias slice Signed-off-by: Jee Jee Li --- vllm/lora/layers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/vllm/lora/layers.py b/vllm/lora/layers.py index 3f1f4524086ef..d51b3dde365db 100644 --- a/vllm/lora/layers.py +++ b/vllm/lora/layers.py @@ -537,6 +537,7 @@ def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor: return lora_b def slice_bias(self, bias: torch.Tensor) -> torch.Tensor: + # TODO: Fix the slicing logic of bias. if bias is None: return bias tensor_model_parallel_rank = get_tensor_model_parallel_rank() From 3fa2fb7bb5b35d35696bbaf5aca3098b5b33df7b Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Fri, 22 Nov 2024 05:10:10 +0000 Subject: [PATCH 04/17] Done Signed-off-by: Jee Jee Li --- tests/lora/test_chatglm3_tp.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/lora/test_chatglm3_tp.py b/tests/lora/test_chatglm3_tp.py index 4b3bdbedefb2e..82bfe96b44eed 100644 --- a/tests/lora/test_chatglm3_tp.py +++ b/tests/lora/test_chatglm3_tp.py @@ -3,6 +3,7 @@ import pytest import vllm +from vllm.distributed import cleanup_dist_env_and_memory from vllm.lora.request import LoRARequest from ..utils import multi_gpu_test @@ -61,6 +62,7 @@ def test_chatglm3_lora_tp1(chatglm3_lora_files): output2 = do_sample(llm, chatglm3_lora_files, lora_id=2) for i in range(len(expected_lora_output)): assert output2[i] == expected_lora_output[i] + cleanup_dist_env_and_memory() @multi_gpu_test(num_gpus=2) @@ -89,6 +91,7 @@ def test_chatglm3_lora_tp2(chatglm3_lora_files, fully_sharded): output2 = do_sample(llm, chatglm3_lora_files, lora_id=2) for i in range(len(expected_lora_output)): assert output2[i] == expected_lora_output[i] + cleanup_dist_env_and_memory() @multi_gpu_test(num_gpus=4) @@ -115,3 +118,4 @@ def test_chatglm3_lora_tp4(chatglm3_lora_files, fully_sharded): output2 = do_sample(llm, chatglm3_lora_files, lora_id=2) for i in range(len(expected_lora_output)): assert output2[i] == expected_lora_output[i] + cleanup_dist_env_and_memory() \ No newline at end of file From ff771cd9c5fee62dc9781ffe34b4fcd257f6fc93 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Fri, 22 Nov 2024 13:23:55 +0800 Subject: [PATCH 05/17] Update vllm/lora/layers.py Co-authored-by: Cyrus Leung --- vllm/lora/layers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/lora/layers.py b/vllm/lora/layers.py index d51b3dde365db..ead5793bd88a8 100644 --- a/vllm/lora/layers.py +++ b/vllm/lora/layers.py @@ -523,9 +523,9 @@ def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor: left_weight = lora_b[:, tp_rank * 
shard_size:(tp_rank + 1) * shard_size] - right_weigt = lora_b[:, offset + tp_rank * shard_size:offset + + right_weight = lora_b[:, offset + tp_rank * shard_size:offset + (tp_rank + 1) * shard_size] - lora_b = torch.cat([left_weight, right_weigt], dim=1) + lora_b = torch.cat([left_weight, right_weight], dim=1) # Applicable to cases where the base_layer is # ColumnParallelLinear. else: From 5f66271d9724c0710d72965c5fe8ebfc021403bc Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Fri, 22 Nov 2024 13:25:55 +0800 Subject: [PATCH 06/17] Update vllm/lora/fully_sharded_layers.py Co-authored-by: Cyrus Leung --- vllm/lora/fully_sharded_layers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/lora/fully_sharded_layers.py b/vllm/lora/fully_sharded_layers.py index 6a8dd31702e6e..a578aeebdb637 100644 --- a/vllm/lora/fully_sharded_layers.py +++ b/vllm/lora/fully_sharded_layers.py @@ -53,9 +53,9 @@ def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: offset = lora_a.shape[-1] // 2 left_weight = lora_a[:, tp_rank * shard_size:(tp_rank + 1) * shard_size] - right_weigt = lora_a[:, offset + tp_rank * shard_size:offset + + right_weight = lora_a[:, offset + tp_rank * shard_size:offset + (tp_rank + 1) * shard_size] - lora_a = torch.cat([left_weight, right_weigt], dim=1) + lora_a = torch.cat([left_weight, right_weight], dim=1) # Applicable to cases where the base_layer is # ColumnParallelLinear. else: From a76016e2e69b3a6803071f2bfcb88af894c73fa0 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Fri, 22 Nov 2024 05:38:26 +0000 Subject: [PATCH 07/17] Format code Signed-off-by: Jee Jee Li --- vllm/lora/fully_sharded_layers.py | 2 +- vllm/lora/layers.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/lora/fully_sharded_layers.py b/vllm/lora/fully_sharded_layers.py index a578aeebdb637..fc69a9769f86a 100644 --- a/vllm/lora/fully_sharded_layers.py +++ b/vllm/lora/fully_sharded_layers.py @@ -54,7 +54,7 @@ def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: left_weight = lora_a[:, tp_rank * shard_size:(tp_rank + 1) * shard_size] right_weight = lora_a[:, offset + tp_rank * shard_size:offset + - (tp_rank + 1) * shard_size] + (tp_rank + 1) * shard_size] lora_a = torch.cat([left_weight, right_weight], dim=1) # Applicable to cases where the base_layer is # ColumnParallelLinear. diff --git a/vllm/lora/layers.py b/vllm/lora/layers.py index ead5793bd88a8..1e0119238ff2c 100644 --- a/vllm/lora/layers.py +++ b/vllm/lora/layers.py @@ -524,7 +524,7 @@ def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor: left_weight = lora_b[:, tp_rank * shard_size:(tp_rank + 1) * shard_size] right_weight = lora_b[:, offset + tp_rank * shard_size:offset + - (tp_rank + 1) * shard_size] + (tp_rank + 1) * shard_size] lora_b = torch.cat([left_weight, right_weight], dim=1) # Applicable to cases where the base_layer is # ColumnParallelLinear. 
From 9abad3cb673065e693f6ec04ad195075e1fd46dd Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Fri, 22 Nov 2024 10:12:12 +0000 Subject: [PATCH 08/17] Add LoRA TP test Signed-off-by: Jee Jee Li --- tests/lora/test_chatglm3_tp.py | 79 +++++++++++------------ tests/lora/test_llama.py | 48 +------------- tests/lora/test_llama_tp.py | 101 ++++++++++++++++++++++++++++++ vllm/lora/fully_sharded_layers.py | 27 +++----- vllm/lora/layers.py | 2 +- 5 files changed, 151 insertions(+), 106 deletions(-) create mode 100644 tests/lora/test_llama_tp.py diff --git a/tests/lora/test_chatglm3_tp.py b/tests/lora/test_chatglm3_tp.py index 82bfe96b44eed..40e07278b2676 100644 --- a/tests/lora/test_chatglm3_tp.py +++ b/tests/lora/test_chatglm3_tp.py @@ -1,8 +1,7 @@ from typing import List -import pytest - import vllm +from tests.utils import fork_new_process_for_each_test from vllm.distributed import cleanup_dist_env_and_memory from vllm.lora.request import LoRARequest @@ -12,6 +11,12 @@ PROMPT_TEMPLATE = """I want you to act as a SQL terminal in front of an example database, you need only to return the sql command to me.Below is an instruction that describes a task, Write a response that appropriately completes the request.\n"\n##Instruction:\nconcert_singer contains tables such as stadium, singer, concert, singer_in_concert. Table stadium has columns such as Stadium_ID, Location, Name, Capacity, Highest, Lowest, Average. Stadium_ID is the primary key.\nTable singer has columns such as Singer_ID, Name, Country, Song_Name, Song_release_year, Age, Is_male. Singer_ID is the primary key.\nTable concert has columns such as concert_ID, concert_Name, Theme, Stadium_ID, Year. concert_ID is the primary key.\nTable singer_in_concert has columns such as concert_ID, Singer_ID. concert_ID is the primary key.\nThe Stadium_ID of concert is the foreign key of Stadium_ID of stadium.\nThe Singer_ID of singer_in_concert is the foreign key of Singer_ID of singer.\nThe concert_ID of singer_in_concert is the foreign key of concert_ID of concert.\n\n###Input:\n{query}\n\n###Response:""" # noqa: E501 +EXPECTED_LORA_OUTPUT = [ + "SELECT count(*) FROM singer", + "SELECT avg(age) , min(age) , max(age) FROM singer WHERE country = 'France'", # noqa: E501 + "SELECT name , country , age FROM singer ORDER BY age", +] + def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: prompts = [ @@ -41,6 +46,7 @@ def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: return generated_texts +@fork_new_process_for_each_test def test_chatglm3_lora_tp1(chatglm3_lora_files): llm = vllm.LLM(MODEL_PATH, max_model_len=1024, @@ -50,53 +56,51 @@ def test_chatglm3_lora_tp1(chatglm3_lora_files): tensor_parallel_size=1, trust_remote_code=True) - expected_lora_output = [ + EXPECTED_LORA_OUTPUT = [ "SELECT count(*) FROM singer", "SELECT avg(age) , min(age) , max(age) FROM singer WHERE country = 'France'", # noqa: E501 "SELECT name , country , age FROM singer ORDER BY age", ] output1 = do_sample(llm, chatglm3_lora_files, lora_id=1) - for i in range(len(expected_lora_output)): - assert output1[i] == expected_lora_output[i] + for i in range(len(EXPECTED_LORA_OUTPUT)): + assert output1[i] == EXPECTED_LORA_OUTPUT[i] output2 = do_sample(llm, chatglm3_lora_files, lora_id=2) - for i in range(len(expected_lora_output)): - assert output2[i] == expected_lora_output[i] + for i in range(len(EXPECTED_LORA_OUTPUT)): + assert output2[i] == EXPECTED_LORA_OUTPUT[i] cleanup_dist_env_and_memory() -@multi_gpu_test(num_gpus=2) 
-@pytest.mark.parametrize("fully_sharded", [True, False]) -def test_chatglm3_lora_tp2(chatglm3_lora_files, fully_sharded): - llm = vllm.LLM( - MODEL_PATH, - max_model_len=1024, - enable_lora=True, - max_loras=4, - max_lora_rank=64, - tensor_parallel_size=2, - trust_remote_code=True, - fully_sharded_loras=fully_sharded, - ) - - expected_lora_output = [ +@multi_gpu_test(num_gpus=4) +@fork_new_process_for_each_test +def test_chatglm3_lora_tp4(chatglm3_lora_files): + llm = vllm.LLM(MODEL_PATH, + max_model_len=1024, + enable_lora=True, + max_loras=4, + max_lora_rank=64, + tensor_parallel_size=4, + trust_remote_code=True, + fully_sharded_loras=False) + + EXPECTED_LORA_OUTPUT = [ "SELECT count(*) FROM singer", "SELECT avg(age) , min(age) , max(age) FROM singer WHERE country = 'France'", # noqa: E501 "SELECT name , country , age FROM singer ORDER BY age", ] output1 = do_sample(llm, chatglm3_lora_files, lora_id=1) - for i in range(len(expected_lora_output)): - assert output1[i] == expected_lora_output[i] + for i in range(len(EXPECTED_LORA_OUTPUT)): + assert output1[i] == EXPECTED_LORA_OUTPUT[i] output2 = do_sample(llm, chatglm3_lora_files, lora_id=2) - for i in range(len(expected_lora_output)): - assert output2[i] == expected_lora_output[i] + for i in range(len(EXPECTED_LORA_OUTPUT)): + assert output2[i] == EXPECTED_LORA_OUTPUT[i] cleanup_dist_env_and_memory() @multi_gpu_test(num_gpus=4) -@pytest.mark.parametrize("fully_sharded", [True, False]) -def test_chatglm3_lora_tp4(chatglm3_lora_files, fully_sharded): +@fork_new_process_for_each_test +def test_chatglm3_lora_tp4_fully_sharded_loras(chatglm3_lora_files): llm = vllm.LLM(MODEL_PATH, max_model_len=1024, enable_lora=True, @@ -104,18 +108,11 @@ def test_chatglm3_lora_tp4(chatglm3_lora_files, fully_sharded): max_lora_rank=64, tensor_parallel_size=4, trust_remote_code=True, - fully_sharded_loras=fully_sharded) - - expected_lora_output = [ - "SELECT count(*) FROM singer", - "SELECT avg(age) , min(age) , max(age) FROM singer WHERE country = 'France'", # noqa: E501 - "SELECT name , country , age FROM singer ORDER BY age", - ] - + fully_sharded_loras=True) output1 = do_sample(llm, chatglm3_lora_files, lora_id=1) - for i in range(len(expected_lora_output)): - assert output1[i] == expected_lora_output[i] + for i in range(len(EXPECTED_LORA_OUTPUT)): + assert output1[i] == EXPECTED_LORA_OUTPUT[i] output2 = do_sample(llm, chatglm3_lora_files, lora_id=2) - for i in range(len(expected_lora_output)): - assert output2[i] == expected_lora_output[i] - cleanup_dist_env_and_memory() \ No newline at end of file + for i in range(len(EXPECTED_LORA_OUTPUT)): + assert output2[i] == EXPECTED_LORA_OUTPUT[i] + cleanup_dist_env_and_memory() diff --git a/tests/lora/test_llama.py b/tests/lora/test_llama.py index e2a4f1ed0496a..d307cc9dd6990 100644 --- a/tests/lora/test_llama.py +++ b/tests/lora/test_llama.py @@ -1,10 +1,8 @@ from typing import List -import pytest import ray import vllm -from vllm.distributed import cleanup_dist_env_and_memory from vllm.lora.request import LoRARequest MODEL_PATH = "meta-llama/Llama-2-7b-hf" @@ -37,16 +35,13 @@ def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: return generated_texts -@pytest.mark.parametrize("tp_size", [1, 2, 4]) -def test_llama_lora(sql_lora_files, tp_size, num_gpus_available): - if num_gpus_available < tp_size: - pytest.skip(f"Not enough GPUs for tensor parallelism {tp_size}") +def test_llama_lora(sql_lora_files): llm = vllm.LLM(MODEL_PATH, enable_lora=True, max_num_seqs=16, max_loras=4, - 
tensor_parallel_size=tp_size) + tensor_parallel_size=1) expected_no_lora_output = [ "\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_75 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_76 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_77 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_78 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user]", # noqa: E501 @@ -80,45 +75,6 @@ def test_llama_lora(sql_lora_files, tp_size, num_gpus_available): print("removing lora") -def test_llama_tensor_parallel_equality(sql_lora_files, num_gpus_available): - if num_gpus_available < 4: - pytest.skip("Not enough GPUs for tensor parallelism 4") - - llm_tp1 = vllm.LLM(MODEL_PATH, - enable_lora=True, - max_num_seqs=16, - max_loras=4, - tensor_parallel_size=1) - output_tp1 = do_sample(llm_tp1, sql_lora_files, lora_id=1) - - del llm_tp1 - cleanup_dist_env_and_memory() - - llm_tp2 = vllm.LLM(MODEL_PATH, - enable_lora=True, - max_num_seqs=16, - max_loras=4, - tensor_parallel_size=2) - output_tp2 = do_sample(llm_tp2, sql_lora_files, lora_id=1) - - del llm_tp2 - cleanup_dist_env_and_memory() - - assert output_tp1 == output_tp2 - - llm_tp4 = vllm.LLM(MODEL_PATH, - enable_lora=True, - max_num_seqs=16, - max_loras=4, - tensor_parallel_size=4) - output_tp4 = do_sample(llm_tp4, sql_lora_files, lora_id=1) - - del llm_tp4 - cleanup_dist_env_and_memory() - - assert output_tp1 == output_tp4 - - def test_llama_lora_warmup(sql_lora_files): """Test that the LLM initialization works with a warmup LORA path and is more conservative""" diff --git a/tests/lora/test_llama_tp.py b/tests/lora/test_llama_tp.py new file mode 100644 index 0000000000000..ef338252f4689 --- /dev/null +++ b/tests/lora/test_llama_tp.py @@ -0,0 +1,101 @@ +from typing import List +import vllm +from vllm.lora.request import LoRARequest +from tests.utils import fork_new_process_for_each_test + +MODEL_PATH = "meta-llama/Llama-2-7b-hf" + + + + +EXPECTED_NO_LORA_OUTPUT = [ + "\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_75 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_76 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_77 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_78 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user]", # noqa: E501 + " Write a SQL 
query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? ", # noqa: E501 + "\n\n answer: 1\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_96 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a high tone mora with a gloss of /˧kot/ [kòt]? [/user] [assistant]\n\n answer: 2\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_97 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a high tone mora with a gloss of /˧kot/ [kòt]? [/user] [assistant]\n\n answer: 2\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_98 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one m", # noqa: E501 + " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE candidate (people_id VARCHAR, unsure_rate INTEGER); CREATE TABLE people (sex VARCHAR, people_id VARCHAR)\n\n question: which gender got the highest average uncertain ratio. ", # noqa: E501 + " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_60 (pick INTEGER, former_wnba_team VARCHAR)\n\n question: What pick was a player that previously played for the Minnesota Lynx? ", # noqa: E501 + "\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE", # noqa: E501 + ] +EXPECTED_LORA_OUTPUT = [ + " SELECT icao FROM table_name_74 WHERE airport = 'lilongwe international airport' ", # noqa: E501 + " SELECT nationality FROM table_name_11 WHERE elector = 'anchero pantaleone' ", # noqa: E501 + " SELECT one_mora FROM table_name_95 WHERE gloss = 'low tone mora with a gloss of /˩okiru/' [òkìɽɯ́] AND accented_mora = 'low tone mora with a gloss of /˩okiru/' [òkìɽɯ́] ", # noqa: E501 + " SELECT sex FROM people WHERE people_id IN (SELECT people_id FROM candidate GROUP BY sex ORDER BY COUNT(people_id) DESC LIMIT 1) ", # noqa: E501 + " SELECT pick FROM table_name_60 WHERE former_wnba_team = 'Minnesota Lynx' ", # noqa: E501 + " SELECT womens_doubles FROM table_28138035_4 WHERE mens_singles = 'Werner Schlager' " # noqa: E501 + ] + + + +def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: + prompts = [ + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] 
[assistant]", # noqa: E501 + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? [/user] [assistant]", # noqa: E501 + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_95 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a low tone mora with a gloss of /˩okiru/ [òkìɽɯ́]? [/user] [assistant]", # noqa: E501 + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE candidate (people_id VARCHAR, unsure_rate INTEGER); CREATE TABLE people (sex VARCHAR, people_id VARCHAR)\n\n question: which gender got the highest average uncertain ratio. [/user] [assistant]", # noqa: E501 + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_60 (pick INTEGER, former_wnba_team VARCHAR)\n\n question: What pick was a player that previously played for the Minnesota Lynx? [/user] [assistant]", # noqa: E501 + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]" # noqa: E501 + ] + sampling_params = vllm.SamplingParams(temperature=0, + max_tokens=256, + stop=["[/assistant]"]) + outputs = llm.generate( + prompts, + sampling_params, + lora_request=LoRARequest(str(lora_id), lora_id, lora_path) + if lora_id else None) + # Print the outputs. + generated_texts: List[str] = [] + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + generated_texts.append(generated_text) + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + return generated_texts + +@fork_new_process_for_each_test +def test_llama_lora_tp4(sql_lora_files): + + llm = vllm.LLM(MODEL_PATH, + enable_lora=True, + max_num_seqs=16, + max_loras=4, + tensor_parallel_size=4,) + + print("lora adapter created") + assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT + + print("lora 1") + assert do_sample(llm, sql_lora_files, lora_id=1) == EXPECTED_LORA_OUTPUT + + print("no lora") + assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT + + print("lora 2") + assert do_sample(llm, sql_lora_files, lora_id=2) == EXPECTED_LORA_OUTPUT + + print("removing lora") + + +@fork_new_process_for_each_test +def test_llama_lora_tp4_fully_sharded_loras(sql_lora_files): + + llm = vllm.LLM(MODEL_PATH, + enable_lora=True, + max_num_seqs=16, + max_loras=4, + tensor_parallel_size=4,) + print("lora adapter created") + assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT + + print("lora 1") + assert do_sample(llm, sql_lora_files, lora_id=1) == EXPECTED_LORA_OUTPUT + + print("no lora") + assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT + + print("lora 2") + assert do_sample(llm, sql_lora_files, lora_id=2) == EXPECTED_LORA_OUTPUT + + print("removing lora") + diff --git a/vllm/lora/fully_sharded_layers.py b/vllm/lora/fully_sharded_layers.py index fc69a9769f86a..f5c2eced9d2bb 100644 --- a/vllm/lora/fully_sharded_layers.py +++ b/vllm/lora/fully_sharded_layers.py @@ -44,25 +44,16 @@ class ColumnParallelLinearWithShardedLoRA(ColumnParallelLinearWithLoRA): Based on S-LoRA, slicing 
happens along the rank dim. """ + # For all LoRA layers where the `base_layer` is `ColumnParallelLinear`, + # their `lora_a` and `lora_b` have different sharding patterns. After + # completing the `lora_a` GEMM , a gather operation is performed. + # Therefore, the sharding of `lora_a` only needs to correspond with the + # gather operation. def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: - # Applicable to cases where the base_layer is - # MergedColumnParallelLinear. - if self.is_merged_col_linear: - tp_rank = get_tensor_model_parallel_rank() - shard_size = self.lora_a_stacked.shape[2] // 2 - offset = lora_a.shape[-1] // 2 - left_weight = lora_a[:, tp_rank * shard_size:(tp_rank + 1) * - shard_size] - right_weight = lora_a[:, offset + tp_rank * shard_size:offset + - (tp_rank + 1) * shard_size] - lora_a = torch.cat([left_weight, right_weight], dim=1) - # Applicable to cases where the base_layer is - # ColumnParallelLinear. - else: - tp_rank = get_tensor_model_parallel_rank() - shard_size = self.lora_a_stacked.shape[2] - start_idx = tp_rank * shard_size - lora_a = lora_a[:, start_idx:start_idx + shard_size] + tp_rank = get_tensor_model_parallel_rank() + shard_size = self.lora_a_stacked.shape[2] + start_idx = tp_rank * shard_size + lora_a = lora_a[:, start_idx:start_idx + shard_size] return lora_a def apply(self, x: torch.Tensor, diff --git a/vllm/lora/layers.py b/vllm/lora/layers.py index 1e0119238ff2c..3701988ff692f 100644 --- a/vllm/lora/layers.py +++ b/vllm/lora/layers.py @@ -801,7 +801,7 @@ def can_replace_layer( class QKVParallelLinearWithLora(ColumnParallelLinearWithLoRA): """ ColumnParallelLinear layer that is specifically designed for - qkv_proj. Certain models, such as chtglm3 and baichuan-7b, + qkv_proj. Certain models, such as chatglm3 and baichuan-7b, only contains a single LoRA within their qkv_proj layer. 
During inference with Tensor Parallel, the weights of lora_b From 3aa890f9f271dd0452eb85c9a8fa246ecb15ea1c Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Fri, 22 Nov 2024 10:15:50 +0000 Subject: [PATCH 09/17] Done Signed-off-by: Jee Jee Li --- tests/lora/test_llama_tp.py | 65 ++++++++++++++++++++----------------- 1 file changed, 35 insertions(+), 30 deletions(-) diff --git a/tests/lora/test_llama_tp.py b/tests/lora/test_llama_tp.py index ef338252f4689..fce5885d40443 100644 --- a/tests/lora/test_llama_tp.py +++ b/tests/lora/test_llama_tp.py @@ -1,30 +1,29 @@ from typing import List + import vllm -from vllm.lora.request import LoRARequest from tests.utils import fork_new_process_for_each_test +from vllm.lora.request import LoRARequest -MODEL_PATH = "meta-llama/Llama-2-7b-hf" - - +from ..utils import multi_gpu_test +MODEL_PATH = "meta-llama/Llama-2-7b-hf" EXPECTED_NO_LORA_OUTPUT = [ - "\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_75 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_76 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_77 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_78 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user]", # noqa: E501 - " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? ", # noqa: E501 - "\n\n answer: 1\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_96 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a high tone mora with a gloss of /˧kot/ [kòt]? [/user] [assistant]\n\n answer: 2\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_97 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a high tone mora with a gloss of /˧kot/ [kòt]? [/user] [assistant]\n\n answer: 2\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_98 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one m", # noqa: E501 - " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE candidate (people_id VARCHAR, unsure_rate INTEGER); CREATE TABLE people (sex VARCHAR, people_id VARCHAR)\n\n question: which gender got the highest average uncertain ratio. ", # noqa: E501 - " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_60 (pick INTEGER, former_wnba_team VARCHAR)\n\n question: What pick was a player that previously played for the Minnesota Lynx? 
", # noqa: E501 - "\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE", # noqa: E501 - ] + "\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_75 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_76 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_77 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_78 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user]", # noqa: E501 + " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? ", # noqa: E501 + "\n\n answer: 1\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_96 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a high tone mora with a gloss of /˧kot/ [kòt]? [/user] [assistant]\n\n answer: 2\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_97 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a high tone mora with a gloss of /˧kot/ [kòt]? [/user] [assistant]\n\n answer: 2\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_98 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one m", # noqa: E501 + " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE candidate (people_id VARCHAR, unsure_rate INTEGER); CREATE TABLE people (sex VARCHAR, people_id VARCHAR)\n\n question: which gender got the highest average uncertain ratio. ", # noqa: E501 + " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_60 (pick INTEGER, former_wnba_team VARCHAR)\n\n question: What pick was a player that previously played for the Minnesota Lynx? 
", # noqa: E501 + "\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE", # noqa: E501 +] EXPECTED_LORA_OUTPUT = [ - " SELECT icao FROM table_name_74 WHERE airport = 'lilongwe international airport' ", # noqa: E501 - " SELECT nationality FROM table_name_11 WHERE elector = 'anchero pantaleone' ", # noqa: E501 - " SELECT one_mora FROM table_name_95 WHERE gloss = 'low tone mora with a gloss of /˩okiru/' [òkìɽɯ́] AND accented_mora = 'low tone mora with a gloss of /˩okiru/' [òkìɽɯ́] ", # noqa: E501 - " SELECT sex FROM people WHERE people_id IN (SELECT people_id FROM candidate GROUP BY sex ORDER BY COUNT(people_id) DESC LIMIT 1) ", # noqa: E501 - " SELECT pick FROM table_name_60 WHERE former_wnba_team = 'Minnesota Lynx' ", # noqa: E501 - " SELECT womens_doubles FROM table_28138035_4 WHERE mens_singles = 'Werner Schlager' " # noqa: E501 - ] - + " SELECT icao FROM table_name_74 WHERE airport = 'lilongwe international airport' ", # noqa: E501 + " SELECT nationality FROM table_name_11 WHERE elector = 'anchero pantaleone' ", # noqa: E501 + " SELECT one_mora FROM table_name_95 WHERE gloss = 'low tone mora with a gloss of /˩okiru/' [òkìɽɯ́] AND accented_mora = 'low tone mora with a gloss of /˩okiru/' [òkìɽɯ́] ", # noqa: E501 + " SELECT sex FROM people WHERE people_id IN (SELECT people_id FROM candidate GROUP BY sex ORDER BY COUNT(people_id) DESC LIMIT 1) ", # noqa: E501 + " SELECT pick FROM table_name_60 WHERE former_wnba_team = 'Minnesota Lynx' ", # noqa: E501 + " SELECT womens_doubles FROM table_28138035_4 WHERE mens_singles = 'Werner Schlager' " # noqa: E501 +] def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: @@ -53,14 +52,18 @@ def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") return generated_texts + +@multi_gpu_test(num_gpus=4) @fork_new_process_for_each_test def test_llama_lora_tp4(sql_lora_files): - llm = vllm.LLM(MODEL_PATH, - enable_lora=True, - max_num_seqs=16, - max_loras=4, - tensor_parallel_size=4,) + llm = vllm.LLM( + MODEL_PATH, + enable_lora=True, + max_num_seqs=16, + max_loras=4, + tensor_parallel_size=4, + ) print("lora adapter created") assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT @@ -77,14 +80,17 @@ def test_llama_lora_tp4(sql_lora_files): print("removing lora") +@multi_gpu_test(num_gpus=4) @fork_new_process_for_each_test def test_llama_lora_tp4_fully_sharded_loras(sql_lora_files): - llm = vllm.LLM(MODEL_PATH, - enable_lora=True, - max_num_seqs=16, - max_loras=4, - tensor_parallel_size=4,) + llm = vllm.LLM( + MODEL_PATH, + enable_lora=True, + max_num_seqs=16, + max_loras=4, + tensor_parallel_size=4, + ) print("lora adapter created") assert do_sample(llm, 
sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT
@@ -98,4 +104,3 @@ def test_llama_lora_tp4_fully_sharded_loras(sql_lora_files):
     assert do_sample(llm, sql_lora_files, lora_id=2) == EXPECTED_LORA_OUTPUT
 
     print("removing lora")
-

From efb37a4386ddb00f589675c824a09e3714c5d7d5 Mon Sep 17 00:00:00 2001
From: Jee Jee Li
Date: Fri, 22 Nov 2024 11:07:31 +0000
Subject: [PATCH 10/17] Optimize unit test

Signed-off-by: Jee Jee Li
---
 tests/lora/conftest.py         |   2 +-
 tests/lora/test_chatglm3.py    |  59 ------
 tests/lora/test_chatglm3_tp.py |  18 +----
 tests/lora/test_llama.py       | 102 ---------
 tests/lora/test_llama_tp.py    |  55 ++++++++++++++++++
 5 files changed, 58 insertions(+), 178 deletions(-)
 delete mode 100644 tests/lora/test_chatglm3.py
 delete mode 100644 tests/lora/test_llama.py

diff --git a/tests/lora/conftest.py b/tests/lora/conftest.py
index 29ecf37808205..d71f8fffefe21 100644
--- a/tests/lora/conftest.py
+++ b/tests/lora/conftest.py
@@ -148,7 +148,7 @@ def sql_lora_huggingface_id():
 
 
 @pytest.fixture(scope="session")
-def sql_lora_files():
+def sql_lora_files(sql_lora_huggingface_id):
     return snapshot_download(repo_id=sql_lora_huggingface_id)
 
 
diff --git a/tests/lora/test_chatglm3.py b/tests/lora/test_chatglm3.py
deleted file mode 100644
index de4cbea80924e..0000000000000
--- a/tests/lora/test_chatglm3.py
+++ /dev/null
@@ -1,59 +0,0 @@
-from typing import List
-
-import vllm
-from vllm.lora.request import LoRARequest
-
-MODEL_PATH = "THUDM/chatglm3-6b"
-
-PROMPT_TEMPLATE = """I want you to act as a SQL terminal in front of an example database, you need only to return the sql command to me.Below is an instruction that describes a task, Write a response that appropriately completes the request.\n"\n##Instruction:\nconcert_singer contains tables such as stadium, singer, concert, singer_in_concert. Table stadium has columns such as Stadium_ID, Location, Name, Capacity, Highest, Lowest, Average. Stadium_ID is the primary key.\nTable singer has columns such as Singer_ID, Name, Country, Song_Name, Song_release_year, Age, Is_male. Singer_ID is the primary key.\nTable concert has columns such as concert_ID, concert_Name, Theme, Stadium_ID, Year. concert_ID is the primary key.\nTable singer_in_concert has columns such as concert_ID, Singer_ID. concert_ID is the primary key.\nThe Stadium_ID of concert is the foreign key of Stadium_ID of stadium.\nThe Singer_ID of singer_in_concert is the foreign key of Singer_ID of singer.\nThe concert_ID of singer_in_concert is the foreign key of concert_ID of concert.\n\n###Input:\n{query}\n\n###Response:"""  # noqa: E501
-
-
-def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]:
-    prompts = [
-        PROMPT_TEMPLATE.format(query="How many singers do we have?"),
-        PROMPT_TEMPLATE.format(
-            query=
-            "What is the average, minimum, and maximum age of all singers from France?"  # noqa: E501
-        ),
-        PROMPT_TEMPLATE.format(
-            query=
-            "Show name, country, age for all singers ordered by age from the oldest to the youngest."  # noqa: E501
-        ),
-    ]
-    print(prompts)
-    sampling_params = vllm.SamplingParams(temperature=0, max_tokens=32)
-    outputs = llm.generate(
-        prompts,
-        sampling_params,
-        lora_request=LoRARequest(str(lora_id), lora_id, lora_path)
-        if lora_id else None)
-    # Print the outputs. 
- generated_texts: List[str] = [] - for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text.strip() - generated_texts.append(generated_text) - print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") - return generated_texts - - -def test_chatglm3_lora(chatglm3_lora_files): - llm = vllm.LLM(MODEL_PATH, - max_model_len=1024, - enable_lora=True, - max_loras=4, - max_lora_rank=64, - trust_remote_code=True) - - expected_lora_output = [ - "SELECT count(*) FROM singer", - "SELECT avg(age) , min(age) , max(age) FROM singer WHERE country = 'France'", # noqa: E501 - "SELECT name , country , age FROM singer ORDER BY age", - ] - - output1 = do_sample(llm, chatglm3_lora_files, lora_id=1) - for i in range(len(expected_lora_output)): - assert output1[i] == expected_lora_output[i] - output2 = do_sample(llm, chatglm3_lora_files, lora_id=2) - for i in range(len(expected_lora_output)): - assert output2[i] == expected_lora_output[i] diff --git a/tests/lora/test_chatglm3_tp.py b/tests/lora/test_chatglm3_tp.py index 40e07278b2676..07d641faaa821 100644 --- a/tests/lora/test_chatglm3_tp.py +++ b/tests/lora/test_chatglm3_tp.py @@ -2,7 +2,6 @@ import vllm from tests.utils import fork_new_process_for_each_test -from vllm.distributed import cleanup_dist_env_and_memory from vllm.lora.request import LoRARequest from ..utils import multi_gpu_test @@ -47,7 +46,7 @@ def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: @fork_new_process_for_each_test -def test_chatglm3_lora_tp1(chatglm3_lora_files): +def test_chatglm3_lora(chatglm3_lora_files): llm = vllm.LLM(MODEL_PATH, max_model_len=1024, enable_lora=True, @@ -56,19 +55,12 @@ def test_chatglm3_lora_tp1(chatglm3_lora_files): tensor_parallel_size=1, trust_remote_code=True) - EXPECTED_LORA_OUTPUT = [ - "SELECT count(*) FROM singer", - "SELECT avg(age) , min(age) , max(age) FROM singer WHERE country = 'France'", # noqa: E501 - "SELECT name , country , age FROM singer ORDER BY age", - ] - output1 = do_sample(llm, chatglm3_lora_files, lora_id=1) for i in range(len(EXPECTED_LORA_OUTPUT)): assert output1[i] == EXPECTED_LORA_OUTPUT[i] output2 = do_sample(llm, chatglm3_lora_files, lora_id=2) for i in range(len(EXPECTED_LORA_OUTPUT)): assert output2[i] == EXPECTED_LORA_OUTPUT[i] - cleanup_dist_env_and_memory() @multi_gpu_test(num_gpus=4) @@ -83,11 +75,6 @@ def test_chatglm3_lora_tp4(chatglm3_lora_files): trust_remote_code=True, fully_sharded_loras=False) - EXPECTED_LORA_OUTPUT = [ - "SELECT count(*) FROM singer", - "SELECT avg(age) , min(age) , max(age) FROM singer WHERE country = 'France'", # noqa: E501 - "SELECT name , country , age FROM singer ORDER BY age", - ] output1 = do_sample(llm, chatglm3_lora_files, lora_id=1) for i in range(len(EXPECTED_LORA_OUTPUT)): @@ -95,7 +82,7 @@ def test_chatglm3_lora_tp4(chatglm3_lora_files): output2 = do_sample(llm, chatglm3_lora_files, lora_id=2) for i in range(len(EXPECTED_LORA_OUTPUT)): assert output2[i] == EXPECTED_LORA_OUTPUT[i] - cleanup_dist_env_and_memory() + @multi_gpu_test(num_gpus=4) @@ -115,4 +102,3 @@ def test_chatglm3_lora_tp4_fully_sharded_loras(chatglm3_lora_files): output2 = do_sample(llm, chatglm3_lora_files, lora_id=2) for i in range(len(EXPECTED_LORA_OUTPUT)): assert output2[i] == EXPECTED_LORA_OUTPUT[i] - cleanup_dist_env_and_memory() diff --git a/tests/lora/test_llama.py b/tests/lora/test_llama.py deleted file mode 100644 index d307cc9dd6990..0000000000000 --- a/tests/lora/test_llama.py +++ /dev/null @@ -1,102 +0,0 @@ -from typing import List - -import ray - 
-import vllm -from vllm.lora.request import LoRARequest - -MODEL_PATH = "meta-llama/Llama-2-7b-hf" - - -def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: - prompts = [ - "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]", # noqa: E501 - "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? [/user] [assistant]", # noqa: E501 - "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_95 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a low tone mora with a gloss of /˩okiru/ [òkìɽɯ́]? [/user] [assistant]", # noqa: E501 - "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE candidate (people_id VARCHAR, unsure_rate INTEGER); CREATE TABLE people (sex VARCHAR, people_id VARCHAR)\n\n question: which gender got the highest average uncertain ratio. [/user] [assistant]", # noqa: E501 - "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_60 (pick INTEGER, former_wnba_team VARCHAR)\n\n question: What pick was a player that previously played for the Minnesota Lynx? [/user] [assistant]", # noqa: E501 - "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]" # noqa: E501 - ] - sampling_params = vllm.SamplingParams(temperature=0, - max_tokens=256, - stop=["[/assistant]"]) - outputs = llm.generate( - prompts, - sampling_params, - lora_request=LoRARequest(str(lora_id), lora_id, lora_path) - if lora_id else None) - # Print the outputs. 
- generated_texts: List[str] = [] - for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - generated_texts.append(generated_text) - print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") - return generated_texts - - -def test_llama_lora(sql_lora_files): - - llm = vllm.LLM(MODEL_PATH, - enable_lora=True, - max_num_seqs=16, - max_loras=4, - tensor_parallel_size=1) - - expected_no_lora_output = [ - "\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_75 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_76 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_77 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_78 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user]", # noqa: E501 - " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? ", # noqa: E501 - "\n\n answer: 1\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_96 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a high tone mora with a gloss of /˧kot/ [kòt]? [/user] [assistant]\n\n answer: 2\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_97 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a high tone mora with a gloss of /˧kot/ [kòt]? [/user] [assistant]\n\n answer: 2\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_98 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one m", # noqa: E501 - " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE candidate (people_id VARCHAR, unsure_rate INTEGER); CREATE TABLE people (sex VARCHAR, people_id VARCHAR)\n\n question: which gender got the highest average uncertain ratio. ", # noqa: E501 - " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_60 (pick INTEGER, former_wnba_team VARCHAR)\n\n question: What pick was a player that previously played for the Minnesota Lynx? 
", # noqa: E501 - "\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE", # noqa: E501 - ] - expected_lora_output = [ - " SELECT icao FROM table_name_74 WHERE airport = 'lilongwe international airport' ", # noqa: E501 - " SELECT nationality FROM table_name_11 WHERE elector = 'anchero pantaleone' ", # noqa: E501 - " SELECT one_mora FROM table_name_95 WHERE gloss = 'low tone mora with a gloss of /˩okiru/' [òkìɽɯ́] AND accented_mora = 'low tone mora with a gloss of /˩okiru/' [òkìɽɯ́] ", # noqa: E501 - " SELECT sex FROM people WHERE people_id IN (SELECT people_id FROM candidate GROUP BY sex ORDER BY COUNT(people_id) DESC LIMIT 1) ", # noqa: E501 - " SELECT pick FROM table_name_60 WHERE former_wnba_team = 'Minnesota Lynx' ", # noqa: E501 - " SELECT womens_doubles FROM table_28138035_4 WHERE mens_singles = 'Werner Schlager' " # noqa: E501 - ] - - print("lora adapter created") - assert do_sample(llm, sql_lora_files, lora_id=0) == expected_no_lora_output - - print("lora 1") - assert do_sample(llm, sql_lora_files, lora_id=1) == expected_lora_output - - print("no lora") - assert do_sample(llm, sql_lora_files, lora_id=0) == expected_no_lora_output - - print("lora 2") - assert do_sample(llm, sql_lora_files, lora_id=2) == expected_lora_output - - print("removing lora") - - -def test_llama_lora_warmup(sql_lora_files): - """Test that the LLM initialization works with a warmup LORA path and - is more conservative""" - - @ray.remote(num_gpus=1) - def get_num_gpu_blocks_lora(): - llm = vllm.LLM(MODEL_PATH, enable_lora=True, max_num_seqs=16) - num_gpu_blocks_lora_warmup = llm.llm_engine.cache_config.num_gpu_blocks - return num_gpu_blocks_lora_warmup - - @ray.remote(num_gpus=1) - def get_num_gpu_blocks_no_lora(): - llm = vllm.LLM(MODEL_PATH, max_num_seqs=16) - num_gpu_blocks_no_lora_warmup = ( - llm.llm_engine.cache_config.num_gpu_blocks) - return num_gpu_blocks_no_lora_warmup - - num_gpu_blocks_lora_warmup = ray.get(get_num_gpu_blocks_lora.remote()) - num_gpu_blocks_no_lora_warmup = ray.get( - get_num_gpu_blocks_no_lora.remote()) - assert num_gpu_blocks_lora_warmup < num_gpu_blocks_no_lora_warmup, ( - "The warmup with lora should be more " - "conservative than without lora, therefore the number of " - "memory blocks for the KV cache should be " - "less when using lora than when not using lora") diff --git a/tests/lora/test_llama_tp.py b/tests/lora/test_llama_tp.py index fce5885d40443..aae6310a2a213 100644 --- a/tests/lora/test_llama_tp.py +++ b/tests/lora/test_llama_tp.py @@ -1,5 +1,7 @@ from typing import List +import ray + import vllm from tests.utils import fork_new_process_for_each_test from vllm.lora.request import LoRARequest @@ -53,6 +55,58 @@ def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: 
 return generated_texts


+@fork_new_process_for_each_test
+def test_llama_lora(sql_lora_files):
+
+    llm = vllm.LLM(MODEL_PATH,
+                   enable_lora=True,
+                   max_num_seqs=16,
+                   max_loras=4,
+                   tensor_parallel_size=1)
+
+    print("lora adapter created")
+    assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT
+
+    print("lora 1")
+    assert do_sample(llm, sql_lora_files, lora_id=1) == EXPECTED_LORA_OUTPUT
+
+    print("no lora")
+    assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT
+
+    print("lora 2")
+    assert do_sample(llm, sql_lora_files, lora_id=2) == EXPECTED_LORA_OUTPUT
+
+    print("removing lora")
+
+
+@fork_new_process_for_each_test
+def test_llama_lora_warmup(sql_lora_files):
+    """Test that the LLM initialization works with a warmup LORA path and
+    is more conservative"""
+
+    @ray.remote(num_gpus=1)
+    def get_num_gpu_blocks_lora():
+        llm = vllm.LLM(MODEL_PATH, enable_lora=True, max_num_seqs=16)
+        num_gpu_blocks_lora_warmup = llm.llm_engine.cache_config.num_gpu_blocks
+        return num_gpu_blocks_lora_warmup
+
+    @ray.remote(num_gpus=1)
+    def get_num_gpu_blocks_no_lora():
+        llm = vllm.LLM(MODEL_PATH, max_num_seqs=16)
+        num_gpu_blocks_no_lora_warmup = (
+            llm.llm_engine.cache_config.num_gpu_blocks)
+        return num_gpu_blocks_no_lora_warmup
+
+    num_gpu_blocks_lora_warmup = ray.get(get_num_gpu_blocks_lora.remote())
+    num_gpu_blocks_no_lora_warmup = ray.get(
+        get_num_gpu_blocks_no_lora.remote())
+    assert num_gpu_blocks_lora_warmup < num_gpu_blocks_no_lora_warmup, (
+        "The warmup with lora should be more "
+        "conservative than without lora, therefore the number of "
+        "memory blocks for the KV cache should be "
+        "less when using lora than when not using lora")
+
+
 @multi_gpu_test(num_gpus=4)
 @fork_new_process_for_each_test
 def test_llama_lora_tp4(sql_lora_files):
@@ -90,6 +144,7 @@ def test_llama_lora_tp4_fully_sharded_loras(sql_lora_files):
                    max_num_seqs=16,
                    max_loras=4,
                    tensor_parallel_size=4,
+                   fully_sharded_loras=True,
                    )
     print("lora adapter created")
     assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT

From fe826eb75d4245a63d68d2c4cd7dddfaa67c95ba Mon Sep 17 00:00:00 2001
From: Jee Jee Li
Date: Fri, 22 Nov 2024 11:41:58 +0000
Subject: [PATCH 11/17] Configure LoRA TP test

Signed-off-by: Jee Jee Li
---
 .buildkite/test-pipeline.yaml | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml
index c436d2b48d20f..02b0f69d078b4 100644
--- a/.buildkite/test-pipeline.yaml
+++ b/.buildkite/test-pipeline.yaml
@@ -230,9 +230,23 @@ steps:
   source_file_dependencies:
   - vllm/lora
   - tests/lora
-  command: pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT --ignore=lora/test_long_context.py
+  command: pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT --ignore=lora/test_long_context.py --ignore=lora/test_chatglm3_tp.py --ignore=lora/test_llama_tp.py
   parallelism: 4

+- label: LoRA TP Test
+  # There is some Tensor Parallelism related processing logic in LoRA that
+  # requires multi-GPU testing for validation.
+  num_gpus: 4
+  source_file_dependencies:
+  - vllm/lora
+  - tests/lora/
+  commands:
+    - pytest -v -s -x lora/test_chatglm3_tp.py
+    - pytest -v -s -x lora/test_llama_tp.py
+
+
+
+
 - label: "PyTorch Fullgraph Smoke Test" # 9min
   fast_check: true
   source_file_dependencies:

From 0f38dde7db7e7cc90d02ba5db0d9c310873d9be7 Mon Sep 17 00:00:00 2001
From: Jee Jee Li
Date: Fri, 22 Nov 2024 11:50:28 +0000
Subject: [PATCH 12/17] Make yapf happy

Signed-off-by: Jee Jee Li
---
 tests/lora/test_chatglm3_tp.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/tests/lora/test_chatglm3_tp.py b/tests/lora/test_chatglm3_tp.py
index 07d641faaa821..f17464573459f 100644
--- a/tests/lora/test_chatglm3_tp.py
+++ b/tests/lora/test_chatglm3_tp.py
@@ -75,7 +75,6 @@ def test_chatglm3_lora_tp4(chatglm3_lora_files):
                    trust_remote_code=True,
                    fully_sharded_loras=False)

-
     output1 = do_sample(llm, chatglm3_lora_files, lora_id=1)
     for i in range(len(EXPECTED_LORA_OUTPUT)):
         assert output1[i] == EXPECTED_LORA_OUTPUT[i]
@@ -84,7 +83,6 @@ def test_chatglm3_lora_tp4(chatglm3_lora_files):
     for i in range(len(EXPECTED_LORA_OUTPUT)):
         assert output2[i] == EXPECTED_LORA_OUTPUT[i]

-
 @multi_gpu_test(num_gpus=4)
 @fork_new_process_for_each_test
 def test_chatglm3_lora_tp4_fully_sharded_loras(chatglm3_lora_files):

From b99b893a8e5846d9c111a78aaf8bd67f5af4ca56 Mon Sep 17 00:00:00 2001
From: Jee Jee Li
Date: Fri, 22 Nov 2024 14:27:48 +0000
Subject: [PATCH 13/17] Optimize unit test

Signed-off-by: Jee Jee Li
---
 .buildkite/test-pipeline.yaml | 26 +++++++++----------------
 1 file changed, 9 insertions(+), 17 deletions(-)

diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml
index 02b0f69d078b4..e1fa0e11e007d 100644
--- a/.buildkite/test-pipeline.yaml
+++ b/.buildkite/test-pipeline.yaml
@@ -233,20 +233,6 @@ steps:
   command: pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT --ignore=lora/test_long_context.py --ignore=lora/test_chatglm3_tp.py --ignore=lora/test_llama_tp.py
   parallelism: 4

-- label: LoRA TP Test
-  # There is some Tensor Parallelism related processing logic in LoRA that
-  # requires multi-GPU testing for validation.
-  num_gpus: 4
-  source_file_dependencies:
-  - vllm/lora
-  - tests/lora/
-  commands:
-    - pytest -v -s -x lora/test_chatglm3_tp.py
-    - pytest -v -s -x lora/test_llama_tp.py
-
-
-
-
 - label: "PyTorch Fullgraph Smoke Test" # 9min
   fast_check: true
   source_file_dependencies:
@@ -489,18 +475,24 @@ steps:
   - pytest -v -s distributed/test_pp_cudagraph.py
   - pytest -v -s distributed/test_pipeline_parallel.py

-- label: LoRA Long Context (Distributed) # 11min
-  # This test runs llama 13B, so it is required to run on 4 GPUs.
+- label: LoRA TP Test (Distributed)
+
   num_gpus: 4
   soft_fail: true
   source_file_dependencies:
   - vllm/lora
-  - tests/lora/test_long_context
+  - tests/lora
   commands:
     # FIXIT: find out which code initialize cuda before running the test
     # before the fix, we need to use spawn to test it
     - export VLLM_WORKER_MULTIPROC_METHOD=spawn
+    # This test runs llama 13B, so it is required to run on 4 GPUs.
     - pytest -v -s -x lora/test_long_context.py
+    # There is some Tensor Parallelism related processing logic in LoRA that
+    # requires multi-GPU testing for validation.
+    - pytest -v -s -x lora/test_chatglm3_tp.py
+    - pytest -v -s -x lora/test_llama_tp.py
+

 - label: Weight Loading Multiple GPU Test # 33min
   working_dir: "/vllm-workspace/tests"

From 80a238b3d0080c2ccdd11aa161592af50dbf44a1 Mon Sep 17 00:00:00 2001
From: Jee Jee Li
Date: Fri, 22 Nov 2024 14:31:29 +0000
Subject: [PATCH 14/17] Delete empty line

Signed-off-by: Jee Jee Li
---
 .buildkite/test-pipeline.yaml | 1 -
 tests/lora/conftest.py        | 1 -
 2 files changed, 2 deletions(-)

diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml
index e1fa0e11e007d..4d95959e0f579 100644
--- a/.buildkite/test-pipeline.yaml
+++ b/.buildkite/test-pipeline.yaml
@@ -476,7 +476,6 @@ steps:
   - pytest -v -s distributed/test_pipeline_parallel.py

 - label: LoRA TP Test (Distributed)
-
   num_gpus: 4
   soft_fail: true
   source_file_dependencies:
diff --git a/tests/lora/conftest.py b/tests/lora/conftest.py
index d71f8fffefe21..c4a05727a2d25 100644
--- a/tests/lora/conftest.py
+++ b/tests/lora/conftest.py
@@ -149,7 +149,6 @@ def sql_lora_huggingface_id():


 @pytest.fixture(scope="session")
 def sql_lora_files():
-
     return snapshot_download(repo_id=sql_lora_huggingface_id)

From b7f04798c4e0882d8962fc33f8c161acb2ddd1d5 Mon Sep 17 00:00:00 2001
From: Jee Jee Li
Date: Fri, 22 Nov 2024 15:14:44 +0000
Subject: [PATCH 15/17] Fix conftest bug

Signed-off-by: Jee Jee Li
---
 .buildkite/test-pipeline.yaml | 2 +-
 tests/lora/conftest.py        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml
index 4d95959e0f579..bff33d35b423e 100644
--- a/.buildkite/test-pipeline.yaml
+++ b/.buildkite/test-pipeline.yaml
@@ -475,7 +475,7 @@ steps:
   - pytest -v -s distributed/test_pp_cudagraph.py
   - pytest -v -s distributed/test_pipeline_parallel.py

-- label: LoRA TP Test (Distributed) 
+- label: LoRA TP Test (Distributed)
   num_gpus: 4
   soft_fail: true
   source_file_dependencies:
diff --git a/tests/lora/conftest.py b/tests/lora/conftest.py
index c4a05727a2d25..29ecf37808205 100644
--- a/tests/lora/conftest.py
+++ b/tests/lora/conftest.py
@@ -148,7 +148,7 @@ def sql_lora_huggingface_id():


 @pytest.fixture(scope="session")
-def sql_lora_files():
+def sql_lora_files(sql_lora_huggingface_id):
     return snapshot_download(repo_id=sql_lora_huggingface_id)

From 83b76c6129c113cd86b135849b38fa3f52f3876b Mon Sep 17 00:00:00 2001
From: Jee Jee Li
Date: Fri, 22 Nov 2024 16:23:04 +0000
Subject: [PATCH 16/17] Fix chatglm bug

Signed-off-by: Jee Jee Li
---
 vllm/model_executor/models/chatglm.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/vllm/model_executor/models/chatglm.py b/vllm/model_executor/models/chatglm.py
index 2ea592aaba9f9..7169c2c6461c8 100644
--- a/vllm/model_executor/models/chatglm.py
+++ b/vllm/model_executor/models/chatglm.py
@@ -694,7 +694,7 @@ class ChatGLM(ChatGLMBaseModel):
     embedding_padding_modules = []


-class ChatGLMV(ChatGLMBaseModel, SupportsMultiModal):
+class ChatGLM4V(ChatGLMBaseModel, SupportsMultiModal):
     packed_modules_mapping = {
         "query_key_value": ["query_key_value"],
         "dense_h_to_4h": ["dense_h_to_4h"],
@@ -747,7 +747,7 @@ def __new__(
         config = vllm_config.model_config.hf_config
         # Initialize VL
         if hasattr(config, "visual"):
-            return ChatGLM(vllm_config=vllm_config, prefix=prefix)
+            return ChatGLM4V(vllm_config=vllm_config, prefix=prefix)
         # Initialize LLM
         else:
-            return ChatGLMV(vllm_config=vllm_config, prefix=prefix)
+            return ChatGLM(vllm_config=vllm_config, prefix=prefix)

From 251ab4160f78065220c953b91a5865d04f2e681d Mon Sep 17 00:00:00 2001
From: 
Jee Jee Li Date: Fri, 22 Nov 2024 16:23:51 +0000 Subject: [PATCH 17/17] Fix chatglm bug Signed-off-by: Jee Jee Li --- vllm/model_executor/models/chatglm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/model_executor/models/chatglm.py b/vllm/model_executor/models/chatglm.py index 7169c2c6461c8..eb4ed4ea78dce 100644 --- a/vllm/model_executor/models/chatglm.py +++ b/vllm/model_executor/models/chatglm.py @@ -694,7 +694,7 @@ class ChatGLM(ChatGLMBaseModel): embedding_padding_modules = [] -class ChatGLM4V(ChatGLMBaseModel, SupportsMultiModal): +class ChatGLMV(ChatGLMBaseModel, SupportsMultiModal): packed_modules_mapping = { "query_key_value": ["query_key_value"], "dense_h_to_4h": ["dense_h_to_4h"], @@ -747,7 +747,7 @@ def __new__( config = vllm_config.model_config.hf_config # Initialize VL if hasattr(config, "visual"): - return ChatGLM4V(vllm_config=vllm_config, prefix=prefix) + return ChatGLMV(vllm_config=vllm_config, prefix=prefix) # Initialize LLM else: return ChatGLM(vllm_config=vllm_config, prefix=prefix)
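
Taken together, patches 16 and 17 cancel out on naming (the vision-language class ends up called `ChatGLMV`, as before) and net out to one behavioral change: the two `return` statements in `ChatGLMForCausalLM.__new__` are swapped, so an HF config that carries a `visual` section now selects the multimodal class instead of the text-only one. The sketch below illustrates that dispatch pattern with stand-in classes; it is not the vLLM implementation, and the `HFConfigStub` helper is invented for the example. Only `hasattr(config, "visual")` and the class names come from the patch.

```python
class HFConfigStub:
    """Hypothetical substitute for a HuggingFace config object."""

    def __init__(self, **fields):
        self.__dict__.update(fields)


class ChatGLM:
    """Stand-in for the text-only ChatGLM model class."""

    def __init__(self, config):
        self.config = config


class ChatGLMV(ChatGLM):
    """Stand-in for the vision-language (SupportsMultiModal) variant."""


class ChatGLMForCausalLM:
    """Facade that picks the concrete class at construction time."""

    def __new__(cls, config):
        # The bug fixed by patch 16: a config with a `visual` section must
        # map to the VL class, and everything else to the plain LLM; the
        # two branches had been returning the opposite classes.
        if hasattr(config, "visual"):
            return ChatGLMV(config)
        return ChatGLM(config)


# Because __new__ returns an instance of an unrelated class, Python skips
# ChatGLMForCausalLM.__init__ and the caller gets the concrete model.
assert isinstance(ChatGLMForCausalLM(HFConfigStub(visual={})), ChatGLMV)
assert isinstance(ChatGLMForCausalLM(HFConfigStub()), ChatGLM)
```

This facade keeps a single entry-point class while the concrete implementation varies with the checkpoint contents, which is why the fix only needed the two returns exchanged.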