# SPDX-License-Identifier: Apache-2.0

import pytest
import torch

from vllm.model_executor.models.vision import resolve_visual_encoder_outputs


@pytest.mark.parametrize(
    ("feature_sample_layers", "num_layers_loaded", "max_possible_layers",
     "expected_features"),
    [
        # All layers loaded; negative indices resolve against the full
        # depth, so [-10, -1] selects the same layers as [1, 10].
        ([1, 10], 10, 10, [1, 10]),
        ([-10, -1], 10, 10, [1, 10]),
        # Some layers not loaded; negative indices are still relative to
        # max_possible_layers, not to the number of layers loaded.
        ([1, 10], 10, 20, [1, 10]),
        ([-20, -11], 10, 20, [1, 10]),
    ])
def test_resolve_visual_encoder_outputs(feature_sample_layers,
                                        num_layers_loaded, max_possible_layers,
                                        expected_features):
    """
    Test that positive and negative layer offsets are resolved correctly
    for vision feature layers.
    """
    encoder_outputs = [
        torch.tensor([idx]) for idx in range(num_layers_loaded + 1)
    ]
    output_tensor = resolve_visual_encoder_outputs(
        encoder_outputs=encoder_outputs,
        feature_sample_layers=feature_sample_layers,
        post_layer_norm=None,
        max_possible_layers=max_possible_layers)
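    # The resolved output should contain exactly the features of the
    # requested layers, in the order they were requested.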
    assert torch.equal(torch.tensor(expected_features), output_tensor)