# Inference config for Llama 3.3 70B Instruct with vLLM.
#
# Requirements:
# - Run `pip install vllm`
# - Log into HF: `huggingface-cli login`
# - Request access to Llama 3.3: https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct
#
# Usage:
# oumi infer -i -c configs/recipes/llama3_3/inference/70b_vllm_infer.yaml
#
# See Also:
# - Documentation: https://oumi.ai/docs/en/latest/user_guides/infer/infer.html
# - Config class: oumi.core.configs.InferenceConfig
# - Config source: https://github.com/oumi-ai/oumi/blob/main/src/oumi/core/configs/inference_config.py
# - Other inference configs: configs/**/inference/
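#
# Programmatic usage (a minimal sketch, assuming the `oumi` Python package
# exposes a top-level `infer()` entry point and that `InferenceConfig`
# supports loading via `from_yaml()`; see the docs and config class links
# above to confirm):
#
#   from oumi import infer
#   from oumi.core.configs import InferenceConfig
#
#   # Load this file into the config class referenced above.
#   config = InferenceConfig.from_yaml(
#       "configs/recipes/llama3_3/inference/70b_vllm_infer.yaml"
#   )
#   conversations = infer(config)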
model:
  model_name: "meta-llama/Llama-3.3-70B-Instruct"
  model_max_length: 2048
  torch_dtype_str: "bfloat16"
  attn_implementation: "sdpa"
  load_pretrained_weights: True
  trust_remote_code: True

generation:
  max_new_tokens: 2048
  batch_size: 1

engine: VLLM