+import os
+import asyncio
+import logging
+import subprocess  # kept for subprocess.CalledProcessError
+
+from fastapi import HTTPException, Query, Depends
+
+from ..models import ModelEnum, BenchmarkRequest, PerplexityRequest
+from ..utils import parse_benchmark_data, parse_perplexity_data
+
+# --- Logging configuration for this module ---
+logger = logging.getLogger(__name__)
+
+def validate_prompt_length(prompt: str = Query(..., description="Input text for perplexity calculation"), ctx_size: int = Query(10, gt=3)) -> str:
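+    # Whitespace splitting is only a rough proxy for the model's tokenizer count,
+    # but it is enough to reject prompts that cannot fill two context windows.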
+    token_count = len(prompt.split())
+    min_tokens = 2 * ctx_size
+    if token_count < min_tokens:
+        raise HTTPException(
+            status_code=400,
+            detail=f"Prompt too short. Needs at least {min_tokens} tokens, got {token_count}"
+        )
+    return prompt
+
+async def run_perplexity(
+    model: ModelEnum,
+    prompt: str = Depends(validate_prompt_length),
+    threads: int = Query(2, gt=0),
+    ctx_size: int = Query(10, gt=3),
+    ppl_stride: int = Query(0, ge=0)
+):
+    """Calculate perplexity for the given text and model."""
+    try:
+        request = PerplexityRequest(
+            model=model,
+            prompt=prompt,
+            threads=threads,
+            ctx_size=ctx_size,
+            ppl_stride=ppl_stride
+        )
+    except ValueError as e:
+        raise HTTPException(status_code=400, detail=str(e))
+
+    build_dir = os.getenv("BUILD_DIR", "build")
+    ppl_path = os.path.join(build_dir, "bin", "llama-perplexity")
+    if not os.path.exists(ppl_path):
+        logger.error(f"Perplexity binary not found at '{ppl_path}'.")
+        raise HTTPException(status_code=500, detail="Perplexity binary not found")
+
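+    # Flag values come straight from the validated request; --ppl-stride controls the
+    # stride between evaluated context windows (0 keeps the tool's default).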
+    command = [
+        ppl_path,
+        '--model', request.model.value,
+        '--prompt', request.prompt,
+        '--threads', str(request.threads),
+        '--ctx-size', str(request.ctx_size),
+        '--perplexity',
+        '--ppl-stride', str(request.ppl_stride)
+    ]
+
+    try:
+        logger.info(f"Running perplexity calculation with command: {' '.join(command)}")
+        process = await asyncio.create_subprocess_exec(
+            *command,
+            stdout=asyncio.subprocess.PIPE,  # the tool may write to stdout or stderr
+            stderr=asyncio.subprocess.PIPE
+        )
+        stdout_bytes, stderr_bytes = await process.communicate()
+
+        if process.returncode != 0:
+            logger.error(f"Perplexity calculation failed. RC: {process.returncode}. Stderr: {stderr_bytes.decode(errors='ignore')}")
+            raise subprocess.CalledProcessError(
+                process.returncode, cmd=command, output=stdout_bytes, stderr=stderr_bytes
+            )
+
+        # The perplexity results have historically been parsed from stderr; keep that behaviour.
+        parsed_data = parse_perplexity_data(stderr_bytes.decode(errors='ignore'))
+        logger.info("Perplexity calculation completed successfully.")
+        return parsed_data
+    except subprocess.CalledProcessError as e:
+        logger.error(f"Perplexity calculation failed: {str(e)}. Command: {e.cmd}. RC: {e.returncode}. Stderr: {e.stderr.decode(errors='ignore') if e.stderr else ''}", exc_info=True)
+        raise HTTPException(status_code=500, detail=f"Perplexity calculation failed: {e.stderr.decode(errors='ignore') if e.stderr else str(e)}")
+    except Exception as e:  # catch any other unexpected errors
+        logger.error(f"Unexpected error during perplexity calculation: {str(e)}", exc_info=True)
+        raise HTTPException(status_code=500, detail=f"An unexpected error occurred during perplexity calculation: {str(e)}")
+
+def get_model_sizes():
+    """Endpoint to get the file sizes of supported .gguf models."""
+    model_sizes = {}
+    models_dir = "models"
+    if not os.path.isdir(models_dir):
+        logger.error(f"Models directory '{models_dir}' not found.")
+        raise HTTPException(status_code=500, detail="Models directory not found")
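+    # Expects the layout models/<model-dir>/<file>.gguf; each .gguf file is reported individually.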
+    for subdir in os.listdir(models_dir):
+        subdir_path = os.path.join(models_dir, subdir)
+        if os.path.isdir(subdir_path):
+            for file in os.listdir(subdir_path):
+                if file.endswith(".gguf"):
+                    file_path = os.path.join(subdir_path, file)
+                    file_size_bytes = os.path.getsize(file_path)
+                    file_size_mb = round(file_size_bytes / (1024 * 1024), 3)
+                    file_size_gb = round(file_size_bytes / (1024 * 1024 * 1024), 3)
+                    model_sizes[file] = {
+                        "bytes": file_size_bytes,
+                        "MB": file_size_mb,
+                        "GB": file_size_gb
+                    }
+    return model_sizes
+
+async def run_benchmark(
+    model: ModelEnum,
+    n_token: int = Query(128, gt=0),
+    threads: int = Query(2, gt=0),
+    n_prompt: int = Query(32, gt=0)
+):
+    """Run a benchmark on the specified model."""
+    request = BenchmarkRequest(model=model, n_token=n_token, threads=threads, n_prompt=n_prompt)
+    build_dir = os.getenv("BUILD_DIR", "build")
+    bench_path = os.path.join(build_dir, "bin", "llama-bench")
+    if not os.path.exists(bench_path):
+        logger.error(f"Benchmark binary not found at '{bench_path}'.")
+        raise HTTPException(status_code=500, detail="Benchmark binary not found")
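+    # llama-bench flags: -m model path, -p prompt tokens, -n generated tokens,
+    # -t CPU threads, -r 5 repetitions; -ngl 0 and -b 1 pin the run to CPU with batch size 1.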
+    command = [
+        bench_path,
+        '-m', request.model.value,
+        '-n', str(request.n_token),
+        '-ngl', '0',
+        '-b', '1',
+        '-t', str(request.threads),
+        '-p', str(request.n_prompt),
+        '-r', '5'
+    ]
+    try:
+        logger.info(f"Running benchmark with command: {' '.join(command)}")
+        # Run llama-bench asynchronously so the event loop is not blocked.
+        process = await asyncio.create_subprocess_exec(
+            *command,
+            stdout=asyncio.subprocess.PIPE,
+            stderr=asyncio.subprocess.PIPE
+        )
+        stdout_bytes, stderr_bytes = await process.communicate()  # wait for completion
+
+        if process.returncode != 0:
+            logger.error(f"Benchmark failed. RC: {process.returncode}. Stderr: {stderr_bytes.decode(errors='ignore')}")
+            raise subprocess.CalledProcessError(
+                process.returncode, cmd=command, output=stdout_bytes, stderr=stderr_bytes
+            )
+
+        parsed_data = parse_benchmark_data(stdout_bytes.decode(errors='ignore'))
+        logger.info("Benchmark completed successfully.")
+        return parsed_data
+    except subprocess.CalledProcessError as e:
+        # Log full details from the CalledProcessError before mapping it to an HTTP 500.
+        logger.error(f"Benchmark failed: {str(e)}. Command: {e.cmd}. RC: {e.returncode}. Stdout: {e.stdout.decode(errors='ignore') if e.stdout else ''}. Stderr: {e.stderr.decode(errors='ignore') if e.stderr else ''}", exc_info=True)
+        raise HTTPException(status_code=500, detail=f"Benchmark failed: {e.stderr.decode(errors='ignore') if e.stderr else str(e)}")
+    except Exception as e:  # catch any other unexpected errors
+        logger.error(f"Unexpected error during benchmark: {str(e)}", exc_info=True)
+        raise HTTPException(status_code=500, detail=f"An unexpected error occurred during benchmark: {str(e)}")
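
A minimal sketch of how these handlers might be exposed over HTTP, assuming they are mounted on a FastAPI APIRouter; the router instance and URL paths shown are hypothetical:

    # Hypothetical wiring example: register this module's handlers on a router.
    from fastapi import APIRouter

    router = APIRouter()
    router.add_api_route("/benchmark", run_benchmark, methods=["GET"])
    router.add_api_route("/perplexity", run_perplexity, methods=["GET"])
    router.add_api_route("/model-sizes", get_model_sizes, methods=["GET"])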