
InferX AI Function Platform (Lambda Function for Inference)

    --   Serve tens of models on a single box with ultra-fast (<2 sec) cold starts (contact: support@inferx.net)



Model llava-1.5-7b-hf

| namespace | model name      | standby gpu | standby pageable | standby pinned | gpu count | vRam (MB) | cpu (cores) | memory (MB) | state  | revision |
|-----------|-----------------|-------------|------------------|----------------|-----------|-----------|-------------|-------------|--------|----------|
| llava-hf  | llava-1.5-7b-hf | Blob        | Blob             | Blob           | 1         | 14000     | 20.0        | 12000       | Normal | 281      |


Sample REST Call
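A minimal sketch of the call in Python, assuming the pod's HTTP endpoint (port 8000, per the Func spec below) is reachable at node3. The request body simply mirrors the sample_query fields in the Func spec; the exact schema expected by run_llava.py is an assumption.

# Sketch of the sample REST call. INFERX_HOST is hypothetical; use
# your gateway or node address. The body mirrors the sample_query
# fields below; the exact schema run_llava.py expects is an assumption.
import requests

INFERX_HOST = "node3"

resp = requests.post(
    f"http://{INFERX_HOST}:8000/v1/completions",
    json={
        "prompt": "What is shown in this image?",
        "image": "https://www.ilankelman.org/stopsigns/australia.jpg",
    },
    timeout=60,
)
resp.raise_for_status()
print(resp.json())

An equivalent curl invocation would POST the same JSON body to the same URL.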

Pods

tenant: public
namespace: llava-hf
pod name: public/llava-hf/llava-1.5-7b-hf/281/952
state: Standby
required resource: {'CPU': 20000, 'Mem': 12000, 'GPU': {'Type': 'Any', 'Count': 1, 'vRam': 14000}}
allocated resource: {'nodename': 'node3', 'CPU': 20000, 'Mem': 12000, 'GPUType': 'A4000', 'GPUs': {'vRam': 0, 'map': {}, 'slotSize': 0, 'totalSlotCnt': 0}, 'MaxContextPerGPU': 2}
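
The required-resource dict is what the scheduler must find free on a node before the pod can be placed. As an illustration of the data shape only (not InferX's actual scheduler code, and the fits helper and node capacity values are hypothetical), such a check could look like:

# Illustrative only: does a node's free capacity satisfy a
# 'required resource' dict of the shape shown above?
def fits(required: dict, free: dict) -> bool:
    if required["CPU"] > free.get("CPU", 0):
        return False
    if required["Mem"] > free.get("Mem", 0):
        return False
    gpu = required["GPU"]
    # 'Any' matches every GPU type; otherwise types must agree.
    if gpu["Type"] not in ("Any", free.get("GPUType")):
        return False
    return (gpu["Count"] <= free.get("GPUCount", 0)
            and gpu["vRam"] <= free.get("vRam", 0))

required = {"CPU": 20000, "Mem": 12000,
            "GPU": {"Type": "Any", "Count": 1, "vRam": 14000}}
node3 = {"CPU": 20000, "Mem": 12000,
         "GPUType": "A4000", "GPUCount": 1, "vRam": 16000}
print(fits(required, node3))  # True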

Func

{
 "image": "vllm-openai-upgraded:v0.1.0",
 "commands": [
  "/usr/lib/run_llava.py"
 ],
 "envs": [
  [
   "LD_LIBRARY_PATH",
   "/usr/local/lib/python3.12/dist-packages/nvidia/cuda_nvrtc/lib/:$LD_LIBRARY_PATH"
  ]
 ],
 "mounts": [
  {
   "hostpath": "/home/brad/cache",
   "mountpath": "/root/.cache/huggingface"
  }
 ],
 "endpoint": {
  "port": 8000,
  "schema": "Http",
  "probe": "/health"
 },
 "version": 281,
 "entrypoint": [
  "/usr/bin/python3"
 ],
 "resources": {
  "CPU": 20000,
  "Mem": 12000,
  "GPU": {
   "Type": "Any",
   "Count": 1,
   "vRam": 14000
  }
 },
 "standby": {
  "gpu": "Blob",
  "pageable": "Blob",
  "pinned": "Blob"
 },
 "probe": {
  "port": 80,
  "schema": "Http",
  "probe": "/health"
 },
 "sample_query": {
  "apiType": "llava",
  "path": "v1/completions",
  "prompt": "What is shown in this image?",
  "body": {
   "image": "https://www.ilankelman.org/stopsigns/australia.jpg"
  }
 }
}
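
The endpoint block above describes how the platform decides an instance is ready: an HTTP GET against the probe path on the serving port. A sketch of that readiness check, assuming the spec is saved as func.json and the pod is reachable at a hypothetical host (the retry policy here is an assumption, not InferX internals):

# Readiness check implied by the 'endpoint' block: poll the probe
# path until it answers 200. Host name and retry budget are assumed.
import json
import time
import requests

spec = json.load(open("func.json"))
url = f"http://node3:{spec['endpoint']['port']}{spec['endpoint']['probe']}"

for _ in range(30):               # roughly a 30 s budget
    try:
        if requests.get(url, timeout=1).status_code == 200:
            print("ready:", url)
            break
    except requests.ConnectionError:
        pass
    time.sleep(1)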