
InferX AI Function Platform (Lambda Function for Inference)

    --   Serve tens of models in one box with ultra-fast (<2 sec) cold start (contact: support@inferx.net)



Node

Node Name: node3

{
 "nodeIp": "192.168.0.44",
 "podMgrPort": 1233,
 "tsotSvcPort": 1235,
 "stateSvcPort": 1236,
 "cidr": "10.1.2.0/8",
 "resources": {
  "nodename": "node3",
  "CPU": 30000,
  "Mem": 400000,
  "GPUType": "A4000",
  "GPUs": {
   "vRam": 14336,
   "map": {
    "0": {
     "contextCnt": 2,
     "slotCnt": 56
    },
    "1": {
     "contextCnt": 2,
     "slotCnt": 56
    }
   },
   "slotSize": 268435456,
   "totalSlotCnt": 56
  },
  "MaxContextPerGPU": 2
 },
 "blobStoreEnable": true
}
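
For reference, the slotSize of 268435456 bytes is 256 MiB, so the 56 slots per GPU correspond to the 14336 MiB of vRam listed above. The following Python sketch is not part of InferX; the function name summarize_node and the file name node3.json are assumptions for illustration. It shows how a node record like the one above could be parsed to summarize per-GPU capacity.

import json

def summarize_node(node: dict) -> None:
    # Field names ("resources", "GPUs", "slotSize", ...) follow the node record shown above.
    res = node["resources"]
    gpus = res["GPUs"]
    slot_mib = gpus["slotSize"] / (1 << 20)           # 268435456 bytes = 256 MiB per slot
    print(f"node {res['nodename']} ({node['nodeIp']}), GPU type {res['GPUType']}")
    for gpu_id, gpu in gpus["map"].items():
        vram_mib = gpu["slotCnt"] * slot_mib          # e.g. 56 slots * 256 MiB = 14336 MiB
        print(f"  GPU {gpu_id}: {gpu['slotCnt']} slots "
              f"({vram_mib:.0f} MiB vRAM), {gpu['contextCnt']} contexts")

if __name__ == "__main__":
    # node3.json is a hypothetical file holding the JSON record shown above.
    with open("node3.json") as f:
        summarize_node(json.load(f))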