
InferX AI Function Platform (Lambda Function for Inference)

    --   Serve tens of models in one box with ultra-fast (<2 sec) cold start (contact: support@inferx.net)

Node

Node Name: node2

{
 "nodeIp": "192.168.0.44",
 "podMgrPort": 1233,
 "tsotSvcPort": 1235,
 "stateSvcPort": 1236,
 "cidr": "10.1.2.0/8",
 "resources": {
  "nodename": "node2",
  "CPU": 30000,
  "Mem": 400000,
  "GPUType": "A4000",
  "GPUs": {
   "vRam": 14848,
   "map": {
    "0": {
     "contextCnt": 1,
     "slotCnt": 58
    },
    "1": {
     "contextCnt": 1,
     "slotCnt": 58
    }
   },
   "slotSize": 268435456,
   "totalSlotCnt": 58
  },
  "MaxContextPerGPU": 1
 },
 "blobStoreEnable": true
}
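
The GPU fields in this node descriptor are internally consistent: slotSize is given in bytes (268435456 B = 256 MiB), and totalSlotCnt × slotSize accounts for the vRam reported per GPU (58 × 256 MiB = 14848 MiB). The Python sketch below checks that relationship for the GPU section of node2; the unit interpretations (slotSize in bytes, vRam in MiB) and the helper name are assumptions for illustration, not part of a documented InferX API.

import json

# GPU section of the node2 descriptor shown above (assumed units:
# slotSize in bytes, vRam in MiB).
node2_gpus = json.loads("""
{
  "vRam": 14848,
  "map": {
    "0": {"contextCnt": 1, "slotCnt": 58},
    "1": {"contextCnt": 1, "slotCnt": 58}
  },
  "slotSize": 268435456,
  "totalSlotCnt": 58
}
""")

MIB = 1024 * 1024

def check_slot_accounting(gpus: dict) -> None:
    # A slot is the unit of GPU memory accounting here; 268435456 B == 256 MiB.
    slot_mib = gpus["slotSize"] // MIB
    # Per-GPU capacity: 58 slots * 256 MiB = 14848 MiB, matching "vRam".
    assert gpus["totalSlotCnt"] * slot_mib == gpus["vRam"]
    for gpu_id, gpu in gpus["map"].items():
        print(f"GPU {gpu_id}: {gpu['slotCnt']} slots × {slot_mib} MiB = "
              f"{gpu['slotCnt'] * slot_mib} MiB, "
              f"{gpu['contextCnt']} concurrent context(s)")

check_slot_accounting(node2_gpus)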