Deployments endpoints
Create, manage, and control container deployments.
All deployment endpoints require authentication. Include the bearer token in the Authorization header.
POST
/deployments
create a new deployment
request body
{
"nodeId": "node_abc123", // required
"templateId": "ollama", // or customImage
"customImage": null, // docker image if not using template
"name": "my-ollama-server", // optional
"env": { // optional
"OLLAMA_MODELS": "llama2,mistral"
}
}
Response
{
"deployment": {
"id": "dep_xyz789",
"userId": "user_abc123",
"nodeId": "node_abc123",
"templateId": "ollama",
"name": "my-ollama-server",
"status": "pending",
"endpoints": [],
"env": { "OLLAMA_MODELS": "llama2,mistral" },
"totalCost": 0,
"createdAt": "2024-01-15T10:30:00Z"
},
"estimatedHourlyCost": 150
}
GET
/deployments
list your deployments
query parameters
status — string (optional) — filter by status
page — number (default: 1)
limit — number (default: 20, max: 100)
response
{
"deployments": [...],
"pagination": {
"page": 1,
"limit": 20,
"total": 5,
"totalPages": 1
}
}
GET
/deployments/:id
get deployment details
response
{
"deployment": {
"id": "dep_xyz789",
"status": "running",
"endpoints": [
{
"type": "https",
"url": "https://dep-xyz789.neuranet.network",
"internalPort": 11434
}
],
"totalCost": 250,
"createdAt": "2024-01-15T10:30:00Z",
"startedAt": "2024-01-15T10:32:00Z"
}
}
POST
/deployments/:id/start
start a stopped deployment
response
{
"deployment": {
"id": "dep_xyz789",
"status": "starting"
}
}
POST
/deployments/:id/stop
stop a running deployment
response
{
"deployment": {
"id": "dep_xyz789",
"status": "stopping"
}
}
DELETE
/deployments/:id
delete a deployment (stops if running)
response
{
"success": true,
"message": "Deployment deleted"
}
GET
/deployments/:id/logs
get deployment logs
query parameters
tail — number (default: 100) — last n lines
since — string (optional) — ISO 8601 timestamp
response
{
"logs": [
{
"timestamp": "2024-01-15T10:32:00Z",
"stream": "stdout",
"message": "Server started on port 11434"
},
{
"timestamp": "2024-01-15T10:32:01Z",
"stream": "stdout",
"message": "Loading model llama2..."
}
]
}