Templates

Browse and filter deployment templates.

Get all templates

// Fetch the complete list of available deployment templates.
const allTemplates = await client.getTemplates();

// The call resolves to an array of TemplateSummary objects, e.g.:
// [
//   {
//     id: "ollama",
//     slug: "ollama",
//     name: "Ollama",
//     description: "Run LLMs locally",
//     category: "llm",
//     requiresGpu: true,
//     minGpuVram: 8000,
//   },
//   ...
// ]

Filter templates

// Restrict results to a single category.
const llmOnly = await client.getTemplates({ category: 'llm' });

// Only templates that require a GPU.
const needsGpu = await client.getTemplates({ requiresGpu: true });

// Text search against template name and description.
const matches = await client.getTemplates({ search: 'llama' });

// Filters may be combined in a single call.
const narrowed = await client.getTemplates({
  category: 'ai-inference',
  requiresGpu: true,
  search: 'llama',
});

Get template details

// Look up a single template by its ID or slug.
const ollama = await client.getTemplate('ollama');

// The call resolves to a full Template object, e.g.:
// {
//   id: "ollama",
//   slug: "ollama",
//   name: "Ollama",
//   description: "Run LLMs locally with Ollama",
//   category: "llm",
//   dockerImage: "ollama/ollama:latest",
//   defaultEnv: { OLLAMA_MODELS: "llama2" },
//   ports: [{ internal: 11434, protocol: "http" }],
//   requiresGpu: true,
//   minGpuVram: 8000,
//   minCpuCores: 4,
//   minRamMb: 8192,
//   estimatedPullTime: 120,
// }

Template types

/**
 * Lightweight template descriptor returned by `client.getTemplates()`.
 */
interface TemplateSummary {
  /** Unique template identifier (e.g. "ollama"). */
  id: string;
  /** URL-friendly identifier; may match `id`. */
  slug: string;
  /** Human-readable display name. */
  name: string;
  /** Short description of what the template runs. */
  description: string;
  /** Category tag used for filtering (e.g. "llm"). */
  category: string;
  /** Whether the template needs a GPU to run. */
  requiresGpu: boolean;
  /** Minimum GPU VRAM required — presumably MB (example shows 8000); confirm with API docs. */
  minGpuVram?: number;
}

/**
 * Full template detail returned by `client.getTemplate()`; adds
 * deployment specifics on top of `TemplateSummary`.
 */
interface Template extends TemplateSummary {
  /** Docker image reference to deploy (e.g. "ollama/ollama:latest"). */
  dockerImage: string;
  /** Default environment variables applied to the container. */
  defaultEnv?: Record<string, string>;
  /** Ports the container exposes. */
  ports: TemplatePort[];
  /** Minimum CPU cores required. */
  minCpuCores: number;
  /** Minimum RAM required, in MB (per the field name). */
  minRamMb: number;
  /** Minimum disk required, in GB, when the template specifies one. */
  minDiskGb?: number;
  /** Estimated image pull time — presumably seconds (example shows 120); confirm with API docs. */
  estimatedPullTime?: number;
  /** Optional documentation text or link — format not shown in this file. */
  documentation?: string;
}

/** A single port exposed by a template's container. */
interface TemplatePort {
  /** Container-internal port number (e.g. 11434). */
  internal: number;
  /** Protocol served on this port. */
  protocol: 'http' | 'https' | 'tcp' | 'udp';
  /** Optional human-readable note about what the port serves. */
  description?: string;
}