# RunPod Pod Configuration
# Used by service_runpod_control.sh
#
# Usage:
#   service_runpod_control.sh create   # Create pod from this config
#   service_runpod_control.sh get      # Show pod status
#   service_runpod_control.sh start    # Start the pod
#   service_runpod_control.sh stop     # Stop the pod
#   service_runpod_control.sh remove   # Delete the pod

pod:
  # Required fields
  name: "runpod-ai-orchestrator"
  gpuType: "NVIDIA GeForce RTX 4090"
  gpuCount: 1

  # Template and volume IDs (from RunPod dashboard)
  templateId: "runpod-ai-orchestrator"
  networkVolumeId: "runpod-ai-orchestrator"

  imageName: "dev.pivoine.art/valknar/runpod-ai-orchestrator:latest"

  # Exposed ports
  ports:
    - "22/tcp"

  # Optional: Resource limits
  # containerDiskSize: 20     # GB (default: 20)
  # volumeSize: 1             # GB (default: 1)
  # volumePath: "/runpod"     # Mount path
  # mem: 20                   # Minimum memory GB
  # vcpu: 1                   # Minimum vCPUs

  # Optional: Cloud selection
  # secureCloud: false        # Use secure cloud only
  # communityCloud: false     # Use community cloud only

  # Optional: Custom image (overrides template)
  # imageName: "runpod/pytorch:2.1.0-py3.10-cuda11.8.0-devel-ubuntu22.04"

  # Optional: Environment variables
  # env:
  #   - "HF_TOKEN=your_token_here"
  #   - "CUSTOM_VAR=value"
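
# Connecting to the pod (a sketch, not config parsed by service_runpod_control.sh):
# port 22/tcp above is exposed so the running container can be reached over SSH.
# The public IP and mapped SSH port are assigned by RunPod and shown in the
# dashboard once the pod is up; the values below are placeholders, not real
# endpoints.
#
#   ssh root@<pod-public-ip> -p <mapped-ssh-port>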