Add warmup() to LlmProvider interface for eager subprocess startup:
- ManagedVllmProvider.warmup() starts vLLM in the background on project load
- ProviderRegistry.warmupAll() triggers all managed providers
- NamedProvider proxies warmup() to the inner provider
- The paginate stage generates LLM-powered descriptive page titles when available, cached by content hash, falling back to a generic "Page N"
- project-mcp-endpoint calls warmupAll() on router creation so vLLM is loading while the session initializes

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
33 lines | 942 B | Bash | Executable File
#!/bin/bash
#
# Build the python-runner Docker image and push it to the Gitea container
# registry.
#
# Usage: build-python-runner.sh [TAG]    (TAG defaults to "latest")
#
# Required env: GITEA_TOKEN — registry password (may come from ./.env).

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
cd "$PROJECT_ROOT"

# Load .env for GITEA_TOKEN; `set -a` exports everything it defines.
if [ -f .env ]; then
  set -a
  # shellcheck disable=SC1091
  source .env
  set +a
fi

# Push directly to the internal address (the external proxy has a request
# body size limit that large image layers exceed).
readonly REGISTRY="10.0.0.194:3012"
readonly IMAGE="mcpctl-python-runner"
TAG="${1:-latest}"
readonly TAG

# Fail early with a clear message instead of a cryptic login error.
: "${GITEA_TOKEN:?GITEA_TOKEN must be set (via environment or .env)}"

echo "==> Building python-runner image..."
podman build -t "$IMAGE:$TAG" -f deploy/Dockerfile.python-runner .

echo "==> Tagging as $REGISTRY/michal/$IMAGE:$TAG..."
podman tag "$IMAGE:$TAG" "$REGISTRY/michal/$IMAGE:$TAG"

echo "==> Logging in to $REGISTRY..."
# --tls-verify=false: internal registry without a trusted certificate.
# Token is fed on stdin so it never appears in `ps` output or shell history.
printf '%s' "$GITEA_TOKEN" \
  | podman login --tls-verify=false -u michal --password-stdin "$REGISTRY"

echo "==> Pushing to $REGISTRY/michal/$IMAGE:$TAG..."
podman push --tls-verify=false "$REGISTRY/michal/$IMAGE:$TAG"

echo "==> Done!"
echo "    Image: $REGISTRY/michal/$IMAGE:$TAG"