diff --git a/deploy/Dockerfile.mcpd b/deploy/Dockerfile.mcpd index 29c6c76..ddffec1 100644 --- a/deploy/Dockerfile.mcpd +++ b/deploy/Dockerfile.mcpd @@ -1,3 +1,9 @@ +# syntax=docker/dockerfile:1.6 +# `# syntax=...` enables BuildKit's --mount feature on the builder so we can +# share the pnpm content-addressed store across image builds. Without it the +# next two RUN steps fall back to plain mode and the cache mount is ignored +# (build still works, just slower). + # Stage 1: Build TypeScript FROM node:20-alpine AS builder @@ -12,8 +18,12 @@ COPY src/db/package.json src/db/tsconfig.json src/db/ COPY src/shared/package.json src/shared/tsconfig.json src/shared/ COPY src/web/package.json src/web/tsconfig.json src/web/ -# Install all dependencies -RUN pnpm install --frozen-lockfile +# Install all dependencies. The cache mount keeps pnpm's CAS store warm +# across builds: only newly-changed packages get downloaded; everything +# else hardlinks from the cache. Drops install from ~60s to <5s on a +# warm cache. `--frozen-lockfile` still guarantees lockfile fidelity. +RUN --mount=type=cache,id=pnpm-store-mcpd-builder,target=/root/.local/share/pnpm/store \ + pnpm install --frozen-lockfile # Copy source code COPY src/mcpd/src/ src/mcpd/src/ @@ -42,8 +52,11 @@ COPY src/mcpd/package.json src/mcpd/ COPY src/db/package.json src/db/ COPY src/shared/package.json src/shared/ -# Install all deps (prisma CLI needed at runtime for db push) -RUN pnpm install --frozen-lockfile +# Install all deps (prisma CLI needed at runtime for db push). Same +# cache-mount trick as the builder stage; separate cache id so the two +# stages don't compete for the same lock. +RUN --mount=type=cache,id=pnpm-store-mcpd-runtime,target=/root/.local/share/pnpm/store \ + pnpm install --frozen-lockfile # Copy prisma schema and generate client COPY src/db/prisma/ src/db/prisma/ diff --git a/vitest.config.ts b/vitest.config.ts index a1c7953..1b36b85 100644 --- a/vitest.config.ts +++ b/vitest.config.ts @@ -1,8 +1,21 @@ import { defineConfig } from 'vitest/config'; +import { availableParallelism } from 'node:os'; + +// Default vitest's pool to ~half the CPU threads we have. The previous +// implicit default left this 64-thread workstation at ~10% utilization +// during `pnpm test:run`. Half is a soft cap that stays kind to laptops +// (8-thread → 4 workers) while letting beefy hosts push closer to the +// box's actual capacity. Override at run time with VITEST_MAX_THREADS. +const cores = availableParallelism(); +const maxThreads = Number(process.env['VITEST_MAX_THREADS'] ?? Math.max(2, Math.floor(cores / 2))); export default defineConfig({ test: { globals: true, + pool: 'threads', + poolOptions: { + threads: { maxThreads, minThreads: 1 }, + }, coverage: { provider: 'v8', reporter: ['text', 'json', 'html'],