- Add warmup() to LlmProvider interface for eager subprocess startup
- ManagedVllmProvider.warmup() starts vLLM in background on project load
- ProviderRegistry.warmupAll() triggers all managed providers
- NamedProvider proxies warmup() to inner provider
- paginate stage generates LLM-powered descriptive page titles when available, cached by content hash, falls back to generic "Page N"
- project-mcp-endpoint calls warmupAll() on router creation so vLLM is loading while the session initializes

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
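A minimal sketch of the warmup wiring the commit message describes. The interface shape, method signatures, and registry internals here are assumptions for illustration, not this repo's actual code:

// Hypothetical shapes; only the warmup() proxying pattern is taken
// from the commit message above.
interface LlmProvider {
  complete(prompt: string): Promise<string>;
  // Optional eager startup; providers without a subprocess can omit it.
  warmup?(): void;
}

class NamedProvider implements LlmProvider {
  constructor(private name: string, private inner: LlmProvider) {}
  complete(prompt: string): Promise<string> {
    return this.inner.complete(prompt);
  }
  // Proxy warmup() so registry-level warmup reaches the wrapped provider.
  warmup(): void {
    this.inner.warmup?.();
  }
}

class ProviderRegistry {
  private providers: LlmProvider[] = [];
  register(provider: LlmProvider): void {
    this.providers.push(provider);
  }
  // Fire-and-forget: kick off background startup for every provider that
  // supports it, e.g. on router creation so vLLM loads while the session
  // initializes.
  warmupAll(): void {
    for (const provider of this.providers) provider.warmup?.();
  }
}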
102 lines · 3.0 KiB · TypeScript
import { describe, it, expect } from 'vitest';

import { MemoryCache } from '../src/proxymodel/cache.js';

describe('MemoryCache', () => {
  it('returns computed value on cache miss', async () => {
    const cache = new MemoryCache();
    const value = await cache.getOrCompute('key1', async () => 'computed');
    expect(value).toBe('computed');
  });

  it('returns cached value on cache hit', async () => {
    const cache = new MemoryCache();
    let callCount = 0;
    const compute = async () => { callCount++; return 'computed'; };

    await cache.getOrCompute('key1', compute);
    const value = await cache.getOrCompute('key1', compute);

    expect(value).toBe('computed');
    expect(callCount).toBe(1); // Only computed once
  });

  it('get/set work for manual cache operations', async () => {
    const cache = new MemoryCache();

    expect(await cache.get('missing')).toBeNull();

    await cache.set('key1', 'value1');
    expect(await cache.get('key1')).toBe('value1');
  });

  it('hash produces consistent short hashes', () => {
    const cache = new MemoryCache();
    const hash1 = cache.hash('hello world');
    const hash2 = cache.hash('hello world');
    const hash3 = cache.hash('different content');

    expect(hash1).toBe(hash2);
    expect(hash1).not.toBe(hash3);
    expect(hash1).toHaveLength(16);
  });

  it('evicts oldest entry when at capacity', async () => {
    const cache = new MemoryCache({ maxEntries: 3 });

    await cache.set('a', '1');
    await cache.set('b', '2');
    await cache.set('c', '3');
    expect(cache.size).toBe(3);

    // Adding 4th should evict 'a' (oldest)
    await cache.set('d', '4');
    expect(cache.size).toBe(3);
    expect(await cache.get('a')).toBeNull();
    expect(await cache.get('b')).toBe('2');
    expect(await cache.get('d')).toBe('4');
  });

  it('accessing an entry refreshes its LRU position', async () => {
    const cache = new MemoryCache({ maxEntries: 3 });

    await cache.set('a', '1');
    await cache.set('b', '2');
    await cache.set('c', '3');

    // Access 'a' to refresh it
    await cache.get('a');

    // Adding 'd' should evict 'b' (now oldest), not 'a'
    await cache.set('d', '4');
    expect(await cache.get('a')).toBe('1');
    expect(await cache.get('b')).toBeNull();
  });

  it('getOrCompute refreshes LRU position on hit', async () => {
    const cache = new MemoryCache({ maxEntries: 3 });

    await cache.set('a', '1');
    await cache.set('b', '2');
    await cache.set('c', '3');

    // Hit 'a' via getOrCompute
    await cache.getOrCompute('a', async () => 'should not run');

    // Evict: 'b' should go, not 'a'
    await cache.set('d', '4');
    expect(await cache.get('a')).toBe('1');
    expect(await cache.get('b')).toBeNull();
  });

  it('clear removes all entries', async () => {
    const cache = new MemoryCache();
    await cache.set('a', '1');
    await cache.set('b', '2');
    expect(cache.size).toBe(2);

    cache.clear();
    expect(cache.size).toBe(0);
    expect(await cache.get('a')).toBeNull();
  });
});
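For reference, a minimal MemoryCache sketch that would satisfy these tests. The real src/proxymodel/cache.js may differ (hash algorithm, storage backend, extra options), so treat every detail below as an assumption inferred from the assertions above:

import { createHash } from 'node:crypto';

// Hypothetical implementation consistent with the tests; not the repo's code.
export class MemoryCache {
  // Map preserves insertion order, which doubles as LRU order here.
  private entries = new Map<string, string>();

  constructor(private opts: { maxEntries?: number } = {}) {}

  get size(): number {
    return this.entries.size;
  }

  async get(key: string): Promise<string | null> {
    if (!this.entries.has(key)) return null;
    const value = this.entries.get(key)!;
    // Re-insert to move the key to the back of the Map's insertion
    // order, refreshing its LRU position.
    this.entries.delete(key);
    this.entries.set(key, value);
    return value;
  }

  async set(key: string, value: string): Promise<void> {
    this.entries.delete(key);
    this.entries.set(key, value);
    const max = this.opts.maxEntries ?? Infinity;
    // Evict from the front of the insertion order: the least recently used.
    while (this.entries.size > max) {
      const oldest = this.entries.keys().next().value as string;
      this.entries.delete(oldest);
    }
  }

  async getOrCompute(key: string, compute: () => Promise<string>): Promise<string> {
    const hit = await this.get(key); // get() also refreshes LRU position
    if (hit !== null) return hit;
    const value = await compute();
    await this.set(key, value);
    return value;
  }

  // 16-hex-char content hash, stable for identical input; the tests only
  // require consistency and length, so sha256 here is an assumption.
  hash(content: string): string {
    return createHash('sha256').update(content).digest('hex').slice(0, 16);
  }

  clear(): void {
    this.entries.clear();
  }
}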