import type {
  LanguageModelV3Middleware,
  LanguageModelV3StreamPart,
} from '@ai-sdk/provider';
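
/**
 * Logging middleware that traces every generate and stream call with a
 * per-request ID, request settings, timing, output text, and token usage.
 */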
export const debugMiddleware: LanguageModelV3Middleware = {
  middlewareVersion: 'v3',
  wrapGenerate: async ({ doGenerate, params }) => {
    // Tag each request so concurrent calls can be told apart in the logs.
    const requestId = crypto.randomUUID();

    console.group(`[${requestId}] Generate Request`);
    console.log('Prompt:', params.prompt);
    console.log('Settings:', {
      temperature: params.temperature,
      maxOutputTokens: params.maxOutputTokens,
    });
    console.groupEnd();
    const startTime = Date.now();
    const result = await doGenerate();
    const duration = Date.now() - startTime;

    // The result carries a content array of typed parts rather than a plain
    // text field, so collect the text parts before logging.
    const text = result.content
      .map(part => (part.type === 'text' ? part.text : ''))
      .join('');

    console.group(`[${requestId}] Generate Response`);
    console.log('Duration:', `${duration}ms`);
    console.log('Text:', text);
    console.log('Usage:', result.usage);
    console.log('Finish Reason:', result.finishReason);
    console.groupEnd();

    return result;
  },
  wrapStream: async ({ doStream, params }) => {
    const requestId = crypto.randomUUID();

    console.group(`[${requestId}] Stream Request`);
    console.log('Prompt:', params.prompt);
    console.groupEnd();

    const { stream, ...rest } = await doStream();

    let chunkCount = 0;
    let fullText = '';
    // Pass every chunk through unchanged, accumulating text deltas so the
    // full output can be logged once the finish chunk arrives.
    const transformStream = new TransformStream<
      LanguageModelV3StreamPart,
      LanguageModelV3StreamPart
    >({
      transform(chunk, controller) {
        chunkCount++;

        if (chunk.type === 'text-delta') {
          fullText += chunk.delta;
        }

        if (chunk.type === 'finish') {
          console.group(`[${requestId}] Stream Complete`);
          console.log('Chunks:', chunkCount);
          console.log('Full text:', fullText);
          console.log('Usage:', chunk.usage);
          console.groupEnd();
        }

        controller.enqueue(chunk);
      },
    });
    return {
      stream: stream.pipeThrough(transformStream),
      ...rest,
    };
  },
};
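
// Usage sketch: attach the middleware to any provider model with the AI SDK's
// `wrapLanguageModel` helper. The `openai('gpt-4o')` model and the prompt are
// illustrative assumptions; substitute your own provider and model.
//
//   import { generateText, wrapLanguageModel } from 'ai';
//   import { openai } from '@ai-sdk/openai';
//
//   const model = wrapLanguageModel({
//     model: openai('gpt-4o'),
//     middleware: debugMiddleware,
//   });
//
//   const { text } = await generateText({ model, prompt: 'Hello!' });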