// To enable iterative refinement, the generate endpoint in `app/api/generate/route.ts`
// now accepts the previously generated code and a new user prompt for changes.
// Handles POST /api/generate.
// First turn: converts a screenshot (imageUrl) into a single HTML file.
// Refinement turn: when `previousCode` and `userPrompt` are both present, the
// prior assistant output and the new instruction are replayed so the model
// has full conversational context for the requested change.
export async function POST(request: Request) {
  const { imageUrl, previousCode, userPrompt } = await request.json();

  // Every request (initial or refinement) needs the screenshot; reject early
  // instead of forwarding a malformed payload to the vision API.
  if (typeof imageUrl !== "string" || imageUrl.length === 0) {
    return Response.json({ error: "imageUrl is required" }, { status: 400 });
  }

  // 1. Construct the initial message history for the LLM.
  //    This includes the screenshot and the primary instruction.
  const messages = [
    {
      role: "user",
      content: [
        { type: "image_url", image_url: { url: imageUrl } },
        { type: "text", text: "Turn this into a single HTML file with inline Tailwind." },
      ],
    },
  ];

  // 2. If this is a refinement step (i.e., previous code and a new prompt exist),
  //    append the assistant's last response and the new user prompt to the history.
  if (previousCode && userPrompt) {
    messages.push(
      { role: "assistant", content: previousCode },
      { role: "user", content: userPrompt }
    );
  }

  // 3. Call the OpenAI API with the complete conversation history. The call is
  //    wrapped so an upstream failure surfaces as a structured 502 rather than
  //    an unhandled rejection.
  try {
    const response = await openai.chat.completions.create({
      model: "gpt-4-vision-preview",
      max_tokens: 4096,
      messages: messages,
    });

    // A route handler MUST return a Response; the original elided this behind
    // a comment, which leaves the request unanswered at runtime.
    const code = response.choices[0]?.message?.content ?? "";
    return Response.json({ code });
  } catch (e: unknown) {
    const message = e instanceof Error ? e.message : "generation failed";
    return Response.json({ error: message }, { status: 502 });
  }
}