// src/generator.ts
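// LM Studio generator plugin: delegates generation to the OpenAI Responses API.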
import {
type Chat,
type GeneratorController,
type InferParsedConfig,
} from "@lmstudio/sdk";
import OpenAI from "openai";
import { configSchematics, globalConfigSchematics } from "./config";
import {
  type ResponseInput,
  type ResponseInputItem,
  type Tool,
} from "openai/resources/responses/responses";
import { type ReasoningEffort } from "openai/resources/shared";
import { readFile } from "fs/promises";
import { createReadStream } from "fs";
/* -------------------------------------------------------------------------- */
/* Build helpers */
/* -------------------------------------------------------------------------- */
/** Build a pre-configured OpenAI client. */
function createOpenAI(
globalConfig: InferParsedConfig<typeof globalConfigSchematics>
) {
const apiKey = globalConfig.get("openaiApiKey");
return new OpenAI({
apiKey,
baseURL: "https://api.openai.com/v1",
});
}
/** Upload a local file via the OpenAI Files API and return its id. */
async function createFile(openai: OpenAI, filePath: string) {
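  // Stream the file from disk rather than buffering it fully in memory.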
const fileContent = createReadStream(filePath);
const result = await openai.files.create({
file: fileContent,
purpose: "user_data",
});
return result.id;
}
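// NOTE: uploaded files persist in the OpenAI account until deleted; consider
// cleaning them up once the response has been generated.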
/** Convert LM Studio chat history into the input format expected by the OpenAI Responses API. */
async function toOpenAIMessages(
  openai: OpenAI,
  history: Chat,
  ctl: GeneratorController
): Promise<ResponseInput> {
const messages: ResponseInputItem[] = [];
for (const message of history) {
switch (message.getRole()) {
case "system":
messages.push({ role: "system", content: message.getText() });
break;
case "user": {
// Check if the message has files (images)
const files = message.getFiles(ctl.client);
const hasFiles = files.length > 0;
if (hasFiles) {
// Build content array with both text and images
const content: Array<{ type: "input_text"; text: string } | { type: "input_image"; image_url: string; detail: "auto" | "low" | "high" } | { type: "input_file"; file_id: string }> = [];
// Add text if present
const text = message.getText();
if (text) {
content.push({ type: "input_text", text });
}
// Add images
for (const file of files) {
if (file.isImage()) {
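              // Inline images as base64 data URLs, which the Responses API accepts directly.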
const filePath = await file.getFilePath();
const fileBuffer = await readFile(filePath);
const base64Image = fileBuffer.toString("base64");
              // Determine the MIME type from the file extension, defaulting to JPEG.
              const ext = file.name.split(".").pop()?.toLowerCase();
              const mimeTypes: Record<string, string> = {
                png: "image/png",
                jpg: "image/jpeg",
                jpeg: "image/jpeg",
                gif: "image/gif",
                webp: "image/webp",
              };
              const mimeType = mimeTypes[ext ?? ""] ?? "image/jpeg";
content.push({
type: "input_image",
image_url: `data:${mimeType};base64,${base64Image}`,
detail: "auto",
});
}
            } else {
              // Non-image attachments are uploaded via the Files API and referenced by id.
              const filePath = await file.getFilePath();
              const id = await createFile(openai, filePath);
              content.push({
                type: "input_file",
                file_id: id,
              });
            }
}
messages.push({ role: "user", content });
        } else {
          // No attachments; just send the plain text.
          messages.push({ role: "user", content: message.getText() });
}
break;
}
case "assistant": {
// const toolCalls: ChatCompletionMessageToolCall[] = message
// .getToolCallRequests()
// .map(toolCall => ({
// id: toolCall.id ?? "",
// type: "function",
// function: {
// name: toolCall.name,
// arguments: JSON.stringify(toolCall.arguments ?? {}),
// },
// }));
messages.push({
role: "assistant",
content: message.getText(),
// ...(toolCalls.length ? { tool_calls: toolCalls } : {}),
});
break;
}
// case "tool": {
// message.getToolCallResults().forEach(toolCallResult => {
// messages.push({
// role: "tool",
// tool_call_id: toolCallResult.toolCallId ?? "",
// content: toolCallResult.content,
// } as ChatCompletionToolMessageParam);
// });
// break;
// }
}
}
return messages;
}
/* -------------------------------------------------------------------------- */
/* Stream-handling utils */
/* -------------------------------------------------------------------------- */
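/** Propagate a user-initiated abort from LM Studio to the in-flight OpenAI stream. */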
function wireAbort(
ctl: GeneratorController,
stream: { controller: AbortController }
) {
ctl.onAborted(() => {
console.info("Generation aborted by user.");
stream.controller.abort();
});
}
/* -------------------------------------------------------------------------- */
/* API */
/* -------------------------------------------------------------------------- */
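/**
 * Generator entry point: converts the chat history, streams a response from the
 * OpenAI Responses API, and forwards text and reasoning fragments to LM Studio.
 */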
export async function generate(ctl: GeneratorController, history: Chat) {
const config = ctl.getPluginConfig(configSchematics);
const model = config.get("model");
  const verbosity = config.get("verbosity") as
    | "low"
    | "medium"
    | "high"
    | null
    | undefined;
  const reasoningEffortRaw = config.get("reasoningEffort");
  // gpt-5 rejects "none"; map it to the closest supported effort, "minimal".
  const reasoningEffort =
    reasoningEffortRaw === "none" && model === "gpt-5"
      ? "minimal"
      : reasoningEffortRaw;
const webSearch = config.get("webSearch");
const globalConfig = ctl.getGlobalPluginConfig(globalConfigSchematics);
  /* 1. Set up the client */
  const openai = createOpenAI(globalConfig);
  /* 2. Build the request payload and kick off the streaming response */
  const messages = await toOpenAIMessages(openai, history, ctl);
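  // Built-in tools: the code interpreter is always enabled; web search is opt-in via config.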
const tools: Array<Tool> = [
{
type: "code_interpreter" as const,
container: { type: "auto" as const },
},
];
if (webSearch === "on") {
tools.push({ type: "web_search" as const });
}
  const stream = await openai.responses.create({
    model,
    input: messages,
    text: { verbosity },
    reasoning: {
      effort: reasoningEffort as ReasoningEffort,
      summary: "detailed",
    },
    tools,
    stream: true,
  });
/* 3. Abort wiring & stream processing */
  wireAbort(ctl, stream);
  for await (const event of stream) {
    if (event.type === "response.output_text.delta") {
      // Visible answer tokens.
      ctl.fragmentGenerated(event.delta);
    } else if (event.type === "response.reasoning_summary_text.delta") {
      // Reasoning-summary tokens, surfaced to LM Studio as reasoning fragments.
      ctl.fragmentGenerated(event.delta, { reasoningType: "reasoning" });
    }
  }
}