style: apply lint fixes

Peter Steinberger
2026-01-08 04:44:11 +00:00
parent c2a6e04e06
commit dc3e3f27d4
23 changed files with 94 additions and 95 deletions

View File

@@ -6,6 +6,7 @@ import {
   createProcessTool,
   processTool,
 } from "./bash-tools.js";
+import { sanitizeBinaryOutput } from "./shell-utils.js";
 
 const isWin = process.platform === "win32";
 const shortDelayCmd = isWin ? "ping -n 2 127.0.0.1 > nul" : "sleep 0.05";
@@ -17,10 +18,9 @@ const echoAfterDelay = (message: string) =>
 const echoLines = (lines: string[]) =>
   joinCommands(lines.map((line) => `echo ${line}`));
 const normalizeText = (value?: string) =>
-  (value ?? "")
+  sanitizeBinaryOutput(value ?? "")
     .replace(/\r\n/g, "\n")
     .replace(/\r/g, "\n")
-    .replace(/[\u0000-\u0008\u000b\u000c\u000e-\u001f\u007f]/g, "")
     .split("\n")
     .map((line) => line.replace(/\s+$/u, ""))
     .join("\n")
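
The test helper now delegates control-character stripping to sanitizeBinaryOutput instead of an inline regex. The helper's implementation is not part of this diff; as a rough sketch, assuming it strips the same characters the removed regex matched (it must leave \t, \n, and \r alone, since the chained .replace() calls above still normalize \r\n and \r afterwards):

// Hypothetical sketch of shell-utils.ts, not the code shipped in this commit.
// Drops C0 control characters and DEL, preserving tab (\u0009), newline
// (\u000a), and carriage return (\u000d) for the caller to normalize.
export function sanitizeBinaryOutput(text: string): string {
  return text.replace(/[\u0000-\u0008\u000b\u000c\u000e-\u001f\u007f]/g, "");
}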

View File

@@ -424,8 +424,42 @@ export async function scanOpenRouterModels(
     filtered,
     concurrency,
     async (entry) => {
-      const isFree = isFreeOpenRouterModel(entry);
-      if (!probe) {
+      const isFree = isFreeOpenRouterModel(entry);
+      if (!probe) {
+        return {
+          id: entry.id,
+          name: entry.name,
+          provider: "openrouter",
+          modelRef: `openrouter/${entry.id}`,
+          contextLength: entry.contextLength,
+          maxCompletionTokens: entry.maxCompletionTokens,
+          supportedParametersCount: entry.supportedParametersCount,
+          supportsToolsMeta: entry.supportsToolsMeta,
+          modality: entry.modality,
+          inferredParamB: entry.inferredParamB,
+          createdAtMs: entry.createdAtMs,
+          pricing: entry.pricing,
+          isFree,
+          tool: { ok: false, latencyMs: null, skipped: true },
+          image: { ok: false, latencyMs: null, skipped: true },
+        } satisfies ModelScanResult;
+      }
+      const model: OpenAIModel = {
+        ...baseModel,
+        id: entry.id,
+        name: entry.name || entry.id,
+        contextWindow: entry.contextLength ?? baseModel.contextWindow,
+        maxTokens: entry.maxCompletionTokens ?? baseModel.maxTokens,
+        input: parseModality(entry.modality),
+        reasoning: baseModel.reasoning,
+      };
+      const toolResult = await probeTool(model, apiKey, timeoutMs);
+      const imageResult = model.input.includes("image")
+        ? await probeImage(ensureImageInput(model), apiKey, timeoutMs)
+        : { ok: false, latencyMs: null, skipped: true };
       return {
         id: entry.id,
         name: entry.name,
@@ -440,44 +474,10 @@ export async function scanOpenRouterModels(
         createdAtMs: entry.createdAtMs,
         pricing: entry.pricing,
         isFree,
-        tool: { ok: false, latencyMs: null, skipped: true },
-        image: { ok: false, latencyMs: null, skipped: true },
+        tool: toolResult,
+        image: imageResult,
       } satisfies ModelScanResult;
-      }
-      const model: OpenAIModel = {
-        ...baseModel,
-        id: entry.id,
-        name: entry.name || entry.id,
-        contextWindow: entry.contextLength ?? baseModel.contextWindow,
-        maxTokens: entry.maxCompletionTokens ?? baseModel.maxTokens,
-        input: parseModality(entry.modality),
-        reasoning: baseModel.reasoning,
-      };
-      const toolResult = await probeTool(model, apiKey, timeoutMs);
-      const imageResult = model.input.includes("image")
-        ? await probeImage(ensureImageInput(model), apiKey, timeoutMs)
-        : { ok: false, latencyMs: null, skipped: true };
-      return {
-        id: entry.id,
-        name: entry.name,
-        provider: "openrouter",
-        modelRef: `openrouter/${entry.id}`,
-        contextLength: entry.contextLength,
-        maxCompletionTokens: entry.maxCompletionTokens,
-        supportedParametersCount: entry.supportedParametersCount,
-        supportsToolsMeta: entry.supportsToolsMeta,
-        modality: entry.modality,
-        inferredParamB: entry.inferredParamB,
-        createdAtMs: entry.createdAtMs,
-        pricing: entry.pricing,
-        isFree,
-        tool: toolResult,
-        image: imageResult,
-      } satisfies ModelScanResult;
-    },
+    },
     {
       onProgress: (completed, total) =>
         options.onProgress?.({
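
Despite its size, this hunk pair changes no behavior: every removed line reappears among the added lines, so the churn is most likely the lint formatter re-indenting the callback body (the block inside if (!probe) appears to have been under-indented before). Condensed, the callback reads as the sketch below. This is a sketch, not the verbatim file: entryMeta is a hypothetical stand-in for the twelve metadata fields both returns copy from the entry, and probe, apiKey, timeoutMs, and baseModel come from the enclosing scope as in the diff.

// Sketch of the scan callback's control flow (field lists condensed).
async (entry) => {
  const isFree = isFreeOpenRouterModel(entry);
  const skipped = { ok: false, latencyMs: null, skipped: true };
  if (!probe) {
    // Probing disabled: pass the catalog metadata through, mark both probes skipped.
    return { ...entryMeta(entry), isFree, tool: skipped, image: skipped };
  }
  // Build a concrete model from the catalog entry, falling back to baseModel defaults.
  const model: OpenAIModel = {
    ...baseModel,
    id: entry.id,
    name: entry.name || entry.id,
    contextWindow: entry.contextLength ?? baseModel.contextWindow,
    maxTokens: entry.maxCompletionTokens ?? baseModel.maxTokens,
    input: parseModality(entry.modality),
    reasoning: baseModel.reasoning,
  };
  // Always probe tool calling; probe image input only when the modality includes it.
  const toolResult = await probeTool(model, apiKey, timeoutMs);
  const imageResult = model.input.includes("image")
    ? await probeImage(ensureImageInput(model), apiKey, timeoutMs)
    : skipped;
  return { ...entryMeta(entry), isFree, tool: toolResult, image: imageResult };
};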