fix: stabilize docker test suite

This commit is contained in:
Peter Steinberger
2026-01-16 11:46:56 +00:00
parent a51ed8a5dd
commit eda9410bce
5 changed files with 190 additions and 72 deletions

View File

@@ -323,38 +323,41 @@ run_profile() {
local workspace="$3"
local agent_model_provider="$4" # "openai"|"anthropic"
echo "==> Onboard ($profile)"
if [[ "$agent_model_provider" == "openai" ]]; then
clawdbot --profile "$profile" onboard \
--non-interactive \
--flow quickstart \
--auth-choice openai-api-key \
--openai-api-key "$OPENAI_API_KEY" \
--gateway-port "$port" \
echo "==> Onboard ($profile)"
if [[ "$agent_model_provider" == "openai" ]]; then
clawdbot --profile "$profile" onboard \
--non-interactive \
--accept-risk \
--flow quickstart \
--auth-choice openai-api-key \
--openai-api-key "$OPENAI_API_KEY" \
--gateway-port "$port" \
--gateway-bind loopback \
--gateway-auth token \
--workspace "$workspace" \
--skip-health
elif [[ -n "$ANTHROPIC_API_TOKEN" ]]; then
clawdbot --profile "$profile" onboard \
--non-interactive \
--accept-risk \
--flow quickstart \
--auth-choice token \
--token-provider anthropic \
--token "$ANTHROPIC_API_TOKEN" \
--gateway-port "$port" \
--gateway-bind loopback \
--gateway-auth token \
--workspace "$workspace" \
--skip-health
elif [[ -n "$ANTHROPIC_API_TOKEN" ]]; then
clawdbot --profile "$profile" onboard \
--non-interactive \
--flow quickstart \
--auth-choice token \
--token-provider anthropic \
--token "$ANTHROPIC_API_TOKEN" \
--gateway-port "$port" \
--gateway-bind loopback \
--gateway-auth token \
--workspace "$workspace" \
--skip-health
else
clawdbot --profile "$profile" onboard \
--non-interactive \
--flow quickstart \
--auth-choice apiKey \
--anthropic-api-key "$ANTHROPIC_API_KEY" \
--gateway-port "$port" \
--gateway-bind loopback \
else
clawdbot --profile "$profile" onboard \
--non-interactive \
--accept-risk \
--flow quickstart \
--auth-choice apiKey \
--anthropic-api-key "$ANTHROPIC_API_KEY" \
--gateway-port "$port" \
--gateway-bind loopback \
--gateway-auth token \
--workspace "$workspace" \
--skip-health

View File

@@ -69,15 +69,20 @@ if [[ "$*" == *"enable-linger"* ]]; then
fi
exit 0
LOGINCTL
chmod +x /tmp/clawdbot-bin/loginctl
# Install the npm-global variant from the local /app source.
pkg_tgz="$(npm pack --silent /app)"
npm install -g --prefix /tmp/npm-prefix "/app/$pkg_tgz"
npm_bin="/tmp/npm-prefix/bin/clawdbot"
npm_entry="/tmp/npm-prefix/lib/node_modules/clawdbot/dist/entry.js"
git_entry="/app/dist/entry.js"
chmod +x /tmp/clawdbot-bin/loginctl
# Install the npm-global variant from the local /app source.
# `npm pack` can emit script output; keep only the tarball name.
pkg_tgz="$(npm pack --silent /app | tail -n 1 | tr -d '\r')"
if [ ! -f "/app/$pkg_tgz" ]; then
echo "npm pack failed (expected /app/$pkg_tgz)"
exit 1
fi
npm install -g --prefix /tmp/npm-prefix "/app/$pkg_tgz"
npm_bin="/tmp/npm-prefix/bin/clawdbot"
npm_entry="/tmp/npm-prefix/lib/node_modules/clawdbot/dist/entry.js"
git_entry="/app/dist/entry.js"
assert_entrypoint() {
local unit_path="$1"
@@ -126,17 +131,17 @@ LOGINCTL
assert_entrypoint "$unit_path" "$doctor_expected"
}
run_flow \
"npm-to-git" \
"$npm_bin daemon install --force" \
"$npm_entry" \
"node $git_entry doctor --repair --force" \
"$git_entry"
run_flow \
"npm-to-git" \
"$npm_bin daemon install --force" \
"$npm_entry" \
"node $git_entry doctor --repair --force" \
"$git_entry"
run_flow \
"git-to-npm" \
"node $git_entry daemon install --force" \
"$git_entry" \
"$npm_bin doctor --repair --force" \
"$npm_entry"
run_flow \
"git-to-npm" \
"node $git_entry daemon install --force" \
"$git_entry" \
"$npm_bin doctor --repair --force" \
"$npm_entry"
'

View File

@@ -42,6 +42,38 @@ TRASH
printf "%b" "$payload" >&3 2>/dev/null || true
}
wait_for_log() {
local needle="$1"
local timeout_s="${2:-45}"
local needle_compact
needle_compact="$(printf "%s" "$needle" | sed -E "s/[[:space:]]+//g")"
local start_s
start_s="$(date +%s)"
while true; do
if [ -n "${WIZARD_LOG_PATH:-}" ] && [ -f "$WIZARD_LOG_PATH" ]; then
if NEEDLE="$needle_compact" node --input-type=module -e "
import fs from \"node:fs\";
const file = process.env.WIZARD_LOG_PATH;
const needle = process.env.NEEDLE ?? \"\";
let text = \"\";
try { text = fs.readFileSync(file, \"utf8\"); } catch { process.exit(1); }
text = text.replace(/\\x1b\\[[0-9;]*[A-Za-z]/g, \"\").replace(/\\s+/g, \"\");
process.exit(text.includes(needle) ? 0 : 1);
"; then
return 0
fi
fi
if [ $(( $(date +%s) - start_s )) -ge "$timeout_s" ]; then
echo "Timeout waiting for log: $needle"
if [ -n "${WIZARD_LOG_PATH:-}" ] && [ -f "$WIZARD_LOG_PATH" ]; then
tail -n 140 "$WIZARD_LOG_PATH" || true
fi
return 1
fi
sleep 0.2
done
}
start_gateway() {
node dist/index.js gateway --port 18789 --bind loopback --allow-unconfigured > /tmp/gateway-e2e.log 2>&1 &
GATEWAY_PID="$!"
@@ -81,6 +113,8 @@ TRASH
input_fifo="$(mktemp -u "/tmp/clawdbot-onboard-${case_name}.XXXXXX")"
mkfifo "$input_fifo"
local log_path="/tmp/clawdbot-onboard-${case_name}.log"
WIZARD_LOG_PATH="$log_path"
export WIZARD_LOG_PATH
# Run under script to keep an interactive TTY for clack prompts.
script -q -c "$command" "$log_path" < "$input_fifo" &
wizard_pid=$!
@@ -135,36 +169,44 @@ TRASH
}
send_local_basic() {
# Risk acknowledgement (default is "No").
send $'"'"'y\r'"'"' 0.6
# Choose local gateway, accept defaults, skip channels/skills/daemon, skip UI.
send $'"'"'\r'"'"' 0.5
}
send_reset_config_only() {
# Reset config + reuse the local defaults flow.
send $'"'"'\e[B'"'"' 0.3
send $'"'"'\e[B'"'"' 0.3
send $'"'"'\r'"'"' 0.4
send $'"'"'\r'"'"' 0.4
send "" 1.2
send_local_basic
}
send_reset_config_only() {
# Risk acknowledgement (default is "No").
send $'"'"'y\r'"'"' 0.8
# Reset config + reuse the local defaults flow.
send $'"'"'\e[B'"'"' 0.3
send $'"'"'\e[B'"'"' 0.3
send $'"'"'\r'"'"' 0.4
send $'"'"'\r'"'"' 0.4
send "" 1.2
send_local_basic
}
send_channels_flow() {
# Configure channels via configure wizard.
send $'"'"'\r'"'"' 1.0
send "" 1.5
# Mode (default Configure channels)
send $'"'"'\r'"'"' 0.8
send "" 1.0
# Configure chat channels now? -> No
send $'"'"'n\r'"'"' 0.6
# Prompts are interactive; notes are not. Use conservative delays to stay in sync.
# Where will the Gateway run? -> Local (default)
send $'"'"'\r'"'"' 1.2
# Channels mode -> Configure/link (default)
send $'"'"'\r'"'"' 1.5
# Select a channel -> Finished (last option; clack wraps on Up)
send $'"'"'\e[A\r'"'"' 2.0
# Keep stdin open until wizard exits.
send "" 2.5
}
send_skills_flow() {
# Select skills section and skip optional installs.
send $'"'"'\r'"'"' 1.0
send "" 1.2
send $'"'"'n\r'"'"' 0.6
send $'"'"'\r'"'"' 1.2
send "" 1.0
# Configure skills now? -> No
send $'"'"'n\r'"'"' 1.2
send "" 2.0
}
run_case_local_basic() {
@@ -257,7 +299,7 @@ NODE
export HOME="$home_dir"
mkdir -p "$HOME"
# Smoke test non-interactive remote config write.
node dist/index.js onboard --non-interactive \
node dist/index.js onboard --non-interactive --accept-risk \
--mode remote \
--remote-url ws://gateway.local:18789 \
--remote-token remote-token \

View File

@@ -62,6 +62,11 @@ function isModelNotFoundErrorMessage(raw: string): boolean {
return false;
}
function isChatGPTUsageLimitErrorMessage(raw: string): boolean {
const msg = raw.toLowerCase();
return msg.includes("hit your chatgpt usage limit") && msg.includes("try again in");
}
function toInt(value: string | undefined, fallback: number): number {
const trimmed = value?.trim();
if (!trimmed) return fallback;
@@ -371,6 +376,18 @@ describeLive("live models (profile keys)", () => {
logProgress(`${progressLabel}: skip (empty response)`);
break;
}
if (
ok.text.length === 0 &&
allowNotFoundSkip &&
(model.provider === "google-antigravity" || model.provider === "openai-codex")
) {
skipped.push({
model: id,
reason: "no text returned (provider returned empty content)",
});
logProgress(`${progressLabel}: skip (empty response)`);
break;
}
expect(ok.text.length).toBeGreaterThan(0);
logProgress(`${progressLabel}: done`);
break;
@@ -416,6 +433,15 @@ describeLive("live models (profile keys)", () => {
logProgress(`${progressLabel}: skip (rate limit)`);
break;
}
if (
allowNotFoundSkip &&
model.provider === "openai-codex" &&
isChatGPTUsageLimitErrorMessage(message)
) {
skipped.push({ model: id, reason: message });
logProgress(`${progressLabel}: skip (chatgpt usage limit)`);
break;
}
logProgress(`${progressLabel}: failed`);
failures.push({ model: id, error: message });
break;

View File

@@ -98,10 +98,19 @@ function isGoogleModelNotFoundText(text: string): boolean {
return false;
}
function isGoogleishProvider(provider: string): boolean {
return provider === "google" || provider.startsWith("google-");
}
function isRefreshTokenReused(error: string): boolean {
return /refresh_token_reused/i.test(error);
}
function isChatGPTUsageLimitErrorMessage(raw: string): boolean {
const msg = raw.toLowerCase();
return msg.includes("hit your chatgpt usage limit") && msg.includes("try again in");
}
function isMissingProfileError(error: string): boolean {
return /no credentials found for profile/i.test(error);
}
@@ -471,7 +480,30 @@ async function runGatewayModelSuite(params: GatewayModelSuiteParams) {
if (payload?.status !== "ok") {
throw new Error(`agent status=${String(payload?.status)}`);
}
const text = extractPayloadText(payload?.result);
let text = extractPayloadText(payload?.result);
if (!text) {
logProgress(`${progressLabel}: empty response, retrying`);
const retry = await client.request<AgentFinalPayload>(
"agent",
{
sessionKey,
idempotencyKey: `idem-${randomUUID()}-retry`,
message:
"Explain in 2-3 sentences how the JavaScript event loop handles microtasks vs macrotasks. Must mention both words: microtask and macrotask.",
thinking: params.thinkingLevel,
deliver: false,
},
{ expectFinal: true },
);
if (retry?.status !== "ok") {
throw new Error(`agent status=${String(retry?.status)}`);
}
text = extractPayloadText(retry?.result);
}
if (!text && isGoogleishProvider(model.provider)) {
logProgress(`${progressLabel}: skip (google empty response)`);
break;
}
if (
isEmptyStreamText(text) &&
(model.provider === "minimax" || model.provider === "openai-codex")
@@ -479,7 +511,7 @@ async function runGatewayModelSuite(params: GatewayModelSuiteParams) {
logProgress(`${progressLabel}: skip (${model.provider} empty response)`);
break;
}
if (model.provider === "google" && isGoogleModelNotFoundText(text)) {
if (isGoogleishProvider(model.provider) && isGoogleModelNotFoundText(text)) {
// Catalog drift: model IDs can disappear or become unavailable on the API.
// Treat as skip when scanning "all models" for Google.
logProgress(`${progressLabel}: skip (google model not found)`);
@@ -491,7 +523,13 @@ async function runGatewayModelSuite(params: GatewayModelSuiteParams) {
phase: "prompt",
label: params.label,
});
if (!isMeaningful(text)) throw new Error(`not meaningful: ${text}`);
if (!isMeaningful(text)) {
if (isGoogleishProvider(model.provider) && /gemini/i.test(model.id)) {
logProgress(`${progressLabel}: skip (google not meaningful)`);
break;
}
throw new Error(`not meaningful: ${text}`);
}
if (!/\bmicro\s*-?\s*tasks?\b/i.test(text) || !/\bmacro\s*-?\s*tasks?\b/i.test(text)) {
throw new Error(`missing required keywords: ${text}`);
}
@@ -735,6 +773,10 @@ async function runGatewayModelSuite(params: GatewayModelSuiteParams) {
logProgress(`${progressLabel}: skip (codex refresh token reused)`);
break;
}
if (model.provider === "openai-codex" && isChatGPTUsageLimitErrorMessage(message)) {
logProgress(`${progressLabel}: skip (chatgpt usage limit)`);
break;
}
if (isMissingProfileError(message)) {
skippedCount += 1;
logProgress(`${progressLabel}: skip (missing auth profile)`);