Compare commits

...

28 Commits

Author SHA1 Message Date
Vincent Koc
9205017f6b docs(changelog): note operator policy overlay 2026-03-06 01:21:07 -05:00
Vincent Koc
197a60e5e6 test(config): cover operator policy overlay 2026-03-06 01:21:07 -05:00
Vincent Koc
6c3b0cbea8 doctor: report immutable operator policy 2026-03-06 01:21:07 -05:00
Vincent Koc
120c686de4 status: show active operator policy 2026-03-06 01:21:07 -05:00
Vincent Koc
42e9c9ae12 gateway: reject writes blocked by operator policy 2026-03-06 01:21:07 -05:00
Vincent Koc
5b30cb7721 config: apply immutable operator policy overlay 2026-03-06 01:21:07 -05:00
Vincent Koc
a3b608379a config: expose operator policy snapshot metadata 2026-03-06 01:21:07 -05:00
Vincent Koc
5e799a39c3 config: add operator policy path resolver 2026-03-06 01:21:07 -05:00
Vincent Koc
74e0729631
Skills/nano-banana-pro: support hosted input images (#37247)
* skills(nano-banana-pro): support remote edit image URLs

* test(nano-banana-pro): cover remote input image validation

* docs(nano-banana-pro): document remote input images

* docs(changelog): note nano-banana remote image inputs

* chore(nano-banana-pro): normalize script imports

* test(nano-banana-pro): normalize test imports

* ci: use published bun release tag

* ci: skip prod audit on PRs without dependency changes

* test(nano-banana-pro): remove pillow dependency from skill tests

* docs(changelog): credit nano-banana input image follow-up
2026-03-06 01:02:23 -05:00
Vincent Koc
7187bfd84b test(process/supervisor): assert pipe-closed stdin state 2026-03-05 07:56:48 -05:00
Vincent Koc
447b2eaf2c test(process): cover ended stdin hint behavior 2026-03-05 07:56:48 -05:00
Vincent Koc
1d15dfb03d process/supervisor: track pty stdin destroyed state 2026-03-05 07:56:48 -05:00
Vincent Koc
d7a0862382 process/supervisor: track child stdin destroyed state 2026-03-05 07:56:48 -05:00
Vincent Koc
73b3f655ba types/process: expose writable stdin state flags 2026-03-05 07:56:48 -05:00
Vincent Koc
c3ffaaa764 tools/process: refine attach hint and stdin writability checks 2026-03-05 07:56:47 -05:00
Vincent Koc
2c0a34ffb7
Merge branch 'main' into feat/19809-slack-typing-reaction 2026-03-04 06:16:12 -08:00
Vincent Koc
f85a1c4820 docs(changelog): note interactive process recovery 2026-03-04 08:52:32 -05:00
Vincent Koc
628e17d0ea docs(process): add attach and input-wait notes 2026-03-04 08:52:27 -05:00
Vincent Koc
4dc5b0e44f test(process): cover attach and input-wait hints 2026-03-04 08:52:19 -05:00
Vincent Koc
f44639375f tools/process: add attach action and input-wait metadata 2026-03-04 08:52:05 -05:00
Vincent Koc
4a80fb4751 docs(changelog): note slack system event routing fix 2026-03-04 01:12:28 -05:00
Vincent Koc
6181541626 test(slack): assert reaction session routing carries sender 2026-03-04 01:12:27 -05:00
Vincent Koc
999b0c6cf2 test(slack): update interaction session key assertions 2026-03-04 01:12:27 -05:00
Vincent Koc
b41aafb9c0 test(slack): cover binding-aware system event routing 2026-03-04 01:12:27 -05:00
Vincent Koc
2dd6794d0e fix(slack): include modal submitter in session routing 2026-03-04 01:12:27 -05:00
Vincent Koc
1b5e1f558f fix(slack): include sender context for interaction session routing 2026-03-04 01:12:27 -05:00
Vincent Koc
c8e91fdf88 fix(slack): pass sender to system event session resolver 2026-03-04 01:12:27 -05:00
Vincent Koc
6af782d2d9 fix(slack): route system events via binding-aware session keys 2026-03-04 01:12:27 -05:00
21 changed files with 1356 additions and 55 deletions

View File

@ -61,7 +61,7 @@ runs:
if: inputs.install-bun == 'true' if: inputs.install-bun == 'true'
uses: oven-sh/setup-bun@v2 uses: oven-sh/setup-bun@v2
with: with:
bun-version: "1.3.9+cf6cdbbba" bun-version: "1.3.9"
- name: Runtime versions - name: Runtime versions
shell: bash shell: bash

View File

@ -327,7 +327,26 @@ jobs:
pre-commit run zizmor --files "${workflow_files[@]}" pre-commit run zizmor --files "${workflow_files[@]}"
- name: Audit production dependencies - name: Audit production dependencies
run: pre-commit run --all-files pnpm-audit-prod run: |
set -euo pipefail
if [ "${{ github.event_name }}" = "push" ]; then
pre-commit run --all-files pnpm-audit-prod
exit 0
fi
if [ "${{ github.event_name }}" != "pull_request" ]; then
pre-commit run --all-files pnpm-audit-prod
exit 0
fi
BASE="${{ github.event.pull_request.base.sha }}"
if ! git diff --name-only "$BASE" HEAD | grep -Eq '(^|/)package\.json$|^pnpm-lock\.yaml$|^pnpm-workspace\.yaml$'; then
echo "No dependency manifest changes detected; skipping pnpm audit on this PR."
exit 0
fi
pre-commit run --all-files pnpm-audit-prod
checks-windows: checks-windows:
needs: [docs-scope, changed-scope] needs: [docs-scope, changed-scope]

View File

@ -13,6 +13,9 @@ Docs: https://docs.openclaw.ai
- Agents/tool-result truncation: preserve important tail diagnostics by using head+tail truncation for oversized tool results while keeping configurable truncation options. (#20076) thanks @jlwestsr. - Agents/tool-result truncation: preserve important tail diagnostics by using head+tail truncation for oversized tool results while keeping configurable truncation options. (#20076) thanks @jlwestsr.
- Telegram/topic agent routing: support per-topic `agentId` overrides in forum groups and DM topics so topics can route to dedicated agents with isolated sessions. (#33647; based on #31513) Thanks @kesor and @Sid-Qin. - Telegram/topic agent routing: support per-topic `agentId` overrides in forum groups and DM topics so topics can route to dedicated agents with isolated sessions. (#33647; based on #31513) Thanks @kesor and @Sid-Qin.
- Slack/DM typing feedback: add `channels.slack.typingReaction` so Socket Mode DMs can show reaction-based processing status even when Slack native assistant typing is unavailable. (#19816) Thanks @dalefrieswthat. - Slack/DM typing feedback: add `channels.slack.typingReaction` so Socket Mode DMs can show reaction-based processing status even when Slack native assistant typing is unavailable. (#19816) Thanks @dalefrieswthat.
- Exec/process interactive recovery: add `process attach` plus input-wait metadata/hints (`waitingForInput`, `idleMs`, `stdinWritable`) so long-running interactive sessions can be observed and resumed without losing context. Fixes #33957. Thanks @westoque.
- Skills/nano-banana-pro: accept public `http(s)` input images for edit/composition while keeping local path support, and return explicit errors for redirects, `file://`, and private-network URLs. Fixes #33960. Thanks @westoque and @vincentkoc.
- Config/operator policy: add an immutable `operator-policy.json5` overlay that locks configured paths above mutable config, rejects conflicting `config set/patch/apply` writes, and surfaces the active policy in `status --all` and `openclaw doctor`. Fixes #33958. Thanks @vincentkoc.
### Fixes ### Fixes

View File

@ -54,6 +54,7 @@ Config (preferred):
Actions: Actions:
- `list`: running + finished sessions - `list`: running + finished sessions
- `attach`: read current session output with interactive recovery hints
- `poll`: drain new output for a session (also reports exit status) - `poll`: drain new output for a session (also reports exit status)
- `log`: read the aggregated output (supports `offset` + `limit`) - `log`: read the aggregated output (supports `offset` + `limit`)
- `write`: send stdin (`data`, optional `eof`) - `write`: send stdin (`data`, optional `eof`)
@ -71,6 +72,9 @@ Notes:
- `process log` uses line-based `offset`/`limit`. - `process log` uses line-based `offset`/`limit`.
- When both `offset` and `limit` are omitted, it returns the last 200 lines and includes a paging hint. - When both `offset` and `limit` are omitted, it returns the last 200 lines and includes a paging hint.
- When `offset` is provided and `limit` is omitted, it returns from `offset` to the end (not capped to 200). - When `offset` is provided and `limit` is omitted, it returns from `offset` to the end (not capped to 200).
- Running session details now include `stdinWritable`, `lastOutputAt`, `idleMs`, and `waitingForInput`.
- `process list` appends `[input-wait]` when a session has been idle long enough and stdin is writable.
- `process poll`/`attach` include an interactive recovery hint when `waitingForInput=true`.
## Examples ## Examples

View File

@ -39,6 +39,12 @@ Edit (single image)
uv run {baseDir}/scripts/generate_image.py --prompt "edit instructions" --filename "output.png" -i "/path/in.png" --resolution 2K uv run {baseDir}/scripts/generate_image.py --prompt "edit instructions" --filename "output.png" -i "/path/in.png" --resolution 2K
``` ```
Edit from a hosted image URL
```bash
uv run {baseDir}/scripts/generate_image.py --prompt "turn this into a watercolor poster" --filename "output.png" -i "https://images.example.com/source.png" --resolution 2K
```
Multi-image composition (up to 14 images) Multi-image composition (up to 14 images)
```bash ```bash
@ -53,6 +59,9 @@ API key
Notes Notes
- Resolutions: `1K` (default), `2K`, `4K`. - Resolutions: `1K` (default), `2K`, `4K`.
- Input images can be local paths or public `http(s)` URLs.
- `file://` URLs are rejected; use a normal local path instead.
- Remote input URLs reject redirects plus private/loopback/special-use hosts for safety.
- Use timestamps in filenames: `yyyy-mm-dd-hh-mm-ss-name.png`. - Use timestamps in filenames: `yyyy-mm-dd-hh-mm-ss-name.png`.
- The script prints a `MEDIA:` line for OpenClaw to auto-attach on supported chat providers. - The script prints a `MEDIA:` line for OpenClaw to auto-attach on supported chat providers.
- Do not read the image back; report the saved path only. - Do not read the image back; report the saved path only.

View File

@ -17,9 +17,22 @@ Multi-image editing (up to 14 images):
""" """
import argparse import argparse
import ipaddress
import os import os
import re
import socket
import sys import sys
from io import BytesIO
from pathlib import Path from pathlib import Path
from urllib import error, parse, request
MAX_REMOTE_IMAGE_BYTES = 20 * 1024 * 1024
REMOTE_IMAGE_TIMEOUT_SEC = 20
class NoRedirectHandler(request.HTTPRedirectHandler):
def redirect_request(self, req, fp, code, msg, headers, newurl):
return None
def get_api_key(provided_key: str | None) -> str | None: def get_api_key(provided_key: str | None) -> str | None:
@ -29,6 +42,127 @@ def get_api_key(provided_key: str | None) -> str | None:
return os.environ.get("GEMINI_API_KEY") return os.environ.get("GEMINI_API_KEY")
def is_remote_image_url(image_source: str) -> bool:
parsed = parse.urlparse(image_source)
return parsed.scheme.lower() in {"http", "https"}
def _looks_like_windows_drive_path(image_source: str) -> bool:
return bool(re.match(r"^[a-zA-Z]:[\\/]", image_source))
def _is_blocked_remote_ip(address: str) -> bool:
ip = ipaddress.ip_address(address)
return (
ip.is_private
or ip.is_loopback
or ip.is_link_local
or ip.is_multicast
or ip.is_reserved
or ip.is_unspecified
)
def validate_remote_image_url(image_url: str) -> parse.ParseResult:
parsed = parse.urlparse(image_url)
scheme = parsed.scheme.lower()
if scheme not in {"http", "https"}:
if scheme == "file":
raise ValueError(
f"Unsupported input image URL '{image_url}'. "
"Use a local path instead of file:// URLs."
)
raise ValueError(
f"Unsupported input image URL '{image_url}'. Only public http(s) URLs are supported."
)
if not parsed.hostname:
raise ValueError(f"Invalid input image URL '{image_url}': hostname is required.")
if parsed.username or parsed.password:
raise ValueError(
f"Unsupported input image URL '{image_url}': embedded credentials are not allowed."
)
try:
resolved = socket.getaddrinfo(
parsed.hostname,
parsed.port or (443 if scheme == "https" else 80),
type=socket.SOCK_STREAM,
)
except socket.gaierror as exc:
raise ValueError(f"Could not resolve input image URL '{image_url}': {exc}.") from exc
blocked = sorted(
{
entry[4][0]
for entry in resolved
if entry[4] and entry[4][0] and _is_blocked_remote_ip(entry[4][0])
}
)
if blocked:
raise ValueError(
f"Unsafe input image URL '{image_url}': private, loopback, or "
f"special-use hosts are not allowed ({', '.join(blocked)})."
)
return parsed
def load_input_image(image_source: str, pil_image_module):
if is_remote_image_url(image_source):
validate_remote_image_url(image_source)
opener = request.build_opener(NoRedirectHandler())
req = request.Request(
image_source,
headers={"User-Agent": "OpenClaw nano-banana-pro/1.0"},
)
try:
with opener.open(req, timeout=REMOTE_IMAGE_TIMEOUT_SEC) as response:
redirected_to = response.geturl()
if redirected_to != image_source:
raise ValueError(
"Redirected input image URLs are not supported for safety. "
f"Re-run with the final asset URL: {redirected_to}"
)
image_bytes = response.read(MAX_REMOTE_IMAGE_BYTES + 1)
except error.HTTPError as exc:
if 300 <= exc.code < 400:
location = exc.headers.get("Location")
detail = f" Redirect target: {location}" if location else ""
raise ValueError(
f"Redirected input image URLs are not supported for safety.{detail}"
) from exc
raise ValueError(
f"Error downloading input image '{image_source}': HTTP {exc.code}."
) from exc
except error.URLError as exc:
raise ValueError(
f"Error downloading input image '{image_source}': {exc.reason}."
) from exc
if len(image_bytes) > MAX_REMOTE_IMAGE_BYTES:
raise ValueError(
f"Input image URL '{image_source}' exceeded the "
f"{MAX_REMOTE_IMAGE_BYTES // (1024 * 1024)} MB download limit."
)
with pil_image_module.open(BytesIO(image_bytes)) as img:
return img.copy()
parsed = parse.urlparse(image_source)
if parsed.scheme and not _looks_like_windows_drive_path(image_source):
if parsed.scheme.lower() == "file":
raise ValueError(
f"Unsupported input image URL '{image_source}'. "
"Use a local path instead of file:// URLs."
)
raise ValueError(
f"Unsupported input image source '{image_source}'. "
"Use a local path or a public http(s) URL."
)
local_path = Path(image_source).expanduser()
with pil_image_module.open(local_path) as img:
return img.copy()
def main(): def main():
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description="Generate images using Nano Banana Pro (Gemini 3 Pro Image)" description="Generate images using Nano Banana Pro (Gemini 3 Pro Image)"
@ -48,7 +182,10 @@ def main():
action="append", action="append",
dest="input_images", dest="input_images",
metavar="IMAGE", metavar="IMAGE",
help="Input image path(s) for editing/composition. Can be specified multiple times (up to 14 images)." help=(
"Input image path(s) for editing/composition. "
"Can be specified multiple times (up to 14 images)."
),
) )
parser.add_argument( parser.add_argument(
"--resolution", "-r", "--resolution", "-r",
@ -89,15 +226,17 @@ def main():
output_resolution = args.resolution output_resolution = args.resolution
if args.input_images: if args.input_images:
if len(args.input_images) > 14: if len(args.input_images) > 14:
print(f"Error: Too many input images ({len(args.input_images)}). Maximum is 14.", file=sys.stderr) print(
f"Error: Too many input images ({len(args.input_images)}). Maximum is 14.",
file=sys.stderr,
)
sys.exit(1) sys.exit(1)
max_input_dim = 0 max_input_dim = 0
for img_path in args.input_images: for img_path in args.input_images:
try: try:
with PILImage.open(img_path) as img: copied = load_input_image(img_path, PILImage)
copied = img.copy() width, height = copied.size
width, height = copied.size
input_images.append(copied) input_images.append(copied)
print(f"Loaded input image: {img_path}") print(f"Loaded input image: {img_path}")
@ -115,13 +254,19 @@ def main():
output_resolution = "2K" output_resolution = "2K"
else: else:
output_resolution = "1K" output_resolution = "1K"
print(f"Auto-detected resolution: {output_resolution} (from max input dimension {max_input_dim})") print(
f"Auto-detected resolution: {output_resolution} "
f"(from max input dimension {max_input_dim})"
)
# Build contents (images first if editing, prompt only if generating) # Build contents (images first if editing, prompt only if generating)
if input_images: if input_images:
contents = [*input_images, args.prompt] contents = [*input_images, args.prompt]
img_count = len(input_images) img_count = len(input_images)
print(f"Processing {img_count} image{'s' if img_count > 1 else ''} with resolution {output_resolution}...") print(
f"Processing {img_count} image{'s' if img_count > 1 else ''} "
f"with resolution {output_resolution}..."
)
else: else:
contents = args.prompt contents = args.prompt
print(f"Generating image with resolution {output_resolution}...") print(f"Generating image with resolution {output_resolution}...")

View File

@ -0,0 +1,108 @@
import tempfile
import unittest
from pathlib import Path
from unittest.mock import patch
import generate_image
class FakeResponse:
def __init__(self, payload: bytes, url: str):
self._payload = payload
self._url = url
def geturl(self):
return self._url
def read(self, _limit: int):
return self._payload
def __enter__(self):
return self
def __exit__(self, exc_type, exc, tb):
return False
class FakeImage:
def __init__(self, size):
self.size = size
def copy(self):
return FakeImage(self.size)
def __enter__(self):
return self
def __exit__(self, exc_type, exc, tb):
return False
class FakePILImageModule:
def __init__(self, sizes_by_source):
self._sizes_by_source = sizes_by_source
def open(self, source):
if isinstance(source, (str, Path)):
key = source
else:
key = type(source).__name__
size = self._sizes_by_source[key]
return FakeImage(size)
class LoadInputImageTests(unittest.TestCase):
def test_load_input_image_accepts_local_path(self):
with tempfile.TemporaryDirectory() as tmpdir:
image_path = Path(tmpdir) / "input.png"
image_path.write_bytes(b"not-a-real-image")
fake_pil = FakePILImageModule({image_path: (16, 12)})
loaded = generate_image.load_input_image(str(image_path), fake_pil)
self.assertEqual(loaded.size, (16, 12))
def test_load_input_image_accepts_public_https_url(self):
fake_opener = type(
"FakeOpener",
(),
{
"open": lambda self, req, timeout=0: FakeResponse(
b"fake-image-bytes",
req.full_url,
)
},
)()
fake_pil = FakePILImageModule({"BytesIO": (20, 10)})
with patch.object(
generate_image.socket,
"getaddrinfo",
return_value=[(None, None, None, None, ("93.184.216.34", 443))],
), patch.object(generate_image.request, "build_opener", return_value=fake_opener):
loaded = generate_image.load_input_image("https://example.com/input.png", fake_pil)
self.assertEqual(loaded.size, (20, 10))
def test_load_input_image_rejects_private_network_url(self):
with patch.object(
generate_image.socket,
"getaddrinfo",
return_value=[(None, None, None, None, ("127.0.0.1", 443))],
):
with self.assertRaisesRegex(ValueError, "private, loopback, or special-use hosts"):
generate_image.load_input_image(
"https://localhost/input.png",
FakePILImageModule({}),
)
def test_load_input_image_rejects_file_url(self):
with self.assertRaisesRegex(ValueError, "Use a local path instead of file:// URLs"):
generate_image.load_input_image(
"file:///tmp/input.png",
FakePILImageModule({}),
)
if __name__ == "__main__":
unittest.main()

View File

@ -23,6 +23,9 @@ export type SessionStdin = {
// When backed by a real Node stream (child.stdin), this exists; for PTY wrappers it may not. // When backed by a real Node stream (child.stdin), this exists; for PTY wrappers it may not.
destroy?: () => void; destroy?: () => void;
destroyed?: boolean; destroyed?: boolean;
writable?: boolean;
writableEnded?: boolean;
writableFinished?: boolean;
}; };
export interface ProcessSession { export interface ProcessSession {

View File

@ -512,7 +512,7 @@ export function createExecTool(
type: "text", type: "text",
text: `${getWarningText()}Command still running (session ${run.session.id}, pid ${ text: `${getWarningText()}Command still running (session ${run.session.id}, pid ${
run.session.pid ?? "n/a" run.session.pid ?? "n/a"
}). Use process (list/poll/log/write/kill/clear/remove) for follow-up.`, }). Use process (attach/poll/log/write/send-keys/submit/paste/kill/clear/remove) for follow-up.`,
}, },
], ],
details: { details: {

View File

@ -0,0 +1,183 @@
import { afterEach, expect, test, vi } from "vitest";
import { addSession, appendOutput, resetProcessRegistryForTests } from "./bash-process-registry.js";
import { createProcessSessionFixture } from "./bash-process-registry.test-helpers.js";
import { createProcessTool } from "./bash-tools.process.js";
afterEach(() => {
resetProcessRegistryForTests();
});
function readText(result: Awaited<ReturnType<ReturnType<typeof createProcessTool>["execute"]>>) {
return result.content.find((part) => part.type === "text")?.text ?? "";
}
test("process attach surfaces input-wait hints for idle interactive sessions", async () => {
vi.useFakeTimers();
try {
const now = new Date("2026-01-01T00:00:00.000Z").getTime();
vi.setSystemTime(now);
const processTool = createProcessTool({ inputWaitIdleMs: 2_000 });
const session = createProcessSessionFixture({
id: "sess-attach",
command: "expo login",
backgrounded: true,
startedAt: now - 10_000,
});
session.stdin = { write: () => {}, end: () => {}, destroyed: false };
addSession(session);
appendOutput(session, "stdout", "Enter 2FA code:\n");
const result = await processTool.execute("toolcall", {
action: "attach",
sessionId: session.id,
});
const details = result.details as {
status?: string;
waitingForInput?: boolean;
stdinWritable?: boolean;
idleMs?: number;
};
const text = readText(result);
expect(details.status).toBe("running");
expect(details.waitingForInput).toBe(true);
expect(details.stdinWritable).toBe(true);
expect(details.idleMs).toBeGreaterThanOrEqual(2_000);
expect(text).toContain("Enter 2FA code:");
expect(text).toContain("may be waiting for input");
expect(text).not.toContain("Use process attach");
expect(text).toContain("process write");
} finally {
vi.useRealTimers();
}
});
test("process poll returns waiting-for-input metadata when no new output arrives", async () => {
vi.useFakeTimers();
try {
const now = new Date("2026-01-01T00:00:00.000Z").getTime();
vi.setSystemTime(now);
const processTool = createProcessTool({ inputWaitIdleMs: 2_000 });
const session = createProcessSessionFixture({
id: "sess-poll-wait",
command: "expo login",
backgrounded: true,
startedAt: now - 10_000,
});
session.stdin = { write: () => {}, end: () => {}, destroyed: false };
addSession(session);
const poll = await processTool.execute("toolcall", {
action: "poll",
sessionId: session.id,
});
const details = poll.details as {
status?: string;
waitingForInput?: boolean;
stdinWritable?: boolean;
idleMs?: number;
};
const text = readText(poll);
expect(details.status).toBe("running");
expect(details.waitingForInput).toBe(true);
expect(details.stdinWritable).toBe(true);
expect(details.idleMs).toBeGreaterThanOrEqual(2_000);
expect(text).toContain("(no new output)");
expect(text).toContain("may be waiting for input");
} finally {
vi.useRealTimers();
}
});
test("process list marks idle interactive sessions as input-wait", async () => {
vi.useFakeTimers();
try {
const now = new Date("2026-01-01T00:00:00.000Z").getTime();
vi.setSystemTime(now);
const processTool = createProcessTool({ inputWaitIdleMs: 2_000 });
const session = createProcessSessionFixture({
id: "sess-list-wait",
command: "expo login",
backgrounded: true,
startedAt: now - 10_000,
});
session.stdin = { write: () => {}, end: () => {}, destroyed: false };
addSession(session);
const list = await processTool.execute("toolcall", { action: "list" });
const details = list.details as {
sessions?: Array<{
sessionId: string;
waitingForInput?: boolean;
stdinWritable?: boolean;
}>;
};
const text = readText(list);
const listed = details.sessions?.find((item) => item.sessionId === session.id);
expect(listed?.waitingForInput).toBe(true);
expect(listed?.stdinWritable).toBe(true);
expect(text).toContain("[input-wait]");
} finally {
vi.useRealTimers();
}
});
test("ended stdin is reported as non-writable and avoids input-wait hints", async () => {
vi.useFakeTimers();
try {
const now = new Date("2026-01-01T00:00:00.000Z").getTime();
vi.setSystemTime(now);
const processTool = createProcessTool({ inputWaitIdleMs: 2_000 });
const session = createProcessSessionFixture({
id: "sess-ended-stdin",
command: "sleep 60",
backgrounded: true,
startedAt: now - 10_000,
});
session.stdin = {
write: () => {},
end: () => {},
destroyed: false,
writableEnded: true,
};
addSession(session);
const attach = await processTool.execute("toolcall", {
action: "attach",
sessionId: session.id,
});
const attachDetails = attach.details as {
waitingForInput?: boolean;
stdinWritable?: boolean;
};
const attachText = readText(attach);
expect(attachDetails.waitingForInput).toBe(false);
expect(attachDetails.stdinWritable).toBe(false);
expect(attachText).not.toContain("may be waiting for input");
expect(attachText).not.toContain("process write");
const poll = await processTool.execute("toolcall", {
action: "poll",
sessionId: session.id,
});
const pollDetails = poll.details as {
waitingForInput?: boolean;
stdinWritable?: boolean;
status?: string;
};
const pollText = readText(poll);
expect(pollDetails.status).toBe("running");
expect(pollDetails.waitingForInput).toBe(false);
expect(pollDetails.stdinWritable).toBe(false);
expect(pollText).toContain("Process still running.");
expect(pollText).not.toContain("may be waiting for input");
} finally {
vi.useRealTimers();
}
});

View File

@ -15,19 +15,30 @@ import {
markExited, markExited,
setJobTtlMs, setJobTtlMs,
} from "./bash-process-registry.js"; } from "./bash-process-registry.js";
import { deriveSessionName, pad, sliceLogLines, truncateMiddle } from "./bash-tools.shared.js"; import {
clampWithDefault,
deriveSessionName,
pad,
readEnvInt,
sliceLogLines,
truncateMiddle,
} from "./bash-tools.shared.js";
import { recordCommandPoll, resetCommandPollCount } from "./command-poll-backoff.js"; import { recordCommandPoll, resetCommandPollCount } from "./command-poll-backoff.js";
import { encodeKeySequence, encodePaste } from "./pty-keys.js"; import { encodeKeySequence, encodePaste } from "./pty-keys.js";
export type ProcessToolDefaults = { export type ProcessToolDefaults = {
cleanupMs?: number; cleanupMs?: number;
scopeKey?: string; scopeKey?: string;
inputWaitIdleMs?: number;
}; };
type WritableStdin = { type WritableStdin = {
write: (data: string, cb?: (err?: Error | null) => void) => void; write: (data: string, cb?: (err?: Error | null) => void) => void;
end: () => void; end: () => void;
destroyed?: boolean; destroyed?: boolean;
writable?: boolean;
writableEnded?: boolean;
writableFinished?: boolean;
}; };
const DEFAULT_LOG_TAIL_LINES = 200; const DEFAULT_LOG_TAIL_LINES = 200;
@ -50,7 +61,10 @@ function defaultTailNote(totalLines: number, usingDefaultTail: boolean) {
} }
const processSchema = Type.Object({ const processSchema = Type.Object({
action: Type.String({ description: "Process action" }), action: Type.String({
description:
"Process action (list|attach|poll|log|write|send-keys|submit|paste|kill|clear|remove)",
}),
sessionId: Type.Optional(Type.String({ description: "Session id for actions other than list" })), sessionId: Type.Optional(Type.String({ description: "Session id for actions other than list" })),
data: Type.Optional(Type.String({ description: "Data to write for write" })), data: Type.Optional(Type.String({ description: "Data to write for write" })),
keys: Type.Optional( keys: Type.Optional(
@ -72,6 +86,9 @@ const processSchema = Type.Object({
}); });
const MAX_POLL_WAIT_MS = 120_000; const MAX_POLL_WAIT_MS = 120_000;
const DEFAULT_INPUT_WAIT_IDLE_MS = 15_000;
const MIN_INPUT_WAIT_IDLE_MS = 1_000;
const MAX_INPUT_WAIT_IDLE_MS = 10 * 60 * 1000;
function resolvePollWaitMs(value: unknown) { function resolvePollWaitMs(value: unknown) {
if (typeof value === "number" && Number.isFinite(value)) { if (typeof value === "number" && Number.isFinite(value)) {
@ -124,9 +141,58 @@ export function createProcessTool(
setJobTtlMs(defaults.cleanupMs); setJobTtlMs(defaults.cleanupMs);
} }
const scopeKey = defaults?.scopeKey; const scopeKey = defaults?.scopeKey;
const inputWaitIdleMs = clampWithDefault(
defaults?.inputWaitIdleMs ?? readEnvInt("OPENCLAW_PROCESS_INPUT_WAIT_IDLE_MS"),
DEFAULT_INPUT_WAIT_IDLE_MS,
MIN_INPUT_WAIT_IDLE_MS,
MAX_INPUT_WAIT_IDLE_MS,
);
const supervisor = getProcessSupervisor(); const supervisor = getProcessSupervisor();
const isInScope = (session?: { scopeKey?: string } | null) => const isInScope = (session?: { scopeKey?: string } | null) =>
!scopeKey || session?.scopeKey === scopeKey; !scopeKey || session?.scopeKey === scopeKey;
const resolveStdinWritable = (session: ProcessSession) => {
const stdin = session.stdin ?? session.child?.stdin;
if (!stdin || stdin.destroyed) {
return false;
}
if ("writable" in stdin && stdin.writable === false) {
return false;
}
if ("writableEnded" in stdin && stdin.writableEnded === true) {
return false;
}
if ("writableFinished" in stdin && stdin.writableFinished === true) {
return false;
}
return true;
};
const describeRunningSession = (session: ProcessSession) => {
const record = supervisor.getRecord(session.id);
const lastOutputAt = record?.lastOutputAtMs ?? session.startedAt;
const idleMs = Math.max(0, Date.now() - lastOutputAt);
const stdinWritable = resolveStdinWritable(session);
const waitingForInput = stdinWritable && idleMs >= inputWaitIdleMs;
return {
stdinWritable,
waitingForInput,
lastOutputAt,
idleMs,
};
};
const buildInputWaitHint = (
hints: { waitingForInput: boolean; idleMs: number },
opts?: { attachContext?: boolean },
) => {
if (!hints.waitingForInput) {
return "";
}
const idleLabel = formatDurationCompact(hints.idleMs) ?? `${Math.round(hints.idleMs / 1000)}s`;
const summary = `\n\nNo new output for ${idleLabel}; this session may be waiting for input.`;
if (opts?.attachContext) {
return summary;
}
return `${summary} Use process attach, then process write/send-keys/submit to continue.`;
};
const cancelManagedSession = (sessionId: string) => { const cancelManagedSession = (sessionId: string) => {
const record = supervisor.getRecord(sessionId); const record = supervisor.getRecord(sessionId);
@ -150,12 +216,13 @@ export function createProcessTool(
name: "process", name: "process",
label: "process", label: "process",
description: description:
"Manage running exec sessions: list, poll, log, write, send-keys, submit, paste, kill.", "Manage running exec sessions: list, attach, poll, log, write, send-keys, submit, paste, kill.",
parameters: processSchema, parameters: processSchema,
execute: async (_toolCallId, args, _signal, _onUpdate): Promise<AgentToolResult<unknown>> => { execute: async (_toolCallId, args, _signal, _onUpdate): Promise<AgentToolResult<unknown>> => {
const params = args as { const params = args as {
action: action:
| "list" | "list"
| "attach"
| "poll" | "poll"
| "log" | "log"
| "write" | "write"
@ -181,18 +248,25 @@ export function createProcessTool(
if (params.action === "list") { if (params.action === "list") {
const running = listRunningSessions() const running = listRunningSessions()
.filter((s) => isInScope(s)) .filter((s) => isInScope(s))
.map((s) => ({ .map((s) => {
sessionId: s.id, const runtime = describeRunningSession(s);
status: "running", return {
pid: s.pid ?? undefined, sessionId: s.id,
startedAt: s.startedAt, status: "running",
runtimeMs: Date.now() - s.startedAt, pid: s.pid ?? undefined,
cwd: s.cwd, startedAt: s.startedAt,
command: s.command, runtimeMs: Date.now() - s.startedAt,
name: deriveSessionName(s.command), cwd: s.cwd,
tail: s.tail, command: s.command,
truncated: s.truncated, name: deriveSessionName(s.command),
})); tail: s.tail,
truncated: s.truncated,
stdinWritable: runtime.stdinWritable,
waitingForInput: runtime.waitingForInput,
lastOutputAt: runtime.lastOutputAt,
idleMs: runtime.idleMs,
};
});
const finished = listFinishedSessions() const finished = listFinishedSessions()
.filter((s) => isInScope(s)) .filter((s) => isInScope(s))
.map((s) => ({ .map((s) => ({
@ -213,7 +287,11 @@ export function createProcessTool(
.toSorted((a, b) => b.startedAt - a.startedAt) .toSorted((a, b) => b.startedAt - a.startedAt)
.map((s) => { .map((s) => {
const label = s.name ? truncateMiddle(s.name, 80) : truncateMiddle(s.command, 120); const label = s.name ? truncateMiddle(s.name, 80) : truncateMiddle(s.command, 120);
return `${s.sessionId} ${pad(s.status, 9)} ${formatDurationCompact(s.runtimeMs) ?? "n/a"} :: ${label}`; const inputWaitTag =
s.status === "running" && "waitingForInput" in s && s.waitingForInput
? " [input-wait]"
: "";
return `${s.sessionId} ${pad(s.status, 9)} ${formatDurationCompact(s.runtimeMs) ?? "n/a"} :: ${label}${inputWaitTag}`;
}); });
return { return {
content: [ content: [
@ -256,13 +334,13 @@ export function createProcessTool(
result: failedResult(`Session ${params.sessionId} is not backgrounded.`), result: failedResult(`Session ${params.sessionId} is not backgrounded.`),
}; };
} }
const stdin = scopedSession.stdin ?? scopedSession.child?.stdin; if (!resolveStdinWritable(scopedSession)) {
if (!stdin || stdin.destroyed) {
return { return {
ok: false as const, ok: false as const,
result: failedResult(`Session ${params.sessionId} stdin is not writable.`), result: failedResult(`Session ${params.sessionId} stdin is not writable.`),
}; };
} }
const stdin = scopedSession.stdin ?? scopedSession.child?.stdin;
return { ok: true as const, session: scopedSession, stdin: stdin as WritableStdin }; return { ok: true as const, session: scopedSession, stdin: stdin as WritableStdin };
}; };
@ -291,6 +369,81 @@ export function createProcessTool(
}); });
switch (params.action) { switch (params.action) {
case "attach": {
if (scopedSession) {
if (!scopedSession.backgrounded) {
return failText(`Session ${params.sessionId} is not backgrounded.`);
}
const runtime = describeRunningSession(scopedSession);
const window = resolveLogSliceWindow(params.offset, params.limit);
const { slice, totalLines, totalChars } = sliceLogLines(
scopedSession.aggregated,
window.effectiveOffset,
window.effectiveLimit,
);
const logDefaultTailNote = defaultTailNote(totalLines, window.usingDefaultTail);
const waitingHint = buildInputWaitHint(runtime, { attachContext: true });
const controlHint = runtime.stdinWritable
? "\n\nUse process write, send-keys, or submit to provide input."
: "";
return {
content: [
{
type: "text",
text:
(slice || "(no output yet)") + logDefaultTailNote + waitingHint + controlHint,
},
],
details: {
status: "running",
sessionId: params.sessionId,
total: totalLines,
totalLines,
totalChars,
truncated: scopedSession.truncated,
name: deriveSessionName(scopedSession.command),
stdinWritable: runtime.stdinWritable,
waitingForInput: runtime.waitingForInput,
idleMs: runtime.idleMs,
lastOutputAt: runtime.lastOutputAt,
},
};
}
if (scopedFinished) {
const window = resolveLogSliceWindow(params.offset, params.limit);
const { slice, totalLines, totalChars } = sliceLogLines(
scopedFinished.aggregated,
window.effectiveOffset,
window.effectiveLimit,
);
const status = scopedFinished.status === "completed" ? "completed" : "failed";
const logDefaultTailNote = defaultTailNote(totalLines, window.usingDefaultTail);
return {
content: [
{
type: "text",
text:
(slice || "(no output recorded)") +
logDefaultTailNote +
"\n\nSession already exited.",
},
],
details: {
status,
sessionId: params.sessionId,
total: totalLines,
totalLines,
totalChars,
truncated: scopedFinished.truncated,
exitCode: scopedFinished.exitCode ?? undefined,
exitSignal: scopedFinished.exitSignal ?? undefined,
name: deriveSessionName(scopedFinished.command),
},
};
}
return failText(`No session found for ${params.sessionId}`);
}
case "poll": { case "poll": {
if (!scopedSession) { if (!scopedSession) {
if (scopedFinished) { if (scopedFinished) {
@ -353,6 +506,7 @@ export function createProcessTool(
? "completed" ? "completed"
: "failed" : "failed"
: "running"; : "running";
const runtime = describeRunningSession(scopedSession);
const output = [stdout.trimEnd(), stderr.trimEnd()].filter(Boolean).join("\n").trim(); const output = [stdout.trimEnd(), stderr.trimEnd()].filter(Boolean).join("\n").trim();
const hasNewOutput = output.length > 0; const hasNewOutput = output.length > 0;
const retryInMs = exited const retryInMs = exited
@ -371,7 +525,7 @@ export function createProcessTool(
? `\n\nProcess exited with ${ ? `\n\nProcess exited with ${
exitSignal ? `signal ${exitSignal}` : `code ${exitCode}` exitSignal ? `signal ${exitSignal}` : `code ${exitCode}`
}.` }.`
: "\n\nProcess still running."), : buildInputWaitHint(runtime) || "\n\nProcess still running."),
}, },
], ],
details: { details: {
@ -380,6 +534,10 @@ export function createProcessTool(
exitCode: exited ? exitCode : undefined, exitCode: exited ? exitCode : undefined,
aggregated: scopedSession.aggregated, aggregated: scopedSession.aggregated,
name: deriveSessionName(scopedSession.command), name: deriveSessionName(scopedSession.command),
stdinWritable: runtime.stdinWritable,
waitingForInput: runtime.waitingForInput,
idleMs: runtime.idleMs,
lastOutputAt: runtime.lastOutputAt,
...(typeof retryInMs === "number" ? { retryInMs } : {}), ...(typeof retryInMs === "number" ? { retryInMs } : {}),
}, },
}; };
@ -405,6 +563,7 @@ export function createProcessTool(
window.effectiveLimit, window.effectiveLimit,
); );
const logDefaultTailNote = defaultTailNote(totalLines, window.usingDefaultTail); const logDefaultTailNote = defaultTailNote(totalLines, window.usingDefaultTail);
const runtime = describeRunningSession(scopedSession);
return { return {
content: [{ type: "text", text: (slice || "(no output yet)") + logDefaultTailNote }], content: [{ type: "text", text: (slice || "(no output yet)") + logDefaultTailNote }],
details: { details: {
@ -415,6 +574,10 @@ export function createProcessTool(
totalChars, totalChars,
truncated: scopedSession.truncated, truncated: scopedSession.truncated,
name: deriveSessionName(scopedSession.command), name: deriveSessionName(scopedSession.command),
stdinWritable: runtime.stdinWritable,
waitingForInput: runtime.waitingForInput,
idleMs: runtime.idleMs,
lastOutputAt: runtime.lastOutputAt,
}, },
}; };
} }

View File

@ -106,6 +106,7 @@ export async function doctorCommand(
const sourceConfigValid = configResult.sourceConfigValid ?? true; const sourceConfigValid = configResult.sourceConfigValid ?? true;
const configPath = configResult.path ?? CONFIG_PATH; const configPath = configResult.path ?? CONFIG_PATH;
const configSnapshot = await readConfigFileSnapshot().catch(() => null);
if (!cfg.gateway?.mode) { if (!cfg.gateway?.mode) {
const lines = [ const lines = [
"gateway.mode is unset; gateway start will be blocked.", "gateway.mode is unset; gateway start will be blocked.",
@ -191,6 +192,23 @@ export async function doctorCommand(
} }
await noteStateIntegrity(cfg, prompter, configResult.path ?? CONFIG_PATH); await noteStateIntegrity(cfg, prompter, configResult.path ?? CONFIG_PATH);
const operatorPolicy = configSnapshot?.policy;
if (operatorPolicy?.exists) {
const lines = operatorPolicy.valid
? [
`- Active: ${shortenHomePath(operatorPolicy.path)}`,
`- Locked paths: ${operatorPolicy.lockedPaths.length}`,
`- Config edits cannot weaken values enforced by this file.`,
]
: [
`- Invalid operator policy: ${shortenHomePath(operatorPolicy.path)}`,
...operatorPolicy.issues.map((issue) => {
const pathLabel = issue.path || "<root>";
return `- ${pathLabel}: ${issue.message}`;
}),
];
note(lines.join("\n"), "Operator policy");
}
await noteSessionLockHealth({ shouldRepair: prompter.shouldRepair }); await noteSessionLockHealth({ shouldRepair: prompter.shouldRepair });
cfg = await maybeRepairSandboxImages(cfg, runtime, prompter); cfg = await maybeRepairSandboxImages(cfg, runtime, prompter);

View File

@ -266,6 +266,18 @@ export async function statusAllCommand(
).length; ).length;
const overviewRows = [ const overviewRows = [
(() => {
const operatorPolicy = snap?.policy;
if (!operatorPolicy?.exists) {
return { Item: "Operator policy", Value: "not configured" };
}
return {
Item: "Operator policy",
Value: operatorPolicy.valid
? `${operatorPolicy.path} · ${operatorPolicy.lockedPaths.length} locked path${operatorPolicy.lockedPaths.length === 1 ? "" : "s"}`
: `${operatorPolicy.path} · invalid`,
};
})(),
{ Item: "Version", Value: VERSION }, { Item: "Version", Value: VERSION },
{ Item: "OS", Value: osSummary.label }, { Item: "OS", Value: osSummary.label },
{ Item: "Node", Value: process.versions.node }, { Item: "Node", Value: process.versions.node },

View File

@ -43,7 +43,12 @@ import { findLegacyConfigIssues } from "./legacy.js";
import { applyMergePatch } from "./merge-patch.js"; import { applyMergePatch } from "./merge-patch.js";
import { normalizeExecSafeBinProfilesInConfig } from "./normalize-exec-safe-bin.js"; import { normalizeExecSafeBinProfilesInConfig } from "./normalize-exec-safe-bin.js";
import { normalizeConfigPaths } from "./normalize-paths.js"; import { normalizeConfigPaths } from "./normalize-paths.js";
import { resolveConfigPath, resolveDefaultConfigCandidates, resolveStateDir } from "./paths.js"; import {
resolveConfigPath,
resolveDefaultConfigCandidates,
resolveOperatorPolicyPath,
resolveStateDir,
} from "./paths.js";
import { isBlockedObjectKey } from "./prototype-keys.js"; import { isBlockedObjectKey } from "./prototype-keys.js";
import { applyConfigOverrides } from "./runtime-overrides.js"; import { applyConfigOverrides } from "./runtime-overrides.js";
import type { OpenClawConfig, ConfigFileSnapshot, LegacyConfigIssue } from "./types.js"; import type { OpenClawConfig, ConfigFileSnapshot, LegacyConfigIssue } from "./types.js";
@ -631,6 +636,17 @@ type ConfigReadResolution = {
envSnapshotForRestore: Record<string, string | undefined>; envSnapshotForRestore: Record<string, string | undefined>;
}; };
type OperatorPolicyState = {
path: string;
exists: boolean;
valid: boolean;
issues: ConfigFileSnapshot["issues"];
warnings: ConfigFileSnapshot["warnings"];
lockedPaths: string[];
lockedPathSegments: string[][];
resolvedConfig: OpenClawConfig;
};
function resolveConfigIncludesForRead( function resolveConfigIncludesForRead(
parsed: unknown, parsed: unknown,
configPath: string, configPath: string,
@ -665,6 +681,215 @@ function resolveConfigForRead(
}; };
} }
function resolvePolicyConfigForRead(resolvedIncludes: unknown, env: NodeJS.ProcessEnv): unknown {
return resolveConfigEnvVars(resolvedIncludes, env);
}
function formatConfigPathSegments(pathSegments: string[]): string {
if (pathSegments.length === 0) {
return "<root>";
}
let output = "";
for (const segment of pathSegments) {
if (isNumericPathSegment(segment)) {
output += `[${segment}]`;
} else {
output = output ? `${output}.${segment}` : segment;
}
}
return output;
}
function collectLockedPolicyPaths(value: unknown, currentPath: string[], output: string[][]): void {
if (Array.isArray(value)) {
if (currentPath.length > 0) {
output.push(currentPath);
}
return;
}
if (isPlainObject(value)) {
const entries = Object.entries(value);
if (entries.length === 0) {
if (currentPath.length > 0) {
output.push(currentPath);
}
return;
}
for (const [key, child] of entries) {
if (isBlockedObjectKey(key)) {
continue;
}
collectLockedPolicyPaths(child, [...currentPath, key], output);
}
return;
}
if (currentPath.length > 0) {
output.push(currentPath);
}
}
function getValueAtPath(
root: unknown,
pathSegments: string[],
): { found: boolean; value?: unknown } {
let current = root;
for (const segment of pathSegments) {
if (Array.isArray(current)) {
if (!isNumericPathSegment(segment)) {
return { found: false };
}
const index = Number.parseInt(segment, 10);
if (!Number.isFinite(index) || index < 0 || index >= current.length) {
return { found: false };
}
current = current[index];
continue;
}
if (!isPlainObject(current) || !hasOwnObjectKey(current, segment)) {
return { found: false };
}
current = current[segment];
}
return { found: true, value: current };
}
function prefixPolicyIssues(
issues: ConfigFileSnapshot["issues"],
prefix = "operatorPolicy",
): ConfigFileSnapshot["issues"] {
return issues.map((issue) => ({
...issue,
path: issue.path ? `${prefix}.${issue.path}` : prefix,
}));
}
function createOperatorPolicyLockError(lockedPaths: string[]): Error {
const message =
lockedPaths.length === 1
? `Config path locked by operator policy: ${lockedPaths[0]}`
: `Config paths locked by operator policy: ${lockedPaths.join(", ")}`;
const error = new Error(message) as Error & {
code?: string;
lockedPaths?: string[];
};
error.code = "OPERATOR_POLICY_LOCKED";
error.lockedPaths = lockedPaths;
return error;
}
function readOperatorPolicyState(deps: Required<ConfigIoDeps>): OperatorPolicyState {
const path = resolveOperatorPolicyPath(deps.env, resolveStateDir(deps.env, deps.homedir));
const exists = deps.fs.existsSync(path);
if (!exists) {
return {
path,
exists: false,
valid: true,
issues: [],
warnings: [],
lockedPaths: [],
lockedPathSegments: [],
resolvedConfig: {},
};
}
try {
const raw = deps.fs.readFileSync(path, "utf-8");
const parsedRes = parseConfigJson5(raw, deps.json5);
if (!parsedRes.ok) {
return {
path,
exists: true,
valid: false,
issues: [{ path: "", message: `JSON5 parse failed: ${parsedRes.error}` }],
warnings: [],
lockedPaths: [],
lockedPathSegments: [],
resolvedConfig: {},
};
}
let resolved: unknown;
try {
resolved = resolveConfigIncludesForRead(parsedRes.parsed, path, deps);
} catch (err) {
const message =
err instanceof ConfigIncludeError
? err.message
: `Include resolution failed: ${String(err)}`;
return {
path,
exists: true,
valid: false,
issues: [{ path: "", message }],
warnings: [],
lockedPaths: [],
lockedPathSegments: [],
resolvedConfig: {},
};
}
let resolvedConfigRaw: unknown;
try {
resolvedConfigRaw = resolvePolicyConfigForRead(resolved, deps.env);
} catch (err) {
const message =
err instanceof MissingEnvVarError
? err.message
: `Env var substitution failed: ${String(err)}`;
return {
path,
exists: true,
valid: false,
issues: [{ path: "", message }],
warnings: [],
lockedPaths: [],
lockedPathSegments: [],
resolvedConfig: {},
};
}
const validated = validateConfigObjectWithPlugins(resolvedConfigRaw);
if (!validated.ok) {
return {
path,
exists: true,
valid: false,
issues: validated.issues,
warnings: validated.warnings,
lockedPaths: [],
lockedPathSegments: [],
resolvedConfig: {},
};
}
const lockedPathSegments: string[][] = [];
collectLockedPolicyPaths(validated.config, [], lockedPathSegments);
const lockedPaths = lockedPathSegments.map((segments) => formatConfigPathSegments(segments));
return {
path,
exists: true,
valid: true,
issues: [],
warnings: validated.warnings,
lockedPaths,
lockedPathSegments,
resolvedConfig: validated.config,
};
} catch (err) {
return {
path,
exists: true,
valid: false,
issues: [{ path: "", message: `read failed: ${String(err)}` }],
warnings: [],
lockedPaths: [],
lockedPathSegments: [],
resolvedConfig: {},
};
}
}
type ReadConfigFileSnapshotInternalResult = { type ReadConfigFileSnapshotInternalResult = {
snapshot: ConfigFileSnapshot; snapshot: ConfigFileSnapshot;
envSnapshotForRestore?: Record<string, string | undefined>; envSnapshotForRestore?: Record<string, string | undefined>;
@ -725,19 +950,62 @@ export function createConfigIO(overrides: ConfigIoDeps = {}) {
(error as { code?: string; details?: string }).details = details; (error as { code?: string; details?: string }).details = details;
throw error; throw error;
} }
const operatorPolicy = readOperatorPolicyState(deps);
if (!operatorPolicy.valid) {
const details = prefixPolicyIssues(operatorPolicy.issues)
.map((iss) => `- ${iss.path || "<root>"}: ${iss.message}`)
.join("\n");
if (!loggedInvalidConfigs.has(operatorPolicy.path)) {
loggedInvalidConfigs.add(operatorPolicy.path);
deps.logger.error(`Invalid operator policy at ${operatorPolicy.path}:\n${details}`);
}
const error = new Error(`Invalid operator policy at ${operatorPolicy.path}:\n${details}`);
(error as { code?: string; details?: string }).code = "INVALID_CONFIG";
(error as { code?: string; details?: string }).details = details;
throw error;
}
const effectiveConfigRaw = operatorPolicy.exists
? applyMergePatch(resolvedConfig, operatorPolicy.resolvedConfig, {
mergeObjectArraysById: true,
})
: resolvedConfig;
const effectiveValidated = validateConfigObjectWithPlugins(effectiveConfigRaw);
if (!effectiveValidated.ok) {
const details = effectiveValidated.issues
.map((iss) => `- ${iss.path || "<root>"}: ${iss.message}`)
.join("\n");
if (!loggedInvalidConfigs.has(operatorPolicy.path)) {
loggedInvalidConfigs.add(operatorPolicy.path);
deps.logger.error(`Operator policy merge invalid at ${operatorPolicy.path}:\n${details}`);
}
const error = new Error(
`Operator policy merge invalid at ${operatorPolicy.path}:\n${details}`,
);
(error as { code?: string; details?: string }).code = "INVALID_CONFIG";
(error as { code?: string; details?: string }).details = details;
throw error;
}
if (validated.warnings.length > 0) { if (validated.warnings.length > 0) {
const details = validated.warnings const details = validated.warnings
.map((iss) => `- ${iss.path || "<root>"}: ${iss.message}`) .map((iss) => `- ${iss.path || "<root>"}: ${iss.message}`)
.join("\n"); .join("\n");
deps.logger.warn(`Config warnings:\\n${details}`); deps.logger.warn(`Config warnings:\\n${details}`);
} }
warnIfConfigFromFuture(validated.config, deps.logger); if (operatorPolicy.warnings.length > 0) {
const details = prefixPolicyIssues(operatorPolicy.warnings)
.map((iss) => `- ${iss.path || "<root>"}: ${iss.message}`)
.join("\n");
deps.logger.warn(`Operator policy warnings:\n${details}`);
}
warnIfConfigFromFuture(effectiveValidated.config, deps.logger);
const cfg = applyTalkConfigNormalization( const cfg = applyTalkConfigNormalization(
applyModelDefaults( applyModelDefaults(
applyCompactionDefaults( applyCompactionDefaults(
applyContextPruningDefaults( applyContextPruningDefaults(
applyAgentDefaults( applyAgentDefaults(
applySessionDefaults(applyLoggingDefaults(applyMessageDefaults(validated.config))), applySessionDefaults(
applyLoggingDefaults(applyMessageDefaults(effectiveValidated.config)),
),
), ),
), ),
), ),
@ -819,20 +1087,38 @@ export function createConfigIO(overrides: ConfigIoDeps = {}) {
async function readConfigFileSnapshotInternal(): Promise<ReadConfigFileSnapshotInternalResult> { async function readConfigFileSnapshotInternal(): Promise<ReadConfigFileSnapshotInternalResult> {
maybeLoadDotEnvForConfig(deps.env); maybeLoadDotEnvForConfig(deps.env);
const operatorPolicy = readOperatorPolicyState(deps);
const exists = deps.fs.existsSync(configPath); const exists = deps.fs.existsSync(configPath);
if (!exists) { if (!exists) {
const policyIssues = operatorPolicy.valid ? [] : prefixPolicyIssues(operatorPolicy.issues);
const policyWarnings = operatorPolicy.valid
? []
: prefixPolicyIssues(operatorPolicy.warnings);
const hash = hashConfigRaw(null); const hash = hashConfigRaw(null);
const config = applyTalkApiKey( const mergedRaw = operatorPolicy.valid
applyTalkConfigNormalization( ? applyMergePatch({}, operatorPolicy.resolvedConfig, {
applyModelDefaults( mergeObjectArraysById: true,
applyCompactionDefaults( })
applyContextPruningDefaults( : {};
applyAgentDefaults(applySessionDefaults(applyMessageDefaults({}))), const mergedValidated = validateConfigObjectWithPlugins(mergedRaw);
const valid = operatorPolicy.valid && mergedValidated.ok;
const issues = [...policyIssues, ...(mergedValidated.ok ? [] : mergedValidated.issues)];
const warnings = [...policyWarnings, ...(mergedValidated.ok ? mergedValidated.warnings : [])];
const config = valid
? applyTalkApiKey(
applyTalkConfigNormalization(
applyModelDefaults(
applyCompactionDefaults(
applyContextPruningDefaults(
applyAgentDefaults(
applySessionDefaults(applyMessageDefaults(mergedValidated.config)),
),
),
),
), ),
), ),
), )
), : {};
);
const legacyIssues: LegacyConfigIssue[] = []; const legacyIssues: LegacyConfigIssue[] = [];
return { return {
snapshot: { snapshot: {
@ -841,12 +1127,20 @@ export function createConfigIO(overrides: ConfigIoDeps = {}) {
raw: null, raw: null,
parsed: {}, parsed: {},
resolved: {}, resolved: {},
valid: true, valid,
config, config,
hash, hash,
issues: [], issues,
warnings: [], warnings,
legacyIssues, legacyIssues,
policy: {
path: operatorPolicy.path,
exists: operatorPolicy.exists,
valid: operatorPolicy.valid,
lockedPaths: operatorPolicy.lockedPaths,
issues: operatorPolicy.issues,
warnings: operatorPolicy.warnings,
},
}, },
}; };
} }
@ -869,6 +1163,14 @@ export function createConfigIO(overrides: ConfigIoDeps = {}) {
issues: [{ path: "", message: `JSON5 parse failed: ${parsedRes.error}` }], issues: [{ path: "", message: `JSON5 parse failed: ${parsedRes.error}` }],
warnings: [], warnings: [],
legacyIssues: [], legacyIssues: [],
policy: {
path: operatorPolicy.path,
exists: operatorPolicy.exists,
valid: operatorPolicy.valid,
lockedPaths: operatorPolicy.lockedPaths,
issues: operatorPolicy.issues,
warnings: operatorPolicy.warnings,
},
}, },
}; };
} }
@ -895,6 +1197,14 @@ export function createConfigIO(overrides: ConfigIoDeps = {}) {
issues: [{ path: "", message }], issues: [{ path: "", message }],
warnings: [], warnings: [],
legacyIssues: [], legacyIssues: [],
policy: {
path: operatorPolicy.path,
exists: operatorPolicy.exists,
valid: operatorPolicy.valid,
lockedPaths: operatorPolicy.lockedPaths,
issues: operatorPolicy.issues,
warnings: operatorPolicy.warnings,
},
}, },
}; };
} }
@ -920,6 +1230,14 @@ export function createConfigIO(overrides: ConfigIoDeps = {}) {
issues: [{ path: "", message }], issues: [{ path: "", message }],
warnings: [], warnings: [],
legacyIssues: [], legacyIssues: [],
policy: {
path: operatorPolicy.path,
exists: operatorPolicy.exists,
valid: operatorPolicy.valid,
lockedPaths: operatorPolicy.lockedPaths,
issues: operatorPolicy.issues,
warnings: operatorPolicy.warnings,
},
}, },
}; };
} }
@ -929,6 +1247,32 @@ export function createConfigIO(overrides: ConfigIoDeps = {}) {
// entries (for auto-migration) when they are present in the parsed source. // entries (for auto-migration) when they are present in the parsed source.
const legacyIssues = findLegacyConfigIssues(resolvedConfigRaw, parsedRes.parsed); const legacyIssues = findLegacyConfigIssues(resolvedConfigRaw, parsedRes.parsed);
if (!operatorPolicy.valid) {
return {
snapshot: {
path: configPath,
exists: true,
raw,
parsed: parsedRes.parsed,
resolved: coerceConfig(resolvedConfigRaw),
valid: false,
config: coerceConfig(resolvedConfigRaw),
hash,
issues: prefixPolicyIssues(operatorPolicy.issues),
warnings: prefixPolicyIssues(operatorPolicy.warnings),
legacyIssues,
policy: {
path: operatorPolicy.path,
exists: operatorPolicy.exists,
valid: false,
lockedPaths: operatorPolicy.lockedPaths,
issues: operatorPolicy.issues,
warnings: operatorPolicy.warnings,
},
},
};
}
const validated = validateConfigObjectWithPlugins(resolvedConfigRaw); const validated = validateConfigObjectWithPlugins(resolvedConfigRaw);
if (!validated.ok) { if (!validated.ok) {
return { return {
@ -944,17 +1288,62 @@ export function createConfigIO(overrides: ConfigIoDeps = {}) {
issues: validated.issues, issues: validated.issues,
warnings: validated.warnings, warnings: validated.warnings,
legacyIssues, legacyIssues,
policy: {
path: operatorPolicy.path,
exists: operatorPolicy.exists,
valid: operatorPolicy.valid,
lockedPaths: operatorPolicy.lockedPaths,
issues: operatorPolicy.issues,
warnings: operatorPolicy.warnings,
},
},
};
}
const effectiveConfigRaw = operatorPolicy.exists
? applyMergePatch(resolvedConfigRaw, operatorPolicy.resolvedConfig, {
mergeObjectArraysById: true,
})
: resolvedConfigRaw;
const effectiveValidated = validateConfigObjectWithPlugins(effectiveConfigRaw);
if (!effectiveValidated.ok) {
return {
snapshot: {
path: configPath,
exists: true,
raw,
parsed: parsedRes.parsed,
resolved: coerceConfig(resolvedConfigRaw),
valid: false,
config: coerceConfig(effectiveConfigRaw),
hash,
issues: effectiveValidated.issues,
warnings: [
...validated.warnings,
...operatorPolicy.warnings,
...effectiveValidated.warnings,
],
legacyIssues,
policy: {
path: operatorPolicy.path,
exists: operatorPolicy.exists,
valid: operatorPolicy.valid,
lockedPaths: operatorPolicy.lockedPaths,
issues: operatorPolicy.issues,
warnings: operatorPolicy.warnings,
},
}, },
}; };
} }
warnIfConfigFromFuture(validated.config, deps.logger); warnIfConfigFromFuture(effectiveValidated.config, deps.logger);
const snapshotConfig = normalizeConfigPaths( const snapshotConfig = normalizeConfigPaths(
applyTalkApiKey( applyTalkApiKey(
applyTalkConfigNormalization( applyTalkConfigNormalization(
applyModelDefaults( applyModelDefaults(
applyAgentDefaults( applyAgentDefaults(
applySessionDefaults(applyLoggingDefaults(applyMessageDefaults(validated.config))), applySessionDefaults(
applyLoggingDefaults(applyMessageDefaults(effectiveValidated.config)),
),
), ),
), ),
), ),
@ -974,8 +1363,20 @@ export function createConfigIO(overrides: ConfigIoDeps = {}) {
config: snapshotConfig, config: snapshotConfig,
hash, hash,
issues: [], issues: [],
warnings: validated.warnings, warnings: [
...validated.warnings,
...operatorPolicy.warnings,
...effectiveValidated.warnings,
],
legacyIssues, legacyIssues,
policy: {
path: operatorPolicy.path,
exists: operatorPolicy.exists,
valid: operatorPolicy.valid,
lockedPaths: operatorPolicy.lockedPaths,
issues: operatorPolicy.issues,
warnings: operatorPolicy.warnings,
},
}, },
envSnapshotForRestore: readResolution.envSnapshotForRestore, envSnapshotForRestore: readResolution.envSnapshotForRestore,
}; };
@ -1012,6 +1413,14 @@ export function createConfigIO(overrides: ConfigIoDeps = {}) {
issues: [{ path: "", message }], issues: [{ path: "", message }],
warnings: [], warnings: [],
legacyIssues: [], legacyIssues: [],
policy: {
path: operatorPolicy.path,
exists: operatorPolicy.exists,
valid: operatorPolicy.valid,
lockedPaths: operatorPolicy.lockedPaths,
issues: operatorPolicy.issues,
warnings: operatorPolicy.warnings,
},
}, },
}; };
} }
@ -1037,6 +1446,14 @@ export function createConfigIO(overrides: ConfigIoDeps = {}) {
clearConfigCache(); clearConfigCache();
let persistCandidate: unknown = cfg; let persistCandidate: unknown = cfg;
const { snapshot } = await readConfigFileSnapshotInternal(); const { snapshot } = await readConfigFileSnapshotInternal();
const operatorPolicy = readOperatorPolicyState(deps);
if (!operatorPolicy.valid) {
const prefixedIssues = prefixPolicyIssues(operatorPolicy.issues);
const details = prefixedIssues
.map((issue) => `${issue.path || "<root>"}: ${issue.message}`)
.join("; ");
throw new Error(`Invalid operator policy at ${operatorPolicy.path}: ${details}`);
}
let envRefMap: Map<string, string> | null = null; let envRefMap: Map<string, string> | null = null;
let changedPaths: Set<string> | null = null; let changedPaths: Set<string> | null = null;
if (snapshot.valid && snapshot.exists) { if (snapshot.valid && snapshot.exists) {
@ -1066,6 +1483,22 @@ export function createConfigIO(overrides: ConfigIoDeps = {}) {
} }
} }
if (operatorPolicy.valid && operatorPolicy.exists) {
const conflictingLockedPaths = operatorPolicy.lockedPathSegments
.filter((pathSegments) => {
const candidateValue = getValueAtPath(persistCandidate, pathSegments);
if (!candidateValue.found) {
return false;
}
const policyValue = getValueAtPath(operatorPolicy.resolvedConfig, pathSegments);
return !policyValue.found || !isDeepStrictEqual(candidateValue.value, policyValue.value);
})
.map((pathSegments) => formatConfigPathSegments(pathSegments));
if (conflictingLockedPaths.length > 0) {
throw createOperatorPolicyLockError(conflictingLockedPaths);
}
}
const validated = validateConfigObjectRawWithPlugins(persistCandidate); const validated = validateConfigObjectRawWithPlugins(persistCandidate);
if (!validated.ok) { if (!validated.ok) {
const issue = validated.issues[0]; const issue = validated.issues[0];
@ -1117,8 +1550,12 @@ export function createConfigIO(overrides: ConfigIoDeps = {}) {
? (restoreEnvRefsFromMap(cfgToWrite, "", envRefMap, changedPaths) as OpenClawConfig) ? (restoreEnvRefsFromMap(cfgToWrite, "", envRefMap, changedPaths) as OpenClawConfig)
: cfgToWrite; : cfgToWrite;
let outputConfig = outputConfigBase; let outputConfig = outputConfigBase;
if (options.unsetPaths?.length) { const effectiveUnsetPaths = [
for (const unsetPath of options.unsetPaths) { ...(options.unsetPaths ?? []),
...(operatorPolicy.valid ? operatorPolicy.lockedPathSegments : []),
];
if (effectiveUnsetPaths.length) {
for (const unsetPath of effectiveUnsetPaths) {
if (!Array.isArray(unsetPath) || unsetPath.length === 0) { if (!Array.isArray(unsetPath) || unsetPath.length === 0) {
continue; continue;
} }

View File

@ -47,6 +47,13 @@ describe("config io write", () => {
return { configPath, io, snapshot }; return { configPath, io, snapshot };
} }
async function writeOperatorPolicy(params: { home: string; policy: Record<string, unknown> }) {
const policyPath = path.join(params.home, ".openclaw", "operator-policy.json5");
await fs.mkdir(path.dirname(policyPath), { recursive: true });
await fs.writeFile(policyPath, JSON.stringify(params.policy, null, 2), "utf-8");
return policyPath;
}
async function writeTokenAuthAndReadConfig(params: { async function writeTokenAuthAndReadConfig(params: {
io: { writeConfigFile: (config: Record<string, unknown>) => Promise<void> }; io: { writeConfigFile: (config: Record<string, unknown>) => Promise<void> };
snapshot: { config: Record<string, unknown> }; snapshot: { config: Record<string, unknown> };
@ -142,6 +149,86 @@ describe("config io write", () => {
}); });
}); });
it("applies immutable operator policy to the effective config snapshot", async () => {
await withSuiteHome(async (home) => {
await writeOperatorPolicy({
home,
policy: {
tools: { profile: "messaging" },
approvals: { exec: { enabled: false } },
},
});
const { io, snapshot } = await writeConfigAndCreateIo({
home,
initialConfig: { gateway: { mode: "local" } },
});
const reloaded = await io.readConfigFileSnapshot();
expect(reloaded.valid).toBe(true);
expect(reloaded.config.gateway?.mode).toBe("local");
expect(reloaded.config.tools?.profile).toBe("messaging");
expect(reloaded.config.approvals?.exec?.enabled).toBe(false);
expect(reloaded.resolved.tools?.profile).toBeUndefined();
expect(reloaded.policy?.exists).toBe(true);
expect(reloaded.policy?.valid).toBe(true);
expect(reloaded.policy?.lockedPaths).toEqual(
expect.arrayContaining(["tools.profile", "approvals.exec.enabled"]),
);
expect(snapshot.policy?.lockedPaths).toEqual(reloaded.policy?.lockedPaths);
});
});
it("rejects writes that conflict with locked operator policy paths", async () => {
await withSuiteHome(async (home) => {
await writeOperatorPolicy({
home,
policy: {
tools: { profile: "messaging" },
},
});
const { io, snapshot } = await writeConfigAndCreateIo({
home,
initialConfig: { gateway: { mode: "local" } },
});
const next = structuredClone(snapshot.config);
next.tools = {
...next.tools,
profile: "full",
};
await expect(io.writeConfigFile(next)).rejects.toThrow(
"Config path locked by operator policy: tools.profile",
);
});
});
it("keeps locked operator policy paths out of the mutable config file", async () => {
await withSuiteHome(async (home) => {
await writeOperatorPolicy({
home,
policy: {
tools: { profile: "messaging" },
},
});
const { configPath, io, snapshot } = await writeConfigAndCreateIo({
home,
initialConfig: { gateway: { mode: "local" } },
});
const next = structuredClone(snapshot.config);
next.gateway = { mode: "remote" };
await io.writeConfigFile(next);
const persisted = JSON.parse(await fs.readFile(configPath, "utf-8")) as {
gateway?: { mode?: string };
tools?: { profile?: string };
};
expect(persisted.gateway?.mode).toBe("remote");
expect(persisted.tools?.profile).toBeUndefined();
});
});
it('shows actionable guidance for dmPolicy="open" without wildcard allowFrom', async () => { it('shows actionable guidance for dmPolicy="open" without wildcard allowFrom', async () => {
await withSuiteHome(async (home) => { await withSuiteHome(async (home) => {
const io = createConfigIO({ const io = createConfigIO({

View File

@ -21,6 +21,7 @@ export const isNixMode = resolveIsNixMode();
const LEGACY_STATE_DIRNAMES = [".clawdbot", ".moldbot", ".moltbot"] as const; const LEGACY_STATE_DIRNAMES = [".clawdbot", ".moldbot", ".moltbot"] as const;
const NEW_STATE_DIRNAME = ".openclaw"; const NEW_STATE_DIRNAME = ".openclaw";
const CONFIG_FILENAME = "openclaw.json"; const CONFIG_FILENAME = "openclaw.json";
const OPERATOR_POLICY_FILENAME = "operator-policy.json5";
const LEGACY_CONFIG_FILENAMES = ["clawdbot.json", "moldbot.json", "moltbot.json"] as const; const LEGACY_CONFIG_FILENAMES = ["clawdbot.json", "moldbot.json", "moltbot.json"] as const;
function resolveDefaultHomeDir(): string { function resolveDefaultHomeDir(): string {
@ -193,6 +194,22 @@ export function resolveConfigPath(
export const CONFIG_PATH = resolveConfigPathCandidate(); export const CONFIG_PATH = resolveConfigPathCandidate();
/**
 * Immutable operator policy overlay file (JSON5 subset of OpenClawConfig).
 * Can be overridden via OPENCLAW_OPERATOR_POLICY_PATH.
 * Default: ~/.openclaw/operator-policy.json5 (or $OPENCLAW_STATE_DIR/operator-policy.json5)
 */
export function resolveOperatorPolicyPath(
  env: NodeJS.ProcessEnv = process.env,
  stateDir: string = resolveStateDir(env, envHomedir(env)),
): string {
  // An explicit env override (trimmed, non-empty) wins over the state dir.
  const explicit = env.OPENCLAW_OPERATOR_POLICY_PATH?.trim();
  return explicit
    ? resolveUserPath(explicit, env, envHomedir(env))
    : path.join(stateDir, OPERATOR_POLICY_FILENAME);
}
/** /**
* Resolve default config path candidates across default locations. * Resolve default config path candidates across default locations.
 * Order: explicit config path → state-dir-derived paths → new default.	 * Order: explicit config path → state-dir-derived paths → new default.

View File

@ -128,6 +128,15 @@ export type LegacyConfigIssue = {
message: string; message: string;
}; };
/**
 * Snapshot of the immutable operator policy overlay as observed when the
 * config file snapshot was taken.
 */
export type ConfigOperatorPolicySnapshot = {
  // Resolved filesystem path of the operator policy file.
  path: string;
  // Whether a policy file was found at `path`.
  exists: boolean;
  // Whether the policy file parsed/validated successfully.
  valid: boolean;
  // Dotted config paths the policy locks (e.g. "tools.profile").
  lockedPaths: string[];
  // Validation errors reported for the policy file.
  issues: ConfigValidationIssue[];
  // Non-fatal validation warnings for the policy file.
  warnings: ConfigValidationIssue[];
};
export type ConfigFileSnapshot = { export type ConfigFileSnapshot = {
path: string; path: string;
exists: boolean; exists: boolean;
@ -145,4 +154,5 @@ export type ConfigFileSnapshot = {
issues: ConfigValidationIssue[]; issues: ConfigValidationIssue[];
warnings: ConfigValidationIssue[]; warnings: ConfigValidationIssue[];
legacyIssues: LegacyConfigIssue[]; legacyIssues: LegacyConfigIssue[];
policy?: ConfigOperatorPolicySnapshot;
}; };

View File

@ -47,6 +47,19 @@ import { parseRestartRequestParams } from "./restart-request.js";
import type { GatewayRequestHandlers, RespondFn } from "./types.js"; import type { GatewayRequestHandlers, RespondFn } from "./types.js";
import { assertValidParams } from "./validation.js"; import { assertValidParams } from "./validation.js";
/**
 * Translate operator-policy write failures into an INVALID_REQUEST response.
 * Returns true when the error was recognized and a response was sent;
 * false means the caller should rethrow.
 */
function respondConfigWriteError(respond: RespondFn, error: unknown): boolean {
  const message = error instanceof Error ? error.message : String(error);
  let code: string | undefined;
  if (error && typeof error === "object" && "code" in error) {
    const candidate = (error as { code?: unknown }).code;
    if (typeof candidate === "string") {
      code = candidate;
    }
  }
  const isPolicyFailure =
    code === "OPERATOR_POLICY_LOCKED" ||
    message.startsWith("Invalid operator policy at ");
  if (!isPolicyFailure) {
    return false;
  }
  respond(false, undefined, errorShape(ErrorCodes.INVALID_REQUEST, message));
  return true;
}
function requireConfigBaseHash( function requireConfigBaseHash(
params: unknown, params: unknown,
snapshot: Awaited<ReturnType<typeof readConfigFileSnapshot>>, snapshot: Awaited<ReturnType<typeof readConfigFileSnapshot>>,
@ -270,7 +283,14 @@ export const configHandlers: GatewayRequestHandlers = {
if (!parsed) { if (!parsed) {
return; return;
} }
await writeConfigFile(parsed.config, writeOptions); try {
await writeConfigFile(parsed.config, writeOptions);
} catch (error) {
if (respondConfigWriteError(respond, error)) {
return;
}
throw error;
}
respond( respond(
true, true,
{ {
@ -360,7 +380,14 @@ export const configHandlers: GatewayRequestHandlers = {
context?.logGateway?.info( context?.logGateway?.info(
`config.patch write ${formatControlPlaneActor(actor)} changedPaths=${summarizeChangedPaths(changedPaths)} restartReason=config.patch`, `config.patch write ${formatControlPlaneActor(actor)} changedPaths=${summarizeChangedPaths(changedPaths)} restartReason=config.patch`,
); );
await writeConfigFile(validated.config, writeOptions); try {
await writeConfigFile(validated.config, writeOptions);
} catch (error) {
if (respondConfigWriteError(respond, error)) {
return;
}
throw error;
}
const { sessionKey, note, restartDelayMs, deliveryContext, threadId } = const { sessionKey, note, restartDelayMs, deliveryContext, threadId } =
resolveConfigRestartRequest(params); resolveConfigRestartRequest(params);
@ -420,7 +447,14 @@ export const configHandlers: GatewayRequestHandlers = {
context?.logGateway?.info( context?.logGateway?.info(
`config.apply write ${formatControlPlaneActor(actor)} changedPaths=${summarizeChangedPaths(changedPaths)} restartReason=config.apply`, `config.apply write ${formatControlPlaneActor(actor)} changedPaths=${summarizeChangedPaths(changedPaths)} restartReason=config.apply`,
); );
await writeConfigFile(parsed.config, writeOptions); try {
await writeConfigFile(parsed.config, writeOptions);
} catch (error) {
if (respondConfigWriteError(respond, error)) {
return;
}
throw error;
}
const { sessionKey, note, restartDelayMs, deliveryContext, threadId } = const { sessionKey, note, restartDelayMs, deliveryContext, threadId } =
resolveConfigRestartRequest(params); resolveConfigRestartRequest(params);

View File

@ -34,6 +34,7 @@ async function createAdapterHarness(params?: {
pid?: number; pid?: number;
argv?: string[]; argv?: string[];
env?: NodeJS.ProcessEnv; env?: NodeJS.ProcessEnv;
stdinMode?: "inherit" | "pipe-open" | "pipe-closed";
}) { }) {
const { child, killMock } = createStubChild(params?.pid); const { child, killMock } = createStubChild(params?.pid);
spawnWithFallbackMock.mockResolvedValue({ spawnWithFallbackMock.mockResolvedValue({
@ -43,7 +44,7 @@ async function createAdapterHarness(params?: {
const adapter = await createChildAdapter({ const adapter = await createChildAdapter({
argv: params?.argv ?? ["node", "-e", "setTimeout(() => {}, 1000)"], argv: params?.argv ?? ["node", "-e", "setTimeout(() => {}, 1000)"],
env: params?.env, env: params?.env,
stdinMode: "pipe-open", stdinMode: params?.stdinMode ?? "pipe-open",
}); });
return { adapter, killMock }; return { adapter, killMock };
} }
@ -114,4 +115,20 @@ describe("createChildAdapter", () => {
}; };
expect(spawnArgs.options?.env).toEqual({ FOO: "bar", COUNT: "12" }); expect(spawnArgs.options?.env).toEqual({ FOO: "bar", COUNT: "12" });
}); });
it("marks stdin as destroyed when launched with pipe-closed", async () => {
  const { adapter } = await createAdapterHarness({
    pid: 7777,
    argv: ["node", "-e", "setTimeout(() => {}, 1000)"],
    stdinMode: "pipe-closed",
  });
  // stdin should already be reported closed at launch...
  expect(adapter.stdin?.destroyed).toBe(true);
  // ...and a write must fail via the callback rather than throwing.
  const writeError = await new Promise<Error | null | undefined>((resolve) => {
    adapter.stdin?.write("input", (err) => resolve(err));
  });
  expect(writeError).toBeTruthy();
});
}); });

View File

@ -68,19 +68,31 @@ export async function createChildAdapter(params: {
}); });
const child = spawned.child as ChildProcessWithoutNullStreams; const child = spawned.child as ChildProcessWithoutNullStreams;
let stdinDestroyed = params.input !== undefined || stdinMode === "pipe-closed";
if (child.stdin) { if (child.stdin) {
if (params.input !== undefined) { if (params.input !== undefined) {
child.stdin.write(params.input); child.stdin.write(params.input);
child.stdin.end(); child.stdin.end();
stdinDestroyed = true;
} else if (stdinMode === "pipe-closed") { } else if (stdinMode === "pipe-closed") {
child.stdin.end(); child.stdin.end();
stdinDestroyed = true;
} }
child.stdin.on("close", () => {
stdinDestroyed = true;
});
} }
const stdin: ManagedRunStdin | undefined = child.stdin const stdin: ManagedRunStdin | undefined = child.stdin
? { ? {
destroyed: false, get destroyed() {
return stdinDestroyed;
},
write: (data: string, cb?: (err?: Error | null) => void) => { write: (data: string, cb?: (err?: Error | null) => void) => {
if (stdinDestroyed) {
cb?.(new Error("stdin is not writable"));
return;
}
try { try {
child.stdin.write(data, cb); child.stdin.write(data, cb);
} catch (err) { } catch (err) {
@ -88,14 +100,22 @@ export async function createChildAdapter(params: {
} }
}, },
end: () => { end: () => {
if (stdinDestroyed) {
return;
}
try { try {
stdinDestroyed = true;
child.stdin.end(); child.stdin.end();
} catch { } catch {
// ignore close errors // ignore close errors
} }
}, },
destroy: () => { destroy: () => {
if (stdinDestroyed) {
return;
}
try { try {
stdinDestroyed = true;
child.stdin.destroy(); child.stdin.destroy();
} catch { } catch {
// ignore destroy errors // ignore destroy errors

View File

@ -65,6 +65,7 @@ export async function createPtyAdapter(params: {
let waitPromise: Promise<{ code: number | null; signal: NodeJS.Signals | number | null }> | null = let waitPromise: Promise<{ code: number | null; signal: NodeJS.Signals | number | null }> | null =
null; null;
let forceKillWaitFallbackTimer: NodeJS.Timeout | null = null; let forceKillWaitFallbackTimer: NodeJS.Timeout | null = null;
let stdinDestroyed = false;
const clearForceKillWaitFallback = () => { const clearForceKillWaitFallback = () => {
if (!forceKillWaitFallbackTimer) { if (!forceKillWaitFallbackTimer) {
@ -104,8 +105,14 @@ export async function createPtyAdapter(params: {
}) ?? null; }) ?? null;
const stdin: ManagedRunStdin = { const stdin: ManagedRunStdin = {
destroyed: false, get destroyed() {
return stdinDestroyed;
},
write: (data, cb) => { write: (data, cb) => {
if (stdinDestroyed) {
cb?.(new Error("stdin is not writable"));
return;
}
try { try {
pty.write(data); pty.write(data);
cb?.(null); cb?.(null);
@ -114,7 +121,11 @@ export async function createPtyAdapter(params: {
} }
}, },
end: () => { end: () => {
if (stdinDestroyed) {
return;
}
try { try {
stdinDestroyed = true;
const eof = process.platform === "win32" ? "\x1a" : "\x04"; const eof = process.platform === "win32" ? "\x1a" : "\x04";
pty.write(eof); pty.write(eof);
} catch { } catch {
@ -183,6 +194,7 @@ export async function createPtyAdapter(params: {
// ignore disposal errors // ignore disposal errors
} }
clearForceKillWaitFallback(); clearForceKillWaitFallback();
stdinDestroyed = true;
dataListener = null; dataListener = null;
exitListener = null; exitListener = null;
settleWait({ code: null, signal: null }); settleWait({ code: null, signal: null });