Compare commits

...

1 Commits

Author SHA1 Message Date
Peter Steinberger
e7fa532bc6 fix: land mac talk-mode taskHint (#38445) (thanks @dmiv) 2026-03-08 13:51:57 +00:00
4 changed files with 25 additions and 5 deletions

View File

@@ -11481,7 +11481,7 @@
"filename": "src/agents/models-config.e2e-harness.ts",
"hashed_secret": "7cf31e8b6cda49f70c31f1f25af05d46f924142d",
"is_verified": false,
"line_number": 130
"line_number": 131
}
],
"src/agents/models-config.fills-missing-provider-apikey-from-env-var.e2e.test.ts": [
@@ -13034,5 +13034,5 @@
}
]
},
"generated_at": "2026-03-08T05:05:36Z"
"generated_at": "2026-03-08T13:49:29Z"
}

View File

@@ -14,6 +14,7 @@ Docs: https://docs.openclaw.ai
- Mattermost replies: keep `root_id` pinned to the existing thread root when an agent replies inside a thread, while still using reply-target threading for top-level posts. (#27744) thanks @hnykda.
- Agents/failover: detect Amazon Bedrock `Too many tokens per day` quota errors as rate limits across fallback, cron retry, and memory embeddings while keeping context-window `too many tokens per request` errors out of the rate-limit lane. (#39377) Thanks @gambletan.
- Android/Play distribution: remove self-update, background location, `screen.record`, and background mic capture from the Android app, narrow the foreground service to `dataSync` only, and clean up the legacy `location.enabledMode=always` preference migration. (#39660) Thanks @obviyus.
- macOS Talk Mode: set the speech recognition request `taskHint` to `.dictation` for mic capture, and add regression coverage for the request defaults. (#38445) Thanks @dmiv.
## 2026.3.7

View File

@@ -70,6 +70,11 @@ actor TalkModeRuntime {
private let minSpeechRMS: Double = 1e-3
private let speechBoostFactor: Double = 6.0
/// Applies the Talk Mode defaults to a speech recognition request:
/// a dictation task hint plus live partial results. Shared between the
/// runtime's mic-capture path and the regression tests.
static func configureRecognitionRequest(_ request: SFSpeechAudioBufferRecognitionRequest) {
    request.taskHint = .dictation
    request.shouldReportPartialResults = true
}
// MARK: - Lifecycle
func setEnabled(_ enabled: Bool) async {
@@ -176,9 +181,9 @@ actor TalkModeRuntime {
return
}
self.recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
self.recognitionRequest?.shouldReportPartialResults = true
guard let request = self.recognitionRequest else { return }
let request = SFSpeechAudioBufferRecognitionRequest()
Self.configureRecognitionRequest(request)
self.recognitionRequest = request
if self.audioEngine == nil {
self.audioEngine = AVAudioEngine()

View File

@@ -0,0 +1,14 @@
import Speech
import Testing
@testable import OpenClaw
/// Regression coverage for the Talk Mode speech-recognition request defaults (#38445).
struct TalkModeRuntimeSpeechTests {
    /// The shared configuration helper must opt into a dictation task hint
    /// and live partial results.
    @Test("speech request uses dictation defaults")
    func speechRequestUsesDictationDefaults() {
        let recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
        TalkModeRuntime.configureRecognitionRequest(recognitionRequest)
        #expect(recognitionRequest.taskHint == .dictation)
        #expect(recognitionRequest.shouldReportPartialResults)
    }
}