<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
        xmlns:news="http://www.google.com/schemas/sitemap-news/0.9">
  <url>
    <loc>https://blog.redwoodresearch.org/p/fail-safer-at-alignment-by-channeling</loc>
    <news:news>
      <news:publication>
        <news:name>Redwood Research blog</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-27T17:42:05+00:00</news:publication_date>
      <news:title>Fail safe(r) at alignment by channeling reward-hacking into a "spillway" motivation</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://blog.redwoodresearch.org/p/ai-companies-should-publish-security</loc>
    <news:news>
      <news:publication>
        <news:name>Redwood Research blog</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-27T12:03:38+00:00</news:publication_date>
      <news:title>AI companies should publish security assessments</news:title>
    </news:news>
  </url>
</urlset>