<?xml version="1.0" encoding="UTF-8"?>
<source>
  <jobs>
    <job>
      <Externalid>fab21c7e-6bf</Externalid>
      <Title>Research Engineer / Scientist, Alignment Science - London</Title>
      <Description><![CDATA[<p>About the role:</p>
<p>You will contribute to exploratory experimental research on AI safety, with a focus on risks from powerful future systems. As a Research Engineer on Alignment Science, you'll work on creating methods to ensure advanced AI systems remain safe and harmless in unfamiliar or adversarial scenarios.</p>
<p>Responsibilities:</p>
<ul>
<li>Conduct research on AI control and alignment stress-testing</li>
<li>Develop and implement new techniques for ensuring AI safety</li>
<li>Collaborate with other teams, including Interpretability, Fine-Tuning, and the Frontier Red Team</li>
<li>Test and evaluate the effectiveness of AI safety techniques</li>
</ul>
<p>Requirements:</p>
<ul>
<li>Significant software, ML, or research engineering experience</li>
<li>Familiarity with technical AI safety research</li>
<li>Experience contributing to empirical AI research projects</li>
</ul>
<p>Preferred qualifications:</p>
<ul>
<li>Experience authoring research papers in machine learning, NLP, or AI safety</li>
<li>Experience with LLMs</li>
<li>Experience with reinforcement learning</li>
</ul>
<p>Benefits:</p>
<ul>
<li>Competitive compensation and benefits</li>
<li>Optional equity donation matching</li>
<li>Generous vacation and parental leave</li>
<li>Flexible working hours</li>
</ul>
<p>Note:</p>
<p>This role requires all candidates to be based at least 25% in London and travel to San Francisco occasionally.</p>
<p style="margin-top:24px;font-size:13px;color:#666;">XML job scraping automation by <a href="https://yubhub.co">YubHub</a></p>]]></Description>
      <Jobtype>full-time</Jobtype>
      <Experiencelevel>senior</Experiencelevel>
      <Workarrangement>hybrid</Workarrangement>
      <Salaryrange>£260,000-£370,000 GBP</Salaryrange>
      <Skills>software engineering, machine learning, research engineering, AI safety, technical AI safety research, research paper authoring, LLMs, reinforcement learning</Skills>
      <Category>Engineering</Category>
      <Industry>Technology</Industry>
      <Employername>Anthropic</Employername>
      <Employerlogo>https://logos.yubhub.co/anthropic.com.png</Employerlogo>
      <Employerdescription>Anthropic is a public benefit corporation that aims to create reliable, interpretable, and steerable AI systems.</Employerdescription>
      <Employerwebsite>https://www.anthropic.com/</Employerwebsite>
      <Compensationcurrency>GBP</Compensationcurrency>
      <Compensationmin>260000</Compensationmin>
      <Compensationmax>370000</Compensationmax>
      <Applyto>https://job-boards.greenhouse.io/anthropic/jobs/4610158008</Applyto>
      <Location>London, UK</Location>
      <Country>United Kingdom</Country>
      <Postedate>2026-04-18</Postedate>
    </job>
  </jobs>
</source>