{"version":"0.1","company":{"name":"YubHub","url":"https://yubhub.co","jobsUrl":"https://yubhub.co/jobs/skill/ml-workflows"},"x-facet":{"type":"skill","slug":"ml-workflows","display":"ML Workflows","count":5},"x-feed-size-limit":100,"x-feed-sort":"enriched_at desc","x-feed-notice":"This feed contains at most 100 jobs (the most recently enriched). For the full corpus, use the paginated /stats/by-facet endpoint or /search.","x-generator":"yubhub-xml-generator","x-rights":"Free to redistribute with attribution: \"Data by YubHub (https://yubhub.co)\"","x-schema":"Each entry in `jobs` follows https://schema.org/JobPosting. YubHub-native raw fields carry `x-` prefix.","jobs":[{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_c81cbaa1-56a"},"title":"Engineering Technical Program Manager - W&B Platform","description":"<p>The Weights &amp; Biases (W&amp;B) team builds the developer platform trusted by machine learning practitioners to track, manage, and scale their ML workflows. 
As a Technical Program Manager focused on platform reliability and release management, you&#39;ll be at the centre of our platform&#39;s growth and stability.</p>\n<p>You will partner with engineering teams within W&amp;B and CoreWeave AI/ML Platform Services (AMPS) to ensure W&amp;B integrates seamlessly into the broader ML ecosystem, while maintaining high reliability and predictable releases.</p>\n<p>This role is ideal for someone who thrives in cross-functional environments, has a strong grasp of developer workflows, and excels at creating repeatable, reliable program structures that scale.</p>\n<p><strong>Responsibilities</strong></p>\n<ul>\n<li>Drive end-to-end program management for critical platform initiatives.</li>\n<li>Build and run release management processes, ensuring predictable and high-quality delivery cycles.</li>\n<li>Partner with engineering and product to define success metrics, manage risks, and ensure on-time delivery.</li>\n<li>Build and scale incident management and RCA processes for W&amp;B services.</li>\n<li>Improve the predictability and visibility of releases across teams, introducing dashboards, retrospectives, and program forums.</li>\n<li>Collaborate with TPMs and engineering leaders across W&amp;B and CoreWeave to ensure end-to-end reliability across the ML developer stack.</li>\n</ul>\n<p><strong>Qualifications</strong></p>\n<ul>\n<li>Bachelor&#39;s degree in a technical field or equivalent experience.</li>\n<li>5+ years of program management experience in SaaS, developer tools, or ML/AI platforms.</li>\n<li>Proven experience running release management programs and incident management processes.</li>\n<li>Strong technical fluency in cloud computing, developer workflows, and CI/CD practices.</li>\n<li>Excellent communication and facilitation skills with diverse technical and non-technical audiences.</li>\n<li>Track record of improving reliability, efficiency, and predictability in software 
delivery.</li>\n</ul>\n<p><strong>Additional Qualifications</strong></p>\n<ul>\n<li>Familiarity with ML workflows, model training/inference, and developer productivity tools.</li>\n<li>Experience building integrations between SaaS platforms, APIs, and cloud services.</li>\n<li>Strong background in reliability engineering practices and DevOps program leadership.</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_c81cbaa1-56a","directApply":true,"hiringOrganization":{"@type":"Organization","name":"CoreWeave","sameAs":"https://www.coreweave.com","logo":"https://logos.yubhub.co/coreweave.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/coreweave/jobs/4610109006","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$177,000 to $237,000","x-skills-required":["cloud computing","developer workflows","CI/CD practices","program management","release management","incident management","reliability engineering"],"x-skills-preferred":["ML workflows","model training/inference","developer productivity tools","integration between SaaS platforms, APIs, and cloud services"],"datePosted":"2026-04-18T15:56:43.785Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"cloud computing, developer workflows, CI/CD practices, program management, release management, incident management, reliability engineering, ML workflows, model training/inference, developer productivity tools, integration between SaaS platforms, APIs, and cloud 
services","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":177000,"maxValue":237000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_48e2e160-bde"},"title":"Senior Solutions Architect - Weights & Biases","description":"<p>Our Solutions Architecture team at Weights &amp; Biases is a unique hybrid organization, combining the deep technical skills of Site Reliability Engineering with the consultative expertise of Solutions Architecture. We focus on ensuring customers can successfully deploy and operate W&amp;B across cloud and on-prem environments while delivering a best-in-class experience that accelerates ML adoption at scale.</p>\n<p>As a Solutions Architect, you will be responsible for managing complex customer deployments across AWS, GCP, Azure, and on-prem environments. You’ll partner directly with customer engineering teams to provision and monitor services, debug and resolve infrastructure issues, and ensure performance and scalability using SRE best practices. This role blends hands-on technical problem-solving with customer-facing engagement, including technical discussions, demos, workshops, and enablement content creation. You’ll work closely with Sales Engineering, Field Engineering, Support, and Product to drive adoption and influence our product roadmap based on customer feedback.</p>\n<p>We believe in investing in our people, and value candidates who can bring their own diversified experiences to our teams – even if you aren&#39;t a 100% skill or experience match. Here are a few qualities we’ve found compatible with our team. 
If some of this describes you, we’d love to talk.</p>\n<ul>\n<li>You love diving into infrastructure problems and solving them systematically</li>\n<li>You’re curious about how to scale complex ML systems in production environments</li>\n<li>You’re an expert in building and running containerized, distributed systems</li>\n</ul>\n<p>We work hard, have fun, and move fast! We’re in an exciting stage of hyper-growth that you will not want to miss out on. We’re not afraid of a little chaos, and we’re constantly learning. Our team cares deeply about how we build our product and how we work together, which is represented through our core values:</p>\n<ul>\n<li>Be Curious at Your Core</li>\n<li>Act Like an Owner</li>\n<li>Empower Employees</li>\n<li>Deliver Best-in-Class Client Experiences</li>\n<li>Achieve More Together</li>\n</ul>\n<p>The base salary ranges for this role is $180,000 to $200,000. The starting salary will be determined based on job-related knowledge, skills, experience, and market location. We strive for both market alignment and internal equity when determining compensation. 
In addition to base salary, our total rewards package includes a discretionary bonus, equity awards, and a comprehensive benefits program (all based on eligibility).</p>\n<p>We offer a variety of benefits to support your needs, including:</p>\n<ul>\n<li>Medical, dental, and vision insurance</li>\n<li>100% paid for by CoreWeave</li>\n<li>Company-paid Life Insurance</li>\n<li>Voluntary supplemental life insurance</li>\n<li>Short and long-term disability insurance</li>\n<li>Flexible Spending Account</li>\n<li>Health Savings Account</li>\n<li>Tuition Reimbursement</li>\n<li>Ability to Participate in Employee Stock Purchase Program (ESPP)</li>\n<li>Mental Wellness Benefits through Spring Health</li>\n<li>Family-Forming support provided by Carrot</li>\n<li>Paid Parental Leave</li>\n<li>Flexible, full-service childcare support with Kinside</li>\n<li>401(k) with a generous employer match</li>\n<li>Flexible PTO</li>\n<li>Catered lunch each day in our office and data center locations</li>\n<li>A casual work environment</li>\n<li>A work culture focused on innovative disruption</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_48e2e160-bde","directApply":true,"hiringOrganization":{"@type":"Organization","name":"CoreWeave","sameAs":"https://www.coreweave.com","logo":"https://logos.yubhub.co/coreweave.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/coreweave/jobs/4622845006","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,000 to $200,000","x-skills-required":["Docker","Kubernetes","Helm charts","Networking","Cloud-managed services (e.g., MySQL, Object Stores)","Infrastructure as Code (IaC), preferably Terraform","Linux/Unix command line experience","Python","ML workflows or tools"],"x-skills-preferred":["Deep proficiency in Kubernetes design patterns, including 
Operators","Familiarity with data engineering and MLOps tooling","Experience as an educator or facilitator for technical training sessions, workshops, or demos","SaaS, web service, or distributed systems operations experience"],"datePosted":"2026-04-18T15:54:07.692Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Livingston, NJ / New York, NY / San Francisco, CA / Sunnyvale, CA / Bellevue, WA"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Docker, Kubernetes, Helm charts, Networking, Cloud-managed services (e.g., MySQL, Object Stores), Infrastructure as Code (IaC), preferably Terraform, Linux/Unix command line experience, Python, ML workflows or tools, Deep proficiency in Kubernetes design patterns, including Operators, Familiarity with data engineering and MLOps tooling, Experience as an educator or facilitator for technical training sessions, workshops, or demos, SaaS, web service, or distributed systems operations experience","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180000,"maxValue":200000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_64176983-af0"},"title":"Research Engineer, Reward Models Platform","description":"<p>You will work as a Research Engineer on Anthropic&#39;s Reward Models Platform. Your primary responsibility will be to design and build infrastructure that enables researchers to rapidly iterate on reward signals. This includes tools for rubric development, human feedback data analysis, and reward robustness evaluation. You will also develop systems for automated quality assessment of rewards, including detection of reward hacks and other pathologies. Additionally, you will create tooling that allows researchers to easily compare different reward methodologies and understand their effects. 
You will collaborate with researchers to translate science requirements into platform capabilities and optimize existing systems for performance, reliability, and ease of use.</p>\n<p>You will have the opportunity to contribute directly to research projects yourself and have a direct impact on our ability to scale reward development across domains. You will work closely with researchers and translate ambiguous requirements into well-scoped engineering projects.</p>\n<p>To be successful in this role, you should have prior research experience and be excited to work closely with researchers. You should have strong Python skills and experience with ML workflows and data pipelines, and building related infrastructure/tooling/platforms. You should be comfortable working across the stack, ranging from data pipelines to experiment tracking to user-facing tooling.</p>\n<p>Strong candidates may also have experience with ML research, building internal tooling and platforms for ML researchers, data quality assessment and pipeline optimization, experiment tracking, evaluation frameworks, or MLOps tooling. 
They may also have experience with large-scale data processing, Kubernetes, distributed systems, or cloud infrastructure, and familiarity with reinforcement learning or fine-tuning workflows.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_64176983-af0","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5024831008","x-work-arrangement":"hybrid","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":"$350,000-$500,000 USD","x-skills-required":["Python","ML workflows","data pipelines","infrastructure/tooling/platforms","rubric development","human feedback data analysis","reward robustness evaluation","automated quality assessment","reward hacks","pathologies","experiment tracking","evaluation frameworks","MLOps tooling"],"x-skills-preferred":["ML research","building internal tooling and platforms for ML researchers","data quality assessment and pipeline optimization","Kubernetes","distributed systems","cloud infrastructure","reinforcement learning","fine-tuning workflows"],"datePosted":"2026-04-18T15:42:43.065Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Remote-Friendly (Travel-Required) | San Francisco, CA | Seattle, WA | New York City, NY"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, ML workflows, data pipelines, infrastructure/tooling/platforms, rubric development, human feedback data analysis, reward robustness evaluation, automated quality assessment, reward hacks, pathologies, experiment tracking, evaluation frameworks, MLOps tooling, ML research, building internal tooling and platforms for ML researchers, 
data quality assessment and pipeline optimization, Kubernetes, distributed systems, cloud infrastructure, reinforcement learning, fine-tuning workflows","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":350000,"maxValue":500000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_ceba9e5b-250"},"title":"Senior Backend Engineer, Product and Infra","description":"<p>We&#39;re looking for a Senior Backend Engineer to build the systems and services that power our product experience. You&#39;ll own the backend infrastructure that makes our content discoverable, our features responsive, and our platform reliable at scale.</p>\n<p>Your work will directly shape what users experience: designing APIs that serve rich content, building services that handle real-time interactions, implementing content-matching systems for rights and safety, and ensuring our platform performs under load. You&#39;ll architect systems that are fast, correct, and maintainable.</p>\n<p>You&#39;ll collaborate closely with Product, ML Research, and Mobile/Web teams to ship features that matter. 
We use Python, Go, BigQuery, Pub/Sub, and a microservices architecture, but we care more about good judgment than specific tool experience.</p>\n<p><strong>Responsibilities</strong></p>\n<ul>\n<li>Design and maintain application-level data models that organize rich content into canonical structures optimized for product features, search, and retrieval.</li>\n<li>Build high-reliability ETLs and streaming pipelines to process usage events, analytics data, behavioral signals, and application logs.</li>\n<li>Develop data services that expose unified content to the application, such as metadata access APIs, indexing workflows, and retrieval-ready representations.</li>\n<li>Implement and refine fingerprinting pipelines used for deduplication, rights attribution, safety checks, and provenance validation.</li>\n<li>Own data consistency between ingestion systems, application surfaces, metadata storage, and downstream reporting environments.</li>\n<li>Define and track key operational metrics, including latency, completeness, accuracy, and event health.</li>\n<li>Collaborate with Product teams to ensure content structures and APIs support evolving features and high-quality user experiences.</li>\n<li>Partner with Analytics and Research teams to deliver clean usage datasets for experimentation, model evaluation, reporting, and internal insights.</li>\n<li>Operate large analytical workloads in BigQuery and build reusable Dataflow/Beam components for structured processing.</li>\n<li>Improve reliability and scale by designing robust schema evolution strategies, idempotent pipelines, and well-instrumented operational flows.</li>\n</ul>\n<p><strong>Requirements</strong></p>\n<ul>\n<li>Experience building production backend services and APIs at scale</li>\n<li>Experience building ETL/ELT pipelines, event processing systems, and structured data models for applications or analytics</li>\n<li>Strong background in data modeling, metadata systems, indexing, or building canonical 
representations for heterogeneous content</li>\n<li>Proficiency in Python, Go, SQL, and scalable data-processing frameworks (Dataflow/Beam, Spark, or similar)</li>\n<li>Familiarity with BigQuery or other analytical data warehouses and strong comfort optimizing large queries and schemas</li>\n<li>Experience with event-driven architectures, Pub/Sub, or Kafka-like systems</li>\n<li>Strong understanding of data quality, schema evolution, lineage, and operational reliability</li>\n<li>Ability to design pipelines that balance cost, latency, correctness, and scale</li>\n<li>Clear communication skills and an ability to collaborate closely with Product, Research, and Analytics stakeholders</li>\n</ul>\n<p><strong>Nice to Have</strong></p>\n<ul>\n<li>Experience building application-facing APIs or microservices that expose structured content</li>\n<li>Background in information retrieval, indexing systems, or search infrastructure</li>\n<li>Experience with fingerprinting, perceptual hashing, audio similarity metrics, or content-matching algorithms</li>\n<li>Familiarity with ML workflows and how downstream analytics and usage data feed back into research pipelines</li>\n<li>Understanding of batch + streaming architectures and how to blend them effectively</li>\n<li>Experience with Go, Next.js, or React Native for occasional full-stack contributions</li>\n</ul>\n<p><strong>Why Join Us</strong></p>\n<p>You will design the core data services and pipelines that power our product experience, analytics, and business operations. You’ll work on high-impact data challenges involving real-time signals, large-scale metadata systems, and cross-platform consistency. 
You’ll join a small, fast-moving team where you’ll shape the structure, reliability, and intelligence of our downstream data ecosystem.</p>\n<p><strong>Benefits</strong></p>\n<ul>\n<li>Highly competitive salary and equity</li>\n<li>Quarterly productivity budget</li>\n<li>Flexible time off</li>\n<li>Fantastic office location in Manhattan</li>\n<li>Productivity package, including ChatGPT Plus, Claude Code, and Copilot</li>\n<li>Top-notch private health, dental, and vision insurance for you and your dependents</li>\n<li>401(k) plan options with employer matching</li>\n<li>Concierge medical/primary care through One Medical and Rightway</li>\n<li>Mental health support from Spring Health</li>\n<li>Personalized life insurance, travel assistance, and many other perks</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_ceba9e5b-250","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Udio","sameAs":"https://www.udio.com/","logo":"https://logos.yubhub.co/udio.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/udio/jobs/4987729008","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,000 - $220,000","x-skills-required":["Python","Go","BigQuery","Pub/Sub","Data modeling","Metadata systems","Indexing","Canonical representations","ETL/ELT pipelines","Event processing systems","Structured data models","Scalable data-processing frameworks","Analytical data warehouses","Event-driven architectures","Kafka-like systems","Data quality","Schema evolution","Lineage","Operational reliability"],"x-skills-preferred":["Application-facing APIs","Microservices","Information retrieval","Indexing systems","Search infrastructure","Fingerprinting","Perceptual hashing","Audio similarity metrics","Content-matching algorithms","ML workflows","Batch + streaming 
architectures"],"datePosted":"2026-04-17T13:05:20.076Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"New York"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, Go, BigQuery, Pub/Sub, Data modeling, Metadata systems, Indexing, Canonical representations, ETL/ELT pipelines, Event processing systems, Structured data models, Scalable data-processing frameworks, Analytical data warehouses, Event-driven architectures, Kafka-like systems, Data quality, Schema evolution, Lineage, Operational reliability, Application-facing APIs, Microservices, Information retrieval, Indexing systems, Search infrastructure, Fingerprinting, Perceptual hashing, Audio similarity metrics, Content-matching algorithms, ML workflows, Batch + streaming architectures","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180000,"maxValue":220000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_cf63279d-d28"},"title":"Research Engineer, Reward Models Platform","description":"<p><strong>About the role</strong></p>\n<p>You will deeply understand the research workflows of our Finetuning teams and automate the high-friction parts – turning days of manual experimentation into hours. You’ll build the tools and infrastructure that enable researchers across the organisation to develop, evaluate, and optimise reward signals for training our models. 
Your scalable platforms will make it easy to experiment with different reward methodologies, assess their robustness, and iterate rapidly on improvements to help the rest of Anthropic train our reward models.</p>\n<p><strong>Responsibilities</strong></p>\n<ul>\n<li>Design and build infrastructure that enables researchers to rapidly iterate on reward signals, including tools for rubric development, human feedback data analysis, and reward robustness evaluation</li>\n<li>Develop systems for automated quality assessment of rewards, including detection of reward hacks and other pathologies</li>\n<li>Create tooling that allows researchers to easily compare different reward methodologies (preference models, rubrics, programmatic rewards) and understand their effects</li>\n<li>Build pipelines and workflows that reduce toil in reward development, from dataset preparation to evaluation to deployment</li>\n<li>Implement monitoring and observability systems to track reward signal quality and surface issues during training runs</li>\n<li>Collaborate with researchers to translate science requirements into platform capabilities</li>\n<li>Optimise existing systems for performance, reliability, and ease of use</li>\n<li>Contribute to the development of best practices and documentation for reward development workflows</li>\n</ul>\n<p><strong>You may be a good fit if you</strong></p>\n<ul>\n<li>Have prior research experience</li>\n<li>Are excited to work closely with researchers and translate ambiguous requirements into well-scoped engineering projects</li>\n<li>Have strong Python skills</li>\n<li>Have experience with ML workflows and data pipelines, and building related infrastructure/tooling/platforms</li>\n<li>Are comfortable working across the stack, ranging from data pipelines to experiment tracking to user-facing tooling</li>\n<li>Can balance building robust, maintainable systems with the need to move quickly in a research environment</li>\n<li>Are results-oriented, with a 
bias towards flexibility and impact</li>\n<li>Pick up slack, even if it goes outside your job description</li>\n<li>Care about the societal impacts of your work and are motivated by Anthropic&#39;s mission to develop safe AI</li>\n</ul>\n<p><strong>Strong candidates may also have experience with</strong></p>\n<ul>\n<li>Experience with ML research</li>\n<li>Building internal tooling and platforms for ML researchers</li>\n<li>Data quality assessment and pipeline optimisation</li>\n<li>Experiment tracking, evaluation frameworks, or MLOps tooling</li>\n<li>Large-scale data processing (e.g., Spark, Hive, or similar)</li>\n<li>Kubernetes, distributed systems, or cloud infrastructure</li>\n<li>Familiarity with reinforcement learning or fine-tuning workflows</li>\n</ul>\n<p><strong>Representative projects</strong></p>\n<ul>\n<li>Building infrastructure that allows researchers to rapidly test new rubric designs against small models before scaling up</li>\n<li>Developing automated systems to detect reward hacks and surface problematic behaviours during training</li>\n<li>Creating tooling for comparing different grading methodologies and understanding their effects on model behaviour</li>\n<li>Building a data quality flywheel that helps researchers identify problematic transcripts and feed improvements back into the system</li>\n<li>Developing dashboards and monitoring systems that give researchers visibility into reward signal quality across training runs</li>\n<li>Streamlining dataset preparation workflows to reduce latency and operational overhead</li>\n</ul>\n<p><strong>Logistics</strong></p>\n<ul>\n<li>Education requirements: We require at least a Bachelor&#39;s degree in a related field or equivalent experience.</li>\n<li>Location-based hybrid policy: Currently, we expect all staff to be in one of our offices at least 25% of the time. However, some roles may require more time in our offices.</li>\n<li>Visa sponsorship: We do sponsor visas! 
However, we aren&#39;t able to successfully sponsor visas for every role and every candidate. But if we make you an offer, we will make every reasonable effort to get you a visa, and we retain an immigration lawyer to help with the process.</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_cf63279d-d28","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5024831008","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$350,000 - $500,000 USD","x-skills-required":["Python","ML workflows","data pipelines","infrastructure/tooling/platforms","distributed systems","cloud infrastructure","reinforcement learning","fine-tuning workflows"],"x-skills-preferred":["ML research","data quality assessment","pipeline optimisation","experiment tracking","evaluation frameworks","MLOps tooling","large-scale data processing","Kubernetes"],"datePosted":"2026-03-08T13:48:05.218Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA, Seattle, WA, New York City, NY"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, ML workflows, data pipelines, infrastructure/tooling/platforms, distributed systems, cloud infrastructure, reinforcement learning, fine-tuning workflows, ML research, data quality assessment, pipeline optimisation, experiment tracking, evaluation frameworks, MLOps tooling, large-scale data processing, Kubernetes","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":350000,"maxValue":500000,"unitText":"YEAR"}}}]}