{"version":"0.1","company":{"name":"YubHub","url":"https://yubhub.co","jobsUrl":"https://yubhub.co/jobs/skill/deployments"},"x-facet":{"type":"skill","slug":"deployments","display":"Deployments","count":91},"x-feed-size-limit":100,"x-feed-sort":"enriched_at desc","x-feed-notice":"This feed contains at most 100 jobs (the most recently enriched). For the full corpus, use the paginated /stats/by-facet endpoint or /search.","x-generator":"yubhub-xml-generator","x-rights":"Free to redistribute with attribution: \"Data by YubHub (https://yubhub.co)\"","x-schema":"Each entry in `jobs` follows https://schema.org/JobPosting. YubHub-native raw fields carry `x-` prefix.","jobs":[{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_c19e39af-feb"},"title":"Full-Stack Software Engineer, (Forward Deployed), GPS","description":"<p>Scale&#39;s rapidly growing Global Public Sector team is focused on using AI to address critical challenges facing the public sector around the world.</p>\n<p>Our core work consists of creating custom AI applications that will impact millions of citizens, generating high-quality training data for custom LLMs, and upskilling and advisory services to spread the impact of AI.</p>\n<p>As a Full Stack Software Engineer (Forward Deployed), you&#39;ll collaborate directly with public sector counterparts to quickly build full-stack, AI applications, to solve their most pressing challenges and achieve meaningful impact for citizens.</p>\n<p>At Scale, we&#39;re not just building AI solutions,we&#39;re enabling the public sector to transform their operations and better serve citizens through cutting-edge technology.</p>\n<p><strong>Responsibilities:</strong></p>\n<ul>\n<li>Collaborate with senior engineers to implement features for public sector clients, including spending time with the client to understand user feedback and assist with delivery.</li>\n<li>Develop and maintain full-stack components 
that integrate with AI models, focusing on building responsive UIs and reliable backend APIs.</li>\n<li>Assist in deploying and monitoring applications within cloud environments, ensuring basic system stability and security.</li>\n<li>Help build and refine reusable features that support diverse international client use cases.</li>\n<li>Work within a multi-disciplinary team of design, product, and data specialists to build robust features that follow established technical architectures.</li>\n</ul>\n<p><strong>Ideal Candidate:</strong></p>\n<ul>\n<li>Bachelor&#39;s degree in Computer Science or a related quantitative field</li>\n<li>Professional full-stack experience with a focus on React, TypeScript, and Python/Node.js. Familiarity with Next.js and NoSQL/Relational databases, along with exposure to containerization (Docker) and cloud deployments.</li>\n<li>Experience building and deploying web applications with a good understanding of cloud fundamentals and scalable coding practices.</li>\n<li>A self-starting approach to navigate ambiguous requirements and deliver reliable software.</li>\n</ul>\n<p><strong>Nice to Have:</strong></p>\n<ul>\n<li>Proficient in Arabic</li>\n<li>Experience working cross functionally with operations</li>\n<li>Experience building solutions with LLMs</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_c19e39af-feb","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Scale","sameAs":"https://scale.com/","logo":"https://logos.yubhub.co/scale.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/scaleai/jobs/4676602005","x-work-arrangement":"remote","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["React","TypeScript","Python","Node.js","Next.js","NoSQL/Relational databases","containerization (Docker)","cloud 
deployments"],"x-skills-preferred":["Arabic","experience working cross functionally with operations","experience building solutions with LLMs"],"datePosted":"2026-04-18T16:01:21.167Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Dubai, UAE"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"React, TypeScript, Python, Node.js, Next.js, NoSQL/Relational databases, containerization (Docker), cloud deployments, Arabic, experience working cross functionally with operations, experience building solutions with LLMs"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_2d16873c-e17"},"title":"Full-Stack Software Engineer, (Forward Deployed), GPS","description":"<p>Scale&#39;s rapidly growing Global Public Sector team is focused on using AI to address critical challenges facing the public sector around the world.</p>\n<p>Our core work consists of creating custom AI applications that will impact millions of citizens, generating high-quality training data for custom LLMs, and upskilling and advisory services to spread the impact of AI.</p>\n<p>As a Full Stack Software Engineer (Forward Deployed), you&#39;ll collaborate directly with public sector counterparts to quickly build full-stack, AI applications, to solve their most pressing challenges and achieve meaningful impact for citizens.</p>\n<p>At Scale, we&#39;re not just building AI solutions,we&#39;re enabling the public sector to transform their operations and better serve citizens through cutting-edge technology.</p>\n<p>If you&#39;re ready to shape the future of AI in the public sector and be a founding member of our team, we&#39;d love to hear from you.</p>\n<p><strong>Responsibilities</strong></p>\n<ul>\n<li>Collaborate with senior engineers to implement features for public sector clients, including spending time with the client 
to understand user feedback and assist with delivery.</li>\n<li>Develop and maintain full-stack components that integrate with AI models, focusing on building responsive UIs and reliable backend APIs.</li>\n<li>Assist in deploying and monitoring applications within cloud environments, ensuring basic system stability and security.</li>\n<li>Help build and refine reusable features that support diverse international client use cases.</li>\n<li>Work within a multi-disciplinary team of design, product, and data specialists to build robust features that follow established technical architectures.</li>\n</ul>\n<p><strong>Ideal Candidate</strong></p>\n<ul>\n<li>Bachelor&#39;s degree in Computer Science or a related quantitative field</li>\n<li>Professional full-stack experience with a focus on React, TypeScript, and Python/Node.js. Familiarity with Next.js and NoSQL/Relational databases, along with exposure to containerization (Docker) and cloud deployments.</li>\n<li>Experience building and deploying web applications with a good understanding of cloud fundamentals and scalable coding practices.</li>\n<li>A self-starting approach to navigate ambiguous requirements and deliver reliable software.</li>\n</ul>\n<p><strong>Nice to Haves</strong></p>\n<ul>\n<li>Proficient in Arabic</li>\n<li>Experience working cross functionally with operations</li>\n<li>Experience building solutions with LLMs</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_2d16873c-e17","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Scale","sameAs":"https://scale.com/","logo":"https://logos.yubhub.co/scale.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/scaleai/jobs/4676600005","x-work-arrangement":"onsite","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["React","TypeScript","Python","Node.js","Next.js","NoSQL/Relational databases","containerization (Docker)","cloud deployments"],"x-skills-preferred":["Arabic","cross functional collaboration","LLM solutions"],"datePosted":"2026-04-18T16:01:13.044Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Doha, Qatar"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"React, TypeScript, Python, Node.js, Next.js, NoSQL/Relational databases, containerization (Docker), cloud deployments, Arabic, cross functional collaboration, LLM solutions"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_2717510f-5f6"},"title":"Transaction Principal","description":"<p>As a Transaction Principal for Europe at Anthropic, you&#39;ll drive the commercial sourcing and transaction execution process for our European data center capacity deals. 
You&#39;ll lead RFP processes, negotiate term sheets, and serve as the central leader ensuring seamless stakeholder alignment from initial sourcing through lease execution.</p>\n<p>This role is critical to securing the infrastructure that powers Anthropic&#39;s frontier AI systems across Europe, you&#39;ll bridge commercial negotiations with complex internal coordination across legal, finance, engineering, and network teams, and partner closely with our Compute Markets team who own the Europe market strategy and government relationships.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Lead the RFP and commercial sourcing process for European data center deals, managing developer outreach, proposal evaluation, and competitive selection across multiple markets</li>\n<li>Negotiate term sheets and manage the LOI process, structuring commercial terms that meet Anthropic&#39;s technical and business requirements while maintaining strong developer partnerships</li>\n<li>Create the bridge from LOI to executed transaction, ensuring all commercial, technical, and legal requirements are satisfied for deal closure</li>\n<li>Serve as project manager for cross-functional stakeholder engagement, coordinating due diligence teams, internal and external legal counsel, network organization, platform engineers, and finance to ensure alignment prior to lease execution</li>\n<li>Act as the single point of contact for auxiliary organizations including networks, deployments, and government relations, providing regular updates on transaction progress and leasing status</li>\n<li>Develop and maintain transaction timelines, tracking critical-path items and proactively identifying risks that could impact deal closure</li>\n<li>Ensure all stakeholder requirements are captured and addressed in commercial agreements, translating technical and operational needs into contractual terms</li>\n<li>Manage complex digital infrastructure development activities to a construction-ready state, through a 
developer or directly</li>\n<li>Marry the right projects, capital stacks, and developers at the right stages</li>\n<li>Navigate country-specific permitting, grid connection, and regulatory requirements that vary significantly across European markets</li>\n<li>Document and refine transaction processes and playbooks to enable scalable deal execution as Anthropic expands its infrastructure footprint across the region</li>\n<li>Partner with the Compute Markets Manager to prioritize markets, sites, and counterparties, and feed deal learnings back into Europe market strategy</li>\n</ul>\n<p>You may be a good fit if you:</p>\n<ul>\n<li>Have 10+ years of experience in transaction management, commercial real estate, data center leasing, or infrastructure procurement</li>\n<li>Possess a proven track record of managing complex, multi-stakeholder transactions from sourcing through execution</li>\n<li>Have strong negotiation skills with experience structuring term sheets, LOIs, and commercial agreements</li>\n<li>Excel at project management and can coordinate across legal, technical, finance, and operational teams simultaneously</li>\n<li>Have experience with RFP processes and competitive sourcing for large-scale infrastructure or real estate transactions</li>\n<li>Have experience working in or across European markets, with knowledge of the regional data center and development landscape, including established FLAP-D hubs and emerging markets like the Nordics and Southern Europe</li>\n<li>Are comfortable operating across multiple countries with different legal frameworks, languages, and business cultures</li>\n<li>Are highly organized with strong attention to detail while maintaining focus on strategic deal objectives</li>\n<li>Can operate effectively in fast-paced, ambiguous environments where processes are being built alongside execution</li>\n<li>Demonstrate exceptional communication skills and can coordinate effectively across time zones with US-based HQ teams and 
distributed European partners</li>\n</ul>\n<p>It&#39;s a bonus if you:</p>\n<ul>\n<li>Have experience with data center or hyperscale infrastructure transactions specifically</li>\n<li>Come from the development side of the industry rather than traditional brokerage/leasing, you understand how DC development works and how value is created (yield-on-cost, cap rates, development fees)</li>\n<li>Understand technical requirements for AI/ML workloads including power density, cooling, and network connectivity</li>\n<li>Have worked with legal teams on complex lease negotiations or infrastructure agreements across multiple European jurisdictions</li>\n<li>Understand utility coordination, power procurement, or energy considerations in data center transactions, particularly in the European context (fragmented national power markets, grid connection queues, renewable PPAs, sustainability and efficiency regulations)</li>\n<li>Have familiarity with data sovereignty and regulatory considerations that influence European site selection</li>\n<li>Have relationships within the European data center developer, operator, and broker ecosystem</li>\n<li>Have a background in corporate development, strategic partnerships, or infrastructure investment</li>\n<li>Have experience in high-growth technology companies managing infrastructure expansion</li>\n</ul>\n<p>Annual compensation range for this role is £225,000-£270,000 GBP.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_2717510f-5f6","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5170084008","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"£225,000-£270,000 
GBP","x-skills-required":["transaction management","commercial real estate","data center leasing","infrastructure procurement","RFP processes","competitive sourcing","project management","negotiation skills","term sheets","LOIs","commercial agreements","cross-functional stakeholder engagement","due diligence teams","legal counsel","network organization","platform engineers","finance","auxiliary organizations","networks","deployments","government relations","transaction timelines","critical-path items","risks","technical and operational needs","contractual terms","digital infrastructure development","construction-ready state","projects","capital stacks","developers","country-specific permitting","grid connection","regulatory requirements","transaction processes","playbooks","scalable deal execution","Europe market strategy","Compute Markets Manager","market prioritization","site prioritization","counterparty prioritization","deal learnings"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:59:03.320Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"London, UK"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"transaction management, commercial real estate, data center leasing, infrastructure procurement, RFP processes, competitive sourcing, project management, negotiation skills, term sheets, LOIs, commercial agreements, cross-functional stakeholder engagement, due diligence teams, legal counsel, network organization, platform engineers, finance, auxiliary organizations, networks, deployments, government relations, transaction timelines, critical-path items, risks, technical and operational needs, contractual terms, digital infrastructure development, construction-ready state, projects, capital stacks, developers, country-specific permitting, grid connection, regulatory requirements, transaction processes, playbooks, scalable deal execution, Europe market strategy, Compute 
Markets Manager, market prioritization, site prioritization, counterparty prioritization, deal learnings","baseSalary":{"@type":"MonetaryAmount","currency":"GBP","value":{"@type":"QuantitativeValue","minValue":225000,"maxValue":270000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_04c1ff49-2d1"},"title":"Data Platform Solutions Architect (Professional Services)","description":"<p>We&#39;re hiring for multiple roles within our Professional Services team. As a Data Platform Solutions Architect, you will work with clients on short to medium-term customer engagements on their big data challenges using the Databricks platform. You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>Work on a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases</li>\n<li>Work with engagement managers to scope variety of professional services work with input from the customer</li>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n<li>Consult on architecture and design; bootstrap or implement customer projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks.</li>\n<li>Provide an escalated level of support for customer operational issues.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>Extensive experience in data engineering, data platforms &amp; analytics</li>\n<li>Comfortable writing code in either Python or Scala</li>\n<li>Working knowledge of two or more 
common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Spark runtime internals</li>\n<li>Familiarity with CI/CD for production deployments</li>\n<li>Working knowledge of MLOps</li>\n<li>Design and deployment of performant end-to-end data architectures</li>\n<li>Experience with technical project delivery - managing scope and timelines.</li>\n<li>Documentation and white-boarding skills.</li>\n<li>Experience working with clients and managing conflicts.</li>\n<li>Build skills in technical areas which support the deployment and integration of Databricks-based solutions to complete customer projects.</li>\n<li>Travel to customers 10% of the time</li>\n</ul>\n<p>[Preferred] Databricks Certification but not essential</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_04c1ff49-2d1","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8396801002","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","technical project delivery","documentation and white-boarding skills"],"x-skills-preferred":["Databricks Certification"],"datePosted":"2026-04-18T15:58:52.546Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"London, United Kingdom"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & analytics, Python, Scala, Cloud ecosystems 
(AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, technical project delivery, documentation and white-boarding skills, Databricks Certification"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_5b244f27-9fd"},"title":"Resident Solutions Architect - Communications, Media, Entertainment & Games","description":"<p>As a Resident Solutions Architect in our Professional Services team, you will work with clients on short to medium term customer engagements on their big data challenges using the Databricks platform. You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>You will work on a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases. You will work with engagement managers to scope variety of professional services work with input from the customer.</p>\n<p>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications. Consult on architecture and design; bootstrap or implement customer projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks.</p>\n<p>Provide an escalated level of support for customer operational issues. 
You will work with the Databricks technical team, Project Manager, Architect and Customer team to ensure the technical components of the engagement are delivered to meet customer&#39;s needs.</p>\n<p>Work with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement specific product and support issues.</p>\n<p>The ideal candidate will have 6+ years experience in data engineering, data platforms &amp; analytics, comfortable writing code in either Python or Scala, working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one, deep experience with distributed computing with Apache Spark™ and knowledge of Spark runtime internals, familiarity with CI/CD for production deployments, working knowledge of MLOps, design and deployment of performant end-to-end data architectures, experience with technical project delivery - managing scope and timelines, documentation and white-boarding skills, experience working with clients and managing conflicts, build skills in technical areas which support the deployment and integration of Databricks-based solutions to complete customer projects.</p>\n<p>Travel to customers 20% of the time.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_5b244f27-9fd","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8461258002","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 USD","x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production 
deployments","MLOps","end-to-end data architectures","technical project delivery","documentation and white-boarding skills","client management"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:58:34.588Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Raleigh, North Carolina"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & analytics, Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, end-to-end data architectures, technical project delivery, documentation and white-boarding skills, client management","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_b05b9f90-7d3"},"title":"Data Center Engineer, Resource Efficiency – Compute Supply","description":"<p><strong>About the Role</strong></p>\n<p>As a Power &amp; Resource Efficiency Engineer, you&#39;ll sit at the intersection of IT and facilities, building the systems, models, and control loops that optimize how we allocate and consume power, cooling, and physical capacity across our TPU/GPU fleet.</p>\n<p>You&#39;ll own the technical strategy for turning raw data center capacity into reliable, efficient compute, working across power topology, workload scheduling, and real-time telemetry to push utilization as close to the physical envelope as possible while maintaining our availability commitments.</p>\n<p><strong>Responsibilities</strong></p>\n<ul>\n<li>Build models that forecast consumption across electrical and mechanical subsystems, informing capacity planning, energy procurement, oversubscription targets and risks, including statistical modeling of cluster utilization, workload profiles, and failure 
modes.</li>\n</ul>\n<ul>\n<li>Design IT/OT interfaces that bridge compute orchestration with facility controls, enabling real-time telemetry across accelerator hardware, power distribution, cooling, and schedulers.</li>\n</ul>\n<ul>\n<li>Build and operate load management systems that use power and cooling topology to enable load management and power/thermal-aware placement to maximize throughput while meeting SLOs.</li>\n</ul>\n<ul>\n<li>Partner with data center providers to drive design optimizations and hold them accountable to SLA-grade performance standards, providing technical diligence on partner architectures.</li>\n</ul>\n<p><strong>What We&#39;re Looking For</strong></p>\n<ul>\n<li>Deep knowledge of data center power distribution and cooling architectures, and how they interact with IT load profiles. Experience with reliability engineering, SLA development, and failure-mode analysis.</li>\n</ul>\n<ul>\n<li>Proficiency in statistical modeling and simulation for infrastructure capacity or power utilization.</li>\n</ul>\n<ul>\n<li>Familiarity with SCADA/BMS/EPMS, telemetry pipelines, and control systems. 
Experience building software that bridges IT and OT.</li>\n</ul>\n<ul>\n<li>Exposure to accelerator deployments and their power management interfaces strongly preferred.</li>\n</ul>\n<ul>\n<li>Demand response, grid interaction, or behind-the-meter generation experience is a plus.</li>\n</ul>\n<ul>\n<li>Ability to translate between infrastructure engineering, software teams, and external partners.</li>\n</ul>\n<p><strong>Required Qualifications</strong></p>\n<ul>\n<li>Bachelor&#39;s degree in Electrical Engineering, Mechanical Engineering, Power Systems, Controls Engineering, or a related field.</li>\n</ul>\n<ul>\n<li>5+ years of experience in data center infrastructure or facility engineering.</li>\n</ul>\n<ul>\n<li>Demonstrated experience with data center power distribution and cooling system architectures.</li>\n</ul>\n<ul>\n<li>Experience building or operating software-based power management, load scheduling, or control systems.</li>\n</ul>\n<ul>\n<li>Proficiency in Python or similar languages for statistical modeling, simulation, or automation of data center infrastructure optimizations.</li>\n</ul>\n<ul>\n<li>Familiarity with SCADA, BMS, EPMS, or industrial control systems and associated protocols (Modbus, BACnet, SNMP).</li>\n</ul>\n<ul>\n<li>Track record of cross-functional collaboration across hardware, software, and facilities teams.</li>\n</ul>\n<p><strong>Preferred Qualifications</strong></p>\n<ul>\n<li>Master&#39;s or PhD in Controls, Power Systems, or related discipline and 3+ years of experience in data center infrastructure or facility engineering.</li>\n</ul>\n<ul>\n<li>Experience with accelerator-class deployments and their power management interfaces.</li>\n</ul>\n<ul>\n<li>Background in control theory, dynamical systems, or cyber-physical systems design.</li>\n</ul>\n<ul>\n<li>Experience with energy storage, microgrid integration, demand response, or behind-the-meter generation.</li>\n</ul>\n<ul>\n<li>Familiarity with reliability engineering 
methods.</li>\n</ul>\n<ul>\n<li>Experience with SLA development, availability modeling, or service credit frameworks.</li>\n</ul>\n<ul>\n<li>Exposure to ML/optimization techniques applied to infrastructure or energy systems.</li>\n</ul>\n<p><strong>Salary</strong></p>\n<p>The annual compensation range for this role is $320,000-$405,000 USD.</p>\n<p><strong>Benefits</strong></p>\n<p>We offer competitive compensation and benefits, optional equity donation matching, generous vacation and parental leave, flexible working hours, and a lovely office space in which to collaborate with our team.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_b05b9f90-7d3","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5159642008","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$320,000-$405,000 USD","x-skills-required":["data center power distribution","cooling architectures","IT load profiles","reliability engineering","SLA development","failure-mode analysis","statistical modeling","simulation","infrastructure capacity","power utilization","SCADA/BMS/EPMS","telemetry pipelines","control systems","accelerator deployments","power management interfaces","demand response","grid interaction","behind-the-meter generation","Python","automation","data center infrastructure optimizations","SCADA","BMS","EPMS","industrial control systems","Modbus","BACnet","SNMP"],"x-skills-preferred":["accelerator-class deployments","control theory","dynamical systems","cyber-physical systems design","energy storage","microgrid integration","reliability engineering methods","availability modeling","service credit frameworks","ML/optimization 
techniques"],"datePosted":"2026-04-18T15:58:06.281Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Remote-Friendly, United States"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data center power distribution, cooling architectures, IT load profiles, reliability engineering, SLA development, failure-mode analysis, statistical modeling, simulation, infrastructure capacity, power utilization, SCADA/BMS/EPMS, telemetry pipelines, control systems, accelerator deployments, power management interfaces, demand response, grid interaction, behind-the-meter generation, Python, automation, data center infrastructure optimizations, SCADA, BMS, EPMS, industrial control systems, Modbus, BACnet, SNMP, accelerator-class deployments, control theory, dynamical systems, cyber-physical systems design, energy storage, microgrid integration, reliability engineering methods, availability modeling, service credit frameworks, ML/optimization techniques","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":320000,"maxValue":405000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_32b5f6f9-913"},"title":"Account Executive, UK","description":"<p><strong>Job Title: Account Executive, UK</strong></p>\n<p><strong>Location: London, UK</strong></p>\n<p><strong>Category-defining tech. Career-defining work.</strong></p>\n<p>Lots of tech companies disrupt. But, many fail when they try to scale. We&#39;re different. CockroachDB makes it easier for companies to build and scale apps. This is how and why we&#39;re helping some of the most innovative companies on the planet. 
We tackle problems head-on and focus on solutions that create lasting impact.</p>\n<p><strong>Because when our customers win, we all win.</strong></p>\n<p><strong>The Role</strong></p>\n<p>Our Account Executives target and close new business with some of today&#39;s most innovative companies. In this role, you&#39;ll both nurture inbound leads in your territory with marketing-led support as well as prospect into a target list of select accounts. You will do this by driving opportunities through the entire sales cycle from pipeline generation to closure, employing a value-oriented sales methodology with a focus on use cases spanning customer data and marketing activation. The ideal candidate will have the aptitude and passion for becoming an expert in CockroachDB&#39;s product capabilities, business impact, and competitive advantages and loves to build long-lasting relationships with customer needs at the center.</p>\n<p>This role will cover the UK region and you must be based within a commutable distance to our London office to be eligible.</p>\n<p><strong>You Will</strong></p>\n<ul>\n<li>Close new logos and expand existing business within an assigned territory, meeting and exceeding sales goals through prospecting, qualifying, managing, and closing sales opportunities</li>\n</ul>\n<ul>\n<li>Leverage and coordinate cross-functional internal teams (Sales Development, Legal, Engineering, Security, Marketing, Product) to efficiently navigate complex sales cycles</li>\n</ul>\n<ul>\n<li>Maintain, build and own specific relationship maps for your territory, including existing relationships and aspirational contacts</li>\n</ul>\n<ul>\n<li>Lead compelling presentations of CockroachDB&#39;s product and vision to a broad range of audiences, from c-level executives to individual contributors</li>\n</ul>\n<ul>\n<li>Provide timely and accurate forecasts and clear visibility on sales and revenue performance by actively handling your pipeline of 
opportunities</li>\n</ul>\n<p><strong>The Expectations</strong></p>\n<p>In your first 30 days, you will learn about CockroachDB and will be able to pitch the product proficiently. We believe that it is necessary for you to build this foundation so you can successfully engage with existing accounts and grasp the sales strategy. Upon completion of your first month, you will have built cross-functional relationships and will have started building your strategy for account penetration.</p>\n<p>After three months, you&#39;ll have a sound plan for account strategy and mapping. You have started building relationships with several leaders across your region and are beginning to understand their challenges and how CockroachDB can help solve them.</p>\n<p>After six months, you will have 3x your quota in pipeline and will have at least 3 POCs in process.</p>\n<p><strong>You Have</strong></p>\n<ul>\n<li>3-5+ years of experience selling enterprise solutions and demonstrated success in software sales within the database ecosystem or adjacent technologies</li>\n</ul>\n<ul>\n<li>Experience leading large and complex sales cycles within the C-Level at Fortune 500 companies, specifically within financial services</li>\n</ul>\n<ul>\n<li>A consultative and value-based approach to selling software for cloud, on-premise and hybrid deployments</li>\n</ul>\n<ul>\n<li>The ability to assess customer needs and build valuable, trusted relationships at all levels</li>\n</ul>\n<ul>\n<li>An interest and experience leveraging AI tools to improve your sales process</li>\n</ul>\n<ul>\n<li>A track record of overachievement and hitting sales targets</li>\n</ul>\n<ul>\n<li>Expert time management and resource skills</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_32b5f6f9-913","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Cockroach 
Labs","sameAs":"https://www.cockroachlabs.com/","logo":"https://logos.yubhub.co/cockroachlabs.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/cockroachlabs/jobs/7792866","x-work-arrangement":"onsite","x-experience-level":"executive","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Enterprise sales","Software sales","Database ecosystem","Cloud deployments","On-premise deployments","Hybrid deployments","Value-based sales","Customer needs assessment","Relationship building","AI tools","Time management","Resource skills"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:57:33.583Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"London, UK"}},"employmentType":"FULL_TIME","occupationalCategory":"Sales","industry":"Technology","skills":"Enterprise sales, Software sales, Database ecosystem, Cloud deployments, On-premise deployments, Hybrid deployments, Value-based sales, Customer needs assessment, Relationship building, AI tools, Time management, Resource skills"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_769c0070-5b2"},"title":"Research Scientist, Agent Robustness","description":"<p>As a Research Scientist working on Agent Robustness, you will work on the fundamental challenges of building AI agents that are safe and aligned with humans.</p>\n<p>For example, you might:</p>\n<ul>\n<li>Research the science of AI agent capabilities with a focus on how they relate to safety, risk factors, and methodologies for benchmarking them;</li>\n<li>Design and build harnesses to test AI agents&#39; tendency to take harmful actions when pressured to do so by users or tricked into doing so by elements of their environment;</li>\n<li>Design and build exploits and mitigations for new and unique failure modes that arise as AI agents gain affordances like coding, web browsing, and computer use;</li>\n<li>Characterize and design mitigations for 
potential failure modes or broader risks of systems involving multiple interacting AI agents.</li>\n</ul>\n<p>Ideally you&#39;d have:</p>\n<ul>\n<li>Commitment to our mission of promoting safe, secure, and trustworthy AI deployments in the industry as frontier AI capabilities continue to advance;</li>\n<li>Practical experience conducting technical research collaboratively;</li>\n<li>Experience with post-training and RL techniques such as RLHF, DPO, GRPO, and similar approaches;</li>\n<li>A track record of published research in machine learning, particularly in generative AI;</li>\n<li>At least three years of experience addressing sophisticated ML problems, whether in a research setting or in product development;</li>\n<li>Strong written and verbal communication skills to operate in a cross-functional team.</li>\n</ul>\n<p>Nice to have:</p>\n<ul>\n<li>Hands-on experience with agent evaluation frameworks such as SWE-bench, WebArena, OSWorld, Inspect, or similar tools;</li>\n<li>Experience with red-teaming, prompt injection, or adversarial testing of AI systems.</li>\n</ul>\n<p>Our research interviews are crafted to assess candidates&#39; skills in practical ML prototyping and debugging, their grasp of research concepts, and their alignment with our organisational culture. We will not ask any LeetCode-style questions. If you&#39;re excited about advancing AI safety and contributing to our mission, we encourage you to apply, even if your experience doesn&#39;t perfectly align with every requirement.</p>\n<p>Compensation packages at Scale for eligible roles include base salary, equity, and benefits. The range displayed on each job posting reflects the minimum and maximum target for new hire salaries for the position, determined by work location and additional factors, including job-related skills, experience, interview performance, and relevant education or training. 
Scale employees in eligible roles are also granted equity-based compensation, subject to Board of Director approval. Your recruiter can share more about the specific salary range for your preferred location during the hiring process, and confirm whether the hired role will be eligible for equity grant. You&#39;ll also receive benefits including, but not limited to: Comprehensive health, dental and vision coverage, retirement benefits, a learning and development stipend, and generous PTO. Additionally, this role may be eligible for additional benefits such as a commuter stipend.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_769c0070-5b2","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Scale","sameAs":"https://scale.com/","logo":"https://logos.yubhub.co/scale.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/scaleai/jobs/4675684005","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$216,000-$270,000 USD","x-skills-required":["Commitment to our mission of promoting safe, secure, and trustworthy AI deployments in the industry as frontier AI capabilities continue to advance","Practical experience conducting technical research collaboratively","Experience with post-training and RL techniques such as RLHF, DPO, GRPO, and similar approaches","A track record of published research in machine learning, particularly in generative AI","At least three years of experience addressing sophisticated ML problems, whether in a research setting or in product development"],"x-skills-preferred":["Hands-on experience with agent evaluation frameworks such as SWE-bench, WebArena, OSWorld, Inspect, or similar tools","Experience with red-teaming, prompt injection, or adversarial testing of AI 
systems"],"datePosted":"2026-04-18T15:57:29.447Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA; New York, NY"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Commitment to our mission of promoting safe, secure, and trustworthy AI deployments in the industry as frontier AI capabilities continue to advance, Practical experience conducting technical research collaboratively, Experience with post-training and RL techniques such as RLHF, DPO, GRPO, and similar approaches, A track record of published research in machine learning, particularly in generative AI, At least three years of experience addressing sophisticated ML problems, whether in a research setting or in product development, Hands-on experience with agent evaluation frameworks such as SWE-bench, WebArena, OSWorld, Inspect, or similar tools, Experience with red-teaming, prompt injection, or adversarial testing of AI systems","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":216000,"maxValue":270000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_c38cbb6f-4b7"},"title":"Staff Software Engineer, Inference","description":"<p>Job Title: Staff Software Engineer, Inference\\n\\nLocation: Dublin, IE\\n\\nDepartment: Software Engineering - Infrastructure\\n\\nJob Description:\\n\\nAbout Anthropic\\n\\nAnthropic&#39;s mission is to create reliable, interpretable, and steerable AI systems. We want AI to be safe and beneficial for our users and for society as a whole.\\n\\nAbout the role:\\n\\nOur Inference team is responsible for building and maintaining the critical systems that serve Claude to millions of users worldwide. We bring Claude to life by serving our models via the industry&#39;s largest compute-agnostic inference deployments. 
We are responsible for the entire stack from intelligent request routing to fleet-wide orchestration across diverse AI accelerators.\\n\\nThe team has a dual mandate: maximizing compute efficiency to serve our explosive customer growth, while enabling breakthrough research by giving our scientists the high-performance inference infrastructure they need to develop next-generation models. We tackle complex, distributed systems challenges across multiple accelerator families and emerging AI hardware running in multiple cloud platforms.\\n\\nAs a Staff Software Engineer on our Inference team, you will work end to end, identifying and addressing key infrastructure blockers to serve Claude to millions of users while enabling breakthrough AI research. Strong candidates should have familiarity with performance optimization, distributed systems, large-scale service orchestration, and intelligent request routing. Familiarity with LLM inference optimization, batching strategies, and multi-accelerator deployments is highly encouraged but not strictly necessary.\\n\\nStrong candidates may also have experience with:\\n\\n- High-performance, large-scale distributed systems\\n\\n- Implementing and deploying machine learning systems at scale\\n\\n- Load balancing, request routing, or traffic management systems\\n\\n- LLM inference optimization, batching, and caching strategies\\n\\n- Kubernetes and cloud infrastructure (AWS, GCP)\\n\\n- Python or Rust\\n\\nYou may be a good fit if you:\\n\\n- Have significant software engineering experience, particularly with distributed systems\\n\\n- Are results-oriented, with a bias towards flexibility and impact\\n\\n- Pick up slack, even if it goes outside your job description\\n\\n- Want to learn more about machine learning systems and infrastructure\\n\\n- Thrive in environments where technical excellence directly drives both business results and research breakthroughs\\n\\n- Care about the societal impacts of your work\\n\\nRepresentative 
projects across the org:\\n\\n- Designing intelligent routing algorithms that optimize request distribution across thousands of accelerators\\n\\n- Autoscaling our compute fleet to dynamically match supply with demand across production, research, and experimental workloads\\n\\n- Building production-grade deployment pipelines for releasing new models to millions of users\\n\\n- Integrating new AI accelerator platforms to maintain our hardware-agnostic competitive advantage\\n\\n- Contributing to new inference features (e.g., structured sampling, prompt caching)\\n\\n- Supporting inference for new model architectures\\n\\n- Analyzing observability data to tune performance based on real-world production workloads\\n\\n- Managing multi-region deployments and geographic routing for global customers\\n\\nDeadline to apply: None. Applications will be reviewed on a rolling basis.\\n\\nThe annual compensation range for this role is listed below.\\n\\nFor sales roles, the range provided is the role’s On Target Earnings (&quot;OTE&quot;) range, meaning that the range includes both the sales commissions/sales bonuses target and annual base salary for the role.\\n\\nAnnual Salary:€295.000-€355.000 EUR\\n\\nLogistics\\n\\nMinimum education: Bachelor’s degree or an equivalent combination of education, training, and/or experience\\n\\nRequired field of study: A field relevant to the role as demonstrated through coursework, training, or professional experience\\n\\nMinimum years of experience: Years of experience required will correlate with the internal job level requirements for the position\\n\\nLocation-based hybrid policy: Currently, we expect all staff to be in one of our offices at least 25% of the time. However, some roles may require more time in our offices.\\n\\nVisa sponsorship: We do sponsor visas! However, we aren&#39;t able to successfully sponsor visas for every role and every candidate. 
But if we make you an offer, we will make every reasonable effort to get you a visa, and we retain an immigration lawyer to help with this.\\n\\nWe encourage you to apply even if you do not believe you meet every single qualification. Not all strong candidates will meet every single qualification as listed. Research shows that people who identify as being from underrepresented groups are more prone to experiencing imposter syndrome and doubting the strength of their candidacy, so we urge you not to exclude yourself prematurely and to submit an application if you&#39;re interested in this work. We think AI systems like the ones we&#39;re building have enormous social and ethical implications. We think this makes representation even more important, and we strive to include a range of diverse perspectives on our team.\\n\\nYour safety matters to us. To protect yourself from potential scams, remember that Anthropic recruiters only contact you from @anthropic.com email addresses. In some cases, we may partner with vetted recruiting agencies who will identify themselves as working on behalf of Anthropic. Be cautious of emails from other domains. Legitimate Anthropic recruiters will never ask for money, fees, or banking information before your first day. If you&#39;re ever unsure about a communication, don&#39;t click any links,visit anthropic.com/careers directly for confirmed position openings.\\n\\nHow we&#39;re different\\n\\nWe believe that the highest-impact AI research will be big science. At Anthropic we work as a single cohesive team on just a few large-scale research efforts. And we value impact , advancing our long-term goals of steerable, trustworthy AI , rather than work on smaller and more specific puzzles. We view AI research as an empirical science, which has as much in common with physics and biology as with traditional efforts in computer science. 
We&#39;re an extremely collaborative group, and we host frequent research discussions to ensure that we are pursuing the highest-impact work at any given time. As such, we greatly value communication skills.\\n\\nThe easiest way to understand our research directions is to read our recent research. This research continues many of the directions our team worked on prior to Anthropic, including: GPT-3, Circuit-Based Interpretability, Multimodal Neurons, Scaling Laws, AI &amp; Compute, Concrete Problems in AI Safety, and Learning from Human Preferences.\\n\\nCome work with us!\\n\\nAnthropic is a public benefit corporation headquartered in San Francisco. We offer competitive compensation and benefits, optional equity donation matching, generous vacation and parental leave, flexible working hours, and a lovely office space in which to collaborate with colleagues. Guidance on Candidates&#39; AI Usage: Learn about our policy for using AI in our application process</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_c38cbb6f-4b7","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5150472008","x-work-arrangement":"hybrid","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":"€295.000-€355.000 EUR","x-skills-required":["performance optimization","distributed systems","large-scale service orchestration","intelligent request routing","LLM inference optimization","batching strategies","multi-accelerator deployments","Kubernetes","cloud infrastructure","Python","Rust"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:57:00.340Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Dublin, 
IE"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"performance optimization, distributed systems, large-scale service orchestration, intelligent request routing, LLM inference optimization, batching strategies, multi-accelerator deployments, Kubernetes, cloud infrastructure, Python, Rust"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_890da396-bd8"},"title":"Head of International - Partner Solutions Architecture, Applied AI","description":"<p>Job Title: Head of International - Partner Solutions Architecture, Applied AI</p>\n<p>Location: London, UK</p>\n<p>Department: Sales</p>\n<p>As the Manager of the International Partnerships, Applied AI, Solutions Architect team at Anthropic, you will drive the adoption of frontier AI by enabling the deployment of Anthropic&#39;s products (Claude for Enterprise, Claude Code, and API) through our Global and Regional System Integrators (GSIs/RSIs), cloud partners (AWS and GCP), and strategic technology partners across international markets.</p>\n<p>Based in London, you will lead and grow a team of Partner Solutions Architects, establishing Anthropic&#39;s technical partner ecosystem across EMEA and beyond. You&#39;ll be responsible for leading &amp; growing the International Partnerships Applied AI team, establishing processes and best practices for partner-led pre-sales engagements, helping each team member achieve success, high productivity, and career growth, and representing Anthropic as a technical lead on some of its most important international partnerships.</p>\n<p>In collaboration with the Sales, Partnerships, Product, and Engineering teams, you&#39;ll help partners incorporate leading-edge AI systems into their practices, solutions, and customer engagements. 
You will employ your excellent communication skills to explain and demonstrate complex solutions persuasively to technical and non-technical audiences alike.</p>\n<p>You will play a critical role in identifying opportunities to accelerate indirect revenue, enable partner AI practices, and execute on long-term international GTM strategy, while maintaining our best-in-class safety standards.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Team Leadership &amp; Development: Manage and mentor a team of Applied AI, Partner Solutions Architects, providing both technical guidance and career development. Set goals and reviews for your team, promoting growth and output</li>\n</ul>\n<ul>\n<li>Strategic Technical Partnership: Serve as the senior technical thought partner to the Anthropic international GTM partnerships team, providing technical expertise to better understand the partner landscape, driving key strategic programs, and identifying opportunities to deepen partner technical capabilities across international markets</li>\n</ul>\n<ul>\n<li>Partner Ecosystem Enablement: Embed your team with GSI and cloud partner technical teams to enable their AI practices, support troubleshooting, evangelize Anthropic in their developer communities, and serve as an escalation point for complex technical issues</li>\n</ul>\n<ul>\n<li>Joint Solution Development: Lead your team in collaborating with partners to identify high-value industry-specific GenAI applications, develop joint solutions, and codify reference architectures / best practices to accelerate time to deployment across international markets</li>\n</ul>\n<ul>\n<li>Customer Deal Support: Own the technical portions of partner-led pre-sales engagements, ensuring your team intervenes directly to unblock strategic customer deals where partners are the primary delivery vehicle, providing deep technical expertise and solution architecture guidance</li>\n</ul>\n<ul>\n<li>Partner Ecosystem &amp; Events: Represent Anthropic at international 
partner events such as GSI customer workshops, AWS summits, and industry conferences. Lead or support partner-specific developer events, hackathons, and technical enablement sessions</li>\n</ul>\n<ul>\n<li>Cross-Functional Collaboration: Drive collaboration from cross-functional teams to influence and unify stakeholders at all levels of the organization to drive business outcomes. Partner closely with your aligned GTM leadership to co-build international partner strategies</li>\n</ul>\n<ul>\n<li>Product Feedback: Validate and gather feedback on Anthropic&#39;s products and offerings, especially as they relate to international partner use cases and deployment patterns, and deliver this feedback to relevant Anthropic teams to inform product roadmap and partner strategy</li>\n</ul>\n<ul>\n<li>Thought Leadership: Contribute to thought leadership through conference presentations, webinars, and technical content creation focused on the international partner ecosystem</li>\n</ul>\n<p>You may be a good fit if you:</p>\n<ul>\n<li>7+ years of experience in technical customer-facing/partner-facing roles such as Solutions Architect, Sales Engineer, Partner Sales Engineer, Technical Account Manager</li>\n</ul>\n<ul>\n<li>5+ years of technical go-to-market management experience, specifically managing pre-sales or partner-facing technical teams across EMEA, APAC, and other international regions.</li>\n</ul>\n<ul>\n<li>Track record of successfully building and scaling partnerships with GSIs (e.g., Accenture, Deloitte, WPP, TCS, Infosys) and/or cloud providers (AWS, GCP) to solve complex technical challenges across international markets</li>\n</ul>\n<ul>\n<li>Experience with the unique dynamics of partner-led selling and delivery, including indirect revenue models and partner enablement at scale</li>\n</ul>\n<ul>\n<li>Deep technical proficiency with enterprise AI deployments, API integrations, and production LLM use cases</li>\n</ul>\n<ul>\n<li>Exceptional ability to build 
relationships with and communicate technical concepts to diverse stakeholders including C-suite executives, engineering &amp; IT teams, and partner leadership</li>\n</ul>\n<ul>\n<li>Have an organizational mindset and enjoy building foundational teams in a relatively unstructured environment</li>\n</ul>\n<ul>\n<li>Have excellent communication, collaboration, and coaching abilities</li>\n</ul>\n<ul>\n<li>Are comfortable dealing with highly uncertain, ambiguous, and fast-moving environments</li>\n</ul>\n<ul>\n<li>Strong executive presence and ability to foster deep relationships with technical leaders and partner engineering teams</li>\n</ul>\n<ul>\n<li>Have at least a high-level familiarity with the architecture and operation of large language models and/or ML in general</li>\n</ul>\n<ul>\n<li>Experience with prompt engineering, LLM evaluation, and architecting AI-powered systems</li>\n</ul>\n<ul>\n<li>A love of teaching, mentoring, and helping others succeed</li>\n</ul>\n<ul>\n<li>Have a passion for making powerful technology safe and societally beneficial</li>\n</ul>\n<ul>\n<li>Think creatively about the risks and benefits of new technologies, and think beyond past checklists and playbooks</li>\n</ul>\n<p>Strong candidates may have:</p>\n<ul>\n<li>Partner SA Leadership at Scale: 5+ years leading partner-facing solution architect teams through hypergrowth, with direct experience managing both senior SAs and developing junior talent in complex partner ecosystem environments</li>\n</ul>\n<ul>\n<li>AI/ML Technical Depth + Executive Engagement: Hands-on experience with AI/ML platforms and enterprise integration patterns, combined with proven track record engaging C-level stakeholders and partner leadership in large-scale technical evaluations and joint GTM motions</li>\n</ul>\n<ul>\n<li>GSI Practice Building: Experience helping GSIs or consultancies build or scale their AI/ML practices, including enablement programs, certification paths, and joint solution 
development</li>\n</ul>\n<p>Annual compensation range for this role is £170,000-£215,000 GBP.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_890da396-bd8","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5146999008","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"£170,000-£215,000 GBP","x-skills-required":["Technical customer-facing/partner-facing roles","Solutions Architect","Sales Engineer","Partner Sales Engineer","Technical Account Manager","Technical go-to-market management","Enterprise AI deployments","API integrations","Production LLM use cases","Large language models","ML in general","Prompt engineering","LLM evaluation","Architecting AI-powered systems"],"x-skills-preferred":["Partner SA Leadership at Scale","AI/ML Technical Depth + Executive Engagement","GSI Practice Building"],"datePosted":"2026-04-18T15:56:55.674Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"London, UK"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Technical customer-facing/partner-facing roles, Solutions Architect, Sales Engineer, Partner Sales Engineer, Technical Account Manager, Technical go-to-market management, Enterprise AI deployments, API integrations, Production LLM use cases, Large language models, ML in general, Prompt engineering, LLM evaluation, Architecting AI-powered systems, Partner SA Leadership at Scale, AI/ML Technical Depth + Executive Engagement, GSI Practice 
Building","baseSalary":{"@type":"MonetaryAmount","currency":"GBP","value":{"@type":"QuantitativeValue","minValue":170000,"maxValue":215000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_32c0c69a-037"},"title":"Staff Software Engineer, Inference","description":"<p><strong>About the role:</strong></p>\n<p>Our Inference team is responsible for building and maintaining the critical systems that serve Claude to millions of users worldwide. We bring Claude to life by serving our models via the industry&#39;s largest compute-agnostic inference deployments. We are responsible for the entire stack from intelligent request routing to fleet-wide orchestration across diverse AI accelerators.</p>\n<p>As a Staff Software Engineer on our Inference team, you will work end to end, identifying and addressing key infrastructure blockers to serve Claude to millions of users while enabling breakthrough AI research. Strong candidates should have familiarity with performance optimization, distributed systems, large-scale service orchestration, and intelligent request routing. 
Familiarity with LLM inference optimization, batching strategies, and multi-accelerator deployments is highly encouraged but not strictly necessary.</p>\n<p><strong>Responsibilities:</strong></p>\n<ul>\n<li>Work end to end on identifying and addressing key infrastructure blockers to serve Claude to millions of users while enabling breakthrough AI research</li>\n<li>Collaborate with the team to design and implement solutions to complex problems</li>\n<li>Develop and maintain large-scale distributed systems</li>\n<li>Implement and deploy machine learning systems at scale</li>\n<li>Load balancing, request routing, or traffic management systems</li>\n<li>LLM inference optimization, batching, and caching strategies</li>\n<li>Kubernetes and cloud infrastructure (AWS, GCP)</li>\n<li>Python or Rust</li>\n</ul>\n<p><strong>Requirements:</strong></p>\n<ul>\n<li>Significant software engineering experience, particularly with distributed systems</li>\n<li>Results-oriented, with a bias towards flexibility and impact</li>\n<li>Pick up slack, even if it goes outside your job description</li>\n<li>Want to learn more about machine learning systems and infrastructure</li>\n<li>Thrive in environments where technical excellence directly drives both business results and research breakthroughs</li>\n<li>Care about the societal impacts of your work</li>\n</ul>\n<p><strong>Benefits:</strong></p>\n<ul>\n<li>Competitive compensation and benefits</li>\n<li>Optional equity donation matching</li>\n<li>Generous vacation and parental leave</li>\n<li>Flexible working hours</li>\n<li>Lovely office space in which to collaborate with colleagues</li>\n</ul>\n<p><strong>Application Instructions:</strong></p>\n<p>If you&#39;re interested in this role, please submit your application through our website. 
We look forward to hearing from you!</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_32c0c69a-037","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5150472008","x-work-arrangement":"hybrid","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":"€295.000-€355.000 EUR","x-skills-required":["performance optimization","distributed systems","large-scale service orchestration","intelligent request routing","LLM inference optimization","batching strategies","multi-accelerator deployments","Kubernetes","cloud infrastructure","Python","Rust"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:56:14.384Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Dublin, IE"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"performance optimization, distributed systems, large-scale service orchestration, intelligent request routing, LLM inference optimization, batching strategies, multi-accelerator deployments, Kubernetes, cloud infrastructure, Python, Rust"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_87f455b9-4bf"},"title":"QA Lead, AI Agent","description":"<p>Join us on this thrilling journey to revolutionize the workforce with AI. The future of work is here, and it&#39;s at Cresta.</p>\n<p>At Cresta, shipping AI is only half the story. Ensuring that AI interacts with humans reliably, accurately, and empathetically at scale is where the real challenge lies. As the QA Lead, AI Agent, you will be the ultimate guardian of the customer experience for our AI Agent product line. 
This role is perfect for a strategic quality expert who loves the intersection of human psychology and machine logic.</p>\n<p>You will own the end-to-end quality strategy, from designing complex test plans for non-deterministic LLMs to building automated and scalable testing environments using Cresta&#39;s proprietary no-code test and evaluation tools. You aren&#39;t just looking for bugs; you are building the framework that allows Cresta to deploy world-class AI agents for the world&#39;s largest enterprises with total confidence.</p>\n<p><strong>Responsibilities</strong></p>\n<ul>\n<li>Architect &amp; Scale AI Evaluation Systems: Design and oversee the end-to-end framework for testing AI agent systems at scale. You will leverage LLM-driven methodologies, including automated simulations, &quot;LLM-on-LLM&quot; rubrics, and adversarial red-teaming, to ensure reliability, policy adherence, and logic across complex, multi-turn conversational flows.</li>\n</ul>\n<ul>\n<li>Drive Deployment Excellence: Partner with Forward Deployed Engineers and PMs to triage issues, identify bottlenecks, and create new test cases on the fly to address real-world deployment challenges.</li>\n</ul>\n<ul>\n<li>Be the Customer’s Voice: Conduct manual UAT and voice-call testing to represent the end-customer experience. You take it personally when an agent lacks empathy or clarity, and you excel at articulating these nuances to the engineering team and clients.</li>\n</ul>\n<ul>\n<li>Lead and Scale the Team: Lead a pod of QA analysts and partners. 
You will define the best practices, communication loops, and shared knowledge base that allow the QA function to scale alongside our rapidly growing product line.</li>\n</ul>\n<p><strong>Requirements</strong></p>\n<ul>\n<li>5+ years of experience in Quality Engineering, Deployments, or Technical QA, ideally within an AI or high-growth SaaS environment.</li>\n</ul>\n<ul>\n<li>Systems Thinking: A strong technical intuition and curiosity about how LLMs work. While you don&#39;t need to code, you must be comfortable navigating technical concepts like LLM, RAG, prompt logic, and multi-turn conversational flows.</li>\n</ul>\n<ul>\n<li>Operational Leadership: Proven ability to lead large E2E technical projects through partners, and a passion for building processes that improve efficiency between QA, Engineering, and Product.</li>\n</ul>\n<ul>\n<li>The &quot;QA Nose&quot;: An uncanny ability to find the edge case and a bias toward action. You anticipate bottlenecks before they happen and deliver solutions with urgency.</li>\n</ul>\n<ul>\n<li>High Empathy: A consultative mindset with the ability to represent the &quot;human element&quot; of a customer support interaction.</li>\n</ul>\n<ul>\n<li>Startup Agility: You thrive in fast-paced environments, excel at turning ambiguity into execution, and are comfortable &quot;rolling up your sleeves&quot; to build.</li>\n</ul>\n<p><strong>Bonus Points</strong></p>\n<ul>\n<li>Experience with CCaaS (Contact Center as a Service), telephony, or STT/TTS (Speech-to-Text / Text-to-Speech) technologies.</li>\n</ul>\n<ul>\n<li>Background in Conversation Design or SDET roles.</li>\n</ul>\n<ul>\n<li>Experience leading a team with direct reports.</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_87f455b9-4bf","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Cresta","sameAs":"https://www.cresta.ai/","logo":"https://logos.yubhub.co/cresta.ai.png"},"x-apply-url":"https://job-boards.greenhouse.io/cresta/jobs/5148813008","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Quality Engineering","Deployments","Technical QA","AI","SaaS","LLMs","RAG","Prompt Logic","Multi-Turn Conversational Flows"],"x-skills-preferred":["CCaaS","Telephony","STT/TTS","Conversation Design","SDET"],"datePosted":"2026-04-18T15:55:59.640Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"United States (Remote)"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Quality Engineering, Deployments, Technical QA, AI, SaaS, LLMs, RAG, Prompt Logic, Multi-Turn Conversational Flows, CCaaS, Telephony, STT/TTS, Conversation Design, SDET"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_0036f074-845"},"title":"Resident Solutions Architect - Financial Services","description":"<p>As a Senior Big Data Solutions Architect (Sr Resident Solutions Architect) in our Professional Services team, you will work with clients on short to medium term customer engagements on their big data challenges using the Databricks platform.</p>\n<p>You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>RSAs are billable and know how to complete projects according to specification with excellent customer service.</p>\n<p>You will report to the regional Manager/Lead.</p>\n<p>The impact you will 
have:</p>\n<ul>\n<li>Work on a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases</li>\n<li>Work with engagement managers to scope variety of professional services work with input from the customer</li>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n<li>Consult on architecture and design; bootstrap hands-on projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks.</li>\n<li>Provide an escalated level of support for customer operational issues.</li>\n<li>Work with the Databricks technical team, Project Manager, Architect and Customer team to ensure the technical components of the engagement are delivered to meet customer&#39;s needs.</li>\n<li>Work with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement specific product and support issues.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>9+ years experience in data engineering, data platforms &amp; analytics</li>\n<li>Comfortable writing code in either Python or Scala</li>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Apache Spark™ runtime internals</li>\n<li>Familiarity with CI/CD for production deployments</li>\n<li>Working knowledge of MLOps</li>\n<li>Capable of design and deployment of highly performant end-to-end data architectures</li>\n<li>Experience with technical project delivery - managing scope and timelines.</li>\n<li>Documentation and white-boarding skills.</li>\n<li>Experience working with clients and managing 
conflicts.</li>\n<li>Experience in building scalable streaming and batch solutions using cloud-native components</li>\n<li>Travel to customers up to 20% of the time</li>\n</ul>\n<p>Nice to have: Databricks Certification</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_0036f074-845","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8456966002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 USD","x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","design and deployment of highly performant end-to-end data architectures","technical project delivery","documentation and white-boarding skills","client management"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:55:41.870Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Boston, Massachusetts"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & analytics, Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, design and deployment of highly performant end-to-end data architectures, technical project delivery, documentation and white-boarding skills, client 
management","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_fc79e6e5-5c0"},"title":"Resident Solutions Architect - Manufacturing","description":"<p>As a Resident Solutions Architect (RSA) on our Professional Services team, you will work with customers on short to medium term customer engagements on their big data challenges using the Databricks platform.</p>\n<p>You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>RSAs are billable and know how to complete projects according to specification with excellent customer service.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>Handle a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases</li>\n</ul>\n<ul>\n<li>Work with engagement managers to scope variety of professional services work with input from the customer</li>\n</ul>\n<ul>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n</ul>\n<ul>\n<li>Consult on architecture and design; bootstrap or implement customer projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks.</li>\n</ul>\n<ul>\n<li>Provide an escalated level of support for customer operational issues</li>\n</ul>\n<ul>\n<li>Collaborate with the Databricks Technical, Project Manager, Architect and Customer teams to ensure the technical components of the engagement are delivered to meet 
customer&#39;s needs</li>\n</ul>\n<ul>\n<li>Work with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement specific product and support issues</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>6+ years experience in data engineering, data platforms &amp; analytics</li>\n</ul>\n<ul>\n<li>Comfortable writing code in either Python or Scala</li>\n</ul>\n<ul>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n</ul>\n<ul>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Spark runtime internals</li>\n</ul>\n<ul>\n<li>Familiarity with CI/CD for production deployments</li>\n</ul>\n<ul>\n<li>Working knowledge of MLOps</li>\n</ul>\n<ul>\n<li>Design and deployment of performant end-to-end data architectures</li>\n</ul>\n<ul>\n<li>Experience with technical project delivery - managing scope and timelines</li>\n</ul>\n<ul>\n<li>Documentation and white-boarding skills</li>\n</ul>\n<ul>\n<li>Experience working with clients and managing conflicts</li>\n</ul>\n<ul>\n<li>Build skills in technical areas which support the deployment and integration of Databricks-based solutions to complete customer projects</li>\n</ul>\n<ul>\n<li>Ability to travel up to 30% when needed</li>\n</ul>\n<p>Pay Range Transparency Databricks is committed to fair and equitable compensation practices. The pay range(s) for this role is listed below and represents the expected base salary range for non-commissionable roles or on-target earnings for commissionable roles. Actual compensation packages are based on several factors that are unique to each candidate, including but not limited to job-related skills, depth of experience, relevant certifications and training, and specific work location. Based on the factors above, Databricks anticipated utilizing the full width of the range. 
The total compensation package for this position may also include eligibility for annual performance bonus, equity, and the benefits listed above.</p>\n<p>For more information regarding which range your location is in visit our page here.</p>\n<p>Zone 1 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 2 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 3 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 4 Pay Range $180,656-$248,360 USD</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_fc79e6e5-5c0","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8494156002","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 USD","x-skills-required":["Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","Data engineering","Data science","Cloud technology"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:54:34.838Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Seattle, Washington"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, Data engineering, Data science, Cloud technology","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_dbb4a6c8-3d9"},"title":"Manager, Applied AI Solutions Architecture - Partnerships","description":"<p>As the Manager of the 
Partnerships Applied AI Solutions Architect team, you will drive adoption of frontier AI by enabling deployment of Anthropic&#39;s products (Claude for Enterprise, Claude Code, API) through our Global and Regional System Integrators, cloud partners (AWS, GCP, Azure), and strategic technology partners.</p>\n<p>You will build and lead a team of Partner Solutions Architects, establish processes and best practices for partner-led pre-sales engagements, and represent Anthropic as the technical lead on its most important partnerships. In collaboration with Sales, Partnerships, Product, and Engineering, you will help partners incorporate leading-edge AI into their practices, accelerate indirect revenue, and execute long-term GTM strategy while maintaining our best-in-class safety standards.</p>\n<p>Responsibilities:</p>\n<p>Team Leadership &amp; Development: Hire, manage, and mentor a team of Partner Solutions Architects. Set goals, run reviews, and coach each team member toward high productivity and career growth.</p>\n<p>Strategic Technical Partnership: Act as the senior technical thought partner to Anthropic&#39;s GTM partnerships team. Co-build partner strategy with aligned GTM leadership, drive key programs, and align cross-functional stakeholders (Sales, Product, Engineering) behind partner outcomes.</p>\n<p>Partner Enablement &amp; Ecosystem: Embed your team with GSI and cloud partner technical teams to enable their AI practices, troubleshoot, and evangelize Anthropic in their developer communities. Represent Anthropic at partner events (GSI workshops, AWS/GCP summits, hackathons) and contribute technical content and thought leadership.</p>\n<p>Joint Solution Development: Lead partners in identifying high-value, industry-specific GenAI applications. Develop joint solutions and codify reference architectures and best practices to accelerate time to deployment.</p>\n<p>Customer Deal Support: Own the technical portion of partner-led pre-sales engagements. 
Intervene directly on strategic deals where partners are the primary delivery vehicle, providing deep solution architecture guidance.</p>\n<p>Product Feedback: Gather and validate feedback on Anthropic&#39;s products from partner deployments and deliver it to Product and Engineering to inform roadmap and partner strategy.</p>\n<p>You may be a good fit if you have:</p>\n<p>7+ years in technical customer-facing or partner-facing roles (Solutions Architect, Sales Engineer, Partner SE, TAM).</p>\n<p>3+ years managing pre-sales or partner-facing technical teams; comfortable building foundational teams in ambiguous, fast-moving environments.</p>\n<p>Track record building and scaling partnerships with GSIs (e.g., Accenture, Deloitte, TCS, Infosys) and/or cloud providers (AWS, GCP, Azure).</p>\n<p>Deep understanding of partner-led selling and delivery: indirect revenue models, enablement at scale, and joint GTM motions.</p>\n<p>Technical depth in enterprise AI deployments: LLM architecture, prompt engineering, evaluation, API integrations, and production use cases.</p>\n<p>Exceptional communication and executive presence; able to build trusted relationships with C-suite, partner leadership, and engineering teams alike.</p>\n<p>A love of teaching and mentoring, and a passion for advancing safe, beneficial AI.</p>\n<p>Strong candidates may also have:</p>\n<p>5+ years leading partner-facing SA teams through hypergrowth, including developing both senior and junior talent.</p>\n<p>Direct experience helping GSIs or consultancies build their AI/ML practice - enablement programs, certification paths, joint solution development.</p>\n<p>The annual compensation range for this role is $315,000-$380,000 USD.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_dbb4a6c8-3d9","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5173031008","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$315,000-$380,000 USD","x-skills-required":["Technical customer-facing or partner-facing roles","Pre-sales or partner-facing technical teams management","Partnership building and scaling","Partner-led selling and delivery","Enterprise AI deployments"],"x-skills-preferred":["LLM architecture","Prompt engineering","API integrations","Production use cases"],"datePosted":"2026-04-18T15:53:59.200Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA | New York City, NY | Seattle, WA"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Technical customer-facing or partner-facing roles, Pre-sales or partner-facing technical teams management, Partnership building and scaling, Partner-led selling and delivery, Enterprise AI deployments, LLM architecture, Prompt engineering, API integrations, Production use cases","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":315000,"maxValue":380000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_d50772ab-afe"},"title":"Staff / Senior Software Engineer, Cloud Inference","description":"<p>We are seeking a Staff / Senior Software Engineer to join our Cloud Inference team. 
The successful candidate will design and build infrastructure that serves Claude across multiple cloud service providers (CSPs), accounting for differences in compute hardware, networking, APIs, and operational models.</p>\n<p>The ideal candidate will have significant software engineering experience, with a strong background in high-performance, large-scale distributed systems serving millions of users. They will also have experience building or operating services on at least one major cloud platform (AWS, GCP, or Azure), with exposure to Kubernetes, Infrastructure as Code or container orchestration.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Design and build infrastructure that serves Claude across multiple CSPs, accounting for differences in compute hardware, networking, APIs, and operational models</li>\n</ul>\n<ul>\n<li>Collaborate with CSP partner engineering teams to resolve operational issues, influence provider roadmaps, and stand up end-to-end serving on new cloud platforms</li>\n</ul>\n<ul>\n<li>Design and evolve CI/CD automation systems, including validation and deployment pipelines, that reliably ship new model versions to millions of users across cloud platforms without regressions</li>\n</ul>\n<ul>\n<li>Design interfaces and tooling abstractions across CSPs that enable cost-effective inference management, scale across providers, and reduce per-platform complexity</li>\n</ul>\n<ul>\n<li>Contribute to capacity planning and autoscaling strategies that dynamically match supply with demand across CSP validation and production workloads</li>\n</ul>\n<ul>\n<li>Optimise inference cost and performance across providers,designing workload placement and routing systems that direct requests to the most cost-effective accelerator and region</li>\n</ul>\n<ul>\n<li>Contribute to inference features that must work consistently across all platforms</li>\n</ul>\n<ul>\n<li>Analyse observability data across providers to identify performance bottlenecks, cost anomalies, and 
regressions, and drive remediation based on real-world production workloads</li>\n</ul>\n<p>Requirements:</p>\n<ul>\n<li>Significant software engineering experience, with a strong background in high-performance, large-scale distributed systems serving millions of users</li>\n</ul>\n<ul>\n<li>Experience building or operating services on at least one major cloud platform (AWS, GCP, or Azure), with exposure to Kubernetes, Infrastructure as Code or container orchestration</li>\n</ul>\n<ul>\n<li>Strong interest in inference</li>\n</ul>\n<ul>\n<li>Thrive in cross-functional collaboration with both internal teams and external partners</li>\n</ul>\n<ul>\n<li>Are a fast learner who can quickly ramp up on new technologies, hardware platforms, and provider ecosystems</li>\n</ul>\n<ul>\n<li>Are highly autonomous and self-driven, taking ownership of problems end-to-end with a bias toward flexibility and high-impact work</li>\n</ul>\n<ul>\n<li>Pick up slack, even when it goes outside your job description</li>\n</ul>\n<p>Preferred skills:</p>\n<ul>\n<li>Direct experience working with CSP partner teams to scale infrastructure or products across multiple platforms, navigating differences in networking, security, privacy, billing, and managed service offerings</li>\n</ul>\n<ul>\n<li>A background in building platform-agnostic tooling or abstraction layers that work across cloud providers</li>\n</ul>\n<ul>\n<li>Hands-on experience with capacity management, cost optimisation, or resource planning at scale across heterogeneous environments</li>\n</ul>\n<ul>\n<li>Strong familiarity with LLM inference optimisation, batching, caching, and serving strategies</li>\n</ul>\n<ul>\n<li>Experience with Machine learning infrastructure including GPUs, TPUs, Trainium, or other AI accelerators</li>\n</ul>\n<ul>\n<li>Background designing and building CI/CD systems that automate deployment and validation across cloud environments</li>\n</ul>\n<ul>\n<li>Solid understanding of multi-region deployments, 
geographic routing, and global traffic management</li>\n</ul>\n<ul>\n<li>Proficiency in Python or Rust</li>\n</ul>\n<p>Salary Range: $300,000-$485,000 USD</p>\n<p>Experience Level: Staff</p>\n<p>Employment Type: Full-time</p>\n<p>Workplace Type: Hybrid</p>\n<p>Category: Engineering</p>\n<p>Industry: Technology</p>\n<p>Required Skills:</p>\n<ul>\n<li>High-performance, large-scale distributed systems</li>\n</ul>\n<ul>\n<li>Cloud computing (AWS, GCP, Azure)</li>\n</ul>\n<ul>\n<li>Kubernetes</li>\n</ul>\n<ul>\n<li>Infrastructure as Code</li>\n</ul>\n<ul>\n<li>Container orchestration</li>\n</ul>\n<ul>\n<li>Inference</li>\n</ul>\n<ul>\n<li>Cross-functional collaboration</li>\n</ul>\n<ul>\n<li>Autonomy and self-driven</li>\n</ul>\n<ul>\n<li>Platform-agnostic tooling</li>\n</ul>\n<ul>\n<li>Capacity management</li>\n</ul>\n<ul>\n<li>Cost optimisation</li>\n</ul>\n<ul>\n<li>Resource planning</li>\n</ul>\n<ul>\n<li>LLM inference optimisation</li>\n</ul>\n<ul>\n<li>Machine learning infrastructure</li>\n</ul>\n<ul>\n<li>CI/CD systems</li>\n</ul>\n<ul>\n<li>Multi-region deployments</li>\n</ul>\n<ul>\n<li>Geographic routing</li>\n</ul>\n<ul>\n<li>Global traffic management</li>\n</ul>\n<ul>\n<li>Python</li>\n</ul>\n<ul>\n<li>Rust</li>\n</ul>\n<p>Preferred Skills:</p>\n<ul>\n<li>Direct experience working with CSP partner teams</li>\n</ul>\n<ul>\n<li>Building platform-agnostic tooling</li>\n</ul>\n<ul>\n<li>Hands-on experience with capacity management</li>\n</ul>\n<ul>\n<li>Strong familiarity with LLM inference optimisation</li>\n</ul>\n<ul>\n<li>Experience with Machine learning infrastructure</li>\n</ul>\n<ul>\n<li>Background designing and building CI/CD systems</li>\n</ul>\n<ul>\n<li>Solid understanding of multi-region deployments</li>\n</ul>\n<ul>\n<li>Proficiency in Python or Rust</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_d50772ab-afe","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5107466008","x-work-arrangement":"hybrid","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":"$300,000-$485,000 USD","x-skills-required":["high-performance, large-scale distributed systems","cloud computing (AWS, GCP, Azure)","kubernetes","infrastructure as code","container orchestration","inference","cross-functional collaboration","autonomy and self-driven","platform-agnostic tooling","capacity management","cost optimisation","resource planning","llm inference optimisation","machine learning infrastructure","ci/cd systems","multi-region deployments","geographic routing","global traffic management","python","rust"],"x-skills-preferred":["direct experience working with csp partner teams","building platform-agnostic tooling","hands-on experience with capacity management","strong familiarity with llm inference optimisation","experience with machine learning infrastructure","background designing and building ci/cd systems","solid understanding of multi-region deployments","proficiency in python or rust"],"datePosted":"2026-04-18T15:53:24.048Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA | Seattle, WA"}},"employmentType":"FULL_TIME","occupationalCategory":"engineering","industry":"technology","skills":"high-performance, large-scale distributed systems, cloud computing (AWS, GCP, Azure), kubernetes, infrastructure as code, container orchestration, inference, cross-functional collaboration, autonomy and self-driven, platform-agnostic tooling, capacity management, cost optimisation, resource planning, llm inference optimisation, machine learning infrastructure, ci/cd 
systems, multi-region deployments, geographic routing, global traffic management, python, rust, direct experience working with csp partner teams, building platform-agnostic tooling, hands-on experience with capacity management, strong familiarity with llm inference optimisation, experience with machine learning infrastructure, background designing and building ci/cd systems, solid understanding of multi-region deployments, proficiency in python or rust","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":300000,"maxValue":485000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_74be15a1-bce"},"title":"Software Engineer, Inference Deployment","description":"<p>Our mandate is to make inference deployment boring and unattended. We serve Claude to millions of users across GPUs, TPUs, and Trainium , and every model update must reach production safely, quickly, and without disrupting service. As a Software Engineer on the Launch Engineering team, you&#39;ll design and build the deployment infrastructure that moves inference code from merge to production.</p>\n<p>This is a resource-constrained optimization problem at its core: validation and deployment consume the same accelerator chips that serve customer traffic , your deploys compete with live user requests for the same hardware. Every model brings different fleet sizes, startup times, and correctness requirements, so the system must adapt continuously. 
You&#39;ll build systems that navigate these constraints , orchestrating validation, scheduling deployments intelligently, and driving down cycle time from merge to production.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Own deployment orchestration that continuously moves validated inference builds into production across GPU, TPU, and Trainium fleets, unattended under normal conditions</li>\n</ul>\n<ul>\n<li>Improve capacity-aware deployment scheduling to maximize deployment throughput against constrained accelerator budgets and variable fleet sizes</li>\n</ul>\n<ul>\n<li>Extend deployment observability , dashboards and tooling that answer &quot;what code is running in production,&quot; &quot;where is my commit,&quot; and &quot;what validation passed for this deploy&quot;</li>\n</ul>\n<ul>\n<li>Drive down cycle time from code merge to production with pipeline architectures that minimize serial dependencies and maximize parallelism</li>\n</ul>\n<ul>\n<li>Optimize fleet rollout strategies for large-scale deployments across thousands of GPU, TPU, and Trainium chips, minimizing disruption to serving capacity</li>\n</ul>\n<ul>\n<li>Evolve self-service model onboarding so that new models can be added to the continuous deployment pipeline without Launch Engineering involvement</li>\n</ul>\n<ul>\n<li>Partner across the Inference organization with teams owning validation, autoscaling, and model routing to integrate deployment automation with their systems</li>\n</ul>\n<p>You May Be a Good Fit If You Have:</p>\n<ul>\n<li>5+ years of experience building deployment, release, or delivery infrastructure at scale</li>\n</ul>\n<ul>\n<li>Strong software engineering skills with experience designing systems that manage complex state machines and multi-stage pipelines</li>\n</ul>\n<ul>\n<li>Experience with deployment systems where resource constraints shape the design , whether that&#39;s fleet capacity, network bandwidth, hardware availability, or coordinated rollout 
windows</li>\n</ul>\n<ul>\n<li>A track record of building automation that measurably improves deployment velocity and reliability</li>\n</ul>\n<ul>\n<li>Proficiency with Kubernetes-based deployments, rolling update mechanics, and container orchestration</li>\n</ul>\n<ul>\n<li>Comfort working across the stack , from backend services and databases to CLI tools and web UIs</li>\n</ul>\n<ul>\n<li>Strong communication skills and the ability to work closely with oncall engineers, model teams, and infrastructure partners</li>\n</ul>\n<p>Strong Candidates May Also Have:</p>\n<ul>\n<li>Experience with ML inference or training infrastructure deployment, particularly across multiple accelerator types (GPU, TPU, Trainium)</li>\n</ul>\n<ul>\n<li>Background in capacity planning or resource-constrained scheduling (e.g., bin-packing, fleet management, job scheduling with hardware affinity)</li>\n</ul>\n<ul>\n<li>Experience with progressive delivery in systems with long validation cycles: canary/soak testing, blue-green deployments, traffic shifting, automated rollback</li>\n</ul>\n<ul>\n<li>Experience at companies with large-scale release engineering challenges (mobile release trains, monorepo deployments, multi-datacenter rollouts)</li>\n</ul>\n<ul>\n<li>Experience with Python and/or Rust in production systems</li>\n</ul>\n<p>The annual compensation range for this role is $320,000-$485,000 USD.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_74be15a1-bce","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5111745008","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$320,000-$485,000 USD","x-skills-required":["deployment 
infrastructure","software engineering","complex state machines","multi-stage pipelines","Kubernetes-based deployments","container orchestration","backend services","databases","CLI tools","web UIs"],"x-skills-preferred":["ML inference","training infrastructure deployment","capacity planning","resource-constrained scheduling","blue-green deployments","progressive delivery","Python","Rust"],"datePosted":"2026-04-18T15:53:04.252Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA | New York City, NY | Seattle, WA"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"deployment infrastructure, software engineering, complex state machines, multi-stage pipelines, Kubernetes-based deployments, container orchestration, backend services, databases, CLI tools, web UIs, ML inference, training infrastructure deployment, capacity planning, resource-constrained scheduling, blue-green deployments, progressive delivery, Python, Rust","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":320000,"maxValue":485000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_7df35551-a96"},"title":"Account Executive, Beneficial Deployments (French Speaking)","description":"<p>As an EMEA Nonprofit Account Executive at Anthropic, you&#39;ll drive adoption of safe, frontier AI by securing strategic partnerships with nonprofit organisations across Europe, the Middle East, and Africa.\\n\\nYou&#39;ll leverage your consultative sales expertise to propel revenue growth while becoming a trusted partner to nonprofit leaders, helping them embed and deploy AI to amplify their impact across programme delivery, fundraising, research, and operations.\\n\\nThe ideal candidate will be an exceptional salesperson with experience selling into EMEA markets , and specifically into French-speaking
contexts , a passion for developing new market segments, and the ability to operate autonomously while partnering closely with SF-based teams.\\n\\nBy driving deployment of Anthropic&#39;s emerging products in the EMEA nonprofit sector, you will help organisations amplify their social impact while advancing the ethical development of AI.\\n\\nResponsibilities:\\n\\n- Win new business and drive revenue for Anthropic within EMEA nonprofit organisations, including INGOs, foundations, charitable trusts, and social enterprises. Own the full sales cycle from first outbound to launch, managing complex procurement processes across multiple jurisdictions\\n\\n- Design and execute innovative sales strategies tailored to EMEA market dynamics, regulatory environments, and cultural contexts. Analyse market landscapes across UK, EU, and emerging markets to translate high-level plans into targeted sales activities\\n\\n- Navigate complex stakeholder ecosystems including executive directors, trustees, programme officers, IT departments, and procurement committees across multiple geographies, building consensus in organisations with federated or matrix structures\\n\\n- Serve as the regional expert on EMEA nonprofit market dynamics, regulatory requirements, and competitive landscape. 
Provide insights that strengthen our value proposition and inform product roadmaps for international deployments\\n\\n- Build strategic relationships with EMEA nonprofit technology platforms, consultants, sector networks (e.g., Bond, NCVO, European Foundation Centre), and sector influencers to expand market reach\\n\\n- Partner effectively with SF-based teams across time zones, contributing to global sales methodology development while adapting playbooks and best practices for EMEA markets\\n\\nYou May Be a Good Fit If You Have:\\n\\n- 5+ years of experience prospecting and closing leads in EMEA markets, with particular focus on Francophone markets (France, Belgium, Switzerland, and Francophone Africa , Senegal, DRC, Côte d&#39;Ivoire, and others) and broader European market contexts\\n\\n- Proven ability to manage complex, multi-country sales cycles and navigate varying procurement frameworks, budget cycles, and approval processes across EMEA\\n\\n- Experience managing six-figure enterprise deal cycles\\n\\n- Experience selling to organisations with federated structures, matrix decision-making, or multi-entity governance (e.g., international federations, umbrella organisations)\\n\\n- Demonstrated history of exceeding quota while operating autonomously across time zones with limited direct supervision\\n\\n- Excellent communication skills with ability to adapt style across cultural contexts and present confidently to stakeholders from diverse backgrounds\\n\\n- Fluency in English required; native or professional fluency in French required. 
Proficiency in additional languages (Spanish, Arabic) a plus.\\n\\n- Passion for emerging technologies like AI, with interest in ensuring they are developed safely and responsibly\\n\\n- Interest in or passion for social impact and mission-driven work\\n\\nStrong Candidates May Also Have:\\n\\n- Experience selling to or working with EMEA nonprofit organisations, INGOs, foundations, or government/bilateral agencies (e.g., FCDO, GIZ, EU institutions)\\n\\n- Understanding of international development funding mechanisms, including institutional donors, bilateral agencies, and European foundation giving\\n\\n- Familiarity with nonprofit technology ecosystems popular in EMEA, including CRMs (Salesforce NPSP, Blackbaud, CiviCRM), and platforms like Raiser&#39;s Edge\\n\\n- Active involvement in the EMEA nonprofit community through board service, volunteering, or prior employment\\n\\n- Experience navigating complex procurement with major INGOs (e.g., Save the Children, Oxfam, MSF, IRC) or large UK charities\\n\\n- Understanding of specific nonprofit verticals in EMEA contexts (humanitarian, development, environment, health, migration)\\n\\n- Existing network within Francophone nonprofit, INGO, or social sector communities strongly preferred\\n\\nLogistics:\\n\\nLocation: London or Dublin preferred. \\n\\nTravel: Up to 40% travel within EMEA for customer meetings and events; quarterly travel to SF headquarters expected.\\n\\nEducation: Bachelor&#39;s degree or equivalent experience.\\n\\nVisa Sponsorship: We sponsor visas where possible and retain immigration support for successful candidates.\\n\\nThe annual compensation range for this role is listed below. 
\\n\\nFor sales roles, the range provided is the role’s On Target Earnings (&quot;OTE&quot;) range, meaning that the range includes both the sales commissions/sales bonuses target and annual base salary for the role.\\n\\nAnnual Salary:€205.000-€250.000 EUR</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_7df35551-a96","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5165641008","x-work-arrangement":"hybrid","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":"€205.000-€250.000 EUR","x-skills-required":["Sales","EMEA markets","French-speaking contexts","AI","Nonprofit organisations","Strategic partnerships","Revenue growth","Complex procurement processes","Innovative sales strategies","Regulatory environments","Cultural contexts","Market landscapes","International deployments","Nonprofit technology platforms","Consultants","Sector networks","Sector influencers","Global sales methodology development","Adapting playbooks and best practices"],"x-skills-preferred":["Fluency in English","Native or professional fluency in French","Proficiency in additional languages (Spanish, Arabic)","Passion for emerging technologies","Interest in ensuring they are developed safely and responsibly","Interest in or passion for social impact and mission-driven work"],"datePosted":"2026-04-18T15:51:41.195Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Dublin, IE"}},"employmentType":"FULL_TIME","occupationalCategory":"Sales","industry":"Technology","skills":"Sales, EMEA markets, French-speaking contexts, AI, Nonprofit organisations, Strategic partnerships, Revenue growth, Complex procurement processes, Innovative sales 
strategies, Regulatory environments, Cultural contexts, Market landscapes, International deployments, Nonprofit technology platforms, Consultants, Sector networks, Sector influencers, Global sales methodology development, Adapting playbooks and best practices, Fluency in English, Native or professional fluency in French, Proficiency in additional languages (Spanish, Arabic), Passion for emerging technologies, Interest in ensuring they are developed safely and responsibly, Interest in or passion for social impact and mission-driven work"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_3439b4ff-d42"},"title":"Engineering Manager, HADR","description":"<p>We&#39;re seeking an experienced Engineering Manager to join our High Availability and Disaster Recovery team. As a key member of our team, you will help develop our global architecture by combining less-available components and data centers into a highly available and resilient whole. You will work on latency-critical solutions where every millisecond matters and data redundancy is a hard requirement. 
Your work will enable Stripe to increase the GDP of the internet by providing uptime and data protection which have historically been impossible.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Lead and manage a team of talented engineers on the team, providing mentorship, guidance, and support to ensure their success.</li>\n<li>Drive the execution of projects, overseeing the entire development lifecycle from planning to delivery, while maintaining high standards of quality and timely completion.</li>\n<li>Help influence peers / managers and build consensus while dealing with ambiguity</li>\n<li>Build your team - formalizing role definitions, defining charter and ownership boundaries and taking a newly formed team into a high-functioning one</li>\n</ul>\n<p>Who you are: We&#39;re looking for someone who meets the minimum requirements to be considered for the role. If you meet these requirements, you are encouraged to apply. The preferred qualifications are a bonus, not a requirement.</p>\n<p>Minimum requirements:</p>\n<ul>\n<li>4+ years of software development experience</li>\n<li>2+ years of cloud development or management experience</li>\n<li>Professional working proficiency in English</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_3439b4ff-d42","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Stripe","sameAs":"https://stripe.com/","logo":"https://logos.yubhub.co/stripe.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/stripe/jobs/7657997","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["software development experience","cloud development or management experience","English language proficiency"],"x-skills-preferred":["distributed system concepts","high-availability systems","chaos engineering","disaster recovery design","cloud 
infrastructure","multi-region deployments","document databases","MongoDB"],"datePosted":"2026-04-18T15:51:24.845Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"US Remote"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"software development experience, cloud development or management experience, English language proficiency, distributed system concepts, high-availability systems, chaos engineering, disaster recovery design, cloud infrastructure, multi-region deployments, document databases, MongoDB"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_e394b0fa-2ba"},"title":"Staff Software Engineer, Inference","description":"<p><strong>About the role</strong></p>\n<p>Our Inference team is responsible for building and maintaining the critical systems that serve Claude to millions of users worldwide. We bring Claude to life by serving our models via the industry&#39;s largest compute-agnostic inference deployments. We are responsible for the entire stack from intelligent request routing to fleet-wide orchestration across diverse AI accelerators.</p>\n<p>As a Staff Software Engineer on our Inference team, you will work end to end, identifying and addressing key infrastructure blockers to serve Claude to millions of users while enabling breakthrough AI research. Strong candidates should have familiarity with performance optimization, distributed systems, large-scale service orchestration, and intelligent request routing. 
Familiarity with LLM inference optimization, batching strategies, and multi-accelerator deployments is highly encouraged but not strictly necessary.</p>\n<p><strong>Strong candidates may also have experience with</strong></p>\n<ul>\n<li>High-performance, large-scale distributed systems</li>\n<li>Implementing and deploying machine learning systems at scale</li>\n<li>Load balancing, request routing, or traffic management systems</li>\n<li>LLM inference optimization, batching, and caching strategies</li>\n<li>Kubernetes and cloud infrastructure (AWS, GCP)</li>\n<li>Python or Rust</li>\n</ul>\n<p><strong>You may be a good fit if you</strong></p>\n<ul>\n<li>Have significant software engineering experience, particularly with distributed systems</li>\n<li>Are results-oriented, with a bias towards flexibility and impact</li>\n<li>Pick up slack, even if it goes outside your job description</li>\n<li>Want to learn more about machine learning systems and infrastructure</li>\n<li>Thrive in environments where technical excellence directly drives both business results and research breakthroughs</li>\n<li>Care about the societal impacts of your work</li>\n</ul>\n<p><strong>Representative projects across the org</strong></p>\n<ul>\n<li>Designing intelligent routing algorithms that optimize request distribution across thousands of accelerators</li>\n<li>Autoscaling our compute fleet to dynamically match supply with demand across production, research, and experimental workloads</li>\n<li>Building production-grade deployment pipelines for releasing new models to millions of users</li>\n<li>Integrating new AI accelerator platforms to maintain our hardware-agnostic competitive advantage</li>\n<li>Contributing to new inference features (e.g., structured sampling, prompt caching)</li>\n<li>Supporting inference for new model architectures</li>\n<li>Analyzing observability data to tune performance based on real-world production workloads</li>\n<li>Managing multi-region deployments and 
geographic routing for global customers</li>\n</ul>\n<p><strong>Deadline to apply</strong></p>\n<p>None. Applications will be reviewed on a rolling basis.</p>\n<p><strong>Annual compensation range</strong></p>\n<p>The annual compensation range for this role is £325,000-£390,000 GBP.</p>\n<p><strong>Logistics</strong></p>\n<ul>\n<li>Minimum education: Bachelor’s degree or an equivalent combination of education, training, and/or experience</li>\n<li>Required field of study: A field relevant to the role as demonstrated through coursework, training, or professional experience</li>\n<li>Minimum years of experience: Years of experience required will correlate with the internal job level requirements for the position</li>\n<li>Location-based hybrid policy: Currently, we expect all staff to be in one of our offices at least 25% of the time. However, some roles may require more time in our offices.</li>\n<li>Visa sponsorship: We do sponsor visas! However, we aren&#39;t able to successfully sponsor visas for every role and every candidate. But if we make you an offer, we will make every reasonable effort to get you a visa, and we retain an immigration lawyer to help with this.</li>\n</ul>\n<p><strong>Why work with us?</strong></p>\n<p>We believe that the highest-impact AI research will be big science. At Anthropic we work as a single cohesive team on just a few large-scale research efforts. And we value impact , advancing our long-term goals of steerable, trustworthy AI , rather than work on smaller and more specific puzzles. We view AI research as an empirical science, which has as much in common with physics and biology as with traditional efforts in computer science. We&#39;re an extremely collaborative group, and we host frequent research discussions to ensure that we are pursuing the highest-impact work at any given time. As such, we greatly value communication skills.</p>\n<p>The easiest way to understand our research directions is to read our recent research. 
This research continues many of the directions our team worked on prior to Anthropic, including: GPT-3, Circuit-Based Interpretability, Multimodal Neurons, Scaling Laws, AI &amp; Compute, Concrete Problems in AI Safety, and Learning from Human Preferences.</p>\n<p><strong>Come work with us!</strong></p>\n<p>Anthropic is a public benefit corporation headquartered in San Francisco. We offer competitive compensation and benefits, optional equity donation matching, generous vacation and parental leave, flexible working hours, and a lovely office space in which to collaborate with colleagues.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_e394b0fa-2ba","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5097742008","x-work-arrangement":"hybrid","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":"£325,000-£390,000 GBP","x-skills-required":["performance optimization","distributed systems","large-scale service orchestration","intelligent request routing","LLM inference optimization","batching strategies","multi-accelerator deployments","Kubernetes","cloud infrastructure","Python","Rust"],"x-skills-preferred":["high-performance distributed systems","machine learning systems","load balancing","request routing","traffic management","caching strategies"],"datePosted":"2026-04-18T15:50:52.588Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"London, UK"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"performance optimization, distributed systems, large-scale service orchestration, intelligent request routing, LLM inference optimization, batching strategies, 
multi-accelerator deployments, Kubernetes, cloud infrastructure, Python, Rust, high-performance distributed systems, machine learning systems, load balancing, request routing, traffic management, caching strategies","baseSalary":{"@type":"MonetaryAmount","currency":"GBP","value":{"@type":"QuantitativeValue","minValue":325000,"maxValue":390000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_a7d0cf0f-a3a"},"title":"Senior Engineer- Data Platforms","description":"<p>The Data Platform Team serves as the experts on managing data infrastructure for CoreWeave. Our data infrastructure includes managed databases, data ingestion, data flow, data lakes, and other data retrieval for CoreWeave and its customers.</p>\n<p>We are seeking senior software engineers with specialization in database and stream processing who can help us fulfill the goal of our global datastore strategy and establish communication models for our data flow. 
This individual will work with a team of mixed skilled engineers and have the opportunity to work on the full range of rewarding challenges that come with the business of building a cloud in a communicative, supportive, and high-performing environment.</p>\n<p>As a member of the Data Platform Team you will have the opportunity to:</p>\n<ul>\n<li>Design and implement the platform to deliver data to teams with a focus on providing managed solutions through APIs</li>\n<li>Participate in operations and scaling of relational data platforms</li>\n<li>Develop a stream processing architecture and solve for scalability and reliability</li>\n<li>Improve the performance, security, reliability, and scalability of our data platforms and related services, and participate in the team’s on-call rotation</li>\n<li>Establish guidelines and guard rails for data access and storage for stakeholder teams</li>\n<li>Ensure compliance with standards for data protection regulation</li>\n<li>Grow, change, invest in your teammates, be invested-in, share your ideas, listen to others, be curious, have fun, and, above all, be yourself</li>\n</ul>\n<p>The ideal candidate will have 5+ years of experience in a software or infrastructure engineering industry, with experience operating services in production and at scale and familiarity with reliability engineering concepts such as different types of testing, progressive deployments, error budgets, observability, and fault-tolerant design.</p>\n<p>The base salary range for this role is $175,000 to $210,000. 
The starting salary will be determined based on job-related knowledge, skills, experience, and market location.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_a7d0cf0f-a3a","directApply":true,"hiringOrganization":{"@type":"Organization","name":"CoreWeave","sameAs":"https://www.coreweave.com","logo":"https://logos.yubhub.co/coreweave.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/coreweave/jobs/4562276006","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$175,000 to $210,000","x-skills-required":["database and stream processing","managed databases","data ingestion","data flow","data lakes","APIs","operational experience","reliability engineering","testing","progressive deployments","error budgets","observability","fault-tolerant design"],"x-skills-preferred":["Kubernetes","Go","Linux distributions","shell scripting","Linux storage and networking stacks"],"datePosted":"2026-04-18T15:50:18.835Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Bellevue, WA / Sunnyvale, CA"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"database and stream processing, managed databases, data ingestion, data flow, data lakes, APIs, operational experience, reliability engineering, testing, progressive deployments, error budgets, observability, fault-tolerant design, Kubernetes, Go, Linux distributions, shell scripting, Linux storage and networking stacks","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":175000,"maxValue":210000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_a8092b6e-7f5"},"title":"Bare Metal Support Engineer","description":"<p>As a Bare Metal 
Support Engineer at CoreWeave, you will be responsible for supporting, operating, and maintaining CoreWeave&#39;s extensive GPU fleet across our growing data centers in the U.S., Europe, and beyond.</p>\n<p>You will work closely with customers, data center technicians, and engineering teams to ensure the reliability, performance, and scalability of our infrastructure.</p>\n<p>Key responsibilities include:</p>\n<ul>\n<li>Providing high-level support for customers utilizing bare-metal GPU fleets on CoreWeave Cloud.</li>\n<li>Diagnosing, triaging, and investigating reported customer issues and high-priority incidents, identifying root causes and escalating when necessary.</li>\n<li>Developing a deep understanding of customer workloads and use cases to provide tailored technical support.</li>\n<li>Coordinating remote troubleshooting and hardware interventions with Data Center Technicians.</li>\n<li>Creating and maintaining internal documentation, including troubleshooting guides, best practices, and knowledge base articles.</li>\n<li>Participating in an on-call rotation to support production clusters and ensure operational reliability.</li>\n<li>Collaborating with engineering teams to improve hardware reliability, software stability, and system performance.</li>\n<li>Implementing automation and scripting to streamline support workflows and reduce manual interventions.</li>\n<li>Performing in-depth log analysis and debugging across multiple layers of the stack (firmware, drivers, hardware).</li>\n<li>Providing feedback to internal teams on common support issues to drive continuous improvements.</li>\n<li>Working with networking teams to troubleshoot connectivity issues affecting customer workloads.</li>\n<li>Supporting supercomputing infrastructure running GPU workloads at scale.</li>\n<li>Driving operational excellence by refining internal processes and support methodologies.</li>\n</ul>\n<p>To succeed in this role, you will need:</p>\n<ul>\n<li>Experience in data 
centers, GPU clusters, server deployments, system administration, or hardware troubleshooting.</li>\n<li>Demonstrated experience driving resolutions and continuous improvements across cross-functional environments and teams within a data center environment.</li>\n<li>Intermediate knowledge of Linux (Ubuntu, CentOS, or similar), including command-line proficiency.</li>\n<li>Experience with NVIDIA GPUs, SuperMicro systems, Dell systems, high-performance computing (HPC), and large-scale data center environments.</li>\n<li>Experience in networking fundamentals (TCP/IP, VLANs, DNS, DHCP) and troubleshooting tools.</li>\n<li>Hands-on experience with firmware updates, BIOS configurations, and driver management.</li>\n<li>Experience analyzing system logs and debugging issues across firmware, drivers, and hardware layers.</li>\n<li>Experience working with Jira, Confluence, Notion, or other issue-tracking and documentation platforms.</li>\n<li>Experience in scripting and automation (Python, Bash, Ansible, or similar).</li>\n</ul>\n<p>If you&#39;re a curious and analytical individual with a passion for problem-solving and a desire to work in a fast-paced environment, we&#39;d love to hear from you!</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_a8092b6e-7f5","directApply":true,"hiringOrganization":{"@type":"Organization","name":"CoreWeave","sameAs":"https://www.coreweave.com","logo":"https://logos.yubhub.co/coreweave.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/coreweave/jobs/4560350006","x-work-arrangement":"hybrid","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":"$83,000 to $132,000","x-skills-required":["Linux","GPU clusters","server deployments","system administration","hardware troubleshooting","NVIDIA GPUs","SuperMicro systems","Dell systems","high-performance computing","large-scale data center 
environments","networking fundamentals","troubleshooting tools","firmware updates","BIOS configurations","driver management","system logs","debugging issues","Jira","Confluence","Notion","issue-tracking","documentation platforms","scripting","automation"],"x-skills-preferred":["Kubernetes","Docker","containerized infrastructure"],"datePosted":"2026-04-18T15:49:58.535Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Livingston, NJ / New York, NY / Sunnyvale, CA / Bellevue, WA"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Linux, GPU clusters, server deployments, system administration, hardware troubleshooting, NVIDIA GPUs, SuperMicro systems, Dell systems, high-performance computing, large-scale data center environments, networking fundamentals, troubleshooting tools, firmware updates, BIOS configurations, driver management, system logs, debugging issues, Jira, Confluence, Notion, issue-tracking, documentation platforms, scripting, automation, Kubernetes, Docker, containerized infrastructure","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":83000,"maxValue":132000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_ce541b1a-167"},"title":"Senior Technical Account Manager - Auth0","description":"<p>Secure Every Identity</p>\n<p>Okta secures AI by building the trusted, neutral infrastructure that enables organisations to safely embrace this new era.</p>\n<p>We are looking for builders and owners who operate with speed and urgency and execute with excellence. This is an opportunity to do career-defining work.</p>\n<p><strong>The Team</strong></p>\n<p>Technical Account Management (TAM) is a global team that owns Auth0 customer success within Okta’s broader Customer Success team. 
We collaborate with Auth0’s customers to share knowledge, and best practices and make recommendations to continuously innovate around identity and security.</p>\n<p>As our customer’s strategic identity coaches, we are Auth0 product experts, and we enable Auth0&#39;s worldwide growth by educating existing customers and ensuring they are happy and successful.</p>\n<p>We share our technical and product expertise with customers through presentations, demonstrations, technical evaluations, and ongoing recommendations on Auth0 and industry best practices.</p>\n<p><strong>The Opportunity</strong></p>\n<p>A TAM specializing in enterprise identity, including the Auth0 product and adjacent technologies. The TAM will provide Okta’s customers with strategic technical guidance over the comprehensive suite of products and features available at Okta.</p>\n<p>They are held in high regard as a technical expert for how Okta’s solutions translate to business value. They are also held in high regard for their ability to understand the code that makes up identity authentication pipelines, Auth0, after all, is developer-friendly.</p>\n<p>The TAM specialization calls for an understanding of hybrid scenarios that capitalize on Auth0’s ability to manage authentication, authorization, and lifecycle management capabilities for consumer SaaS, business-to-consumer (B2C), and general CIAM applications.</p>\n<p>The opportunity is that as an Auth0 TAM you will get to guide some of the world&#39;s largest companies in their strategic identity journey at the same time as being an Auth0 champion!</p>\n<p><strong>What you’ll be doing</strong></p>\n<p>Fully own the account management function as an Auth0 TAM. 
This includes the business and the technical side</p>\n<p>Advise customers on best practices and product adoption in a post-sales capacity</p>\n<p>Be comfortable with a number of personas including but not limited to CISO, Product Owner, CMO, developers, etc., with an account portfolio of strategic accounts</p>\n<p>Have a deep interest in the security space and where the industry is headed particularly from a CIAM perspective.</p>\n<p>Earn customer trust by understanding their goals and use cases, and recommend best practices relating to process changes, product adoption, configuration, and additional features to meet requirements</p>\n<p>Maintain focus on increasing subscription adoption, customer satisfaction, and retention</p>\n<p>Review customer architectures and Auth0 configurations to ensure they are enhancing security posture and capturing ROI as Auth0 releases new features and functionality</p>\n<p>Establish strong personal relationships on key accounts with decision-makers and stakeholders</p>\n<p>Establish strong relationships internally, too as part of a larger collaborative team</p>\n<p>Participate in content creation for both internal and external enablement of staff and customers</p>\n<p><strong>What you’ll bring to the role</strong></p>\n<p>7+ years of total experience in information technology, with at least 3 years of hands-on experience as a Technical Account Manager (TAM) or comparable practitioner role in the IAM space</p>\n<p>Working proficiency in the following core IAM areas:</p>\n<p>Technologies and protocols to support identity federation and robust access control models, including concepts such as SAML 2.0, WS-Federation, OAuth, OpenID Connect, etc.</p>\n<p>Legacy applications in a hybrid IT environment with non-standard applications (i.e. 
those that do not support modern identity federation protocols)</p>\n<p>Enterprise applications in the ecosystem to provide identity and attributes to applications or to harness an external application to help drive business processes (ITSM, HR, etc)</p>\n<p>Consumer and/or SaaS application deployments</p>\n<p>Security and performance monitoring, and 3rd party signals integrations (SEIM, MDM, WAF, etc)</p>\n<p>Familiarity with IAM solution providers is strongly desired.</p>\n<p>Strong background in any of the following: Technical Account Management, Technical Consulting, Product Management, Solution Architect, or a similar role</p>\n<p>Understanding of common software development practices, including concepts such as SDLC, CI/CD, Containerization, etc.</p>\n<p>Ability to code in Javascript</p>\n<p>Understanding of identity and surrounding technologies, including concepts such as encryption, PKI, RSA, etc.</p>\n<p>Strong business acumen, history of success owning enterprise segment customer relationships and escalations</p>\n<p>Excellent communication skills. 
Ability to set expectations and communicate goals and objectives with customers at various levels, from a developer to a CISO</p>\n<p>Ability to track and influence customer behavior and health metrics across a portfolio of accounts</p>\n<p>This position will be located in London or Barcelona and will have some travel required (under 50% of the time)</p>\n<p>BA/BS/MS or related discipline or equivalent work experience required</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_ce541b1a-167","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Okta","sameAs":"https://www.okta.com/","logo":"https://logos.yubhub.co/okta.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/okta/jobs/7614965","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"£104,000-£143,000 GBP","x-skills-required":["SAML 2.0","WS-Federation","OAuth","OpenID Connect","Legacy applications","Enterprise applications","Consumer and/or SaaS application deployments","Security and performance monitoring","3rd party signals integrations","IAM solution providers","Technical Account Management","Technical Consulting","Product Management","Solution Architect","SDLC","CI/CD","Containerization","Javascript","Encryption","PKI","RSA","Business acumen","Communication skills"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:49:55.724Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"London, United Kingdom"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"SAML 2.0, WS-Federation, OAuth, OpenID Connect, Legacy applications, Enterprise applications, Consumer and/or SaaS application deployments, Security and performance monitoring, 3rd party signals integrations, IAM solution providers, Technical Account Management, Technical 
Consulting, Product Management, Solution Architect, SDLC, CI/CD, Containerization, Javascript, Encryption, PKI, RSA, Business acumen, Communication skills","baseSalary":{"@type":"MonetaryAmount","currency":"GBP","value":{"@type":"QuantitativeValue","minValue":104000,"maxValue":143000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_85f1f87e-70f"},"title":"Resident Solutions Architect - Financial Services","description":"<p>As a Senior Big Data Solutions Architect (Sr Resident Solutions Architect) in our Professional Services team, you will work with clients on short to medium term customer engagements on their big data challenges using the Databricks platform.</p>\n<p>You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>RSAs are billable and know how to complete projects according to specification with excellent customer service.</p>\n<p>You will report to the regional Manager/Lead.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>You will work on a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases</li>\n</ul>\n<ul>\n<li>Work with engagement managers to scope variety of professional services work with input from the customer</li>\n</ul>\n<ul>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n</ul>\n<ul>\n<li>Consult on architecture and design; bootstrap hands-on projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks.</li>\n</ul>\n<ul>\n<li>Provide an escalated level of 
support for customer operational issues.</li>\n</ul>\n<ul>\n<li>You will work with the Databricks technical team, Project Manager, Architect and Customer team to ensure the technical components of the engagement are delivered to meet customer&#39;s needs.</li>\n</ul>\n<ul>\n<li>Work with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement specific product and support issues.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>9+ years experience in data engineering, data platforms &amp; analytics</li>\n</ul>\n<ul>\n<li>Comfortable writing code in either Python or Scala</li>\n</ul>\n<ul>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n</ul>\n<ul>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Apache Spark™ runtime internals</li>\n</ul>\n<ul>\n<li>Familiarity with CI/CD for production deployments</li>\n</ul>\n<ul>\n<li>Working knowledge of MLOps</li>\n</ul>\n<ul>\n<li>Capable of design and deployment of highly performant end-to-end data architectures</li>\n</ul>\n<ul>\n<li>Experience with technical project delivery - managing scope and timelines.</li>\n</ul>\n<ul>\n<li>Documentation and white-boarding skills.</li>\n</ul>\n<ul>\n<li>Experience working with clients and managing conflicts.</li>\n</ul>\n<ul>\n<li>Experience in building scalable streaming and batch solutions using cloud-native components</li>\n</ul>\n<ul>\n<li>Travel to customers up to 20% of the time</li>\n</ul>\n<p>Nice to have: Databricks Certification</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_85f1f87e-70f","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8461327002","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 USD","x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","end-to-end data architectures","technical project delivery","documentation and white-boarding skills","client management"],"x-skills-preferred":["Databricks Certification"],"datePosted":"2026-04-18T15:49:55.028Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Austin, Texas"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & analytics, Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, end-to-end data architectures, technical project delivery, documentation and white-boarding skills, client management, Databricks Certification","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_0fceb87c-57b"},"title":"PLM Development Manager","description":"<p>We are seeking a PLM Development Manager to lead our team developing Teamcenter customizations and integrations. 
As the PLM Development Manager, you will define the vision and strategy for PLM across the enterprise, set goals, policies, and processes that guide how PLM customization and integration work gets done company-wide.</p>\n<p>Key responsibilities include:</p>\n<p>Defining the vision and strategy for PLM across the enterprise Setting goals, policies, and processes that guide how PLM customization and integration work gets done company-wide Managing a department of developers, solution engineers, and technical leads Building and scaling high-performing teams Overseeing workforce planning and budget allocation across multiple programs Creating career frameworks, technical standards, and engineering practices Driving architectural decisions that impact PLM infrastructure, integrations, and scalability for the entire organization Partnering with senior leadership across Engineering, Operations, IT, and Product to align PLM strategy with company priorities Communicating technical strategies to C-suite audiences Overseeing multi-million dollar budgets for PLM licensing, infrastructure, and team operations Ensuring business continuity and operational excellence</p>\n<p>The ideal candidate will have 8+ years of engineering experience, with at least 4+ years in people management, including 2+ years managing managers or leading multiple teams. 
They will also have expertise in Enterprise PLMs, including architecture, customization, and integrations, as well as broad understanding of enterprise systems landscape.</p>\n<p>In addition to the required qualifications, preferred qualifications include experience in defense, aerospace, or highly regulated manufacturing environments at scale, background implementing Teamcenter at a company during hypergrowth, prior experience in startup or high-growth technology companies, and track record of successful M&amp;A integrations or large-scale system migration.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_0fceb87c-57b","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anduril Industries","sameAs":"https://www.anduril.com/","logo":"https://logos.yubhub.co/anduril.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/andurilindustries/jobs/5067990007","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$166,000-$220,000 USD","x-skills-required":["Enterprise PLMs","Teamcenter","Architecture","Customization","Integrations","Cloud infrastructure","High-availability systems","Enterprise-scale deployments","People management","Leadership","Communication","Budget management"],"x-skills-preferred":["Defense","Aerospace","Regulated manufacturing","Teamcenter implementation","Startup growth","M&A integrations","System migration"],"datePosted":"2026-04-18T15:48:46.606Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Costa Mesa, California, United States"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Enterprise PLMs, Teamcenter, Architecture, Customization, Integrations, Cloud infrastructure, High-availability systems, Enterprise-scale deployments, People management, Leadership, 
Communication, Budget management, Defense, Aerospace, Regulated manufacturing, Teamcenter implementation, Startup growth, M&A integrations, System migration","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":166000,"maxValue":220000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_b36d00b1-459"},"title":"Staff Database Reliability Engineer (DBRE), Mysql, Federal","description":"<p>We are seeking a Staff Database Reliability Engineer (DBRE) to join our team. As a DBRE, you will have ownership of all technical aspects of our data services tier from ground up. You will partner with our core product engineers, performance engineers, site reliability engineers, and growing DBRE team, working on scaling, securing, and tuning our infrastructure be it self-managed MySQL, RDS Aurora MySQL/PostgreSQL or CloudSQL MySQL/PostgreSQL.  Our team is committed to two Okta Engineering mantras &quot;Always On&quot; and &quot;No Mysteries&quot;. You will ensure effective performance and 24X7 availability of the production database tier, design, implement and document operational processes, tasks, and configuration management. You will also coordinate efforts towards performance tuning, scaling and benchmarking the data services infrastructure.  You will contribute to configuration management using chef and infrastructure as code using terraform. You will conduct thorough performance analysis and tuning to meet application SLAs, optimizing database schema, indexes, and SQL queries. Quickly troubleshoot and resolve database performance issues.  
Required Skills:  <em> Proven experience as a MySQL DBRE </em> In-depth knowledge of MySQL internals, performance tuning, and query optimization <em> Experience in database design, implementation, and maintenance in a high-availability environment </em> Strong proficiency in SQL and familiarity with scripting <em> Familiarity with database monitoring tools (e.g, Grafana) </em> Solid understanding of database security practices and compliance requirements <em> Ability to troubleshoot and resolve database performance issues and outages promptly </em> Excellent communication skills and ability to work effectively in a team environment <em> Bachelor’s degree in Computer Science, Engineering, or a related field (or equivalent work experience)  Preferred Skills:  </em> AWS Certified Database - Specialty or related certifications demonstrating proficiency in AWS database services and cloud infrastructure management <em> Familiarity or hands-on experience with PostgreSQL or other relational database management systems (RDBMS), understanding their differences and implications for database management </em> Understanding of containerization technologies such as Docker and Kubernetes and their impact on database deployments and scalability <em> Proficient in a Linux environment, including Linux internals and tuning </em> Proven track record of applying innovative solutions to complex database challenges and a strong problem-solving mindset in a dynamic operational environment  This position requires the ability to access federal environments and/or have access to protected federal data. As a condition of employment for this position, the successful candidate must be able to submit documentation establishing U.S. Person status (e.g. a U.S. Citizen, National, Lawful Permanent Resident, Refugee, or Asylee. 22 CFR 120.15) upon hire. 
Requires in-person onboarding and travel to our San Francisco, CA HQ office or our Chicago office during the first week of employment.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_b36d00b1-459","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Okta","sameAs":"https://www.okta.com/","logo":"https://logos.yubhub.co/okta.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/okta/jobs/7670281","x-work-arrangement":"hybrid","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":"$162,000-$244,000 USD","x-skills-required":["Proven experience as a MySQL DBRE","In-depth knowledge of MySQL internals, performance tuning, and query optimization","Experience in database design, implementation, and maintenance in a high-availability environment","Strong proficiency in SQL and familiarity with scripting","Familiarity with database monitoring tools (e.g, Grafana)","Solid understanding of database security practices and compliance requirements","Ability to troubleshoot and resolve database performance issues and outages promptly","Excellent communication skills and ability to work effectively in a team environment","Bachelor’s degree in Computer Science, Engineering, or a related field (or equivalent work experience)"],"x-skills-preferred":["AWS Certified Database - Specialty or related certifications demonstrating proficiency in AWS database services and cloud infrastructure management","Familiarity or hands-on experience with PostgreSQL or other relational database management systems (RDBMS), understanding their differences and implications for database management","Understanding of containerization technologies such as Docker and Kubernetes and their impact on database deployments and scalability","Proficient in a Linux environment, including Linux internals and tuning","Proven track record of applying 
innovative solutions to complex database challenges and a strong problem-solving mindset in a dynamic operational environment"],"datePosted":"2026-04-18T15:48:29.544Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Bellevue, Washington; New York, New York; San Francisco, California; Washington, DC"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Proven experience as a MySQL DBRE, In-depth knowledge of MySQL internals, performance tuning, and query optimization, Experience in database design, implementation, and maintenance in a high-availability environment, Strong proficiency in SQL and familiarity with scripting, Familiarity with database monitoring tools (e.g, Grafana), Solid understanding of database security practices and compliance requirements, Ability to troubleshoot and resolve database performance issues and outages promptly, Excellent communication skills and ability to work effectively in a team environment, Bachelor’s degree in Computer Science, Engineering, or a related field (or equivalent work experience), AWS Certified Database - Specialty or related certifications demonstrating proficiency in AWS database services and cloud infrastructure management, Familiarity or hands-on experience with PostgreSQL or other relational database management systems (RDBMS), understanding their differences and implications for database management, Understanding of containerization technologies such as Docker and Kubernetes and their impact on database deployments and scalability, Proficient in a Linux environment, including Linux internals and tuning, Proven track record of applying innovative solutions to complex database challenges and a strong problem-solving mindset in a dynamic operational 
environment","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":162000,"maxValue":244000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_66cf66eb-76e"},"title":"Senior Machine Learning Systems Engineer","description":"<p>As a Senior Machine Learning Systems Engineer at Reddit, you will lead the development of a platform for large-scale ML models. Your primary responsibilities will include designing end-to-end model lifecycle patterns (MLOps) to boost velocity of development for ML engineers, zero-to-one development and support of a graph ML codebase and platform, collaborating with ML engineers on performance tuning, optimizing batch data processing, and architecting pipelines to build and maintain massive graph data structures.</p>\n<p>To be successful in this role, you will need 5+ years of experience in ML infrastructure, including model training and model deployments, hands-on experience with ML optimization, deep experience with cloud-based technologies, and proficiency with common programming languages and frameworks of ML. You should also have strong organizational and communication skills, experience working with graph databases and graph neural networks, and a deep understanding of the machine learning development lifecycle.</p>\n<p>In addition to base salary, this job is eligible to receive equity in the form of restricted stock units, and depending on the position offered, it may also be eligible to receive a commission. 
Reddit offers a wide range of benefits to U.S.-based employees, including medical, dental, and vision insurance, 401(k) program with employer match, generous time off for vacation, and parental leave.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_66cf66eb-76e","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Reddit Inc.","sameAs":"https://www.redditinc.com","logo":"https://logos.yubhub.co/redditinc.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/reddit/jobs/7731772","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$216,700-$303,400 USD","x-skills-required":["ML infrastructure","model training","model deployments","ML optimization","cloud-based technologies","graph databases","graph neural networks","common programming languages","frameworks of ML"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:47:48.295Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Remote - United States"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"ML infrastructure, model training, model deployments, ML optimization, cloud-based technologies, graph databases, graph neural networks, common programming languages, frameworks of ML","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":216700,"maxValue":303400,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_f35f0a65-82b"},"title":"Staff Software Engineer - Continuous Integration, Developer Experience","description":"<p>We&#39;re looking for a Staff Software Engineer to join our Build and Code Platform team. 
As a Staff Software Engineer, you will be directly responsible for cultivating the developer experience, from initial code creation to the final built artifact. You&#39;ll draw on your technical expertise and leadership skills to ensure our developer tooling is smooth, easy to use, and provides maximum value to an ever-growing engineering organization.</p>\n<p>Responsibilities: Steer: Work with the team to select, scope, and drive high-leverage projects that accelerate development to help Reddit achieve its goals. Build: Execute on a strategy to create a developer experience that reduces toil and provides faster and higher-quality feedback around all parts of the SDLC, including source control, builds, testing, code review, code integration, knowledge search, and more. Amplify: Mentor, coach, and collaborate with other technical contributors. Collaborate: Work together with a variety of cross-functional teams across Reddit Engineering. Evolve: Learn and improve your own technical and non-technical abilities.</p>\n<p>Requirements: 7+ years of experience identifying, driving, and executing high-impact projects that align with the company&#39;s strategy. 5+ years of experience working in developer experience, infrastructure, or platform teams, and experience working on developer tools, libraries, and frameworks. 5+ years of industry experience in large-scale distributed systems and experience developing and improving highly scalable and reliable systems. Experience with CI/CD tools (Drone, BuildKite, Github Actions, Bazel, Argo Workflows/Rollouts/CD, Temporal, and other adjacent tools). Experience with Kubernetes and cloud providers (AWS, GCP). A track record of leading large-scale technical projects that require cross-team and cross-functional collaboration. The ability to disambiguate complex problems, align stakeholders, and aggressively prioritize to execute on projects effectively. 
Excellent communication skills that you employ to drive toward consensus, navigate disagreements, influence decisions and priorities, and empower others. A strong sense of empathy, curiosity, and humility that drive a desire to deeply understand developer pain points, continuously improve systems, and ultimately deliver a delightful user experience. A history of mentorship and technical leadership.</p>\n<p>Preferred qualifications include: Go experience. Experience with GraphQL, REST, HTTP, gRPC. Experience with Github Enterprise Server. Experience with mobile client CI/CD challenges (Bitrise, MacStadium, Orka, Gradle). Experience designing and implementing platforms. Experience with multi-region, multi-provider deployments.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_f35f0a65-82b","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Reddit","sameAs":"https://www.redditinc.com","logo":"https://logos.yubhub.co/redditinc.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/reddit/jobs/7342078","x-work-arrangement":"remote","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":"$217,000-$303,900 USD","x-skills-required":["CI/CD tools","Kubernetes","Cloud providers","Large-scale distributed systems","Developer tools","Libraries","Frameworks","Cross-functional collaboration","Complex problem-solving","Communication","Empathy","Curiosity","Humility"],"x-skills-preferred":["Go","GraphQL","REST","HTTP","gRPC","Github Enterprise Server","Mobile client CI/CD challenges","Platform design","Multi-region, multi-provider deployments"],"datePosted":"2026-04-18T15:47:30.240Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Remote - United 
States"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"CI/CD tools, Kubernetes, Cloud providers, Large-scale distributed systems, Developer tools, Libraries, Frameworks, Cross-functional collaboration, Complex problem-solving, Communication, Empathy, Curiosity, Humility, Go, GraphQL, REST, HTTP, gRPC, Github Enterprise Server, Mobile client CI/CD challenges, Platform design, Multi-region, multi-provider deployments","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":217000,"maxValue":303900,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_3d57b93e-423"},"title":"Resident Solutions Architect - Financial Services","description":"<p>As a Resident Solutions Architect in our Professional Services team, you will work with clients on short to medium term customer engagements on their big data challenges using the Databricks platform.</p>\n<p>You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>RSAs are billable and know how to complete projects according to specification with excellent customer service.</p>\n<p>You will report to the regional Manager/Lead.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>Work on a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases</li>\n<li>Work with engagement managers to scope variety of professional services work with input from the customer</li>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of 
industry-leading big data and AI applications</li>\n<li>Consult on architecture and design; bootstrap hands-on projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks.</li>\n<li>Provide an escalated level of support for customer operational issues.</li>\n<li>Work with the Databricks technical team, Project Manager, Architect and Customer team to ensure the technical components of the engagement are delivered to meet customer&#39;s needs.</li>\n<li>Work with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement specific product and support issues.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>6+ years experience in data engineering, data platforms &amp; analytics</li>\n<li>Comfortable writing code in either Python or Scala</li>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Apache Spark™ runtime internals</li>\n<li>Familiarity with CI/CD for production deployments</li>\n<li>Working knowledge of MLOps</li>\n<li>Capable of design and deployment of highly performant end-to-end data architectures</li>\n<li>Experience with technical project delivery - managing scope and timelines.</li>\n<li>Documentation and white-boarding skills.</li>\n<li>Experience working with clients and managing conflicts.</li>\n<li>Experience in building scalable streaming and batch solutions using cloud-native components</li>\n<li>Travel to customers up to 20% of the time</li>\n</ul>\n<p>Nice to have:</p>\n<ul>\n<li>Databricks Certification</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_3d57b93e-423","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8456948002","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 USD","x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","data architecture","technical project delivery","documentation and white-boarding skills","client management"],"x-skills-preferred":["Databricks Certification"],"datePosted":"2026-04-18T15:47:22.867Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Atlanta, Georgia"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & analytics, Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, data architecture, technical project delivery, documentation and white-boarding skills, client management, Databricks Certification","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_3b01c809-8ef"},"title":"Staff Machine Learning Systems Engineer","description":"<p>As a Staff Machine Learning Systems Engineer at Reddit, you will lead the development of a platform for large-scale ML models. 
Your responsibilities will include designing end-to-end model lifecycle patterns (MLOps) to boost velocity of development for ML engineers, zero-to-one development and support of a graph ML codebase and platform, collaborating with ML engineers on performance tuning, optimizing batch data processing, and architecting pipelines to build and maintain massive graph data structures.</p>\n<p>We are looking for an experienced engineer with 8+ years of experience in ML infrastructure, including model training and model deployments. You should have hands-on experience with ML optimization, cloud-based technologies, MLOps tools, and proficiency with common programming languages and frameworks of ML. Strong focus on scalability, reliability, performance, and ease of use is essential.</p>\n<p>In addition to base salary, this job is eligible to receive equity in the form of restricted stock units, and depending on the position offered, it may also be eligible to receive a commission. Reddit offers a wide range of benefits to U.S.-based employees, including medical, dental, and vision insurance, 401(k) program with employer match, generous time off for vacation, and parental leave.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_3b01c809-8ef","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Reddit","sameAs":"https://www.redditinc.com","logo":"https://logos.yubhub.co/redditinc.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/reddit/jobs/7731788","x-work-arrangement":"remote","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":"$230,000-$322,000 USD","x-skills-required":["ML infrastructure","model training","model deployments","ML optimization","cloud-based technologies","MLOps tools","Python","PyTorch","Tensorflow"],"x-skills-preferred":["graph ML codebase and platform","Apache Beam","Apache Spark","Ray 
Data"],"datePosted":"2026-04-18T15:47:03.069Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Remote - United States"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"ML infrastructure, model training, model deployments, ML optimization, cloud-based technologies, MLOps tools, Python, PyTorch, Tensorflow, graph ML codebase and platform, Apache Beam, Apache Spark, Ray Data","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":230000,"maxValue":322000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_cbd81d47-d7e"},"title":"Data Platform Solutions Architect (Professional Services)","description":"<p>We&#39;re hiring for multiple roles within our Professional Services team. This position may be offered as Senior Solutions Consultant, Resident Solutions Architect, or Senior Resident Solutions Architect. The final title will align to your experience, technical depth, and customer-facing ownership.</p>\n<p>As a Big Data Solutions Architect (Internal Title - Resident Solutions Architect) in our Professional Services team, you will work with clients on short to medium-term customer engagements on their big data challenges using the Databricks platform. You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>RSAs are billable and know how to complete projects according to specification with excellent customer service. 
You will report to the regional Manager/Lead.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>Work on a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases</li>\n<li>Work with engagement managers to scope variety of professional services work with input from the customer</li>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n<li>Consult on architecture and design; bootstrap or implement customer projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks.</li>\n<li>Provide an escalated level of support for customer operational issues.</li>\n<li>Work with the Databricks technical team, Project Manager, Architect and Customer team to ensure the technical components of the engagement are delivered to meet customer&#39;s needs.</li>\n<li>Work with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement specific product and support issues.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>Extensive experience in data engineering, data platforms &amp; analytics</li>\n<li>Comfortable writing code in either Python or Scala</li>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Spark runtime internals</li>\n<li>Familiarity with CI/CD for production deployments</li>\n<li>Working knowledge of MLOps</li>\n<li>Design and deployment of performant end-to-end data architectures</li>\n<li>Experience with technical project delivery - managing scope and timelines.</li>\n<li>Documentation and white-boarding 
skills.</li>\n<li>Experience working with clients and managing conflicts.</li>\n<li>Build skills in technical areas which support the deployment and integration of Databricks-based solutions to complete customer projects.</li>\n<li>Travel to customers 10% of the time</li>\n</ul>\n<p>[Preferred] Databricks Certification but not essential</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_cbd81d47-d7e","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8486738002","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","end-to-end data architectures","technical project delivery","documentation and white-boarding skills","client management"],"x-skills-preferred":["Databricks Certification"],"datePosted":"2026-04-18T15:46:17.349Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"London, United Kingdom"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & analytics, Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, end-to-end data architectures, technical project delivery, documentation and white-boarding skills, client management, Databricks Certification"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_8efd6b3b-251"},"title":"Resident Solutions Architect - Public Sector","description":"<p>As a 
Resident Solutions Architect in our Professional Services team, you will work with clients on short to medium term customer engagements on their big data challenges using the Databricks platform.</p>\n<p>You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>RSAs are billable and know how to complete projects according to specification with excellent customer service.</p>\n<p>You will report to the regional Manager/Lead.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>Work on a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases</li>\n</ul>\n<ul>\n<li>Work with engagement managers to scope variety of professional services work with input from the customer</li>\n</ul>\n<ul>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n</ul>\n<ul>\n<li>Consult on architecture and design; bootstrap or implement customer projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks.</li>\n</ul>\n<ul>\n<li>Provide an escalated level of support for customer operational issues.</li>\n</ul>\n<ul>\n<li>You will work with the Databricks technical team, Project Manager, Architect and Customer team to ensure the technical components of the engagement are delivered to meet customer&#39;s needs.</li>\n</ul>\n<ul>\n<li>Work with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement specific product and support issues.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>6+ years experience in data engineering, data 
platforms &amp; analytics</li>\n</ul>\n<ul>\n<li>Comfortable writing code in either Python or Scala</li>\n</ul>\n<ul>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n</ul>\n<ul>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Spark runtime internals</li>\n</ul>\n<ul>\n<li>Familiarity with CI/CD for production deployments</li>\n</ul>\n<ul>\n<li>Working knowledge of MLOps</li>\n</ul>\n<ul>\n<li>Design and deployment of performant end-to-end data architectures</li>\n</ul>\n<ul>\n<li>Experience with technical project delivery - managing scope and timelines.</li>\n</ul>\n<ul>\n<li>Documentation and white-boarding skills.</li>\n</ul>\n<ul>\n<li>Experience working with clients and managing conflicts.</li>\n</ul>\n<ul>\n<li>Build skills in technical areas which support the deployment and integration of Databricks-based solutions to complete customer projects.</li>\n</ul>\n<ul>\n<li>Travel to customers 20% of the time</li>\n</ul>\n<p>Databricks Certification</p>\n<p>Pay Range Transparency</p>\n<p>Databricks is committed to fair and equitable compensation practices. 
The pay range(s) for this role is listed below and represents the expected base salary range for non-commissionable roles or on-target earnings for commissionable roles.</p>\n<p>Actual compensation packages are based on several factors that are unique to each candidate, including but not limited to job-related skills, depth of experience, relevant certifications and training, and specific work location.</p>\n<p>Based on the factors above, Databricks anticipated utilizing the full width of the range.</p>\n<p>The total compensation package for this position may also include eligibility for annual performance bonus, equity, and the benefits listed above.</p>\n<p>For more information regarding which range your location is in visit our page here.</p>\n<p>Zone 1 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 2 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 3 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 4 Pay Range $180,656-$248,360 USD</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_8efd6b3b-251","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8456973002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 USD","x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","performant end-to-end data architectures","technical project delivery","documentation and white-boarding skills","client management"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:45:55.475Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Boston, 
Massachusetts"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & analytics, Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, performant end-to-end data architectures, technical project delivery, documentation and white-boarding skills, client management","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_6d94d7ea-9ca"},"title":"Resident Solutions Architect - Financial Services","description":"<p>As a Senior Big Data Solutions Architect (Sr Resident Solutions Architect) in our Professional Services team, you will work with clients on short to medium term customer engagements on their big data challenges using the Databricks platform.</p>\n<p>You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>RSAs are billable and know how to complete projects according to specification with excellent customer service.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>You will work on a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases</li>\n</ul>\n<ul>\n<li>Work with engagement managers to scope variety of professional services work with input from the customer</li>\n</ul>\n<ul>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n</ul>\n<ul>\n<li>Consult on architecture and 
design; bootstrap hands-on projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks.</li>\n</ul>\n<ul>\n<li>Provide an escalated level of support for customer operational issues.</li>\n</ul>\n<ul>\n<li>You will work with the Databricks technical team, Project Manager, Architect and Customer team to ensure the technical components of the engagement are delivered to meet customer&#39;s needs.</li>\n</ul>\n<ul>\n<li>Work with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement specific product and support issues.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>9+ years experience in data engineering, data platforms &amp; analytics</li>\n</ul>\n<ul>\n<li>Comfortable writing code in either Python or Scala</li>\n</ul>\n<ul>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n</ul>\n<ul>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Apache Spark™ runtime internals</li>\n</ul>\n<ul>\n<li>Familiarity with CI/CD for production deployments</li>\n</ul>\n<ul>\n<li>Working knowledge of MLOps</li>\n</ul>\n<ul>\n<li>Capable of design and deployment of highly performant end-to-end data architectures</li>\n</ul>\n<ul>\n<li>Experience with technical project delivery - managing scope and timelines.</li>\n</ul>\n<ul>\n<li>Documentation and white-boarding skills.</li>\n</ul>\n<ul>\n<li>Experience working with clients and managing conflicts.</li>\n</ul>\n<ul>\n<li>Experience in building scalable streaming and batch solutions using cloud-native components</li>\n</ul>\n<ul>\n<li>Travel to customers up to 20% of the time</li>\n</ul>\n<p>Nice to have: Databricks Certification</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_6d94d7ea-9ca","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8461330002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 USD","x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","design and deployment of highly performant end-to-end data architectures","technical project delivery","documentation and white-boarding skills","client management"],"x-skills-preferred":["Databricks Certification"],"datePosted":"2026-04-18T15:45:27.183Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Washington, D.C."}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & analytics, Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, design and deployment of highly performant end-to-end data architectures, technical project delivery, documentation and white-boarding skills, client management, Databricks Certification","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_74a551ed-76c"},"title":"Solutions Engineer, Auth0","description":"<p>We are looking for a Solutions Engineer to join our team in Germany. 
As a Solutions Engineer, you will be responsible for engaging with customers to help them address their identity management challenges, driving a clear technical preference for Okta&#39;s Auth0 offering.</p>\n<p>Your primary responsibilities will include delivering interactive presentations and solution demonstrations to all levels of an organization, whiteboarding solutions, and leading Q&amp;A sessions with prospective customers. You will also collaborate with account executives to develop and execute territory and account strategies to maximize the Okta opportunity in those accounts.</p>\n<p>To be successful in this role, you will need to have a strong technical understanding of the Auth0 product line and the underlying technologies and protocols. You should also be able to communicate effectively with both technical and non-technical audiences.</p>\n<p>In addition to your technical expertise, you will need to have excellent communication and interpersonal skills, as well as the ability to work in a fast-paced environment.</p>\n<p>As a Solutions Engineer, you will have the opportunity to work with a variety of customers and projects, and to contribute to the growth and success of our business.</p>\n<p>If you are a motivated and detail-oriented individual with a passion for technology and customer service, we encourage you to apply for this exciting opportunity.</p>\n<p>The annual On Target Compensation (OTE) range for this position for candidates located in Germany is between €122,000-€134,000 EUR.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_74a551ed-76c","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Auth0","sameAs":"https://auth0.com/","logo":"https://logos.yubhub.co/auth0.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/okta/jobs/7535056","x-work-arrangement":"remote","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":"€122,000-€134,000 EUR","x-skills-required":["Fluency in English and German","Software Engineering or Development experience","Understanding of Authentication and Authorisation protocols and frameworks such as SAML, OIDC and OAuth","Experience working with REST APIs and SDKs","Ability to communicate effectively with both technical and non-technical audiences"],"x-skills-preferred":["Experience working in a Pre-Sales role as Sales Engineer or Solutions Engineer","Understanding of cloud deployments stacks such as AWS, Azure and GCP","Experience in developing mobile applications (using React Native/Ionic/Xamarin) and understanding the identity layer integration involving devices"],"datePosted":"2026-04-18T15:45:02.393Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Germany"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Fluency in English and German, Software Engineering or Development experience, Understanding of Authentication and Authorisation protocols and frameworks such as SAML, OIDC and OAuth, Experience working with REST APIs and SDKs, Ability to communicate effectively with both technical and non-technical audiences, Experience working in a Pre-Sales role as Sales Engineer or Solutions Engineer, Understanding of cloud deployments stacks such as AWS, Azure and GCP, Experience in developing mobile applications (using React Native/Ionic/Xamarin) and understanding the identity layer integration involving 
devices","baseSalary":{"@type":"MonetaryAmount","currency":"EUR","value":{"@type":"QuantitativeValue","minValue":122000,"maxValue":134000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_8664b981-66c"},"title":"Data Platform Solutions Architect (Professional Services) - Emerging Enterprise & DNB","description":"<p>We&#39;re hiring for multiple roles within our Professional Services team. Depending on experience and scope, this position may be offered as a Senior Solutions Consultant or a Resident Solutions Architect. You may know this role as a Big Data Solutions Architect, Analytics Architect, Data Platform Architect, or Technical Consultant. The final title will align to your experience, technical depth, and customer-facing ownership.</p>\n<p>As a Data Platform Solutions Architect on our Professional Services team for the Emerging Enterprise &amp; Digital Natives business in EMEA, you will work with clients on short to medium-term customer engagements on their big data challenges using the Databricks platform. You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>RSAs are billable and know how to complete projects according to specification with excellent customer service. 
You will report to the regional Manager/Lead.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>Drive high-impact customer projects: Design and build reference architectures, implement production use cases, and create “how-to” guides tailored to the unique needs of fast-moving Emerging Enterprise &amp; Digital Native customers in EMEA.</li>\n</ul>\n<ul>\n<li>Collaborate on project scoping: Work closely with Engagement Managers and customers to define project scope, schedules, and deliverables for professional services engagements.</li>\n</ul>\n<ul>\n<li>Enable transformational initiatives: Guide strategic customers through their end-to-end big data journeys, migrating from legacy platforms and deploying industry-leading data and AI applications on the Databricks platform.</li>\n</ul>\n<ul>\n<li>Consult on architecture &amp; design: Provide thought leadership on solution design and implementation strategies, ensuring customers can successfully evaluate and adopt Databricks.</li>\n</ul>\n<ul>\n<li>Offer advanced support: Serve as an escalation point for operational issues, collaborating with Databricks Support and Engineering to resolve challenges quickly.</li>\n</ul>\n<ul>\n<li>Align technical delivery: Partner with cross-functional Databricks teams (Technical, PM, Architecture, and Customer Success) to align on milestones, ensuring customer needs and deadlines are met.</li>\n</ul>\n<ul>\n<li>Amplify product feedback: Provide implementation insights to Databricks Product and Support teams, guiding rapid improvements in features and troubleshooting for customers.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>Extensive experience in data engineering, data platforms &amp; analytics</li>\n</ul>\n<ul>\n<li>Comfortable writing code in either Python or Scala</li>\n</ul>\n<ul>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n</ul>\n<ul>\n<li>Deep experience with distributed computing with Apache Spark™ and 
knowledge of Spark runtime internals</li>\n</ul>\n<ul>\n<li>Familiarity with CI/CD for production deployments</li>\n</ul>\n<ul>\n<li>Working knowledge of MLOps</li>\n</ul>\n<ul>\n<li>Design and deployment of performant end-to-end data architectures</li>\n</ul>\n<ul>\n<li>Experience with technical project delivery - managing scope and timelines.</li>\n</ul>\n<ul>\n<li>Documentation and white-boarding skills.</li>\n</ul>\n<ul>\n<li>Experience working with clients and managing conflicts.</li>\n</ul>\n<ul>\n<li>Build skills in technical areas which support the deployment and integration of Databricks-based solutions to complete customer projects.</li>\n</ul>\n<ul>\n<li>Travel to customers 10% of the time</li>\n</ul>\n<ul>\n<li>[Preferred] Databricks Certification but not essential</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_8664b981-66c","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8439047002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","performant end-to-end data architectures","technical project delivery","documentation and white-boarding skills","client management"],"x-skills-preferred":["Databricks Certification"],"datePosted":"2026-04-18T15:43:52.925Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"London, United Kingdom"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & analytics, 
Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, performant end-to-end data architectures, technical project delivery, documentation and white-boarding skills, client management, Databricks Certification"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_34a0bf55-11a"},"title":"Resident Solutions Architect - Communications, Media, Entertainment & Games","description":"<p>As a Resident Solutions Architect in our Professional Services team, you will work with clients on short to medium term customer engagements on their big data challenges using the Databricks platform.</p>\n<p>You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>Work on a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases</li>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n<li>Consult on architecture and design; bootstrap or implement customer projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>6+ years experience in data engineering, data platforms &amp; analytics</li>\n<li>Comfortable writing code in either Python or Scala</li>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Spark runtime internals</li>\n<li>Familiarity with 
CI/CD for production deployments</li>\n<li>Working knowledge of MLOps</li>\n<li>Design and deployment of performant end-to-end data architectures</li>\n<li>Experience with technical project delivery - managing scope and timelines</li>\n<li>Documentation and white-boarding skills</li>\n<li>Experience working with clients and managing conflicts</li>\n</ul>\n<p>Databricks Certification</p>\n<p>Pay Range Transparency</p>\n<p>Databricks is committed to fair and equitable compensation practices. The pay range(s) for this role is listed below and represents the expected base salary range for non-commissionable roles or on-target earnings for commissionable roles.</p>\n<p>Actual compensation packages are based on several factors that are unique to each candidate, including but not limited to job-related skills, depth of experience, relevant certifications and training, and specific work location.</p>\n<p>Based on the factors above, Databricks anticipated utilizing the full width of the range.</p>\n<p>The total compensation package for this position may also include eligibility for annual performance bonus, equity, and the benefits listed above.</p>\n<p>For more information regarding which range your location is in visit our page here.</p>\n<p>Zone 1 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 2 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 3 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 4 Pay Range $180,656-$248,360 USD</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_34a0bf55-11a","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8461222002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 
USD","x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","end-to-end data architectures","technical project delivery","documentation and white-boarding skills"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:43:17.113Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Boston, Massachusetts"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & analytics, Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, end-to-end data architectures, technical project delivery, documentation and white-boarding skills","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_7d723067-22d"},"title":"Resident Solutions Architect - Healthcare & Life Sciences","description":"<p>As a Resident Solutions Architect in our Professional Services team, you will work with clients on short to medium term customer engagements on their big data challenges using the Databricks platform.</p>\n<p>You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>RSAs are billable and know how to complete projects according to specification with excellent customer service.</p>\n<p>You will report to the regional Manager/Lead.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>Work on a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and 
productionalizing customer use cases</li>\n</ul>\n<ul>\n<li>Work with engagement managers to scope variety of professional services work with input from the customer</li>\n</ul>\n<ul>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n</ul>\n<ul>\n<li>Consult on architecture and design; bootstrap or implement customer projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks.</li>\n</ul>\n<ul>\n<li>Provide an escalated level of support for customer operational issues.</li>\n</ul>\n<ul>\n<li>Work with the Databricks technical team, Project Manager, Architect and Customer team to ensure the technical components of the engagement are delivered to meet customer&#39;s needs.</li>\n</ul>\n<ul>\n<li>Work with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement specific product and support issues.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>6+ years experience in data engineering, data platforms &amp; analytics</li>\n</ul>\n<ul>\n<li>Comfortable writing code in either Python or Scala</li>\n</ul>\n<ul>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n</ul>\n<ul>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Spark runtime internals</li>\n</ul>\n<ul>\n<li>Familiarity with CI/CD for production deployments</li>\n</ul>\n<ul>\n<li>Working knowledge of MLOps</li>\n</ul>\n<ul>\n<li>Design and deployment of performant end-to-end data architectures</li>\n</ul>\n<ul>\n<li>Experience with technical project delivery - managing scope and timelines.</li>\n</ul>\n<ul>\n<li>Documentation and white-boarding skills.</li>\n</ul>\n<ul>\n<li>Experience working with clients and managing 
conflicts.</li>\n</ul>\n<ul>\n<li>Build skills in technical areas which support the deployment and integration of Databricks-based solutions to complete customer projects.</li>\n</ul>\n<ul>\n<li>Travel to customers 20% of the time</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_7d723067-22d","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8494144002","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 USD","x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","performant end-to-end data architectures","technical project delivery","documentation and white-boarding skills","client management"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:43:01.843Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Dallas, Texas"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & analytics, Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, performant end-to-end data architectures, technical project delivery, documentation and white-boarding skills, client management","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_b4a461d1-b6b"},"title":"Resident Solutions Architect - Public 
Sector","description":"<p>As a Resident Solutions Architect in our Professional Services team, you will work with clients on short to medium term customer engagements on their big data challenges using the Databricks platform. You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>RSAs are billable and know how to complete projects according to specification with excellent customer service. You will report to the regional Manager/Lead.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>You will work on a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases</li>\n<li>Work with engagement managers to scope variety of professional services work with input from the customer</li>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n<li>Consult on architecture and design; bootstrap or implement customer projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks.</li>\n<li>Provide an escalated level of support for customer operational issues.</li>\n<li>You will work with the Databricks technical team, Project Manager, Architect and Customer team to ensure the technical components of the engagement are delivered to meet customer&#39;s needs.</li>\n<li>Work with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement specific product and support issues.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>6+ years experience in data engineering, data platforms &amp; analytics</li>\n<li>Comfortable writing code 
in either Python or Scala</li>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n<li>Deep experience with distributed computing with Apache Spark and knowledge of Spark runtime internals</li>\n<li>Familiarity with CI/CD for production deployments</li>\n<li>Working knowledge of MLOps</li>\n<li>Design and deployment of performant end-to-end data architectures</li>\n<li>Experience with technical project delivery - managing scope and timelines.</li>\n<li>Documentation and white-boarding skills.</li>\n<li>Experience working with clients and managing conflicts.</li>\n<li>Build skills in technical areas which support the deployment and integration of Databricks-based solutions to complete customer projects.</li>\n<li>Travel to customers 20% of the time</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_b4a461d1-b6b","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8494128002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 USD","x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","end-to-end data architectures","technical project delivery","documentation and white-boarding skills","client management"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:42:50.996Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Washington, D.C."}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & 
analytics, Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, end-to-end data architectures, technical project delivery, documentation and white-boarding skills, client management","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_3e92e8a2-811"},"title":"Resident Solutions Architect - Public Sector","description":"<p>As a Resident Solutions Architect in our Professional Services team, you will work with clients on short to medium term customer engagements on their big data challenges using the Databricks platform.</p>\n<p>You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>RSAs are billable and know how to complete projects according to specification with excellent customer service.</p>\n<p>You will report to the regional Manager/Lead.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>You will work on a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases</li>\n</ul>\n<ul>\n<li>Work with engagement managers to scope variety of professional services work with input from the customer</li>\n</ul>\n<ul>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n</ul>\n<ul>\n<li>Consult on architecture and design; bootstrap or implement customer projects which leads to a customers&#39; successful understanding, evaluation and adoption of 
Databricks.</li>\n</ul>\n<ul>\n<li>Provide an escalated level of support for customer operational issues.</li>\n</ul>\n<ul>\n<li>You will work with the Databricks technical team, Project Manager, Architect and Customer team to ensure the technical components of the engagement are delivered to meet customer&#39;s needs.</li>\n</ul>\n<ul>\n<li>Work with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement specific product and support issues.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>6+ years experience in data engineering, data platforms &amp; analytics</li>\n</ul>\n<ul>\n<li>Comfortable writing code in either Python or Scala</li>\n</ul>\n<ul>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n</ul>\n<ul>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Spark runtime internals</li>\n</ul>\n<ul>\n<li>Familiarity with CI/CD for production deployments</li>\n</ul>\n<ul>\n<li>Working knowledge of MLOps</li>\n</ul>\n<ul>\n<li>Design and deployment of performant end-to-end data architectures</li>\n</ul>\n<ul>\n<li>Experience with technical project delivery - managing scope and timelines.</li>\n</ul>\n<ul>\n<li>Documentation and white-boarding skills.</li>\n</ul>\n<ul>\n<li>Experience working with clients and managing conflicts.</li>\n</ul>\n<ul>\n<li>Build skills in technical areas which support the deployment and integration of Databricks-based solutions to complete customer projects.</li>\n</ul>\n<ul>\n<li>Travel to customers 20% of the time</li>\n</ul>\n<p>Databricks Certification</p>\n<p>Pay Range Transparency</p>\n<p>Databricks is committed to fair and equitable compensation practices. 
The pay range(s) for this role is listed below and represents the expected base salary range for non-commissionable roles or on-target earnings for commissionable roles.</p>\n<p>Actual compensation packages are based on several factors that are unique to each candidate, including but not limited to job-related skills, depth of experience, relevant certifications and training, and specific work location.</p>\n<p>Based on the factors above, Databricks anticipates utilizing the full width of the range.</p>\n<p>The total compensation package for this position may also include eligibility for annual performance bonus, equity, and the benefits listed above.</p>\n<p>For more information regarding which range your location is in visit our page here.</p>\n<p>Zone 1 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 2 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 3 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 4 Pay Range $180,656-$248,360 USD</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_3e92e8a2-811","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8494130002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 USD","x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","end-to-end data architectures","technical project delivery","documentation and white-boarding skills","client management"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:42:35.247Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"New York City, New 
York"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & analytics, Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, end-to-end data architectures, technical project delivery, documentation and white-boarding skills, client management","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_1d222227-15b"},"title":"Resident Solutions Architect - Financial Services","description":"<p>As a Senior Big Data Solutions Architect (Sr Resident Solutions Architect) in our Professional Services team, you will work with clients on short to medium term customer engagements on their big data challenges using the Databricks platform.</p>\n<p>You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>Work on a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases</li>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n<li>Consult on architecture and design; bootstrap hands-on projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks</li>\n<li>Provide an escalated level of support for customer operational issues</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>9+ years experience in data engineering, 
data platforms &amp; analytics</li>\n<li>Comfortable writing code in either Python or Scala</li>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Apache Spark™ runtime internals</li>\n<li>Familiarity with CI/CD for production deployments</li>\n<li>Working knowledge of MLOps</li>\n<li>Capable of design and deployment of highly performant end-to-end data architectures</li>\n<li>Experience with technical project delivery - managing scope and timelines</li>\n<li>Documentation and white-boarding skills</li>\n<li>Experience working with clients and managing conflicts</li>\n<li>Experience in building scalable streaming and batch solutions using cloud-native components</li>\n<li>Travel to customers up to 20% of the time</li>\n</ul>\n<p>Nice to have:</p>\n<ul>\n<li>Databricks Certification</li>\n</ul>\n<p>Pay Range Transparency</p>\n<p>Databricks is committed to fair and equitable compensation practices. 
The pay range(s) for this role is listed below and represents the expected base salary range for non-commissionable roles or on-target earnings for commissionable roles.</p>\n<p>Actual compensation packages are based on several factors that are unique to each candidate, including but not limited to job-related skills, depth of experience, relevant certifications and training, and specific work location.</p>\n<p>Based on the factors above, Databricks anticipates utilizing the full width of the range.</p>\n<p>The total compensation package for this position may also include eligibility for annual performance bonus, equity, and the benefits listed above.</p>\n<p>For more information regarding which range your location is in visit our page here.</p>\n<p>Zone 1 Pay Range $180,656-$248,360 USD Zone 2 Pay Range $180,656-$248,360 USD Zone 3 Pay Range $180,656-$248,360 USD Zone 4 Pay Range $180,656-$248,360 USD</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_1d222227-15b","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8456969002","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 USD","x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","technical project delivery","documentation and white-boarding skills","client management"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:42:03.482Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Chicago, 
Illinois"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & analytics, Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, technical project delivery, documentation and white-boarding skills, client management","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_5b0c9ca0-2ec"},"title":"Manager of Solutions Architecture, Applied AI (Industries)","description":"<p>As the Manager of the Applied AI Architect team for DACH Industries at Anthropic, you will drive the adoption of frontier AI by enabling the deployment of Anthropic&#39;s products across our Industries accounts. You&#39;ll leverage your technical skills and consultative sales experience to drive positive AI transformation that addresses our customers&#39; business needs, meets their technical requirements, and provides a high degree of reliability and safety.</p>\n<p>You&#39;ll be responsible for leading and growing the DACH Applied AI Architect team, establishing processes and best practices for your segment&#39;s pre-sales engagements based on your depth of experience and knowledge of the market, helping each team member achieve success, high productivity, and career growth, and representing Anthropic as a technical lead on some of its most important partnerships.</p>\n<p>In collaboration with Sales, Product, and Engineering teams, you&#39;ll help large enterprise customers incorporate leading-edge AI systems into both internal business transformation initiatives and customer-facing products. 
You will also help build technical GTM plays for Anthropic’s products and define high-value industry solutions.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Hire, manage and mentor a team of Applied AI Architects for DACH, providing both technical guidance and career development</li>\n<li>Set goals and reviews for your team, promoting growth and output</li>\n<li>Work with a handful of highest-value enterprise customers on their overall AI adoption strategies, focusing on pre-sales technical excellence including use case scoping, architecture reviews, technical champion building, technical adoption workshops and POC execution</li>\n<li>Partner closely with your aligned GTM leadership to understand customer requirements and co-build GTM strategies to drive adoption across all industry verticals</li>\n<li>Own the technical portions of pre-sales engagements, ensuring your team provides compelling demos and validates customer ROI from Anthropic products</li>\n<li>Develop scalable technical engagement frameworks and reusable assets tailored for all non-tech sectors, including Financial Services &amp; Insurance, Healthcare, Telco, Retail and others.</li>\n<li>Drive collaboration from cross-functional teams to influence and unify stakeholders at all levels of the organization to drive business outcomes</li>\n<li>Travel to customer sites for executive-level sessions, technical workshops, and building relationships</li>\n<li>Establish a shared vision for creating solutions that enable beneficial and safe AI</li>\n<li>Lead the vision, strategy, and execution of innovative solutions that leverage our latest models&#39; capabilities</li>\n<li>Contribute to thought leadership through conference presentations, webinars, and technical content creation</li>\n<li>Stay current with emerging AI/ML trends and competitive landscape across all major Industries verticals</li>\n</ul>\n<p>You may be a good fit if you have:</p>\n<ul>\n<li>7+ years of experience as an Solutions Architect, Sales 
Engineer, or similar pre-sales technical role</li>\n<li>3+ years of technical go-to-market management experience, specifically managing pre-sales teams</li>\n<li>Experience working with and selling to large enterprise customers across multiple verticals</li>\n<li>Demonstrated ability to navigate and articulate complex technical, organisational and regulatory requirements of large enterprises, in particular in regulated industries (e.g FSI, Healthcare &amp; Life Sciences)</li>\n<li>A proven knowledge of the DACH market and its local specificities (regulatory, cultural, technical…)</li>\n<li>Experience framing and delivering enterprise AI use cases (productivity, workflow transformation) and or scoping large enterprise adoption programs</li>\n<li>Deep technical proficiency with enterprise AI deployments, API integrations, and production LLM use cases</li>\n<li>Demonstrated ability to build scalable, repeatable processes and frameworks that work across diverse customer segments</li>\n<li>An organisational mindset and enjoy building foundational teams in a relatively unstructured environment</li>\n<li>Excellent communication, collaboration, and coaching abilities</li>\n<li>Comfort dealing with highly uncertain, ambiguous, and fast-moving environments</li>\n<li>Strong executive presence and ability to foster deep relationships with technical leaders and engineering teams</li>\n<li>High-level familiarity with the architecture and operation of LLM and/or ML in general</li>\n<li>Experience with prompt engineering, LLM evaluation, and architecting AI-powered systems</li>\n<li>Ability to make ambiguous problems clear and identify core principles that can translate across scenarios</li>\n<li>A passion for making powerful technology safe and societally beneficial</li>\n<li>Creative thinking about the risks and benefits of new technologies, beyond past checklists and playbooks</li>\n<li>A track record of staying current with emerging AI research and industry 
trends</li>\n</ul>\n<p>Strong candidates may also have:</p>\n<ul>\n<li>10+ years technical experience</li>\n<li>5+ years managing AI technical teams in hypergrowth (ideally from 0 to 30+)</li>\n<li>Previous experience leading solutions architect or pre-sales teams through rapid growth</li>\n<li>Proven experience and understanding across all key non-tech industry verticals, in particular the regulated industries: Financial Services, Insurance, Healthcare, Life Sciences, Telco, Retail, Consumer Goods, Manufacturing, Oil &amp; Gas, etc.</li>\n<li>In addition to English, native or fluency in a DACH language</li>\n<li>Demonstrated experience in defining and delivering enterprise AI use cases in regulated industries: e.g Banking, Insurance, Healthcare and Life sciences</li>\n<li>Track record building technical playbooks and assets that scale across diverse customer segments</li>\n<li>Understanding of both digital-native technical requirements (API integration, developer experience) and traditional enterprise needs (security, compliance, change management)</li>\n<li>Background at enterprise AI software or API-first companies serving large enterprise customers</li>\n<li>Proven experience building a DACH AI team</li>\n</ul>\n<p>Logistics</p>\n<p>Minimum education: Bachelor’s degree or an equivalent combination of education, training, and/or experience Required field of study: A field relevant to the role as demonstrated through coursework, training, or professional experience Minimum years of experience: Years of experience required will correlate with the internal job level requirements for the position Location-based hybrid policy: Currently, we expect all staff to be in one of our offices at least 25% of the time. However, some roles may require more time in our offices. Visa sponsorship: We do sponsor visas! However, we aren&#39;t able to successfully sponsor visas for every role and every candidate. 
But if we make you an offer, we will make every reasonable effort to get you a visa, and we retain an immigration lawyer to help with this.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_5b0c9ca0-2ec","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5117652008","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Enterprise AI deployments","API integrations","Production LLM use cases","Technical go-to-market management","Pre-sales teams management","Large enterprise customers sales","Regulated industries (e.g FSI, Healthcare & Life Sciences) knowledge","DACH market knowledge","Scalable, repeatable processes and frameworks","Organisational mindset","Excellent communication, collaboration, and coaching abilities","High-level familiarity with LLM and/or ML architecture and operation","Prompt engineering, LLM evaluation, and architecting AI-powered systems"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:42:02.718Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Munich, Germany"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Enterprise AI deployments, API integrations, Production LLM use cases, Technical go-to-market management, Pre-sales teams management, Large enterprise customers sales, Regulated industries (e.g FSI, Healthcare & Life Sciences) knowledge, DACH market knowledge, Scalable, repeatable processes and frameworks, Organisational mindset, Excellent communication, collaboration, and coaching abilities, High-level familiarity with LLM and/or ML architecture and operation, Prompt 
engineering, LLM evaluation, and architecting AI-powered systems"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_6fed2bb6-3b6"},"title":"Resident Solutions Architect - Public Sector","description":"<p>As a Resident Solutions Architect in our Professional Services team, you will work with clients on short to medium term customer engagements on their big data challenges using the Databricks platform. You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>Your responsibilities will include:</p>\n<ul>\n<li>Designing and building reference architectures for customers</li>\n<li>Creating how-to&#39;s and productionalizing customer use cases</li>\n<li>Guiding strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n<li>Consulting on architecture and design; bootstrapping or implementing customer projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks</li>\n<li>Providing an escalated level of support for customer operational issues</li>\n<li>Working with the Databricks technical team, Project Manager, Architect and Customer team to ensure the technical components of the engagement are delivered to meet customer&#39;s needs</li>\n<li>Working with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement specific product and support issues</li>\n</ul>\n<p>To be successful in this role, you will need:</p>\n<ul>\n<li>6+ years experience in data engineering, data platforms &amp; analytics</li>\n<li>Comfortable writing code in either Python or Scala</li>\n<li>Working 
knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Spark runtime internals</li>\n<li>Familiarity with CI/CD for production deployments</li>\n<li>Working knowledge of MLOps</li>\n<li>Design and deployment of performant end-to-end data architectures</li>\n<li>Experience with technical project delivery - managing scope and timelines</li>\n<li>Documentation and white-boarding skills</li>\n<li>Experience working with clients and managing conflicts</li>\n<li>Build skills in technical areas which support the deployment and integration of Databricks-based solutions to complete customer projects</li>\n<li>Travel to customers 20% of the time</li>\n</ul>\n<p>The pay range for this role is $180,656-$248,360 USD per year, depending on location and experience.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_6fed2bb6-3b6","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8461321002","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 USD per year","x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","performant end-to-end data architectures","technical project delivery","documentation and white-boarding skills","client management"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:41:52.838Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Chicago, 
Illinois"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & analytics, Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, performant end-to-end data architectures, technical project delivery, documentation and white-boarding skills, client management","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_d0793a44-d91"},"title":"Resident Solutions Architect - Financial Services","description":"<p>As a Senior Big Data Solutions Architect (Sr Resident Solutions Architect) in our Professional Services team, you will work with clients on short to medium term customer engagements on their big data challenges using the Databricks platform.</p>\n<p>You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>RSAs are billable and know how to complete projects according to specification with excellent customer service.</p>\n<p>You will report to the regional Manager/Lead.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>Work on a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases</li>\n<li>Work with engagement managers to scope variety of professional services work with input from the customer</li>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n<li>Consult on architecture 
and design; bootstrap hands-on projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks.</li>\n<li>Provide an escalated level of support for customer operational issues.</li>\n<li>Work with the Databricks technical team, Project Manager, Architect and Customer team to ensure the technical components of the engagement are delivered to meet customer&#39;s needs.</li>\n<li>Work with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement specific product and support issues.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>9+ years experience in data engineering, data platforms &amp; analytics</li>\n<li>Comfortable writing code in either Python or Scala</li>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Apache Spark™ runtime internals</li>\n<li>Familiarity with CI/CD for production deployments</li>\n<li>Working knowledge of MLOps</li>\n<li>Capable of design and deployment of highly performant end-to-end data architectures</li>\n<li>Experience with technical project delivery - managing scope and timelines.</li>\n<li>Documentation and white-boarding skills.</li>\n<li>Experience working with clients and managing conflicts.</li>\n<li>Experience in building scalable streaming and batch solutions using cloud-native components</li>\n<li>Travel to customers up to 20% of the time</li>\n</ul>\n<p>Nice to have:</p>\n<ul>\n<li>Databricks Certification</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_d0793a44-d91","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8461328002","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 USD","x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","end-to-end data architectures","technical project delivery","documentation and white-boarding skills","client management"],"x-skills-preferred":["Databricks Certification"],"datePosted":"2026-04-18T15:41:30.682Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Charlotte, North Carolina"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & analytics, Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, end-to-end data architectures, technical project delivery, documentation and white-boarding skills, client management, Databricks Certification","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_5fd85b1e-563"},"title":"Resident Solutions Architect - Financial Services","description":"<p>As a Senior Big Data Solutions Architect (Sr Resident Solutions Architect) in our Professional Services team, you will work with clients on short to medium term customer engagements on their big data challenges using the Databricks platform.</p>\n<p>You will 
provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>RSAs are billable and know how to complete projects according to specification with excellent customer service.</p>\n<p>You will report to the regional Manager/Lead.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>Work on a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases</li>\n<li>Work with engagement managers to scope variety of professional services work with input from the customer</li>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n<li>Consult on architecture and design; bootstrap hands-on projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks.</li>\n<li>Provide an escalated level of support for customer operational issues.</li>\n<li>Work with the Databricks technical team, Project Manager, Architect and Customer team to ensure the technical components of the engagement are delivered to meet customer&#39;s needs.</li>\n<li>Work with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement specific product and support issues.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>9+ years experience in data engineering, data platforms &amp; analytics</li>\n<li>Comfortable writing code in either Python or Scala</li>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Apache Spark™ runtime 
internals</li>\n<li>Familiarity with CI/CD for production deployments</li>\n<li>Working knowledge of MLOps</li>\n<li>Capable of design and deployment of highly performant end-to-end data architectures</li>\n<li>Experience with technical project delivery - managing scope and timelines.</li>\n<li>Documentation and white-boarding skills.</li>\n<li>Experience working with clients and managing conflicts.</li>\n<li>Experience in building scalable streaming and batch solutions using cloud-native components</li>\n<li>Travel to customers up to 20% of the time</li>\n<li>Nice to have: Databricks Certification</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_5fd85b1e-563","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8456965002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 USD","x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","design and deployment of highly performant end-to-end data architectures","technical project delivery","documentation and white-boarding skills","client management"],"x-skills-preferred":["Databricks Certification"],"datePosted":"2026-04-18T15:41:28.459Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Dallas, Texas"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & analytics, Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, design and deployment of highly 
performant end-to-end data architectures, technical project delivery, documentation and white-boarding skills, client management, Databricks Certification","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_86f8a410-0b9"},"title":"Manager of Solutions Architecture, Applied AI (Industries)","description":"<p>Job Title: Manager of Solutions Architecture, Applied AI (Industries)</p>\n<p>About the Role:</p>\n<p>As the manager of the Industries Solutions Architect team within Applied AI at Anthropic, you will drive the adoption of frontier AI in partnership with the rest of the go-to-market organisation. Our Industries customers include Fortune 500 companies within verticals like financial services, healthcare life sciences, and retail.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Hire, manage, and guide a team of pre-sales Solutions Architects by providing both technical guidance and career development.</li>\n<li>Set goals for your team in collaboration with sales and other parts of the organisation that establish baseline expectations for performance.</li>\n<li>Act as a technical sponsor for high-value strategic customers and advise them on their overall AI adoption strategies or use case scoping and POC execution.</li>\n<li>Partner closely with Industries sales leadership to identify new strategies to drive adoption of Anthropic products within specific verticals or horizontal use cases.</li>\n<li>Work with cross-functional teams like product and engineering to ensure Anthropic prioritises customer feedback or resolves blockers to adoption.</li>\n<li>Travel to customer sites or conferences for executive-level sessions, technical workshops, and relationship building.</li>\n<li>Establish a shared vision for creating solutions that enable beneficial and safe 
AI in technology products.</li>\n<li>Contribute to thought leadership through conference presentations, webinars, and technical content creation.</li>\n<li>Stay current with emerging AI/ML trends and the competitive landscape.</li>\n</ul>\n<p>Requirements:</p>\n<ul>\n<li>7+ years of experience as a Solutions Architect, Sales Engineer, or similar pre-sales technical role.</li>\n<li>3+ years of technical pre-sales management experience.</li>\n<li>Have sold complex technical products to Fortune 500 companies, especially in verticals like financial services, healthcare life sciences, and retail.</li>\n<li>Have deep technical proficiency with enterprise AI use cases, API integrations, and LLM deployments.</li>\n<li>Thrive in building and rapidly scaling teams and processes within ambiguous and fast-moving environments.</li>\n<li>Have excellent communication, collaboration, and coaching abilities.</li>\n<li>Strong executive presence and ability to foster deep relationships with technical leaders and engineering teams at Fortune 500 companies.</li>\n<li>Have at least a high level familiarity with the architecture and operation of LLMs.</li>\n<li>Have a passion for making powerful technology safe and societally beneficial.</li>\n<li>Stay up-to-date and informed by taking an active interest in emerging research and industry trends within AI.</li>\n</ul>\n<p>Preferred Qualifications:</p>\n<ul>\n<li>Enterprise pre-sales leadership at scale: 5+ years leading solution architect teams through hypergrowth (ideally 10 to 50+ people), with direct experience managing senior individual contributors and developing junior talent in complex enterprise software sales environments.</li>\n<li>AI Technical Depth + Executive Engagement: Hands-on experience with AI platforms and enterprise integration patterns, combined with proven track record engaging C-level stakeholders in $10M+ technical evaluations and enterprise sales cycles.</li>\n<li>Multi-Segment GTM Experience: Demonstrated success 
adapting technical approaches across customer segments (commercial to Fortune 100).</li>\n</ul>\n<p>Salary: The annual compensation range for this role is $270,000-$345,000 USD.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_86f8a410-0b9","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/4964610008","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$270,000-$345,000 USD","x-skills-required":["Solutions Architecture","Sales Engineering","Pre-sales Technical Role","Enterprise AI Use Cases","API Integrations","LLM Deployments","Team Management","Technical Guidance","Career Development","Communication","Collaboration","Coaching","Executive Presence"],"x-skills-preferred":["Enterprise Pre-sales Leadership","AI Technical Depth","Executive Engagement","Multi-Segment GTM Experience"],"datePosted":"2026-04-18T15:40:59.458Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA | New York City, NY"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Solutions Architecture, Sales Engineering, Pre-sales Technical Role, Enterprise AI Use Cases, API Integrations, LLM Deployments, Team Management, Technical Guidance, Career Development, Communication, Collaboration, Coaching, Executive Presence, Enterprise Pre-sales Leadership, AI Technical Depth, Executive Engagement, Multi-Segment GTM 
Experience","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":270000,"maxValue":345000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_56bd1ff9-278"},"title":"Manager, Applied AI Solutions Architecture - Partnerships","description":"<p>As the Manager of the Partnerships Applied AI Solutions Architect team, you will drive adoption of frontier AI by enabling deployment of Anthropic&#39;s products (Claude for Enterprise, Claude Code, API) through our Global and Regional System Integrators, cloud partners (AWS, GCP, Azure), and strategic technology partners.</p>\n<p>You will build and lead a team of Partner Solutions Architects, establish processes and best practices for partner-led pre-sales engagements, and represent Anthropic as the technical lead on its most important partnerships. In collaboration with Sales, Partnerships, Product, and Engineering, you will help partners incorporate leading-edge AI into their practices, accelerate indirect revenue, and execute long-term GTM strategy while maintaining our best-in-class safety standards.</p>\n<p>Responsibilities:</p>\n<p>Team Leadership &amp; Development: Hire, manage, and mentor a team of Partner Solutions Architects. Set goals, run reviews, and coach each team member toward high productivity and career growth.</p>\n<p>Strategic Technical Partnership: Act as the senior technical thought partner to Anthropic&#39;s GTM partnerships team. Co-build partner strategy with aligned GTM leadership, drive key programs, and align cross-functional stakeholders (Sales, Product, Engineering) behind partner outcomes.</p>\n<p>Partner Enablement &amp; Ecosystem: Embed your team with GSI and cloud partner technical teams to enable their AI practices, troubleshoot, and evangelize Anthropic in their developer communities. 
Represent Anthropic at partner events (GSI workshops, AWS/GCP summits, hackathons) and contribute technical content and thought leadership.</p>\n<p>Joint Solution Development: Lead partners in identifying high-value, industry-specific GenAI applications. Develop joint solutions and codify reference architectures and best practices to accelerate time to deployment.</p>\n<p>Customer Deal Support: Own the technical portion of partner-led pre-sales engagements. Intervene directly on strategic deals where partners are the primary delivery vehicle, providing deep solution architecture guidance.</p>\n<p>Product Feedback: Gather and validate feedback on Anthropic&#39;s products from partner deployments and deliver it to Product and Engineering to inform roadmap and partner strategy.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_56bd1ff9-278","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5173031008","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$315,000-$380,000 USD","x-skills-required":["Technical depth in enterprise AI deployments","LLM architecture","Prompt engineering","Evaluation","API integrations","Production use cases","Deep understanding of partner-led selling and delivery","Indirect revenue models","Enablement at scale","Joint GTM motions","Exceptional communication and executive presence","Ability to build trusted relationships with C-suite, partner leadership, and engineering teams alike"],"x-skills-preferred":["5+ years leading partner-facing SA teams through hypergrowth","Direct experience helping GSIs or consultancies build their AI/ML practice","Enablement programs","Certification 
paths","Joint solution development"],"datePosted":"2026-04-18T15:40:55.039Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA | New York City, NY | Seattle, WA"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Technical depth in enterprise AI deployments, LLM architecture, Prompt engineering, Evaluation, API integrations, Production use cases, Deep understanding of partner-led selling and delivery, Indirect revenue models, Enablement at scale, Joint GTM motions, Exceptional communication and executive presence, Ability to build trusted relationships with C-suite, partner leadership, and engineering teams alike, 5+ years leading partner-facing SA teams through hypergrowth, Direct experience helping GSIs or consultancies build their AI/ML practice, Enablement programs, Certification paths, Joint solution development","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":315000,"maxValue":380000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_c0df6b64-aad"},"title":"Head of Solutions Architects, Applied AI (Korea)","description":"<p><strong>Job Title</strong></p>\n<p>Head of Solutions Architects, Applied AI (Korea)</p>\n<p><strong>About Anthropic</strong></p>\n<p>Anthropic&#39;s mission is to create reliable, interpretable, and steerable AI systems. 
The company is a quickly growing group of committed researchers, engineers, policy experts, and business leaders working together to build beneficial AI systems.</p>\n<p><strong>About the Role</strong></p>\n<p>As the founding leader of Applied AI Solutions Architecture in Korea, you will drive the adoption of frontier AI by enabling the deployment of Anthropic&#39;s products (Claude for Enterprise, Claude Code, and API) across Korean enterprises and digital-first organisations. You&#39;ll leverage your technical skills and consultative sales experience to drive positive AI transformation that addresses our customers&#39; business needs, meets their technical requirements, and provides a high degree of reliability and safety.</p>\n<p><strong>Responsibilities</strong></p>\n<ul>\n<li>Build and manage the foundational team of Applied AI professionals in Seoul (Solutions Architects and Product Engineers) providing both technical guidance and career development</li>\n<li>Set goals and reviews for your team, promoting growth and output</li>\n<li>Work with a handful of highest-value enterprise customers on their overall AI adoption strategies, focusing on pre-sales technical excellence including use case scoping, technical champion building, and POC execution</li>\n<li>Partner closely with your aligned GTM leadership to understand customer requirements &amp; co-build GTM strategies to drive adoption for Korean enterprise customers</li>\n<li>Contribute to thought leadership through conference presentations, webinars, and technical content creation</li>\n<li>Own the technical portions of pre-sales engagements, ensuring your team provides compelling demos and validates enterprise customer ROI from Anthropic products</li>\n<li>Drive collaboration from cross-functional teams to influence and unify stakeholders at all levels of the organisation to drive business outcomes</li>\n<li>Travel regularly to customer sites for executive-level sessions, technical workshops, and building 
relationships</li>\n<li>Establish a shared vision for creating solutions that enable beneficial and safe AI in technology products</li>\n<li>Lead the vision, strategy, and execution of innovative solutions that leverage our latest models&#39; capabilities</li>\n<li>Stay current with emerging AI/ML trends and competitive landscape in the Korean enterprise tech sector</li>\n</ul>\n<p><strong>Requirements</strong></p>\n<ul>\n<li>7+ years of experience as a Solutions Architect, Sales Engineer, or similar pre-sales technical role</li>\n<li>3+ years of technical go-to-market management experience, specifically managing pre-sales teams</li>\n<li>Native or business-level fluency in Korean and professional proficiency in English</li>\n<li>Experience working with Korean enterprise customers and understanding local business culture and decision-making processes</li>\n<li>Experience with the unique technical requirements and technical procurement process of enterprise tech companies</li>\n<li>Deep technical proficiency with enterprise AI deployments, API integrations, and production LLM use cases</li>\n<li>Have an organisational mindset and enjoy building foundational teams in a relatively unstructured environment</li>\n<li>Have excellent communication, collaboration, and coaching abilities</li>\n<li>Are comfortable dealing with highly uncertain, ambiguous, and fast-moving environments typical of the tech industry</li>\n<li>Strong executive presence and ability to foster deep relationships with technical leaders and engineering teams</li>\n<li>Have at least a high level familiarity with the architecture and operation of large language models and/or ML in general</li>\n<li>Experience with prompt engineering, LLM evaluation, and architecting AI-powered systems</li>\n<li>Make ambiguous problems clear and identify core principles that can translate across scenarios</li>\n<li>Have a passion for making powerful technology safe and societally beneficial</li>\n<li>Think creatively 
about the risks and benefits of new technologies, and think beyond past checklists and playbooks</li>\n<li>Stay up-to-date and informed by taking an active interest in emerging research and industry trends</li>\n<li>Understanding of developer tooling, SDKs, and technical integration patterns common in enterprise tech companies</li>\n</ul>\n<p><strong>Benefits</strong></p>\n<ul>\n<li>Competitive salary and benefits package</li>\n<li>Opportunity to work with a talented and diverse team</li>\n<li>Professional development and growth opportunities</li>\n<li>Flexible work arrangements</li>\n</ul>\n<p><strong>How to Apply</strong></p>\n<p>If you&#39;re interested in this opportunity, please submit your resume and a cover letter explaining why you&#39;re a great fit for this role. We can&#39;t wait to hear from you!</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_c0df6b64-aad","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5064817008","x-work-arrangement":"hybrid","x-experience-level":"executive","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Solutions Architect","Sales Engineer","Pre-sales Technical Role","Technical Go-to-Market Management","Enterprise AI Deployments","API Integrations","Production LLM Use Cases","Large Language Models","Machine Learning","Prompt Engineering","LLM Evaluation","AI-Powered Systems"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:40:39.535Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Seoul, South Korea"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Solutions Architect, Sales Engineer, Pre-sales Technical Role, 
Technical Go-to-Market Management, Enterprise AI Deployments, API Integrations, Production LLM Use Cases, Large Language Models, Machine Learning, Prompt Engineering, LLM Evaluation, AI-Powered Systems"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_dbbfbfe1-a44"},"title":"Global Leader, Applied AI Architects, Beneficial Deployments","description":"<p>As the Global Leader of Applied AI Architects for Beneficial Deployments, you will lead a team of Applied AI Architects who serve as primary technical partners to mission-driven organisations and non-profits adopting Claude. You&#39;ll build and scale a world-class, globally distributed team that turns frontier AI into real impact in education, global health, economic mobility, and life sciences.</p>\n<p>Your responsibilities will include:</p>\n<ul>\n<li>Leading, growing, and mentoring a globally distributed team of Architects supporting mission-driven non-profits across education, global health, economic mobility, and life sciences</li>\n<li>Setting the vision, strategy, and operating model for how Applied AI shows up in Beneficial Deployments</li>\n<li>Establishing hiring plans, team structure, and career development paths as we scale the team globally</li>\n<li>Partnering closely with segment leads and senior partner leadership to understand requirements and shape engagements on our highest-impact partnerships</li>\n<li>Driving the design of cohort-based accelerators, Claude Code enablement programs, and other scalable mechanisms that multiply our impact across many organisations simultaneously</li>\n</ul>\n<p>You may be a good fit if you have:</p>\n<ul>\n<li>10+ years of experience in technical, customer-facing roles (Solutions Architect, Forward Deployed Engineer, Customer Engineer, Sales Engineer, or similar), with meaningful exposure to complex, high-stakes deployments</li>\n<li>7+ years of engineering or technical leadership experience, 
preferably building and scaling customer-facing or forward-deployed teams globally</li>\n<li>Experience working with or inside mission-driven organisations — education, healthcare, scientific research, global development, or non-profits — and a genuine understanding of the constraints, incentives, and operating realities of these sectors</li>\n<li>Familiarity with common LLM implementation patterns, including prompt engineering, evaluation frameworks, agent frameworks, and retrieval systems; working knowledge of Python</li>\n</ul>\n<p>Strong candidates may also have:</p>\n<ul>\n<li>Experience leading globally distributed teams across time zones and regions</li>\n<li>Background in philanthropy, global health, education technology, or scientific research</li>\n<li>Experience designing cohort-based or programmatic delivery models that scale technical expertise across many organisations</li>\n<li>A working understanding of emerging research in agents, evaluations, and AI safety</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_dbbfbfe1-a44","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5192104008","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$315,000-$380,000 USD","x-skills-required":["technical leadership","customer-facing roles","complex, high-stakes deployments","LLM implementation patterns","prompt engineering","evaluation frameworks","agent frameworks","retrieval systems","Python"],"x-skills-preferred":["globally distributed teams","philanthropy","global health","education technology","scientific 
research"],"datePosted":"2026-04-18T15:39:36.977Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA | New York City, NY"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"technical leadership, customer-facing roles, complex, high-stakes deployments, LLM implementation patterns, prompt engineering, evaluation frameworks, agent frameworks, retrieval systems, Python, globally distributed teams, philanthropy, global health, education technology, scientific research","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":315000,"maxValue":380000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_a9c991df-d04"},"title":"Manager of Solutions Architecture, Applied AI (Industries)","description":"<p>As the Manager of the Applied AI Architect team for Northern European Industries at Anthropic, you will drive the adoption of frontier AI by enabling the deployment of Anthropic&#39;s products (Claude for Enterprise, Claude Code, and API) across our Industries accounts: large enterprises across all non-tech verticals in the UK, Ireland and the Nordics.</p>\n<p>You&#39;ll leverage your technical skills and consultative sales experience to drive positive AI transformation that addresses our customers&#39; business needs, meets their technical requirements, and provides a high degree of reliability and safety.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Hire, manage and mentor a team of Applied AI Architects for Northern Europe, providing both technical guidance and career development</li>\n<li>Set goals and reviews for your team, promoting growth and output</li>\n<li>Work with a handful of highest-value enterprise customers on their overall AI adoption strategies, focusing on pre-sales technical excellence including use case scoping, 
architecture reviews, technical champion building, technical adoption workshops and POC execution</li>\n<li>Partner closely with your aligned GTM leadership to understand customer requirements and co-build GTM strategies to drive adoption across all industry verticals</li>\n<li>Own the technical portions of pre-sales engagements, ensuring your team provides compelling demos and validates customer ROI from Anthropic products</li>\n<li>Develop scalable technical engagement frameworks and reusable assets tailored for all non-tech sectors, including Financial Services &amp; Insurance, Healthcare, Telco, Retail and others.</li>\n</ul>\n<p>You may be a good fit if you have:</p>\n<ul>\n<li>7+ years of experience as a Solutions Architect, Sales Engineer, or similar pre-sales technical role</li>\n<li>3+ years of technical go-to-market management experience, specifically managing pre-sales teams</li>\n<li>Experience working with and selling to large enterprise customers across multiple verticals</li>\n<li>Demonstrated ability to navigate and articulate complex technical, organisational and regulatory requirements of large enterprises, in particular in regulated industries (e.g FSI, Healthcare &amp; Life Sciences)</li>\n<li>A proven knowledge of the Northern European market and its local specificities (regulatory, cultural, technical…)</li>\n<li>Experience framing and delivering enterprise AI use cases (productivity, workflow transformation) and/or scoping large enterprise adoption programs</li>\n<li>Deep technical proficiency with enterprise AI deployments, API integrations, and production LLM use cases</li>\n<li>Demonstrated ability to build scalable, repeatable processes and frameworks that work across diverse customer segments</li>\n<li>An organisational mindset and enjoy building foundational teams in a relatively unstructured environment</li>\n<li>Excellent communication, collaboration, and coaching abilities</li>\n<li>Comfort dealing with highly uncertain, ambiguous, 
and fast-moving environments</li>\n<li>Strong executive presence and ability to foster deep relationships with technical leaders and engineering teams</li>\n<li>High-level familiarity with the architecture and operation of LLM and/or ML in general</li>\n<li>Experience with prompt engineering, LLM evaluation, and architecting AI-powered systems</li>\n<li>Ability to make ambiguous problems clear and identify core principles that can translate across scenarios</li>\n<li>A passion for making powerful technology safe and societally beneficial</li>\n<li>Creative thinking about the risks and benefits of new technologies, beyond past checklists and playbooks</li>\n<li>A track record of staying current with emerging AI research and industry trends or similar.</li>\n</ul>\n<p>Strong candidates may also have:</p>\n<ul>\n<li>10+ years technical experience</li>\n<li>5+ years managing AI technical teams in hypergrowth (ideally from 10 to 50+)</li>\n<li>Previous experience leading solutions architect or pre-sales teams through rapid growth</li>\n<li>Proven experience and understanding across all key non-tech industry verticals, in particular the regulated industries: Financial Services, Insurance, Healthcare, Life Sciences, Telco, Retail, Consumer Goods, Manufacturing, Oil &amp; Gas, etc.</li>\n<li>In addition to English, native or fluency in a Northern European language</li>\n<li>Demonstrated experience in defining and delivering enterprise AI use cases in regulated industries: e.g, Banking, Insurance, Healthcare and Life sciences</li>\n<li>Track record of building technical playbooks and assets that scale across diverse customer segments</li>\n<li>Understanding of both digital-native technical requirements (API integration, developer experience) and traditional enterprise needs (security, compliance, change management)</li>\n<li>Background at enterprise AI software or API-first companies serving large enterprise customers</li>\n<li>Proven experience building a Northern European 
AI team</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_a9c991df-d04","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5115884008","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"Annual Salary: £170,000-£215,000 GBP","x-skills-required":["Solutions Architecture","Sales Engineering","Pre-sales Technical Role","Technical Go-to-Market Management","Enterprise AI Deployments","API Integrations","Production LLM Use Cases","Scalable Technical Engagement Frameworks","Reusable Assets","Financial Services & Insurance","Healthcare","Telco","Retail","Digital-Native Technical Requirements","API Integration","Developer Experience","Traditional Enterprise Needs","Security","Compliance","Change Management"],"x-skills-preferred":["Prompt Engineering","LLM Evaluation","Architecting AI-Powered Systems","Ambiguous Problem Solving","Core Principles Identification","Passion for Making Powerful Technology Safe and Societally Beneficial","Creative Thinking","Staying Current with Emerging AI Research and Industry Trends"],"datePosted":"2026-04-18T15:38:48.226Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"London, UK"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Solutions Architecture, Sales Engineering, Pre-sales Technical Role, Technical Go-to-Market Management, Enterprise AI Deployments, API Integrations, Production LLM Use Cases, Scalable Technical Engagement Frameworks, Reusable Assets, Financial Services & Insurance, Healthcare, Telco, Retail, Digital-Native Technical Requirements, API Integration, Developer Experience, 
Traditional Enterprise Needs, Security, Compliance, Change Management, Prompt Engineering, LLM Evaluation, Architecting AI-Powered Systems, Ambiguous Problem Solving, Core Principles Identification, Passion for Making Powerful Technology Safe and Societally Beneficial, Creative Thinking, Staying Current with Emerging AI Research and Industry Trends","baseSalary":{"@type":"MonetaryAmount","currency":"GBP","value":{"@type":"QuantitativeValue","minValue":170000,"maxValue":215000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_65321c61-7dd"},"title":"Account Executive, Beneficial Deployments (Portuguese Speaking)","description":"<p>As an EMEA Nonprofit Account Executive at Anthropic, you&#39;ll drive adoption of safe, frontier AI by securing strategic partnerships with nonprofit organisations across Europe, the Middle East, and Africa.\\n\\nYou&#39;ll leverage your consultative sales expertise to propel revenue growth while becoming a trusted partner to nonprofit leaders, helping them embed and deploy AI to amplify their impact across programme delivery, fundraising, research, and operations.\\n\\nThe ideal candidate will be an exceptional salesperson with experience selling into EMEA markets , and specifically into Portuguese-speaking contexts , a passion for developing new market segments, and the ability to operate autonomously while partnering closely with SF-based teams.\\n\\nBy driving deployment of Anthropic&#39;s emerging products in the EMEA nonprofit sector, you will help organisations amplify their social impact while advancing the ethical development of AI.\\n\\nResponsibilities:\\n\\n- Win new business and drive revenue for Anthropic within EMEA nonprofit organisations, including INGOs, foundations, charitable trusts, and social enterprises. 
Own the full sales cycle from first outbound to launch, managing complex procurement processes across multiple jurisdictions\\n\\n- Design and execute innovative sales strategies tailored to EMEA market dynamics, regulatory environments, and cultural contexts. Analyse market landscapes across UK, EU, and emerging markets to translate high-level plans into targeted sales activities\\n\\n- Navigate complex stakeholder ecosystems including executive directors, trustees, programme officers, IT departments, and procurement committees across multiple geographies, building consensus in organisations with federated or matrix structures\\n\\n- Serve as the regional expert on EMEA nonprofit market dynamics, regulatory requirements, and competitive landscape. Provide insights that strengthen our value proposition and inform product roadmaps for international deployments\\n\\n- Build strategic relationships with EMEA nonprofit technology platforms, consultants, sector networks (e.g., Bond, NCVO, European Foundation Centre), and sector influencers to expand market reach\\n\\n- Partner effectively with SF-based teams across time zones, contributing to global sales methodology development while adapting playbooks and best practices for EMEA markets\\n\\nYou May Be a Good Fit If You Have:\\n\\n- 5+ years of experience prospecting and closing leads in EMEA markets, with particular focus on Portuguese-speaking markets (Portugal, Lusophone Africa , Angola, Mozambique, Cape Verde) and broader European market contexts\\n\\n- Proven ability to manage complex, multi-country sales cycles and navigate varying procurement frameworks, budget cycles, and approval processes across EMEA\\n\\n- Experience managing six-figure enterprise deal cycles\\n\\n- Experience selling to organisations with federated structures, matrix decision-making, or multi-entity governance (e.g., international federations, umbrella organisations)\\n\\n- Demonstrated history of exceeding quota while operating 
autonomously across time zones with limited direct supervision\\n\\n- Excellent communication skills with ability to adapt style across cultural contexts and present confidently to stakeholders from diverse backgrounds\\n\\n- Fluency in English required; native or professional fluency in Portuguese (European or Brazilian) required. Proficiency in additional languages (Spanish, French) a plus.\\n\\n- Passion for emerging technologies like AI, with interest in ensuring they are developed safely and responsibly\\n\\n- Interest in or passion for social impact and mission-driven work\\n\\nStrong Candidates May Also Have:\\n\\n- Experience selling to or working with EMEA nonprofit organisations, INGOs, foundations, or government/bilateral agencies (e.g., FCDO, GIZ, EU institutions)\\n\\n- Understanding of international development funding mechanisms, including institutional donors, bilateral agencies, and European foundation giving\\n\\n- Familiarity with nonprofit technology ecosystems popular in EMEA, including CRMs (Salesforce NPSP, Blackbaud, CiviCRM), and platforms like Raiser&#39;s Edge\\n\\n- Active involvement in the EMEA nonprofit community through board service, volunteering, or prior employment\\n\\n- Experience navigating complex procurement with major INGOs (e.g., Save the Children, Oxfam, MSF, IRC) or large UK charities\\n\\n- Understanding of specific nonprofit verticals in EMEA contexts (humanitarian, development, environment, health, migration)\\n\\n- Existing network within Lusophone nonprofit, INGO, or social sector communities strongly preferred\\n\\nLogistics:\\n\\nLocation: London preferred. 
Remote within UK/EU considered for exceptional candidates.\\n\\nTravel: Up to 40% travel within EMEA for customer meetings and events; quarterly travel to SF headquarters expected.\\n\\nTime Zone Coverage: Must be able to maintain regular overlap with SF-based teams (typically 4–5 hours daily).\\n\\nEducation: Bachelor&#39;s degree or equivalent experience.\\n\\nVisa Sponsorship: We sponsor visas where possible and retain immigration support for successful candidates.\\n\\nThe annual compensation range for this role is listed below.\\n\\nFor sales roles, the range provided is the role’s On Target Earnings (&quot;OTE&quot;) range, meaning that the range includes both the sales commissions/sales bonuses target and annual base salary for the role.\\n\\nAnnual Salary:€205.000-€250.000 EUR</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_65321c61-7dd","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5165651008","x-work-arrangement":"hybrid","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":"€205.000-€250.000 EUR","x-skills-required":["sales","EMEA","nonprofit","AI","strategic partnerships","consultative sales expertise","revenue growth","sales cycle","procurement processes","market dynamics","regulatory environments","cultural contexts","stakeholder ecosystems","executive directors","trustees","programme officers","IT departments","procurement committees","matrix structures","regional expert","regulatory requirements","competitive landscape","value proposition","product roadmaps","international deployments","strategic relationships","technology platforms","consultants","sector networks","sector influencers","global sales 
methodology","playbooks","best practices","time zones"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:37:06.803Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Dublin, IE"}},"employmentType":"FULL_TIME","occupationalCategory":"Sales","industry":"Technology","skills":"sales, EMEA, nonprofit, AI, strategic partnerships, consultative sales expertise, revenue growth, sales cycle, procurement processes, market dynamics, regulatory environments, cultural contexts, stakeholder ecosystems, executive directors, trustees, programme officers, IT departments, procurement committees, matrix structures, regional expert, regulatory requirements, competitive landscape, value proposition, product roadmaps, international deployments, strategic relationships, technology platforms, consultants, sector networks, sector influencers, global sales methodology, playbooks, best practices, time zones"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_e53014e6-57c"},"title":"Data Center Engineer, Resource Efficiency – Compute Supply","description":"<p>As a Power &amp; Resource Efficiency Engineer, you&#39;ll sit at the intersection of IT and facilities , building the systems, models, and control loops that optimize how we allocate and consume power, cooling, and physical capacity across our TPU/GPU fleet.</p>\n<p>You&#39;ll own the technical strategy for turning raw data center capacity into reliable, efficient compute, working across power topology, workload scheduling, and real-time telemetry to push utilization as close to the physical envelope as possible while maintaining our availability commitments.</p>\n<p>Key responsibilities include:</p>\n<ul>\n<li>Building models that forecast consumption across electrical and mechanical subsystems, informing capacity planning, energy procurement, oversubscription targets and risks, including statistical modeling of cluster 
utilization, workload profiles, and failure modes.</li>\n</ul>\n<ul>\n<li>Designing IT/OT interfaces that bridge compute orchestration with facility controls, enabling real-time telemetry across accelerator hardware, power distribution, cooling, and schedulers.</li>\n</ul>\n<ul>\n<li>Building and operating load management systems that use power and cooling topology to enable load management and power/thermal-aware placement to maximize throughput while meeting SLOs.</li>\n</ul>\n<ul>\n<li>Partnering with data center providers to drive design optimizations and hold them accountable to SLA-grade performance standards, providing technical diligence on partner architectures.</li>\n</ul>\n<p>In this role, you&#39;ll need to have deep knowledge of data center power distribution and cooling architectures, and how they interact with IT load profiles. Experience with reliability engineering, SLA development, and failure-mode analysis is also essential.</p>\n<p>Additionally, proficiency in statistical modeling and simulation for infrastructure capacity or power utilization, familiarity with SCADA/BMS/EPMS, telemetry pipelines, and control systems, and exposure to accelerator deployments and their power management interfaces are highly desirable.</p>\n<p>This is a challenging and rewarding role that requires a unique blend of technical expertise, business acumen, and collaboration skills. 
If you&#39;re passionate about data center infrastructure, AI, and sustainability, we encourage you to apply.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_e53014e6-57c","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5159642008","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$320,000-$405,000 USD","x-skills-required":["data center power distribution and cooling architectures","reliability engineering","SLA development","failure-mode analysis","statistical modeling and simulation","SCADA/BMS/EPMS","telemetry pipelines","control systems","accelerator deployments","power management interfaces"],"x-skills-preferred":["Python","similar languages","control theory","dynamical systems","cyber-physical systems design","energy storage","microgrid integration","demand response","behind-the-meter generation"],"datePosted":"2026-04-18T15:37:06.319Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Remote-Friendly, United States"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data center power distribution and cooling architectures, reliability engineering, SLA development, failure-mode analysis, statistical modeling and simulation, SCADA/BMS/EPMS, telemetry pipelines, control systems, accelerator deployments, power management interfaces, Python, similar languages, control theory, dynamical systems, cyber-physical systems design, energy storage, microgrid integration, demand response, behind-the-meter 
generation","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":320000,"maxValue":405000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_24d8202d-7fb"},"title":"Mission Manager – International Government (South America)","description":"<p>As a Mission Manager focused on International Government, you will collaborate directly with government agencies, public sector organisations, and partners in South America to strategize, scope, and execute mission-critical AI solutions using xAI&#39;s frontier models, APIs, tools, and integrations.</p>\n<p>You will serve as the primary bridge between international government stakeholders and xAI&#39;s engineering, research, and product teams,driving adoption of AI for civilian and public-sector use cases such as policy analysis, public health modelling, education, scientific research support, regulatory workflows, disaster response, and citizen services.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Lead problem discovery, requirements gathering, and product scoping with key South American government customers and organisations, often on-site during international assignments and travel.</li>\n<li>Support the full lifecycle of engagements: from initial outreach and proposal development through solution design, prototyping, deployment, integration, and ongoing optimisation.</li>\n<li>Meet customers on-site to deploy xAI models/products in their environments, integrate with existing systems, or enable cloud-based access,working closely alongside applied engineers and technical teams.</li>\n<li>Identify customer pain points, translate mission needs into technical specifications, and collaborate with xAI teams to customise or build LLM-powered solutions tailored to regional priorities.</li>\n<li>Ensure solutions comply with relevant international and regional regulations (e.g., data protection, 
sovereignty, and ethical AI guidelines).</li>\n<li>Analyse system performance (logs, prompts, outputs) to drive reliability, effectiveness, and continuous improvement.</li>\n<li>Build and maintain strong relationships with government stakeholders, multilateral organisations, and partners across South America.</li>\n<li>Contribute to new business development, including preparing proposals, demos, and strategic roadmaps aligned with xAI&#39;s capabilities</li>\n</ul>\n<p>Basic Qualifications:</p>\n<ul>\n<li>Proven experience partnering with international government agencies, regional/local administrations, multilateral organisations, or public-sector entities on AI, software, data, or technology initiatives (experience in South America or Latin American markets strongly preferred).</li>\n<li>5+ years in government relations, public sector consulting, solution engineering, product management, or customer-facing technical roles,ideally involving complex deployments or enterprise-scale AI/software projects.</li>\n<li>Strong understanding of AI applications in public sector contexts (e.g., policy, healthcare, education, environment, or emergency management).</li>\n<li>Excellent communication and presentation skills; ability to translate technical concepts for non-technical government audiences and vice versa.</li>\n<li>Willingness and ability to travel internationally (primarily South America, up to 30-50% as needed).</li>\n<li>Bachelor&#39;s degree or equivalent experience in a relevant field (e.g., international relations, computer science, public policy, engineering)</li>\n</ul>\n<p>Preferred Skills and Experience:</p>\n<ul>\n<li>Direct experience working with South American governments or organisations (e.g., Argentina, Chile, Colombia, Peru, Honduras or multilateral bodies like OAS/IDB).</li>\n<li>Familiarity with regional data protection regulations, sovereignty requirements, or ethical AI frameworks in Latin America.</li>\n<li>Technical background (e.g., 
experience with LLMs, APIs, cloud deployments, or data analysis) or proven ability to work closely with engineering teams.</li>\n<li>Fluency in Spanish and/or Portuguese (in addition to English) is highly desirable.</li>\n<li>Track record of delivering high-impact projects under tight timelines in dynamic or resource-constrained environments</li>\n</ul>\n<p>Compensation and Benefits: $180,000 - $440,000 USD Base salary is just one part of our total rewards package at xAI, which also includes equity, comprehensive medical, vision, and dental coverage, access to a 401(k) retirement plan, short &amp; long-term disability insurance, life insurance, and various other discounts and perks</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_24d8202d-7fb","directApply":true,"hiringOrganization":{"@type":"Organization","name":"xAI","sameAs":"https://www.xai.com","logo":"https://logos.yubhub.co/xai.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/xai/jobs/5084214007","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,000 - $440,000 USD","x-skills-required":["Government relations","Public sector consulting","Solution engineering","Product management","Customer-facing technical roles","Complex deployments","Enterprise-scale AI/software projects","AI applications in public sector contexts","Policy","Healthcare","Education","Environment","Emergency management","Excellent communication and presentation skills","Technical concepts for non-technical government audiences","International travel"],"x-skills-preferred":["Direct experience working with South American governments or organisations","Familiarity with regional data protection regulations, sovereignty requirements, or ethical AI frameworks in Latin America","Technical background (e.g., experience with LLMs, APIs, cloud deployments, or data 
analysis)","Fluency in Spanish and/or Portuguese (in addition to English)","Track record of delivering high-impact projects under tight timelines in dynamic or resource-constrained environments"],"datePosted":"2026-04-18T15:24:25.104Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Palo Alto, CA"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Government relations, Public sector consulting, Solution engineering, Product management, Customer-facing technical roles, Complex deployments, Enterprise-scale AI/software projects, AI applications in public sector contexts, Policy, Healthcare, Education, Environment, Emergency management, Excellent communication and presentation skills, Technical concepts for non-technical government audiences, International travel, Direct experience working with South American governments or organisations, Familiarity with regional data protection regulations, sovereignty requirements, or ethical AI frameworks in Latin America, Technical background (e.g., experience with LLMs, APIs, cloud deployments, or data analysis), Fluency in Spanish and/or Portuguese (in addition to English), Track record of delivering high-impact projects under tight timelines in dynamic or resource-constrained environments","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180000,"maxValue":440000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_d5b743bb-d8f"},"title":"Product Manager, AI Platforms","description":"<p>The AI Platform Product Manager will drive the strategy and execution of Shield AI&#39;s next-generation autonomy intelligence stack. 
This PM owns the product vision and roadmap for the Hivemind AI Platform, ensuring we can manufacture, govern, and field advanced world models, robotics foundation models, and vision-language-action systems safely and at scale.</p>\n<p>This role sits at the intersection of AI/ML, autonomy, model lifecycle, infrastructure, and product strategy. The PM partners closely with engineering, AI research, Hivemind Solutions, and field teams to deliver the tooling that enables sovereign autonomy, AI Factories at the edge, and continuous learning,capabilities that are central to Shield AI&#39;s strategic direction.</p>\n<p>This is a high-impact role for an experienced product leader excited to define how foundation models are trained, validated, governed, and deployed across thousands of autonomous systems in highly contested environments.</p>\n<p><strong>Responsibilities:</strong></p>\n<ul>\n<li>AI Model Development &amp; Training Platform</li>\n</ul>\n<p>Own the roadmap for foundation model training workflows, including dataset ingestion, curation, labeling, synthetic data generation, domain model training, and distillation pipelines. Define requirements for world models, robotics models, and VLA-based training, evaluation, and specialization. Lead the evolution of MLOps capabilities in Forge, including data lineage, experiment tracking, model versioning, and scalable evaluation suites.</p>\n<ul>\n<li>Data, Simulation &amp; Synthetic Data Factory</li>\n</ul>\n<p>Define product requirements for synthetic data generation, simulation-integrated data flywheels, and automated scenario generation. 
Partner with Digital Twin, Simulation, and autonomy teams to convert natural-language mission inputs into data needs, training procedures, and model variants.</p>\n<ul>\n<li>Safe Deployment &amp; Model Governance</li>\n</ul>\n<p>Lead the development of model governance and auditability tooling, including model cards, dataset rights, lineage tracking, safety gates, and compliance evidence. Build guardrails and workflows to safely deploy models onto edge hardware in disconnected, GPS- or comms-denied environments. Partner with Safety, Certification, Cyber, and Engineering teams to ensure traceability and evaluation pipelines meet operational and accreditation requirements.</p>\n<ul>\n<li>Edge Deployment &amp; AI Factory Integration</li>\n</ul>\n<p>Partner with Pilot, EdgeOS, and hardware teams to integrate foundation-model-based perception and reasoning into autonomy behaviors. Define requirements for distillation, quantization, and inference tooling as part of the “three-computer” development and deployment model. Ensure closed-loop workflows between cloud model training and edge-native execution.</p>\n<ul>\n<li>Cross-Functional Leadership</li>\n</ul>\n<p>Collaborate with Engineering, Research, Product, Customer Engagement, and Solutions teams to ensure model outputs meet mission and platform constraints. Translate advanced AI capabilities into intuitive workflows that platform OEMs and partner nations can use to build sovereign AI factories. Sequence foundational capabilities that unblock autonomy, simulation, and customer-facing product teams.</p>\n<ul>\n<li>User &amp; Customer Impact</li>\n</ul>\n<p>Develop deep empathy for ML engineers, autonomy developers, and Solutions engineers who rely on the platform. Capture operational data gaps, mission-driven model needs, and domain-specific specialization requirements. 
Lead demos and onboarding for model-development capabilities across internal and external teams.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_d5b743bb-d8f","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Shield AI","sameAs":"https://www.shield.ai","logo":"https://logos.yubhub.co/shield.ai.png"},"x-apply-url":"https://jobs.lever.co/shieldai/7886f437-2d5e-4616-8dcb-3dc488f1f585","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$190,000 - $290,000 a year","x-skills-required":["AI Model Development & Training Platform","Data, Simulation & Synthetic Data Factory","Safe Deployment & Model Governance","Edge Deployment & AI Factory Integration","Cross-Functional Leadership","User & Customer Impact","Strong engineering background","Deep understanding of foundation models, robotics models, multimodal models, MLOps, and training infrastructure","Experience managing complex products spanning data pipelines, cloud training clusters, model governance, and edge deployments","Proven success partnering with research teams to transition ML innovations into stable, production-grade workflows"],"x-skills-preferred":["Experience working on autonomy, robotics, embedded AI, or mission-critical systems","Hands-on familiarity with GPU infrastructure, distributed training, or data lakehouse architectures","Experience supporting defense, dual-use, or safety-critical AI systems","Background designing or operating AI Factory–style pipelines (data → training → evaluation → distillation → edge deployment)","Advanced degree in engineering, ML/AI, robotics, or a related field"],"datePosted":"2026-04-17T13:02:54.419Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San 
Diego"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"AI Model Development & Training Platform, Data, Simulation & Synthetic Data Factory, Safe Deployment & Model Governance, Edge Deployment & AI Factory Integration, Cross-Functional Leadership, User & Customer Impact, Strong engineering background, Deep understanding of foundation models, robotics models, multimodal models, MLOps, and training infrastructure, Experience managing complex products spanning data pipelines, cloud training clusters, model governance, and edge deployments, Proven success partnering with research teams to transition ML innovations into stable, production-grade workflows, Experience working on autonomy, robotics, embedded AI, or mission-critical systems, Hands-on familiarity with GPU infrastructure, distributed training, or data lakehouse architectures, Experience supporting defense, dual-use, or safety-critical AI systems, Background designing or operating AI Factory–style pipelines (data → training → evaluation → distillation → edge deployment), Advanced degree in engineering, ML/AI, robotics, or a related field","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":190000,"maxValue":290000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_b35f1f86-3ad"},"title":"Senior V-BAT Air Vehicle Operator, Deployed Operations Site Lead","description":"<p>Execute and lead V-BAT flight operations in support of DoD and foreign customers.</p>\n<p>This is an operationally focused role requiring strong aviation judgment, disciplined execution, and the ability to lead small teams in austere and high-visibility conditions.</p>\n<p>Responsibilities:</p>\n<ul>\n<li><p>Leading mission planning activities, performing comprehensive risk assessments, delivering pre-mission briefings, executing flight operations, and 
conducting structured post-flight debriefs.</p>\n</li>\n<li><p>Conducting Functional Check Flights (FCFs) and system validation as required, ensuring aircraft configuration control and logbook accuracy, coordinating with maintenance to sustain fleet readiness, and supporting overall operational discipline.</p>\n</li>\n<li><p>Mentoring junior operators, serving as the on-site operational representative to customers, and traveling extensively in support of deployments.</p>\n</li>\n</ul>\n<p>Requirements:</p>\n<ul>\n<li><p>At least 1,500 documented flight hours</p>\n</li>\n<li><p>Private Pilot License (PPL)</p>\n</li>\n<li><p>Instrument Rating and/or Commercial Pilot Rating</p>\n</li>\n<li><p>FAA Part 107 Remote Pilot Certificate</p>\n</li>\n<li><p>Ability to obtain and maintain an FAA Class II Medical Certificate</p>\n</li>\n<li><p>Ability to obtain and hold a Secret DoD security clearance</p>\n</li>\n<li><p>Ability to pass background, drug screening, and customer-required medical/dental screenings</p>\n</li>\n<li><p>Willingness to travel up to 50–75% in support of deployments</p>\n</li>\n<li><p>Ability to lead and operate independently in small teams under austere conditions</p>\n</li>\n<li><p>Strong aviation decision-making and risk management skills</p>\n</li>\n<li><p>Previous experience serving as a Site Lead responsible for leading and overseeing aviation operations</p>\n</li>\n</ul>\n<p>Preferred Qualifications:</p>\n<ul>\n<li><p>Experience operating as crew on manned aircraft</p>\n</li>\n<li><p>Prior military service, particularly with overseas deployments</p>\n</li>\n<li><p>Maritime / shipboard UAS experience</p>\n</li>\n<li><p>Instructor experience (manned or unmanned)</p>\n</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_b35f1f86-3ad","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Shield 
AI","sameAs":"https://www.shield.ai","logo":"https://logos.yubhub.co/shield.ai.png"},"x-apply-url":"https://jobs.lever.co/shieldai/52bfd55b-d539-4af1-b8bf-dbd2ccf12161","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$98,000 - $155,000 a year","x-skills-required":["Private Pilot License (PPL)","Instrument Rating and/or Commercial Pilot Rating","FAA Part 107 Remote Pilot Certificate","Ability to obtain and maintain an FAA Class II Medical Certificate","Ability to obtain and hold a Secret DoD security clearance","Ability to pass background, drug screening, and customer-required medical/dental screenings"],"x-skills-preferred":["Experience operating as crew on manned aircraft","Prior military service, particularly with overseas deployments","Maritime / shipboard UAS experience","Instructor experience (manned or unmanned)"],"datePosted":"2026-04-17T13:00:05.779Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Dallas"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Private Pilot License (PPL), Instrument Rating and/or Commercial Pilot Rating, FAA Part 107 Remote Pilot Certificate, Ability to obtain and maintain an FAA Class II Medical Certificate, Ability to obtain and hold a Secret DoD security clearance, Ability to pass background, drug screening, and customer-required medical/dental screenings, Experience operating as crew on manned aircraft, Prior military service, particularly with overseas deployments, Maritime / shipboard UAS experience, Instructor experience (manned or unmanned)","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":98000,"maxValue":155000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_6f25b435-69f"},"title":"Technical Support Engineer – 
On-Premise","description":"<p>We are seeking a Technical Support Engineer - On-Premise Infrastructure to join our Support team in France. This role is ideal for someone who excels at technical troubleshooting, incident investigation, and customer communication in a B2B environment.</p>\n<p>As a key member of the support team, you will be responsible for handling escalated technical issues from on-premise enterprise clients, reproducing complex problems, and collaborating with engineering, data, and product teams to ensure swift resolution. You will report directly to the Head of Support, and play a critical role in maintaining customer satisfaction and improving our support operations.</p>\n<p>Key Responsibilities:</p>\n<ul>\n<li>Frontline Investigation: Handle escalated tickets from enterprise clients via Intercom, focusing on on-premise infrastructure and AI-related issues (e.g., deployment, performance, integration, security).</li>\n<li>Root Cause Analysis: Ask the right questions to gather context, reproduce issues in test environments, and diagnose technical problems (systems, networks, storage, GPU clusters, AI models).</li>\n<li>Cross-Team Collaboration: Work closely with engineering, and deployment teams to escalate, track, and resolve incidents efficiently.</li>\n<li>Proactive Communication: Provide clear, empathetic, and timely updates to clients and internal stakeholders, ensuring transparency throughout the resolution process.</li>\n</ul>\n<p>Knowledge Sharing &amp; Process Improvement:</p>\n<ul>\n<li>Documentation: Create and update technical FAQs, troubleshooting guides, and internal knowledge base articles to empower self-serve/L1 team and reduce recurrence of issues.</li>\n<li>Feedback Loop: Identify recurring pain points in on-premise deployments and suggest improvements to product, documentation, or support workflows.</li>\n</ul>\n<p>Customer-Centric Approach:</p>\n<ul>\n<li>Empathy &amp; Ownership: Maintain a customer-first mindset, ensuring 
clients feel heard and supported, even in high-pressure situations.</li>\n<li>Solution-Oriented: Proactively propose workarounds, fixes, or process optimizations to enhance the customer experience and reduce incident resolution time.</li>\n</ul>\n<p>Technical Expertise:</p>\n<ul>\n<li>On-Premise &amp; Cloud Environments: Deep understanding of Linux/Windows servers, networking, virtualization, storage, security (firewalls, RGPD compliance), and cloud providers (AWS, GCP, Azure).</li>\n<li>Kubernetes/Helm: Experience with deployment, scaling, and troubleshooting of applications in Kubernetes clusters using Helm charts.</li>\n<li>Terraform: Familiarity with Infrastructure as Code (IaC) for managing cloud resources is a strong plus.</li>\n<li>AI Infrastructure: Knowledge of AI/ML pipelines, LLM/RAG deployments, GPU acceleration, and data storage solutions for enterprise clients.</li>\n<li>Tooling: Proficiency in Intercom, monitoring tools, scripting (Bash/Python), and diagnostic utilities (logs, performance metrics).</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_6f25b435-69f","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Mistral AI","sameAs":"https://mistral.ai/careers","logo":"https://logos.yubhub.co/mistral.ai.png"},"x-apply-url":"https://jobs.lever.co/mistral/f00a13aa-61f1-4c56-993c-20846adc2b15","x-work-arrangement":"onsite","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Linux/Windows servers","Networking","Virtualization","Storage","Security","Kubernetes/Helm","Terraform","AI/ML pipelines","LLM/RAG deployments","GPU acceleration","Data storage solutions","Intercom","Monitoring tools","Scripting (Bash/Python)","Diagnostic 
utilities"],"x-skills-preferred":[],"datePosted":"2026-04-17T12:47:50.345Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Paris"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Linux/Windows servers, Networking, Virtualization, Storage, Security, Kubernetes/Helm, Terraform, AI/ML pipelines, LLM/RAG deployments, GPU acceleration, Data storage solutions, Intercom, Monitoring tools, Scripting (Bash/Python), Diagnostic utilities"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_1d67d51e-39e"},"title":"CyberSecurity, Offensive Security Engineer","description":"<p>At Mistral AI, we&#39;re pushing the boundaries of what&#39;s possible with agentic systems, building products like Mistral Studio and Mistral Vibe that redefine how users interact with AI.</p>\n<p>As a Security Researcher, you&#39;ll play a pivotal role in safeguarding these innovations by anticipating, identifying, and mitigating risks before they materialize. This isn&#39;t just about finding vulnerabilities; it&#39;s about shaping the future of secure AI by embedding an attacker&#39;s mindset into everything we build.</p>\n<p>You&#39;ll work at the intersection of offensive security, AI safety, and product development, collaborating with cross-functional teams to harden our systems against evolving threats. 
Your expertise will directly influence how we design, deploy, and protect our most critical assets, ensuring our agents remain resilient, trustworthy, and ahead of adversaries.</p>\n<p>Key responsibilities include:</p>\n<ul>\n<li><p>Proactively hunting for vulnerabilities in the interactions between our agentic applications, cloud infrastructure, and foundational models, with a focus on realistic, high-impact attack vectors.</p>\n</li>\n<li><p>Designing and executing red and purple team exercises, simulating sophisticated adversarial scenarios to stress-test our defenses and refine our detection capabilities.</p>\n</li>\n<li><p>Partnering with defensive teams to translate offensive insights into actionable improvements, from detection engineering to incident response.</p>\n</li>\n<li><p>Conducting in-depth penetration testing across our product suite, including AI-driven workflows, custom infrastructure, and user-facing interfaces.</p>\n</li>\n<li><p>Building and automating offensive tooling to scale your impact, leveraging cutting-edge techniques to stay ahead of emerging threats.</p>\n</li>\n<li><p>Communicating findings with clarity and conviction, ensuring technical and non-technical stakeholders understand risks and prioritize mitigations effectively.</p>\n</li>\n<li><p>Shaping Mistral AI&#39;s security strategy by contributing attacker-informed perspectives to threat modeling, risk assessment, and architectural decisions.</p>\n</li>\n</ul>\n<p>We&#39;re looking for someone with 7+ years of offensive security experience, deep knowledge of AI/ML security risks, and hands-on experience assessing modern technology stacks. 
A builder&#39;s mindset, strong intuition for trust boundaries, and outstanding communication skills are also essential.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_1d67d51e-39e","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Mistral AI","sameAs":"https://mistral.ai","logo":"https://logos.yubhub.co/mistral.ai.png"},"x-apply-url":"https://jobs.lever.co/mistral/2414ad08-5756-4875-afb5-04d26464b397","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["offensive security","AI/ML security risks","custom Kubernetes deployments","cloud-native architectures","CI/CD pipelines","GitHub security best practices","macOS/Linux internals","Python/React-based applications","data science toolchains","AI/ML infrastructure"],"x-skills-preferred":["background in AI, data science, or related fields","experience in high-growth startups or research-driven organizations","expertise in adjacent disciplines"],"datePosted":"2026-04-17T12:47:11.116Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Paris"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"offensive security, AI/ML security risks, custom Kubernetes deployments, cloud-native architectures, CI/CD pipelines, GitHub security best practices, macOS/Linux internals, Python/React-based applications, data science toolchains, AI/ML infrastructure, background in AI, data science, or related fields, experience in high-growth startups or research-driven organizations, expertise in adjacent disciplines"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_2e8a2997-260"},"title":"Senior Infrastructure Engineer","description":"<p>We are open to hiring at multiple 
levels for this role, depending on experience, impact, and demonstrated ownership. While this role is level-agnostic, it is best suited for engineers with experience owning and working in highly ambiguous problem spaces.</p>\n<p>About the company:\nThe mining industry has steadily become worse at finding new ore deposits, requiring &gt;10X more capital to make discoveries compared to 30 years ago. KoBold Metals builds AI models for mineral exploration and deploys those models, alongside our novel sensors, to guide decisions on KoBold-owned-and-operated exploration programs.</p>\n<p>About The Role:\nIn this role, you will partner with exploration and engineering teams to build reliable, scalable infrastructure that makes it easier to turn data and models into real-world exploration insights. You will improve observability, streamline MLOps workflows, and maintain shared tools like JupyterHub that enable faster experimentation and collaboration. Your work will help create a solid foundation for scientists and engineers to focus on discovery instead of infrastructure.</p>\n<p>Responsibilities</p>\n<ul>\n<li>Design, build, and operate compute infrastructure that is both scalable and reliable to support critical services.</li>\n<li>Work closely with engineering teams to embed observability, reliability, and security throughout the software development process.</li>\n<li>Create and maintain automation for monitoring, deployments, and incident response to keep operations efficient and predictable.</li>\n<li>Lead or support capacity planning, performance reviews, and system tuning to ensure stable and efficient systems.</li>\n<li>Join the on-call rotation and take part in incident response, troubleshooting, and resolution.</li>\n<li>Develop and refine monitoring and alerting to catch issues early and reduce downtime.</li>\n<li>Establish and maintain disaster recovery and business continuity practices that protect the organization against failures.</li>\n<li>Regularly review 
and improve our tools and processes to strengthen system visibility and reliability.</li>\n<li>Investigate points of fragility in distributed systems and understand how complex systems behave under stress in order to improve resilience.</li>\n<li>Continually learn about mineral exploration through reading, discussions with exploration team members, periodic rotation on an exploration team and time in the field with geologists</li>\n</ul>\n<p>Qualifications</p>\n<ul>\n<li>5+ years of experience as an Infrastructure Engineer, Site Reliability Engineer or in a similar role</li>\n<li>Strong scripting and programming skills (Python, Go, Java or JavaScript/ Node.js )</li>\n<li>Experience with IaC tools like Terraform and container orchestration tools like Kubernetes and Docker</li>\n<li>Experience with cloud platforms such as AWS</li>\n<li>Experience operating or administering JupyterHub in a multi-user environment</li>\n<li>Understanding of MLOps workflows, including model training, deployment, and related tooling</li>\n<li>Excellent communication &amp; collaboration skills and a continuous improvement mindset</li>\n<li>Proven ability to troubleshoot complex issues and implement effective solutions</li>\n<li>Proven ability to thrive in dynamic and evolving environments, effectively navigating uncertainty and incomplete information.</li>\n<li>Proven ability to grow expertise, influence &amp; educate others</li>\n<li>Comfortable making informed decisions with limited data, adapting quickly to new circumstances, and maintaining focus on strategic objectives while driving clarity for the team.</li>\n<li>Intellectual curiosity and eagerness to learn about all aspects of mineral exploration, particularly in the geology domain. 
Enjoys constantly learning such that you are driving insights through using our tools in exploration and willing to work directly with geologists in the field.</li>\n<li>Ability to explain technical problems to and collaborate on solutions with domain experts who are not infrastructure engineers. A strong communicator who enjoys working with colleagues across the company.</li>\n<li>Excitement about joining a fast-growing early-stage company, comfort with a dynamic work environment, and eagerness to take on an evolving range of responsibilities.</li>\n<li>Keen not just to build cool technology, but to figure out what technical product to build to best achieve the business objectives of the company.</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_2e8a2997-260","directApply":true,"hiringOrganization":{"@type":"Organization","name":"KoBold Metals","sameAs":"https://koboldmetals.com/","logo":"https://logos.yubhub.co/koboldmetals.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/koboldmetals/jobs/4002126005","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$170,000 - $230,000","x-skills-required":["scripting","programming","IaC","container orchestration","cloud platforms","MLOps workflows","observability","reliability","security","automation","monitoring","deployments","incident response","capacity planning","performance reviews","system tuning","disaster recovery","business continuity","tools","processes","distributed systems","complex systems","resilience","mineral 
exploration","geology"],"x-skills-preferred":[],"datePosted":"2026-04-17T12:40:33.164Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Remote"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"scripting, programming, IaC, container orchestration, cloud platforms, MLOps workflows, observability, reliability, security, automation, monitoring, deployments, incident response, capacity planning, performance reviews, system tuning, disaster recovery, business continuity, tools, processes, distributed systems, complex systems, resilience, mineral exploration, geology","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":170000,"maxValue":230000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_bbaf1090-aa3"},"title":"Senior Product Manager, Cyngn Insight & Fleet Management","description":"<p>About Cyngn</p>\n<p>Cyngn is a publicly-traded autonomous technology company that deploys self-driving industrial vehicles to factories, warehouses, and other facilities throughout North America. 
We are looking for a Senior Product Manager to lead the strategy and execution of Cyngn Insight and Fleet Management, the company&#39;s core software platform for operating, monitoring, and scaling autonomous vehicle fleets in production environments.</p>\n<p>Responsibilities</p>\n<ul>\n<li>Own the product vision, strategy, and roadmap for Cyngn Insight and Fleet Management, including fleet operations, monitoring, analytics, safety workflows, and enterprise integrations.</li>\n<li>Define and maintain a multi-quarter roadmap that balances customer needs, operational scalability, system reliability, security, and commercial objectives.</li>\n<li>Act as the senior product owner for fleet and operations initiatives, driving clear prioritization, trade-off decisions, and execution across concurrent programs.</li>\n<li>Partner with Engineering and Program Management to translate platform strategy into actionable requirements, milestones, and delivery plans.</li>\n<li>Translate customer workflows, operational data, and deployment feedback into product requirements that improve fleet uptime, efficiency, safety visibility, and ease of use.</li>\n<li>Collaborate with Sales, Business Development, Marketing, and Customer Success on go-to-market strategy, customer onboarding, and long-term account success.</li>\n<li>Define, track, and communicate success metrics (KPIs) for fleet performance and platform adoption, including utilization, uptime, incident response, and customer value realization.</li>\n</ul>\n<p>Qualifications</p>\n<ul>\n<li>BS/BA in a technical field or equivalent practical experience.</li>\n<li>5+ years of product management experience, including at least 2 years in a senior or lead PM role.</li>\n<li>Demonstrated success delivering complex, multi-tenant software platforms or enterprise products operating at scale.</li>\n<li>Strong technical aptitude with experience working closely with engineering teams on distributed systems, data platforms, or cloud-based 
products.</li>\n<li>Excellent communication and leadership skills, with comfort engaging executives, customers, and deeply technical stakeholders.</li>\n</ul>\n<p>Preferred Qualifications</p>\n<ul>\n<li>Experience with fleet management systems, enterprise SaaS platforms, or operational analytics products.</li>\n<li>Background in software development, data platforms, or systems engineering (e.g., Python, SQL, cloud infrastructure).</li>\n<li>Familiarity with autonomous vehicles, robotics operations, industrial automation, or large-scale deployments in logistics, manufacturing, or warehousing.</li>\n</ul>\n<p>Benefits &amp; Perks</p>\n<ul>\n<li>Health benefits (Medical, Dental, Vision, HSA and FSA (Health &amp; Dependent Daycare), Employee Assistance Program, 1:1 Health Concierge)</li>\n<li>Life, Short-term and long-term disability insurance (Cyngn funds 100% of premiums)</li>\n<li>Company 401(k)</li>\n<li>Commuter Benefits</li>\n<li>Flexible vacation policy</li>\n<li>Sabbatical leave opportunity after 5 years with the company</li>\n<li>Paid Parental Leave</li>\n<li>Daily lunches for in-office employees and fully-stocked kitchen with snacks and beverages</li>\n<li>Monthly meal and tech allowances for remote employees</li>\n<li>Allowance to purchase new headphones when you join!</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_bbaf1090-aa3","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Cyngn","sameAs":"https://www.cyngn.com/","logo":"https://logos.yubhub.co/cyngn.com.png"},"x-apply-url":"https://jobs.lever.co/cyngn/29a77488-5a84-4242-aba3-a5cca4a6681c","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$160,000-180,000 per year","x-skills-required":["product management","software development","data platforms","cloud-based products","fleet management 
systems","enterprise SaaS platforms","operational analytics products"],"x-skills-preferred":["Python","SQL","cloud infrastructure","autonomous vehicles","robotics operations","industrial automation","large-scale deployments"],"datePosted":"2026-04-17T12:28:00.826Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Mountain View"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"product management, software development, data platforms, cloud-based products, fleet management systems, enterprise SaaS platforms, operational analytics products, Python, SQL, cloud infrastructure, autonomous vehicles, robotics operations, industrial automation, large-scale deployments","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":160000,"maxValue":180000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_8e582153-6af"},"title":"Senior DevOps Lead - Cloud & Autonomous System","description":"<p>About Cyngn</p>\n<p>Cyngn is a publicly-traded autonomous technology company that deploys self-driving industrial vehicles to factories, warehouses, and other facilities throughout North America.</p>\n<p>We are a small company with under 100 employees, operating with the energy of a startup. However, we&#39;re also publicly traded, which means our employees get access to the liquidity of our publicly-traded equity.</p>\n<p>As a Senior DevOps Lead at Cyngn, you will play a vital role in architecting and managing infrastructure across cloud and autonomous vehicle systems. 
This position combines traditional cloud DevOps leadership with specialized expertise in robotics and autonomous systems infrastructure.</p>\n<p>Responsibilities</p>\n<ul>\n<li>Lead and architect cloud and vehicle infrastructure initiatives across AWS and ROS/Linux environments</li>\n<li>Design and implement scalable solutions for both cloud services and autonomous vehicle systems</li>\n<li>Establish and maintain DevOps best practices, CI/CD pipelines, and infrastructure as code</li>\n<li>Drive observability, monitoring, and incident response strategies</li>\n<li>Optimize performance and cost efficiency of cloud and edge computing resources</li>\n<li>Mentor team members and foster a developer-friendly environment</li>\n<li>Manage on-call rotations and incident response processes</li>\n<li>Architect solutions for processing and storing large-scale vehicle telemetry data</li>\n<li>Lead security initiatives and compliance efforts across infrastructure</li>\n</ul>\n<p>Requirements</p>\n<ul>\n<li>10+ years of relevant DevOps/Infrastructure experience</li>\n<li>Proven track record as a technical lead in platform or infrastructure teams</li>\n<li>Advanced expertise in AWS services, infrastructure as code (Terraform), and Kubernetes</li>\n<li>Strong experience with service mesh (Istio) and Helm/Kustomize</li>\n<li>Deep understanding of ROS/ROS2 and Linux kernel configurations</li>\n<li>Experience with GPU configurations and ML infrastructure</li>\n<li>Expertise in ARM and NVIDIA CUDA platform configurations</li>\n<li>Strong programming skills in Python and shell scripting</li>\n<li>Experience with infrastructure automation (Ansible)</li>\n<li>Expertise in CI/CD tools (Jenkins, GitHub Actions)</li>\n<li>Strong system architecture and design skills</li>\n<li>Excellence in technical documentation</li>\n<li>Outstanding problem-solving abilities</li>\n<li>Strong leadership and mentoring capabilities</li>\n</ul>\n<p>Nice to haves</p>\n<ul>\n<li>Experience with autonomous vehicle 
systems</li>\n<li>Track record of optimizing GPU-based ML infrastructure</li>\n<li>Experience with large-scale IoT deployments</li>\n<li>Contributions to open-source projects</li>\n<li>Experience with real-time systems and low-latency requirements</li>\n<li>Expertise in security implementations including SSO, IdP, and AWS Cognito</li>\n<li>Experience with JFrog artifactory and container registry management</li>\n<li>Proficiency in AWS IoT Greengrass</li>\n<li>Experience with container resource management on edge devices</li>\n<li>Understanding of CPU affinity and priority scheduling</li>\n<li>Track record of implementing cost optimization strategies</li>\n<li>Experience with scaling systems both horizontally and vertically</li>\n</ul>\n<p>Benefits &amp; Perks</p>\n<ul>\n<li>Health benefits (Medical, Dental, Vision, HSA and FSA (Health &amp; Dependent Daycare), Employee Assistance Program, 1:1 Health Concierge)</li>\n<li>Life, Short-term, and long-term disability insurance (Cyngn funds 100% of premiums)</li>\n<li>Company 401(k)</li>\n<li>Commuter Benefits</li>\n<li>Flexible vacation policy</li>\n<li>Sabbatical leave opportunity after five years with the company</li>\n<li>Paid Parental Leave</li>\n<li>Daily lunches for in-office employees</li>\n<li>Monthly meal and tech allowances for remote employees</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_8e582153-6af","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Cyngn","sameAs":"https://www.cyngn.com/","logo":"https://logos.yubhub.co/cyngn.com.png"},"x-apply-url":"https://jobs.lever.co/cyngn/1c31b7d8-cf85-472f-9358-1e10189cf815","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$198,000-225,000 per year","x-skills-required":["AWS services","infrastructure as code (Terraform)","Kubernetes","service mesh 
(Istio)","Helm/Kustomize","ROS/ROS2","Linux kernel configurations","GPU configurations","ML infrastructure","ARM","NVIDIA CUDA platform configurations","Python","shell scripting","infrastructure automation (Ansible)","CI/CD tools (Jenkins, GitHub Actions)","system architecture and design skills","technical documentation","problem-solving abilities","leadership and mentoring capabilities"],"x-skills-preferred":["autonomous vehicle systems","optimizing GPU-based ML infrastructure","large-scale IoT deployments","open-source projects","real-time systems and low-latency requirements","security implementations including SSO, IdP, and AWS Cognito","JFrog artifactory and container registry management","AWS IoT Greengrass","container resource management on edge devices","CPU affinity and priority scheduling","cost optimization strategies","scaling systems both horizontally and vertically"],"datePosted":"2026-04-17T12:27:09.593Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Mountain View"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"AWS services, infrastructure as code (Terraform), Kubernetes, service mesh (Istio), Helm/Kustomize, ROS/ROS2, Linux kernel configurations, GPU configurations, ML infrastructure, ARM, NVIDIA CUDA platform configurations, Python, shell scripting, infrastructure automation (Ansible), CI/CD tools (Jenkins, GitHub Actions), system architecture and design skills, technical documentation, problem-solving abilities, leadership and mentoring capabilities, autonomous vehicle systems, optimizing GPU-based ML infrastructure, large-scale IoT deployments, open-source projects, real-time systems and low-latency requirements, security implementations including SSO, IdP, and AWS Cognito, JFrog artifactory and container registry management, AWS IoT Greengrass, container resource management on edge devices, CPU affinity and priority 
scheduling, cost optimization strategies, scaling systems both horizontally and vertically","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":198000,"maxValue":225000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_b11e687b-75e"},"title":"Robotics Integration Engineer","description":"<p>About Cyngn</p>\n<p>Cyngn is a publicly-traded autonomous technology company that deploys self-driving industrial vehicles to factories, warehouses, and other facilities throughout North America.</p>\n<p>We are looking for innovative, motivated, and experienced leaders to join us and move this field forward.</p>\n<p>Key reasons to join Cyngn:</p>\n<ul>\n<li>We operate with the energy of a startup, but with the liquidity of a publicly-traded company.</li>\n<li>Our autonomous vehicles are deployed to real clients right now, so your work will have a tangible, visible impact.</li>\n<li>We&#39;re a welcoming, diverse team of sharp thinkers and kind humans.</li>\n</ul>\n<p>About this role:</p>\n<p>As a Robotics Integration Engineer, you&#39;ll help bring autonomy to life by integrating hardware + sensors + vehicle software into a reliable, production-ready stack.</p>\n<p>Responsibilities</p>\n<ul>\n<li>Integrate and validate sensors such as LiDAR, cameras, IMU/GNSS, and other vehicle hardware.</li>\n<li>Own mission-critical system pieces: state management, health monitoring, diagnostics, logging, and fleet-ready tooling.</li>\n<li>Work on vehicle communications including CAN bus (interfaces, message handling, reliability, basic ECU/firmware touchpoints).</li>\n<li>Build and maintain the glue that makes the stack work: drivers, bring-up scripts, config management, calibration workflows, and time synchronization basics.</li>\n<li>Troubleshoot complex issues across hardware + software + networking (timing drift, dropped frames, driver issues, 
flaky connections, bandwidth constraints).</li>\n<li>Profile and optimize performance for real-time-ish workloads (high-bandwidth sensor streams, CPU/memory bottlenecks, startup stability).</li>\n<li>Help create and maintain integration tests and validation workflows (reproducible tests, automated checks, regression catches, log replay).</li>\n<li>Collaborate across perception, localization, controls, and product teams to integrate systems cleanly and ship improvements quickly.</li>\n<li>Write clear documentation for integration procedures, system configuration, and “how to debug this when it breaks.”</li>\n</ul>\n<p>Qualifications</p>\n<ul>\n<li>2–4+ years in robotics integration, autonomy, embedded/systems engineering, or adjacent experience working close to hardware.</li>\n<li>Strong programming ability in: C++, Python, Shell scripting, and Linux.</li>\n<li>Solid experience with Linux (Ubuntu), including building, packaging, and running systems in the field.</li>\n<li>Comfort with sensor + hardware bring-up (drivers, calibration workflows, time sync concepts, logs, reproducible setup).</li>\n<li>Understanding of networking fundamentals (TCP/UDP, bandwidth/latency tradeoffs, basic multicast, and debugging with common tools).</li>\n<li>Strong debugging instincts: you can form a hypothesis, gather evidence, and drive to root cause across layers.</li>\n<li>Clear communicator with good documentation habits and a low-ego, team-first approach.</li>\n</ul>\n<p>Bonus Qualifications</p>\n<ul>\n<li>Experience with ROS 2 (nodes, launch, TF2, bags, QoS) or other robotics middleware frameworks.</li>\n<li>Experience with CAN tooling (SocketCAN, DBC workflows) and/or ECU/firmware update flows.</li>\n<li>Experience with containerized deployments (e.g., Docker) or production deployment patterns on vehicles.</li>\n<li>Familiarity with profiling tools (perf, top/htop, valgrind, gdb) and performance tuning.</li>\n<li>Exposure to OTA / device management systems (e.g., AWS Greengrass) 
or fleet rollout practices.</li>\n<li>Understanding of safety-oriented development practices (fault handling, watchdogs, redundancy concepts).</li>\n<li>Experience with simulation environments (e.g., NVIDIA Isaac Sim, Gazebo, or similar) for integration and regression testing.</li>\n<li>CI/CD experience for robotics stacks (automated builds/tests, hardware-in-the-loop or log replay workflows).</li>\n</ul>\n<p>Benefits &amp; Perks</p>\n<ul>\n<li>Health benefits (Medical, Dental, Vision, HSA and FSA (Health &amp; Dependent Daycare), Employee Assistance Program, 1:1 Health Concierge)</li>\n<li>Life, Short-term and long-term disability insurance (Cyngn funds 100% of premiums)</li>\n<li>Company 401(k)</li>\n<li>Commuter Benefits</li>\n<li>Flexible vacation policy</li>\n<li>Remote or hybrid work opportunities</li>\n<li>Sabbatical leave opportunity after 5 years with the company</li>\n<li>Paid Parental Leave</li>\n<li>Daily lunches for in-office employees</li>\n<li>Monthly meal and tech allowances for remote employees</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_b11e687b-75e","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Cyngn","sameAs":"https://www.cyngn.com/","logo":"https://logos.yubhub.co/cyngn.com.png"},"x-apply-url":"https://jobs.lever.co/cyngn/2ae1976a-8bd5-4f88-aeb1-6a41f1f8660a","x-work-arrangement":"onsite","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":"$153,000-171,000 per year","x-skills-required":["C++","Python","Shell scripting","Linux","ROS 2","CAN tooling","Containerized deployments","Profiling tools","Safety-oriented development practices","Simulation environments","CI/CD experience"],"x-skills-preferred":[],"datePosted":"2026-04-17T12:26:46.887Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Mountain 
View"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"C++, Python, Shell scripting, Linux, ROS 2, CAN tooling, Containerized deployments, Profiling tools, Safety-oriented development practices, Simulation environments, CI/CD experience","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":153000,"maxValue":171000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_91578588-736"},"title":"Principal AI Engineer – Enterprise Agentic AI","description":"<p>Engineer the Future with Us</p>\n<p>We currently have 614 open roles</p>\n<p><strong>Principal AI Engineer – Enterprise Agentic AI</strong></p>\n<p><strong>Job Summary</strong></p>\n<p>You are a highly skilled, hands-on engineer with a passion for embedding advanced AI into the heart of enterprise business systems. With a robust enterprise background and a minimum of 10 years in software engineering, you thrive on architecting and delivering production-grade AI solutions that drive real business outcomes.</p>\n<p><strong>Key Responsibilities</strong></p>\n<ul>\n<li>Lead the architecture and hands-on development of enterprise agentic AI platforms across core business domains.</li>\n<li>Build and deploy AI copilots and multi-agent workflows for business teams (Legal, Finance, Sales, HR, Operations) using Microsoft AI Copilot, SAP BTP, Salesforce, and other platforms.</li>\n<li>Embed AI directly into enterprise workflows, ensuring seamless integration with ERP and CRM systems.</li>\n<li>Design, implement, and govern secure AI solutions utilizing MCP (Model Context Protocol) and A2A (Agent-to-Agent) orchestration standards.</li>\n<li>Develop scalable APIs and microservices in Python and C#, supporting robust, production-ready AI deployments.</li>\n<li>Ensure production readiness through best practices in CI/CD, observability, 
reliability, and continuous evaluation.</li>\n<li>Mentor engineers, lead architectural discussions, and collaborate with cross-functional business and technical stakeholders.</li>\n</ul>\n<p><strong>Impact</strong></p>\n<ul>\n<li>Drive the adoption of agentic AI across Synopsys&#39; core business systems, setting standards for enterprise AI integration and governance.</li>\n<li>Deliver high-visibility, high-impact AI solutions that transform business processes and decision-making for teams company-wide.</li>\n<li>Shape the architecture and best practices for scalable, secure, and reliable AI across hybrid and multi-vendor environments.</li>\n<li>Empower business teams to leverage AI copilots and intelligent workflows for increased productivity and innovation.</li>\n<li>Mentor and upskill engineering teams, fostering a culture of excellence, collaboration, and forward-thinking AI adoption.</li>\n<li>Ensure AI solutions meet enterprise standards for security, compliance, reliability, and operational excellence.</li>\n</ul>\n<p><strong>Requirements</strong></p>\n<ul>\n<li>10–15 years of software engineering experience, with a focus on enterprise systems.</li>\n<li>Expert-level proficiency in both Python and C#.</li>\n<li>Hands-on experience building and operating production AI/GenAI systems integrated with ERP and CRM platforms.</li>\n<li>Deep understanding of agentic AI, RAG, multi-step workflows, and enterprise-grade AI architecture.</li>\n<li>Proven experience with Microsoft AI Copilot, Copilot Studio, Microsoft AI Foundry, SAP BTP, and Salesforce AI (including Einstein and agentic capabilities).</li>\n<li>Expertise in microservices, API development, CI/CD pipelines, and cloud deployments.</li>\n<li>Experience with SQL, NoSQL, and vector databases.</li>\n<li>Preferred: Familiarity with LangChain, LangGraph, LlamaIndex, MCP, A2A, and prior support for business functions such as Legal, Finance, Sales, or HR.</li>\n</ul>\n<p><strong>Who You 
Are</strong></p>\n<ul>\n<li>Enterprise-minded engineer with a strong platform and architecture focus.</li>\n<li>Hands-on leader who enjoys mentoring and collaborating across business and technical teams.</li>\n<li>Pragmatic, delivery-oriented, and able to translate business needs into robust AI solutions.</li>\n<li>Adaptable and comfortable working in fast-changing, multi-vendor, and hybrid environments.</li>\n<li>Excellent communicator, able to articulate complex technical concepts to diverse audiences.</li>\n<li>Committed to continuous learning and keeping pace with the latest in AI and enterprise technology.</li>\n</ul>\n<p><strong>The Team You’ll Be A Part Of</strong></p>\n<p>You will join a cross-functional, forward-thinking engineering team responsible for embedding trustworthy, scalable AI into Synopsys’ most critical business systems. Our team partners closely with business stakeholders and platform owners to deliver agentic AI workflows that create measurable business value. We are passionate about innovation, operational excellence, and setting the standard for enterprise AI adoption.</p>\n<p><strong>Rewards and Benefits</strong></p>\n<p>We offer a comprehensive range of health, wellness, and financial benefits to cater to your needs. Our total rewards include both monetary and non-monetary offerings. 
Your recruiter will provide more details about the salary range and benefits during the hiring process.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_91578588-736","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Synopsys","sameAs":"https://careers.synopsys.com","logo":"https://logos.yubhub.co/careers.synopsys.com.png"},"x-apply-url":"https://careers.synopsys.com/job/hyderabad/principal-ai-engineer-enterprise-agentic-ai/44408/93375604480","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Python","C#","Microsoft AI Copilot","SAP BTP","Salesforce AI","Microservices","API development","CI/CD pipelines","Cloud deployments","SQL","NoSQL","Vector databases"],"x-skills-preferred":["LangChain","LangGraph","LlamaIndex","MCP","A2A","Legal","Finance","Sales","HR"],"datePosted":"2026-04-05T13:21:38.055Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Hyderabad"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, C#, Microsoft AI Copilot, SAP BTP, Salesforce AI, Microservices, API development, CI/CD pipelines, Cloud deployments, SQL, NoSQL, Vector databases, LangChain, LangGraph, LlamaIndex, MCP, A2A, Legal, Finance, Sales, HR"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_3e77a678-cf0"},"title":"Technical Support Engineer – On-Premise","description":"<p>We are seeking a Technical Support Engineer - On-Premise Infrastructure to join our Support team in France. 
This role is ideal for someone who excels at technical troubleshooting, incident investigation, and customer communication in a B2B environment.</p>\n<p>As a key member of the support team, you will be responsible for handling escalated technical issues from on-premise enterprise clients, reproducing complex problems, and collaborating with engineering, data, and product teams to ensure swift resolution. You will report directly to the Head of Support, and play a critical role in maintaining customer satisfaction and improving our support operations.</p>\n<p>This is a unique opportunity to work at the intersection of AI infrastructure, customer success, and technical problem-solving.</p>\n<p>Key Responsibilities:</p>\n<p>Technical Support &amp; Incident Management</p>\n<p>• Frontline Investigation: Handle escalated tickets from enterprise clients via Intercom, focusing on on-premise infrastructure and AI-related issues (e.g., deployment, performance, integration, security).</p>\n<p>• Root Cause Analysis: Ask the right questions to gather context, reproduce issues in test environments, and diagnose technical problems (systems, networks, storage, GPU clusters, AI models).</p>\n<p>• Cross-Team Collaboration: Work closely with engineering and deployment teams to escalate, track, and resolve incidents efficiently.</p>\n<p>• Proactive Communication: Provide clear, empathetic, and timely updates to clients and internal stakeholders, ensuring transparency throughout the resolution process.</p>\n<p>Knowledge Sharing &amp; Process Improvement</p>\n<p>• Documentation: Create and update technical FAQs, troubleshooting guides, and internal knowledge base articles to empower the self-serve/L1 team and reduce recurrence of issues.</p>\n<p>• Feedback Loop: Identify recurring pain points in on-premise deployments and suggest improvements to product, documentation, or support workflows.</p>\n<p>Customer-Centric Approach</p>\n<p>• Empathy &amp; Ownership: Maintain a customer-first 
mindset, ensuring clients feel heard and supported, even in high-pressure situations.</p>\n<p>• Solution-Oriented: Proactively propose workarounds, fixes, or process optimizations to enhance the customer experience and reduce incident resolution time.</p>\n<p>Technical Expertise</p>\n<p>• On-Premise &amp; Cloud Environments: Deep understanding of Linux/Windows servers, networking, virtualization, storage, security (firewalls, RGPD compliance), and cloud providers (AWS, GCP, Azure).</p>\n<p>• Kubernetes/Helm: Experience with deployment, scaling, and troubleshooting of applications in Kubernetes clusters using Helm charts.</p>\n<p>• Terraform: Familiarity with Infrastructure as Code (IaC) for managing cloud resources is a strong plus.</p>\n<p>• AI Infrastructure: Knowledge of AI/ML pipelines, LLM/RAG deployments, GPU acceleration, and data storage solutions for enterprise clients.</p>\n<p>• Tooling: Proficiency in Intercom, monitoring tools, scripting (Bash/Python), and diagnostic utilities (logs, performance metrics).</p>\n<p>Who you are:</p>\n<p>Required Experience: 3+ years in technical support, systems administration, or DevOps, with a focus on on-premise or hybrid infrastructures.</p>\n<p>Technical Skills:</p>\n<p>• Hands-on experience with troubleshooting complex technical issues in enterprise environments.</p>\n<p>• Knowledge of AI/ML workflows, data pipelines, or high-performance computing (a strong plus).</p>\n<p>• Familiarity with ticketing systems (Intercom), RGPD compliance, and security best practices.</p>\n<p>Soft Skills:</p>\n<p>• Exceptional problem-solving and analytical skills.</p>\n<p>• Strong written and verbal communication in French and English (additional languages are a bonus).</p>\n<p>• Ability to explain technical concepts clearly to non-technical stakeholders.</p>\n<p>Mindset:</p>\n<p>• Customer-obsessed, with a passion for delivering high-quality support.</p>\n<p>• Collaborative, able to work effectively in a distributed, fast-paced 
team.</p>\n<p>• Curious and adaptable, with a willingness to learn and master new technologies.</p>\n<p>Why Join Mistral AI?</p>\n<p>• Impact: Directly contribute to the success of enterprise AI deployments and shape the future of on-premise support.</p>\n<p>• Growth: Opportunities for career advancement in support leadership, technical specialization, or customer success.</p>\n<p>• Innovation: Work with cutting-edge AI technology in a dynamic, mission-driven company.</p>\n<p>• Team: Join a passionate, diverse, and low-ego team that values collaboration and continuous learning.</p>\n<p>• Work Environment: Hybrid flexibility (Paris office) with a focus on work-life balance and professional development.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_3e77a678-cf0","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Mistral AI","sameAs":"https://mistral.ai/careers"},"x-apply-url":"https://jobs.lever.co/mistral/f00a13aa-61f1-4c56-993c-20846adc2b15","x-work-arrangement":"onsite","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Linux/Windows servers","Networking","Virtualization","Storage","Security","Cloud providers","Kubernetes/Helm","Terraform","AI/ML pipelines","LLM/RAG deployments","GPU acceleration","Data storage solutions","Intercom","Monitoring tools","Scripting","Diagnostic utilities"],"x-skills-preferred":[],"datePosted":"2026-03-10T11:31:48.488Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Paris"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Linux/Windows servers, Networking, Virtualization, Storage, Security, Cloud providers, Kubernetes/Helm, Terraform, AI/ML pipelines, LLM/RAG deployments, GPU acceleration, Data storage solutions, Intercom, Monitoring tools, Scripting, 
Diagnostic utilities"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_ce88828f-470"},"title":"Solutions Architect, AI and ML","description":"<p>We are building the world&#39;s leading AI company and are looking for an experienced Cloud Solution Architect to help customers with adoption of GPU hardware and software, as well as building and deploying Machine Learning (ML), Deep Learning (DL), data analytics solutions on various Cloud Computing Platforms.</p>\n<p>As part of the Solutions Architecture team, we work with some of the most exciting computing hardware and software technologies including the latest breakthroughs in machine learning and data science. A Solutions Architect is the first line of technical expertise between NVIDIA and our customers so you will engage directly with developers, researchers, and data scientists with some of NVIDIA&#39;s most strategic technology customers as well as work directly with business and engineering teams on product strategy.</p>\n<p><strong>What you will be doing:</strong></p>\n<ul>\n<li>Working with Cloud Service Providers to develop and demonstrate solutions based on NVIDIA&#39;s ML/DL and data science software and hardware technologies</li>\n</ul>\n<ul>\n<li>Build and deploy AI/ML solutions at scale using NVIDIA&#39;s AI software on cloud-based GPU platforms.</li>\n</ul>\n<ul>\n<li>Build custom PoCs for solutions that address customer&#39;s critical business needs applying NVIDIA hardware and software technology</li>\n</ul>\n<ul>\n<li>Partner with Sales Account Managers or Developer Relations Managers to identify and secure new business opportunities for NVIDIA products and solutions for ML/DL and other software solutions</li>\n</ul>\n<ul>\n<li>Prepare and deliver technical content to customers including presentations about purpose-built solutions, workshops about NVIDIA products and solutions, etc.</li>\n</ul>\n<ul>\n<li>Conduct regular 
technical customer meetings for project/product roadmap, feature discussions, and intro to new technologies. Establish close technical ties to the customer to facilitate rapid resolution of customer issues</li>\n</ul>\n<p><strong>What we need to see:</strong></p>\n<ul>\n<li>3+ years of Solutions Engineering (or similar Sales Engineering roles) or equivalent experience</li>\n</ul>\n<ul>\n<li>3+ years of work-related experience in Deep Learning and Machine Learning, including deep learning frameworks TensorFlow or PyTorch, GPU, and CUDA experience extremely helpful.</li>\n</ul>\n<ul>\n<li>BS/MS/PhD in Electrical/Computer Engineering, Computer Science, Statistics, Physics, or other Engineering fields or equivalent experience.</li>\n</ul>\n<ul>\n<li>Established track record of deploying solutions in cloud computing environments including AWS, GCP, or Azure</li>\n</ul>\n<ul>\n<li>Knowledge of DevOps/ML Ops technologies such as Docker/containers, Kubernetes, data center deployments</li>\n</ul>\n<ul>\n<li>Ability to use at least one scripting language (i.e., Python)</li>\n</ul>\n<ul>\n<li>Good programming and debugging skills</li>\n</ul>\n<ul>\n<li>Ability to communicate your ideas/code clearly through documents, presentation etc.</li>\n</ul>\n<p><strong>Ways to stand out from the crowd:</strong></p>\n<ul>\n<li>AWS, GCP or Azure Professional Solution Architect Certification.</li>\n</ul>\n<ul>\n<li>Hands-on experience with NVIDIA GPUs and SDKs (e.g. 
CUDA, RAPIDS, Triton etc.)</li>\n</ul>\n<ul>\n<li>System-level experience specifically GPU-based systems</li>\n</ul>\n<ul>\n<li>Experience with Deep Learning at scale</li>\n</ul>\n<ul>\n<li>Familiarity with parallel programming and distributed computing platforms</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_ce88828f-470","directApply":true,"hiringOrganization":{"@type":"Organization","name":"NVIDIA","sameAs":"https://nvidia.wd5.myworkdayjobs.com","logo":"https://logos.yubhub.co/nvidia.com.png"},"x-apply-url":"https://nvidia.wd5.myworkdayjobs.com/en-US/NVIDIAExternalCareerSite/job/US-WA-Redmond/Solutions-Architect--AI-and-ML_JR2000691","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Solutions Engineering","Deep Learning and Machine Learning","TensorFlow or PyTorch","GPU and CUDA experience","BS/MS/PhD in Electrical/Computer Engineering, Computer Science, Statistics, Physics, or other Engineering fields","DevOps/ML Ops technologies","Docker/containers, Kubernetes, data center deployments","Scripting language (i.e., Python)","Good programming and debugging skills","Ability to communicate your ideas/code clearly through documents, presentation etc."],"x-skills-preferred":["AWS, GCP or Azure Professional Solution Architect Certification","Hands-on experience with NVIDIA GPUs and SDKs (e.g. 
CUDA, RAPIDS, Triton etc.)","System-level experience specifically GPU-based systems","Experience with Deep Learning at scale","Familiarity with parallel programming and distributed computing platforms"],"datePosted":"2026-03-09T20:46:16.733Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Redmond, Santa Clara, Seattle"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Solutions Engineering, Deep Learning and Machine Learning, TensorFlow or PyTorch, GPU and CUDA experience, BS/MS/PhD in Electrical/Computer Engineering, Computer Science, Statistics, Physics, or other Engineering fields, DevOps/ML Ops technologies, Docker/containers, Kubernetes, data center deployments, Scripting language (i.e., Python), Good programming and debugging skills, Ability to communicate your ideas/code clearly through documents, presentation etc., AWS, GCP or Azure Professional Solution Architect Certification, Hands-on experience with NVIDIA GPUs and SDKs (e.g. CUDA, RAPIDS, Triton etc.), System-level experience specifically GPU-based systems, Experience with Deep Learning at scale, Familiarity with parallel programming and distributed computing platforms"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_2a56a653-c18"},"title":"Palantir Engineer Specialist - Sr. Consultant - Principal","description":"<p><strong>Palantir Engineer Specialist</strong></p>\n<p><strong>Sr. Consultant - Principal</strong></p>\n<p><strong>London</strong></p>\n<p>Do you want to boost your career and collaborate with expert, talented colleagues to solve and deliver against our clients&#39; most important challenges? We are growing and are looking for people to join our team. You will be part of an entrepreneurial, high-growth environment of 300,000 employees. 
Our dynamic organisation allows you to work across functional business pillars, contributing your ideas, experiences, diverse thinking, and a strong mindset. Are you ready?</p>\n<p><strong>About Your Role</strong></p>\n<p>As a <strong>Senior Consultant / Principal Consultant – Palantir Engineer</strong>, you lead and deliver end-to-end, data-driven solutions using <strong>Palantir Foundry</strong> in complex client environments. You operate at the intersection of engineering, data, and consulting, working closely with business and technical stakeholders to translate complex problems into scalable, production-ready solutions. You combine strong hands-on technical skills with a consulting mindset, taking ownership of solution design, implementation, and adoption across organisations.</p>\n<p><strong>Your role will include:</strong></p>\n<ul>\n<li>Own the <strong>end-to-end delivery</strong> of Palantir Foundry–based solutions, from problem definition to production</li>\n<li>Design and implement <strong>data pipelines and transformations</strong> across diverse data sources</li>\n<li>Model data using <strong>Foundry Ontology</strong> concepts to support analytics and operational use cases</li>\n<li>Build scalable, reliable solutions using <strong>Python, SQL, and PySpark</strong> within Foundry</li>\n<li>Collaborate closely with business stakeholders to define requirements, success metrics, and roadmaps</li>\n<li>Support <strong>prototyping, productionisation, and scaling</strong> of data-driven applications</li>\n<li>Ensure solutions meet requirements for <strong>data quality, governance, security, and performance</strong></li>\n<li>Act as a technical advisor within project teams and contribute to best practices</li>\n</ul>\n<p><strong>Requirements</strong></p>\n<p><strong>What you bring – required</strong></p>\n<p><strong>Experience &amp; Seniority</strong></p>\n<ul>\n<li>Proven experience as a <strong>Senior Consultant or Principal Consultant</strong> in data, 
analytics, or platform engineering</li>\n<li>Strong experience delivering <strong>client-facing data solutions</strong> in complex environments</li>\n<li>Ability to take ownership and work independently in ambiguous problem spaces</li>\n</ul>\n<p><strong>Core Data &amp; Analytics Technology Skills</strong></p>\n<ul>\n<li>Strong programming skills in <strong>Python</strong> and <strong>SQL</strong>; <strong>PySpark</strong> experience required</li>\n<li>Hands-on experience with <strong>Palantir Foundry</strong>, including:</li>\n<li>Pipeline Builder / Code Workbook</li>\n<li>Data integration and transformation</li>\n<li>Ontology modelling and data lineage</li>\n<li>Solid understanding of <strong>data architectures</strong>, including data lakes, lakehouses, and data warehouses</li>\n<li>Experience working with APIs, databases, and structured / semi-structured data</li>\n</ul>\n<p><strong>Engineering &amp; Platform Foundations</strong></p>\n<ul>\n<li>Experience building <strong>scalable ETL/ELT pipelines</strong></li>\n<li>Familiarity with <strong>CI/CD concepts</strong>, testing, and production deployments</li>\n<li>Strong focus on <strong>solution quality, maintainability, and performance</strong></li>\n<li>Bachelor’s or Master’s degree in Computer Science, Engineering, Mathematics, or a related field <strong>or equivalent practical experience</strong></li>\n</ul>\n<p><strong>Nice to have</strong></p>\n<ul>\n<li>Experience with <strong>cloud platforms</strong> (AWS, Azure, GCP)</li>\n<li>Familiarity with <strong>containerisation</strong> (Docker, Kubernetes)</li>\n<li>Prior experience as a <strong>Palantir FDE</strong> or in Foundry-heavy delivery roles</li>\n<li>Domain experience in industries such as <strong>Energy, Finance, Public Sector, Healthcare, or Logistics</strong></li>\n</ul>\n<p><strong>Benefits</strong></p>\n<p><strong>About your team</strong></p>\n<p>Join our growing Data &amp; Analytics practice and make a difference. 
In this practice you will be utilizing the most innovative technological solutions in the modern data ecosystem. In this role you’ll be able to see your own ideas transform into breakthrough results in the areas of Data &amp; Analytics strategy, Data Management &amp; Governance, Data Platforms &amp; engineering, Analytics &amp; Data Science.</p>\n<p><strong>About Infosys Consulting</strong></p>\n<p>Be part of a globally renowned management consulting firm on the front-line of industry disruption and at the cutting edge of technology. We work with market leading brands across sectors. Our culture is inclusive and entrepreneurial. Being a mid-size consultancy within the scale of Infosys gives us the global reach to partner with our clients throughout their transformation journey.</p>\n<p>Our core values, IC-LIFE, form a common code that helps us move forward. IC-LIFE stands for Inclusion, Equity and Diversity, Client, Leadership, Integrity, Fairness, and Excellence. To learn more about Infosys Consulting and our values, please visit our careers page.</p>\n<p>Within Europe, we are recognised as one of the UK’s top firms by the Financial Times and Forbes due to our client innovations, our cultural diversity and dedicated training and career paths. Infosys is on Germany’s top employers list for 2023. Management Consulting Magazine named us on their list of Best Firms to Work for. Furthermore, Infosys has been recognised by the Top Employers Institute, a global certification company, for its exceptional standards in employee conditions across Europe for five years in a row.</p>\n<p>We offer industry-leading compensation and benefits, along with top training and development opportunities so that you can grow your career and achieve your personal ambitions. Curious to learn more? We’d love to hear from you.... 
Apply today!</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_2a56a653-c18","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Infosys Consulting - Europe","sameAs":"https://jobs.workable.com","logo":"https://logos.yubhub.co/view.com.png"},"x-apply-url":"https://jobs.workable.com/view/2A8U1ryerVijb4fFAc6i8u/hybrid-palantir-engineer-specialist---sr.-consultant---principal-in-london-at-infosys-consulting---europe","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Python","SQL","PySpark","Palantir Foundry","Pipeline Builder","Code Workbook","Data integration","Data transformation","Ontology modelling","Data lineage","Data architectures","Data lakes","Lakehouses","Data warehouses","APIs","Databases","Structured data","Semi-structured data","ETL/ELT pipelines","CI/CD concepts","Testing","Production deployments","Solution quality","Maintainability","Performance","Bachelor’s degree","Master’s degree","Computer Science","Engineering","Mathematics"],"x-skills-preferred":["Cloud platforms","Containerisation","Palantir FDE","Foundry-heavy delivery roles","Domain experience in industries such as Energy, Finance, Public Sector, Healthcare, or Logistics"],"datePosted":"2026-03-09T16:59:40.750Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"London"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, SQL, PySpark, Palantir Foundry, Pipeline Builder, Code Workbook, Data integration, Data transformation, Ontology modelling, Data lineage, Data architectures, Data lakes, Lakehouses, Data warehouses, APIs, Databases, Structured data, Semi-structured data, ETL/ELT pipelines, CI/CD concepts, Testing, Production deployments, Solution quality, Maintainability, Performance, 
Bachelor’s degree, Master’s degree, Computer Science, Engineering, Mathematics, Cloud platforms, Containerisation, Palantir FDE, Foundry-heavy delivery roles, Domain experience in industries such as Energy, Finance, Public Sector, Healthcare, or Logistics"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_56dc9a51-e66"},"title":"Principal Consultant - Data Architecture","description":"<p><strong>Principal Consultant - Data Architecture</strong></p>\n<p>You will be part of an entrepreneurial, high-growth environment of 300,000 employees. Our dynamic organization allows you to work across functional business pillars, contributing your ideas, experiences, diverse thinking, and a strong mindset.</p>\n<p><strong>About Your Role</strong></p>\n<p>As a Principal Data Architecture Consultant, you will act as a senior technical leader in complex data and analytics engagements. You will shape and govern end-to-end enterprise data architectures, lead technical teams, and serve as a trusted technical advisor for clients and internal stakeholders.</p>\n<p><strong>Your Role Will Include:</strong></p>\n<ul>\n<li>Define and govern target enterprise data, integration and analytics architectures across cloud and hybrid environments</li>\n<li>Translate business objectives into scalable, secure, and compliant data solutions</li>\n<li>Lead the design of end-to-end data solutions (ingestion, integration, storage, security, processing, analytics, AI enablement)</li>\n<li>Guide delivery teams through implementation, rollout, and production readiness</li>\n<li>Function as senior technical counterpart for client architects, IT leads, and engineering teams</li>\n<li>Mentor data architects, system architects and engineers and contribute to best practices and reference architectures</li>\n<li>Support pre-sales and solution design activities from a technical 
perspective</li>\n</ul>\n<p><strong>Requirements</strong></p>\n<ul>\n<li>5–8+ years of experience in enterprise data architecture, system data integration, data engineering, or analytics</li>\n<li>Proven experience leading enterprise data architecture workstreams or technical teams</li>\n<li>Strong client-facing experience in complex enterprise environments</li>\n</ul>\n<p><strong>Core Data &amp; Analytics Technology Skills</strong></p>\n<ul>\n<li>Strong expertise in modern data architectures, including:</li>\n<li>Data Mesh/ Data Fabric/ Data lake / data warehouse architectures</li>\n<li>Modern Data Architecture design principles</li>\n<li>Batch and streaming data integration patterns</li>\n<li>Data Platform, DevOps, deployment and security architectures</li>\n<li>Analytics and AI enablement architectures</li>\n<li>Hands-on experience with cloud data platforms, e.g.:</li>\n<li>Azure, AWS or GCP</li>\n<li>Databricks, Snowflake, BigQuery, Azure Synapse / Microsoft Fabric</li>\n<li>Strong SQL skills and experience with relational databases (e.g. Postgres, SQL Server, Oracle)</li>\n<li>Experience with NoSQL databases (e.g. 
Cosmos DB, MongoDB, InfluxDB)</li>\n<li>Solid understanding of API-based and event-driven architectures</li>\n<li>Experience designing and governing enterprise data migration programmes, including mapping, transformation rules, data quality remediation etc.</li>\n</ul>\n<p><strong>Engineering &amp; Platform Foundations</strong></p>\n<ul>\n<li>Experience with data pipelines, orchestration, and automation</li>\n<li>Familiarity with CI/CD concepts and production-grade deployments</li>\n<li>Understanding of distributed systems; Docker / Kubernetes is a plus</li>\n</ul>\n<p><strong>Data Management &amp; Governance</strong></p>\n<ul>\n<li>Strong understanding of data management and governance principles, including:</li>\n<li>Data quality, metadata, lineage, master data management</li>\n<li>Data Management software and tools</li>\n<li>Security, access control, and compliance considerations</li>\n<li>Bachelor’s or Master’s degree in Computer Science, Engineering, Mathematics, or a related field or equivalent practical experience</li>\n</ul>\n<p><strong>Nice to Have</strong></p>\n<ul>\n<li>Exposure to advanced analytics, AI / ML or GenAI from an architectural perspective</li>\n<li>Experience with streaming platforms (e.g. Kafka, Azure Event Hubs)</li>\n<li>Hands-on Experience with data governance or metadata tools</li>\n<li>Cloud, data, or architecture certifications</li>\n</ul>\n<p><strong>Language &amp; Mobility</strong></p>\n<ul>\n<li>Very good English skills</li>\n<li>Willingness to travel for project-related work</li>\n</ul>\n<p><strong>Benefits</strong></p>\n<p>You will be utilizing the most innovative technological solutions in modern data ecosystem. 
In this role you’ll be able to see your own ideas transform into breakthrough results in the areas of Data &amp; Analytics Strategy, Data Management &amp; Governance, Data Platforms &amp; Engineering, Analytics &amp; Data Science.</p>\n<p><strong>About Infosys Consulting</strong></p>\n<p>Be part of a globally renowned management consulting firm on the front-line of industry disruption and at the cutting edge of technology. We work with market leading brands across sectors. Our culture is inclusive and entrepreneurial. Being a mid-size consultancy within the scale of Infosys gives us the global reach to partner with our clients throughout their transformation journey.</p>\n<p>Our core values, IC-LIFE, form a common code that helps us move forward. IC-LIFE stands for Inclusion, Equity and Diversity, Client, Leadership, Integrity, Fairness, and Excellence. To learn more about Infosys Consulting and our values, please visit our careers page.</p>\n<p>Within Europe, we are recognized as one of the UK’s top firms by the Financial Times and Forbes due to our client innovations, our cultural diversity and dedicated training and career paths. Infosys is on Germany’s top employers list for 2023. Management Consulting Magazine named us on their list of Best Firms to Work for. Furthermore, Infosys has been recognized by the Top Employers Institute, a global certification company, for its exceptional standards in employee conditions across Europe for five years in a row.</p>\n<p>We offer industry-leading compensation and benefits, along with top training and development opportunities so that you can grow your career and achieve your personal ambitions. Curious to learn more? We’d love to hear from you.... 
Apply today!</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_56dc9a51-e66","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Infosys Consulting - Europe","sameAs":"https://jobs.workable.com","logo":"https://logos.yubhub.co/view.com.png"},"x-apply-url":"https://jobs.workable.com/view/hpBWjvvy8D6B1f818cHxZR/remote-principal-consultant---data-architecture-in-poland-at-infosys-consulting---europe","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["enterprise data architecture","system data integration","data engineering","analytics","modern data architectures","Data Mesh/ Data Fabric/ Data lake / data warehouse architectures","Modern Data Architecture design principles","Batch and streaming data integration patterns","Data Platform, DevOps, deployment and security architectures","Analytics and AI enablement architectures","cloud data platforms","Azure","AWS","GCP","Databricks","Snowflake","BigQuery","Azure Synapse / Microsoft Fabric","SQL","relational databases","Postgres","SQL Server","Oracle","NoSQL databases","Cosmos DB","MongoDB","InfluxDB","API-based and event-driven architectures","data migration programmes","data pipelines","orchestration","automation","CI/CD concepts","production-grade deployments","distributed systems","Docker","Kubernetes","data management and governance principles","data quality","metadata","lineage","master data management","data management software and tools","security","access control","compliance considerations","Bachelor’s or Master’s degree in Computer Science, Engineering, Mathematics, or a related field or equivalent practical experience"],"x-skills-preferred":["advanced analytics","AI / ML or GenAI","streaming platforms","Kafka","Azure Event Hubs","data governance or metadata tools","cloud","data","architecture 
certifications"],"datePosted":"2026-03-09T16:51:22.857Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Poland"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"enterprise data architecture, system data integration, data engineering, analytics, modern data architectures, Data Mesh/ Data Fabric/ Data lake / data warehouse architectures, Modern Data Architecture design principles, Batch and streaming data integration patterns, Data Platform, DevOps, deployment and security architectures, Analytics and AI enablement architectures, cloud data platforms, Azure, AWS, GCP, Databricks, Snowflake, BigQuery, Azure Synapse / Microsoft Fabric, SQL, relational databases, Postgres, SQL Server, Oracle, NoSQL databases, Cosmos DB, MongoDB, InfluxDB, API-based and event-driven architectures, data migration programmes, data pipelines, orchestration, automation, CI/CD concepts, production-grade deployments, distributed systems, Docker, Kubernetes, data management and governance principles, data quality, metadata, lineage, master data management, data management software and tools, security, access control, compliance considerations, Bachelor’s or Master’s degree in Computer Science, Engineering, Mathematics, or a related field or equivalent practical experience, advanced analytics, AI / ML or GenAI, streaming platforms, Kafka, Azure Event Hubs, data governance or metadata tools, cloud, data, architecture certifications"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_3fce9068-f55"},"title":"SAP Business Cutover Project Manager","description":"<p><strong>Senior SAP Business Cutover Project Manager</strong></p>\n<p>Lead the end-to-end business cutover process for an SAP S/4 global programme, ensuring smooth transition from legacy operations to new systems. 
This role focuses on business readiness, operational ramp down, and ramp up activities across supply chain, manufacturing, distribution, commercial and finance, minimizing disruption and safeguarding customer business activities.</p>\n<p><strong>Cutover Planning &amp; Governance</strong></p>\n<ul>\n<li>Develop and own the business cutover strategy and execution roadmap, integrating technical and business activities.</li>\n<li>Work with the business teams to develop detailed ramp down/ramp up plans for critical business processes for all sites (e.g., production scheduling, inventory, order fulfilment, finance).</li>\n<li>Ensure compliance with governance, methodologies, and change control processes.</li>\n</ul>\n<p><strong>Business Readiness</strong></p>\n<ul>\n<li>Coordinate readiness across the 5 key regions including all associated plants, warehouses, and distribution centers.</li>\n<li>Align cutover activities with seasonal demand cycles and logistics constraints.</li>\n</ul>\n<p><strong>Stakeholder Management</strong></p>\n<ul>\n<li>Engage business leaders and operational teams to validate readiness and dependencies.</li>\n<li>Facilitate go/no-go readiness reviews with leadership and PMO.</li>\n</ul>\n<p><strong>Risk &amp; Issue Management</strong></p>\n<ul>\n<li>Identify and mitigate risks related to downtime, data migration, and operational continuity.</li>\n<li>Define rollback scenarios and contingency plans.</li>\n</ul>\n<p><strong>Execution &amp; Reporting</strong></p>\n<ul>\n<li>Drive cutover execution during trial runs, dress rehearsal and cutover for go-live.</li>\n<li>Provide real-time dashboards and executive updates on readiness and progress.</li>\n</ul>\n<p><strong>Post-Go-Live Stabilization</strong></p>\n<ul>\n<li>Lead hypercare activities and ensure smooth handover to operations/support teams.</li>\n<li>Capture lessons learned for continuous improvement.</li>\n</ul>\n<p><strong>Define KPIs &amp; Success Metrics such 
as:</strong></p>\n<ul>\n<li>Operational Continuity</li>\n<li>Downtime Management</li>\n<li>Readiness Score</li>\n<li>Data Migration Accuracy</li>\n<li>Hypercare Resolution</li>\n</ul>\n<p><strong>Requirements</strong></p>\n<ul>\n<li>10+ years in SAP program delivery with proven experience in business cutover management for consumer goods and logistics.</li>\n<li>Expertise in ramp down/ramp up planning for large-scale ERP transformations (ECC and S/4HANA).</li>\n<li>Strong understanding of supply chain, manufacturing, distribution, commercial and finance processes.</li>\n<li>Familiarity with SAP modules.</li>\n<li>Experience in global rollouts and multi-country deployments.</li>\n<li>SAP or PMP certification preferred.</li>\n<li>Excellent communication and ability to influence C-Level executives.</li>\n<li>Ability to lead teams to prepare large proposals and program plans, facilitate leverage differentiators (e.g. specific consulting frameworks, etc.).</li>\n<li>Outstanding communication skills (verbal and written) and presentation skills, with the ability to influence C-Level stakeholders within client organizations.</li>\n<li>Strategic thinker with strong business orientation.</li>\n<li>Ability to manage complex dependencies and drive decisions.</li>\n<li>Skilled in balancing technical and operational priorities.</li>\n<li>Willingness to work shifts for cutover activities</li>\n<li>Project-related mobility/willingness to travel.</li>\n</ul>\n<p><strong>Benefits</strong></p>\n<p>Infosys Consulting offers industry-leading compensation and benefits, along with top training and development opportunities so that you can grow your career and achieve your personal ambitions.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_3fce9068-f55","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Infosys Consulting - 
Europe","sameAs":"https://jobs.workable.com","logo":"https://logos.yubhub.co/view.com.png"},"x-apply-url":"https://jobs.workable.com/view/qWeyLMotNbitwJhiEqA87a/hybrid-sap-business-cutover-project-manager-in-london-at-infosys-consulting---europe","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["SAP program delivery","Business cutover management","Ramp down/ramp up planning","Supply chain","Manufacturing","Distribution","Commercial","Finance","SAP modules","Global rollouts","Multi-country deployments","SAP or PMP certification"],"x-skills-preferred":["Excellent communication","Influence C-Level executives","Lead teams","Prepare large proposals","Program plans","Facilitate leverage differentiators","Outstanding communication skills","Presentation skills","Strategic thinker","Business orientation","Manage complex dependencies","Drive decisions","Balancing technical and operational priorities"],"datePosted":"2026-03-09T16:46:54.816Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"London"}},"employmentType":"FULL_TIME","occupationalCategory":"Consulting","industry":"Management Consulting","skills":"SAP program delivery, Business cutover management, Ramp down/ramp up planning, Supply chain, Manufacturing, Distribution, Commercial, Finance, SAP modules, Global rollouts, Multi-country deployments, SAP or PMP certification, Excellent communication, Influence C-Level executives, Lead teams, Prepare large proposals, Program plans, Facilitate leverage differentiators, Outstanding communication skills, Presentation skills, Strategic thinker, Business orientation, Manage complex dependencies, Drive decisions, Balancing technical and operational priorities"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_1a10d476-cae"},"title":"Test Engineer - Platform","description":"<p>We&#39;re looking 
for an intermediate to senior Test Engineer to join our Platform Team, with a key focus on our platform modernisation and stabilisation initiatives. You&#39;ll predominantly be involved in technical, backend testing, with an approximate 60% manual / 40% automation split. The work is almost entirely focused on backend libraries and services, with no front-end testing involved.</p>\n<p>This is an ideal role for an experienced tester with an interest in cloud platform solutions, who is looking to further develop their automation testing experience.</p>\n<p><strong>Responsibilities</strong></p>\n<ul>\n<li>A variety of backend testing with a focus on containers, deployments, APIs, authentication, access tokens etc...</li>\n<li>Functional, non-functional, regression and security testing</li>\n<li>Familiar with modern coding standards/practices and DevOps, CI/CD pipelines</li>\n<li>Strong knowledge of software QA methodologies, tools, and processes</li>\n<li>Experience with testing tools like JMeter, Postman, or other similar tools would be advantageous</li>\n<li>Exposure to C# / .Net would be beneficial</li>\n<li>Excellent troubleshooting and problem-solving skills, and a proven ability to write clear, concise, and comprehensive test plans and test cases</li>\n</ul>\n<p><strong>Benefits</strong></p>\n<ul>\n<li>Excellent work/life balance, including a 4 ½ day working week</li>\n<li>Hybrid working (home and office-based split, requiring regular weekly attendance in the Auckland office)</li>\n<li>Medical and Life insurance (after qualifying period)</li>\n<li>Volunteer day, enhanced paid parental leave and wellness benefits</li>\n<li>Strong mentoring &amp; career development focus</li>\n<li>Fun team events including the Vista Innovation Cup</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_1a10d476-cae","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Vista","sameAs":"https://apply.workable.com","logo":"https://logos.yubhub.co/j.com.png"},"x-apply-url":"https://apply.workable.com/j/94E2692BD4","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["backend testing","containers","deployments","APIs","authentication","access tokens","functional testing","non-functional testing","regression testing","security testing","DevOps","CI/CD pipelines","software QA methodologies","testing tools","JMeter","Postman"],"x-skills-preferred":["C#",".Net"],"datePosted":"2026-03-09T16:20:15.095Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Auckland"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"backend testing, containers, deployments, APIs, authentication, access tokens, functional testing, non-functional testing, regression testing, security testing, DevOps, CI/CD pipelines, software QA methodologies, testing tools, JMeter, Postman, C#, .Net"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_d6224b25-b36"},"title":"Development Engineer - Optimal Audio","description":"<p>We are seeking an enthusiastic and motivated Development Engineer to join our OA Research and Development team to continue to drive forward the development of our audio processor platforms for installed commercial audio.</p>\n<p><strong>Responsibilities</strong></p>\n<ul>\n<li>Tracking through features from product management to software architecture, design and implementation on an embedded platform</li>\n<li>Developing features to support existing and new networked audio processing products</li>\n<li>Continuous improvement to maintain stability and performance of 
the product family</li>\n<li>Responsibility for bug tracking, software revision control and release process</li>\n<li>Interfacing, maintenance and adding to the control web app</li>\n<li>Selection and test of new support technologies such as embedded libraries to implement new features</li>\n<li>Implement new features in response to customer feedback and product support requests</li>\n<li>Develop new products on the platform and related technologies</li>\n<li>Maintain the platform for periodic feature releases</li>\n<li>Create a structured test framework and process</li>\n</ul>\n<p><strong>Job Scope</strong></p>\n<ul>\n<li>Develop the software to support the existing and derived hardware platforms from driver level up to application features</li>\n<li>Become proficient with a good working knowledge of the underlying hardware to be able to create appropriate subsystems within the constraints of the platform</li>\n</ul>\n<p><strong>Skills and Experience Required</strong></p>\n<ul>\n<li>Degree qualified in software engineering or related engineering subject</li>\n<li>Be strong in embedded development/debugging and problem solving</li>\n<li>Have good knowledge of embedded C and C++, experience with using a real-time operating system, and knowledge of common embedded processor peripherals</li>\n<li>Have a good detailed knowledge of Embedded TCP/IP networking</li>\n</ul>\n<p><strong>Benefits</strong></p>\n<ul>\n<li>Life Assurance</li>\n<li>Income Protection</li>\n<li>Pension</li>\n<li>Bike2work scheme</li>\n<li>25 days holiday (increasing with service)</li>\n<li>Medical Cash Plan and Private Healthcare options</li>\n<li>Substantial staff discount on company products</li>\n<li>Participation to company bonus scheme</li>\n<li>Opportunity to join our EV scheme</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_d6224b25-b36","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Focusrite PLC","sameAs":"https://apply.workable.com","logo":"https://logos.yubhub.co/j.com.png"},"x-apply-url":"https://apply.workable.com/j/02E60B02AC","x-work-arrangement":"onsite","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":"Negotiable + benefits","x-skills-required":["embedded development","debugging","problem solving","embedded C","C++","real-time operating system","Embedded TCP/IP networking"],"x-skills-preferred":["ARM Cortex processors","creating build pipelines and cloud deployments","digital signal processing","networked audio transports such as Dante","professional audio","modern web technologies","TypeScript","React"],"datePosted":"2026-03-09T16:12:31.697Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"High Wycombe"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"embedded development, debugging, problem solving, embedded C, C++, real-time operating system, Embedded TCP/IP networking, ARM Cortex processors, creating build pipelines and cloud deployments, digital signal processing, networked audio transports such as Dante, professional audio, modern web technologies, TypeScript, React"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_e9d432ac-fb7"},"title":"Implementation Engineer","description":"<p>We are seeking an Implementation Engineer to join our team in Pune. As an Implementation Engineer, you will be responsible for developing Infrastructure-as-Code using Terraform, CDK, or Pulumi, and developing CI/CD pipelines for Cloud Deployments. You will also be responsible for developing custom automation using Lambda Functions or Azure functions. 
Your tasks will include managing code repositories, performing peer code reviews, and maintaining code hygiene. You will also be responsible for mentoring and training junior engineers in your pod. You will work side-by-side with customers to design, diagram, and document complex integrations, and then build and deploy these designs. You will also establish partnerships and strategic relationships with contacts at our biggest brands. Time management is critical, and you should be able to manage multiple tasks and projects simultaneously. You will also be responsible for analysing and auditing existing Helpshift implementations to make improvements. You will become an expert at using Helpshift&#39;s administrative tools, which include a suite of AI products, bots, and other mission-critical support functions. You will work collaboratively with Account Managers, Customer Success Managers, and Sales teams to ensure customers&#39; overall success with the product. You will continually optimise the overall development process with improvements to documentation, trainings, and other customer-facing content.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Develop Infrastructure-as-Code using Terraform, CDK, or Pulumi</li>\n<li>Develop CI/CD pipelines for Cloud Deployments</li>\n<li>Develop custom automation using Lambda Functions or Azure functions</li>\n<li>Manage code repositories and perform peer code reviews</li>\n<li>Maintain code hygiene and write test cases for solutions</li>\n<li>Mentor and train junior engineers in your pod</li>\n<li>Work side-by-side with customers to design, diagram, and document complex integrations</li>\n<li>Establish partnerships and strategic relationships with contacts at our biggest brands</li>\n<li>Analyse and audit existing Helpshift implementations to make improvements</li>\n<li>Become an expert at using Helpshift&#39;s administrative tools</li>\n<li>Work collaboratively with Account Managers, Customer Success Managers, and Sales 
teams</li>\n<li>Continually optimise the overall development process</li>\n</ul>\n<p>Requirements:</p>\n<ul>\n<li>3 years of SaaS experience in a specialization such as consulting services, technical pre-sales, or solution engineering</li>\n<li>Proven experience translating ambiguous customer requirements into actionable technical solutions</li>\n<li>Proficiency with Python, Go, C#, Node.js, or Powershell</li>\n<li>Proficiency with deploying cloud solutions on AWS or Azure</li>\n<li>Familiarity with technical SaaS concepts such as SDKs, APIs, and cloud computing</li>\n<li>Exceptional organisational skills and a project management mindset</li>\n<li>Understanding of Object-Oriented Programming concepts</li>\n<li>Excellent communication skills and ability to lead meetings with customer executives and analysts</li>\n<li>Proficiency in G-Suite and ability to perform data analysis tasks</li>\n<li>Curiosity about complex systems and natural problem-solving skills</li>\n</ul>\n<p>Benefits:</p>\n<ul>\n<li>Hybrid setup</li>\n<li>Worker&#39;s insurance</li>\n<li>Paid Time Offs</li>\n<li>Other employee benefits to be discussed by our Talent Acquisition team in Pune</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_e9d432ac-fb7","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Helpshift","sameAs":"https://apply.workable.com","logo":"https://logos.yubhub.co/j.com.png"},"x-apply-url":"https://apply.workable.com/j/450F76EA64","x-work-arrangement":"hybrid","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Infrastructure-as-Code","Terraform","CDK","Pulumi","CI/CD pipelines","Cloud Deployments","custom automation","Lambda Functions","Azure functions","code repositories","peer code reviews","code hygiene","test cases","mentoring","training","junior engineers","customer success","account 
management","sales"],"x-skills-preferred":["Python","Go","C#","Node.js","Powershell","AWS","Azure","SDKs","APIs","cloud computing","G-Suite","data analysis"],"datePosted":"2026-03-09T10:56:00.023Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Pune"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Infrastructure-as-Code, Terraform, CDK, Pulumi, CI/CD pipelines, Cloud Deployments, custom automation, Lambda Functions, Azure functions, code repositories, peer code reviews, code hygiene, test cases, mentoring, training, junior engineers, customer success, account management, sales, Python, Go, C#, Node.js, Powershell, AWS, Azure, SDKs, APIs, cloud computing, G-Suite, data analysis"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_62ce45e3-77a"},"title":"Implementation Specialist","description":"<p>We are looking for an ambitious, experienced, driven &amp; solution orientated individual to join our Implementations team. As an Implementation Specialist, you will be responsible for implementing Helpshift&#39;s AI-first customer service platform for our clients. 
This includes understanding no code automation concepts and CI/CD pipelines for Cloud Deployments, developing no code automation / Logic Apps (Lambda Functions/ Azure functions), and managing code repositories and performing peer code reviews.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Understand no code automation concepts and CI/CD pipelines for Cloud Deployments</li>\n<li>Develop no code automation / Logic Apps (Lambda Functions/ Azure functions)</li>\n<li>Understand programming languages such as Python, JavaScript, bash, powershell, C#</li>\n<li>Managing code repositories and performing peer code reviews</li>\n<li>Ability to maintain the code hygiene and writing test cases for the solutions</li>\n<li>Assist with proof of concepts for strategic and enterprise customers</li>\n<li>Work side-by-side with our technical account managers to design, build, and deploy complex integrations for our customers</li>\n<li>Establish partnerships and strategic relationships with contacts at our biggest brands</li>\n<li>Time management is critical and should be able to manage multiple tasks/projects simultaneously</li>\n<li>Analyze and audit existing Helpshift implementations for the purpose of making improvements</li>\n<li>Become an expert at using Helpshift’s administrative tools which include a suite of AI products, bots, and other mission-critical support functions</li>\n<li>Work collaboratively with Account Managers, Technical Account Managers, Customer Success Managers, and Sales to ensure customers’ overall success with the product</li>\n<li>Collaborate with project managers to ensure sprint planning is completed on time and project deliverables are trackable in JIRA</li>\n<li>Continually optimize the overall implementation processes with improvements to documentation, trainings, demos and other customer-facing content</li>\n</ul>\n<p>Requirements:</p>\n<ul>\n<li>You have a minimum of 4 years of SaaS experience in a specialization such as consulting services, technical 
pre-sales or technical project management</li>\n<li>You are proficient with any of: Python, Go, C#, Node.js, Powershell</li>\n<li>You are proficient with deploying cloud solutions on AWS or Azure (preferred)</li>\n<li>You are familiar with technical SaaS concepts such as SDKs, APIs and cloud computing</li>\n<li>You are exceptionally organized and a project manager at heart</li>\n<li>You value performing tasks as efficiently as possible and you dislike seeing mistakes repeated</li>\n<li>You have excellent communication skills and are comfortable leading meetings with customer executives and analysts alike</li>\n<li>You are proficient in the entire G-Suite and can perform data analysis tasks including generating pivot tables and writing complex formulas in Sheets/Excel</li>\n<li>You are curious about complex systems and a natural problem solver</li>\n<li>Bonus points for previous experience in a support role or call centre\nBenefits:</li>\n<li>Hybrid setup</li>\n<li>Worker&#39;s insurance</li>\n<li>Paid Time Offs</li>\n<li>Other employee benefits to be discussed by our Talent Acquisition team in Pune\nHelpshift embraces diversity. 
We are proud to be an equal opportunity workplace and do not discriminate on the basis of sex, race, color, age, sexual orientation, gender identity, religion, national origin, citizenship, marital status, veteran status, or disability status</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_62ce45e3-77a","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Keywords Studios","sameAs":"https://apply.workable.com","logo":"https://logos.yubhub.co/j.com.png"},"x-apply-url":"https://apply.workable.com/j/3DC7F11853","x-work-arrangement":"hybrid","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["no code automation","CI/CD pipelines","Cloud Deployments","Python","JavaScript","bash","powershell","C#","code repositories","peer code reviews","test cases","proof of concepts","complex integrations","AI products","bots","mission-critical support functions","sprint planning","JIRA","documentation","trainings","demos","customer-facing content"],"x-skills-preferred":["AWS","Azure","SDKs","APIs","cloud computing","G-Suite","data analysis","pivot tables","complex formulas"],"datePosted":"2026-03-09T10:53:29.023Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Pune"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"no code automation, CI/CD pipelines, Cloud Deployments, Python, JavaScript, bash, powershell, C#, code repositories, peer code reviews, test cases, proof of concepts, complex integrations, AI products, bots, mission-critical support functions, sprint planning, JIRA, documentation, trainings, demos, customer-facing content, AWS, Azure, SDKs, APIs, cloud computing, G-Suite, data analysis, pivot tables, complex 
formulas"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_7b2b97d5-0a1"},"title":"Software Engineer, Inference Deployment","description":"<p><strong>About Anthropic</strong></p>\n<p>Anthropic&#39;s mission is to create reliable, interpretable, and steerable AI systems. We want AI to be safe and beneficial for our users and for society as a whole. Our team is a quickly growing group of committed researchers, engineers, policy experts, and business leaders working together to build beneficial AI systems.</p>\n<p><strong>About the Role</strong></p>\n<p>Our mandate is to make inference deployment boring and unattended.</p>\n<p>Anthropic serves Claude to millions of users across GPUs, TPUs, and Trainium — and every model update must reach production safely, quickly, and without disrupting service. We&#39;re building the systems that make inference deployment continuous and unattended.</p>\n<p>As a Software Engineer on the Launch Engineering team, you&#39;ll design and build the deployment infrastructure that moves inference code from merge to production. This is a resource-constrained optimization problem at its core: validation and deployment consume the same accelerator chips that serve customer traffic — your deploys compete with live user requests for the same hardware. Every model brings different fleet sizes, startup times, and correctness requirements, so the system must adapt continuously. 
You&#39;ll build systems that navigate these constraints — orchestrating validation, scheduling deployments intelligently, and driving down cycle time from merge to production.</p>\n<p>If you&#39;ve built deployment systems at scale and gravitate toward the hardest problems at the intersection of automation and resource management, this team will give you an outsized scope to work on them.</p>\n<p><strong>Responsibilities</strong></p>\n<ul>\n<li><strong>Own deployment orchestration</strong> that continuously moves validated inference builds into production across GPU, TPU, and Trainium fleets, unattended under normal conditions</li>\n<li><strong>Improve capacity-aware deployment scheduling</strong> to maximize deployment throughput against constrained accelerator budgets and variable fleet sizes</li>\n<li><strong>Extend deployment observability</strong> — dashboards and tooling that answer &quot;what code is running in production,&quot; &quot;where is my commit,&quot; and &quot;what validation passed for this deploy&quot;</li>\n<li><strong>Drive down cycle time</strong> from code merge to production with pipeline architectures that minimize serial dependencies and maximize parallelism</li>\n<li><strong>Optimize fleet rollout strategies</strong> for large-scale deployments across thousands of GPU, TPU, and Trainium chips, minimizing disruption to serving capacity</li>\n<li><strong>Evolve self-service model onboarding</strong> so that new models can be added to the continuous deployment pipeline without Launch Engineering involvement</li>\n<li><strong>Partner across the Inference organization</strong> with teams owning validation, autoscaling, and model routing to integrate deployment automation with their systems</li>\n</ul>\n<p><strong>You May Be a Good Fit If You Have</strong></p>\n<ul>\n<li>5+ years of experience building deployment, release, or delivery infrastructure at scale</li>\n<li>Strong software engineering skills with experience designing systems that 
manage complex state machines and multi-stage pipelines</li>\n<li>Experience with deployment systems where resource constraints shape the design — whether that&#39;s fleet capacity, network bandwidth, hardware availability, or coordinated rollout windows</li>\n<li>A track record of building automation that measurably improves deployment velocity and reliability</li>\n<li>Proficiency with Kubernetes-based deployments, rolling update mechanics, and container orchestration</li>\n<li>Comfort working across the stack — from backend services and databases to CLI tools and web UIs</li>\n<li>Strong communication skills and the ability to work closely with oncall engineers, model teams, and infrastructure partners</li>\n</ul>\n<p><strong>Strong Candidates May Also Have</strong></p>\n<ul>\n<li>Experience with ML inference or training infrastructure deployment, particularly across multiple accelerator types (GPU, TPU, Trainium)</li>\n<li>Background in capacity planning or resource-constrained scheduling (e.g., bin-packing, fleet management, job scheduling with hardware affinity)</li>\n<li>Experience with progressive delivery in systems with long validation cycles: canary/soak testing, blue-green deployments, traffic shifting, automated rollback</li>\n<li>Experience at companies with large-scale release engineering challenges (mobile release trains, monorepo deployments, multi-datacenter rollouts)</li>\n<li>Experience with Python and/or Rust in production systems</li>\n</ul>\n<p><strong>Logistics</strong></p>\n<p><strong>Education requirements:</strong> We require at least a Bachelor&#39;s degree in a related field or equivalent experience. <strong>Location-based hybrid policy:</strong> Currently, we expect all staff to be in one of our offices at least 25% of the time. However, some roles may require more time in our offices.</p>\n<p><strong>Visa sponsorship:</strong> We do sponsor visas! 
However, we aren&#39;t able to successfully sponsor visas for every role and every candidate. But if we make you an offer, we will make every reasonable effort to get you a visa, and we retain an immigration lawyer to help with this.</p>\n<p><strong>We encourage you to apply even if you do not believe you meet every single qualification.</strong> Not all strong candidates will meet every single qualification as listed. Research shows that people who identify as being from underrepresented groups are more prone to experiencing imposter syndrome and doubting the strength of their candidacy, so we urge you not to exclude yourself prematurely and to submit an application if you&#39;re interested in this work.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_7b2b97d5-0a1","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://job-boards.greenhouse.io","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5111745008","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$320,000 - $485,000 USD","x-skills-required":["deployment","release","delivery","infrastructure","Kubernetes","container","orchestration","pipelines","state machines","multi-stage","pipelines","parallelism","optimization","resource management","automation","velocity","reliability","communication","collaboration","oncall","model teams","infrastructure partners"],"x-skills-preferred":["ML inference","training infrastructure","capacity planning","resource-constrained scheduling","bin-packing","fleet management","job scheduling","hardware affinity","progressive delivery","canary/soak testing","blue-green deployments","traffic shifting","automated rollback","mobile release trains","monorepo deployments","multi-datacenter 
rollouts","Python","Rust"],"datePosted":"2026-03-08T13:54:19.012Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA | New York City, NY | Seattle, WA"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"deployment, release, delivery, infrastructure, Kubernetes, container, orchestration, pipelines, state machines, multi-stage, pipelines, parallelism, optimization, resource management, automation, velocity, reliability, communication, collaboration, oncall, model teams, infrastructure partners, ML inference, training infrastructure, capacity planning, resource-constrained scheduling, bin-packing, fleet management, job scheduling, hardware affinity, progressive delivery, canary/soak testing, blue-green deployments, traffic shifting, automated rollback, mobile release trains, monorepo deployments, multi-datacenter rollouts, Python, Rust","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":320000,"maxValue":485000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_25934fbc-c50"},"title":"Staff / Senior Software Engineer, Cloud Inference","description":"<p><strong>About the Role</strong></p>\n<p>The Cloud Inference team scales and optimizes Claude to serve the massive audiences of developers and enterprise companies across AWS, GCP, Azure, and future cloud service providers (CSPs). We own the end-to-end product of Claude on each cloud platform—from API integration and intelligent request routing to inference execution, capacity management, and day-to-day operations.</p>\n<p>Our engineers are extremely high leverage: we simultaneously drive multiple major revenue streams while optimizing one of Anthropic&#39;s most precious resources—compute. 
As we expand to more cloud platforms, the complexity of managing inference efficiently across providers with different hardware, networking stacks, and operational models grows significantly. We need engineers who can navigate these platform differences, build robust abstractions that work across providers, and make smart infrastructure decisions that keep us cost-effective at massive scale.</p>\n<p>Your work will increase the scale at which our services operate, accelerate our ability to reliably launch new frontier models and innovative features to customers across all platforms, and ensure our LLMs meet rigorous safety, performance, and security standards.</p>\n<p><strong>What You&#39;ll Do</strong></p>\n<ul>\n<li>Design and build infrastructure that serves Claude across multiple CSPs, accounting for differences in compute hardware, networking, APIs, and operational models</li>\n<li>Collaborate with CSP partner engineering teams to resolve operational issues, influence provider roadmaps, and stand up end-to-end serving on new cloud platforms</li>\n<li>Design and evolve CI/CD automation systems, including validation and deployment pipelines, that reliably ship new model versions to millions of users across cloud platforms without regressions</li>\n<li>Design interfaces and tooling abstractions across CSPs that enable cost-effective inference management, scale across providers, and reduce per-platform complexity</li>\n<li>Contribute to capacity planning and autoscaling strategies that dynamically match supply with demand across CSP validation and production workloads</li>\n<li>Optimize inference cost and performance across providers—designing workload placement and routing systems that direct requests to the most cost-effective accelerator and region</li>\n<li>Contribute to inference features that must work consistently across all platforms</li>\n<li>Analyze observability data across providers to identify performance bottlenecks, cost anomalies, and regressions, 
and drive remediation based on real-world production workloads</li>\n</ul>\n<p><strong>You May Be a Good Fit If You:</strong></p>\n<ul>\n<li>Have significant software engineering experience, with a strong background in high-performance, large-scale distributed systems serving millions of users</li>\n<li>Have experience building or operating services on at least one major cloud platform (AWS, GCP, or Azure), with exposure to Kubernetes, Infrastructure as Code or container orchestration</li>\n<li>Have strong interest in inference</li>\n<li>Thrive in cross-functional collaboration with both internal teams and external partners</li>\n<li>Are a fast learner who can quickly ramp up on new technologies, hardware platforms, and provider ecosystems</li>\n<li>Are highly autonomous and self-driven, taking ownership of problems end-to-end with a bias toward flexibility and high-impact work</li>\n<li>Pick up slack, even when it goes outside your job description</li>\n</ul>\n<p><strong>Strong Candidates May Also Have Experience With</strong></p>\n<ul>\n<li>Direct experience working with CSP partner teams to scale infrastructure or products across multiple platforms, navigating differences in networking, security, privacy, billing, and managed service offerings</li>\n<li>A background in building platform-agnostic tooling or abstraction layers that work across cloud providers</li>\n<li>Hands-on experience with capacity management, cost optimization, or resource planning at scale across heterogeneous environments</li>\n<li>Strong familiarity with LLM inference optimization, batching, caching, and serving strategies</li>\n<li>Experience with Machine learning infrastructure including GPUs, TPUs, Trainium, or other AI accelerators</li>\n<li>Background designing and building CI/CD systems that automate deployment and validation across cloud environments</li>\n<li>Solid understanding of multi-region deployments, geographic routing, and global traffic management</li>\n<li>Proficiency in 
Python or Rust</li>\n</ul>\n<p><strong>Logistics</strong></p>\n<p><strong>Education requirements:</strong> We require at least a Bachelor&#39;s degree in a related field or equivalent experience. <strong>Location-based hybrid policy:</strong> Currently, we expect all staff to be in one of our offices at least 25% of the time. However, some roles may require more time in our offices.</p>\n<p><strong>Visa sponsorship:</strong> We do sponsor visas! However, we aren&#39;t able to successfully sponsor visas for every role and every candidate. But if we make you an offer, we will make every reasonable effort to get you a visa, and we retain an immigration lawyer to help with this.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_25934fbc-c50","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5107466008","x-work-arrangement":"hybrid","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":"$300,000 - $485,000 USD","x-skills-required":["Software engineering","Cloud infrastructure","Kubernetes","Infrastructure as Code","Container orchestration","LLM inference optimization","Batching","Caching","Serving strategies","Machine learning infrastructure","GPUs","TPUs","Trainium","AI accelerators","CI/CD systems","Deployment and validation","Cloud environments","Multi-region deployments","Geographic routing","Global traffic management"],"x-skills-preferred":["Python","Rust","Cloud platforms","Networking","Security","Privacy","Billing","Managed service offerings","Platform-agnostic tooling","Abstraction layers","Capacity management","Cost optimization","Resource 
planning"],"datePosted":"2026-03-08T13:49:59.956Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA | Seattle, WA"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Software engineering, Cloud infrastructure, Kubernetes, Infrastructure as Code, Container orchestration, LLM inference optimization, Batching, Caching, Serving strategies, Machine learning infrastructure, GPUs, TPUs, Trainium, AI accelerators, CI/CD systems, Deployment and validation, Cloud environments, Multi-region deployments, Geographic routing, Global traffic management, Python, Rust, Cloud platforms, Networking, Security, Privacy, Billing, Managed service offerings, Platform-agnostic tooling, Abstraction layers, Capacity management, Cost optimization, Resource planning","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":300000,"maxValue":485000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_f95fe525-8fd"},"title":"Staff Software Engineer, Inference","description":"<p><strong>About the role</strong></p>\n<p>Our Inference team is responsible for building and maintaining the critical systems that serve Claude to millions of users worldwide. We bring Claude to life by serving our models via the industry&#39;s largest compute-agnostic inference deployments. We are responsible for the entire stack from intelligent request routing to fleet-wide orchestration across diverse AI accelerators. The team has a dual mandate: maximizing compute efficiency to serve our explosive customer growth, while enabling breakthrough research by giving our scientists the high-performance inference infrastructure they need to develop next-generation models. 
We tackle complex, distributed systems challenges across multiple accelerator families and emerging AI hardware running in multiple cloud platforms.</p>\n<p><strong>As a Staff Software Engineer on our Inference team, you will work end to end, identifying and addressing key infrastructure blockers to serve Claude to millions of users while enabling breakthrough AI research. Strong candidates should have familiarity with performance optimization, distributed systems, large-scale service orchestration, and intelligent request routing. Familiarity with LLM inference optimization, batching strategies, and multi-accelerator deployments is highly encouraged but not strictly necessary.</strong></p>\n<p><strong>Strong candidates may also have experience with</strong></p>\n<ul>\n<li>High-performance, large-scale distributed systems</li>\n<li>Implementing and deploying machine learning systems at scale</li>\n<li>Load balancing, request routing, or traffic management systems</li>\n<li>LLM inference optimization, batching, and caching strategies</li>\n<li>Kubernetes and cloud infrastructure (AWS, GCP)</li>\n<li>Python or Rust</li>\n</ul>\n<p><strong>You may be a good fit if you</strong></p>\n<ul>\n<li>Have significant software engineering experience, particularly with distributed systems</li>\n<li>Are results-oriented, with a bias towards flexibility and impact</li>\n<li>Pick up slack, even if it goes outside your job description</li>\n<li>Want to learn more about machine learning systems and infrastructure</li>\n<li>Thrive in environments where technical excellence directly drives both business results and research breakthroughs</li>\n<li>Care about the societal impacts of your work</li>\n</ul>\n<p><strong>Representative projects across the org</strong></p>\n<ul>\n<li>Designing intelligent routing algorithms that optimize request distribution across thousands of accelerators</li>\n<li>Autoscaling our compute fleet to dynamically match supply with demand across production, 
research, and experimental workloads</li>\n<li>Building production-grade deployment pipelines for releasing new models to millions of users</li>\n<li>Integrating new AI accelerator platforms to maintain our hardware-agnostic competitive advantage</li>\n<li>Contributing to new inference features (e.g., structured sampling, prompt caching)</li>\n<li>Supporting inference for new model architectures</li>\n<li>Analyzing observability data to tune performance based on real-world production workloads</li>\n<li>Managing multi-region deployments and geographic routing for global customers</li>\n</ul>\n<p><strong>Deadline to apply: None. Applications will be reviewed on a rolling basis.</strong></p>\n<p><strong>Logistics</strong></p>\n<ul>\n<li>Education requirements: We require at least a Bachelor&#39;s degree in a related field or equivalent experience.</li>\n<li>Location-based hybrid policy: Currently, we expect all staff to be in one of our offices at least 25% of the time. However, some roles may require more time in our offices.</li>\n<li>Visa sponsorship: We do sponsor visas! However, we aren&#39;t able to successfully sponsor visas for every role and every candidate. But if we make you an offer, we will make every reasonable effort to get you a visa, and we retain an immigration lawyer to help with this.</li>\n</ul>\n<p><strong>We encourage you to apply even if you do not believe you meet every single qualification. Not all strong candidates will meet every single qualification as listed. Research shows that people who identify as being from underrepresented groups are more prone to experiencing imposter syndrome and doubting the strength of their candidacy, so we urge you not to exclude yourself prematurely and to submit an application if you&#39;re interested in this work.</strong></p>\n<p><strong>Your safety matters to us. To protect yourself from potential scams, remember that Anthropic recruiters only contact you from @anthropic.com email addresses. 
In some cases, we may partner with vetted recruiting agencies who will identify themselves as working on behalf of Anthropic. Be cautious of emails from other domains. Legitimate Anthropic recruiters will never ask for money, fees, or banking information before your first day. If you&#39;re ever unsure about a communication, don&#39;t click any links—visit anthropic.com/careers directly for confirmed position openings.</strong></p>\n<p><strong>How we&#39;re different</strong></p>\n<p>We believe that the highest-impact AI research will be big science. At Anthropic we work as a single cohesive team on just a few large-scale research efforts. And we value impact — advancing our long-term goals of steerable, trustworthy AI — rather than work on smaller and more specific puzzles. We view</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_f95fe525-8fd","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://job-boards.greenhouse.io","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5097742008","x-work-arrangement":"hybrid","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":"£325,000 - £390,000 GBP","x-skills-required":["performance optimization","distributed systems","large-scale service orchestration","intelligent request routing","LLM inference optimization","batching strategies","multi-accelerator deployments","Kubernetes","cloud infrastructure","Python","Rust"],"x-skills-preferred":["high-performance, large-scale distributed systems","implementing and deploying machine learning systems at scale","load balancing, request routing, or traffic management systems","caching strategies"],"datePosted":"2026-03-08T13:49:42.673Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"London, 
UK"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"performance optimization, distributed systems, large-scale service orchestration, intelligent request routing, LLM inference optimization, batching strategies, multi-accelerator deployments, Kubernetes, cloud infrastructure, Python, Rust, high-performance, large-scale distributed systems, implementing and deploying machine learning systems at scale, load balancing, request routing, or traffic management systems, caching strategies","baseSalary":{"@type":"MonetaryAmount","currency":"GBP","value":{"@type":"QuantitativeValue","minValue":325000,"maxValue":390000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_77529f19-0dc"},"title":"Manager, Solutions Architect","description":"<p><strong>About the role</strong></p>\n<p>As the founding leader of Applied AI Solutions Architecture in Korea, you will drive the adoption of frontier AI by enabling the deployment of Anthropic&#39;s products (Claude for Enterprise, Claude Code, and API) across Korean enterprises and digital-first organisations. 
You&#39;ll leverage your technical skills and consultative sales experience to drive positive AI transformation that addresses our customers&#39; business needs, meets their technical requirements, and provides a high degree of reliability and safety.</p>\n<p><strong>Responsibilities:</strong></p>\n<ul>\n<li>Build and manage the foundational team of Applied AI professionals in Seoul (Solutions Architects and Product Engineers) providing both technical guidance and career development</li>\n</ul>\n<ul>\n<li>Set goals and reviews for your team, promoting growth and output</li>\n</ul>\n<ul>\n<li>Work with a handful of highest-value enterprise customers on their overall AI adoption strategies, focusing on pre-sales technical excellence including use case scoping, technical champion building, and POC execution</li>\n</ul>\n<ul>\n<li>Partner closely with your aligned GTM leadership to understand customer requirements &amp; co-build GTM strategies to drive adoption for Korean enterprise customers</li>\n</ul>\n<ul>\n<li>Contribute to thought leadership through conference presentations, webinars, and technical content creation</li>\n</ul>\n<ul>\n<li>Own the technical portions of pre-sales engagements, ensuring your team provides compelling demos and validates enterprise customer ROI from Anthropic products</li>\n</ul>\n<ul>\n<li>Drive collaboration from cross-functional teams to influence and unify stakeholders at all levels of the organisation to drive business outcomes</li>\n</ul>\n<ul>\n<li>Travel regularly to customer sites for executive-level sessions, technical workshops, and building relationships</li>\n</ul>\n<ul>\n<li>Establish a shared vision for creating solutions that enable beneficial and safe AI in technology products</li>\n</ul>\n<ul>\n<li>Lead the vision, strategy, and execution of innovative solutions that leverage our latest models&#39; capabilities</li>\n</ul>\n<ul>\n<li>Stay current with emerging AI/ML trends and competitive landscape in the Korean 
enterprise tech sector</li>\n</ul>\n<p><strong>You may be a good fit if you have:</strong></p>\n<ul>\n<li>7+ years of experience as a Solutions Architect, Sales Engineer, or similar pre-sales technical role</li>\n</ul>\n<ul>\n<li>3+ years of technical go-to-market management experience, specifically managing pre-sales teams</li>\n</ul>\n<ul>\n<li>Native or business-level fluency in Korean and professional proficiency in English</li>\n</ul>\n<ul>\n<li>Experience working with Korean enterprise customers and understanding local business culture and decision-making processes</li>\n</ul>\n<ul>\n<li>Experience with the unique technical requirements and technical procurement process of enterprise tech companies</li>\n</ul>\n<ul>\n<li>Deep technical proficiency with enterprise AI deployments, API integrations, and production LLM use cases</li>\n</ul>\n<ul>\n<li>Have an organisational mindset and enjoy building foundational teams in a relatively unstructured environment</li>\n</ul>\n<ul>\n<li>Have excellent communication, collaboration, and coaching abilities</li>\n</ul>\n<ul>\n<li>Are comfortable dealing with highly uncertain, ambiguous, and fast-moving environments typical of the tech industry</li>\n</ul>\n<ul>\n<li>Strong executive presence and ability to foster deep relationships with technical leaders and engineering teams</li>\n</ul>\n<ul>\n<li>Have at least a high level familiarity with the architecture and operation of large language models and/or ML in general</li>\n</ul>\n<ul>\n<li>Experience with prompt engineering, LLM evaluation, and architecting AI-powered systems</li>\n</ul>\n<ul>\n<li>Make ambiguous problems clear and identify core principles that can translate across scenarios</li>\n</ul>\n<ul>\n<li>Have a passion for making powerful technology safe and societally beneficial</li>\n</ul>\n<ul>\n<li>Think creatively about the risks and benefits of new technologies, and think beyond past checklists and playbooks</li>\n</ul>\n<ul>\n<li>Stay up-to-date and 
informed by taking an active interest in emerging research and industry trends</li>\n</ul>\n<ul>\n<li>Understanding of developer tooling, SDKs, and technical integration patterns common in enterprise tech companies</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_77529f19-0dc","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/jobs","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5064817008","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Solutions Architect","Sales Engineer","Pre-sales technical role","Technical go-to-market management","Enterprise AI deployments","API integrations","Production LLM use cases","Large language models","ML in general","Prompt engineering","LLM evaluation","Architecting AI-powered systems"],"x-skills-preferred":["Korean","English","Enterprise tech companies","Developer tooling","SDKs","Technical integration patterns"],"datePosted":"2026-03-08T13:45:48.461Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Seoul, South Korea"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Solutions Architect, Sales Engineer, Pre-sales technical role, Technical go-to-market management, Enterprise AI deployments, API integrations, Production LLM use cases, Large language models, ML in general, Prompt engineering, LLM evaluation, Architecting AI-powered systems, Korean, English, Enterprise tech companies, Developer tooling, SDKs, Technical integration patterns"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_519653d2-8cc"},"title":"Manager of Solutions 
Architecture, Applied AI (Industries)","description":"<p>As the manager of the Industries Solutions Architect team within Applied AI at Anthropic, you will drive the adoption of frontier AI in partnership with the rest of the go to market organisation. Our Industries customers include Fortune 500 companies within verticals like financial services, healthcare life sciences, and retail.</p>\n<p>You will be responsible for leading and growing the pre-sales team that partners with account executives to help those companies understand and deploy Anthropic&#39;s products, including Claude for Enterprise, Claude Code, and the API. This will include leveraging your technical skills and consultative sales experience to hire great people, establish processes for the team to scale, and represent Anthropic directly at strategic customer engagements.</p>\n<p>You will hire, manage, and guide a team of pre-sales Solutions Architects by providing both technical guidance and career development. You will set goals for your team in collaboration with sales and other parts of the organisation that establish baseline expectations for performance.</p>\n<p>You will act as a technical sponsor for high-value strategic customers and advise them on their overall AI adoption strategies or use case scoping and POC execution. You will partner closely with Industries sales leadership to identify new strategies to drive adoption of Anthropic products within specific verticals or horizontal use cases.</p>\n<p>You will work with cross-functional teams like product and engineering to ensure Anthropic prioritises customer feedback or resolves blockers to adoption. You will travel to customer sites or conferences for executive-level sessions, technical workshops, and relationship building.</p>\n<p>You will establish a shared vision for creating solutions that enable beneficial and safe AI in technology products. 
You will contribute to thought leadership through conference presentations, webinars, and technical content creation. You will stay current with emerging AI/ML trends and the competitive landscape.</p>\n<p>You may be a good fit if you have 7+ years of experience as a Solutions Architect, Sales Engineer, or similar pre-sales technical role. You will have 3+ years of technical pre-sales management experience. You will have sold complex technical products to Fortune 500 companies, especially in verticals like financial services, healthcare life sciences, and retail.</p>\n<p>You will have deep technical proficiency with enterprise AI use cases, API integrations, and LLM deployments. You will thrive in building and rapidly scaling teams and processes within ambiguous and fast-moving environments. You will have excellent communication, collaboration, and coaching abilities.</p>\n<p>You will have strong executive presence and ability to foster deep relationships with technical leaders and engineering teams at Fortune 500 companies. You will have at least a high level familiarity with the architecture and operation of LLMs. You will have a passion for making powerful technology safe and societally beneficial.</p>\n<p>You will stay up-to-date and informed by taking an active interest in emerging research and industry trends within AI.</p>\n<p>Strong candidates may have experience in enterprise pre-sales leadership at scale, AI technical depth, and executive engagement. 
They may have multi-segment GTM experience and a proven track record adapting technical approaches across customer segments.</p>\n<p>The annual compensation range for this role is $270,000 - $345,000 USD.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_519653d2-8cc","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/4964610008","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$270,000 - $345,000 USD","x-skills-required":["Solutions Architecture","Sales Engineering","Pre-sales Technical Role","Enterprise AI Use Cases","API Integrations","LLM Deployments","Team Management","Process Development","Communication","Collaboration","Coaching"],"x-skills-preferred":["AI Technical Depth","Executive Engagement","Multi-Segment GTM Experience","Emerging Research and Industry Trends"],"datePosted":"2026-03-08T13:45:35.420Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA | New York City, NY"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Solutions Architecture, Sales Engineering, Pre-sales Technical Role, Enterprise AI Use Cases, API Integrations, LLM Deployments, Team Management, Process Development, Communication, Collaboration, Coaching, AI Technical Depth, Executive Engagement, Multi-Segment GTM Experience, Emerging Research and Industry 
Trends","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":270000,"maxValue":345000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_a039f741-0fa"},"title":"Manager of Solutions Architecture, Applied AI (Industries)","description":"<p><strong>About the Role</strong></p>\n<p>As the Manager of the Applied AI Architect team for Northern European Industries at Anthropic, you will drive the adoption of frontier AI by enabling the deployment of Anthropic&#39;s products (Claude for Enterprise, Claude Code, and API) across our Industries accounts: large enterprises across all non-tech verticals in the UK, Ireland and the Nordics.</p>\n<p>You&#39;ll leverage your technical skills and consultative sales experience to drive positive AI transformation that addresses our customers&#39; business needs, meets their technical requirements, and provides a high degree of reliability and safety.</p>\n<p><strong>Responsibilities</strong></p>\n<ul>\n<li>Hire, manage and mentor a team of Applied AI Architects for Northern Europe, providing both technical guidance and career development</li>\n<li>Set goals and reviews for your team, promoting growth and output</li>\n<li>Work with a handful of highest-value enterprise customers on their overall AI adoption strategies, focusing on pre-sales technical excellence including use case scoping, architecture reviews, technical champion building, technical adoption workshops and POC execution</li>\n<li>Partner closely with your aligned GTM leadership to understand customer requirements and co-build GTM strategies to drive adoption across all industry verticals</li>\n<li>Own the technical portions of pre-sales engagements, ensuring your team provides compelling demos and validates customer ROI from Anthropic products</li>\n<li>Develop scalable technical engagement frameworks and reusable assets tailored 
for all non-tech sectors, including Financial Services &amp; Insurance, Healthcare, Telco, Retail and others.</li>\n</ul>\n<p><strong>You may be a good fit if you have:</strong></p>\n<ul>\n<li>7+ years of experience as a Solutions Architect, Sales Engineer, or similar pre-sales technical role</li>\n<li>3+ years of technical go-to-market management experience, specifically managing pre-sales teams</li>\n<li>Experience working with and selling to large enterprise customers across multiple verticals</li>\n<li>Demonstrated ability to navigate and articulate complex technical, organisational and regulatory requirements of large enterprises, in particular in regulated industries (e.g. FSI, Healthcare &amp; Life Sciences)</li>\n<li>A proven knowledge of the Northern European market and its local specificities (regulatory, cultural, technical…)</li>\n<li>Experience framing and delivering enterprise AI use cases (productivity, workflow transformation) and/or scoping large enterprise adoption programs</li>\n<li>Deep technical proficiency with enterprise AI deployments, API integrations, and production LLM use cases</li>\n<li>Demonstrated ability to build scalable, repeatable processes and frameworks that work across diverse customer segments</li>\n<li>An organisational mindset and enjoy building foundational teams in a relatively unstructured environment</li>\n<li>Excellent communication, collaboration, and coaching abilities</li>\n<li>Comfort dealing with highly uncertain, ambiguous, and fast-moving environments</li>\n<li>Strong executive presence and ability to foster deep relationships with technical leaders and engineering teams</li>\n<li>High-level familiarity with the architecture and operation of LLM and/or ML in general</li>\n<li>Experience with prompt engineering, LLM evaluation, and architecting AI-powered systems</li>\n<li>Ability to make ambiguous problems clear and identify core principles that can translate across scenarios</li>\n<li>A passion for making 
powerful technology safe and societally beneficial</li>\n<li>Creative thinking about the risks and benefits of new technologies, beyond past checklists and playbooks</li>\n<li>A track record of staying current with emerging AI research and industry trends or similar.</li>\n</ul>\n<p><strong>Strong candidates may also have:</strong></p>\n<ul>\n<li>10+ years technical experience</li>\n<li>5+ years managing AI technical teams in hypergrowth</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_a039f741-0fa","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5115884008","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Solutions Architecture","Sales Engineering","Pre-sales technical role","Technical go-to-market management","Enterprise AI deployments","API integrations","Production LLM use cases","Scalable, repeatable processes and frameworks","Organisational mindset","Excellent communication, collaboration, and coaching abilities","High-level familiarity with the architecture and operation of LLM and/or ML in general","Prompt engineering, LLM evaluation, and architecting AI-powered systems"],"x-skills-preferred":["Enterprise AI use cases","Workflow transformation","Large enterprise adoption programs","Northern European market and its local specificities","Regulatory, cultural, technical…","LLM and/or ML in general","AI-powered systems"],"datePosted":"2026-03-08T13:45:31.935Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"London, UK"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Solutions Architecture, Sales 
Engineering, Pre-sales technical role, Technical go-to-market management, Enterprise AI deployments, API integrations, Production LLM use cases, Scalable, repeatable processes and frameworks, Organisational mindset, Excellent communication, collaboration, and coaching abilities, High-level familiarity with the architecture and operation of LLM and/or ML in general, Prompt engineering, LLM evaluation, and architecting AI-powered systems, Enterprise AI use cases, Workflow transformation, Large enterprise adoption programs, Northern European market and its local specificities, Regulatory, cultural, technical…, LLM and/or ML in general, AI-powered systems"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_6ad59cf1-f95"},"title":"Manager, Applied AI (Startups)","description":"<p>As the Manager of Applied AI EMEA team at Anthropic, you will drive the adoption of frontier AI by enabling the deployment of Anthropic&#39;s products (Claude for Enterprise, Claude Code, and API) across several Enterprise segments.</p>\n<p>You&#39;ll leverage your technical skills and consultative sales experience to drive positive AI transformation that addresses our customers&#39; business needs, meets their technical requirements, and provides a high degree of reliability and safety.</p>\n<p>You&#39;ll be responsible for leading &amp; growing the EMEA Applied AI, Solutions Architect team. You will establish processes and best practices for the region&#39;s technical pre-sales engagements based on your years of experience, help each team member achieve success, high productivity, and career growth, and represent Anthropic as a technical lead on some of its most important partnerships.</p>\n<p>In collaboration with the Sales, Product, and Engineering teams, you&#39;ll help enterprise tech partners incorporate leading-edge AI systems into their cutting-edge products and platforms. 
You will employ your excellent communication skills to explain and demonstrate complex solutions persuasively to technical and non-technical audiences alike. You will play a critical role in identifying opportunities to innovate and differentiate our AI systems, while maintaining our best-in-class safety standards.</p>\n<p><strong>Responsibilities:</strong></p>\n<ul>\n<li>Manage and mentor a team of Applied AI, Solutions Architect providing both technical guidance and career development</li>\n<li>Set goals and reviews for your team, promoting growth and output</li>\n<li>Work with a handful of highest-value enterprise customers on their overall AI adoption strategies, focusing on pre-sales technical excellence including use case scoping, technical champion building, and POC execution</li>\n<li>Partner closely with your aligned GTM leadership to understand customer requirements &amp; co-build GTM strategies to drive adoption for Digital Native enterprise customers</li>\n<li>Own the technical portions of pre-sales engagements, ensuring your team provides compelling demos and validates enterprise customer ROI from Anthropic products</li>\n<li>Drive collaboration from cross-functional teams to influence and unify stakeholders at all levels of the organization to drive business outcomes</li>\n<li>Travel occasionally to customer sites for executive-level sessions, technical workshops, and building relationships</li>\n<li>Establish a shared vision for creating solutions that enable beneficial and safe AI in technology products</li>\n<li>Lead the vision, strategy, and execution of innovative solutions that leverage our latest models&#39; capabilities for tech-forward use cases</li>\n<li>Contribute to thought leadership through conference presentations, webinars, and technical content creation</li>\n<li>Stay current with emerging AI/ML trends and competitive landscape in the enterprise tech sector</li>\n</ul>\n<p><strong>You may be a good fit if you 
have:</strong></p>\n<ul>\n<li>7+ years of experience as a Solutions Architect, Sales Engineer, or similar pre-sales technical role</li>\n<li>3+ years of technical go-to-market management experience, specifically managing pre-sales teams</li>\n<li>3+ years of product management experience</li>\n<li>Have experience working with and selling to Digital Native focused customers (Vertical Enterprise SaaS, Horizontal Enterprise SaaS, Consumer Technology Companies, PaaS, etc.)</li>\n<li>Experience with the unique technical requirements and technical procurement process of enterprise tech companies</li>\n<li>Deep technical proficiency with enterprise AI deployments, API integrations, and production LLM use cases</li>\n<li>Have an organizational mindset and enjoy building foundational teams in a relatively unstructured environment</li>\n<li>Have excellent communication, collaboration, and coaching abilities</li>\n<li>Are comfortable dealing with highly uncertain, ambiguous, and fast-moving environments typical of the tech industry</li>\n<li>Strong executive presence and ability to foster deep relationships with technical leaders and engineering teams</li>\n<li>Have at least a high level familiarity with the architecture and operation of large language models and/or ML in general</li>\n<li>Experience with prompt engineering, LLM evaluation, and architecting AI-powered systems</li>\n<li>Make ambiguous problems clear and identify core principles that can translate across scenarios</li>\n<li>Have a passion for making powerful technology safe and societally beneficial</li>\n<li>Think creatively about the risks and benefits of new technologies, and think beyond past checklists and playbooks</li>\n<li>Stay up-to-date and informed by taking an active interest in emerging research and industry trends</li>\n<li>Understanding of developer tooling, SDKs, and technical integration patterns common in enterprise tech companies</li>\n</ul>\n<p>The annual compensation range for this role is 
£325,000 - £390,000 GBP.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_6ad59cf1-f95","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5142110008","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"£325,000 - £390,000 GBP","x-skills-required":["Solutions Architect","Sales Engineer","Technical go-to-market management","Product management","Digital Native focused customers","Enterprise AI deployments","API integrations","Production LLM use cases","Large language models","ML in general","Prompt engineering","LLM evaluation","Architecting AI-powered systems","Developer tooling","SDKs","Technical integration patterns"],"x-skills-preferred":["Executive presence","Fostering deep relationships","Uncertain, ambiguous, and fast-moving environments","Organizational mindset","Building foundational teams","Excellent communication","Collaboration","Coaching abilities","Passion for making powerful technology safe and societally beneficial","Creative thinking about risks and benefits of new technologies"],"datePosted":"2026-03-08T13:43:54.929Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"London, UK"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Solutions Architect, Sales Engineer, Technical go-to-market management, Product management, Digital Native focused customers, Enterprise AI deployments, API integrations, Production LLM use cases, Large language models, ML in general, Prompt engineering, LLM evaluation, Architecting AI-powered systems, Developer tooling, SDKs, Technical integration patterns, Executive presence, Fostering 
deep relationships, Uncertain, ambiguous, and fast-moving environments, Organizational mindset, Building foundational teams, Excellent communication, Collaboration, Coaching abilities, Passion for making powerful technology safe and societally beneficial, Creative thinking about risks and benefits of new technologies","baseSalary":{"@type":"MonetaryAmount","currency":"GBP","value":{"@type":"QuantitativeValue","minValue":325000,"maxValue":390000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_d7e9f469-dac"},"title":"Manager of Solutions Architecture, Applied AI (Industries)","description":"<p><strong>About the Role</strong></p>\n<p>As the Manager of the Applied AI Architect team for DACH Industries at Anthropic, you will drive the adoption of frontier AI by enabling the deployment of Anthropic&#39;s products (Claude for Enterprise, Claude Code, and API) across our Industries accounts: large enterprises across all non-tech verticals in Germany, Switzerland and Austria.</p>\n<p>You&#39;ll leverage your technical skills and consultative sales experience to drive positive AI transformation that addresses our customers&#39; business needs, meets their technical requirements, and provides a high degree of reliability and safety.</p>\n<p><strong>Responsibilities</strong></p>\n<ul>\n<li>Hire, manage and mentor a team of Applied AI Architects for DACH, providing both technical guidance and career development</li>\n</ul>\n<ul>\n<li>Set goals and reviews for your team, promoting growth and output</li>\n</ul>\n<ul>\n<li>Work with a handful of highest-value enterprise customers on their overall AI adoption strategies, focusing on pre-sales technical excellence including use case scoping, architecture reviews, technical champion building, technical adoption workshops and POC execution</li>\n</ul>\n<ul>\n<li>Partner closely with your aligned GTM leadership to understand customer 
requirements and co-build GTM strategies to drive adoption across all industry verticals</li>\n</ul>\n<ul>\n<li>Own the technical portions of pre-sales engagements, ensuring your team provides compelling demos and validates customer ROI from Anthropic products</li>\n</ul>\n<ul>\n<li>Develop scalable technical engagement frameworks and reusable assets tailored for all non-tech sectors, including Financial Services &amp; Insurance, Healthcare, Telco, Retail and others.</li>\n</ul>\n<ul>\n<li>Drive collaboration from cross-functional teams to influence and unify stakeholders at all levels of the organisation to drive business outcomes</li>\n</ul>\n<ul>\n<li>Travel to customer sites for executive-level sessions, technical workshops, and building relationships</li>\n</ul>\n<ul>\n<li>Establish a shared vision for creating solutions that enable beneficial and safe AI</li>\n</ul>\n<ul>\n<li>Lead the vision, strategy, and execution of innovative solutions that leverage our latest models&#39; capabilities</li>\n</ul>\n<ul>\n<li>Contribute to thought leadership through conference presentations, webinars, and technical content creation</li>\n</ul>\n<ul>\n<li>Stay current with emerging AI/ML trends and competitive landscape across all major Industries verticals</li>\n</ul>\n<p><strong>You may be a good fit if you have:</strong></p>\n<ul>\n<li>7+ years of experience as a Solutions Architect, Sales Engineer, or similar pre-sales technical role</li>\n</ul>\n<ul>\n<li>3+ years of technical go-to-market management experience, specifically managing pre-sales teams</li>\n</ul>\n<ul>\n<li>Experience working with and selling to large enterprise customers across multiple verticals</li>\n</ul>\n<ul>\n<li>Demonstrated ability to navigate and articulate complex technical, organisational and regulatory requirements of large enterprises, in particular in regulated industries (e.g. FSI, Healthcare &amp; Life Sciences)</li>\n</ul>\n<ul>\n<li>A proven knowledge of the DACH market and its local 
specificities (regulatory, cultural, technical…)</li>\n</ul>\n<ul>\n<li>Experience framing and delivering enterprise AI use cases (productivity, workflow transformation) and/or scoping large enterprise adoption programs</li>\n</ul>\n<ul>\n<li>Deep technical proficiency with enterprise AI deployments, API integrations, and production LLM use cases</li>\n</ul>\n<ul>\n<li>Demonstrated ability to build scalable, repeatable processes and frameworks that work across diverse customer segments</li>\n</ul>\n<ul>\n<li>An organisational mindset and enjoy building foundational teams in a relatively unstructured environment</li>\n</ul>\n<ul>\n<li>Excellent communication, collaboration, and coaching abilities</li>\n</ul>\n<ul>\n<li>Comfort dealing with highly uncertain, ambiguous, and fast-moving environments</li>\n</ul>\n<ul>\n<li>Strong executive presence and ability to foster deep relationships with technical leaders and engineering teams</li>\n</ul>\n<ul>\n<li>High-level familiarity with the architecture and operation of LLM and/or ML in general</li>\n</ul>\n<ul>\n<li>Experience with prompt engineering, LLM evaluation, and architecting AI-powered systems</li>\n</ul>\n<ul>\n<li>Ability to make ambiguous problems clear and identify core principles that can translate across scenarios</li>\n</ul>\n<ul>\n<li>A passion for making powerful technology safe and societally beneficial</li>\n</ul>\n<ul>\n<li>Creative thinking about the risks and benefits of new technologies, beyond past checklists and playbooks</li>\n</ul>\n<ul>\n<li>A track record of staying current with emerging AI research and industry trends</li>\n</ul>\n<p><strong>Strong candidates may also have:</strong></p>\n<ul>\n<li>10+ years technical experience</li>\n</ul>\n<ul>\n<li>5+ years managing AI technical teams in hypergrowth</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_d7e9f469-dac","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://job-boards.greenhouse.io","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5117652008","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Solutions Architecture","Sales Engineering","Pre-sales technical role","Technical go-to-market management","Enterprise AI deployments","API integrations","Production LLM use cases","Scalable, repeatable processes and frameworks","Organisational mindset","Excellent communication, collaboration, and coaching abilities","High-level familiarity with the architecture and operation of LLM and/or ML in general","Prompt engineering, LLM evaluation, and architecting AI-powered systems"],"x-skills-preferred":["Technical leadership","Engineering teams","LLM and/or ML in general","Prompt engineering, LLM evaluation, and architecting AI-powered systems","Scalable, repeatable processes and frameworks","Organisational mindset","Excellent communication, collaboration, and coaching abilities"],"datePosted":"2026-03-08T13:43:19.178Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Munich, Germany"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Solutions Architecture, Sales Engineering, Pre-sales technical role, Technical go-to-market management, Enterprise AI deployments, API integrations, Production LLM use cases, Scalable, repeatable processes and frameworks, Organisational mindset, Excellent communication, collaboration, and coaching abilities, High-level familiarity with the architecture and operation of LLM and/or ML in general, Prompt engineering, LLM evaluation, and architecting AI-powered systems, Technical leadership, 
Engineering teams, LLM and/or ML in general, Prompt engineering, LLM evaluation, and architecting AI-powered systems, Scalable, repeatable processes and frameworks, Organisational mindset, Excellent communication, collaboration, and coaching abilities"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_d52d568d-49b"},"title":"Forward Deployed Engineer (FDE), Life Sciences","description":"<p><strong>Forward Deployed Engineer (FDE), Life Sciences - NYC</strong></p>\n<p><strong>Location</strong></p>\n<p>New York City</p>\n<p><strong>Employment Type</strong></p>\n<p>Full time</p>\n<p><strong>Location Type</strong></p>\n<p>Hybrid</p>\n<p><strong>Department</strong></p>\n<p>Model Deployment for Business</p>\n<p><strong>Compensation</strong></p>\n<ul>\n<li>$198K – $335K • Offers Equity</li>\n</ul>\n<p>The base pay offered may vary depending on multiple individualized factors, including market location, job-related knowledge, skills, and experience. If the role is non-exempt, overtime pay will be provided consistent with applicable laws. 
In addition to the salary range listed above, total compensation also includes generous equity, performance-related bonus(es) for eligible employees, and the following benefits.</p>\n<p><strong>Benefits</strong></p>\n<ul>\n<li>Medical, dental, and vision insurance for you and your family, with employer contributions to Health Savings Accounts</li>\n</ul>\n<ul>\n<li>Pre-tax accounts for Health FSA, Dependent Care FSA, and commuter expenses (parking and transit)</li>\n</ul>\n<ul>\n<li>401(k) retirement plan with employer match</li>\n</ul>\n<ul>\n<li>Paid parental leave (up to 24 weeks for birth parents and 20 weeks for non-birthing parents), plus paid medical and caregiver leave (up to 8 weeks)</li>\n</ul>\n<ul>\n<li>Paid time off: flexible PTO for exempt employees and up to 15 days annually for non-exempt employees</li>\n</ul>\n<ul>\n<li>13+ paid company holidays, and multiple paid coordinated company office closures throughout the year for focus and recharge, plus paid sick or safe time (1 hour per 30 hours worked, or more, as required by applicable state or local law)</li>\n</ul>\n<ul>\n<li>Mental health and wellness support</li>\n</ul>\n<ul>\n<li>Employer-paid basic life and disability coverage</li>\n</ul>\n<ul>\n<li>Annual learning and development stipend to fuel your professional growth</li>\n</ul>\n<ul>\n<li>Daily meals in our offices, and meal delivery credits as eligible</li>\n</ul>\n<ul>\n<li>Relocation support for eligible employees</li>\n</ul>\n<ul>\n<li>Additional taxable fringe benefits, such as charitable donation matching and wellness stipends, may also be provided.</li>\n</ul>\n<p><strong>About the team</strong></p>\n<p>OpenAI’s Forward Deployed Engineering team partners with global pharma and biotech, CROs, and research institutions to deploy existing expertise across the R&amp;D value chain to help customers design and ship production-grade AI systems. 
We operate at the intersection of customer delivery and core platform development, converting early deployments into repeatable system standards and evaluation practices that scale across regulated environments.</p>\n<p><strong>About the role</strong></p>\n<p>We are hiring a Forward Deployed Engineer (FDE) to push the frontier on what is possible today across drug discovery (e.g., target identification, molecular design, pre-clinical) and development (e.g., trial design, trial ops, biostats) by leading end-to-end deployments of our models inside life sciences organizations and research institutions. You will work with customers who are deep experts in their scientific or operational domains, translating real-world data, infrastructure, and constraints into production systems.</p>\n<p>You will measure success through production adoption, measurable workflow impact, and eval-driven feedback loops, including evaluation benchmarks and acceptance criteria, that inform product and model roadmaps. You’ll work closely with our Product, Research, Partnerships, GRC, Security, and GTM to deliver in regulated contexts, including inspection readiness with audit trails and traceable evidence.</p>\n<p>This role is based in NYC. We use a hybrid work model of 3 days in the office per week. We offer relocation assistance. 
Travel up to 50% is required.</p>\n<p><strong>In this role you will</strong></p>\n<ul>\n<li>Design and ship production systems around models, owning integrations, data provenance, reliability, and on-call readiness across research, clinical, and operational workflows.</li>\n</ul>\n<ul>\n<li>Lead discovery and scoping from pre-sales through post-sales, translating ambiguous workflow needs into hypothesis-driven problem framing, system requirements, and an execution plan with measurable endpoints.</li>\n</ul>\n<ul>\n<li>Define and enforce launch criteria for regulated contexts, including validation evidence, audit readiness, outcome metrics, and drive delivery until we demonstrate sustained production impact.</li>\n</ul>\n<ul>\n<li>Build in sensitive scientific data environments where auditability, validation, and access controls shape architecture, operating procedures, and failure handling.</li>\n</ul>\n<ul>\n<li>Run evaluation loops that measure model and system quality against workflow-specific scientific benchmarks and use results to drive model and product changes.</li>\n</ul>\n<ul>\n<li>Distill deployment learnings into hardened primitives, reference architectures, validation templates, and benchmark harnesses that scale across regulated life sciences environments.</li>\n</ul>\n<p><strong>You might thrive in this role if you</strong></p>\n<ul>\n<li>Bring 5+ years of software/ML engineering or technical deployment experience with customer-facing ownership in biotech, pharma, clinical research, or scientific software; PhD, MS, or equivalent applied experience in a life sciences relevant field encouraged.</li>\n</ul>\n<ul>\n<li>Have owned customer GenAI deployments end-to-end from scoping through production adoption, and improved them through evaluation design, error analysis, and iterative evidence generation that tightens acceptance criteria over time.</li>\n</ul>\n<ul>\n<li>Have delivered AI systems in trial design, regulatory writing, or scientific operations 
where validation strategy, auditability, compliance constraints, and reviewer expectations shaped system design and rollout.</li>\n</ul>\n<ul>\n<li>Communicate clearly across scientific, clinical, model research, technical, and executive audiences, translating technical tradeoffs into decision quality, risk posture, and measurable outcomes with credibility.</li>\n</ul>\n<ul>\n<li>Apply systems thinking with high execution standards, consistently turning failures, escalations, and audit findings into improved operating standards, validation artifacts, and process improvements.</li>\n</ul>\n<p><strong>What we offer</strong></p>\n<ul>\n<li>Competitive salary and equity package</li>\n</ul>\n<ul>\n<li>Opportunity to work with a talented team of engineers and researchers</li>\n</ul>\n<ul>\n<li>Collaborative and dynamic work environment</li>\n</ul>\n<ul>\n<li>Professional development and growth opportunities</li>\n</ul>\n<ul>\n<li>Flexible work arrangements</li>\n</ul>\n<ul>\n<li>Comprehensive benefits package</li>\n</ul>\n<ul>\n<li>Access to cutting-edge technology and resources</li>\n</ul>\n<p><strong>How to apply</strong></p>\n<p>If you are a motivated and talented individual who is passionate about AI and life sciences, we encourage you to apply for this exciting opportunity. 
Please submit your resume and a cover letter explaining why you are a strong fit for this role.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_d52d568d-49b","directApply":true,"hiringOrganization":{"@type":"Organization","name":"OpenAI","sameAs":"https://jobs.ashbyhq.com","logo":"https://logos.yubhub.co/openai.com.png"},"x-apply-url":"https://jobs.ashbyhq.com/openai/55e611d8-b284-458e-908c-baccd091d0c0","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$198K – $335K","x-skills-required":["software/ML engineering","technical deployment","customer-facing ownership","biotech","pharma","clinical research","scientific software","PhD","MS","equivalent applied experience","life sciences relevant field","GenAI deployments","evaluation design","error analysis","iterative evidence generation","validation strategy","auditability","compliance constraints","reviewer expectations","system design and rollout","scientific operations","trial design","regulatory writing","scientific operations","validation artifacts","process improvements"],"x-skills-preferred":["AI","life sciences","software development","data analysis","machine learning","deep learning","natural language processing","computer vision","robotics","autonomous systems"],"datePosted":"2026-03-06T18:41:01.225Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"New York City"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"software/ML engineering, technical deployment, customer-facing ownership, biotech, pharma, clinical research, scientific software, PhD, MS, equivalent applied experience, life sciences relevant field, GenAI deployments, evaluation design, error analysis, iterative evidence generation, validation strategy, auditability, compliance 
constraints, reviewer expectations, system design and rollout, scientific operations, trial design, regulatory writing, scientific operations, validation artifacts, process improvements, AI, life sciences, software development, data analysis, machine learning, deep learning, natural language processing, computer vision, robotics, autonomous systems","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":198000,"maxValue":335000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_28cb565e-69a"},"title":"Researcher, Health AI","description":"<p><strong>Researcher, Health AI</strong></p>\n<p><strong>Location</strong></p>\n<p>San Francisco</p>\n<p><strong>Employment Type</strong></p>\n<p>Full time</p>\n<p><strong>Department</strong></p>\n<p>Safety Systems</p>\n<p><strong>Compensation</strong></p>\n<ul>\n<li>$295K – $445K • Offers Equity</li>\n</ul>\n<p>The base pay offered may vary depending on multiple individualized factors, including market location, job-related knowledge, skills, and experience. If the role is non-exempt, overtime pay will be provided consistent with applicable laws. 
In addition to the salary range listed above, total compensation also includes generous equity, performance-related bonus(es) for eligible employees, and the following benefits.</p>\n<ul>\n<li>Medical, dental, and vision insurance for you and your family, with employer contributions to Health Savings Accounts</li>\n</ul>\n<ul>\n<li>Pre-tax accounts for Health FSA, Dependent Care FSA, and commuter expenses (parking and transit)</li>\n</ul>\n<ul>\n<li>401(k) retirement plan with employer match</li>\n</ul>\n<ul>\n<li>Paid parental leave (up to 24 weeks for birth parents and 20 weeks for non-birthing parents), plus paid medical and caregiver leave (up to 8 weeks)</li>\n</ul>\n<ul>\n<li>Paid time off: flexible PTO for exempt employees and up to 15 days annually for non-exempt employees</li>\n</ul>\n<ul>\n<li>13+ paid company holidays, and multiple paid coordinated company office closures throughout the year for focus and recharge, plus paid sick or safe time (1 hour per 30 hours worked, or more, as required by applicable state or local law)</li>\n</ul>\n<ul>\n<li>Mental health and wellness support</li>\n</ul>\n<ul>\n<li>Employer-paid basic life and disability coverage</li>\n</ul>\n<ul>\n<li>Annual learning and development stipend to fuel your professional growth</li>\n</ul>\n<ul>\n<li>Daily meals in our offices, and meal delivery credits as eligible</li>\n</ul>\n<ul>\n<li>Relocation support for eligible employees</li>\n</ul>\n<ul>\n<li>Additional taxable fringe benefits, such as charitable donation matching and wellness stipends, may also be provided.</li>\n</ul>\n<p>More details about our benefits are available to candidates during the hiring process.</p>\n<p>This role is at-will and OpenAI reserves the right to modify base pay and other compensation components at any time based on individual performance, team or company results, or market conditions.</p>\n<p><strong>About the Team</strong></p>\n<p>The Safety Systems team is dedicated to ensuring the safety, 
robustness, and reliability of AI models towards their deployment in the real world.</p>\n<p>OpenAI’s charter calls on us to ensure the benefits of AI are distributed widely. Our Health AI team is focused on enabling universal access to high-quality medical information. We work at the intersection of AI safety research and healthcare applications, aiming to create trustworthy AI models that can assist medical professionals and improve patient outcomes.</p>\n<p><strong>About the Role</strong></p>\n<p>We’re seeking strong researchers who are passionate about advancing AI safety and improving global health outcomes. As a Research Scientist, you will contribute to the development of safe and effective AI models for healthcare applications. You will implement practical and general methods to improve the behavior, knowledge, and reasoning of our models in these settings. This will require research into safety and alignment techniques that we aim to generalize towards safe and beneficial AGI.</p>\n<p>This role is based in San Francisco, CA. 
We use a hybrid work model of 3 days in the office per week and offer relocation assistance to new employees.</p>\n<p><strong>In this role, you will:</strong></p>\n<ul>\n<li>Design and apply practical and scalable methods to improve safety and reliability of our models, including RLHF, automated red teaming, scalable oversight, etc.</li>\n</ul>\n<ul>\n<li>Evaluate methods using health-related data, ensuring models provide accurate, reliable, and trustworthy information.</li>\n</ul>\n<ul>\n<li>Build reusable libraries for applying general alignment techniques to our models.</li>\n</ul>\n<ul>\n<li>Proactively understand the safety of our models and systems, identifying areas of risk.</li>\n</ul>\n<ul>\n<li>Work with cross-team stakeholders to integrate methods in core model training and launch safety improvements in OpenAI’s products.</li>\n</ul>\n<p><strong>You might thrive in this role if you:</strong></p>\n<ul>\n<li>Are excited about OpenAI’s mission of ensuring AGI is universally beneficial and are aligned with OpenAI’s charter.</li>\n</ul>\n<ul>\n<li>Demonstrate passion for AI safety and improving global health outcomes.</li>\n</ul>\n<ul>\n<li>Have 4+ years of experience with deep learning research and LLMs, especially practical alignment topics such as RLHF, automated red teaming, scalable oversight, etc.</li>\n</ul>\n<ul>\n<li>Hold a Ph.D. 
or other degree in computer science, AI, machine learning, or a related field.</li>\n</ul>\n<ul>\n<li>Stay goal-oriented instead of method-oriented, and are not afraid of unglamorous but high-value work when needed.</li>\n</ul>\n<ul>\n<li>Possess experience making practical model improvements for AI model deployment.</li>\n</ul>\n<ul>\n<li>Own problems end-to-end, and are willing to pick up whatever knowledge you&#39;re missing to get the job done.</li>\n</ul>\n<ul>\n<li>Are a team player who enjoys collaborative work environments.</li>\n</ul>\n<ul>\n<li>Bonus: possess experience in health-related AI research or deployments.</li>\n</ul>\n<p><strong>About OpenAI</strong></p>\n<p>OpenAI is an AI research and deployment company dedicated to ensuring that general-purpose artificial intelligence benefits all of humanity. We push the boundaries of the capabilities of AI systems and seek to safely deploy them to the world through our products. AI is an extremely powerful tool that must be created with safety and human needs at its core, and to achieve our mission, we must encompass and value the many different perspectives, voices, and experiences that form the full spectrum of humanity.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_28cb565e-69a","directApply":true,"hiringOrganization":{"@type":"Organization","name":"OpenAI","sameAs":"https://jobs.ashbyhq.com","logo":"https://logos.yubhub.co/openai.com.png"},"x-apply-url":"https://jobs.ashbyhq.com/openai/bcbe08e3-9593-431d-bc99-37e35e035742","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$295K – $445K • Offers Equity","x-skills-required":["Deep learning research","LLMs","RLHF","Automated red teaming","Scalable oversight","Health-related data","AI safety research","Healthcare applications","Trustworthy AI models","Medical 
professionals","Patient outcomes","Ph.D. or other degree in computer science, AI, machine learning, or a related field"],"x-skills-preferred":["Team player","Collaborative work environments","Health-related AI research or deployments"],"datePosted":"2026-03-06T18:40:30.820Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Deep learning research, LLMs, RLHF, Automated red teaming, Scalable oversight, Health-related data, AI safety research, Healthcare applications, Trustworthy AI models, Medical professionals, Patient outcomes, Ph.D. or other degree in computer science, AI, machine learning, or a related field, Team player, Collaborative work environments, Health-related AI research or deployments","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":295000,"maxValue":445000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_b4b28851-8f4"},"title":"Forward Deployed Engineer (FDE), Life Sciences - SF","description":"<p><strong>Forward Deployed Engineer (FDE), Life Sciences - SF</strong></p>\n<p><strong>Location</strong></p>\n<p>San Francisco</p>\n<p><strong>Employment Type</strong></p>\n<p>Full time</p>\n<p><strong>Location Type</strong></p>\n<p>Hybrid</p>\n<p><strong>Department</strong></p>\n<p>Model Deployment for Business</p>\n<p><strong>Compensation</strong></p>\n<ul>\n<li>$198K – $335K • Offers Equity</li>\n</ul>\n<p>The base pay offered may vary depending on multiple individualized factors, including market location, job-related knowledge, skills, and experience. If the role is non-exempt, overtime pay will be provided consistent with applicable laws. 
In addition to the salary range listed above, total compensation also includes generous equity, performance-related bonus(es) for eligible employees, and the following benefits.</p>\n<p><strong>Benefits</strong></p>\n<ul>\n<li>Medical, dental, and vision insurance for you and your family, with employer contributions to Health Savings Accounts</li>\n</ul>\n<ul>\n<li>Pre-tax accounts for Health FSA, Dependent Care FSA, and commuter expenses (parking and transit)</li>\n</ul>\n<ul>\n<li>401(k) retirement plan with employer match</li>\n</ul>\n<ul>\n<li>Paid parental leave (up to 24 weeks for birth parents and 20 weeks for non-birthing parents), plus paid medical and caregiver leave (up to 8 weeks)</li>\n</ul>\n<ul>\n<li>Paid time off: flexible PTO for exempt employees and up to 15 days annually for non-exempt employees</li>\n</ul>\n<ul>\n<li>13+ paid company holidays, and multiple paid coordinated company office closures throughout the year for focus and recharge, plus paid sick or safe time (1 hour per 30 hours worked, or more, as required by applicable state or local law)</li>\n</ul>\n<ul>\n<li>Mental health and wellness support</li>\n</ul>\n<ul>\n<li>Employer-paid basic life and disability coverage</li>\n</ul>\n<ul>\n<li>Annual learning and development stipend to fuel your professional growth</li>\n</ul>\n<ul>\n<li>Daily meals in our offices, and meal delivery credits as eligible</li>\n</ul>\n<ul>\n<li>Relocation support for eligible employees</li>\n</ul>\n<ul>\n<li>Additional taxable fringe benefits, such as charitable donation matching and wellness stipends, may also be provided.</li>\n</ul>\n<p><strong>About the team</strong></p>\n<p>OpenAI’s Forward Deployed Engineering team partners with global pharma and biotech, CROs, and research institutions to deploy existing expertise across the R&amp;D value chain to help customers design and ship production-grade AI systems. 
We operate at the intersection of customer delivery and core platform development, converting early deployments into repeatable system standards and evaluation practices that scale across regulated environments.</p>\n<p><strong>About the role</strong></p>\n<p>We are hiring a Forward Deployed Engineer (FDE) to push the frontier on what is possible today across drug discovery (e.g., target identification, molecular design, pre-clinical) and development (e.g., trial design, trial ops, biostats) by leading end-to-end deployments of our models inside life sciences organizations and research institutions. You will work with customers who are deep experts in their scientific or operational domains, translating real-world data, infrastructure, and constraints into production systems.</p>\n<p><strong>Responsibilities</strong></p>\n<ul>\n<li>Design and ship production systems around models, owning integrations, data provenance, reliability, and on-call readiness across research, clinical, and operational workflows.</li>\n</ul>\n<ul>\n<li>Lead discovery and scoping from pre-sales through post-sales, translating ambiguous workflow needs into hypothesis-driven problem framing, system requirements, and an execution plan with measurable endpoints.</li>\n</ul>\n<ul>\n<li>Define and enforce launch criteria for regulated contexts, including validation evidence, audit readiness, outcome metrics, and drive delivery until we demonstrate sustained production impact.</li>\n</ul>\n<ul>\n<li>Build in sensitive scientific data environments where auditability, validation, and access controls shape architecture, operating procedures, and failure handling.</li>\n</ul>\n<ul>\n<li>Run evaluation loops that measure model and system quality against workflow-specific scientific benchmarks and use results to drive model and product changes.</li>\n</ul>\n<ul>\n<li>Distill deployment learnings into hardened primitives, reference architectures, validation templates, and benchmark harnesses that scale 
across regulated life sciences environments.</li>\n</ul>\n<p><strong>Requirements</strong></p>\n<ul>\n<li>Bring 5+ years of software/ML engineering or technical deployment experience with customer-facing ownership in biotech, pharma, clinical research, or scientific software; PhD, MS, or equivalent applied experience in a life sciences relevant field encouraged.</li>\n</ul>\n<ul>\n<li>Have owned customer GenAI deployments end-to-end from scoping through production adoption, and improved them through evaluation design, error analysis, and iterative evidence generation that tightens acceptance criteria over time.</li>\n</ul>\n<ul>\n<li>Have delivered AI systems in trial design, regulatory writing, or scientific operations where validation strategy, auditability, compliance constraints, and reviewer expectations shaped system design and rollout.</li>\n</ul>\n<ul>\n<li>Communicate clearly across scientific, clinical, model research, technical, and executive audiences, translating technical tradeoffs into decision quality, risk posture, and measurable outcomes with credibility.</li>\n</ul>\n<ul>\n<li>Apply systems thinking with high execution standards, consistently turning failures, escalations, and audit findings into improved operating standards, validation artifacts, and process improvements.</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_b4b28851-8f4","directApply":true,"hiringOrganization":{"@type":"Organization","name":"OpenAI","sameAs":"https://jobs.ashbyhq.com","logo":"https://logos.yubhub.co/openai.com.png"},"x-apply-url":"https://jobs.ashbyhq.com/openai/c6e5f4a6-8ab1-4653-be9d-e2bca259e84a","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$198K – $335K","x-skills-required":["software/ML engineering","technical deployment","customer-facing ownership","biotech","pharma","clinical research","scientific 
software","PhD","MS","equivalent applied experience","life sciences relevant field","GenAI deployments","evaluation design","error analysis","iterative evidence generation","acceptance criteria","AI systems","trial design","regulatory writing","scientific operations","validation strategy","auditability","compliance constraints","reviewer expectations","systems thinking","high execution standards"],"x-skills-preferred":[],"datePosted":"2026-03-06T18:40:11.299Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"software/ML engineering, technical deployment, customer-facing ownership, biotech, pharma, clinical research, scientific software, PhD, MS, equivalent applied experience, life sciences relevant field, GenAI deployments, evaluation design, error analysis, iterative evidence generation, acceptance criteria, AI systems, trial design, regulatory writing, scientific operations, validation strategy, auditability, compliance constraints, reviewer expectations, systems thinking, high execution standards","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":198000,"maxValue":335000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_50d6a6cf-6c0"},"title":"AI Success Engineer","description":"<p><strong>AI Success Engineer - São Paulo, Brazil</strong></p>\n<p><strong>Location</strong></p>\n<p>São Paulo</p>\n<p><strong>Employment Type</strong></p>\n<p>Full time</p>\n<p><strong>Department</strong></p>\n<p><strong>About the Team</strong></p>\n<p>OpenAI’s AI Success Engineer team partners with the world’s most ambitious organisations to translate cutting-edge AI into real business value. We guide customers from first deployment through scaled enterprise adoption. 
Our work spans technical integration and enablement, workflow transformation, and sustained program and product delivery.</p>\n<p>Our customers range from fast-growing digital natives to the largest global enterprises, government agencies, and educational institutions. Every engagement is an opportunity to shape how AI changes work, productivity, and innovation. This role sits at the centre of that mission.</p>\n<p><strong>About the Role</strong></p>\n<p>The AI Success Engineer role is the primary post-sales point of contact for OpenAI’s most important customers. You are responsible for driving account health and adoption, ensuring technical readiness, identifying new use cases, and delivering measurable value to our customers with OpenAI’s ambitiously growing platform.</p>\n<p>This role blends technical depth, program management, customer advisory, and product influence. You will partner deeply with customer teams, map workflows, lead configuration, oversee deployment plans, and guide customers toward high-impact use cases that showcase the full value of our platform.</p>\n<p>You will work closely with Sales, Solutions Architecture, Product, and Research to ensure the customer experience is connected and successful across every touchpoint. 
Success in this role means accelerating adoption, increasing customer activation depth, guiding strategic use cases that get to production, and helping customers demonstrate tangible business impact.</p>\n<p>This role is based in São Paulo (office 3x a week) and we provide relocation support to new employees.</p>\n<p><strong>In this role, you will:</strong></p>\n<ul>\n<li>Lead the technical relationship for post-sale customers and act as their trusted advisor on deployment, adoption, and value realisation.</li>\n</ul>\n<ul>\n<li>Own account health, adoption velocity, and ongoing technical deployment and success across your portfolio.</li>\n</ul>\n<ul>\n<li>Be an expert in all of OpenAI products across our API and agentic platform, Codex, ChatGPT Enterprise, and more and conduct technical enablement and configuration sessions across them.</li>\n</ul>\n<ul>\n<li>Identify and validate use cases by embedding with customer teams to understand workflows and pain points.</li>\n</ul>\n<ul>\n<li>Lead account-level coordination across multiple work streams, including new product activation, change management, and customer rollout and deployment planning.</li>\n</ul>\n<ul>\n<li>Build strong relationships with executive sponsors and technical stakeholders and help align business goals with OpenAI capabilities.</li>\n</ul>\n<ul>\n<li>Translate customer objectives into an actionable adoption roadmap with clear sequencing, milestones, and KPIs.</li>\n</ul>\n<ul>\n<li>Partner with Solutions Architecture, Product, Engineering and Research by surfacing customer feedback, field patterns, and technical blockers and act as a cross-functional navigator who keeps teams aligned, informed, and moving toward customer outcomes.</li>\n</ul>\n<ul>\n<li>Guide value realisation and measure impact through baselines, KPI definition, and post-deployment reporting.</li>\n</ul>\n<ul>\n<li>Facilitate workshops on use case design, adoption best practices, champion building, and internal 
enablement.</li>\n</ul>\n<ul>\n<li>Help drive expansions by identifying high-leverage opportunities where OpenAI’s platform can power new workflows or lines of business.</li>\n</ul>\n<ul>\n<li>Serve as the technical advisor for existing customer implementations by guiding and optimising account setup, configuration, etc.</li>\n</ul>\n<p><strong>You’ll thrive in this role if you:</strong></p>\n<ul>\n<li>8+ years of experience in technical customer-facing roles such as technical account management, technical GenAI consulting or deployment roles, solutions architecture, technical delivery leadership, customer architecture or engineering, or other deep technical enterprise adoption work.</li>\n</ul>\n<ul>\n<li>Deep, hands-on knowledge of OpenAI product capabilities, APIs, SDKs, connectors, and common integration patterns and able to explain model behaviour, limitations, technical trade-offs, embeddings, retrieval augmentation, and approaches to fine-tuning or custom model usage.</li>\n</ul>\n<ul>\n<li>Understanding and familiarity with coding languages like Python or JavaScript, and comfort with REST APIs, SDKs, automation, CI/CD, containers, and cloud platforms.</li>\n</ul>\n<ul>\n<li>Can translate technical concepts into clear business language and help customers understand the strategic impact of AI technologies.</li>\n</ul>\n<ul>\n<li>Can show a strong record of driving technical deployments with hands-on customer work and owning impactful adoption and value for large enterprise customers with complex environments and multiple stakeholders.</li>\n</ul>\n<ul>\n<li>Are comfortable embedding with customers to map workflows, identify requirements, and diagnose adoption challenges.</li>\n</ul>\n<ul>\n<li>Have excellent project and program management instincts and can lead multi-work stream initiatives with clarity and structure.</li>\n</ul>\n<ul>\n<li>Enjoy being a thought partner for C-level stakeholders while also diving deep with technical 
teams.</li>\n</ul>\n<ul>\n<li>Operate with high ownership and can manage fast decision-making, context switching, and dynamic customer needs.</li>\n</ul>\n<ul>\n<li>Have a strong record of driving technical deployments with hands-on customer work and owning impactful adoption and value for large enterprise customers with complex environments and multiple stakeholders.</li>\n</ul>\n<p><strong>About OpenAI</strong></p>\n<p>OpenAI is an AI research and deployment company dedicated to ensuring that general-purpose artificial intelligence benefits all of humanity. We push the boundaries of the capabilities of AI systems and seek to safely deploy them to the world through our products. AI is an extremely powerful tool that must be created with safety and human needs at its core, and to achieve our mission, we must encompass and value the many different perspectives, voices, and experiences that form the full spectrum of human endeavour.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_50d6a6cf-6c0","directApply":true,"hiringOrganization":{"@type":"Organization","name":"OpenAI","sameAs":"https://jobs.ashbyhq.com","logo":"https://logos.yubhub.co/openai.com.png"},"x-apply-url":"https://jobs.ashbyhq.com/openai/c7d41e7c-7e84-4af5-85b9-bbc1a3b08e87","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Deep, hands-on knowledge of OpenAI product capabilities, APIs, SDKs, connectors, and common integration patterns","Understanding and familiarity with coding languages like Python or JavaScript","Comfort with REST APIs, SDKs, automation, CI/CD, containers, and cloud platforms","Ability to explain model behaviour, limitations, technical trade-offs, embeddings, retrieval augmentation, and approaches to fine-tuning or custom model usage","Strong record of driving technical deployments 
with hands-on customer work and owning impactful adoption and value for large enterprise customers with complex environments and multiple stakeholders"],"x-skills-preferred":["Technical account management","Technical GenAI consulting or deployment roles","Solutions architecture","Technical delivery leadership","Customer architecture or engineering"],"datePosted":"2026-03-06T18:38:42.587Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"São Paulo"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Deep, hands-on knowledge of OpenAI product capabilities, APIs, SDKs, connectors, and common integration patterns, Understanding and familiarity with coding languages like Python or JavaScript, Comfort with REST APIs, SDKs, automation, CI/CD, containers, and cloud platforms, Ability to explain model behaviour, limitations, technical trade-offs, embeddings, retrieval augmentation, and approaches to fine-tuning or custom model usage, Strong record of driving technical deployments with hands-on customer work and owning impactful adoption and value for large enterprise customers with complex environments and multiple stakeholders, Technical account management, Technical GenAI consulting or deployment roles, Solutions architecture, Technical delivery leadership, Customer architecture or engineering"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_f6802a94-1d7"},"title":"AI Deployment Engineer, Gov","description":"<p><strong>Job Posting</strong></p>\n<p><strong>AI Deployment Engineer, Gov</strong></p>\n<p><strong>Location</strong></p>\n<p>Washington, DC</p>\n<p><strong>Employment Type</strong></p>\n<p>Full time</p>\n<p><strong>Location Type</strong></p>\n<p>Hybrid</p>\n<p><strong>Department</strong></p>\n<p>OpenAI for Gov</p>\n<p><strong>Compensation</strong></p>\n<ul>\n<li>Remote - Zone A: $137K – $250.2K • Offers 
Equity</li>\n</ul>\n<p>The base pay offered may vary depending on multiple individualized factors, including market location, job-related knowledge, skills, and experience. If the role is non-exempt, overtime pay will be provided consistent with applicable laws. In addition to the salary range listed above, total compensation also includes generous equity, performance-related bonus(es) for eligible employees, and the following benefits.</p>\n<ul>\n<li>Medical, dental, and vision insurance for you and your family, with employer contributions to Health Savings Accounts</li>\n</ul>\n<ul>\n<li>Pre-tax accounts for Health FSA, Dependent Care FSA, and commuter expenses (parking and transit)</li>\n</ul>\n<ul>\n<li>401(k) retirement plan with employer match</li>\n</ul>\n<ul>\n<li>Paid parental leave (up to 24 weeks for birth parents and 20 weeks for non-birthing parents), plus paid medical and caregiver leave (up to 8 weeks)</li>\n</ul>\n<ul>\n<li>Paid time off: flexible PTO for exempt employees and up to 15 days annually for non-exempt employees</li>\n</ul>\n<ul>\n<li>13+ paid company holidays, and multiple paid coordinated company office closures throughout the year for focus and recharge, plus paid sick or safe time (1 hour per 30 hours worked, or more, as required by applicable state or local law)</li>\n</ul>\n<ul>\n<li>Mental health and wellness support</li>\n</ul>\n<ul>\n<li>Employer-paid basic life and disability coverage</li>\n</ul>\n<ul>\n<li>Annual learning and development stipend to fuel your professional growth</li>\n</ul>\n<ul>\n<li>Daily meals in our offices, and meal delivery credits as eligible</li>\n</ul>\n<ul>\n<li>Relocation support for eligible employees</li>\n</ul>\n<ul>\n<li>Additional taxable fringe benefits, such as charitable donation matching and wellness stipends, may also be provided.</li>\n</ul>\n<p>More details about our benefits are available to candidates during the hiring process.</p>\n<p>This role is at-will and OpenAI reserves the right 
to modify base pay and other compensation components at any time based on individual performance, team or company results, or market conditions.</p>\n<p><strong>About the team</strong></p>\n<p>The AI Deployment Engineering team is responsible for ensuring the safe and effective deployment of Generative AI applications. We act as a trusted advisor and thought partner for our customers, working to build an effective backlog of GenAI use cases for their industry and drive them to production through strong technical guidance. As the founding AI Deployment Engineer in the Public Sector segment, you’ll help government agencies transform their organization through solutions such as automated content generation, contextual search, and novel applications that make use of our newest, most exciting models and technology.</p>\n<p><strong>About the Role</strong></p>\n<p>We are looking for a solutions-oriented technical leader to partner with our public sector customers and ensure they achieve tangible value with GenAI. You will pair with government agencies (federal, state, and local), policymakers, and other public institutions to establish a GenAI strategy and identify the highest value applications. You’ll then partner with their technical teams, subject matter experts, systems integrators, and implementation partners to move from prototype through production. You’ll take a holistic view of their needs and design an architecture using the OpenAI API and other services to maximize customer value. You will collaborate closely with Sales, Solutions Engineering, Global Affairs, Applied Research, and Product teams.</p>\n<p>This role is based in Washington, DC. 
We offer relocation support to new employees.</p>\n<p><strong>In this role, you will:</strong></p>\n<ul>\n<li>Deeply embed with our most sophisticated public sector customers as the technical lead, serving as their technical thought partner to ideate and build novel applications on our API.</li>\n</ul>\n<ul>\n<li>Work with senior customer stakeholders to identify the best applications of GenAI in their industry and to build/qualify a comprehensive backlog to support their AI roadmap.</li>\n</ul>\n<ul>\n<li>Intervene directly to accelerate customer time to value through building hands-on prototypes and/or by delivering impactful strategic guidance, often in collaboration with systems integrators and implementation partners.</li>\n</ul>\n<ul>\n<li>Forge and manage relationships with our customers’ and implementation partners’ leadership and stakeholders to ensure the successful deployment and scale of their applications.</li>\n</ul>\n<ul>\n<li>Contribute to our open-source developer and enterprise resources.</li>\n</ul>\n<ul>\n<li>Scale the AI Deployment Engineering function through sharing knowledge, codifying best practices, and publishing notebooks to our internal and external repositories.</li>\n</ul>\n<ul>\n<li>Validate, synthesize, and deliver high-signal feedback to the Product, Engineering, and Research teams.</li>\n</ul>\n<p><strong>You’ll thrive in this role if you:</strong></p>\n<ul>\n<li>Bring 7+ years of technical consulting (or equivalent) experience with public sector customers (U.S. 
federal preferred), bridging technical teams and senior stakeholders.</li>\n</ul>\n<ul>\n<li>Active TS/SCI clearance</li>\n</ul>\n<ul>\n<li>Have successfully led GenAI or traditional ML implementations for government agencies in close collaboration with systems integrators and implementation partners.</li>\n</ul>\n<ul>\n<li>Understand network and cloud architecture, including experience with on-premise deployments.</li>\n</ul>\n<ul>\n<li>Are an effective and polished communicator who can translate business and technical topics to all audiences.</li>\n</ul>\n<ul>\n<li>Have industry experience in programming languages like Python or Javascript.</li>\n</ul>\n<ul>\n<li>Own problems end-to-end and are willing to pick up whatever knowledge you&#39;re missing to get the job done.</li>\n</ul>\n<ul>\n<li>Have a humble attitude, an eagerness to help your colleagues, and a desire to do whatever it takes to make the team succeed.</li>\n</ul>\n<ul>\n<li>Are an effective, high throughput operator who can drive multiple concurrent projects and prioritize ruthlessly.</li>\n</ul>\n<p><strong>About OpenAI</strong></p>\n<p>OpenAI is an AI research and deployment company dedicated to ensuring that general-purpose artificial intelligence benefits all of humanity.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_f6802a94-1d7","directApply":true,"hiringOrganization":{"@type":"Organization","name":"OpenAI","sameAs":"https://jobs.ashbyhq.com","logo":"https://logos.yubhub.co/openai.com.png"},"x-apply-url":"https://jobs.ashbyhq.com/openai/be7b1bf5-37ab-40f7-9ec1-e9732244f12a","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$137K – $250.2K","x-skills-required":["Technical consulting","Public sector customers","GenAI","Traditional ML","Network and cloud architecture","On-premise 
deployments","Python","Javascript","TS/SCI clearance"],"x-skills-preferred":["Effective and polished communicator","Humble attitude","Eagerness to help colleagues","Desire to make team succeed","High throughput operator","Ability to drive multiple concurrent projects"],"datePosted":"2026-03-06T18:37:54.305Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Washington, DC"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Technical consulting, Public sector customers, GenAI, Traditional ML, Network and cloud architecture, On-premise deployments, Python, Javascript, TS/SCI clearance, Effective and polished communicator, Humble attitude, Eagerness to help colleagues, Desire to make team succeed, High throughput operator, Ability to drive multiple concurrent projects","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":137000,"maxValue":250200,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_0da0881e-799"},"title":"Forward Deployed Engineer (FDE), Life Sciences","description":"<p><strong>Forward Deployed Engineer (FDE), Life Sciences - Dublin</strong></p>\n<p><strong>Location</strong></p>\n<p>Dublin, Ireland</p>\n<p><strong>Employment Type</strong></p>\n<p>Full time</p>\n<p><strong>Location Type</strong></p>\n<p>Hybrid</p>\n<p><strong>Department</strong></p>\n<p>Model Deployment for Business</p>\n<p><strong>About the team</strong></p>\n<p>OpenAI’s Forward Deployed Engineering team partners with global pharma and biotech, CROs, and research institutions to deploy existing expertise across the R&amp;D value chain to help customers design and ship production-grade AI systems. 
We operate at the intersection of customer delivery and core platform development, converting early deployments into repeatable system standards and evaluation practices that scale across regulated environments.</p>\n<p><strong>About the role</strong></p>\n<p>We are hiring a Forward Deployed Engineer (FDE) to push the frontier on what is possible today across drug discovery (e.g., target identification, molecular design, pre-clinical) and development (e.g., trial design, trial ops, biostats) by leading end-to-end deployments of our models inside life sciences organizations and research institutions. You will work with customers who are deep experts in their scientific or operational domains, translating real-world data, infrastructure, and constraints into production systems.</p>\n<p>You will measure success through production adoption, measurable workflow impact, and eval-driven feedback loops, including evaluation benchmarks and acceptance criteria, that inform product and model roadmaps. You’ll work closely with our Product, Research, Partnerships, GRC, Security, and GTM to deliver in regulated contexts, including inspection readiness with audit trails and traceable evidence.</p>\n<p>This role is based in Dublin. We use a hybrid work model of 3 days in the office per week. We offer relocation assistance. 
Travel up to 50% is required.</p>\n<p><strong>In this role you will</strong></p>\n<ul>\n<li>Design and ship production systems around models, owning integrations, data provenance, reliability, and on-call readiness across research, clinical, and operational workflows.</li>\n</ul>\n<ul>\n<li>Lead discovery and scoping from pre-sales through post-sales, translating ambiguous workflow needs into hypothesis-driven problem framing, system requirements, and an execution plan with measurable endpoints.</li>\n</ul>\n<ul>\n<li>Define and enforce launch criteria for regulated contexts, including validation evidence, audit readiness, outcome metrics, and drive delivery until we demonstrate sustained production impact.</li>\n</ul>\n<ul>\n<li>Build in sensitive scientific data environments where auditability, validation, and access controls shape architecture, operating procedures, and failure handling.</li>\n</ul>\n<ul>\n<li>Run evaluation loops that measure model and system quality against workflow-specific scientific benchmarks and use results to drive model and product changes.</li>\n</ul>\n<ul>\n<li>Distill deployment learnings into hardened primitives, reference architectures, validation templates, and benchmark harnesses that scale across regulated life sciences environments.</li>\n</ul>\n<p><strong>You might thrive in this role if you</strong></p>\n<ul>\n<li>Bring 5+ years of software/ML engineering or technical deployment experience with customer-facing ownership in biotech, pharma, clinical research, or scientific software; PhD, MS, or equivalent applied experience in a life sciences relevant field encouraged.</li>\n</ul>\n<ul>\n<li>Have owned customer GenAI deployments end-to-end from scoping through production adoption, and improved them through evaluation design, error analysis, and iterative evidence generation that tightens acceptance criteria over time.</li>\n</ul>\n<ul>\n<li>Have delivered AI systems in trial design, regulatory writing, or scientific operations 
where validation strategy, auditability, compliance constraints, and reviewer expectations shaped system design and rollout.</li>\n</ul>\n<ul>\n<li>Communicate clearly across scientific, clinical, model research, technical, and executive audiences, translating technical tradeoffs into decision quality, risk posture, and measurable outcomes with credibility.</li>\n</ul>\n<ul>\n<li>Apply systems thinking with high execution standards, consistently turning failures, escalations, and audit findings into improved operating standards, validation artifacts, and repeatable deployment playbooks.</li>\n</ul>\n<p><strong>About OpenAI</strong></p>\n<p>OpenAI is an AI research and deployment company dedicated to ensuring that general-purpose artificial intelligence benefits all of humanity. We push the boundaries of the capabilities of AI systems and seek to safely deploy them to the world through our products. AI is an extremely powerful tool that must be created with safety and human needs at its core, and to achieve our mission, we must encompass and value the many different perspectives, voices, and experiences that form the full spectrum of humanity.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_0da0881e-799","directApply":true,"hiringOrganization":{"@type":"Organization","name":"OpenAI","sameAs":"https://jobs.ashbyhq.com","logo":"https://logos.yubhub.co/openai.com.png"},"x-apply-url":"https://jobs.ashbyhq.com/openai/710316df-bd8d-4f65-901f-2e5da7fb8aa8","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"Competitive salary and benefits package","x-skills-required":["software/ML engineering","customer-facing ownership","biotech","pharma","clinical research","scientific software","PhD","MS","equivalent applied experience in a life sciences relevant field"],"x-skills-preferred":["GenAI 
deployments","evaluation design","error analysis","iterative evidence generation","validation strategy","auditability","compliance constraints","reviewer expectations","systems thinking","high execution standards"],"datePosted":"2026-03-06T18:34:42.563Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Dublin"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"software/ML engineering, customer-facing ownership, biotech, pharma, clinical research, scientific software, PhD, MS, equivalent applied experience in a life sciences relevant field, GenAI deployments, evaluation design, error analysis, iterative evidence generation, validation strategy, auditability, compliance constraints, reviewer expectations, systems thinking, high execution standards"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_827b146c-14c"},"title":"Forward Deployed Engineer (FDE), Life Sciences","description":"<p><strong>Forward Deployed Engineer (FDE), Life Sciences - Munich</strong></p>\n<p><strong>Location</strong></p>\n<p>Munich, Germany</p>\n<p><strong>Employment Type</strong></p>\n<p>Full time</p>\n<p><strong>Location Type</strong></p>\n<p>Hybrid</p>\n<p><strong>Department</strong></p>\n<p>Model Deployment for Business</p>\n<p><strong>About the team</strong></p>\n<p>OpenAI’s Forward Deployed Engineering team partners with global pharma and biotech, CROs, and research institutions to deploy existing expertise across the R&amp;D value chain to help customers design and ship production-grade AI systems. 
We operate at the intersection of customer delivery and core platform development, converting early deployments into repeatable system standards and evaluation practices that scale across regulated environments.</p>\n<p><strong>About the role</strong></p>\n<p>We are hiring a Forward Deployed Engineer (FDE) to push the frontier on what is possible today across drug discovery (e.g., target identification, molecular design, pre-clinical) and development (e.g., trial design, trial ops, biostats) by leading end-to-end deployments of our models inside life sciences organizations and research institutions. You will work with customers who are deep experts in their scientific or operational domains, translating real-world data, infrastructure, and constraints into production systems.</p>\n<p>You will measure success through production adoption, measurable workflow impact, and eval-driven feedback loops, including evaluation benchmarks and acceptance criteria, that inform product and model roadmaps. You’ll work closely with our Product, Research, Partnerships, GRC, Security, and GTM to deliver in regulated contexts, including inspection readiness with audit trails and traceable evidence.</p>\n<p>This role is based in Munich. We use a hybrid work model of 3 days in the office per week. We offer relocation assistance. 
Travel up to 50% is required.</p>\n<p><strong>In this role you will</strong></p>\n<ul>\n<li>Design and ship production systems around models, owning integrations, data provenance, reliability, and on-call readiness across research, clinical, and operational workflows.</li>\n</ul>\n<ul>\n<li>Lead discovery and scoping from pre-sales through post-sales, translating ambiguous workflow needs into hypothesis-driven problem framing, system requirements, and an execution plan with measurable endpoints.</li>\n</ul>\n<ul>\n<li>Define and enforce launch criteria for regulated contexts, including validation evidence, audit readiness, outcome metrics, and drive delivery until we demonstrate sustained production impact.</li>\n</ul>\n<ul>\n<li>Build in sensitive scientific data environments where auditability, validation, and access controls shape architecture, operating procedures, and failure handling.</li>\n</ul>\n<ul>\n<li>Run evaluation loops that measure model and system quality against workflow-specific scientific benchmarks and use results to drive model and product changes.</li>\n</ul>\n<ul>\n<li>Distill deployment learnings into hardened primitives, reference architectures, validation templates, and benchmark harnesses that scale across regulated life sciences environments.</li>\n</ul>\n<p><strong>You might thrive in this role if you</strong></p>\n<ul>\n<li>Bring 5+ years of software/ML engineering or technical deployment experience with customer-facing ownership in biotech, pharma, clinical research, or scientific software; PhD, MS, or equivalent applied experience in a life sciences relevant field encouraged.</li>\n</ul>\n<ul>\n<li>Have owned customer GenAI deployments end-to-end from scoping through production adoption, and improved them through evaluation design, error analysis, and iterative evidence generation that tightens acceptance criteria over time.</li>\n</ul>\n<ul>\n<li>Have delivered AI systems in trial design, regulatory writing, or scientific operations 
where validation strategy, auditability, compliance constraints, and reviewer expectations shaped system design and rollout.</li>\n</ul>\n<ul>\n<li>Communicate clearly across scientific, clinical, model research, technical, and executive audiences, translating technical tradeoffs into decision quality, risk posture, and measurable outcomes with credibility.</li>\n</ul>\n<ul>\n<li>Apply systems thinking with high execution standards, consistently turning failures, escalations, and audit findings into improved operating standards, validation artifacts, and repeatable deployment playbooks.</li>\n</ul>\n<p><strong>About OpenAI</strong></p>\n<p>OpenAI is an AI research and deployment company dedicated to ensuring that general-purpose artificial intelligence benefits all of humanity. We push the boundaries of the capabilities of AI systems and seek to safely deploy them to the world through our products. AI is an extremely powerful tool that must be created with safety and human needs at its core, and to achieve our mission, we must encompass and value the many different perspectives, voices, and experiences that form the full spectrum of humanity.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_827b146c-14c","directApply":true,"hiringOrganization":{"@type":"Organization","name":"OpenAI","sameAs":"https://jobs.ashbyhq.com","logo":"https://logos.yubhub.co/openai.com.png"},"x-apply-url":"https://jobs.ashbyhq.com/openai/a3bfefb4-ef77-4a49-a644-92104ca83c2c","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["software/ML engineering","customer-facing ownership","biotech","pharma","clinical research","scientific software","PhD","MS","equivalent applied experience in a life sciences relevant field"],"x-skills-preferred":["GenAI deployments","evaluation design","error 
analysis","iterative evidence generation","validation strategy","auditability","compliance constraints","reviewer expectations","systems thinking","high execution standards"],"datePosted":"2026-03-06T18:34:42.441Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Munich, Germany"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"software/ML engineering, customer-facing ownership, biotech, pharma, clinical research, scientific software, PhD, MS, equivalent applied experience in a life sciences relevant field, GenAI deployments, evaluation design, error analysis, iterative evidence generation, validation strategy, auditability, compliance constraints, reviewer expectations, systems thinking, high execution standards"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_99a112d8-20a"},"title":"Partner AI Deployment Engineer","description":"<p><strong>About the role</strong></p>\n<p>We are looking for a Partner AI Deployment Engineer (P-ADE) to lead technical delivery with OpenAI partners across EMEA and help scale customer deployments built on the OpenAI platform. This role focuses on working across a wide range of customer use cases, supporting the design, deployment and scaling of production-grade AI solutions delivered through partners.</p>\n<p>You will work closely with partner delivery teams, alongside Solutions Engineers (SEs), Forward Deployed Engineers (FDEs) and other ADEs, to move customer engagements from initial design through to stable, scaled production. Your work will accelerate time to value, reduce delivery risk and ensure solutions meet OpenAI’s standards for quality, safety and reliability. You will collaborate closely with GTM, Applied, and Research to support partner-led enterprise adoption.</p>\n<p>This role is based in Paris or Munich. 
We use a hybrid work model of 3 days in the office per week and offer relocation assistance to new employees.</p>\n<p><strong>In this role, you will:</strong></p>\n<ul>\n<li>Act as a primary technical delivery partner for a set of OpenAI partners across EMEA, supporting customer deployments across multiple industries and use cases.</li>\n</ul>\n<ul>\n<li>Work with partner delivery teams and customer stakeholders to translate solution designs into deployable, production-ready architectures on the OpenAI platform.</li>\n</ul>\n<ul>\n<li>Support customer time to value through hands-on prototyping, integration support, architectural guidance and troubleshooting during critical phases of delivery.</li>\n</ul>\n<ul>\n<li>Collaborate closely with SEs, FDEs, and other ADEs to ensure the right technical expertise is engaged from design through production rollout.</li>\n</ul>\n<ul>\n<li>Help partners operationalise solutions by addressing scalability, reliability, security and safety considerations required for enterprise production environments.</li>\n</ul>\n<ul>\n<li>Contribute to reusable deployment patterns, reference architectures and delivery guidance that enable repeatable execution across partner engagements.</li>\n</ul>\n<ul>\n<li>Act as a technical quality and governance point during deployments, helping ensure solutions meet OpenAI’s standards and best practices before and after go-live.</li>\n</ul>\n<ul>\n<li>Capture and synthesise feedback from real customer deployments and share insights with Applied, Research and partner teams to improve delivery playbooks and platform capabilities.</li>\n</ul>\n<p><strong>You’ll thrive in this role if you:</strong></p>\n<ul>\n<li>Have 8+ years of experience in technical consulting, solution delivery or a similar role, working with senior technical and business leaders on complex enterprise deployments.</li>\n</ul>\n<ul>\n<li>Have experience delivering large, multi-stakeholder technical projects in partnership with boutique 
services organisations, system integrators or similar delivery environments.</li>\n</ul>\n<ul>\n<li>Have strong hands-on experience building, integrating and operating production software using modern languages such as Python or JavaScript.</li>\n</ul>\n<ul>\n<li>Have designed, deployed and supported Generative AI and/or machine learning solutions in real-world production environments.</li>\n</ul>\n<ul>\n<li>Have practical experience working with the OpenAI platform in customer-facing or delivery contexts.</li>\n</ul>\n<ul>\n<li>Are a clear communicator who can work effectively with partner engineers, internal teams and customer stakeholders.</li>\n</ul>\n<ul>\n<li>Take ownership of delivery problems end to end and are comfortable operating in ambiguous, fast-moving environments.</li>\n</ul>\n<ul>\n<li>Bring a collaborative, humble mindset and enjoy working across partners and internal teams to deliver successful customer outcomes.</li>\n</ul>\n<p><strong>About OpenAI</strong></p>\n<p>OpenAI is an AI research and deployment company dedicated to ensuring that general-purpose artificial intelligence benefits all of humanity. We push the boundaries of the capabilities of AI systems and seek to safely deploy them to the world through our products. 
AI is an extremely powerful tool that must be created with safety and human needs at its core, and to achieve our mission, we must encompass and value the many different perspectives, voices, and experiences that form the full spectrum of humanity.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_99a112d8-20a","directApply":true,"hiringOrganization":{"@type":"Organization","name":"OpenAI","sameAs":"https://jobs.ashbyhq.com","logo":"https://logos.yubhub.co/openai.com.png"},"x-apply-url":"https://jobs.ashbyhq.com/openai/4a3d7b56-5e7b-4a90-8b94-2a99596b4496","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Python","JavaScript","Generative AI","Machine Learning","OpenAI platform","Technical consulting","Solution delivery","Complex enterprise deployments"],"x-skills-preferred":["Collaborative mindset","Humble mindset","Clear communication","Partner engineers","Internal teams","Customer stakeholders"],"datePosted":"2026-03-06T18:34:24.426Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Munich, Germany; Paris, France"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, JavaScript, Generative AI, Machine Learning, OpenAI platform, Technical consulting, Solution delivery, Complex enterprise deployments, Collaborative mindset, Humble mindset, Clear communication, Partner engineers, Internal teams, Customer stakeholders"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_7e3d844c-cee"},"title":"AI Deployment Engineer","description":"<p><strong>AI Deployment Engineer</strong></p>\n<p><strong>Location</strong></p>\n<p>New York City</p>\n<p><strong>Employment Type</strong></p>\n<p>Full time</p>\n<p><strong>Location 
Type</strong></p>\n<p>Hybrid</p>\n<p><strong>Department</strong></p>\n<p><strong>Compensation</strong></p>\n<ul>\n<li>Zone A$197K – $278K • Offers Equity</li>\n</ul>\n<p>The base pay offered may vary depending on multiple individualized factors, including market location, job-related knowledge, skills, and experience. If the role is non-exempt, overtime pay will be provided consistent with applicable laws. In addition to the salary range listed above, total compensation also includes generous equity, performance-related bonus(es) for eligible employees, and the following benefits.</p>\n<ul>\n<li>Medical, dental, and vision insurance for you and your family, with employer contributions to Health Savings Accounts</li>\n</ul>\n<ul>\n<li>Pre-tax accounts for Health FSA, Dependent Care FSA, and commuter expenses (parking and transit)</li>\n</ul>\n<ul>\n<li>401(k) retirement plan with employer match</li>\n</ul>\n<ul>\n<li>Paid parental leave (up to 24 weeks for birth parents and 20 weeks for non-birthing parents), plus paid medical and caregiver leave (up to 8 weeks)</li>\n</ul>\n<ul>\n<li>Paid time off: flexible PTO for exempt employees and up to 15 days annually for non-exempt employees</li>\n</ul>\n<ul>\n<li>13+ paid company holidays, and multiple paid coordinated company office closures throughout the year for focus and recharge, plus paid sick or safe time (1 hour per 30 hours worked, or more, as required by applicable state or local law)</li>\n</ul>\n<ul>\n<li>Mental health and wellness support</li>\n</ul>\n<ul>\n<li>Employer-paid basic life and disability coverage</li>\n</ul>\n<ul>\n<li>Annual learning and development stipend to fuel your professional growth</li>\n</ul>\n<ul>\n<li>Daily meals in our offices, and meal delivery credits as eligible</li>\n</ul>\n<ul>\n<li>Relocation support for eligible employees</li>\n</ul>\n<ul>\n<li>Additional taxable fringe benefits, such as charitable donation matching and wellness stipends, may also be 
provided.</li>\n</ul>\n<p>More details about our benefits are available to candidates during the hiring process.</p>\n<p>This role is at-will and OpenAI reserves the right to modify base pay and other compensation components at any time based on individual performance, team or company results, or market conditions.</p>\n<p><strong>About the team</strong></p>\n<p>The AI Deployment Engineering team ensures the safe and effective deployment of Generative AI applications for developers and enterprises. We act as trusted advisors and technical partners to our customers, helping them build and execute their AI adoption strategy post-sale. Our mission is to develop a strong backlog of GenAI use cases tailored to each customer’s industry and to drive these initiatives from prototype to production through hands-on technical guidance and partnership.</p>\n<p><strong>About the role</strong></p>\n<p>We are looking for a solutions-oriented technical leader to engage with customers post-sale and ensure they realize tangible business value from their investment in OpenAI&#39;s technologies. You will work closely with senior leaders and technical teams within customer organizations to establish GenAI roadmaps, strategies, prioritize high-value use cases, and guide projects from early prototyping through enterprise-grade production deployments.</p>\n<p>You will take a holistic view of each customer’s architecture and operations, designing solutions that leverage ChatGPT, OpenAI APIs, and our broader ecosystem of tools and services. You will work cross-functionally with Sales, Solutions Engineering, Applied Research, and Product teams, and report to the Head of Solutions Architecture for your segment.</p>\n<p>This role is based in our NYC office. 
We use a hybrid work model (3 days/week in the office) and offer relocation assistance for new employees.</p>\n<p><strong>In this role, you will:</strong></p>\n<ul>\n<li>Serve as the primary technical subject matter expert post-sale for a portfolio of customers, embedding deeply with them to design and deploy GenAI solutions.</li>\n</ul>\n<ul>\n<li>Engage with senior business and technical stakeholders to identify, prioritize, and validate the highest-value GenAI applications in their roadmap.</li>\n</ul>\n<ul>\n<li>Accelerate customer time to value by providing architectural guidance, building hands-on prototypes, and advising on best practices for scaling solutions in production.</li>\n</ul>\n<ul>\n<li>Maintain strong relationships with leadership and technical teams to drive adoption, expansion, and successful outcomes.</li>\n</ul>\n<ul>\n<li>Contribute to open-source resources and enterprise-facing technical documentation to scale best practices across customers.</li>\n</ul>\n<ul>\n<li>Share learnings and collaborate with internal teams to inform product development and improve customer outcomes.</li>\n</ul>\n<ul>\n<li>Codify knowledge and operationalize technical success practices to help the Solutions Architecture team scale impact across industries and customer types.</li>\n</ul>\n<p><strong>You’ll thrive in this role if you:</strong></p>\n<ul>\n<li>Have 5+ years of technical consulting, post-sales engineering, solutions architecture, or similar experience working directly with customers.</li>\n</ul>\n<ul>\n<li>Are a strong communicator, able to explain technical and business concepts clearly to executive and practitioner audiences alike.</li>\n</ul>\n<ul>\n<li>Have experience leading complex deployments of Generative AI or traditional machine learning systems, ideally including infrastructure and network architecture considerations.</li>\n</ul>\n<ul>\n<li>Possess hands-on proficiency in languages like Python, JavaScript, or similar, and are comfortable 
building prototypes or proofs of concept.</li>\n</ul>\n<ul>\n<li>Take end-to-end ownership of challenges, proactively acquiring new skills or knowledge as needed to drive success.</li>\n</ul>\n<ul>\n<li>Bring a humble, collaborative mindset and an eagerness to support teammates and customers alike.</li>\n</ul>\n<ul>\n<li>Thrive in fast-paced environments, adeptly managing multiple workstreams and prioritizing for the highest customer impact.</li>\n</ul>\n<p><strong>About OpenAI</strong></p>\n<p>OpenAI is an AI research and deployment company dedicated to ensuring that general-purpose artificial intelligence benefits all of humanity. We push the boundaries of the capabilities of AI systems and seek to safely deploy them to the world through our products. AI is an extremely powerful tool that must be created with safety and human needs at its core, and to achieve our mission, we must be a company that is both technically excellent and socially responsible.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_7e3d844c-cee","directApply":true,"hiringOrganization":{"@type":"Organization","name":"OpenAI","sameAs":"https://jobs.ashbyhq.com","logo":"https://logos.yubhub.co/openai.com.png"},"x-apply-url":"https://jobs.ashbyhq.com/openai/6bacc51d-6580-4089-93ca-7df3bce4b635","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"Zone A$197K – $278K • Offers Equity","x-skills-required":["Generative AI","ChatGPT","OpenAI APIs","Python","JavaScript","Solutions Architecture","Technical Consulting","Post-Sales Engineering","Complex Deployments","Infrastructure and Network Architecture"],"x-skills-preferred":["Machine Learning","Cloud Computing","DevOps","Agile 
Methodologies","Scrum","Kanban"],"datePosted":"2026-03-06T18:33:40.679Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"New York City"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Generative AI, ChatGPT, OpenAI APIs, Python, JavaScript, Solutions Architecture, Technical Consulting, Post-Sales Engineering, Complex Deployments, Infrastructure and Network Architecture, Machine Learning, Cloud Computing, DevOps, Agile Methodologies, Scrum, Kanban","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":197000,"maxValue":278000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_cbb7e2e4-4bc"},"title":"Security Engineer, Infrastructure Security","description":"<p><strong>Security Engineer, Infrastructure Security</strong></p>\n<p><strong>Location</strong></p>\n<p>Remote - US; New York City; San Francisco; Seattle</p>\n<p><strong>Employment Type</strong></p>\n<p>Full time</p>\n<p><strong>Location Type</strong></p>\n<p>Remote</p>\n<p><strong>Department</strong></p>\n<p>Security</p>\n<p><strong>Compensation</strong></p>\n<ul>\n<li>SF, Seattle or NYC $230K – $385K • Offers Equity</li>\n<li>Zone A $207K – $346.5K • Offers Equity</li>\n<li>Zone B $184K – $308K • Offers Equity</li>\n</ul>\n<p>The base pay offered may vary depending on multiple individualized factors, including market location, job-related knowledge, skills, and experience. If the role is non-exempt, overtime pay will be provided consistent with applicable laws. 
In addition to the salary range listed above, total compensation also includes generous equity, performance-related bonus(es) for eligible employees, and the following benefits.</p>\n<p><strong>Benefits</strong></p>\n<ul>\n<li>Medical, dental, and vision insurance for you and your family, with employer contributions to Health Savings Accounts</li>\n<li>Pre-tax accounts for Health FSA, Dependent Care FSA, and commuter expenses (parking and transit)</li>\n<li>401(k) retirement plan with employer match</li>\n<li>Paid parental leave (up to 24 weeks for birth parents and 20 weeks for non-birthing parents), plus paid medical and caregiver leave (up to 8 weeks)</li>\n<li>Paid time off: flexible PTO for exempt employees and up to 15 days annually for non-exempt employees</li>\n<li>13+ paid company holidays, and multiple paid coordinated company office closures throughout the year for focus and recharge, plus paid sick or safe time (1 hour per 30 hours worked, or more, as required by applicable state or local law)</li>\n<li>Mental health and wellness support</li>\n<li>Employer-paid basic life and disability coverage</li>\n<li>Annual learning and development stipend to fuel your professional growth</li>\n<li>Daily meals in our offices, and meal delivery credits as eligible</li>\n<li>Relocation support for eligible employees</li>\n<li>Additional taxable fringe benefits, such as charitable donation matching and wellness stipends, may also be provided.</li>\n</ul>\n<p><strong>About the Team</strong></p>\n<p>Security is at the foundation of OpenAI’s mission to ensure that artificial general intelligence benefits all of humanity.</p>\n<p>The Security team protects OpenAI’s technology, people, and products. We are technical in what we build but are operational in how we do our work, and are committed to supporting all products and research at OpenAI. 
Our Security team tenets include: prioritizing for impact, enabling researchers, preparing for future transformative technologies, and engaging a robust security culture.</p>\n<p><strong>About the Role</strong></p>\n<p>OpenAI is seeking a Security Engineer to join our Infrastructure Security (InfraSec) team. InfraSec protects the foundations of OpenAI’s research and production environments, spanning GPU supercomputing clusters, multi-cloud infrastructure, datacenters, networking, storage, and the critical services that power our frontier AI models. Our charter includes securing everything from bare-metal hardware and firmware, to Kubernetes clusters and service meshes, to data storage and access pathways for highly sensitive model weights and user data.</p>\n<p><strong>In this role, you will:</strong></p>\n<ul>\n<li>Design and build security controls across diverse layers (e.g., physical hardware, firmware/BMC, OS, Kubernetes, networks, and CI/CD) to defend against sophisticated adversaries and insider threats.</li>\n<li>Collaborate with engineering and security teams to drive deployment of security enhancements and control changes across broad-scale infrastructure.</li>\n<li>Tackle high-impact projects such as checkpoint encryption, network isolation, secret management, and machine identity, while continuously raising the security bar for emerging AI workloads.</li>\n<li>Take a generalist approach to building security controls, balancing a mix of security expertise and broad technical skillsets to adapt to evolving challenges.</li>\n</ul>\n<p><strong>You will thrive in this role if you have:</strong></p>\n<ul>\n<li>Deep understanding of security principles, best practices, and common vulnerabilities.</li>\n<li>A proactive mindset, with the ability to identify and address security gaps or inefficiencies through automation and tooling.</li>\n<li>A track record of delivering scalable solutions and driving impactful changes across infrastructure in real-world 
projects.</li>\n<li>Expertise in the security of cloud platforms (e.g., Amazon AWS, Microsoft Azure), especially securing multi-cloud networks and infrastructure, and designing cloud agnostic systems.</li>\n<li>Experience securing on-prem deployments and datacenters from construction to multi-tenant use.</li>\n<li>Familiarity with container security, orchestration security, and authentication/authorization.</li>\n<li>Strong analytical and problem-solving skills, with an ability to think critically and objectively assess security risks.</li>\n<li>Excellent communication skills, with the ability to convey complex security concepts to technical and non-technical stakeholders.</li>\n<li>Excitement about collaborating with cross-functional teams to build secure, reliable systems that scale globally.</li>\n</ul>\n<p><strong>About OpenAI</strong></p>\n<p>OpenAI is an AI research and deployment company dedicated to ensuring that general-purpose artificial intelligence benefits all of humanity. We push the boundaries of the capabilities of AI systems and seek to safely deploy them to the world through our products. 
AI is an extremely powerful tool that must be created with safety and human needs at its core, and to achieve our mission, we must encompass and value the many different perspectives, voices, and experiences that form the full spectrum of humanity.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_cbb7e2e4-4bc","directApply":true,"hiringOrganization":{"@type":"Organization","name":"OpenAI","sameAs":"https://jobs.ashbyhq.com","logo":"https://logos.yubhub.co/openai.com.png"},"x-apply-url":"https://jobs.ashbyhq.com/openai/f51f750f-a737-4441-8f96-30133a2a8049","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$230K – $385K","x-skills-required":["security principles","best practices","common vulnerabilities","cloud platforms","Amazon AWS","Microsoft Azure","container security","orchestration security","authentication/authorization","Kubernetes","service meshes","data storage","access pathways","firmware","BMC","OS","networks","CI/CD"],"x-skills-preferred":["security expertise","broad technical skillsets","cloud agnostic systems","on-prem deployments","datacenters","multi-tenant use","strong analytical skills","problem-solving skills","critical thinking","objectively assess security risks"],"datePosted":"2026-03-06T18:33:14.263Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Remote - US; New York City; San Francisco; Seattle"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"security principles, best practices, common vulnerabilities, cloud platforms, Amazon AWS, Microsoft Azure, container security, orchestration security, authentication/authorization, Kubernetes, service meshes, data storage, access pathways, firmware, BMC, OS, networks, CI/CD, security expertise, broad 
technical skillsets, cloud agnostic systems, on-prem deployments, datacenters, multi-tenant use, strong analytical skills, problem-solving skills, critical thinking, objectively assess security risks","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":230000,"maxValue":385000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_5779a84c-421"},"title":"Account Director, Healthcare & Life Sciences","description":"<p><strong>Account Director, Healthcare &amp; Life Sciences</strong></p>\n<p><strong>Location</strong></p>\n<p>San Francisco; New York City; Seattle</p>\n<p><strong>Employment Type</strong></p>\n<p>Full time</p>\n<p><strong>Location Type</strong></p>\n<p>Hybrid</p>\n<p><strong>Department</strong></p>\n<p><strong>Compensation</strong></p>\n<ul>\n<li>$232K – $350K • Offers Equity • Mult</li>\n</ul>\n<p>The base pay offered may vary depending on multiple individualized factors, including market location, job-related knowledge, skills, and experience. If the role is non-exempt, overtime pay will be provided consistent with applicable laws. 
In addition to the salary range listed above, total compensation also includes generous equity, performance-related bonus(es) for eligible employees, and the following benefits.</p>\n<ul>\n<li>Medical, dental, and vision insurance for you and your family, with employer contributions to Health Savings Accounts</li>\n</ul>\n<ul>\n<li>Pre-tax accounts for Health FSA, Dependent Care FSA, and commuter expenses (parking and transit)</li>\n</ul>\n<ul>\n<li>401(k) retirement plan with employer match</li>\n</ul>\n<ul>\n<li>Paid parental leave (up to 24 weeks for birth parents and 20 weeks for non-birthing parents), plus paid medical and caregiver leave (up to 8 weeks)</li>\n</ul>\n<ul>\n<li>Paid time off: flexible PTO for exempt employees and up to 15 days annually for non-exempt employees</li>\n</ul>\n<ul>\n<li>13+ paid company holidays, and multiple paid coordinated company office closures throughout the year for focus and recharge, plus paid sick or safe time (1 hour per 30 hours worked, or more, as required by applicable state or local law)</li>\n</ul>\n<ul>\n<li>Mental health and wellness support</li>\n</ul>\n<ul>\n<li>Employer-paid basic life and disability coverage</li>\n</ul>\n<ul>\n<li>Annual learning and development stipend to fuel your professional growth</li>\n</ul>\n<ul>\n<li>Daily meals in our offices, and meal delivery credits as eligible</li>\n</ul>\n<ul>\n<li>Relocation support for eligible employees</li>\n</ul>\n<ul>\n<li>Additional taxable fringe benefits, such as charitable donation matching and wellness stipends, may also be provided.</li>\n</ul>\n<p>More details about our benefits are available to candidates during the hiring process.</p>\n<p>This role is at-will and OpenAI reserves the right to modify base pay and other compensation components at any time based on individual performance, team or company results, or market conditions.</p>\n<p><strong>About the team</strong></p>\n<p>OpenAI’s mission is to build safe artificial general intelligence (AGI) 
which benefits all of humanity. This long-term undertaking brings the world’s best scientists, engineers, and business professionals into one lab together to accomplish this.</p>\n<p>In pursuit of this mission, our Go To Market (GTM) team is responsible for helping customers learn how to leverage and deploy our highly capable AI products across their business. The team is made of Sales, Solutions, Support, Marketing, and Partnership professionals that work together to create valuable solutions that will help bring AI to as many users as possible.</p>\n<p><strong>About the role</strong></p>\n<p>As an Account Director focused on Healthcare &amp; Life Sciences, you will own executive-level relationships with leading healthcare and life sciences organizations, including global pharmaceutical manufacturers, medical device companies, and contract research organizations. You’ll help these companies safely and effectively deploy OpenAI’s technology to transform R&amp;D productivity, automate documentation, enhance regulatory workflows, and personalize patient and provider engagement.</p>\n<p>This role blends scientific literacy, technical depth, business acumen, and relationship-driven enterprise sales. You will collaborate closely with researchers, engineers, and healthcare-focused solution strategists to design secure, compliant, and high-impact AI deployments.</p>\n<p>This role is based in San Francisco. 
We use a hybrid work model of three days in the office per week and offer relocation assistance to new employees.</p>\n<p><strong>In this role, you’ll:</strong></p>\n<ul>\n<li>Manage a focused portfolio of healthcare, large pharmaceutical and life sciences accounts, developing long-term strategic account plans</li>\n</ul>\n<ul>\n<li>Lead complex, multi-stakeholder sales cycles spanning R&amp;D, medical, regulatory, and commercial functions</li>\n</ul>\n<ul>\n<li>Partner with solutions and research engineering to design pilots that demonstrate measurable business and scientific impact</li>\n</ul>\n<ul>\n<li>Collaborate with compliance, privacy, and security teams to ensure responsible deployment of AI in regulated environments</li>\n</ul>\n<ul>\n<li>Own a revenue and consumption target; manage forecasts and pipeline reporting</li>\n</ul>\n<ul>\n<li>Monitor industry and regulatory trends (FDA, EMA, etc.) to guide customer and product strategy</li>\n</ul>\n<ul>\n<li>Represent OpenAI at industry conferences and thought-leadership events (e.g., HLTH, JPM)</li>\n</ul>\n<ul>\n<li>Partner cross-functionally with marketing, partnerships, and communications to build the HLS go-to-market motion</li>\n</ul>\n<p><strong>We’re seeking someone with experience including:</strong></p>\n<ul>\n<li>14+ years selling complex enterprise software or platform-as-a-service solutions to healthcare or life sciences customers</li>\n</ul>\n<ul>\n<li>Proven success achieving annual revenue targets &gt;$2M+ in regulated industries</li>\n</ul>\n<ul>\n<li>Experience managing C-suite relationships across R&amp;D, IT, medical, and commercial teams</li>\n</ul>\n<ul>\n<li>Strong understanding of healthcare and life sciences workflows (e.g., drug discovery, clinical trials, regulatory submissions, market access)</li>\n</ul>\n<ul>\n<li>Familiarity with data privacy, compliance (HIPAA, GxP), and security considerations in healthcare</li>\n</ul>\n<ul>\n<li>Demonstrated ability to design and execute 
complex deal and partnership strategies</li>\n</ul>\n<p><strong>You might thrive in this role if you:</strong></p>\n<ul>\n<li>Are customer-centric and can translate complex scientific and business needs into transformative AI solutions</li>\n</ul>\n<ul>\n<li>Are passionate about advancing human health through the safe and ethical use of AI</li>\n</ul>\n<ul>\n<li>Are a builder who enjoys designing scalable systems and repeatable sales motions from the ground up</li>\n</ul>\n<ul>\n<li>Are a strategist who anticipates industry shifts and guides enterprise customers through them</li>\n</ul>\n<ul>\n<li>Are energized by ambiguity and motivated to create structure and clarity across complex, cross-functional engagements</li>\n</ul>\n<p><strong>About OpenAI</strong></p>\n<p>OpenAI is an AI research and deployment company dedicated to ensuring that general-purpose artificial intelligence benefits all of humanity. The company is made up of scientists, engineers, and business professionals working together to accomplish this long-term undertaking.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_5779a84c-421","directApply":true,"hiringOrganization":{"@type":"Organization","name":"OpenAI","sameAs":"https://jobs.ashbyhq.com","logo":"https://logos.yubhub.co/openai.com.png"},"x-apply-url":"https://jobs.ashbyhq.com/openai/93d9be71-6502-4e48-94c2-1c17724e2bc7","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$232K – $350K • Offers Equity • Mult","x-skills-required":["14+ years selling complex enterprise software or platform-as-a-service solutions to healthcare or life sciences customers","Proven success achieving annual revenue targets >$2M+ in regulated industries","Experience managing C-suite relationships across R&D, IT, medical, and commercial teams","Strong understanding of healthcare and life sciences 
workflows (e.g., drug discovery, clinical trials, regulatory submissions, market access)","Familiarity with data privacy, compliance (HIPAA, GxP), and security considerations in healthcare","Demonstrated ability to design and execute complex deal and partnership strategies"],"x-skills-preferred":["Scientific literacy","Technical depth","Business acumen","Relationship-driven enterprise sales","Collaboration with researchers, engineers, and healthcare-focused solution strategists","Designing secure, compliant, and high-impact AI deployments"],"datePosted":"2026-03-06T18:32:06.539Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco; New York City; Seattle"}},"employmentType":"FULL_TIME","occupationalCategory":"Sales","industry":"Technology","skills":"14+ years selling complex enterprise software or platform-as-a-service solutions to healthcare or life sciences customers, Proven success achieving annual revenue targets >$2M+ in regulated industries, Experience managing C-suite relationships across R&D, IT, medical, and commercial teams, Strong understanding of healthcare and life sciences workflows (e.g., drug discovery, clinical trials, regulatory submissions, market access), Familiarity with data privacy, compliance (HIPAA, GxP), and security considerations in healthcare, Demonstrated ability to design and execute complex deal and partnership strategies, Scientific literacy, Technical depth, Business acumen, Relationship-driven enterprise sales, Collaboration with researchers, engineers, and healthcare-focused solution strategists, Designing secure, compliant, and high-impact AI deployments","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":232000,"maxValue":350000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_3443a031-482"},"title":"Senior DevOps Engineer 
(AI)","description":"<p>We are seeking a Senior DevOps Engineer (AI) to lead the DevOps, deployment, and integration aspects of our AI-powered products. This is a high-ownership role for someone who enjoys operating independently, solving complex problems, and helping teams run smoothly at scale.</p>\n<p><strong>What you&#39;ll do</strong></p>\n<p>Act as the primary interface between the research team and the deployment ecosystem, ensuring seamless transition from innovation to production.</p>\n<ul>\n<li>Own interactions with external AI and LLM providers to facilitate integration and optimize performance across platforms.</li>\n<li>Lead information security and compliance approvals for product deployments, maintaining high standards of data protection and privacy.</li>\n<li>Design, build, and evolve the build and development environment, including robust CI/CD pipelines for automation and reliability.</li>\n<li>Manage releases end-to-end, from scheduling and execution to closure, ensuring timely and high-quality product delivery.</li>\n<li>Drive porting efforts and cross-platform support, broadening product reach and enabling scalability across cloud environments.</li>\n</ul>\n<p><strong>What you need</strong></p>\n<ul>\n<li>Strong proficiency in Git, CI/CD, and version control workflows.</li>\n<li>Expertise in operating systems and cloud deployments (Azure, GCP, AWS).</li>\n<li>Hands-on experience with Docker/Kubernetes and infrastructure-as-code practices.</li>\n<li>Ability to design and build distributed systems at scale.</li>\n<li>Proficiency in Python or another scripting language for automation.</li>\n<li>Experience in model inferencing frameworks (vLLM, TGI) is highly desirable.</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_3443a031-482","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Synopsys","sameAs":"https://careers.synopsys.com","logo":"https://logos.yubhub.co/careers.synopsys.com.png"},"x-apply-url":"https://careers.synopsys.com/job/dublin/senior-devops-engineer-ai/44408/92358709552","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"employee","x-salary-range":null,"x-skills-required":["Git","CI/CD","version control workflows","operating systems","cloud deployments","Docker/Kubernetes","infrastructure-as-code","Python","scripting language","model inferencing frameworks"],"x-skills-preferred":["AI","LLM","information security","compliance approvals","data protection","privacy"],"datePosted":"2026-03-04T17:05:19.738Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Dublin, Leinster, Ireland"}},"occupationalCategory":"Engineering","industry":"Technology","skills":"Git, CI/CD, version control workflows, operating systems, cloud deployments, Docker/Kubernetes, infrastructure-as-code, Python, scripting language, model inferencing frameworks, AI, LLM, information security, compliance approvals, data protection, privacy"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_269f1982-858"},"title":"Web Developer","description":"<p>We are looking for a Web Developer who will independently design, develop, and implement new functionalities and applications that support our business objectives. 
In this role, you will ensure application quality through thorough testing and maintenance, keep technical documentation up to date, and play an active role in gathering and refining requirements.</p>\n<p><strong>What you&#39;ll do</strong></p>\n<ul>\n<li>Design, develop, test, and maintain high‑quality software solutions.</li>\n<li>Collaborate with project managers, product owners, and tech leads to define priorities, estimates, and project plans.</li>\n</ul>\n<p><strong>What you need</strong></p>\n<ul>\n<li>Strong understanding of object-oriented programming and solid, reusable design principles.</li>\n<li>Advanced knowledge of programming languages and technologies relevant to the role.</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_269f1982-858","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Ubisoft","sameAs":"https://jobs.smartrecruiters.com","logo":"https://logos.yubhub.co/ubisoft2.com.png"},"x-apply-url":"https://jobs.smartrecruiters.com/Ubisoft2/744000112358026-web-developer","x-work-arrangement":"onsite","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["JavaScript","TypeScript","Node.js","React","REST API design and implementation","CI/CD tools and concepts","Automation of deployments","Code version control systems","Linux and Windows system tools","Client–server architecture"],"x-skills-preferred":["French"],"datePosted":"2026-03-04T06:06:26.045Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Bucharest"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"JavaScript, TypeScript, Node.js, React, REST API design and implementation, CI/CD tools and concepts, Automation of deployments, Code version control systems, Linux and Windows system tools, Client–server 
architecture, French"}]}