{"version":"0.1","company":{"name":"YubHub","url":"https://yubhub.co","jobsUrl":"https://yubhub.co/jobs/skill/azure"},"x-facet":{"type":"skill","slug":"azure","display":"Azure","count":100},"x-feed-size-limit":100,"x-feed-sort":"enriched_at desc","x-feed-notice":"This feed contains at most 100 jobs (the most recently enriched). For the full corpus, use the paginated /stats/by-facet endpoint or /search.","x-generator":"yubhub-xml-generator","x-rights":"Free to redistribute with attribution: \"Data by YubHub (https://yubhub.co)\"","x-schema":"Each entry in `jobs` follows https://schema.org/JobPosting. YubHub-native raw fields carry `x-` prefix.","jobs":[{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_b33cbd91-bc9"},"title":"Systematic Production Support Engineer","description":"<p>We are seeking an experienced Systematic Production Support Engineer to help us scale our systematic operations and support engineering capabilities. This role directly supports portfolio management teams across Millennium, with operational excellence at the core. Our efforts are focused on delivering the highest quality returns to our investors – providing a world-class and reliable trading and technology platform is essential to this mission.</p>\n<p>As a Systematic Production Support Engineer, you will be responsible for building, developing, and maintaining a reliable, scalable, and integrated platform for trading strategy monitoring, reporting, and operations. 
You will work closely with portfolio managers and other internal customers to reduce operational risk through the implementation of monitoring, reporting, and trade workflow solutions, as well as automated systems and processes focused on trading and operations.</p>\n<p>Key responsibilities include:</p>\n<ul>\n<li>Building, developing, and maintaining a reliable, scalable, and integrated platform for trading strategy monitoring, reporting, and operations</li>\n<li>Working with portfolio managers and other internal customers to reduce operational risk through the implementation of monitoring, reporting, and trade workflow solutions</li>\n<li>Implementing automated systems and processes focused on trading and operations</li>\n<li>Streamlining development and deployment processes</li>\n</ul>\n<p>Technical qualifications include:</p>\n<ul>\n<li>5+ years of development experience in Python</li>\n<li>Experience working in a Linux/Unix environment</li>\n<li>Experience working with PostgreSQL or other relational databases</li>\n</ul>\n<p>Preferred skills and experience include:</p>\n<ul>\n<li>Understanding of NLP, supervised/non-supervised learning, and Generative AI models</li>\n<li>Experience operating and monitoring low-latency trading environments</li>\n<li>Familiarity with quantitative finance and electronic trading concepts</li>\n<li>Familiarity with financial data</li>\n<li>Broad understanding of equities, futures, FX, or other financial instruments</li>\n<li>Experience designing and developing distributed systems with a focus on backend development in C/C++, Java, Scala, Go, or C#</li>\n<li>Experience with Apache/Confluent Kafka</li>\n<li>Experience automating SDLC pipelines (e.g., Jenkins, TeamCity, or AWS CodePipeline)</li>\n<li>Experience with containerization and orchestration technologies</li>\n<li>Experience building and deploying systems that utilize services provided by AWS, GCP, or Azure</li>\n<li>Contributions to open-source projects</li>\n</ul>\n<p>This 
is a unique opportunity to drive significant value creation for one of the world&#39;s leading investment managers.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_b33cbd91-bc9","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Unknown","sameAs":"https://mlp.eightfold.ai","logo":"https://logos.yubhub.co/mlp.eightfold.ai.png"},"x-apply-url":"https://mlp.eightfold.ai/careers/job/755954716155","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Python","Linux/Unix","PostgreSQL","NLP","supervised/non-supervised learning","Generative AI models","low-latency trading environments","quantitative finance","electronic trading concepts","financial data","equities","futures","FX","distributed systems","backend development","C/C++","Java","Scala","Go","C#","Apache/Confluent Kafka","SDLC pipelines","containerization","orchestration technologies","AWS","GCP","Azure"],"x-skills-preferred":["Understanding of NLP, supervised/non-supervised learning, and Generative AI models","Experience operating and monitoring low-latency trading environments","Familiarity with quantitative finance and electronic trading concepts","Familiarity with financial data","Broad understanding of equities, futures, FX, or other financial instruments","Experience designing and developing distributed systems with a focus on backend development in C/C++, Java, Scala, Go, or C#","Experience with Apache/Confluent Kafka","Experience automating SDLC pipelines (e.g., Jenkins, TeamCity, or AWS CodePipeline)","Experience with containerization and orchestration technologies","Experience building and deploying systems that utilize services provided by AWS, GCP, or Azure","Contributions to open-source 
projects"],"datePosted":"2026-04-18T22:14:36.583Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Miami, Florida, United States of America"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Finance","skills":"Python, Linux/Unix, PostgreSQL, NLP, supervised/non-supervised learning, Generative AI models, low-latency trading environments, quantitative finance, electronic trading concepts, financial data, equities, futures, FX, distributed systems, backend development, C/C++, Java, Scala, Go, C#, Apache/Confluent Kafka, SDLC pipelines, containerization, orchestration technologies, AWS, GCP, Azure, Understanding of NLP, supervised/non-supervised learning, and Generative AI models, Experience operating and monitoring low-latency trading environments, Familiarity with quantitative finance and electronic trading concepts, Familiarity with financial data, Broad understanding of equities, futures, FX, or other financial instruments, Experience designing and developing distributed systems with a focus on backend development in C/C++, Java, Scala, Go, or C#, Experience with Apache/Confluent Kafka, Experience automating SDLC pipelines (e.g., Jenkins, TeamCity, or AWS CodePipeline), Experience with containerization and orchestration technologies, Experience building and deploying systems that utilize services provided by AWS, GCP, or Azure, Contributions to open-source projects"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_1bd2d1b2-84f"},"title":"Senior Machine Learning Researcher","description":"<p>We are seeking a senior machine learning researcher to join our Core AI team.</p>\n<p>As part of the team, you will help solve complex business problems by developing viable cutting-edge AI/ML solutions.</p>\n<p>You will develop and implement creative solutions that fundamentally transform business processes, delivering breakthrough 
improvements rather than incremental changes.</p>\n<p>You will work closely with other AI/ML researchers and engineers, SWEs, product owners/managers, and business stakeholders, and participate in the full lifecycle of solution development, including requirements gathering with business, experimentation and algorithmic exploration, development, and assistance with productization.</p>\n<p>Key Responsibilities:</p>\n<ul>\n<li>Work independently or as part of a team to help design and implement high accuracy and delightful user experience solutions utilizing ML, NLP, GenAI, Agentic technologies.</li>\n</ul>\n<ul>\n<li>Participate in all aspects of solution development, including ideation and requirement gathering with business stakeholders, experimentation and exploration to identify strong solution approaches, solution development, etc.</li>\n</ul>\n<ul>\n<li>Prototype, test, and iterate on novel AI models and approaches to solve complex business challenges.</li>\n</ul>\n<ul>\n<li>Collaborate with cross-functional teams to identify opportunities where AI can create significant business value, and transition solutions into production systems.</li>\n</ul>\n<ul>\n<li>Research and stay updated with the latest advancements in machine learning and AI technologies.</li>\n</ul>\n<ul>\n<li>Participate in code reviews, technical discussions, and knowledge sharing sessions.</li>\n</ul>\n<ul>\n<li>Communicate technical concepts and transformative ideas effectively to both technical and non-technical stakeholders.</li>\n</ul>\n<p>Required Skills &amp; Qualifications:</p>\n<ul>\n<li>Bachelor&#39;s with 10+ years, Master&#39;s with 7+ years, or PhD with 5+ years in Computer Science, Data Science, Machine Learning, or related field.</li>\n</ul>\n<ul>\n<li>Deep expertise and proven ability in developing high accuracy/value solutions to business problems in the NLP, Generative AI, Agentic AI, and/or ML space.</li>\n</ul>\n<ul>\n<li>Hands-on experience with data processing, 
experimentation, and exploration.</li>\n</ul>\n<ul>\n<li>Strong programming skills in Python.</li>\n</ul>\n<ul>\n<li>Experience with cloud platforms (AWS, Azure, GCP) for deploying ML solutions.</li>\n</ul>\n<ul>\n<li>Excellent problem-solving skills and attention to detail.</li>\n</ul>\n<ul>\n<li>Strong communication skills to collaborate with technical and non-technical stakeholders.</li>\n</ul>\n<ul>\n<li>Ability to work independently and collaboratively.</li>\n</ul>\n<p>Additional Preferred Skills &amp; Qualifications:</p>\n<ul>\n<li>Understanding of the financial markets, including experience with financial datasets, is strongly preferred.</li>\n</ul>\n<ul>\n<li>Experience with ML frameworks such as PyTorch, TensorFlow.</li>\n</ul>\n<ul>\n<li>Familiarity with MLOps practices and tools such as SageMaker, MLflow, or Airflow.</li>\n</ul>\n<ul>\n<li>Previous experience working in an Agile environment.</li>\n</ul>\n<p>Millennium pays a total compensation package which includes a base salary, discretionary performance bonus, and a comprehensive benefits package. 
The estimated base salary range for this position is $175,000 to $250,000, which is specific to New York and may change in the future.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_1bd2d1b2-84f","directApply":true,"hiringOrganization":{"@type":"Organization","name":"IT - Artificial Intelligence","sameAs":"https://mlp.eightfold.ai","logo":"https://logos.yubhub.co/mlp.eightfold.ai.png"},"x-apply-url":"https://mlp.eightfold.ai/careers/job/755954012324","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$175,000 to $250,000","x-skills-required":["Python","Machine Learning","NLP","GenAI","Agentic technologies","Data processing","Experimentation","Exploration","Cloud platforms (AWS, Azure, GCP)","Problem-solving skills","Communication skills"],"x-skills-preferred":["PyTorch","TensorFlow","MLOps practices and tools (SageMaker, MLflow, Airflow)","Agile environment"],"datePosted":"2026-04-18T22:14:27.951Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"New York, New York, United States of America"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, Machine Learning, NLP, GenAI, Agentic technologies, Data processing, Experimentation, Exploration, Cloud platforms (AWS, Azure, GCP), Problem-solving skills, Communication skills, PyTorch, TensorFlow, MLOps practices and tools (SageMaker, MLflow, Airflow), Agile environment","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":175000,"maxValue":250000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_78270c8d-016"},"title":"Operations Data Governance & Controls Specialist","description":"<p>As an Operations Control 
Specialist – Data Governance &amp; Controls, you will design, implement, and support technical data governance solutions with a focus on the firm&#39;s Trader Master and related reference data domains.</p>\n<p>This role requires a strong technical background in Data Management, Data Architecture, Data Lineage, Data Quality, Master Data Management (MDM), and automation within Financial Services and/or Technology.</p>\n<p>You will contribute to and help lead the technical design of data governance controls, data models, and integration patterns, partnering closely with Technology and Operations teams.</p>\n<p>Key Responsibilities:</p>\n<ul>\n<li>Build/enhance data governance frameworks, controls, standards, and workflows (policies, definitions, entitlements).</li>\n<li>Create data quality rules and monitoring; automate exception detection, alerting, remediation, SLAs, and RCA.</li>\n<li>Develop Python/SQL/ETL-ELT automation for checks, controls, and reporting; deliver Tableau/Power BI dashboards and KPIs.</li>\n<li>Contribute to conceptual/logical/physical data modeling for Trader Master and core domains.</li>\n<li>Support MDM capabilities: golden record, matching/merging, survivorship, stewardship workflows; help shape MDM strategy.</li>\n<li>Implement access/entitlement governance (RBAC, row/column security) across DB/warehouse/BI with audit compliance.</li>\n<li>Maintain catalog, glossary, lineage, schema history, impact analysis; manage structured change workflows.</li>\n<li>Define integration patterns (batch/API/streaming) and build reconciliations/validations across systems.</li>\n<li>Manage historical/temporal data (validation, backfills, remediation) supporting regulatory/reporting/analytics.</li>\n<li>Produce technical documentation (designs, runbooks, data dictionaries), share knowledge, and mentor juniors.</li>\n</ul>\n<p>Qualifications:</p>\n<ul>\n<li>Bachelor’s degree in Computer Science, Engineering, Information Systems, Mathematics, Finance, or related 
field; advanced degree (MS, MBA, or equivalent) is a plus.</li>\n<li>5–8 years of experience in financial services or fintech with hands-on work in data engineering, data management, or data architecture roles; exposure to trading strategies, fund structures, and financial products strongly preferred.</li>\n</ul>\n<p>Technical Expertise (Required):</p>\n<ul>\n<li>Strong Python and SQL; experience with data warehousing + ETL/ELT.</li>\n<li>Familiarity with MDM/data governance tools (e.g., Collibra, Informatica, Alation) and Tableau/Power BI.</li>\n<li>Proven ability to lead delivery, solve complex data issues, and communicate with technical/non-technical stakeholders.</li>\n<li>Preferred certs: DAMA/CDMP, cloud (AWS/Azure/GCP), Scrum, BI/data engineering.</li>\n</ul>\n<p>Millennium pays a total compensation package which includes a base salary, discretionary performance bonus, and a comprehensive benefits package.</p>\n<p>The estimated base salary range for this position is $70,000 to $160,000, which is specific to New York and may change in the future.</p>\n<p>When finalizing an offer, we take into consideration an individual’s experience level and the qualifications they bring to the role to formulate a competitive total compensation package.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_78270c8d-016","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Ops & MO Control","sameAs":"https://mlp.eightfold.ai","logo":"https://logos.yubhub.co/mlp.eightfold.ai.png"},"x-apply-url":"https://mlp.eightfold.ai/careers/job/755954926796","x-work-arrangement":"onsite","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":"$70,000 to $160,000","x-skills-required":["Python","SQL","ETL/ELT","Data Warehousing","Tableau/Power BI","MDM/data governance 
tools","Collibra","Informatica","Alation"],"x-skills-preferred":["DAMA/CDMP","cloud (AWS/Azure/GCP)","Scrum","BI/data engineering"],"datePosted":"2026-04-18T22:14:17.909Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"New York, New York, United States of America"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Finance","skills":"Python, SQL, ETL/ELT, Data Warehousing, Tableau/Power BI, MDM/data governance tools, Collibra, Informatica, Alation, DAMA/CDMP, cloud (AWS/Azure/GCP), Scrum, BI/data engineering","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":70000,"maxValue":160000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_c3b63dd5-0f6"},"title":"Backend utvecklare","description":"<p>We are seeking an experienced backend developer to join our tech team. As a backend developer, you will be responsible for designing, developing, and maintaining the server-side of our applications and systems. 
You will work closely with our frontend developers, designers, and product owners to ensure a seamless integration between frontend and backend.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Design and develop scalable and efficient backend solutions for our digital platforms.</li>\n<li>Write clean, readable, and reusable code.</li>\n<li>Perform unit testing and debugging to ensure high quality and reliability.</li>\n<li>Participate in technical discussions and contribute ideas to improve the product&#39;s performance and functionality.</li>\n<li>Collaborate with frontend developers and other team members to ensure a smooth user experience.</li>\n</ul>\n<p>Qualifications:</p>\n<ul>\n<li>Experience in backend development with a focus on web applications.</li>\n<li>Good knowledge of programming languages such as Python, Java, or similar.</li>\n<li>Experience working with frameworks such as Django, Flask, Spring, or similar.</li>\n<li>Familiarity with database management systems such as MySQL, PostgreSQL, or similar.</li>\n<li>Knowledge of API design and implementation.</li>\n<li>Strong problem-solving skills and ability to work independently as well as in a team.</li>\n</ul>\n<p>Benefits:</p>\n<ul>\n<li>Attractive salary based on experience and competence.</li>\n<li>Opportunity to work with exciting projects and the latest technology.</li>\n<li>Flexible working hours and possibility of remote work.</li>\n<li>Continuous professional development and opportunities for career growth.</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_c3b63dd5-0f6","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Scandinavian 
Airlines","sameAs":"https://scandinavianairlines.teamtailor.com","logo":"https://logos.yubhub.co/scandinavianairlines.teamtailor.com.png"},"x-apply-url":"https://scandinavianairlines.teamtailor.com/jobs/4882026-backend-utvecklare","x-work-arrangement":"On-site","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["backend development","web applications","Python","Java","Django","Flask","Spring","MySQL","PostgreSQL","API design","problem-solving"],"x-skills-preferred":["cloud services","AWS","Google Cloud","Azure"],"datePosted":"2026-04-18T22:13:45.980Z","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Transportation","skills":"backend development, web applications, Python, Java, Django, Flask, Spring, MySQL, PostgreSQL, API design, problem-solving, cloud services, AWS, Google Cloud, Azure"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_32932504-2b5"},"title":"Systematic Production Support Engineer","description":"<p>We are looking for an experienced professional to help us scale our systematic operations and support engineering capabilities.</p>\n<p>This role directly supports portfolio management teams across Millennium, with operational excellence at the core. 
Our efforts are focused on delivering the highest quality returns to our investors – providing a world-class and reliable trading and technology platform is essential to this mission.</p>\n<p>This is a unique opportunity to drive significant value creation for one of the world&#39;s leading investment managers.</p>\n<p>Principal Responsibilities:</p>\n<ul>\n<li>Build, develop and maintain a reliable, scalable, and integrated platform for trading strategy monitoring, reporting, and operations.</li>\n<li>Work with portfolio managers and other internal customers to reduce operational risk through:</li>\n<li>Implementation of monitoring, reporting, and trade workflow solutions.</li>\n<li>Implementation of automated systems and processes focused on trading and operations.</li>\n<li>Streamlining development and deployment processes.</li>\n<li>Implementation of MCP servers focused on assisting the rest of the Support Engineering team as well as proactively monitoring the production environment.</li>\n</ul>\n<p>Technical Qualification:</p>\n<ul>\n<li>5+ years of development experience in Python.</li>\n<li>Experience working in a Linux / Unix environment.</li>\n<li>Experience working with PostgreSQL or other relational databases.</li>\n<li>Ability to understand and discuss requirements from portfolio managers.</li>\n</ul>\n<p>Preferred Skills and Experience:</p>\n<ul>\n<li>Understanding of NLP, supervised/non-supervised learning and Generative AI models.</li>\n<li>Experience operating and monitoring low-latency trading environments.</li>\n<li>Familiarity with quantitative finance and electronic trading concepts.</li>\n<li>Familiarity with financial data.</li>\n<li>Broad understanding of equities, futures, FX, or other financial instruments.</li>\n<li>Experience designing and developing distributed systems with a focus on backend development in C/C++, Java, Scala, Go, or C#.</li>\n<li>Experience with Apache / Confluent Kafka.</li>\n<li>Experience automating SDLC pipelines (e.g., 
Jenkins, TeamCity, or AWS CodePipeline).</li>\n<li>Experience with containerization and orchestration technologies.</li>\n<li>Experience building and deploying systems that utilize services provided by AWS, GCP or Azure.</li>\n<li>Contributions to open-source projects.</li>\n</ul>\n<p>The estimated base salary range for this position is $100,000 to $175,000, which is specific to New York and may change in the future. Millennium pays a total compensation package which includes a base salary, discretionary performance bonus, and a comprehensive benefits package. When finalizing an offer, we take into consideration an individual&#39;s experience level and the qualifications they bring to the role to formulate a competitive total compensation package.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_32932504-2b5","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Equity IT","sameAs":"https://mlp.eightfold.ai","logo":"https://logos.yubhub.co/mlp.eightfold.ai.png"},"x-apply-url":"https://mlp.eightfold.ai/careers/job/755954627501","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$100,000 to $175,000","x-skills-required":["Python","Linux / Unix","PostgreSQL","NLP","supervised/non-supervised learning","Generative AI models"],"x-skills-preferred":["Apache / Confluent Kafka","C/C++","Java","Scala","Go","C#","containerization","orchestration technologies","AWS","GCP","Azure"],"datePosted":"2026-04-18T22:13:42.254Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"New York, New York, United States of America · Old Greenwich, Connecticut, United States of America"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Finance","skills":"Python, Linux / Unix, PostgreSQL, NLP, supervised/non-supervised learning, Generative AI 
models, Apache / Confluent Kafka, C/C++, Java, Scala, Go, C#, containerization, orchestration technologies, AWS, GCP, Azure","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":100000,"maxValue":175000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_64bb6566-575"},"title":"Senior ‘Developer Infrastructure’ Engineer","description":"<p>The GALAXY Platform Execution &amp; Exchange Data (SPEED) Team is a core part of Millennium&#39;s technology organisation, powering the firm&#39;s lowest-latency solutions for systematic and high-frequency trading.</p>\n<p>SPEED delivers the live trading and market-data platforms used by portfolio managers and risk systems, including Latency Critical Trading (LCT), DMA OMS (Client Direct), DMA market data feeds, packet capture (PCAPs), enterprise market data, and intraday data services across latency tiers from sub-100 nanoseconds to millisecond-sensitive workflows.</p>\n<p>As a Senior Developer Infrastructure Engineer on SPEED, you will own and evolve the build and CI/CD infrastructure that underpins these mission-critical systems.</p>\n<p>By designing scalable build pipelines, shared tooling, and reliable release workflows, you will directly enhance developer productivity and enable fast, safe iteration on some of the firm&#39;s most performance-sensitive code.</p>\n<p>This role offers the opportunity to shape core engineering practices while contributing to platforms that are central to Millennium&#39;s trading edge.</p>\n<p>Principal Responsibilities</p>\n<ul>\n<li>Design, build, and maintain a highly scalable, parallel, and cached build system for a large, performance-sensitive codebase.</li>\n</ul>\n<ul>\n<li>Own and continually optimise CI/CD pipelines to minimise build/test times, reduce flakiness, and improve developer productivity.</li>\n</ul>\n<ul>\n<li>Operate with an AI-first 
mindset across the SDLC, using automation by default to streamline build, test, and release workflows.</li>\n</ul>\n<ul>\n<li>Integrate and operationalise AI tools (e.g., copilots, workflow automation, AI-driven analytics) to eliminate manual toil, accelerate development, and codify reusable AI-enabled patterns for the broader engineering organisation.</li>\n</ul>\n<ul>\n<li>Design and operate containerised environments (e.g., Docker, Kubernetes) to maximise utilisation, reliability, and scalability across environments.</li>\n</ul>\n<ul>\n<li>Implement and manage artifact storage, dependency management, and versioning strategies for large, distributed systems.</li>\n</ul>\n<ul>\n<li>Develop and maintain shared libraries, CLIs, scripts, and internal platforms that reduce friction and enable self-service for engineers.</li>\n</ul>\n<ul>\n<li>Build and enhance test suites and environment provisioning, leveraging AI and automation where appropriate for smarter checks, triage, and observability.</li>\n</ul>\n<ul>\n<li>Monitor, instrument, and improve the reliability, observability, and performance of build and CI/CD systems using metrics, dashboards, and alerting.</li>\n</ul>\n<ul>\n<li>Partner with trading and engineering teams to understand requirements, remove friction, and champion best practices for building, testing, and releasing software.</li>\n</ul>\n<p>Qualifications/Skills Required</p>\n<ul>\n<li>5+ years of software engineering or DevInfra/Platform/DevOps experience, with significant focus on building systems and CI/CD.</li>\n</ul>\n<ul>\n<li>Strong programming skills in one or more languages (e.g., Python, Rust, Go, C++) for automation and tooling.</li>\n</ul>\n<ul>\n<li>Hands-on experience with at least one modern build system (e.g., Bazel, Buck2).</li>\n</ul>\n<ul>\n<li>Solid understanding of source control (Git), branching strategies, and release management.</li>\n</ul>\n<ul>\n<li>Experience with monorepos is a plus.</li>\n</ul>\n<ul>\n<li>Experience 
scaling build and test infrastructure for growing codebases and teams (parallelization, test sharding, remote execution, caching).</li>\n</ul>\n<ul>\n<li>Experience designing or participating in processes, systems, or playbooks that leverage AI to streamline work rather than needing to add more headcount to the team.</li>\n</ul>\n<ul>\n<li>Familiarity with containers and cloud infrastructure (Docker, Kubernetes, and major cloud providers such as AWS/GCP/Azure).</li>\n</ul>\n<ul>\n<li>Strong communication and collaboration skills; comfortable partnering with multiple teams and driving cross-cutting initiatives.</li>\n</ul>\n<p>The estimated base salary range for this position is $175,000 to $250,000, which is specific to New York and may change in the future. Millennium pays a total compensation package which includes a base salary, discretionary performance bonus, and a comprehensive benefits package. When finalising an offer, we take into consideration an individual&#39;s experience level and the qualifications they bring to the role to formulate a competitive total compensation package.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_64bb6566-575","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Unknown","sameAs":"https://mlp.eightfold.ai","logo":"https://logos.yubhub.co/mlp.eightfold.ai.png"},"x-apply-url":"https://mlp.eightfold.ai/careers/job/755954695574","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$175,000 to $250,000","x-skills-required":["Python","Rust","Go","C++","Bazel","Buck2","Git","Kubernetes","Docker","AWS","GCP","Azure"],"x-skills-preferred":[],"datePosted":"2026-04-18T22:13:29.006Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"New York, New York, United States of 
America"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, Rust, Go, C++, Bazel, Buck2, Git, Kubernetes, Docker, AWS, GCP, Azure","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":175000,"maxValue":250000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_6964b8e4-caf"},"title":"Cybersecurity Engineer","description":"<p>Job Title: Cybersecurity Engineer</p>\n<p>Introduction to role</p>\n<p>Cybersecurity sits at the heart of our IT strategy. As we move towards ambitious objectives, we are looking for individuals who focus on innovation to maintain a sustainable risk position against an evolving threat landscape, who recognise that adversaries may include organised crime syndicates or state-sponsored attackers, and who understand attackers&#39; motivations and ways of working.</p>\n<p>In this role, you will operate within AstraZeneca&#39;s global cybersecurity organisation, collaborating with and influencing multiple functions across China, India, Mexico, Sweden, the US and the UK. Ready to help defend a global enterprise where technology directly supports life-changing medicines?</p>\n<p>Accountabilities</p>\n<p>In this role, you will engineer cybersecurity solutions across cloud, on-premises and third-party collaboration environments, with a predominant focus on cloud and data. You will collaborate with other teams to perform, assess and evolve IT processes that intersect our cybersecurity priorities, ensuring security is embedded into how work gets done. You will map governance and compliance frameworks and their controls to technical implementation, shifting hardening processes as far left as possible in the lifecycle. 
You will leverage deep understanding of threats, weaknesses and vulnerabilities around cloud and data to help other areas respond promptly and effectively to contain breaches or address areas of concern. You will also contribute to continuous improvement by analysing incidents, refining standards and influencing architectural decisions that balance risk, performance and usability.</p>\n<p>How will you use your expertise to raise the bar?</p>\n<p>Essential Skills/Experience</p>\n<ul>\n<li>Minimum 10 years of experience</li>\n<li>Bachelor&#39;s Degree</li>\n<li>Must have broad enterprise IT experience with significant cloud and data exposure.</li>\n<li>Must have in-depth understanding of security and networking protocols, cryptography, and modern authentication and authorization protocols.</li>\n<li>Must have experience designing, deploying, and operating secure networks, systems, application and security architectures at scale.</li>\n<li>Must have experience configuring and managing cloud security services in an AWS, Azure and GCP at organisation at scale.</li>\n<li>Must have experience researching, designing, and implementing security policies, standards, and procedures, including those in cybersecurity frameworks such as MITRE ATT&amp;CK, NIST CSF, NIST SP.800- 53, and NIST SP.800-61, as well as implementing cloud security reference architectures.</li>\n<li>Should have experience working in a software development and systems administration organisation, implementing DevSecOps and process automation.</li>\n<li>Should have the ability to conduct post-mortem on security incidents and take post-mortem data to drive uplift in policies, procedures, standards.</li>\n<li>Familiarity with CSPM, CNAPP, and Cloud EDR platforms</li>\n<li>Expertise with Microsoft Defender, Sentinel and Splunk</li>\n</ul>\n<p>Desirable Skills/Experience</p>\n<ul>\n<li>Identify and articulate architectural trade-offs.</li>\n<li>Embed process, governance and security into workflow and 
technology.</li>\n<li>Design and implement software tools and services using modern programming languages.</li>\n<li>Manage and lead projects delivering prioritised initiatives at challenging deadlines.</li>\n<li>Exert positive influence in a matrixed organisation to drive technology evolution.</li>\n<li>Drive efforts to achieve process and technology improvement at scale.</li>\n</ul>\n<p>The annual base pay for this position ranges from 136,044.00 - 204,066.00 USD Annual (80% - 120%). Hourly and salaried non-exempt employees will also be paid overtime pay when working qualifying overtime hours. Base pay offered may vary depending on multiple individualised factors, including market location, job-related knowledge, skills, and experience. In addition, our positions offer a short-term incentive bonus opportunity; eligibility to participate in our equity-based long-term incentive programme (salaried roles), to receive a retirement contribution (hourly roles), and commission payment eligibility (sales roles).</p>\n<p>Benefits offered included a qualified retirement programme [401(k) plan]; paid vacation and holidays; paid leaves; and, health benefits including medical, prescription drug, dental, and vision coverage in accordance with the terms and conditions of the applicable plans. Additional details of participation in these benefit plans will be provided if an employee receives an offer of employment. If hired, employee will be in an &#39;at-will position&#39; and the Company reserves the right to modify base pay (as well as any other discretionary payment or compensation programme) at any time, including for reasons related to individual performance, Company or individual department/team performance, and market factors.</p>\n<p>When we put unexpected teams in the same room, we unleash bold thinking with the power to inspire life-changing medicines. In-person working gives us the platform we need to connect, work at pace and challenge perceptions. 
That&#39;s why we work, on average, a minimum of three days per week from the office. But that doesn&#39;t mean we&#39;re not flexible. We balance the expectation of being in the office while respecting individual flexibility. Join us in our unique and ambitious world.</p>\n<p>AstraZeneca offers an environment where cybersecurity work has real-world impact on patients&#39; lives, not just systems and data. Here, technology experts collaborate with scientists and business teams to unlock the potential of data, analytics, AI and machine learning, constantly experimenting with new approaches while keeping critical platforms secure. There is strong investment in digital capabilities, room to explore modern tools through initiatives like hackathons, and a culture that values curiosity, coaching and continuous learning so that every day brings opportunities to grow skills and shape both personal development and the future of healthcare technology.</p>\n<p>If this role matches your skills and ambitions, apply now and help protect the digital foundations that enable life-changing medicines!</p>\n<p>Date Posted 17-Apr-2026 Closing Date 03-May-2026</p>\n<p>Our mission is to build an inclusive environment where equal employment opportunities are available to all applicants and employees. In furtherance of that mission, we welcome and consider applications from all qualified candidates, regardless of their protected characteristics. 
If you have a disability or special need that requires accommodation, please complete the corresponding section in the application form.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_6964b8e4-caf","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Cyber Security Engineering Cloud/Data","sameAs":"https://astrazeneca.eightfold.ai","logo":"https://logos.yubhub.co/astrazeneca.eightfold.ai.png"},"x-apply-url":"https://astrazeneca.eightfold.ai/careers/job/563877689899183","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Minimum 10 years of experience","Bachelor's Degree","Broad enterprise IT experience with significant cloud and data exposure","In-depth understanding of security and networking protocols, cryptography, and modern authentication and authorization protocols","Experience designing, deploying, and operating secure networks, systems, application and security architectures at scale","Experience configuring and managing cloud security services in an AWS, Azure and GCP at organisation at scale","Experience researching, designing, and implementing security policies, standards, and procedures, including those in cybersecurity frameworks such as MITRE ATT&CK, NIST CSF, NIST SP.800- 53, and NIST SP.800-61, as well as implementing cloud security reference architectures","Experience working in a software development and systems administration organisation, implementing DevSecOps and process automation","Ability to conduct post-mortem on security incidents and take post-mortem data to drive uplift in policies, procedures, standards","Familiarity with CSPM, CNAPP, and Cloud EDR platforms","Expertise with Microsoft Defender, Sentinel and 
Splunk"],"x-skills-preferred":[],"datePosted":"2026-04-18T22:13:02.185Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Gaithersburg, Maryland, United States of America"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Healthcare","skills":"Minimum 10 years of experience, Bachelor's Degree, Broad enterprise IT experience with significant cloud and data exposure, In-depth understanding of security and networking protocols, cryptography, and modern authentication and authorization protocols, Experience designing, deploying, and operating secure networks, systems, application and security architectures at scale, Experience configuring and managing cloud security services in an AWS, Azure and GCP at organisation at scale, Experience researching, designing, and implementing security policies, standards, and procedures, including those in cybersecurity frameworks such as MITRE ATT&CK, NIST CSF, NIST SP.800- 53, and NIST SP.800-61, as well as implementing cloud security reference architectures, Experience working in a software development and systems administration organisation, implementing DevSecOps and process automation, Ability to conduct post-mortem on security incidents and take post-mortem data to drive uplift in policies, procedures, standards, Familiarity with CSPM, CNAPP, and Cloud EDR platforms, Expertise with Microsoft Defender, Sentinel and Splunk"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_867e3558-9a7"},"title":"Team Lead, Java Engineer - Equities Trading Technologies","description":"<p>We are seeking a Team Lead to maintain and enhance our mission-critical, multi-asset trading platform that is used firm-wide daily. 
This individual will own the existing Java Swing code base, while also playing a pivotal role in designing the next-generation HTML5 trading UI.</p>\n<p>The ideal candidate should have a proven track record in developing and maintaining Java-based front-end applications in the finance sector. Exceptional team collaboration skills and the ability to work effectively with colleagues across global time zones are crucial.</p>\n<p>Millennium strongly prioritizes our synergistic culture, which revolves around teamwork and low egos. You should possess the ability to work in a fast-paced environment both collaboratively and individually while managing multiple projects simultaneously.</p>\n<p>The successful individual will have a strong sense of urgency, emotional intelligence, and prioritize a high-caliber end-user experience.</p>\n<p>Qualifications:</p>\n<ul>\n<li>Bachelor’s degree in computer science or comparable</li>\n<li>7+ years of professional experience with Core Java and Java Swing, electronic trading systems and/or trader workstations environment strongly preferred.</li>\n<li>5+ years of experience working with HTML, JavaScript, CSS, and JQuery</li>\n<li>Deep understanding of multithreading and distributed systems within a high performance, latency-sensitive environment</li>\n<li>Strong knowledge of unit testing frameworks and continuous test-driven development practices</li>\n<li>Enterprise level experience with design patterns such as MVC, MV, MVP</li>\n<li>Enterprise level experience with RESTful web services</li>\n<li>Previous experience liaising with non-technology stakeholders, polished and proactive communication skills</li>\n</ul>\n<p>Beneficial/Ideal Technology Experience:</p>\n<ul>\n<li>EXT-JS, AngularJS, AJAX, JSON experience is very beneficial</li>\n<li>Knowledge of equities, futures, options and other asset classes is preferred</li>\n<li>Enterprise level experience with OMS architecture and design is preferred</li>\n<li>Experience with messaging 
middleware, Solace preferred</li>\n<li>Experience with relational and NoSQL databases. MongoDB preferred</li>\n<li>Experience working with financial data, including reference data, market data, order/execution and positions data.</li>\n<li>Experience working with Cloud: AWS (preferred), GCP or Azure</li>\n</ul>\n<p>Millennium pays a total compensation package which includes a base salary, discretionary performance bonus, and a comprehensive benefits package. The estimated base salary range for this position is $175,000 to $250,000, which is specific to New York and may change in the future.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_867e3558-9a7","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Equity IT","sameAs":"https://mlp.eightfold.ai","logo":"https://logos.yubhub.co/mlp.eightfold.ai.png"},"x-apply-url":"https://mlp.eightfold.ai/careers/job/755955412056","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$175,000 to $250,000","x-skills-required":["Core Java","Java Swing","HTML","JavaScript","CSS","JQuery","Multithreading","Distributed systems","Unit testing frameworks","Continuous test-driven development practices","MVC","MV","MVP","RESTful web services"],"x-skills-preferred":["EXT-JS","AngularJS","AJAX","JSON","Equities","Futures","Options","OMS architecture and design","Messaging middleware","Solace","Relational databases","NoSQL databases","MongoDB","Financial data","Cloud","AWS","GCP","Azure"],"datePosted":"2026-04-18T22:13:00.318Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Miami, Florida, United States of America · New York, New York, United States of America"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Finance","skills":"Core Java, Java Swing, HTML, JavaScript, CSS, JQuery, 
Multithreading, Distributed systems, Unit testing frameworks, Continuous test-driven development practices, MVC, MV, MVP, RESTful web services, EXT-JS, AngularJS, AJAX, JSON, Equities, Futures, Options, OMS architecture and design, Messaging middleware, Solace, Relational databases, NoSQL databases, MongoDB, Financial data, Cloud, AWS, GCP, Azure","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":175000,"maxValue":250000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_7275ef33-009"},"title":"Staff Data Engineer","description":"<p>At Bayer, we&#39;re seeking a Staff Data Engineer to join our team. As a Staff Data Engineer, you will design and lead the implementation of data flows to connect operational systems, data for analytics and business intelligence (BI) systems. You will recognize opportunities to reuse existing data flows, lead the build of data streaming systems, optimize the code to ensure processes perform optimally, and lead work on database management.</p>\n<p>Communicating Between Technical and Non-Technical Colleagues</p>\n<p>As a Staff Data Engineer, you will communicate effectively with technical and non-technical stakeholders, support and host discussions within a multidisciplinary team, and be an advocate for the team externally.</p>\n<p>Data Analysis and Synthesis</p>\n<p>You will undertake data profiling and source system analysis, present clear insights to colleagues to support the end use of the data.</p>\n<p>Data Development Process</p>\n<p>You will design, build and test data products that are complex or large scale, build teams to complete data integration services.</p>\n<p>Data Innovation</p>\n<p>You will understand the impact on the organization of emerging trends in data tools, analysis techniques and data usage.</p>\n<p>Data Integration Design</p>\n<p>You will select and implement the 
appropriate technologies to deliver resilient, scalable and future-proofed data solutions and integration pipelines.</p>\n<p>Data Modeling</p>\n<p>You will produce relevant data models across multiple subject areas, explain which models to use for which purpose, understand industry-recognised data modelling patterns and standards, and when to apply them, compare and align different data models.</p>\n<p>Metadata Management</p>\n<p>You will design an appropriate metadata repository and present changes to existing metadata repositories, understand a range of tools for storing and working with metadata, provide oversight and advice to more inexperienced members of the team.</p>\n<p>Problem Resolution</p>\n<p>You will respond to problems in databases, data processes, data products and services as they occur, initiate actions, monitor services and identify trends to resolve problems, determine the appropriate remedy and assist with its implementation, and with preventative measures.</p>\n<p>Programming and Build</p>\n<p>You will use agreed standards and tools to design, code, test, correct and document moderate-to-complex programs and scripts from agreed specifications and subsequent iterations, collaborate with others to review specifications where appropriate.</p>\n<p>Technical Understanding</p>\n<p>You will understand the core technical concepts related to the role, and apply them with guidance.</p>\n<p>Testing</p>\n<p>You will review requirements and specifications, and define test conditions, identify issues and risks associated with work, analyse and report test activities and results.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_7275ef33-009","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Bayer","sameAs":"https://talent.bayer.com","logo":"https://logos.yubhub.co/talent.bayer.com.png"},"x-apply-url":"https://talent.bayer.com/careers/job/562949976928777","x-work-arrangement":"remote","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":"$114,400 to $171,600","x-skills-required":["Proficiency in programming language such as Python or Java","Experience with Big Data technologies such as Hadoop, Spark, and Kafka","Familiarity with ETL processes and tools","Knowledge of SQL and NoSQL databases","Strong understanding of relational databases","Experience with data warehousing solutions","Proficiency with cloud platforms","Expertise in data modeling and design","Experience in designing and building scalable data pipelines","Experience with RESTful APIs and data integration"],"x-skills-preferred":["Relevant certifications (e.g., GCP Certified, AWS Certified, Azure Certified)","Bachelor's degree in Computer Science, Data Engineering, Information Technology, or a related field","Strong analytical and communication skills","Ability to work collaboratively in a team environment","High level of accuracy and attention to detail"],"datePosted":"2026-04-18T22:12:56.654Z","jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Healthcare","skills":"Proficiency in programming language such as Python or Java, Experience with Big Data technologies such as Hadoop, Spark, and Kafka, Familiarity with ETL processes and tools, Knowledge of SQL and NoSQL databases, Strong understanding of relational databases, Experience with data warehousing solutions, Proficiency with cloud platforms, Expertise in data modeling and design, Experience in designing and building scalable data pipelines, Experience with RESTful APIs and data integration, Relevant 
certifications (e.g., GCP Certified, AWS Certified, Azure Certified), Bachelor's degree in Computer Science, Data Engineering, Information Technology, or a related field, Strong analytical and communication skills, Ability to work collaboratively in a team environment, High level of accuracy and attention to detail","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":114400,"maxValue":171600,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_80dbb0f6-e54"},"title":"Senior Security Engineer","description":"<p>We are seeking a subject matter expert with direct experience in a wide range of security technologies, tools, and methodologies. This role is suited for an experienced Windows Engineer with proven understanding in enterprise security and will focus on building toolsets and processes to support the Information Security Program (ISP).</p>\n<p>The team fosters a collaborative environment and is building a best-in-class program to partner with the business to protect the Firm&#39;s information and computer systems.</p>\n<p>Principal Responsibilities:</p>\n<ul>\n<li>Provide a high level of security consultancy and engineering support for Windows/Active Directory/Azure security solutions including analysis and development of Windows security solutions.</li>\n<li>Strong understanding of modern authentication protocols, e.g., OIDC / OAUTH 2.</li>\n<li>Contribute to the vision, strategy, and drive design and implementation for authentication platforms both on premises and in the cloud.</li>\n<li>Provide security consultancy and engineering support for SAML, OIDC and Kerberos authentication across different Identity providers, including analysis and development of SSO, PKI, and other authentication solutions.</li>\n<li>Able to demonstrate clear understanding of current risks and threats related to Identity Management at technical 
and managerial levels.</li>\n<li>Actively monitor new and emerging security and privacy related technologies, trends, issues, and solutions and assess their applicability to key business initiatives and strategies.</li>\n<li>Participate in Information Security Incident Response activities for the Firm&#39;s environment.</li>\n<li>Liaison with key stakeholders to create and enforce policy including Technology organization, Trading units, Legal, Internal Audit, and Compliance.</li>\n<li>Provide support to Security and other technical operations staff to ensure smooth turnover from Engineering to Production - and provide mentoring to junior level security professionals.</li>\n<li>Develop and maintain documentation of all Security products including specific tools, technologies, and processes.</li>\n</ul>\n<p>Qualifications/Skills Required:</p>\n<ul>\n<li>Bachelor&#39;s degree in computer science or engineering preferred.</li>\n<li>7 + years&#39; experience working in a technical role with a minimum of 2 + years&#39; experience focusing on information security in the financial industry (preferred).</li>\n<li>Excellent understanding and experience of engineering Microsoft security solutions – including desktop and server operating systems, EntraID, Active Directory, Group Policy, Desired Configuration State, DNS, Messaging.</li>\n<li>Ability to understand code in C#/.NET and / or Python and strong scripting experience in PowerShell.</li>\n<li>Experience managing IaaS, SaaS solutions and services using CI/CD pipelines. 
Jenkins, Terraform experience is a strong plus.</li>\n<li>Solid understanding of SAML, OIDC and Kerberos authentication and related technology controls and best practices.</li>\n<li>Experience with Office 365 security controls including usage of Azure Active Directory, Conditional Access, o365 logging APIs, Microsoft CAS, and Microsoft Authenticator.</li>\n<li>Understanding and experience with implementing Data Loss Prevention (DLP) solutions, policies, and technologies.</li>\n<li>Understanding of Azure Information Protection (AIP) and its components, including labeling, classification, and encryption.</li>\n<li>Ability to develop and implement strategies to ensure compliance with data protection regulations, such as GDPR or HIPAA, utilizing DLP and AIP solutions.</li>\n<li>Strong knowledge and experience in a variety of security technologies including: EDR, SIEM, Vulnerability Management is a plus.</li>\n<li>Relevant security certification (CISSP, GCIA, CISM, etc.) and/or product certifications (PingFederate, Azure, Windows, AD etc.) a plus.</li>\n</ul>\n<p>The estimated base salary range for this position is $175,000 to $250,000, which is specific to New York and may change in the future. 
Millennium pays a total compensation package which includes a base salary, discretionary performance bonus, and a comprehensive benefits package.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_80dbb0f6-e54","directApply":true,"hiringOrganization":{"@type":"Organization","name":"IT Infrastructure","sameAs":"https://mlp.eightfold.ai","logo":"https://logos.yubhub.co/mlp.eightfold.ai.png"},"x-apply-url":"https://mlp.eightfold.ai/careers/job/755944784476","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$175,000 to $250,000","x-skills-required":["security technologies","tools","methodologies","Windows security solutions","OIDC / OAUTH 2","SAML","Kerberos authentication","Identity providers","SSO","PKI","EDR","SIEM","Vulnerability Management"],"x-skills-preferred":["C#/.NET","Python","PowerShell","Jenkins","Terraform","Azure Active Directory","Conditional Access","o365 logging APIs","Microsoft CAS","Microsoft Authenticator","Data Loss Prevention (DLP)","Azure Information Protection (AIP)"],"datePosted":"2026-04-18T22:12:55.408Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"New York, New York, United States of America"}},"employmentType":"FULL_TIME","occupationalCategory":"IT","industry":"Finance","skills":"security technologies, tools, methodologies, Windows security solutions, OIDC / OAUTH 2, SAML, Kerberos authentication, Identity providers, SSO, PKI, EDR, SIEM, Vulnerability Management, C#/.NET, Python, PowerShell, Jenkins, Terraform, Azure Active Directory, Conditional Access, o365 logging APIs, Microsoft CAS, Microsoft Authenticator, Data Loss Prevention (DLP), Azure Information Protection 
(AIP)","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":175000,"maxValue":250000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_62c461dc-a98"},"title":"Lead Cloud Engineer","description":"<p>For Digital Hub Warsaw, we&#39;re looking for a Lead Cloud Engineer to join our team. As a visionary company, we&#39;re driven to solve the world&#39;s toughest challenges and strive for a world where &#39;Health for all, Hunger for none&#39; is no longer a dream, but a real possibility.</p>\n<p>We&#39;re building an enterprise-grade Infrastructure Operations Platform named VOPs to support the facilitation of most complex IT infrastructure operations for all IT teams at Bayer globally. Your responsibilities will include:</p>\n<p>Planning and Design: Join the team responsible for planning and running our VOPs platform. Leadership: Mentor a team of engineers, providing guidance and support in the implementation of cloud solutions. Collaboration with Stakeholders: Work closely with Squad Leads and other stakeholders to understand requirements and align integration strategies with business goals. Technical Oversight: Ensure that solutions are scalable, reliable, maintainable, and secure, adhering to best practices in IT architecture and in-line with Bayer&#39;s strategy. Documentation and Standards: Create, maintain, and review comprehensive documentation for processes, standards, and best practices. Intercultural Communication: Foster an environment of open communication and collaboration among diverse teams across different geographical locations.</p>\n<p>Our requirements include: Degree in Computer Science, Information Technology, or related field, or equivalent practical experience as an IT engineer. At least 6 years of experience in Azure (other clouds will be a plus). 
Proficiency in IT Architecture &amp; design, specifically in infrastructure automation, provisioning, and maintenance. Strong analytical skills with the ability to troubleshoot and resolve technical issues effectively, even under pressure. Familiarity with IaC (e.g., Terraform) and strong proficiency in Python. Linux command line tools and shell scripting. Experience with building IT systems in regulated environments. Integration and Automation Expertise: Knowledge of CI/CD processes and experience in building and deploying integration solutions (Azure DevOps, GitHub Repos, and GitHub Actions). Excellent verbal and written communication skills, with the ability to present complex technical information to non-technical stakeholders. Experience with API management and/or design will be appreciated. Intercultural Competence: Ability to work collaboratively in a multicultural environment, respecting diverse perspectives and fostering teamwork, establishing and maintaining a robust professional network. Language Proficiency: Fluent in English, both spoken and written.</p>\n<p>What we offer includes: A flexible, hybrid work model. Great workplace in a new modern office in Warsaw. Career development, 360° Feedback &amp; Mentoring programme. Wide access to professional development tools, trainings, &amp; conferences. Company Bonus &amp; Reward Structure. VIP Medical Care Package (including Dental &amp; Mental health). Holiday allowance (&#39;Wczasy pod gruszą&#39;). Life &amp; Travel Insurance. Pension plan. Co-financed sport card. FitProfit. Meals Subsidy in Office. Additional days off. Budget for Home Office Setup &amp; Maintenance. Access to Company Game Room equipped with table tennis, soccer table, Sony PlayStation 5, and Xbox Series X consoles setup with premium game passes, and massage chairs. 
Tailored-made support in relocation to Warsaw when needed.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_62c461dc-a98","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Bayer","sameAs":"https://talent.bayer.com","logo":"https://logos.yubhub.co/talent.bayer.com.png"},"x-apply-url":"https://talent.bayer.com/careers/job/562949973780545","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Azure","IT Architecture & design","Infrastructure automation","Provisioning","Maintenance","IaC (Terraform)","Python","Linux command line tools","Shell scripting","CI/CD processes","Azure DevOps","GitHub Repos","GitHub Actions","API management","API design"],"x-skills-preferred":[],"datePosted":"2026-04-18T22:11:27.474Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Warsaw"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Azure, IT Architecture & design, Infrastructure automation, Provisioning, Maintenance, IaC (Terraform), Python, Linux command line tools, Shell scripting, CI/CD processes, Azure DevOps, GitHub Repos, GitHub Actions, API management, API design"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_5ea08502-c4c"},"title":"CRM Dynamic/ Associate Director, Software Engineering Specialist","description":"<p>Join HSBC and fulfil your potential in the role of Associate Director, Software Engineering Specialist. 
As a key member of our team, you will participate in every aspect of the solution implementation from analysing customer business requirements to configuring, customising and testing the Microsoft Dynamics application.</p>\n<p>Your key responsibilities will include collaborating with stakeholders throughout the organisation to ensure a comprehensive set of requirements, aligned to business objectives, gaining a full understanding of current business processes and identifying opportunities for improvements.</p>\n<p>You will provide in-depth knowledge of the Microsoft Dynamics application and evaluate the customer&#39;s business processes against the standard Microsoft Dynamics functionality.</p>\n<p>Key skills required for this role include:</p>\n<ul>\n<li>At least 13 years of hands-on developing solutions using C#, Dynamics CRM &amp; Azure apps.</li>\n<li>Experience with Microsoft Dynamics D365 Sales Online implementation.</li>\n<li>Good understanding of existing Microsoft ALM stack and experience using it.</li>\n<li>Ability to use VSTS/TFS as a consulting task tracking tool.</li>\n</ul>\n<p>In addition, you will have expertise in writing unit tests, utilising Application Insights features and proficiency in programming languages such as C# and JavaScript.</p>\n<p>If you are a skilled software engineer with a passion for delivering high-quality solutions, we encourage you to apply for this exciting opportunity.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_5ea08502-c4c","directApply":true,"hiringOrganization":{"@type":"Organization","name":"HSBC","sameAs":"https://portal.careers.hsbc.com","logo":"https://logos.yubhub.co/portal.careers.hsbc.com.png"},"x-apply-url":"https://portal.careers.hsbc.com/careers/job/563774610678809","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["C#","Dynamics CRM","Azure apps","Microsoft Dynamics D365 Sales Online","VSTS/TFS","Application Insights","JavaScript"],"x-skills-preferred":[],"datePosted":"2026-04-18T22:11:04.572Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Pune"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Finance","skills":"C#, Dynamics CRM, Azure apps, Microsoft Dynamics D365 Sales Online, VSTS/TFS, Application Insights, JavaScript"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_79072a0c-85b"},"title":"Behavioral Data Science Intern - Agentic AI & People Analytics","description":"<p>Where do you want to go? What do you want to achieve? How would you like to get involved? At Bayer, we bring together multi-talents and specialists to feed the world, slow climate change, and create healthier, more sustainable lives for all.</p>\n<p>This is the opportunity to start your career with a global leader committed to HealthForAll and HungerForNone. Bring your ideas, skills, and passion with you. Your career starts here.</p>\n<p>Are you passionate about AI, data science, and behavioural insights? Join our Talent Impact team and apply your technical skills to projects that combine machine learning, generative AI, and behavioural science to improve how people work and develop. 
This internship offers hands-on experience in a supportive environment where you’ll learn, contribute, and make an impact.</p>\n<p>Your tasks and educational objectives:</p>\n<ul>\n<li>Work with HR and behavioural data to create structured, analysis-ready datasets for people analytics.</li>\n<li>Support development and testing agentic AI workflows (including LLM-based tools) that support HR decision-making.</li>\n<li>Help to build and evaluate machine learning models to explore workforce trends, learning behaviours, and engagement.</li>\n<li>Together with team members, create dashboards and visualisations that turn complex data into actionable insights for HR and business partners.</li>\n<li>Apply modern data workflows using Databricks, GitHub Spaces, and cloud platforms (Azure or AWS).</li>\n<li>Collaborate with experienced mentors and participate in small experiments to measure impact and share findings.</li>\n</ul>\n<p>Who you are:</p>\n<ul>\n<li>Python programming skills for data processing, modelling, and AI workflows.</li>\n<li>Hands-on experience with Generative AI (GenAI) or LLM-based systems (academic projects or internships count).</li>\n<li>Familiarity with cloud platforms (Azure or AWS), with a focus on Databricks and GitHub Spaces for collaborative development.</li>\n<li>Solid foundation in data science and machine learning.</li>\n<li>Strong interest in behavioural science, people analytics, and HR.</li>\n<li>Currently enrolled in a Master’s or advanced Bachelor’s program in data science, computer science, cognitive science, psychology, behavioural economics, neuroscience, or a related field.</li>\n<li>Curiosity, willingness to learn, and ability to work on-site in Leverkusen.</li>\n<li>Fluent English, written and spoken.</li>\n</ul>\n<p>What we offer:</p>\n<p>Our benefits package is flexible, appreciative, and tailored to your lifestyle, because what matters to you, matters to us!</p>\n<ul>\n<li>For a full-time position, you can expect an attractive 
salary of € 2,214 gross per month.</li>\n<li>Depending on the nature of your job, flexible work arrangements can be made in alignment with your manager.</li>\n<li>We support your growth through access to professional development and learning opportunities, such as LinkedIn Learning and our language learning platform Education First.</li>\n<li>As one of our perks, our Corporate Benefits program grants you access to sales discounts from more than 150 brands.</li>\n<li>We embrace diversity by providing an inclusive work environment in which you are welcomed, supported, and encouraged to bring your whole self to work.</li>\n</ul>\n<p>Ever feel burnt out by bureaucracy? Us too. That’s why we’re changing the way we work, for higher productivity, faster innovation, and better results. We call it Dynamic Shared Ownership (DSO). Learn more about what DSO will mean for you in your new role here https://www.bayer.com/en/strategy/strategy</p>\n<p>Our Mission &amp; Strategy:</p>\n<p>Through Dynamic Shared Ownership, we’re putting an end to the hierarchical model and putting more power in the hands of the innovators and creators at Bayer. Ready to join us? 
Apply now and start your 6-month learning journey in Leverkusen!</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_79072a0c-85b","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Bayer","sameAs":"https://talent.bayer.com","logo":"https://logos.yubhub.co/talent.bayer.com.png"},"x-apply-url":"https://talent.bayer.com/careers/job/562949975182354","x-work-arrangement":"onsite","x-experience-level":"entry","x-job-type":"internship","x-salary-range":null,"x-skills-required":["Python","Generative AI","LLM-based systems","Cloud platforms (Azure or AWS)","Databricks","GitHub Spaces","Data science","Machine learning"],"x-skills-preferred":[],"datePosted":"2026-04-18T22:10:44.663Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Leverkusen"}},"employmentType":"INTERN","occupationalCategory":"Engineering","industry":"Manufacturing","skills":"Python, Generative AI, LLM-based systems, Cloud platforms (Azure or AWS), Databricks, GitHub Spaces, Data science, Machine learning"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_a8d34aff-3e5"},"title":"Applied AI Engineer, Global Public Sector","description":"<p>We&#39;re hiring Applied AI Engineers to build custom end-to-end AI applications for our public sector clients using the latest developments in the field of AI.</p>\n<p>You will partner with public sector clients to deeply understand their challenges and define AI-driven solutions.</p>\n<p>Key responsibilities include:</p>\n<ul>\n<li>Building and deploying end-to-end AI applications into production leveraging latest developments from the biggest AI labs, and open source models</li>\n<li>Collaborating with cross-functional teams, including data annotation specialists, to create high-quality training datasets</li>\n<li>Designing 
and maintaining robust evaluation frameworks to ensure the reliability and effectiveness of AI models</li>\n<li>Participating in customer engagements, including occasional travel (approximately two weeks per quarter)</li>\n</ul>\n<p>Ideally you&#39;d have:</p>\n<ul>\n<li>A strong engineering background, with a Bachelor’s degree in Computer Science, Mathematics, or a related quantitative field (or equivalent practical experience)</li>\n<li>7+ years of post-graduation engineering experience, with demonstrated proficiency in languages such as Python, TypeScript/JavaScript, Java, or C++</li>\n<li>2+ years of experience applying AI/ML in production environments, such as deploying deep learning solutions, building generative/agentic AI applications or setting up evaluations pipelines</li>\n<li>Familiarity with cloud-based machine learning tools and platforms (e.g. AWS, GCP, Azure)</li>\n<li>Strong problem-solving skills, with a data-driven approach to iterating on machine learning models and datasets</li>\n<li>Excellent written and verbal communication skills to collaborate effectively in a cross-functional environment</li>\n</ul>\n<p>Nice to haves:</p>\n<ul>\n<li>Experience working at a startup, particularly as founding engineer</li>\n<li>Experience building and deploying large-scale AI solutions</li>\n<li>Strong written and verbal communication skills to operate in a cross-functional team environment</li>\n<li>Proficiency in Arabic (if focused on language models)</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_a8d34aff-3e5","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Scale","sameAs":"https://scale.com/","logo":"https://logos.yubhub.co/scale.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/scaleai/jobs/4413992005","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Python","TypeScript/JavaScript","Java","C++","Cloud-based machine learning tools and platforms (e.g. AWS, GCP, Azure)"],"x-skills-preferred":["Experience working at a startup, particularly as founding engineer","Experience building and deploying large-scale AI solutions","Strong written and verbal communication skills to operate in a cross-functional team environment","Proficiency in Arabic (if focused on language models)"],"datePosted":"2026-04-18T16:00:59.864Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Doha, Qatar; London, UK"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, TypeScript/JavaScript, Java, C++, Cloud-based machine learning tools and platforms (e.g. AWS, GCP, Azure), Experience working at a startup, particularly as founding engineer, Experience building and deploying large-scale AI solutions, Strong written and verbal communication skills to operate in a cross-functional team environment, Proficiency in Arabic (if focused on language models)"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_3cc878fa-5d1"},"title":"Infrastructure Software Engineer, Enterprise GenAI","description":"<p>We are seeking a strong engineer to join our team and help us build and scale our core infrastructure in a fast-paced environment. 
The ideal candidate will have a strong understanding of software engineering principles and practices, as well as experience with large-scale distributed systems.</p>\n<p>You will implement solutions across multiple cloud providers (GCP, Azure, AWS) for customers in diverse, highly-regulated industries like healthcare, telecom, finance, and retail.</p>\n<p>Key responsibilities include:</p>\n<ul>\n<li>Architecting multi-cloud systems and abstractions to allow the SGP platform to run on top of existing Cloud providers</li>\n<li>Implementing custom integrations between Scale AI&#39;s platform and customer data environments (cloud platforms, data warehouses, internal APIs)</li>\n<li>Collaborating with platform, product teams and our customers directly to develop and implement innovative infrastructure that scales to meet evolving needs</li>\n<li>Delivering experiments at a high velocity and level of quality to engage our customers</li>\n</ul>\n<p>Requirements include:</p>\n<ul>\n<li>4+ years of full-time engineering experience, post-graduation</li>\n<li>Experience scaling products at hyper growth startups</li>\n<li>Experience tinkering with or productizing LLMs, vector databases, and the other latest AI technologies</li>\n<li>Proficient in Python or Javascript/Typescript, and SQL</li>\n<li>Experience with Kubernetes</li>\n<li>Experience with major cloud providers (AWS, Azure, GCP)</li>\n<li>Excellent communication skills with the ability to explain technical concepts to both technical and non-technical audiences</li>\n</ul>\n<p>Compensation packages at Scale for eligible roles include base salary, equity, and benefits.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_3cc878fa-5d1","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Scale","sameAs":"https://www.scale.com/","logo":"https://logos.yubhub.co/scale.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/scaleai/jobs/4665557005","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$179,400-$224,250 USD","x-skills-required":["Python","Javascript/Typescript","SQL","Kubernetes","GCP","Azure","AWS"],"x-skills-preferred":[],"datePosted":"2026-04-18T16:00:45.380Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA; New York, NY"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, Javascript/Typescript, SQL, Kubernetes, GCP, Azure, AWS","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":179400,"maxValue":224250,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_94999453-111"},"title":"Senior Full-Stack Software Engineer, (Forward Deployed), GPS","description":"<p>Scale&#39;s rapidly growing Global Public Sector team is focused on using AI to address critical challenges facing the public sector around the world.</p>\n<p>Our core work consists of creating custom AI applications that will impact millions of citizens, generating high-quality training data for custom LLMs, and upskilling and advisory services to spread the impact of AI.</p>\n<p>As a Full Stack Software Engineer (Forward Deployed), you&#39;ll collaborate directly with public sector counterparts to quickly build full-stack, AI applications, to solve their most pressing challenges and achieve meaningful impact for citizens.</p>\n<p>At Scale, we&#39;re not just building AI solutions,we&#39;re enabling the public sector to 
transform their operations and better serve citizens through cutting-edge technology.</p>\n<p>If you&#39;re ready to shape the future of AI in the public sector and be a founding member of our team, we&#39;d love to hear from you.</p>\n<p><strong>Responsibilities</strong></p>\n<ul>\n<li>Partner with public sector clients to scope, collect feedback and implement solutions for complex problems, including spending up to two weeks per month in client offices for feedback and delivery.</li>\n<li>Architect production-grade applications that integrate AI models with full-stack frameworks, managing everything from interactive UIs to backend APIs and systems.</li>\n<li>Deploy and manage infrastructure within cloud environments, ensuring the highest levels of system integrity, security, scalability, and long-term reliability.</li>\n<li>Contribute to core platform features designed to be reused across diverse international client use cases.</li>\n<li>Partner with design, product, and data teams to build robust applications aligned with the broader technical architecture.</li>\n</ul>\n<p><strong>Ideal Candidate</strong></p>\n<ul>\n<li>Bachelor&#39;s degree in Computer Science or a related quantitative field</li>\n<li>5+ years of post-graduation, full-stack engineering experience with demonstrated proficiency in React (required), TypeScript, Next.js, Python, Node.js, PostgreSQL or MongoDB plus hands-on experience with Docker, Kubernetes, and Azure/AWS/GCP.</li>\n<li>Proven ability to architect scalable, production-grade applications with a strong handle on cloud environments and infrastructure health.</li>\n<li>Experience working directly within customer infrastructure to deploy, maintain, and troubleshoot complex, end-to-end solutions.</li>\n<li>A self-starting approach with the technical maturity to navigate ambiguous requirements and deliver reliable software.</li>\n<li>Driven async communication methodologies to reduce communication frictions</li>\n</ul>\n<p><strong>Nice to 
Haves</strong></p>\n<ul>\n<li>Proficient in Arabic</li>\n<li>Past experience working in a forward deployed engineer / dedicated customer engineer role</li>\n<li>Experience working cross functionally with operations</li>\n<li>Experience building solutions with LLMs and a deep understanding of the overall Gen AI landscape</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_94999453-111","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Scale","sameAs":"https://scale.com/","logo":"https://logos.yubhub.co/scale.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/scaleai/jobs/4676608005","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["React","TypeScript","Next.js","Python","Node.js","PostgreSQL","MongoDB","Docker","Kubernetes","Azure","AWS","GCP"],"x-skills-preferred":[],"datePosted":"2026-04-18T16:00:24.081Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Dubai, UAE"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"React, TypeScript, Next.js, Python, Node.js, PostgreSQL, MongoDB, Docker, Kubernetes, Azure, AWS, GCP"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_740da2af-174"},"title":"Security Engineer, Detection & Response","description":"<p>We are seeking a Senior Security Engineer with a specialty in Detection and Incident Response to join our Security Engineering team. 
This role sits at the intersection of security operations and software engineering, requiring you to investigate incidents and build the systems that detect, contain, and prevent them.</p>\n<p>You will design and ship high-precision detections across cloud services and enterprise SaaS, develop automation that shortens response timelines, and mature the telemetry pipelines that make it all possible. Your ability to write production-quality code is just as important as your ability to triage an alert.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Engineer, test, and deploy detection logic across cloud and enterprise environments, treating detections as software with version control, peer review, and measurable performance.</li>\n</ul>\n<ul>\n<li>Build and maintain incident response automation, runbooks, and tooling that reduce containment timelines without sacrificing developer velocity.</li>\n</ul>\n<ul>\n<li>Mature telemetry pipelines through improved schema design, normalization, enrichment, and quality checks that reduce false positives and increase signal fidelity.</li>\n</ul>\n<ul>\n<li>Perform digital incident investigations to identify and contain potential security breaches.</li>\n</ul>\n<ul>\n<li>Conduct digital forensics and malware analysis to understand attack vectors and adversary methodologies.</li>\n</ul>\n<ul>\n<li>Integrate alerting with messaging and ticketing systems to enable fast, traceable response workflows.</li>\n</ul>\n<ul>\n<li>Partner cross-functionally with IT, security, and engineering teams to harden identity and access patterns, close logging and forensics gaps, and implement maintainable guardrails that scale with the organisation.</li>\n</ul>\n<ul>\n<li>Utilize threat intelligence platforms to improve hunting, detection, and response workflows.</li>\n</ul>\n<ul>\n<li>Clearly explain the significance and impact of incidents, providing actionable recommendations to both technical and non-technical stakeholders.</li>\n</ul>\n<p>Ideal 
Candidate:</p>\n<ul>\n<li>5+ years of experience in Detection Engineering, Incident Response, or Security Operations, with a strong emphasis on building and shipping security tooling and automation.</li>\n</ul>\n<ul>\n<li>Proficiency in at least one programming language (e.g., Python, Go) and comfort writing production-grade code , not just scripts.</li>\n</ul>\n<ul>\n<li>Hands-on experience designing or improving detection pipelines, SIEM content, and alerting workflows in cloud-native environments.</li>\n</ul>\n<ul>\n<li>Practical experience with SIEM, EDR, and SOAR tools, with a preference for candidates who have built integrations or extended these platforms programmatically.</li>\n</ul>\n<ul>\n<li>Strong understanding of modern cyber threats, common attack techniques, and adversary TTPs.</li>\n</ul>\n<ul>\n<li>Familiarity with digital forensics tools and malware analysis techniques.</li>\n</ul>\n<ul>\n<li>Experience with cloud-native environments (e.g., AWS, GCP, Azure) and the security telemetry those environments generate.</li>\n</ul>\n<ul>\n<li>Exposure to threat intelligence platforms and integrating intel into detection and investigation workflows.</li>\n</ul>\n<ul>\n<li>Strong communication skills, with the ability to translate complex security findings into clear business impact.</li>\n</ul>\n<ul>\n<li>Relevant security certifications (e.g., GCIH, GCFA, GCIA, CISSP, GDSA) are a plus.</li>\n</ul>\n<p>Compensation packages at Scale for eligible roles include base salary, equity, and benefits. The range displayed on each job posting reflects the minimum and maximum target for new hire salaries for the position, determined by work location and additional factors, including job-related skills, experience, interview performance, and relevant education or training. Scale employees in eligible roles are also granted equity based compensation, subject to Board of Director approval. 
Your recruiter can share more about the specific salary range for your preferred location during the hiring process, and confirm whether the hired role will be eligible for equity grant. You’ll also receive benefits including, but not limited to: Comprehensive health, dental and vision coverage, retirement benefits, a learning and development stipend, and generous PTO. Additionally, this role may be eligible for additional benefits such as a commuter stipend.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_740da2af-174","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Scale","sameAs":"https://scale.com/","logo":"https://logos.yubhub.co/scale.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/scaleai/jobs/4684073005","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$237,600-$297,000 USD","x-skills-required":["Detection Engineering","Incident Response","Security Operations","Cloud Services","Enterprise SaaS","Automation","Telemetry Pipelines","Digital Forensics","Malware Analysis","Threat Intelligence Platforms","SIEM","EDR","SOAR","Cloud-Native Environments","Programming Languages","Python","Go"],"x-skills-preferred":["Hands-on experience designing or improving detection pipelines, SIEM content, and alerting workflows in cloud-native environments","Practical experience with SIEM, EDR, and SOAR tools, with a preference for candidates who have built integrations or extended these platforms programmatically","Strong understanding of modern cyber threats, common attack techniques, and adversary TTPs","Familiarity with digital forensics tools and malware analysis techniques","Experience with cloud-native environments (e.g., AWS, GCP, Azure) and the security telemetry those environments generate","Exposure to threat intelligence platforms and integrating intel into 
detection and investigation workflows","Strong communication skills, with the ability to translate complex security findings into clear business impact","Relevant security certifications (e.g., GCIH, GCFA, GCIA, CISSP, GDSA)"],"datePosted":"2026-04-18T16:00:14.303Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"New York, NY; San Francisco, CA; Seattle, WA; Washington, DC"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Detection Engineering, Incident Response, Security Operations, Cloud Services, Enterprise SaaS, Automation, Telemetry Pipelines, Digital Forensics, Malware Analysis, Threat Intelligence Platforms, SIEM, EDR, SOAR, Cloud-Native Environments, Programming Languages, Python, Go, Hands-on experience designing or improving detection pipelines, SIEM content, and alerting workflows in cloud-native environments, Practical experience with SIEM, EDR, and SOAR tools, with a preference for candidates who have built integrations or extended these platforms programmatically, Strong understanding of modern cyber threats, common attack techniques, and adversary TTPs, Familiarity with digital forensics tools and malware analysis techniques, Experience with cloud-native environments (e.g., AWS, GCP, Azure) and the security telemetry those environments generate, Exposure to threat intelligence platforms and integrating intel into detection and investigation workflows, Strong communication skills, with the ability to translate complex security findings into clear business impact, Relevant security certifications (e.g., GCIH, GCFA, GCIA, CISSP, GDSA)","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":237600,"maxValue":297000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_a400e696-2d2"},"title":"Staff Software Engineer, Enterprise 
GenAI","description":"<p>We&#39;re seeking a strong engineer to join our team and help us build and scale our product in a fast-paced environment. As a Staff Software Engineer, you will own large new areas within our product, working across backend, frontend, and interacting with LLMs and ML models. You will solve hard engineering problems in scalability and reliability.</p>\n<p>Key responsibilities include:</p>\n<ul>\n<li>Delivering experiments at a high velocity and level of quality to engage our customers</li>\n<li>Working across the entire product lifecycle from conceptualization through production</li>\n<li>Being able, and willing, to multi-task and learn new technologies quickly</li>\n</ul>\n<p>Ideally, you&#39;d have:</p>\n<ul>\n<li>7+ years of full-time engineering experience, post-graduation</li>\n<li>Experience scaling products at hyper growth startups</li>\n<li>Experience tinkering with or productizing LLMs, vector databases, and the other latest AI technologies</li>\n<li>Proficient in Python or Javascript/Typescript, and SQL</li>\n<li>Experience with Kubernetes</li>\n<li>Experience with major cloud providers (AWS, Azure, GCP)</li>\n</ul>\n<p>Compensation packages at Scale for eligible roles include base salary, equity, and benefits. The range displayed on each job posting reflects the minimum and maximum target for new hire salaries for the position, determined by work location and additional factors, including job-related skills, experience, interview performance, and relevant education or training.</p>\n<p>You’ll also receive benefits including, but not limited to: Comprehensive health, dental and vision coverage, retirement benefits, a learning and development stipend, and generous PTO. 
Additionally, this role may be eligible for additional benefits such as a commuter stipend.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_a400e696-2d2","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Scale","sameAs":"https://scale.com/","logo":"https://logos.yubhub.co/scale.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/scaleai/jobs/4569678005","x-work-arrangement":"hybrid","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":"$248,400-$310,500 USD","x-skills-required":["Python","Javascript/Typescript","SQL","Kubernetes","AWS","Azure","GCP"],"x-skills-preferred":["LLMs","vector databases"],"datePosted":"2026-04-18T16:00:11.482Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA; New York, NY"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, Javascript/Typescript, SQL, Kubernetes, AWS, Azure, GCP, LLMs, vector databases","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":248400,"maxValue":310500,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_bd7327f8-fcf"},"title":"Staff Software Engineer, Full-Stack - Enterprise Gen AI","description":"<p>We&#39;re looking for a frontend-focused full-stack engineer to help build AI-powered applications that redefine enterprise workflows and push the boundaries of interactive AI. As a staff software engineer, you&#39;ll work on a mix of cutting-edge customer-facing AI applications and internal SaaS products. Our engineering team powers projects like TIME&#39;s Person of the Year AI experience, where our AI technology helped shape one of the most iconic features in media. 
You&#39;ll also contribute to Scale&#39;s GenAI Platform (SGP), a powerful system that enables businesses to build and deploy AI agents at scale.</p>\n<p>Your responsibilities will include:</p>\n<ul>\n<li>Building and enhancing user-facing AI applications for major enterprise customers, including high-profile media and Fortune 500 companies</li>\n<li>Developing and refining features for Scale&#39;s GenAI Platform, empowering businesses to build, deploy, and manage AI-driven agents</li>\n<li>Designing, building, and optimizing polished, high-performance UIs using Next.js, React, TypeScript, and Tailwind</li>\n<li>Working closely with product managers, designers, and AI/ML teams to create seamless, intuitive, and impactful user experiences</li>\n<li>Integrating frontend applications with backend services, working with APIs, authentication systems, and cloud-based infrastructure</li>\n</ul>\n<p>In this role, you&#39;ll have the opportunity to shape the future of AI-powered user experiences, working on projects that impact millions of users while developing tools that empower businesses to deploy AI at scale.</p>\n<p>The base salary range for this full-time position in our hub locations of San Francisco, New York, or Seattle is $248,400—$310,500 USD. Compensation packages at Scale include base salary, equity, and benefits. 
You&#39;ll also receive benefits including, but not limited to: Comprehensive health, dental and vision coverage, retirement benefits, a learning and development stipend, and generous PTO.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_bd7327f8-fcf","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Scale","sameAs":"https://scale.com/","logo":"https://logos.yubhub.co/scale.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/scaleai/jobs/4529529005","x-work-arrangement":"onsite","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":"$248,400—$310,500 USD","x-skills-required":["Next.js","React","TypeScript","Tailwind","AI/ML","APIs","Authentication systems","Cloud-based infrastructure"],"x-skills-preferred":["FastAPI","PostgreSQL","GraphQL","AWS","Azure","GCP","Data-rich web platforms","Interactive AI applications","Agent-based systems"],"datePosted":"2026-04-18T16:00:02.231Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"New York, NY; San Francisco, CA"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Next.js, React, TypeScript, Tailwind, AI/ML, APIs, Authentication systems, Cloud-based infrastructure, FastAPI, PostgreSQL, GraphQL, AWS, Azure, GCP, Data-rich web platforms, Interactive AI applications, Agent-based systems","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":248400,"maxValue":310500,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_44975b06-cb1"},"title":"Senior Full-Stack Software Engineer, (Forward Deployed), GPS","description":"<p>We&#39;re seeking a Senior Full-Stack Software Engineer to join our Global Public Sector team. 
As a forward-deployed engineer, you&#39;ll collaborate directly with public sector counterparts to build full-stack AI applications that solve critical challenges and achieve meaningful impact for citizens.</p>\n<p>Our core work consists of creating custom AI applications, generating high-quality training data for custom LLMs, and upskilling and advisory services to spread the impact of AI.</p>\n<p>You&#39;ll partner with public sector clients to scope, collect feedback, and implement solutions for complex problems. You&#39;ll also architect production-grade applications that integrate AI models with full-stack frameworks, manage infrastructure within cloud environments, and contribute to core platform features.</p>\n<p>Ideally, you&#39;ll have a Bachelor&#39;s degree in Computer Science or a related quantitative field, 5+ years of full-stack engineering experience, and proficiency in React, TypeScript, Next.js, Python, Node.js, PostgreSQL or MongoDB, and hands-on experience with Docker, Kubernetes, and Azure/AWS/GCP.</p>\n<p>We&#39;re looking for a self-starting approach with technical maturity to navigate ambiguous requirements and deliver reliable software. 
You&#39;ll also need to drive async communication methodologies to reduce communication frictions.</p>\n<p>If you&#39;re ready to shape the future of AI in the public sector and be a founding member of our team, we&#39;d love to hear from you.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_44975b06-cb1","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Scale","sameAs":"https://scale.com/","logo":"https://logos.yubhub.co/scale.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/scaleai/jobs/4673310005","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["React","TypeScript","Next.js","Python","Node.js","PostgreSQL","MongoDB","Docker","Kubernetes","Azure","AWS","GCP"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:59:59.289Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"London, UK"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"React, TypeScript, Next.js, Python, Node.js, PostgreSQL, MongoDB, Docker, Kubernetes, Azure, AWS, GCP"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_bd00b53a-6fa"},"title":"Software Engineer, Enterprise AI","description":"<p>We are seeking a strong engineer to join our team and help us build and scale our product in a fast-paced environment. The ideal candidate will have a strong understanding of software engineering principles and practices, as well as experience with large-scale distributed systems.</p>\n<p>You will be responsible for owning large new areas within our product, working across backend, frontend, and interacting with LLMs and ML models. 
You will solve hard engineering problems in scalability and reliability.</p>\n<p>Key responsibilities include:</p>\n<ul>\n<li>Owning large new areas within our product</li>\n<li>Working across backend, frontend, and interacting with LLMs and ML models</li>\n<li>Delivering experiments at a high velocity and level of quality to engage our customers</li>\n<li>Working across the entire product lifecycle from conceptualization through production</li>\n</ul>\n<p>Ideally, you&#39;d have:</p>\n<ul>\n<li>4+ years of full-time engineering experience, post-graduation</li>\n<li>Experience scaling products at hyper growth startups</li>\n<li>Experience tinkering with or productizing LLMs, vector databases, and the other latest AI technologies</li>\n<li>Proficient in Python or Javascript/Typescript, and SQL</li>\n<li>Experience with Kubernetes</li>\n<li>Experience with major cloud providers (AWS, Azure, GCP)</li>\n</ul>\n<p>Compensation packages at Scale for eligible roles include base salary, equity, and benefits. The range displayed on each job posting reflects the minimum and maximum target for new hire salaries for the position, determined by work location and additional factors, including job-related skills, experience, interview performance, and relevant education or training.</p>\n<p>You’ll also receive benefits including, but not limited to: Comprehensive health, dental and vision coverage, retirement benefits, a learning and development stipend, and generous PTO. 
Additionally, this role may be eligible for additional benefits such as a commuter stipend.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_bd00b53a-6fa","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Scale","sameAs":"https://scale.com","logo":"https://logos.yubhub.co/scale.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/scaleai/jobs/4513943005","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$179,400-$224,250 USD","x-skills-required":["Python","Javascript/Typescript","SQL","Kubernetes","AWS","Azure","GCP"],"x-skills-preferred":["LLMs","vector databases","AI technologies"],"datePosted":"2026-04-18T15:59:58.329Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"New York, NY; San Francisco, CA"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, Javascript/Typescript, SQL, Kubernetes, AWS, Azure, GCP, LLMs, vector databases, AI technologies","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":179400,"maxValue":224250,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_45fc6ed2-285"},"title":"Senior Full-Stack Software Engineer, (Forward Deployed), GPS","description":"<p>We&#39;re seeking a Senior Full-Stack Software Engineer to join our Global Public Sector team. 
As a forward-deployed engineer, you&#39;ll collaborate directly with public sector counterparts to build full-stack AI applications that solve their most pressing challenges.</p>\n<p>Our core work consists of creating custom AI applications, generating high-quality training data for custom LLMs, and upskilling and advisory services to spread the impact of AI.</p>\n<p>You&#39;ll partner with public sector clients to scope, collect feedback, and implement solutions for complex problems. You&#39;ll also architect production-grade applications that integrate AI models with full-stack frameworks, manage infrastructure within cloud environments, and contribute to core platform features.</p>\n<p>Ideally, you&#39;ll have a Bachelor&#39;s degree in Computer Science or a related quantitative field, 5+ years of full-stack engineering experience, and proficiency in React, TypeScript, Next.js, Python, Node.js, PostgreSQL or MongoDB, Docker, Kubernetes, and Azure/AWS/GCP.</p>\n<p>You&#39;ll be a self-starting individual with technical maturity to navigate ambiguous requirements and deliver reliable software. 
You&#39;ll also have experience working directly within customer infrastructure to deploy, maintain, and troubleshoot complex, end-to-end solutions.</p>\n<p>Nice to have: proficient in Arabic, past experience working in a forward-deployed engineer/dedicated customer engineer role, experience working cross-functionally with operations, and experience building solutions with LLMs and a deep understanding of the overall Gen AI landscape.</p>\n<p>Please note that our policy requires a 90-day waiting period before reconsidering candidates for the same role.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_45fc6ed2-285","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Scale","sameAs":"https://scale.com/","logo":"https://logos.yubhub.co/scale.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/scaleai/jobs/4676606005","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["React","TypeScript","Next.js","Python","Node.js","PostgreSQL","MongoDB","Docker","Kubernetes","Azure","AWS","GCP"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:59:52.395Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Doha, Qatar"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"React, TypeScript, Next.js, Python, Node.js, PostgreSQL, MongoDB, Docker, Kubernetes, Azure, AWS, GCP"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_14499a71-fa9"},"title":"Software Engineer, Enterprise","description":"<p>At Scale AI, we&#39;re pioneering the next era of enterprise AI. 
As businesses race to harness the power of Generative AI, Scale is at the forefront, delivering cutting-edge solutions that transform workflows, automate complex processes, and drive unparalleled efficiency for the largest enterprises.</p>\n<p>We&#39;re looking for a Backend Engineer to help bring large-scale GenAI systems to production. In this role, you&#39;ll build the core infrastructure that powers AI products for some of the world&#39;s largest enterprises — designing scalable APIs, distributed data systems, and robust deployment pipelines that enable production-grade reliability and performance.</p>\n<p>This is a rare opportunity to be at the center of the GenAI revolution, solving hard backend and infrastructure challenges that make AI truly work at enterprise scale. If you&#39;re excited about shaping how AI systems are deployed and scaled in the real world, we want to hear from you.</p>\n<p>At Scale, we don&#39;t just follow AI advancements — we lead them. Backed by deep expertise in data, infrastructure, and model deployment, we are uniquely positioned to solve the hardest problems in AI adoption. 
Join us in shaping the future of enterprise AI, where your work will directly impact how businesses operate, innovate, and grow in the age of GenAI.</p>\n<p><strong>Responsibilities</strong></p>\n<ul>\n<li>Design, build, and scale backend systems that power enterprise GenAI products, focusing on reliability, performance, and deployment across both Scale&#39;s and customers&#39; infrastructure.</li>\n</ul>\n<ul>\n<li>Develop core services and APIs that integrate AI models and enterprise data sources securely and efficiently, enabling production-scale AI adoption.</li>\n</ul>\n<ul>\n<li>Architect scalable distributed systems for data processing, inference, and orchestration of large-scale GenAI workloads.</li>\n</ul>\n<ul>\n<li>Optimize backend performance for latency, throughput, and cost,ensuring AI applications can operate at enterprise scale across hybrid and multi-cloud environments.</li>\n</ul>\n<ul>\n<li>Manage and evolve cloud infrastructure (AWS, Azure, or GCP), driving automation, observability, and security for large-scale AI deployments.</li>\n</ul>\n<ul>\n<li>Collaborate with ML and product teams to bring cutting-edge GenAI models into production through efficient APIs, model serving systems, and evaluation frameworks.</li>\n</ul>\n<ul>\n<li>Continuously improve reliability and scalability, applying strong engineering practices to make AI systems robust, maintainable, and enterprise-ready.</li>\n</ul>\n<p><strong>Ideal Candidate</strong></p>\n<ul>\n<li>4+ years of experience developing large-scale backend or infrastructure systems, with a strong emphasis on distributed services, reliability, and scalability.</li>\n</ul>\n<ul>\n<li>Proficiency in Python or TypeScript, with experience designing high-performance APIs and backend architectures using frameworks such as FastAPI, Flask, Express, or NestJS.</li>\n</ul>\n<ul>\n<li>Deep familiarity with cloud infrastructure (AWS and Azure preferred), including container orchestration (Kubernetes, Docker) and 
Infrastructure-as-Code tools like Terraform.</li>\n</ul>\n<ul>\n<li>Experience managing data systems such as relational and NoSQL databases (PostgreSQL, DynamoDB, etc.) and building pipelines for data-intensive applications.</li>\n</ul>\n<ul>\n<li>Hands-on experience with GenAI applications, model integration, or AI agent systems,understanding how to deploy, evaluate, and scale AI workloads in production.</li>\n</ul>\n<ul>\n<li>Strong understanding of observability, CI/CD, and security best practices for running services in enterprise or multi-tenant environments.</li>\n</ul>\n<ul>\n<li>Ability to balance rapid iteration with production-grade quality, shipping reliable backend systems in fast-paced environments.</li>\n</ul>\n<p>Collaborative mindset, working closely with ML, infra, and product teams to bring complex GenAI systems into production at enterprise scale.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_14499a71-fa9","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Scale AI","sameAs":"https://scale.com/","logo":"https://logos.yubhub.co/scale.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/scaleai/jobs/4536653005","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Python","TypeScript","FastAPI","Flask","Express","NestJS","AWS","Azure","Kubernetes","Docker","Terraform","PostgreSQL","DynamoDB","GenAI","Model Integration","AI Agent Systems"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:59:48.948Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"London, UK"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, TypeScript, FastAPI, Flask, Express, NestJS, AWS, Azure, Kubernetes, Docker, Terraform, PostgreSQL, DynamoDB, GenAI, Model 
Integration, AI Agent Systems"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_6cae1ee9-b93"},"title":"Senior Technical Solutions Engineer (Platform)","description":"<p>As a Senior Technical Solutions Engineer, you will provide technical support for Databricks Platform related issues and resolve any challenges involving the Databricks unified analytics platform.</p>\n<p>You will assist customers in their Databricks journey and provide them with the guidance and knowledge that they need to accomplish value and achieve their strategic goals using our products.</p>\n<p>They will look to you for answers to everything from basic technical questions to complex architectural scenarios spanning across the entire Big Data ecosystem.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Troubleshoot and resolve complex customer issues related to Databricks platform</li>\n<li>Provide best practices support for custom-built solutions developed by Databricks customers</li>\n<li>Deliver suggestions for improving performance in customer-specific environments</li>\n<li>Assist with issues around third-party integrations with Databricks environment</li>\n<li>Demonstrate and coordinate with engineering and escalation teams to achieve resolution of customer issues and requests</li>\n<li>Participate in the creation and maintenance of company documentation and knowledge articles</li>\n<li>Be a true proponent of customer advocacy</li>\n<li>Strengthen your AWS/Azure and Databricks platform expertise through learning and internal training programs</li>\n<li>Participate in weekend and weekday on call rotation</li>\n</ul>\n<p>Requirements:</p>\n<ul>\n<li>4+ years experience designing, building, testing, and maintaining Python/Java/Scala based applications</li>\n<li>Expert level knowledge in python is desired</li>\n<li>Strong experience with SQL-based database is required</li>\n<li>Linux/Unix administration skills</li>\n<li>Hands-on 
experience with AWS, Azure or GCP</li>\n<li>Experience with &quot;Distributed Big Data Computing&quot; environment</li>\n<li>Technical degree or the equivalent experience</li>\n<li>Written and spoken proficiency in both Japanese and English</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_6cae1ee9-b93","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com/","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8488552002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Python","Java","Scala","SQL","Linux/Unix","AWS","Azure","GCP","Distributed Big Data Computing"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:59:28.244Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Tokyo, Japan"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, Java, Scala, SQL, Linux/Unix, AWS, Azure, GCP, Distributed Big Data Computing"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_88ec8f26-4c9"},"title":"Senior IT Systems Engineer","description":"<p>We&#39;re seeking a strategic thinker and proven problem-solver with deep expertise in modern IT ecosystems. As a Sr. IT Systems Engineer, you&#39;ll lead the design, implementation, administration, and optimization of core SaaS platforms, including Okta, Google Workspace, Slack, Atlassian, and other IT tools. 
You&#39;ll own end-to-end support, monitoring, troubleshooting, and performance tuning of applications, systems, and their complex interconnections,ensuring high availability, security, and seamless user experience.</p>\n<p>Key responsibilities include:</p>\n<ul>\n<li>Designing and implementing SaaS platforms and IT tools</li>\n<li>Providing technical guidance to support business expansion, system scalability, and infrastructure maturity</li>\n<li>Identifying gaps, risks, and opportunities in the environment and leading initiatives to enhance security posture, operational efficiency, and resilience</li>\n<li>Evaluating emerging technologies, IAM trends, and automation platforms and developing business cases and adoption recommendations</li>\n<li>Mentoring junior engineers and collaborating with cross-functional teams to align IT capabilities with organizational goals</li>\n</ul>\n<p>Basic qualifications include 8+ years of hands-on experience administering and optimizing a broad portfolio of SaaS applications in a hybrid and high-growth environment, with advanced proficiency in our core stack: Okta (including Advanced Server Access &amp; Workflows), Google Workspace, Slack Enterprise, Atlassian, etc.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_88ec8f26-4c9","directApply":true,"hiringOrganization":{"@type":"Organization","name":"xAI","sameAs":"https://www.xai.com","logo":"https://logos.yubhub.co/xai.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/xai/jobs/5071895007","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$184,000 - $276,000 USD","x-skills-required":["Okta","Google Workspace","Slack","Atlassian","IAM principles and protocols","APIs for custom integrations","Scripting and automation for monitoring, alerting, and operational efficiency","Azure","AWS","GCP cloud 
platforms"],"x-skills-preferred":["n8n","Okta Workflows","Workato","Zapier","BetterCloud","custom integrations"],"datePosted":"2026-04-18T15:58:57.233Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Palo Alto, CA"}},"employmentType":"FULL_TIME","occupationalCategory":"IT","industry":"Technology","skills":"Okta, Google Workspace, Slack, Atlassian, IAM principles and protocols, APIs for custom integrations, Scripting and automation for monitoring, alerting, and operational efficiency, Azure, AWS, GCP cloud platforms, n8n, Okta Workflows, Workato, Zapier, BetterCloud, custom integrations","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":184000,"maxValue":276000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_e6c2906a-625"},"title":"Senior Software Engineer,  Full-Stack – Scale GP","description":"<p>We are seeking a strong Senior Full-Stack Engineer to help us build, scale, and refine our rapidly growing Generative AI platform, Scale GP. As a senior engineer, you will work across the stack,from React/TypeScript frontends to Python-based backends,while integrating with LLMs and machine learning systems. 
You will solve complex challenges in scalability, reliability, and product experience while owning significant product areas in a fast-paced environment.</p>\n<p><strong>Responsibilities</strong></p>\n<ul>\n<li>Own major full-stack product areas, driving features from design through production deployment.</li>\n<li>Build modern frontend experiences using React and TypeScript, ensuring performance, usability, and responsiveness.</li>\n<li>Develop reliable backend services in Python, working with distributed systems, data pipelines, and ML/LLM components.</li>\n<li>Integrate with LLMs, vector databases, and AI infrastructure to power intelligent product experiences.</li>\n<li>Deliver experiments and new features quickly, maintaining high quality and tight feedback loops with customers.</li>\n<li>Collaborate across product, ML, and infrastructure teams to shape the direction of Scale GP.</li>\n<li>Adapt quickly,learning new technologies, frameworks, and tools as needed across the stack.</li>\n</ul>\n<p><strong>Ideal Experience</strong></p>\n<ul>\n<li>5+ years of full-time engineering experience, post-graduation.</li>\n<li>Strong experience developing full-stack applications using React, TypeScript, and Python.</li>\n<li>Experience scaling or shipping products at high-growth startups.</li>\n<li>Familiarity with LLMs, vector databases, embeddings, or other modern AI tooling (tinkering or production experience welcome).</li>\n<li>Proficiency with SQL and modern API development.</li>\n<li>Experience with Kubernetes, containerization, and microservice architectures.</li>\n<li>Experience working with at least one major cloud provider (AWS, GCP, or Azure).</li>\n</ul>\n<p>Compensation packages at Scale for eligible roles include base salary, equity, and benefits. 
The range displayed on each job posting reflects the minimum and maximum target for new hire salaries for the position, determined by work location and additional factors, including job-related skills, experience, interview performance, and relevant education or training. Scale employees in eligible roles are also granted equity based compensation, subject to Board of Director approval. Your recruiter can share more about the specific salary range for your preferred location during the hiring process, and confirm whether the hired role will be eligible for equity grant. You’ll also receive benefits including, but not limited to: Comprehensive health, dental and vision coverage, retirement benefits, a learning and development stipend, and generous PTO. Additionally, this role may be eligible for additional benefits such as a commuter stipend.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_e6c2906a-625","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Scale","sameAs":"https://scale.com/","logo":"https://logos.yubhub.co/scale.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/scaleai/jobs/4637484005","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$216,000-$270,000 USD","x-skills-required":["React","TypeScript","Python","LLMs","vector databases","embeddings","SQL","API development","Kubernetes","containerization","microservice architectures","cloud providers (AWS, GCP, or Azure)"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:58:56.168Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA; New York, NY"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"React, TypeScript, Python, LLMs, vector databases, embeddings, SQL, API development, Kubernetes, 
containerization, microservice architectures, cloud providers (AWS, GCP, or Azure)","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":216000,"maxValue":270000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_f2bc1be2-478"},"title":"Senior Technical Solutions Engineer, Platform","description":"<p>As a Senior Technical Solutions Engineer, you will provide technical support for Databricks Platform related issues and resolve any challenges involving the Databricks unified analytics platform.</p>\n<p>You will assist customers in their Databricks journey and provide them with the guidance and knowledge that they need to accomplish value and achieve their strategic goals using our products.</p>\n<p>They will look to you for answers to everything from basic technical questions to complex architectural scenarios spanning across the entire Big Data ecosystem.</p>\n<p>You will report to the Senior Manager of Technical Solutions.</p>\n<p>Key responsibilities include: Troubleshooting and resolving complex customer issues related to Databricks platform Providing best practices support for custom-built solutions developed by Databricks customers Delivering suggestions for improving performance in customer-specific environments Assisting with issues around third-party integrations with Databricks environment Demonstrating and coordinating with engineering and escalation teams to achieve resolution of customer issues and requests Participating in the creation and maintenance of company documentation and knowledge articles Being a true proponent of customer advocacy Strengthening your AWS/Azure and Databricks platform expertise through learning and internal training programs Participating in weekend and weekday on call rotation</p>\n<p>Requirements include: Minimum 4 years experience designing, building, testing, and maintaining 
Python/Java/Scala based applications Expert level knowledge in python is desired Solid experience with SQL-based database is required Linux/Unix administration skills Hands-on experience with AWS, Azure or GCP Candidate must possess excellent English written and oral communication skills Experience with &quot;Distributed Big Data Computing&quot; environment Technical degree or the equivalent experience</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_f2bc1be2-478","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/7902994002","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Python","Java","Scala","SQL","Linux/Unix administration","AWS","Azure","GCP","Distributed Big Data Computing"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:58:52.913Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Costa Rica"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, Java, Scala, SQL, Linux/Unix administration, AWS, Azure, GCP, Distributed Big Data Computing"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_04c1ff49-2d1"},"title":"Data Platform Solutions Architect (Professional Services)","description":"<p>We&#39;re hiring for multiple roles within our Professional Services team. As a Data Platform Solutions Architect, you will work with clients on short to medium-term customer engagements on their big data challenges using the Databricks platform. 
You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>Work on a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases</li>\n<li>Work with engagement managers to scope variety of professional services work with input from the customer</li>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n<li>Consult on architecture and design; bootstrap or implement customer projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks.</li>\n<li>Provide an escalated level of support for customer operational issues.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>Extensive experience in data engineering, data platforms &amp; analytics</li>\n<li>Comfortable writing code in either Python or Scala</li>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Spark runtime internals</li>\n<li>Familiarity with CI/CD for production deployments</li>\n<li>Working knowledge of MLOps</li>\n<li>Design and deployment of performant end-to-end data architectures</li>\n<li>Experience with technical project delivery - managing scope and timelines.</li>\n<li>Documentation and white-boarding skills.</li>\n<li>Experience working with clients and managing conflicts.</li>\n<li>Build skills in technical areas which support the deployment and integration of Databricks-based solutions to complete customer projects.</li>\n<li>Travel 
to customers 10% of the time</li>\n</ul>\n<p>[Preferred] Databricks Certification but not essential</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_04c1ff49-2d1","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8396801002","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","technical project delivery","documentation and white-boarding skills"],"x-skills-preferred":["Databricks Certification"],"datePosted":"2026-04-18T15:58:52.546Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"London, United Kingdom"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & analytics, Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, technical project delivery, documentation and white-boarding skills, Databricks Certification"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_f86a39bf-9a5"},"title":"Solutions Architect - Digital Native Business, Strategic","description":"<p>As a Solutions Architect on the Digital Natives team, you will work with leading data engineering, data science, and ML teams to push the boundaries of what big data architectures are capable of.</p>\n<p>Reporting to the Field Engineering Manager, you will collaborate with strategic customers, product teams, and the broader 
customer-facing team to develop architectures and solutions using our platform and APIs.</p>\n<p>You will guide customers through the competitive landscape, best practices, and implementation; and develop technical champions along the way.</p>\n<p>We are looking for high technical aptitude individuals with a deep sense of ownership and a desire to help customers ship solutions at production scale.</p>\n<p>Ideal candidates are deeply curious, capable of operating with confidence in ambiguous situations, and are extremely adaptable.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>Partner with the sales team and provide technical leadership to help customers understand how Databricks can help solve their business problems.</li>\n</ul>\n<ul>\n<li>Drive technical discovery and solution design, focusing on winning competitive deals and accelerating time-to-value in strategic accounts.</li>\n</ul>\n<ul>\n<li>Continuously research &amp; learn new technologies and their implementations on Databricks.</li>\n</ul>\n<ul>\n<li>Consult on Big Data architectures, implement proof of concepts for strategic projects, spanning data engineering, data science, and machine learning, and SQL analysis workflows.</li>\n</ul>\n<ul>\n<li>As well as validating integrations with cloud services, home-grown tools, and other 3rd party applications.</li>\n</ul>\n<ul>\n<li>Collaborate with your fellow Solutions Architects, using your skills to support each other and our customers.</li>\n</ul>\n<ul>\n<li>Become an expert in, promote, and recruit contributors for Databricks-inspired open-source projects (Spark, Delta Lake, and MLflow) across the developer community.</li>\n</ul>\n<ul>\n<li>Work closely with account executives to create and execute account penetration strategies, focusing on winning technical decision-makers and building new customer champions.</li>\n</ul>\n<ul>\n<li>Build trusted advisor relationships with senior and executive stakeholders by articulating the business value of 
Databricks in clear, outcomes-driven terms.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>5+ years in a data engineering, data science, technical architecture, or similar pre-sales/consulting role.</li>\n</ul>\n<ul>\n<li>Experience building distributed data systems.</li>\n</ul>\n<ul>\n<li>Comfortable programming in, and debugging, Python and SQL.</li>\n</ul>\n<ul>\n<li>Have built solutions with public cloud providers such as AWS, Azure, or GCP.</li>\n</ul>\n<ul>\n<li>Expertise in one of the following:</li>\n</ul>\n<ul>\n<li>Data Engineering technologies (Ex: Spark, Hadoop, Kafka)</li>\n</ul>\n<ul>\n<li>Data Science and Machine Learning technologies (Ex: pandas, scikit-learn, pytorch, Tensorflow)</li>\n</ul>\n<ul>\n<li>Strong executive presence with the ability to influence C/VP-level stakeholders and align technical solutions to strategic business priorities.</li>\n</ul>\n<ul>\n<li>Available to travel to customers in your region.</li>\n</ul>\n<ul>\n<li>[Desired] Degree in a quantitative discipline (Computer Science, Applied Mathematics, Operations Research).</li>\n</ul>\n<ul>\n<li>Nice to have: Databricks Certification.</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_f86a39bf-9a5","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8434467002","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,000-$247,500 USD","x-skills-required":["Data Engineering technologies","Data Science and Machine Learning technologies","Python","SQL","Cloud providers (AWS, Azure, 
GCP)"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:58:42.812Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Remote - California; Remote - Colorado; Remote - Oregon; Remote - Washington"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Data Engineering technologies, Data Science and Machine Learning technologies, Python, SQL, Cloud providers (AWS, Azure, GCP)","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180000,"maxValue":247500,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_5b244f27-9fd"},"title":"Resident Solutions Architect - Communications, Media, Entertainment & Games","description":"<p>As a Resident Solutions Architect in our Professional Services team, you will work with clients on short to medium term customer engagements on their big data challenges using the Databricks platform. You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>You will work on a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases. You will work with engagement managers to scope variety of professional services work with input from the customer.</p>\n<p>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications. 
Consult on architecture and design; bootstrap or implement customer projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks.</p>\n<p>Provide an escalated level of support for customer operational issues. You will work with the Databricks technical team, Project Manager, Architect and Customer team to ensure the technical components of the engagement are delivered to meet customer&#39;s needs.</p>\n<p>Work with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement specific product and support issues.</p>\n<p>The ideal candidate will have 6+ years experience in data engineering, data platforms &amp; analytics, comfortable writing code in either Python or Scala, working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one, deep experience with distributed computing with Apache Spark™ and knowledge of Spark runtime internals, familiarity with CI/CD for production deployments, working knowledge of MLOps, design and deployment of performant end-to-end data architectures, experience with technical project delivery - managing scope and timelines, documentation and white-boarding skills, experience working with clients and managing conflicts, build skills in technical areas which support the deployment and integration of Databricks-based solutions to complete customer projects.</p>\n<p>Travel to customers 20% of the time.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_5b244f27-9fd","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8461258002","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 USD","x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","end-to-end data architectures","technical project delivery","documentation and white-boarding skills","client management"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:58:34.588Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Raleigh, North Carolina"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & analytics, Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, end-to-end data architectures, technical project delivery, documentation and white-boarding skills, client management","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_4daeb1d2-f04"},"title":"Senior Software Engineer - Fullstack","description":"<p>We are seeking a senior software engineer to join our team in Vancouver. As a fullstack software engineer, you will work with your team and product management to make insights from data simple. 
You&#39;ll set the foundation for how we build robust, scalable, and delightful products.</p>\n<p>Our customers increasingly use Databricks to analyze petabyte-scale logs in real time. This creates new challenges across the entire data processing pipeline, including ingestion, indexing, processing, and the user experience itself. Our customers are also using Databricks to launch AI/BI, which is redefining Business Intelligence for the AI age. We have several open roles across the teams below:</p>\n<ul>\n<li>Log Analytics: Our customers increasingly use Databricks to analyze petabyte-scale logs in real time.</li>\n<li>AI/BI: AI/BI is redefining Business Intelligence for the AI age.</li>\n<li>Unity Catalog Business Semantics: Context is everything for AI. For enterprise data, that context needs to be governed and managed, which is what Unity Catalog Business Semantics offers.</li>\n<li>Databricks Apps: Databricks Apps is one of the fastest growing products at Databricks, used by more than 2,500 customers who have created more than 20,000 apps.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>5+ years of experience with HTML, CSS, and JavaScript.</li>\n<li>Passion for user experience and design and a deep understanding of front-end architecture.</li>\n<li>Comfortable working towards a multi-year vision with incremental deliverables.</li>\n<li>Motivated by delivering customer value.</li>\n<li>Experience with modern JavaScript frameworks (e.g., React, Angular, or VueJs/Ember).</li>\n<li>5+ years of experience with server-side web technologies (eg: Node.js, Java, Python, Scala, C#, C++,Go).</li>\n<li>Good knowledge of SQL.</li>\n<li>Experience with cloud technologies, e.g. AWS, Azure, GCP, Docker, or Kubernetes.</li>\n<li>Experience developing large-scale distributed systems.</li>\n</ul>\n<p>Pay Range Transparency Databricks is committed to fair and equitable compensation practices. 
The pay range for this role is listed below and represents the expected salary range for non-commissionable roles or on-target earnings for commissionable roles. Actual compensation packages are based on several factors that are unique to each candidate, including but not limited to job-related skills, depth of experience, relevant certifications and training, and specific work location. Based on the factors above, Databricks anticipates utilizing the full width of the range. The total compensation package for this position may also include eligibility for annual performance bonus, equity, and the benefits listed above. Canada Pay Range $146,200-$201,100 CAD</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_4daeb1d2-f04","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8099342002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$146,200-$201,100 CAD","x-skills-required":["HTML","CSS","JavaScript","Node.js","Java","Python","Scala","C#","C++","Go","SQL","AWS","Azure","GCP","Docker","Kubernetes"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:58:30.534Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Vancouver, Canada"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"HTML, CSS, JavaScript, Node.js, Java, Python, Scala, C#, C++, Go, SQL, AWS, Azure, GCP, Docker, 
Kubernetes","baseSalary":{"@type":"MonetaryAmount","currency":"CAD","value":{"@type":"QuantitativeValue","minValue":146200,"maxValue":201100,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_262aa1cb-01c"},"title":"Head of Corporate Engineering","description":"<p>As Head of Corporate Engineering, you will be responsible for Enterprise engineering and operations globally. You will be responsible for building and managing a highly technical enterprise engineering team, developing first principles-based strategies, and enabling strong enterprise security.</p>\n<p>Key responsibilities include engineering, securing and optimizing cloud infrastructure, Identity and Access Management, Endpoints, Collaboration tools, and ensuring compliance with SOX, PCI DSS, and FedRAMP. The Head of Corporate Engineering will work closely with R&amp;D on managing engineering tools like Jira, Confluence, and GitHub, driving efficient adoption and integration.</p>\n<p>Strong technical and influencing leadership principles coupled with the ability to manage a complex, scaling, and fast-moving enterprise environment are essential. This role reports directly to the Vice President, Infrastructure and Operations</p>\n<p>Responsibilities:</p>\n<p>In this influential role, you will be responsible for:</p>\n<p>Securing the Enterprise: Working closely with Enterprise Security organization to harden and secure our cloud environments, secret management, collaboration tools, endpoints, SaaS environments, IAM tools, and more. Success measured in continuous improvement of our enterprise security hardening standards</p>\n<p>Building and Scaling our Cloud Infrastructure: Your team will be responsible for establishing and implementing enterprise cloud infrastructure including establishing Infrastructure Provisioning, SRE services, 24/7 on-call support, Infra as Code, observability, and more. 
In addition, you will be responsible for managing cloud budgets, vendor management, and establishing cost optimization initiatives. Success is measured in increased developer velocity while securing &amp; scaling the cloud infrastructure</p>\n<p>Engineering Tooling: Partner closely with R&amp;D teams to establish policies, configurations, run-books, SLAs, hardening, scalability and availability of engineering tools like Github, Jira, Atlassian, and more</p>\n<p>Endpoint Engineering: Enable extreme automation for endpoint management with zero-touch deployment, observability (synthetic and real-time), provisioning/de-provisioning, and establishing standards / SLAs. Enforce security policies, configure &amp; manage security settings and ensure compliance across all endpoints and mobile devices. Success is measured in terms of end-user satisfaction and % of manual touch</p>\n<p>Collaboration Management: Ensure we provide world class tools to our employees to be extremely productive and collaborative. This would include but not be limited to managing and scaling internal workplace products like Gmail, Slack, Atlassian, Moveworks, Glean, and more. Success is measured by user satisfaction</p>\n<p>Identity &amp; Access Management: Manage the IAM team from IAM implementation, access standards enforcement, SLA management, and compliance to various standards like FedRAMP, IL5, PCI, and more. Included are both internal and external identity providers to be managed. Success is measured by compliance, Identity governance, and availability</p>\n<p>Desired Success Outcomes</p>\n<p>A high-performing enterprise engineering team capable of handling complex technical projects with agility and high quality</p>\n<p>Well defined cloud strategy ensuring the stability, scalability, and security of cloud infrastructure. 
Overhaul of current processes and workflows to address inefficiencies and increase team velocity</p>\n<p>Robust endpoint security with Implementation of comprehensive security measures for all endpoints, including Mac, Windows, and mobile devices</p>\n<p>Deliver high-quality employee experience with productivity tools (Gmail, Slack, Atlassian tools, Moveworks, GitHub) with a robust forward-looking roadmap</p>\n<p>Efficient operational support for Tier 3 IT services with minimized production incidents. Implementation of robust incident and change management processes with mature operational practice</p>\n<p>Efficient and mature processes for system integrations related to Mergers and Acquisitions (M&amp;As), ensuring timely smooth transitions during M&amp;A integrations</p>\n<p>Development and implementation of automation tools and frameworks, Identification of automation opportunities to reduce manual toil and improve accuracy</p>\n<p>Qualifications:</p>\n<p>10 years of experience managing Cloud infrastructure at large enterprises. Extensive experience managing public cloud implementations in AWS. Experience with GCP and Azure will be a plus</p>\n<p>In-depth understanding of Cloud native technologies to lead and guide the team. Must have hands-on experience in troubleshooting and debugging issues in production environments</p>\n<p>Working experience in managing DevOps/SRE practices OKRs (Objective and Key Results), Agile development, Infra-as-code, SRE (Site Reliability Engineering), DevOps measurement such as DORA KPIs,</p>\n<p>In-depth understanding of each collaboration tool&#39;s features, functionalities, and configurations (e.g., Gmail for email, Slack for messaging). Ability to identify and integrate and optimize the use of various tools for seamless collaboration (e.g., connecting Jira with GitHub for Dev metrics)</p>\n<p>Experience leading a team of senior professionals working asynchronously in a remote, distributed team. 
Strong communication skills, with clear verbal communication and written communication skills</p>\n<p>Collaborative style: partners well with cross-functional teams to solve hard problems and to complete complex deliverables with quality and business outcomes</p>\n<p>Provide mentorship and guidance to team members to ensure that their skills and knowledge are kept up-to-date</p>\n<p>Pay Range Transparency Databricks is committed to fair and equitable compensation practices. The pay range(s) for this role is listed below and represents the expected salary range for non-commissionable roles or on-target earnings for commissionable roles. Actual compensation packages are based on several factors that are unique to each candidate, including but not limited to job-related skills, depth of experience, relevant certifications and training, and specific work location. Based on the factors above, Databricks anticipates utilizing the full width of the range. The total compensation package for this position may also include eligibility for annual performance bonus, equity, and the benefits listed above. 
For more information regarding which range your location is in visit our page here.</p>\n<p>Zone 1 Pay Range $265,000-$364,300 USD</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_262aa1cb-01c","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/7293607002","x-work-arrangement":"remote","x-experience-level":"executive","x-job-type":"full-time","x-salary-range":"$265,000-$364,300 USD","x-skills-required":["Cloud infrastructure","Identity and Access Management","Endpoint security","Collaboration tools","DevOps","Site Reliability Engineering","Agile development","Infrastructure as Code","Observability","Automation","Scripting languages","Cloud native technologies","Public cloud implementations","AWS","GCP","Azure"],"x-skills-preferred":["Jira","Confluence","GitHub","Atlassian","Moveworks","Glean","Slack","Gmail","Microsoft Office"],"datePosted":"2026-04-18T15:58:26.589Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, California"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Cloud infrastructure, Identity and Access Management, Endpoint security, Collaboration tools, DevOps, Site Reliability Engineering, Agile development, Infrastructure as Code, Observability, Automation, Scripting languages, Cloud native technologies, Public cloud implementations, AWS, GCP, Azure, Jira, Confluence, GitHub, Atlassian, Moveworks, Glean, Slack, Gmail, Microsoft 
Office","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":265000,"maxValue":364300,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_a38ec886-62e"},"title":"AI Engineer - FDE (Forward Deployed Engineer)","description":"<p>Mission</p>\n<p>The AI Forward Deployed Engineering (AI FDE) team is a highly specialized customer-facing AI team at Databricks. We deliver professional services engagements to help our customers build and productionize first-of-its-kind AI applications.</p>\n<p>We work cross-functionally to shape long-term strategic priorities and initiatives alongside engineering, product, and developer relations, as well as support internal subject matter expert (SME) teams. We view our team as an ensemble: we look for individuals with strong, unique specializations to improve the overall strength of the team.</p>\n<p>This team is the right fit for you if you love working with customers, teammates, and fueling your curiosity for the latest trends in GenAI, LLMOps, and ML more broadly. 
This role can be remote.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>Develop cutting-edge GenAI solutions, incorporating the latest techniques from our Mosaic AI research to solve customer problems</li>\n</ul>\n<ul>\n<li>Own production rollouts of consumer and internally facing GenAI applications</li>\n</ul>\n<ul>\n<li>Serve as a trusted technical advisor to customers across a variety of domains</li>\n</ul>\n<ul>\n<li>Present at conferences such as Data + AI Summit, recognized as a thought leader internally and externally</li>\n</ul>\n<ul>\n<li>Collaborate cross-functionally with the product and engineering teams to influence priorities and shape the product roadmap</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>Experience building GenAI applications, including RAG, multi-agent systems, Text2SQL, fine-tuning, etc., with tools such as HuggingFace, LangChain, and DSPy</li>\n</ul>\n<ul>\n<li>Minimum of 5+ years of relevant experience as a Data Scientist preferably working in a consulting role</li>\n</ul>\n<ul>\n<li>Expertise in deploying production-grade GenAI applications, including evaluation and optimizations</li>\n</ul>\n<ul>\n<li>Extensive years of hands-on industry data science experience, leveraging common machine learning and data science tools, i.e. pandas, scikit-learn, PyTorch, etc.</li>\n</ul>\n<ul>\n<li>Experience building production-grade machine learning deployments on AWS, Azure, or GCP</li>\n</ul>\n<ul>\n<li>Graduate degree in a quantitative discipline (Computer Science, Engineering, Statistics, Operations Research, etc.) 
or equivalent practical experience</li>\n</ul>\n<ul>\n<li>Experience communicating and/or teaching technical concepts to non-technical and technical audiences alike</li>\n</ul>\n<ul>\n<li>Passion for collaboration, life-long learning, and driving business value through AI</li>\n</ul>\n<ul>\n<li>Preferred experience using the Databricks Intelligence Platform and Apache Spark to process large-scale distributed datasets</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_a38ec886-62e","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8099751002","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["GenAI","HuggingFace","LangChain","DSPy","pandas","scikit-learn","PyTorch","AWS","Azure","GCP","Apache Spark"],"x-skills-preferred":["Databricks Intelligence Platform","Mosaic AI research"],"datePosted":"2026-04-18T15:58:10.707Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Remote - India"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"GenAI, HuggingFace, LangChain, DSPy, pandas, scikit-learn, PyTorch, AWS, Azure, GCP, Apache Spark, Databricks Intelligence Platform, Mosaic AI research"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_ded9d7ff-8aa"},"title":"Senior Engineering Manager, Data Streaming Services (Auth0)","description":"<p>Secure Every Identity, from AI to Human\n\nIdentity is the key to unlocking the potential of AI. 
As a Senior Engineering Manager, Data Streaming Services at Auth0, you will lead the evolution of our streaming data backbone across a multi-cloud footprint. You will oversee multiple engineering teams dedicated to making data streaming seamless, reliable, and high-performance.\n\nThis is a &quot;manager of managers&quot; role requiring a blend of strategic foresight, execution rigor, and technical grit. You will set the vision for our streaming services, mentor high-performing teams, and take accountability for our service uptime guarantees.\n\n<strong>Key Responsibilities:</strong>\n\n* Lead a world-class team of teams. Oversee data streaming infrastructure and services that power our global platform across AWS and Azure.\n* Own roadmap and execution. Partner with product and stakeholder teams to define the team&#39;s strategy and prioritized roadmap.\n* Drive engineering excellence. Set high standards of quality, reliability, and operational robustness, championing best practices in software development, from code reviews to observability and incident management.\n* Lead an automation-first culture. Reduce operational friction and ensure infrastructure is self-healing and code-defined. Draw efficiency from AI-assisted development.\n* Act as a technical leader. Lead response on incidents for services under ownership and help teams navigate complex distributed systems failures.\n\n<strong>Requirements:</strong>\n\n* Proven engineering leadership, building and leading teams of teams. Experience coaching Staff+ engineers and engineering managers.\n* Strong technical and architectural acumen. Background in building scalable, distributed systems. Comfortable participating in and guiding technical discussions.\n* Strong project management skills. 
Expertise in creating technical roadmaps, prioritizing effectively in an agile environment, and managing complex project dependencies.\n* Collaborative leadership style, adapted to remote ways of working. Excellent written and verbal communication skills to build strong relationships with stakeholders and inspire others.\n\n<strong>Bonus Points:</strong>\n\n* Experience developing data-intensive applications in a modern programming language such as go, node.js, or Java.\n* Experience with databases such as PostgreSQL and MongoDB.\n* Experience with distributed streaming platforms like Kafka.\n* Familiarity with concepts in the IAM (Identity and Access Management) domain.\n* Experience with cloud providers (AWS, Azure), container technologies such as Kubernetes and Docker, and observability tools such as Datadog.\n* Experience building reliable, high-availability platforms for enterprise SaaS applications.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_ded9d7ff-8aa","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Auth0","sameAs":"https://auth0.com/","logo":"https://logos.yubhub.co/auth0.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/okta/jobs/7719329","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$207,000-$284,000 USD","x-skills-required":["engineering leadership","technical and architectural acumen","project management skills","collaborative leadership style","data-intensive applications","databases","distributed streaming platforms","IAM domain","cloud providers","container technologies","observability 
tools"],"x-skills-preferred":["go","node.js","Java","PostgreSQL","MongoDB","Kafka","AWS","Azure","Kubernetes","Docker","Datadog"],"datePosted":"2026-04-18T15:58:08.018Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Chicago, Illinois; New York, New York; Washington, DC"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"engineering leadership, technical and architectural acumen, project management skills, collaborative leadership style, data-intensive applications, databases, distributed streaming platforms, IAM domain, cloud providers, container technologies, observability tools, go, node.js, Java, PostgreSQL, MongoDB, Kafka, AWS, Azure, Kubernetes, Docker, Datadog","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":207000,"maxValue":284000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_eed925a1-b05"},"title":"Sr. Staff/ Staff Backline Technical Solution engineer","description":"<p>At Databricks, we enable data teams to solve the world&#39;s toughest problems by building and running the world&#39;s best data and AI infrastructure platform. As a Backline Technical Solutions Engineer, you will help our customers succeed with the Databricks platform by resolving complex technical customer escalations and working closely with the frontline support team.</p>\n<p>Your responsibilities will include: Troubleshooting and resolving complex customer issues related to the Databricks Platform by analysing core component metrics and logs. Providing suggestions and best practice guidance for improving performance in customer-specific environments and providing product improvement feedback. Helping the support team with detailed troubleshooting guides and runbooks. 
Contributing to automation and tooling programs to make daily troubleshooting efficient. Partnering with the engineering team and spreading awareness of upcoming features and releases. Identifying and contributing supportability features back into the product. Demonstrating ownership and coordinating with engineering and escalation teams to achieve resolution of customer issues and requests. Participating in weekend and weekday on-call rotation.</p>\n<p>We look for candidates with 12+ years of industry experience, expertise in scripting using Python or Shell, and comfort with black box troubleshooting. Experience with supporting Java, Scala or Python based applications, distributed big data computing environments, SQL-based database systems, Linux and network troubleshooting, and cloud services such as AWS, Azure or GCP is also required.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_eed925a1-b05","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8375176002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Java","Scala","Python","Shell","Distributed Big Data Computing","SQL-based Database Systems","Linux","Network Troubleshooting","AWS","Azure","GCP"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:58:03.133Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Bengaluru, India"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Java, Scala, Python, Shell, Distributed Big Data Computing, SQL-based Database Systems, Linux, Network Troubleshooting, AWS, Azure, 
GCP"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_10290548-1ea"},"title":"Solutions Architect - Public Sector (LEAPS)","description":"<p>As a Solutions Architect - Public Sector at Databricks, you will be part of the Field Engineering team responsible for leading the growth of the Databricks Unified Analytics Platform. The role involves working with customers, teammates, the product team, and post-sales teams to identify use cases for Databricks, develop architectures and solutions using our platform, and guide customers through implementation to accomplish value.</p>\n<p>Key responsibilities include: Partnering with the sales team to help customers understand how Databricks can help solve their business problems Providing technical leadership for customers to evaluate and adopt Databricks Consulting on big data architecture, implementing proof of concepts for strategic customer projects, data science and machine learning projects, and validating integrations with cloud services and other 3rd party applications Building and presenting reference architectures, how-tos, and demo applications for customers Becoming an expert in, and promoting Databricks-inspired open-source projects (Spark, Delta Lake, MLflow, and Koalas) across developer communities through meetups, conferences, and webinars Traveling to customers in your region</p>\n<p>We look for candidates with 5+ years of experience in a customer-facing pre-sales, technical architecture, or consulting role, with expertise in designing and architecting distributed data systems. 
Experience with public cloud providers such as AWS, Azure, or GCP, data engineering technologies (e.g., Spark, Hadoop, Kafka), and data warehousing (e.g., SQL, OLTP/OLAP/DSS) is also required.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_10290548-1ea","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8320126002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,000-$247,500 USD","x-skills-required":["Apache Spark","MLflow","Delta Lake","Python","Scala","Java","SQL","R","AWS","Azure","GCP","Data Engineering","Data Warehousing"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:57:53.145Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Maryland; Virginia; Washington, D.C."}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Apache Spark, MLflow, Delta Lake, Python, Scala, Java, SQL, R, AWS, Azure, GCP, Data Engineering, Data Warehousing","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180000,"maxValue":247500,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_70e2591f-d7d"},"title":"Technical Program Manager, Infrastructure","description":"<p>As a Technical Program Manager for Infrastructure, you&#39;ll work across multiple infrastructure domains to coordinate complex programs that have broad organisational impact. 
You&#39;ll be solving novel scaling challenges at the frontier of what&#39;s possible, all while maintaining the security and reliability our mission demands.</p>\n<p>Developer Productivity &amp; Tooling</p>\n<ul>\n<li>Drive cross-functional programs to improve developer environments, CI/CD infrastructure, and release processes that enable rapid innovation while maintaining high security standards</li>\n</ul>\n<ul>\n<li>Coordinate large-scale migrations and platform modernization efforts across engineering teams</li>\n</ul>\n<ul>\n<li>Partner with teams to measure and improve developer productivity metrics, identifying bottlenecks and driving systematic improvements</li>\n</ul>\n<ul>\n<li>Lead initiatives to integrate AI tools into development workflows, helping Anthropic be at the forefront of AI-assisted research and engineering</li>\n</ul>\n<p>Infrastructure Reliability &amp; Operations</p>\n<ul>\n<li>Drive programs to establish and achieve reliability targets across training infrastructure and production services</li>\n</ul>\n<ul>\n<li>Coordinate incident response improvements, post-mortem processes, and on-call rotations that help teams operate effectively</li>\n</ul>\n<ul>\n<li>Establish metrics and dashboards to track infrastructure health, capacity utilisation, and operational excellence</li>\n</ul>\n<p>Cross-functional Coordination</p>\n<ul>\n<li>Serve as the critical bridge between infrastructure teams, research, and product, translating technical complexities into clear updates for a variety of audiences</li>\n</ul>\n<ul>\n<li>Consult with stakeholders to deeply understand infrastructure, data, and compute needs, identifying solutions to support frontier research and product development</li>\n</ul>\n<ul>\n<li>Drive alignment on priorities and timelines across teams with competing constraints</li>\n</ul>\n<p>You&#39;ll be a good fit if you have 5+ years of technical program management experience, with a track record of successfully delivering complex 
infrastructure programs in ML/AI systems or large-scale distributed systems. You&#39;ll also need a deep technical understanding of infrastructure systems, strong stakeholder management skills, and the ability to navigate competing priorities, confirming data-driven technical decisions.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_70e2591f-d7d","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5111783008","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$290,000-$365,000 USD","x-skills-required":["Kubernetes","Cloud platforms (AWS, GCP, Azure)","ML infrastructure (GPU/TPU/Trainium clusters)","Developer productivity initiatives","CI/CD systems","Infrastructure scaling"],"x-skills-preferred":["Observability tooling and practices","AI tools to improve engineering productivity","Research teams and translating their needs into concrete technical requirements"],"datePosted":"2026-04-18T15:57:52.097Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA | New York City, NY | Seattle, WA"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Kubernetes, Cloud platforms (AWS, GCP, Azure), ML infrastructure (GPU/TPU/Trainium clusters), Developer productivity initiatives, CI/CD systems, Infrastructure scaling, Observability tooling and practices, AI tools to improve engineering productivity, Research teams and translating their needs into concrete technical 
requirements","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":290000,"maxValue":365000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_2aff6a46-3ea"},"title":"Manufacturing Software Engineer, Intelligence Systems","description":"<p>As a Software Engineer in the Manufacturing Test organization, you will join a software development team tasked to ensure that we build quality products - in land, sea, and air. You will develop test executive software that can systematically and thoroughly test our products and create analytics to improve our development cycle. You will champion automation, and work to reduce operator time and instruction complexity through the use of parallel execution, data acquisition, automated deployment tools. You will be presented complex, multiplatform problems with heavy reliance on cloud data systems. In this role you’ll need to think creatively and continuously improve our methods of automation, throughput, user interfaces, and data analytics.</p>\n<p>This role will be based temporarily at Santa Ana, CA for a 3 month training period before transitioning to Asheville, OH.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Develop applications for Windows and Linux desktop environments</li>\n<li>Integrate cloud data and deployment features while maintaining user authentication and security</li>\n<li>Generate automation scripts (python) for debug and prototype development</li>\n<li>Triage issues, root cause failures, and coordinate next-steps</li>\n<li>Partner with end-users to turn needs into features while balancing user experience with engineering constraints</li>\n</ul>\n<p>Required Qualifications:</p>\n<ul>\n<li>Expertise in desktop application development with WPF and C#</li>\n<li>Proficient in ASP.NET, RESTful services with C# in AWS/Azure infrastructure</li>\n<li>Hands-on working knowledge of a major 
relational database (DB2, SQL Server etc.) and/or NoSql</li>\n<li>Experience working in CI/CD and designing and delivering DevOps automation for app deployment and testing</li>\n<li>Bachelor’s degree in Computer Science, Computer Engineering, or related field</li>\n<li>Experience working on multi-disciplinary projects, working closely with Electrical / Mechanical / Manufacturing Engineers</li>\n<li>Eligible to obtain and maintain an active U.S. Secret security clearance</li>\n</ul>\n<p>Preferred Qualifications:</p>\n<ul>\n<li>5+ years of relevant industry experience</li>\n<li>Pursuing a Master’s of Computer Science or related field</li>\n<li>Experience with test automation or cloud deployment tools</li>\n<li>Currently possesses and is able to maintain an active U.S. Secret security clearance</li>\n</ul>\n<p>US Salary Range $129,000-$171,000 USD</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_2aff6a46-3ea","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anduril","sameAs":"https://www.anduril.com/","logo":"https://logos.yubhub.co/anduril.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/andurilindustries/jobs/5080387007","x-work-arrangement":"onsite","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":"$129,000-$171,000 USD","x-skills-required":["desktop application development","WPF","C#","ASP.NET","RESTful services","AWS/Azure infrastructure","relational database","NoSql","CI/CD","DevOps automation"],"x-skills-preferred":["test automation","cloud deployment tools"],"datePosted":"2026-04-18T15:57:48.929Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Ashville, Ohio, United States"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"desktop application development, WPF, C#, ASP.NET, RESTful services, AWS/Azure 
infrastructure, relational database, NoSql, CI/CD, DevOps automation, test automation, cloud deployment tools","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":129000,"maxValue":171000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_e22b8bd1-f7a"},"title":"Staff Product Manager, Serverless Workspaces","description":"<p>At Databricks, we are building the world&#39;s best data and AI infrastructure platform to enable data teams to solve the world&#39;s toughest problems. The Serverless Workspaces team is the engine behind Databricks&#39; shift from a &#39;configure-first&#39; to a &#39;use-now&#39; platform. We are redefining the customer onboarding experience by removing the heavy lifting of cloud infrastructure without complicated networking, storage, and cluster configuration, just instant access to data and AI.</p>\n<p>You will own the strategy for this next-generation platform layer, balancing the simplicity of a SaaS experience with the control enterprise customers demand. 
The impact you will have:</p>\n<ul>\n<li>Drive the transition to Serverless: Lead the strategy to unify the journey to onboard to serverless and classic workspaces and drive 10X usage of serverless in the next year</li>\n<li>Democratize Workspace Creation: Design and ship flows that allow users to spin up workspaces instantly with little friction while maintaining strict governance guardrails and company policies</li>\n<li>Redefine the &#39;Getting Started&#39; experience: Lower the barrier to entry by removing the requirement for customers to manage detailed cloud infrastructure configurations before using Databricks but allowing them to dial those in when they&#39;re ready</li>\n<li>Solve &#39;Workspace Proliferation&#39;: Help define the tools and policies that allow Admins to confidently govern increased amounts of workspaces across the enterprise</li>\n<li>Unify the Data Estate: Work closely with the Unity Catalog and Identity teams to ensure that these new serverless environments seamlessly integrate with a customer&#39;s existing data and security models</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>7+ years of experience as a Product Manager working on cloud infrastructure, developer platforms, or SaaS foundations</li>\n<li>Technical depth in Cloud Infrastructure: Familiarity with AWS, Azure, or GCP resource management (e.g. networking, compute, identity) and how to abstract that complexity for end-users</li>\n<li>Passion for simplification: A track record of taking complex technical workflows (like configuring a VPC or peering) and turning them into &#39;one-click&#39; consumer-grade experiences</li>\n<li>Data-driven mindset: Comfortable defining and tracking KPIs, such as &#39;Time to First Workspace&#39; or &#39;Serverless Adoption Rate,&#39; to measure success</li>\n</ul>\n<p>Pay Range Transparency Databricks is committed to fair and equitable compensation practices. 
The pay range(s) for this role is listed below and represents the expected salary range for non-commissionable roles or on-target earnings for commissionable roles. Actual compensation packages are based on several factors that are unique to each candidate, including but not limited to job-related skills, depth of experience, relevant certifications and training, and specific work location. Based on the factors above, Databricks anticipates utilizing the full width of the range. The total compensation package for this position may also include eligibility for annual performance bonus, equity, and the benefits listed above.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_e22b8bd1-f7a","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8420607002","x-work-arrangement":"onsite","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":"$181,700-$249,800 USD","x-skills-required":["Cloud Infrastructure","Developer Platforms","SaaS Foundations","AWS","Azure","GCP","Networking","Compute","Identity"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:57:47.250Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, California"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Cloud Infrastructure, Developer Platforms, SaaS Foundations, AWS, Azure, GCP, Networking, Compute, 
Identity","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":181700,"maxValue":249800,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_5196c4ac-d97"},"title":"Senior Software Engineer - Infrastructure and Tools","description":"<p>We are seeking a Senior Software Engineer to join our Infrastructure teams. As a key member of our team, you will build scalable systems to power the Databricks platform, making it the de-facto platform for running Big Data and AI workloads.</p>\n<p>Your responsibilities will include building and extending components of the core Databricks infrastructure, architecting multi-cloud systems and abstractions to allow the Databricks product to run on top of existing Cloud providers, improving software development workflows for engineering and operational efficiency, using our own data and AI platform to analyze build and test logs and metrics to identify areas for improvement, developing automated build, test, and release infrastructures, and setting and upholding the standard for engineering processes to support high-quality engineering.</p>\n<p>To succeed in this role, you will need a BS (or higher) in Computer Science, or a related field, and 5+ years of experience writing production code in one of Java, Scala, Go, C++, or Python. 
You should also have passion for building highly scalable and reliable infrastructure, experience architecting, developing, and deploying large-scale distributed systems at scale, and experience with cloud APIs and cloud technologies such as AWS, Azure, GCP, Docker, Kubernetes, or Terraform.</p>\n<p>In addition to a competitive salary, we offer comprehensive health coverage, 401(k) plan, equity awards, flexible time off, paid parental leave, family planning, gym reimbursement, annual personal development fund, work headphones reimbursement, employee assistance program, and business travel accident insurance.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_5196c4ac-d97","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/6318503002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$166,000-$225,000 USD","x-skills-required":["Java","Scala","Go","C++","Python","Cloud APIs","Cloud technologies","AWS","Azure","GCP","Docker","Kubernetes","Terraform"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:57:44.136Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, California"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Java, Scala, Go, C++, Python, Cloud APIs, Cloud technologies, AWS, Azure, GCP, Docker, Kubernetes, 
Terraform","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":166000,"maxValue":225000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_8482d0fc-285"},"title":"Senior Backend Engineer, Gitlab Delivery: Upgrades","description":"<p>As a Senior Backend Engineer on the GitLab Upgrades team, you&#39;ll help self-managed customers run GitLab reliably by building and maintaining the infrastructure, tooling, and automation behind our deployment options.</p>\n<p>You&#39;ll work across Omnibus GitLab, GitLab Helm Charts, the GitLab Environment Toolkit (Get), and the GitLab Operator to make GitLab easier to deploy, more secure by default, and scalable across major cloud providers and a wide range of customer environments.</p>\n<p>In this role, you&#39;ll partner closely with engineering teams and act as a bridge to customer needs, improving installation, upgrade, and day-to-day operations for production-grade GitLab deployments.</p>\n<p>Some examples of our projects:</p>\n<ul>\n<li>Evolving Omnibus GitLab, Helm Charts, GET, and the GitLab Operator to support validated reference architectures for enterprise-scale deployments</li>\n</ul>\n<ul>\n<li>Building automation pipelines and observability into deployment tooling to validate, test, and operate GitLab across Kubernetes and other self-managed environments</li>\n</ul>\n<p>You&#39;ll maintain and evolve the Omnibus GitLab package to support reliable, production-ready self-managed deployments, improving deployment stability, increasing upgrade success rates, and reducing escalation rates.</p>\n<p>You&#39;ll develop and improve GitLab Helm Charts so core components integrate cleanly and scale across supported environments, reducing deployment friction, shortening time to deploy, and improving operational consistency at scale.</p>\n<p>You&#39;ll enhance the GitLab Environment Toolkit 
(Get), validated reference architectures, and the GitLab Operator for secure, Kubernetes-native lifecycle management, improving reliability, strengthening security baselines, and accelerating adoption in customer environments.</p>\n<p>You&#39;ll improve installation, upgrade, and operational workflows across deployment methods to create a consistent experience for self-managed customers, reducing operational overhead, lowering failure rates, and increasing consistency across deployment methods.</p>\n<p>You&#39;ll partner with Security to address vulnerabilities and deliver secure defaults and configurations in the deployment stack, reducing exposure to vulnerabilities and improving baseline security across self-managed deployments.</p>\n<p>You&#39;ll build and maintain automation and continuous integration and continuous delivery pipelines that validate and test Omnibus, Charts, Get, and the Operator, increasing release confidence, improving test coverage, and reducing regressions across deployment tooling.</p>\n<p>You&#39;ll work closely with Distribution Engineers, Site Reliability Engineers, Release Managers, and Development teams to integrate new features into deployment methods and keep them reliable, scalable, and aligned with customer needs, improving delivery readiness and reducing operational issues after release.</p>\n<p>You&#39;ll guide architectural direction, mentor backend engineers, and contribute to the roadmap for self-managed delivery, improving technical quality, accelerating delivery effectiveness, and strengthening team execution.</p>\n<p>You&#39;ll have experience operating backend services in production, including deployment, monitoring, and maintenance in Kubernetes- and Helm-based environments.</p>\n<p>You&#39;ll have proficiency in Go for building observable and resilient services, with working knowledge of Ruby as a useful addition.</p>\n<p>You&#39;ll have hands-on practice with infrastructure as code, including tools such as Terraform, 
and with managing infrastructure across cloud providers such as Google Cloud Platform, Amazon Web Services, or Microsoft Azure.</p>\n<p>You&#39;ll have knowledge of database design, operations, and troubleshooting, especially for PostgreSQL in secure and scalable setups.</p>\n<p>You&#39;ll have knowledge of secure, scalable, and reliable deployment practices, including service scaling and rollout strategies.</p>\n<p>You&#39;ll have familiarity with observability tools and patterns such as Prometheus and Grafana to monitor system health and performance.</p>\n<p>You&#39;ll have ability to work effectively in large codebases and coordinate across distributed, cross-functional teams using clear written communication.</p>\n<p>You&#39;ll have openness to transferable experience from related backend or infrastructure roles, along with the ability to write user-focused documentation and implementation guides.</p>\n<p>The Upgrades team is part of GitLab Delivery and focuses on helping self-managed customers run GitLab successfully in their own environments, from smaller deployments to large enterprise footprints.</p>\n<p>We own deployment and operational tooling across our work on Omnibus GitLab, Helm Charts, Get, and the GitLab Operator, and we work as a globally distributed, all-remote group that works asynchronously with Site Reliability Engineering, Release, Security, and Development teams across regions.</p>\n<p>We are focused on making self-managed GitLab easier to deploy, upgrade, secure, and operate at scale.</p>\n<p>For more on how we work, see Team Handbook Page.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_8482d0fc-285","directApply":true,"hiringOrganization":{"@type":"Organization","name":"GitLab","sameAs":"https://about.gitlab.com/","logo":"https://logos.yubhub.co/about.gitlab.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/gitlab/jobs/8463933002","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Go","Ruby","Terraform","Google Cloud Platform","Amazon Web Services","Microsoft Azure","PostgreSQL","Prometheus","Grafana"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:57:31.988Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Remote, India"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Go, Ruby, Terraform, Google Cloud Platform, Amazon Web Services, Microsoft Azure, PostgreSQL, Prometheus, Grafana"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_477d343e-e37"},"title":"Customer Success Architect","description":"<p>About Mixpanel</p>\n<p>Mixpanel turns data clarity into innovation. Trusted by more than 29,000 companies, including Workday, Pinterest, LG, and Rakuten Viber, Mixpanel’s AI-first digital analytics help teams accelerate adoption, improve retention, and ship with confidence. Powering this is an industry-leading platform that combines product and web analytics, session replay, experimentation, feature flags, and metric trees.</p>\n<p>About the Customer Success Team:</p>\n<p>Mixpanel’s Customer Success &amp; Solutions Engineering teams are analytics consultants who embed themselves within our enterprise customer teams to drive our customers’ business outcomes. 
We work with prospects and customers throughout the customer journey to understand what drives value and serve as the technical counterpart to our Sales organization to deliver on that value.</p>\n<p>You will partner closely with Account Executives, Account Managers, Product, Engineering, and Support to successfully roll out self-serve analytics within our customers’ organizations, help the customer manage change, execute on technical projects and services that delight our customers, and ultimately drive ROI on the customer’s Mixpanel investment.</p>\n<p>About the Role:</p>\n<p>As a CSA, you will partner with customers throughout the customer journey to understand what drives value, beginning from the pre-sales running proof of concepts to demonstrate quick time to value, to post-sales onboarding and implementation, where you set customers up for long-term success with scalable implementation and data governance best practices. Throughout the entire customer lifecycle, you will work to understand how analytics can drive business value for your customers and will consult them on how to maximize the value of Mixpanel, including managing change during Mixpanel’s rollout, defining and achieving ROI, and identifying areas of improvement in their current usage of analytics.</p>\n<p>For large enterprise customers, post onboarding, you will also continue alongside the Account Managers to drive data trust and product adoption for 100+ end user teams through a change management rollout approach.</p>\n<p>Responsibilities:</p>\n<p>Serve as a trusted technical advisor for prospects/customers to provide strategic consultation on data architecture, governance, instrumentation, and business outcomes</p>\n<p>Effectively communicate at most levels of the customer’s organization to influence business outcomes via Mixpanel, design and execute a comprehensive analytics strategy, and unblock technical and organizational roadblocks</p>\n<p>Own the customer’s success with Mixpanel , 
documenting and delivering ROI to the customer throughout their journey to transform their business with self-serve analytics</p>\n<p>Own onboarding and data health for your assigned customers/projects, including ongoing enhancements to their data quality and overall tech stack integration</p>\n<p>Engage with customers’ engineering, product management, and marketing teams to handle technical onboarding, optimize Mixpanel deployments, and improve data trust</p>\n<p>Deliver a variety of technical services ranging from data architecture consultations to adoption and change management best practices</p>\n<p>Leverage modern data architecture expertise to create scalable data governance practices and data trust for our customers, including data optimization and re-implementation projects</p>\n<p>Successfully execute on success outcomes whilst balancing project timelines, scope creep, and unanticipated issues</p>\n<p>Bridge the technical-business gap with your customers , working with business stakeholders to define a strategic vision for Mixpanel and then working with the right business and technical contacts to execute that vision</p>\n<p>Collaborate with our technical and solutions partners as needed on data optimization and onboarding projects</p>\n<p>Be a technical sponsor for internal engagements with Mixpanel product and engineering teams to prioritize product and systems tasks from clients</p>\n<p>We&#39;re Looking For Someone Who Has</p>\n<p>3 to 5 years of experience consulting on defining and delivering ROI through new tool implementations</p>\n<p>Experience working with Director-level members of the customer organization to define a strategic vision and successfully leveraging those members to deliver on that vision</p>\n<p>The ability to communicate with stakeholders at most levels of an organization , from talking with developers about the ins and outs of an API to talking to a Director of Data Science/Product Management about organizational 
efficiency</p>\n<p>Can manage complex projects with assorted client stakeholders, working across teams and departments to execute real change</p>\n<p>Has a demonstrated successful record of experience in customer success, client-facing professional services, consulting, or technical project management role</p>\n<p>Excellent written, analytical, and communication skills</p>\n<p>Strong process and/or project delivery discipline</p>\n<p>Eager to learn new technologies and adapt to evolving customer needs</p>\n<p>We&#39;d Be Extra Excited For Someone Who Has</p>\n<p>Experience in data querying, modeling, and transforming in at least one core tool, including SQL / dbt / Python / Business Intelligence tools / Product Analytics tools, etc.</p>\n<p>Familiar with databases and cloud data warehouses like Google Cloud, Amazon Redshift, Microsoft Azure, Snowflake, Databricks, etc.</p>\n<p>Familiar with product analytics implementation methods like SDKs, Customer Data Platforms (CDPs), Event Streaming, Reverse ETL, etc.</p>\n<p>Familiar with analytics best practices across business segments and verticals</p>\n<p>Benefits and Perks</p>\n<p>Comprehensive Medical, Vision, and Dental Care</p>\n<p>Mental Wellness Benefit</p>\n<p>Generous Vacation Policy &amp; Additional Company Holidays</p>\n<p>Enhanced Parental Leave</p>\n<p>Volunteer Time Off</p>\n<p>Additional US Benefits: Pre-Tax Benefits including 401(K), Wellness Benefit, Holiday Break</p>\n<p>Culture Values</p>\n<p>Make Bold Bets: We choose courageous action over comfortable progress.</p>\n<p>Innovate with Insight: We tackle decisions with rigor and judgment - combining data, experience and collective wisdom to drive powerful outcomes.</p>\n<p>One Team: We collaborate across boundaries to achieve far greater impact than any of us could accomplish alone.</p>\n<p>Candor with Connection: We build meaningful relationships that enable honest feedback and direct conversations.</p>\n<p>Champion the Customer: We seek to deeply 
understand our customers’ needs, ensuring their success is our north star.</p>\n<p>Powerful Simplicity: We find elegant solutions to complex problems, making sophisticated things accessible.</p>\n<p>Why choose Mixpanel?</p>\n<p>We’re a leader in analytics with over 9,000 customers and $277M raised from prominent investors like Andreessen Horowitz, Sequoia, YC, and, most recently, Bain Capital.</p>\n<p>Mixpanel’s pioneering event-based data analytics platform offers a powerful yet simple solution for companies to understand user behaviors and easily track overarching company success metrics.</p>\n<p>Our accomplished teams continuously facilitate our expansion by tackling the ever-evolving challenges tied to scaling, reliability, design, and service.</p>\n<p>Choosing to work at Mixpanel means you’ll be helping the world’s most innovative companies learn from their data so they can make better decisions.</p>\n<p>Mixpanel is an equal opportunity employer supporting workforce diversity.</p>\n<p>At Mixpanel, we are focused on things that really matter — our people, our customers, our partners — out of a recognition that those relationships are the most valuable assets we have.</p>\n<p>We actively encourage women, people with disabilities, veterans, underrepresented minorities, and LGBTQ+ people to apply.</p>\n<p>We do not discriminate on the basis of race, religion, color, national origin, gender, gender identity or expression, sexual orientation, age, marital status, or any other protected characteristic.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_477d343e-e37","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Mixpanel","sameAs":"https://mixpanel.com","logo":"https://logos.yubhub.co/mixpanel.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/mixpanel/jobs/7506821","x-work-arrangement":"hybrid","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["data architecture","governance","instrumentation","business outcomes","data querying","modeling","transforming","SQL","dbt","Python","Business Intelligence tools","Product Analytics tools"],"x-skills-preferred":["databases","cloud data warehouses","Google Cloud","Amazon Redshift","Microsoft Azure","Snowflake","Databricks","SDKs","Customer Data Platforms","Event Streaming","Reverse ETL"],"datePosted":"2026-04-18T15:57:25.195Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Bengaluru, India (Hybrid)"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data architecture, governance, instrumentation, business outcomes, data querying, modeling, transforming, SQL, dbt, Python, Business Intelligence tools, Product Analytics tools, databases, cloud data warehouses, Google Cloud, Amazon Redshift, Microsoft Azure, Snowflake, Databricks, SDKs, Customer Data Platforms, Event Streaming, Reverse ETL"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_ae6df2c2-eb1"},"title":"DevOps Engineer, Infrastructure & Security","description":"<p>As a DevOps Engineer, Infrastructure &amp; Security at Scale, you will play a crucial role in building out and enhancing our CI/CD pipelines. 
Our product portfolio and customer base are expanding, and we need skilled engineers to streamline our Software Development Life Cycle (SDLC) through collaborative efforts.</p>\n<p>You will design, develop, and maintain robust CI/CD pipelines to automate the deployment of our lowside and highside products. You will collaborate closely with product and engineering teams to enhance existing application code for improved compatibility and streamlined integration within automated pipelines.</p>\n<p>Contribute to the overall architecture and design of our deployment systems, bringing new ideas to life for increased efficiency and reliability. Troubleshoot and resolve complex deployment issues, ensuring minimal disruption to development cycles.</p>\n<p>Develop a deep understanding of our product and ML architectures to facilitate seamless integration and deployment. Document pipeline processes and configurations to ensure maintainability and knowledge transfer.</p>\n<p>Proactively incorporate security best practices into all stages of the CI/CD pipeline, building security into our development processes. Drive standardization and foster collaboration across different product teams to achieve a unified and efficient SDLC.</p>\n<p>We are looking for experienced DevOps Engineers, DevSecOps Engineers, Software Engineers with a strong focus on CI/CD, or a similar role. You should have a proven track record of building or significantly enhancing CI/CD pipelines.</p>\n<p>Experience configuring and adapting application code to integrate seamlessly with evolving CI/CD environments is a plus. Familiarity with standard containerization &amp; deployment technologies like Kubernetes, Terraform, Docker, etc. is also required.</p>\n<p>We offer a competitive salary range of $245,600-$307,000 USD, comprehensive health, dental and vision coverage, retirement benefits, a learning and development stipend, and generous PTO. 
This role may be eligible for additional benefits such as a commuter stipend.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_ae6df2c2-eb1","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Scale","sameAs":"https://www.scale.com/","logo":"https://logos.yubhub.co/scale.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/scaleai/jobs/4674863005","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$245,600-$307,000 USD","x-skills-required":["CI/CD","Kubernetes","Terraform","Docker","Python","Bash","PowerShell","Jenkins","GitLab CI","GitHub Actions","Azure DevOps","AWS","Azure","GCP","Security best practices"],"x-skills-preferred":["Containerization technologies","Machine learning lifecycles","MLOps concepts","Prior experience in classified environments"],"datePosted":"2026-04-18T15:57:24.917Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Washington, DC"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"CI/CD, Kubernetes, Terraform, Docker, Python, Bash, PowerShell, Jenkins, GitLab CI, GitHub Actions, Azure DevOps, AWS, Azure, GCP, Security best practices, Containerization technologies, Machine learning lifecycles, MLOps concepts, Prior experience in classified environments","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":245600,"maxValue":307000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_cb18189c-d78"},"title":"Solutions Architect (Pre-sales) - Kansai Region","description":"<p>As a Pre-sales Solutions Architect (Analytics, AI, Big Data, Public Cloud) – Kansai Region, your mission will be to drive successful technical 
evaluations and solution designs for some of our focus customers in the Kansai region (Osaka/Kyoto) for Databricks Japan.</p>\n<p>You are passionate about data and AI, love getting hands-on with technology, and enjoy communicating its value to both technical and non-technical stakeholders. Partnering closely with Account Executives, you will lead the technical discovery, architecture design, and proof-of-concept phases, and act as a trusted advisor to our customers on their data and AI strategy.</p>\n<p>You will help customers realize tangible, data-driven outcomes on the Databricks Lakehouse Platform by guiding data and AI teams to design, build, and operationalize solutions within their enterprise ecosystem.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Be a Big Data Analytics expert on aspects of architecture and design</li>\n<li>Lead your prospects through evaluating and adopting Databricks</li>\n<li>Support your customers by authoring reference architectures, how-tos, and demo applications</li>\n<li>Integrate Databricks with 3rd-party applications to support customer architectures</li>\n<li>Engage with the technical community by leading workshops, seminars, and meet-ups</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>Pre-sales or post-sales experience working with external clients across a variety of industry markets</li>\n<li>Understanding of customer-facing pre-sales or consulting role with a core strength in either Data Engineering or Data Science advantageous</li>\n<li>Experience demonstrating technical concepts, including presenting and whiteboarding</li>\n<li>Experience designing and implementing architectures within public clouds (AWS, Azure, or GCP)</li>\n<li>Experience with Big Data technologies, including Apache Spark, AI, Data Science, Data Engineering, Hadoop, Cassandra, and others</li>\n<li>Fluent coding experience in Python or Scala implementing Apache Spark, Java, and R is also desirable</li>\n<li>Experience working with Enterprise 
Accounts</li>\n<li>Written and verbal fluency in Japanese</li>\n</ul>\n<p>Benefits:</p>\n<p>At Databricks, we strive to provide comprehensive benefits and perks that meet the needs of all of our employees. For specific details on the benefits offered in your region, click here.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_cb18189c-d78","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8437028002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Big Data Analytics","Apache Spark","AI","Data Science","Data Engineering","Hadoop","Cassandra","Python","Scala","Java","R","Public Cloud","AWS","Azure","GCP"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:57:24.678Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Japan"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Big Data Analytics, Apache Spark, AI, Data Science, Data Engineering, Hadoop, Cassandra, Python, Scala, Java, R, Public Cloud, AWS, Azure, GCP"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_b6611499-8b7"},"title":"AI Identity Architect","description":"<p>Secure Every Identity, from AI to Human Identity is the key to unlocking the potential of AI.\\n\\nOkta secures AI by building the trusted, neutral infrastructure that enables organisations to safely embrace this new era.\\n\\nThis work requires a relentless drive to solve complex challenges with real-world stakes.\\n\\nWe are looking for builders and owners who operate with speed and urgency and execute 
with excellence.\\n\\nThis is an opportunity to do career-defining work.\\n\\nWe&#39;re all in on this mission.\\n\\nIf you are too, let&#39;s talk.\\n\\nThe Identity Team\\n\\nThe Identity team’s mission is to strengthen Okta’s position as the leading Identity-as-a-Service solution through identifying and resolving risks to the employees, product, and most importantly, our customers.\\n\\nWith the ever-increasing pace of cloud application adoption, companies are struggling to find ways to accurately assess risk and act at the speed of their business.\\n\\nThe AI Identity Architect Opportunity\\n\\nReporting to the VP of Identity &amp; Access Management, this role will be an AI Identity Pioneer, not just an IAM expert.\\n\\nYour &quot;been there, done that&quot; experience in securing autonomous agents at scale is your superpower.\\n\\nYou’ve seen how traditional OAuth flows break under agentic pressure, you’ve felt the pain of &quot;Secret Zero&quot; in a LangChain loop, and you know exactly where the industry’s current tools fall short.\\n\\nAt Okta, you won&#39;t just implement security; you will use your battle-tested experience to drive the product features needed to secure the next generation of identities.\\n\\nThe AI Identity Architect&#39;s mission is to own Okta’s enterprise identity strategy for autonomous AI agents.\\n\\nAs Customer Zero, you will implement Okta on Okta, validating identity patterns at production scale, feeding direct input into product roadmaps, and partnering with business units building internal agentic systems.\\n\\nWhat you’ll be doing\\n\\nProduct Vision &amp; Architecture (The &quot;Ratified R0&quot;)\\n\\nDrive the Roadmap: Act as a primary stakeholder for Okta’s product teams.\\n\\nTranslate your real-world experience securing agents into prioritized feature requests and product requirements.\\n\\nTarget State: Define a multi-year roadmap for Non-Human Identities (NHIs) and AI Agents aligned with Zero Trust (NIST 800-207) and 
Okta’s Secure Identity Commitment.\\n\\nPosture First: Use ISPM (Identity Security Posture Management) to discover unmanaged AI agents and eliminate &quot;Identity Debt&quot; across the enterprise.\\n\\nCross-App Access &amp; Brokered Delegation\\n\\nAgent-to-App Connectivity: Architect secure Cross-App Access patterns where agents act as intermediaries between enterprise systems.\\n\\nDelegated Authority: Refine how user identity is &quot;brokered&quot; to an agent (e.g. OAuth2 Token Exchange), ensuring the agent never has more power than the human user who triggered it.\\n\\nSession Scoping: Implement context-bound, short-lived tokens to prevent lateral movement by a compromised agent.\\n\\nOkta Customer Zero -- Validate and publish patterns using Okta primitives to secure the AI lifecycle for:\\n\\nOkta Identity Engine &amp; Auth0: Define how AI agents prove their identity within AuthN/AuthZ core concepts, implementing rigorous protocols for secure access delegation like OAuth2/OIDC, mTLS, and SPIFFE/SPIRE for workload attestation.\\n\\nOkta Privilege Access: Implement JIT/JEA access and ephemeral, vaulted secrets for agent tool-use.\\n\\nOkta Identity Governance &amp; Workflows: Automate the Joiner-Mover-Leaver (JML) lifecycle for agents, including automated certification and revocation.\\n\\nFine-Grained Authorization: Implement ReBAC for intent-bound decisions (e.g., &quot;Can this agent access the Finance API on behalf of the CFO?&quot;).\\n\\nServe as &quot;Customer Zero&quot; by architecting and stress-testing internal AI security frameworks, translating real-world deployment lessons into a continuous stream of public-facing white papers, blogs, and technical guides to steer industry best practices.\\n\\nAI Ecosystem &amp; Tech Stack Integration\\n\\nDefine how Okta identity is woven into modern AI orchestration layers:\\n\\nOrchestration: Secure identity patterns such as LangChain, LangGraph, AutoGPT, CrewAI, LlamaIndex, and Semantic 
Kernel.\\n\\nArchitect secure connectivity to AI model providers such as Azure OpenAI, AWS Bedrock, Google Vertex AI, OpenAI API, and Anthropic.\\n\\nWhat you’ll bring to the role\\n\\nThe &quot;Been There&quot; Factor: Proven track record of securing AI agents and non-human identities in a production environment.\\n\\nExperience: 7+ years in IAM/Security Architecture; proven strategy work across workforce, customer, and Non-Human Identities (NHIs).\\n\\nDeep knowledge of the core protocols OAuth2/OIDC (especially Token Exchange), SAML, mTLS, JWT, and Model Context Protocol (MCP).\\n\\nHands-on experience with Modern Identity framework SPIFFE/SPIRE.\\n\\nAbility to author Architecture Decision Records (ADR) and influence at the VP/CTO level, while simultaneously acting as a peer to Product Management.\\n\\nAnd extra credit if you have experience in any of the following!\\n\\nPrior work shaping identity strategy for autonomous/agent systems, multi-agent delegation, or brokered access patterns.\\n\\nExposure to policy-as-code (OPA/Cedar) and service-mesh identity.\\n\\nCertifications such as CISSP-ISSAP, CCSP, or TOGAF are welcome but not required or expected.\\n\\n#LI-SM1 #LI-Hybrid P21621_3398002</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_b6611499-8b7","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Okta","sameAs":"https://www.okta.com/","logo":"https://logos.yubhub.co/okta.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/okta/jobs/7749222","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$242,000-$332,000 USD","x-skills-required":["OAuth2/OIDC","SAML","mTLS","JWT","Model Context Protocol (MCP)","SPIFFE/SPIRE","Architecture Decision Records (ADR)","Policy-as-code (OPA/Cedar)","Service-mesh 
identity"],"x-skills-preferred":["LangChain","LangGraph","AutoGPT","CrewAI","LlamaIndex","Semantic Kernel","Azure OpenAI","AWS Bedrock","Google Vertex AI","OpenAI API","Anthropic"],"datePosted":"2026-04-18T15:57:24.671Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, California"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"OAuth2/OIDC, SAML, mTLS, JWT, Model Context Protocol (MCP), SPIFFE/SPIRE, Architecture Decision Records (ADR), Policy-as-code (OPA/Cedar), Service-mesh identity, LangChain, LangGraph, AutoGPT, CrewAI, LlamaIndex, Semantic Kernel, Azure OpenAI, AWS Bedrock, Google Vertex AI, OpenAI API, Anthropic","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":242000,"maxValue":332000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_65befd80-0e2"},"title":"Staff Software Engineer","description":"<p>We&#39;re seeking an experienced Staff-level backend software engineer to join our Live Pay team. You&#39;ll work cross-functionally with various teams and contribute to the design and development of key platform services. This person must be strong in JVM languages and event-driven architecture on AWS.</p>\n<p>The Canada base salary range for this full-time position is $252,000-$308,000, plus equity and benefits. Our salary ranges are determined by role, level, and location. This role will be hybrid from our Vancouver, CAN office, with 2 days a week in the office required.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Drive the design and implementation of new features. 
Break down complex problems into their bare essentials, translate this complexity into elegant design, and create high-quality, clean code.</li>\n</ul>\n<ul>\n<li>Make a meaningful impact on the lives of our community members.</li>\n</ul>\n<ul>\n<li>Design, develop, and deliver large-scale systems.</li>\n</ul>\n<ul>\n<li>Collaborate and mentor other engineers while providing thoughtful guidance using code, design, and architecture reviews.</li>\n</ul>\n<ul>\n<li>Contribute to defining technical direction, planning the roadmap, escalating issues, and synthesizing feedback to ensure team success.</li>\n</ul>\n<ul>\n<li>Estimate and manage team project timelines and risks.</li>\n</ul>\n<ul>\n<li>Care passionately about producing high-quality, efficient designs and code.</li>\n</ul>\n<ul>\n<li>Constantly learning about new technologies and industry standards in software engineering.</li>\n</ul>\n<ul>\n<li>Work cross-functionally with other teams, including: Analytics, design, product, marketing, and data science.</li>\n</ul>\n<p>Requirements:</p>\n<ul>\n<li>7+ years of development experience in backend software development</li>\n</ul>\n<ul>\n<li>Bachelor&#39;s, Master’s, or PhD in computer science, computer engineering, or a related technical discipline, or equivalent industry experience.</li>\n</ul>\n<ul>\n<li>Proficiency in at least one modern programming language, such as Java, Kotlin, Scala, or C#, and experience with at least one major framework such as Spring, Spring Boot, or ASP.NET Core.</li>\n</ul>\n<ul>\n<li>Hands-on experience working in cloud environments: AWS, GCP, or Azure</li>\n</ul>\n<ul>\n<li>Proficiency in event-driven systems such as Kafka, SQS, SNS, or Kinesis, and experience designing and operating scalable distributed systems.</li>\n</ul>\n<ul>\n<li>Knowledge of professional software engineering practices and best practices for the full software development life cycle, including coding standards, code reviews, source control management, build 
processes, testing, and operations</li>\n</ul>\n<ul>\n<li>Hands-on experience working with various databases. DynamoDB, MySQL, ElasticSearch</li>\n</ul>\n<ul>\n<li>Experience using AI-assisted development tools (e.g., Copilot, Cursor, LLMs) to improve engineering productivity</li>\n</ul>\n<ul>\n<li>Experience with continuous integration and delivery tools, and experience in developing and executing functional and integration tests.</li>\n</ul>\n<ul>\n<li>Familiarity with a clean architecture approach and software craftsmanship</li>\n</ul>\n<ul>\n<li>Experience with Kubernetes and microservice architecture is a strong plus.</li>\n</ul>\n<ul>\n<li>Excellent written and verbal communication skills.</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_65befd80-0e2","directApply":true,"hiringOrganization":{"@type":"Organization","name":"EarnIn","sameAs":"https://www.earnin.com/","logo":"https://logos.yubhub.co/earnin.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/earnin/jobs/7680387","x-work-arrangement":"hybrid","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":"$252,000-$308,000","x-skills-required":["Java","Kotlin","Scala","C#","Spring","Spring Boot","ASP.NET Core","AWS","GCP","Azure","Kafka","SQS","SNS","Kinesis","DynamoDB","MySQL","ElasticSearch","AI-assisted development tools","Continuous integration and delivery tools","Clean architecture approach","Software craftsmanship","Kubernetes","Microservice architecture"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:57:22.668Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Vancouver, Canada"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Java, Kotlin, Scala, C#, Spring, Spring Boot, ASP.NET Core, AWS, GCP, Azure, Kafka, SQS, SNS, Kinesis, DynamoDB, MySQL, 
ElasticSearch, AI-assisted development tools, Continuous integration and delivery tools, Clean architecture approach, Software craftsmanship, Kubernetes, Microservice architecture","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":252000,"maxValue":308000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_a0373d52-7fe"},"title":"Senior IAM Engineer","description":"<p>We are looking for a Senior IAM Engineer to join our team. As a Senior IAM Engineer, you will play a critical role in securing our systems and data. You will have the opportunity to work with cutting-edge IAM technologies, collaborate with cross-functional teams, and influence the development of our IAM strategy.</p>\n<p>Your primary focus will be on designing and implementing identity lifecycle management, integration and orchestration, access governance, security and compliance, custom tooling, and data and AI infrastructure support. You will also be responsible for collaborating with cross-functional teams, improving provisioning and deprovisioning processes, integrating and managing IdPs within the IAM system, handling and streamlining access requests, developing and implementing IAM policies and procedures, and responding to ad-hoc requests.</p>\n<p>To be successful in this role, you will need to have a strong understanding of identity lifecycle management, directory services, SSO, MFA, SCIM provisioning, and federation (SAML, OIDC, OAuth). 
You will also need to have experience partnering with HR, Finance, Compliance, and other cross-functional teams to design and implement IAM and enterprise solutions.</p>\n<p>Additional skills and experience we&#39;d prioritize include experience with Workato or similar integration orchestrator tools, experience with Okta Workflows, certifications such as Workato or Okta Certified Professional/Administrator/Consultant, experience integrating IAM with HR systems, knowledge of compliance requirements related to IAM, and background in cloud platforms (AWS, GCP, Azure) and IAM integrations.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_a0373d52-7fe","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Komodo Health","sameAs":"https://www.komodohealth.com/","logo":"https://logos.yubhub.co/komodohealth.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/komodohealth/jobs/8393728002","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Scripting","Automation Mindset","APIs","Infrastructure as Code","Security Mindset","Identity and Access Management","Okta","Workday","Google Workspace","SCIM provisioning","Federation (SAML, OIDC, OAuth)","Directory services","SSO","MFA"],"x-skills-preferred":["Workato","Okta Workflows","Certifications (Workato or Okta Certified Professional/Administrator/Consultant)","Experience integrating IAM with HR systems","Knowledge of compliance requirements related to IAM","Background in cloud platforms (AWS, GCP, Azure) and IAM integrations"],"datePosted":"2026-04-18T15:57:17.076Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"India"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Healthcare","skills":"Scripting, Automation 
Mindset, APIs, Infrastructure as Code, Security Mindset, Identity and Access Management, Okta, Workday, Google Workspace, SCIM provisioning, Federation (SAML, OIDC, OAuth), Directory services, SSO, MFA, Workato, Okta Workflows, Certifications (Workato or Okta Certified Professional/Administrator/Consultant), Experience integrating IAM with HR systems, Knowledge of compliance requirements related to IAM, Background in cloud platforms (AWS, GCP, Azure) and IAM integrations"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_af586166-0a0"},"title":"Technical Solutions Specialist, Data Operations","description":"<p>In Data Operations on the Strategic Data Partnerships team at Anthropic, you will support a cross-functional team in implementing partnership strategies to improve Anthropic’s products. You’ll ensure data meets our standards and reaches the right teams, build systems to track compliance and data usage across the portfolio, and coordinate across Research, Product, Legal, and external partners to remove barriers and accelerate impact.</p>\n<p>This role requires operational excellence combined with technical hands-on execution, and is a great fit for someone who wants to apply those skills in a high-impact, fast-growth context.</p>\n<p>Responsibilities:</p>\n<p>Data Opportunity Assessment and Processing</p>\n<ul>\n<li>Analyze and review incoming or prospective data to verify it is useful and strategic for Anthropic</li>\n<li>Own and maintain Python-based ETL pipelines that process large partner datasets, applying filtering criteria and deduplicating against existing data</li>\n<li>Write and optimize SQL queries against large relational databases to support filtering and analysis workflows</li>\n<li>Refine processing logic as requirements evolve across new data types and formats</li>\n</ul>\n<p>Data Delivery Infrastructure, Tooling, and Support</p>\n<ul>\n<li>Own end-to-end data delivery 
workflows, ensuring data moves seamlessly from partners to internal teams to accelerate time-to-impact</li>\n<li>Manage AWS and GCP resources for receiving and organizing partner data deliveries</li>\n<li>Troubleshoot delivery issues and coordinate with partners on formatting and transfer protocols and resolve technical escalations from partners and internal teams</li>\n<li>Build and maintain internal systems, scripts, and automation that support the team’s workflows</li>\n<li>Support occasional research evaluation tasks as needed</li>\n</ul>\n<p>Data Operations and Governance</p>\n<ul>\n<li>Develop and maintain Anthropic&#39;s preferred standards for receiving, consuming and cataloging data, ensuring alignment with Product and Engineering&#39;s evolving needs</li>\n<li>Contribute to systems for monitoring data usage and compliance with partner agreements</li>\n<li>Partner with teammates and cross-functional stakeholders to build out governance practices as the team scales</li>\n</ul>\n<p>You May Be a Good Fit If You</p>\n<ul>\n<li>Bachelor’s degree in Engineering, Computer Science, a related field, or equivalent practical experience</li>\n<li>5-7+ years of experience with data pipelines or data engineering workflows</li>\n<li>Background in solutions engineering, partner engineering or related role at a large tech company</li>\n<li>5+ years of experience in technical troubleshooting or writing code in one or more programming languages</li>\n<li>Proficiency in Python and SQL, including writing, debugging, and optimizing scripts and queries against large datasets</li>\n<li>Hands-on experience with cloud infrastructure (AWS, GCP, or Azure), including managing storage, configuring access, and working from the CLI</li>\n<li>Excellent problem-solving skills with a track record of debugging technical issues, whether at the code level or within a broader system</li>\n<li>Some experience interacting with external third parties delivering data</li>\n</ul>\n<p>Strong 
Candidates Will Have</p>\n<ul>\n<li>Experience working alongside technical teams (research, engineering, or product) to solve ambiguous problems</li>\n<li>Ability to translate technical concepts into clear, actionable guidance for non-technical stakeholders or external partners</li>\n<li>Experience owning or maintaining a production service or system with uptime expectations</li>\n<li>Familiarity with data governance, compliance, or rights management</li>\n<li>Ability to manage multiple, time-sensitive projects simultaneously and the drive to take a project from an initial idea to full completion</li>\n<li>Experience leveraging AI to automate workflows</li>\n</ul>\n<p>Candidates Need Not Have</p>\n<ul>\n<li>Deep expertise in AI or machine learning</li>\n<li>A pure software engineering background</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_af586166-0a0","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5056499008","x-work-arrangement":"hybrid","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":"$205,000-$240,000 USD","x-skills-required":["Python","SQL","Cloud infrastructure (AWS, GCP, or Azure)","Data pipelines","Data engineering workflows","Solutions engineering","Partner engineering"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:57:08.396Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA | New York City, NY"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, SQL, Cloud infrastructure (AWS, GCP, or Azure), Data pipelines, Data engineering workflows, Solutions engineering, Partner 
engineering","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":205000,"maxValue":240000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_bfddfcc3-e38"},"title":"Senior Software Engineer, Public Sector","description":"<p>As a Senior Software Engineer, you will lead the development of a vertical feature or a horizontal capability to include defining requirements with stakeholders and implementation until it is accepted by the stakeholders.</p>\n<p>You will:</p>\n<p>Lead the design and implementation of scalable backend systems and distributed architectures for Federal customers. Manage the full lifecycle of feature development from requirement definition to deployment on classified networks. Direct the orchestration of asynchronous agent fleets to meet mission requirements. Lead customer engagements to translate mission needs into technical requirements. Own the communication with stakeholders to ensure implementation meets defined acceptance criteria. Conduct technical reviews and identify risks within machine learning infrastructure and model serving. Drive the platform roadmap by providing technical specifications for Federal product offerings.</p>\n<p>Ideally you will have:</p>\n<p>Full Stack Development: Proficiency in front-end, back-end development and infrastructure, including experience with modern web development frameworks, programming languages, and databases Cloud-Native Technologies: Familiarity with cloud platforms (e.g., AWS, Azure, GCP) and experience in developing and deploying applications in a cloud-native environment. Understanding of containerization (e.g., Docker) and container orchestration (e.g., Kubernetes) is a plus Data Engineering: Knowledge of ETL (Extract, Transform, Load) processes and experience in building data pipelines to integrate and process diverse data sources. 
Understanding of data modeling, data warehousing, and data governance principles AI Application Integration: Familiarity with integrating Large Language Models (LLMs) and building agentic workflows. Understanding of prompt engineering, retrieval-augmented generation (RAG), and agent orchestration is beneficial. Problem Solving: Strong analytical and problem-solving skills to understand complex challenges and devise effective solutions. Ability to think critically, identify root causes, and propose innovative approaches to overcome technical obstacles Collaboration and Communication: Excellent interpersonal and communication skills to effectively collaborate with cross-functional teams, stakeholders, and customers. Ability to clearly articulate technical concepts to non-technical audiences and foster a collaborative work environment Adaptability and Learning Agility: Willingness to embrace new technologies, learn new skills, and adapt to defining and evolving project requirements. Ability to quickly grasp and apply new concepts and stay up-to-date with emerging trends in software engineering</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_bfddfcc3-e38","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Scale","sameAs":"https://www.scale.com/","logo":"https://logos.yubhub.co/scale.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/scaleai/jobs/4674911005","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$216,000-$311,000 USD (San Francisco, New York, Seattle) $194,400-$279,000 USD (Hawaii, Washington DC, Texas, Colorado) $162,400-$233,000 USD (St. 
Louis)","x-skills-required":["Full Stack Development","Cloud-Native Technologies","Data Engineering","AI Application Integration","Problem Solving","Collaboration and Communication","Adaptability and Learning Agility"],"x-skills-preferred":["Docker","Kubernetes","AWS","Azure","GCP","ETL","data modeling","data warehousing","data governance","Large Language Models","prompt engineering","retrieval-augmented generation","agent orchestration"],"datePosted":"2026-04-18T15:57:07.621Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA; St. Louis, MO; New York, NY; Washington, DC"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Full Stack Development, Cloud-Native Technologies, Data Engineering, AI Application Integration, Problem Solving, Collaboration and Communication, Adaptability and Learning Agility, Docker, Kubernetes, AWS, Azure, GCP, ETL, data modeling, data warehousing, data governance, Large Language Models, prompt engineering, retrieval-augmented generation, agent orchestration","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":162400,"maxValue":311000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_35458586-a42"},"title":"Enterprise Architect, Finance & Legal Systems","description":"<p>We are seeking an experienced Enterprise Architect to join our Technology, Data and Intelligence team. 
As an Enterprise Architect, you will be responsible for defining and delivering the technology architecture strategy across Finance and Legal functions, enabling data-driven decision-making, automation, and operational excellence.</p>\n<p>Key responsibilities will include:</p>\n<ul>\n<li>Defining the target-state architecture for Finance and Legal applications, ensuring alignment with enterprise strategy and growth objectives.</li>\n<li>Leading the design and implementation of end-to-end architectural solutions for Finance and Legal systems, ensuring integration, scalability, and performance across the enterprise.</li>\n<li>Developing and maintaining a multi-year roadmap for modernization across ERP, FP&amp;A, Legal, and Sales Compensation systems.</li>\n<li>Ensuring systems are designed with identity-first security principles, integrating with Okta and other IAM solutions for authentication, authorization, and compliance.</li>\n</ul>\n<p>The ideal candidate will have:</p>\n<ul>\n<li>15+ years of software engineering experience, including significant time as an Architect or Principal in ERP Systems (Oracle/Netsuite/SAP), FP&amp;A Systems (Anaplan) and/or CLM systems (Aptus/Conga/Ironclad).</li>\n<li>Excellent storytelling and communication skills,comfortable presenting to both technical and executive stakeholders.</li>\n<li>Multiple ERP (Oracle or Netsuite) full cycle implementation experience.</li>\n<li>Deep understanding of the Finance business process areas – Order to Cash, Record to Report, Source to Pay, Plan to Report (FP&amp;A), Treasury, Credit Collection, Revenue Recognition, and Subscription Billing, Contract Life Cycle Mgmt within Legal Ops.</li>\n<li>Demonstrated hands-on experience architecting functional and technical solutions within major business applications, with specific expertise in NetSuite (or Oracle), Aptus/Conga (or IronClad), Anaplan, Coupa, Scout, Tax engines such as Avalara, Vertex or OneSource – including understanding their data models 
and APIs in context of solution development and integrations.</li>\n<li>Architected and delivered AI Agents using leading LLMs Gemini, OpenAI or Claude.</li>\n<li>Experience with managing a Software and/or Vendor selection keeping in view the end state architecture of the enterprise.</li>\n<li>Proficient understanding of middlewares such as MuleSoft, Workato, Boomi, or Informatica for connecting Finance, Legal, CRM, and data platforms.</li>\n<li>Familiar with code, configuration, and system performance standards/reviews to ensure quality, scalability, and compliance with enterprise standards.</li>\n<li>Proficiency with AWS, Azure, or GCP, with knowledge of data lakes/warehouses (Snowflake, Redshift, BigQuery) for SaaS revenue and compliance analytics.</li>\n<li>Identity &amp; Security: knowledge of SSO, OAuth, SAML, SCIM, and Zero Trust principles, with hands-on integration experience in Okta or similar IAM platforms.</li>\n</ul>\n<p>In addition to the above skills and experience, the ideal candidate will be passionate about innovation, AI adoption, and continuous improvement aligned with Okta’s mission to build secure, intelligent, and connected business systems.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_35458586-a42","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Okta","sameAs":"https://www.okta.com/","logo":"https://logos.yubhub.co/okta.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/okta/jobs/7442186","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$150,000 - $250,000 per year","x-skills-required":["Enterprise Architecture","Cloud Computing","Identity and Access Management","Security","Data Analytics","Machine Learning","Artificial Intelligence","Software Development","DevOps","Agile 
Methodologies"],"x-skills-preferred":["AWS","Azure","GCP","Snowflake","Redshift","BigQuery","MuleSoft","Workato","Boomi","Informatica"],"datePosted":"2026-04-18T15:56:59.822Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, California"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Enterprise Architecture, Cloud Computing, Identity and Access Management, Security, Data Analytics, Machine Learning, Artificial Intelligence, Software Development, DevOps, Agile Methodologies, AWS, Azure, GCP, Snowflake, Redshift, BigQuery, MuleSoft, Workato, Boomi, Informatica","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":150000,"maxValue":250000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_ed4bd662-c67"},"title":"Senior Solutions Architect, Commercial - San Francisco","description":"<p>We are looking for a Senior Solutions Architect to support our Commercial Sales team in a consumption-based business where customer success drives revenue growth. You&#39;ll work across the full sales cycle, from initial technical evaluations with new prospects through helping existing customers expand their use of Temporal in production.</p>\n<p>The nature of our business means you&#39;ll spend significant time helping customers who&#39;ve already adopted Temporal unlock more value by expanding into additional use cases, teams, and workloads. 
This is a high-velocity, technically deep role.</p>\n<p>You&#39;ll partner with developers, architects, and engineering leaders at fast-moving companies to help them understand how Temporal fits into their existing architecture and prove out value through hands-on technical work.</p>\n<p>You&#39;ll be working in a consumption model where usage grows over time, which means building strong technical relationships and staying engaged with accounts as they scale.</p>\n<p>As an early member of a growing team, you should be comfortable with ambiguity, frequent context switching, and creating leverage through reusable assets that help the broader team move faster.</p>\n<p>Must reside in San Francisco, CA</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_ed4bd662-c67","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Temporal","sameAs":"https://temporal.io/","logo":"https://logos.yubhub.co/temporal.io.png"},"x-apply-url":"https://job-boards.greenhouse.io/temporaltechnologies/jobs/5037692007","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$200,000 - $250,000 OTE","x-skills-required":["Strong development background with hands-on coding experience in at least one modern language (Go, Java, TypeScript, or Python)","Deep understanding of distributed systems (reliability, observability, and fault tolerance)","Proven experience in a pre-sales, customer-facing engineering, or solutions architecture role working with technical buyers","Exceptional time management and prioritization skills with the ability to thrive in high-volume environments","Enthusiasm for AI/ML technologies and eagerness to learn about emerging use cases in agentic workflows and LLM orchestration"],"x-skills-preferred":["Experience with workflow engines, event-driven architectures, or orchestration technologies 
(Temporal, Cadence, or similar)","Background articulating the value of commercial SaaS offerings that compete with open source alternatives (Redis, Kafka, Databricks, etc.)","Contributions to developer tooling, open source projects, or technical content","Strong cross-functional collaboration skills with the ability to serve as a technical bridge between customers and internal teams","Certifications with any of the major cloud providers (AWS, GCP, or Azure) or foundational AI model providers (OpenAI, Anthropic, or Google)"],"datePosted":"2026-04-18T15:56:33.427Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"United States - Remote Opportunity"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Strong development background with hands-on coding experience in at least one modern language (Go, Java, TypeScript, or Python), Deep understanding of distributed systems (reliability, observability, and fault tolerance), Proven experience in a pre-sales, customer-facing engineering, or solutions architecture role working with technical buyers, Exceptional time management and prioritization skills with the ability to thrive in high-volume environments, Enthusiasm for AI/ML technologies and eagerness to learn about emerging use cases in agentic workflows and LLM orchestration, Experience with workflow engines, event-driven architectures, or orchestration technologies (Temporal, Cadence, or similar), Background articulating the value of commercial SaaS offerings that compete with open source alternatives (Redis, Kafka, Databricks, etc.), Contributions to developer tooling, open source projects, or technical content, Strong cross-functional collaboration skills with the ability to serve as a technical bridge between customers and internal teams, Certifications with any of the major cloud providers (AWS, GCP, or Azure) or foundational AI model providers 
(OpenAI, Anthropic, or Google)","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":200000,"maxValue":250000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_aeba45bc-3e4"},"title":"Senior Solutions Engineer","description":"<p>About Mixpanel</p>\n<p>Mixpanel turns data clarity into innovation. Trusted by more than 29,000 companies, including Workday, Pinterest, LG, and Rakuten Viber, Mixpanel’s AI-first digital analytics help teams accelerate adoption, improve retention, and ship with confidence.</p>\n<p>Powering this is an industry-leading platform that combines product and web analytics, session replay, experimentation, feature flags, and metric trees. Mixpanel delivers insights that customers trust.</p>\n<p>Visit mixpanel.com to learn more.</p>\n<p>About the Customer Success &amp; Solutions Engineering Team</p>\n<p>Mixpanel’s Customer Success &amp; Solutions Engineering teams are analytics consultants who embed themselves within our enterprise customer teams to drive our customer’s business outcomes. We work with prospects and customers throughout the customer journey to understand what drives value and serve as the technical counterpart to our Sales organization to deliver on that value.</p>\n<p>You will partner closely with Account Executives, Account Managers, Product, Engineering, and Support to successfully roll out self-serve analytics within our customer’s organizations, help the customer manage change, execute on technical projects and services that delight our customers and ultimately drive ROI on the customer’s Mixpanel investment.</p>\n<p>About the Role</p>\n<p>Our SEs are inquisitive, nimble, and able to clearly articulate the technical benefits and requirements of Mixpanel to developers and product managers, while also communicating the business value of our product to high-level executives. 
In your first month, you’ll become a Mixpanel expert,both in features and functionality as well as implementation. You’ll have the opportunity to shadow customer calls and demos with current Sales Engineers and Account Executives while learning to articulate our value proposition. You’ll also be trained on Mixpanel’s internal systems and tools to set you up for success.</p>\n<p>Within your first three months, you’ll be directly involved in deal cycles with Commercial Account Executives. You’ll lead the technical qualification for customer use cases and deliver customized demos for prospects. You’ll work directly with leadership at the prospect’s organization to understand business challenges that can be solved through an analytics platform and consult on how Mixpanel can address those challenges to achieve a strong ROI. You’ll also work with the prospect’s business and technical teams to scope and execute proof-of-concept projects to establish Mixpanel’s value,including consulting on data ingestion methods, overall architecture, success criteria, and rollout strategies for analytics tools across an organization.</p>\n<p>Responsibilities</p>\n<p>Serve as a trusted technical advisor for prospects, providing strategic consultation on data architecture, governance, instrumentation, and business outcomes.</p>\n<p>Communicate and consult effectively at all levels of the customer’s organization to earn trust and influence buying decisions.</p>\n<p>Bridge the technical-business gap,working with senior stakeholders to define success for proof-of-concepts and ensuring successful execution and outcomes.</p>\n<p>Leverage your Mixpanel expertise and technical/consultative skills to impart best practices throughout proof-of-concept projects.</p>\n<p>Partner with Account Executives to drive revenue growth, serving as the key technical contact for customers.</p>\n<p>Partner with post-sales teams to ensure that pre-sales value propositions translate into tangible post-sales 
results.</p>\n<p>Develop relationships and uncover the needs of key technical stakeholders within your assigned book of business.</p>\n<p>Be the “Voice of the Prospect” by collecting feedback from potential Mixpanel customers and sharing it with the Product team.</p>\n<p>We&#39;re Looking For Someone Who Has</p>\n<p>The ability to communicate with stakeholders at all levels,from discussing APIs with developers to organizational efficiency with CIOs.</p>\n<p>A demonstrated track record of qualifying and selling technical solutions to executive stakeholders.</p>\n<p>6+ years of experience in a Software-as-a-Service Sales Engineering or related role.</p>\n<p>Experience in data querying, modeling, and transformation using tools such as SQL, dbt, Python, Business Intelligence platforms, or Product Analytics tools.</p>\n<p>Familiarity with databases and cloud data warehouses (e.g., Google Cloud, Amazon Redshift, Microsoft Azure, Snowflake, Databricks).</p>\n<p>A successful record of experience in sales engineering, customer success, client-facing professional services, consulting, or technical project management.</p>\n<p>Excellent written, analytical, communication, and presentation skills.</p>\n<p>Strong process and project delivery discipline.</p>\n<p>The ability to travel.</p>\n<p>Fluency in multiple languages; German preferred.</p>\n<p>Benefits and Perks</p>\n<p>Comprehensive Medical, Vision, and Dental Care</p>\n<p>Mental Wellness Benefit</p>\n<p>Generous Vacation Policy &amp; Additional Company Holidays</p>\n<p>Enhanced Parental Leave</p>\n<p>Volunteer Time Off</p>\n<p>Additional US Benefits: Pre-Tax Benefits including 401(K), Wellness Benefit, Holiday Break</p>\n<p>Culture Values</p>\n<p>Make Bold Bets: We choose courageous action over comfortable progress.</p>\n<p>Innovate with Insight: We tackle decisions with rigor and judgment - combining data, experience and collective wisdom to drive powerful outcomes.</p>\n<p>One Team: We collaborate across boundaries to 
achieve far greater impact than any of us could accomplish alone.</p>\n<p>Candor with Connection: We build meaningful relationships that enable honest feedback and direct conversations.</p>\n<p>Champion the Customer: We seek to deeply understand our customers’ needs, ensuring their success is our north star.</p>\n<p>Why choose Mixpanel?</p>\n<p>We’re a leader in analytics with over 9,000 customers and $277M raised from prominent investors: like Andreessen-Horowitz, Sequoia, YC, and, most recently, Bain Capital.</p>\n<p>Mixpanel’s pioneering event-based data analytics platform offers a powerful yet simple solution for companies to understand user behaviors and easily track overarching company success metrics.</p>\n<p>Our accomplished teams continuously facilitate our expansion by tackling the ever-evolving challenges tied to scaling, reliability, design, and service.</p>\n<p>Choosing to work at Mixpanel means you’ll be helping the world’s most innovative companies learn from their data so they can make better decisions.</p>\n<p>Mixpanel is an equal opportunity employer supporting workforce diversity.</p>\n<p>At Mixpanel, we are focused on things that really matter,our people, our customers, our partners,out of a recognition that those relationships are the most valuable assets we have.</p>\n<p>We actively encourage women, people with disabilities, veterans, underrepresented minorities, and LGBTQ+ people to apply.</p>\n<p>We do not discriminate on the basis of race, religion, color, national origin, gender, gender identity or expression, sexual orientation, age, marital status, veteran status, or disability status.</p>\n<p>Pursuant to the San Francisco Fair Chance Ordinance or other similar laws that may be applicable, we will consider for employment qualified applicants with arrest and conviction records.</p>\n<p>We’ve immersed ourselves in our Culture and Values as our guiding principles for the impact we want to have and the future we are building.</p>\n<p 
style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_aeba45bc-3e4","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Mixpanel","sameAs":"https://mixpanel.com","logo":"https://logos.yubhub.co/mixpanel.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/mixpanel/jobs/7407407","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["SQL","dbt","Python","Business Intelligence platforms","Product Analytics tools","Databases","Cloud data warehouses","Google Cloud","Amazon Redshift","Microsoft Azure","Snowflake","Databricks"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:56:33.243Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"London, UK (Hybrid)"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"SQL, dbt, Python, Business Intelligence platforms, Product Analytics tools, Databases, Cloud data warehouses, Google Cloud, Amazon Redshift, Microsoft Azure, Snowflake, Databricks"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_c20bf3f6-ff0"},"title":"Senior Developer Support Engineer","description":"<p>We are looking for a Senior Developer Support Engineer to join our team. 
As a Senior Developer Support Engineer, you will be responsible for supporting and maintaining customers who have implemented the Customer Identity SaaS solution, resolving technical and non-technical customer issues in a timely fashion.</p>\n<p>Key responsibilities include:</p>\n<ul>\n<li>Operational management of Support tickets</li>\n<li>Building and maintaining excellent relationships with clients and achieving the highest level of customer satisfaction</li>\n<li>Taking end-to-end ownership of customer issues, including initial troubleshooting, identification of root cause and issue resolution</li>\n<li>Exceeding customer expectations on response quality, timeliness of responses and overall customer experience</li>\n<li>Serving as internal and external point of contact on customer issues and ensuring they are resolved as expediently as possible</li>\n</ul>\n<p>Requirements include:</p>\n<ul>\n<li>5 years+ of technical support and/or software development OR 3 years+ of solid experience in a business or technical analyst role for medium to large scale business software implementation projects</li>\n<li>Strong analytical and problem-solving skills</li>\n<li>Self-starter , able to come up to speed on complex, difficult concepts with minimal assistance</li>\n<li>Ability to quickly context-switch between multiple complex work streams</li>\n<li>Instinctive ability to subdivide problems into basic components in order to efficiently pinpoint the root cause of issues</li>\n<li>Customer-obsessed attitude , a customer advocate, always going the extra mile</li>\n<li>Team player with solid communication and presentation skills</li>\n<li>Proactivity , identify opportunities and take preemptive action against potential problems</li>\n<li>Continuous growth , permanently look for areas of improvement, make plans on how to improve them, and execute those plans</li>\n</ul>\n<p>Technical Domain Focus includes:</p>\n<ul>\n<li>Knowledge of software development fundamentals and common 
architectures</li>\n<li>Knowledge of HTTP, encryption, basic security concepts</li>\n<li>Understanding of authentication and authorization concepts</li>\n<li>Knowledge of one or more auth protocols/specifications: Oauth2, OIDC, SAML, WS-FED, LDAP, Azure AD, etc.</li>\n<li>Proficient in at least one programming language; ideally JavaScript</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_c20bf3f6-ff0","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Okta","sameAs":"https://www.okta.com/","logo":"https://logos.yubhub.co/okta.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/okta/jobs/7770733","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["software development","technical support","business or technical analyst","HTTP","encryption","security concepts","authentication","authorization","Oauth2","OIDC","SAML","WS-FED","LDAP","Azure AD","JavaScript"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:56:32.011Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Bengaluru, India"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"software development, technical support, business or technical analyst, HTTP, encryption, security concepts, authentication, authorization, Oauth2, OIDC, SAML, WS-FED, LDAP, Azure AD, JavaScript"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_20fef61c-c3c"},"title":"Partner Solutions Engineer, UK&I","description":"<p>About Us</p>\n<p>At Cloudflare, we are on a mission to help build a better Internet. 
Today the company runs one of the world’s largest networks that powers millions of websites and other Internet properties for customers ranging from individual bloggers to SMBs to Fortune 500 companies.</p>\n<p>We protect and accelerate any Internet application online without adding hardware, installing software, or changing a line of code. Internet properties powered by Cloudflare all have web traffic routed through its intelligent global network, which gets smarter with every request. As a result, they see significant improvement in performance and a decrease in spam and other attacks.</p>\n<p>Cloudflare was named to Entrepreneur Magazine’s Top Company Cultures list and ranked among the World’s Most Innovative Companies by Fast Company.</p>\n<p>Our culture is built on iteration, leveraging AI to ship faster today to make it better tomorrow, while ensuring that every improvement, no matter how small, is shared across the team to lift everyone up.</p>\n<p>If you’re the type of person who values curiosity over bureaucracy, and that AI is a partner in solving tough problems to keep the Internet moving forward, you’ll fit right in.</p>\n<p>Available Locations: London</p>\n<p>About Solutions Engineering at Cloudflare</p>\n<p>The Pre-Sales Solution Engineering organization owns the technical sale of the Cloudflare solution portfolio, ensuring maximal business value, fit-for-purpose solution design and adoption roadmap for our customers. Solutions Engineering is made up of individuals from a wide range of backgrounds - from Financial Consulting to Product Management, Customer Support to Software Engineering, and we are serious about building a diverse, experienced and curious team.</p>\n<p>The Partner Solutions Engineer is an experienced PreSales role within the Solutions Engineering team. 
Partner Solutions Engineers work closely with our partners to educate, empower, and ensure their success delivering Cloudflare security, reliability and performance solutions.</p>\n<p>What you&#39;ll do as a Partner Solutions Engineer</p>\n<p>Your role will be to build passionate champions within the technology ranks at your Partner accounts, aid your Partner organizations to drive sales for identified opportunities, and collaborate with your technical champions to build revenue pipeline. As the technical partner advocate within Cloudflare, you will work closely with every team at Cloudflare, from Sales and Product, through to Engineering and Customer Support.</p>\n<p>You have strong experience in large Pre-Sales partner and account management as well as excellent verbal and written communications skills in English, suited for both technical and executive-level engagement. You are comfortable speaking about the Cloudflare vision and mission with all technical and non-technical audiences. Ultimately, you are passionate about technology and have the ability to explain complex technical concepts in easy-to-understand terms.</p>\n<p>You are naturally curious, and an avid builder who is not afraid to get your hands dirty. You appreciate the diversity of challenges in working with partners and customers, and look forward to helping them realize the full promise of Cloudflare.</p>\n<p>On the Solutions Engineering team, you will find a collaborative environment where everyone brings different strengths and jumps in to help each other. 
Specifically, we are looking for you to:</p>\n<ul>\n<li>Build and maintain long term technical relationships with our EMEA partners to increase Cloudflare’s reputation and authority within the partner solution portfolio through demonstrating value, enablement, and uncovering new areas of potential revenue</li>\n</ul>\n<ul>\n<li>Drive technical solution design conversations and guide partners in EMEA through use case qualification and collaborative technical wins through demonstrations and proofs-of-concepts</li>\n</ul>\n<ul>\n<li>Evangelize and represent Cloudflare through technical thought leadership and expertise</li>\n</ul>\n<ul>\n<li>Be the voice of the partner internally at Cloudflare, engaging with and influencing Cloudflare’s Product and Engineering teams to meet your partner and customer needs</li>\n</ul>\n<p>Travel up to 40% throughout the quarter to support partner engagements, attend conferences and industry events, and to collaborate with your Cloudflare teammates</p>\n<p>Examples of desirable skills, knowledge and experience:</p>\n<ul>\n<li>Fluency in English (verbal and written)</li>\n</ul>\n<ul>\n<li>Experience managing technical sales within large partners and accounts:</li>\n</ul>\n<ul>\n<li>Developing champion-style relationships</li>\n</ul>\n<ul>\n<li>Driving technical wins</li>\n</ul>\n<ul>\n<li>Assisting with technical validation</li>\n</ul>\n<ul>\n<li>Experience and expertise in one or more of the core industry components of Cloudflare solutions:</li>\n</ul>\n<ul>\n<li>SASE concepts and Zero Trust Networking architectures</li>\n</ul>\n<ul>\n<li>Networking technologies including TCP, UDP, DNS, IPv4 + IPv6, BGP routing, GRE, SD-WAN, MPLS, Global Traffic Management</li>\n</ul>\n<ul>\n<li>Internet security technologies including DDoS and DDoS mitigation, Firewalls, TLS, VPN, DLP</li>\n</ul>\n<ul>\n<li>Detailed understanding of workflow from user to application including hybrid architectures with Azure, AWS, GCP</li>\n</ul>\n<ul>\n<li>HTTP 
technologies including reverse proxy (e.g., WAF and CDN), forward proxy (secure web gateway), serverless application development</li>\n</ul>\n<p>What Makes Cloudflare Special?</p>\n<p>We’re not just a highly ambitious, large-scale technology company. We’re a highly ambitious, large-scale technology company with a soul. Fundamental to our mission to help build a better Internet is protecting the free and open Internet.</p>\n<p>Project Galileo: Since 2014, we&#39;ve equipped more than 2,400 journalism and civil society organizations in 111 countries with powerful tools to defend themselves against attacks that would otherwise censor their work, technology already used by Cloudflare’s enterprise customers--at no cost.</p>\n<p>Athenian Project: In 2017, we created the Athenian Project to ensure that state and local governments have the highest level of protection and reliability for free, so that their constituents have access to election information and voter registration. Since the project, we&#39;ve provided services to more than 425 local government election websites in 33 states.</p>\n<p>1.1.1.1: We released 1.1.1.1 to help fix the foundation of the Internet by building a faster, more secure and privacy-centric public DNS resolver. This is available publicly for everyone to use - it is the first consumer-focused service Cloudflare has ever released.</p>\n<p>Here’s the deal - we don’t store client IP addresses never, ever. We will continue to abide by our privacy commitment and ensure that no user data is sold to advertisers or used to target consumers.</p>\n<p>Sound like something you’d like to be a part of? We’d love to hear from you!</p>\n<p>This position may require access to information protected under U.S. export control laws, including the U.S. Export Administration Regulations. Please note that any offer of employment may be conditioned on your authorization to receive software or technology controlled under these U.S. 
export laws without sponsorship for an export license.</p>\n<p>Cloudflare is proud to be an equal opportunity employer. We are committed to providing equal employment opportunity for all people and place great value in both diversity and inclusiveness. All qualified applicants will be considered for employment without regard to their, or any other person&#39;s, perceived or actual race, color, religion, sex, gender, gender identity, gender expression, sexual orientation, national origin, ancestry, citizenship, age, physical or mental disability, medical condition, family care status, or any other basis protected by law.</p>\n<p>We are an AA/Veterans/Disability Employer.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_20fef61c-c3c","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Cloudflare","sameAs":"https://www.cloudflare.com/","logo":"https://logos.yubhub.co/cloudflare.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/cloudflare/jobs/7210482","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Fluency in English (verbal and written)","Experience managing technical sales within large partners and accounts","Developing champion-style relationships","Driving technical wins","Assisting with technical validation","SASE concepts and Zero Trust Networking architectures","Networking technologies including TCP, UDP, DNS, IPv4 + IPv6, BGP routing, GRE, SD-WAN, MPLS, Global Traffic Management","Internet security technologies including DDoS and DDoS mitigation, Firewalls, TLS, VPN, DLP","Detailed understanding of workflow from user to application including hybrid architectures with Azure, AWS, GCP","HTTP technologies including reverse proxy (e.g., WAF and CDN), forward proxy (secure web gateway), serverless application 
development"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:56:31.368Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Hybrid; In-Office"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Fluency in English (verbal and written), Experience managing technical sales within large partners and accounts, Developing champion-style relationships, Driving technical wins, Assisting with technical validation, SASE concepts and Zero Trust Networking architectures, Networking technologies including TCP, UDP, DNS, IPv4 + IPv6, BGP routing, GRE, SD-WAN, MPLS, Global Traffic Management, Internet security technologies including DDoS and DDoS mitigation, Firewalls, TLS, VPN, DLP, Detailed understanding of workflow from user to application including hybrid architectures with Azure, AWS, GCP, HTTP technologies including reverse proxy (e.g., WAF and CDN), forward proxy (secure web gateway), serverless application development"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_07626e74-020"},"title":"Engineering Architect, Identity (Auth0)","description":"<p>Secure Every Identity, from AI to Human</p>\n<p>Identity is the key to unlocking the potential of AI. Auth0 secures AI by building the trusted, neutral infrastructure that enables organisations to safely embrace this new era. This work requires a relentless drive to solve complex challenges with real-world stakes. We are looking for builders and owners who operate with speed and urgency and execute with excellence.</p>\n<p>This is an opportunity to do career-defining work. We&#39;re all in on this mission. 
If you are too, let&#39;s talk.</p>\n<p><strong>Software Architect, Identity</strong></p>\n<p><strong>The Engineering Architect Team</strong></p>\n<p>The Architecture team is a small group of very senior engineers reporting to our VP of Engineering Excellence, working broadly across the organisation in collaboration with Engineering, Product, and Security. We partner deeply with other Engineering teams for large projects, and provide direction and architectural guidance for smaller initiatives. We have a dual-pronged charter to “level up the tech stack and level up the people stack” via both technical contributions and partnerships/mentoring.</p>\n<p>In this role, you will have the opportunity to significantly contribute to Auth0’s future technology direction. Through your experience, knowledge of industry trends, and technical abilities you will provide guidance, build proof of concepts, and deliver production software implementations that help Auth0 Engineering teams move faster by using and developing standard patterns and technologies. You will also help advance the engineering culture and help uplevel other engineers. 
Note that while this role involves a lot of guidance, documentation, and leadership, it also requires substantial hands-on coding and development of both applications and systems.</p>\n<p><strong>What you’ll be doing</strong></p>\n<ul>\n<li>Collaborate with Product, Security, and Engineering teams to define and continually improve Auth0’s technology stack and architecture.</li>\n</ul>\n<ul>\n<li>Foster and lead innovation in the IAM space, with a strong focus on Agentic Identity</li>\n</ul>\n<ul>\n<li>Lead initiatives to enhance, scale, and evolve Auth0’s product offerings.</li>\n</ul>\n<ul>\n<li>Embed within Engineering teams across the organisation for large projects, while providing guidance and lighter touch engagements for smaller initiatives.</li>\n</ul>\n<ul>\n<li>Design, architect, and document large scale distributed systems.</li>\n</ul>\n<ul>\n<li>Lead the development of complex, broadly-scoped functionality in a very large and deep set of services and components.</li>\n</ul>\n<ul>\n<li>Teach by doing: coding, optimising, and troubleshooting Node.js and Go applications in collaboration with feature development teams.</li>\n</ul>\n<ul>\n<li>Implement features and create consistent foundations using technologies such as AWS, Azure, Node.js, Go, MongoDB, Redis, PostgreSQL, Kubernetes.</li>\n</ul>\n<ul>\n<li>Investigate, understand, and resolve bottlenecks in our ability to scale, use resources efficiently, and maintain a 99.99% uptime SLA.</li>\n</ul>\n<ul>\n<li>Drive technical decision making while striving to find the right balance between factors such as simplicity, flexibility, reliability, cost, and performance.</li>\n</ul>\n<ul>\n<li>Participate in “round table” discussions and mentor team members and engineers throughout the organisation to level up our people.</li>\n</ul>\n<ul>\n<li>Participate in our Engineering Leadership Team with other architects, directors, and executives.</li>\n</ul>\n<ul>\n<li>You will join our Incident Commander on-call 
rotation. Members of our team do periodic on-call rotation for high-severity incidents to help up-level our responses, after spending time getting acquainted with our applications, systems, and processes, and getting training.</li>\n</ul>\n<p><strong>What you’ll bring to the role</strong></p>\n<ul>\n<li>10+ years of software development experience.</li>\n</ul>\n<ul>\n<li>5+ years of experience working on cloud applications.</li>\n</ul>\n<ul>\n<li>Experience with API-first applications using REST and/or gRPC</li>\n</ul>\n<ul>\n<li>Passion and thorough understanding of what it takes to build and operate secure, reliable systems at scale.</li>\n</ul>\n<ul>\n<li>Knowledge of Identity Protocols such as OAuth, OIDC and SAML.</li>\n</ul>\n<ul>\n<li>Industry knowledge of the Authorization and Authentication spaces.</li>\n</ul>\n<ul>\n<li>Experience in building AI Agents, and/or MCP server applications.</li>\n</ul>\n<ul>\n<li>Experience with security engineering and application security.</li>\n</ul>\n<ul>\n<li>Very strong written and verbal communication skills with a demonstrated ability to adjust your communication style to the intended audience, whether communicating with senior executives, customers, engineers, or product managers.</li>\n</ul>\n<ul>\n<li>Mastery and deep understanding of hands-on software development building distributed systems.</li>\n</ul>\n<ul>\n<li>Experience with multi-cloud environments and container deployments, particularly Kubernetes in AWS/Azure.</li>\n</ul>\n<ul>\n<li>Prior experience with application performance management, tracing, and performance testing tools.</li>\n</ul>\n<ul>\n<li>Excellence at creating clarity and alignment for technical initiatives.</li>\n</ul>\n<ul>\n<li>Great ability to build trust through collaboration with multiple teams and get consensus on a vision.</li>\n</ul>\n<ul>\n<li>Knowledge of application security and cloud security best practices.</li>\n</ul>\n<p>And extra credit if you have experience in any of the 
following!</p>\n<ul>\n<li>Deep experience in Node.js (Javascript or Typescript), or Golang.</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_07626e74-020","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Auth0","sameAs":"https://auth0.com/","logo":"https://logos.yubhub.co/auth0.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/okta/jobs/7128746","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$274,000-$370,000 USD","x-skills-required":["API-first applications","REST","gRPC","OAuth","OIDC","SAML","Authorization","Authentication","AI Agents","MCP servers","Security engineering","Application security","Cloud security best practices","Node.js","Go","AWS","Azure","MongoDB","Redis","PostgreSQL","Kubernetes"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:56:28.589Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, California"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"API-first applications, REST, gRPC, OAuth, OIDC, SAML, Authorization, Authentication, AI Agents, MCP servers, Security engineering, Application security, Cloud security best practices, Node.js, Go, AWS, Azure, MongoDB, Redis, PostgreSQL, Kubernetes","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":274000,"maxValue":370000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_90423d85-ea7"},"title":"Senior Software Engineer - Fullstack","description":"<p>As a Full Stack software engineer, you will work with your team and product management to make insights from data simple. 
We are looking for engineers that are customer obsessed, who can take on the full scope of the product and user experience beyond the technical implementation. You&#39;ll set the foundation for how we build robust, scalable and delightful products.</p>\n<p>Some example experiences you&#39;ll create for our customers to achieve the full project lifecycle from loading data, visualizing results, creating statistical models, and deploying as production artifacts include:</p>\n<ul>\n<li>Simple workflows to create, configure, and manage large-scale compute clusters, networks and data sources.</li>\n<li>Create, deploy, test, and upgrade complex data pipelines with powerful features to visualize data graphs.</li>\n<li>Seamless onboarding and management for all members of an organisation to become data-driven.</li>\n<li>Provide a great SQL-centric data exploration and dashboarding experience on Databricks.</li>\n<li>An interactive environment for collaborative data projects at massive scale with an easy path to production.</li>\n</ul>\n<p>We are looking for engineers with 5+ years of experience with HTML, CSS, and JavaScript, passion for user experience and design, and a deep understanding of front-end architecture. 
You should be comfortable working towards a multi-year vision with incremental deliverables, motivated by delivering customer value, and experienced with modern JavaScript frameworks and server-side web technologies.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_90423d85-ea7","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/5445641002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$166,000-$225,000 USD","x-skills-required":["HTML","CSS","JavaScript","SQL","Cloud technologies (AWS, Azure, GCP, Docker, or Kubernetes)","Modern JavaScript frameworks (React, Angular, or VueJs/Ember)","Server-side web technologies (Node.js, Java, Python, Scala, C#, C++, Go)"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:56:16.942Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Mountain View, California; San Francisco, California"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"HTML, CSS, JavaScript, SQL, Cloud technologies (AWS, Azure, GCP, Docker, or Kubernetes), Modern JavaScript frameworks (React, Angular, or VueJs/Ember), Server-side web technologies (Node.js, Java, Python, Scala, C#, C++, Go)","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":166000,"maxValue":225000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_995b724b-c85"},"title":"Senior Sales Engineer, Partnerships","description":"<p>We are seeking a Senior Sales Engineer, Partnerships to join our team. 
As a Senior Sales Engineer, you will be responsible for providing technical expertise and strategic enablement to partners, facilitating strategies to pursue avenues of revenue outside of the life sciences. This role bridges technical knowledge and business strategy, supporting partners during discovery, qualification, and solution design to showcase the value of Komodo&#39;s healthcare data and analytics platform.</p>\n<p>Key Responsibilities:</p>\n<ul>\n<li>Serve as a technical lead on 8-10 strategic opportunities, directly influencing the deal cycles and accelerating revenue growth.</li>\n<li>Become the definitive subject matter expert on Komodo&#39;s comprehensive suite of healthcare data assets and platform capabilities.</li>\n<li>Garner subject matter expertise and ownership of a segment within the Partnerships / Channel Partnerships organization.</li>\n<li>Develop scalable technical frameworks, demo environments, and reusable assets that have set new organizational standards with a heavy emphasis on agentic AI workflows.</li>\n<li>Drive cross-functional initiatives by partnering with Product, Data Science, and Engineering to deliver customized, innovative solutions.</li>\n</ul>\n<p>Requirements:</p>\n<ul>\n<li>7+ years of experience in Sales Engineering or Solutions Engineering with a focus on healthcare data and healthcare technology.</li>\n<li>Proven track record of understanding and leveraging AI tools to enhance SaaS products or improve operational workflows.</li>\n<li>Expertise in healthcare data (e.g., 837/835 transactions, NDC codes) and its practical applications in analytics, reporting, and decision-making.</li>\n<li>Strong technical skills, including experience with APIs, data integration, cloud-based architectures (e.g., AWS, Azure), and analyzing large datasets.</li>\n<li>An understanding and proficiency of data science techniques, specifically SQL, Python, and/or R.</li>\n<li>Excellent communication and presentation skills, with the 
ability to train partners and translate complex technical concepts for diverse stakeholders.</li>\n</ul>\n<p>Preferred Skills:</p>\n<ul>\n<li>Experience working within the provider, payer, or financial service segments.</li>\n<li>Technical certifications in AWS, Azure, or data platforms.</li>\n<li>Experience with CRM platforms like Salesforce for managing partner and client interactions.</li>\n<li>Familiarity with data visualization tools (e.g., Tableau, Looker) to create impactful partner training materials.</li>\n<li>Knowledge of identity resolution and privacy-preserving linking technologies.</li>\n<li>Prior experience developing joint business plans and co-sell strategies with channel partners.</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_995b724b-c85","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Komodo Health","sameAs":"https://www.komodohealth.com/","logo":"https://logos.yubhub.co/komodohealth.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/komodohealth/jobs/8495825002","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$143,000-$193,000 USD","x-skills-required":["Sales Engineering","Healthcare Data","Healthcare Technology","AI Tools","APIs","Data Integration","Cloud-Based Architectures","Data Science Techniques","SQL","Python","R","Excellent Communication","Presentation Skills"],"x-skills-preferred":["Experience Working Within Provider, Payer, or Financial Service Segments","Technical Certifications in AWS, Azure, or Data Platforms","Experience with CRM Platforms Like Salesforce","Familiarity with Data Visualization Tools","Knowledge of Identity Resolution and Privacy-Preserving Linking Technologies","Prior Experience Developing Joint Business Plans and Co-Sell 
Strategies"],"datePosted":"2026-04-18T15:56:08.142Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"United States"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Sales","industry":"Healthcare","skills":"Sales Engineering, Healthcare Data, Healthcare Technology, AI Tools, APIs, Data Integration, Cloud-Based Architectures, Data Science Techniques, SQL, Python, R, Excellent Communication, Presentation Skills, Experience Working Within Provider, Payer, or Financial Service Segments, Technical Certifications in AWS, Azure, or Data Platforms, Experience with CRM Platforms Like Salesforce, Familiarity with Data Visualization Tools, Knowledge of Identity Resolution and Privacy-Preserving Linking Technologies, Prior Experience Developing Joint Business Plans and Co-Sell Strategies","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":143000,"maxValue":193000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_5ceb4835-0f1"},"title":"Manager, Professional Services","description":"<p>As a Manager, Professional Services, you will work with clients on short to medium-term customer engagements on their big data challenges using the Databricks platform. 
You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers get the most value out of their data.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>You will work on a variety of impactful customer technical big data projects which may include building reference architectures, how-to&#39;s, and production-grade MVPs.</li>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build, and deployment of industry-leading big data and AI applications.</li>\n<li>Consult on architecture and design; bootstrap or implement strategic customer projects which lead to a customer&#39;s successful understanding, evaluation, and adoption of Databricks.</li>\n<li>Work with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement-specific product and support issues.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>10+ years of experience with Big Data Technologies such as Apache Spark, Kafka, Cloud Native, and Data Lakes in a customer-facing post-sales, technical architecture, or consulting role.</li>\n<li>4+ years of people management experience, managing a team of Data Engineers, Data Architects, etc.</li>\n<li>6+ years of experience working on Big Data Architectures independently.</li>\n<li>Experience working across Cloud Platforms (GCP/AWS/Azure).</li>\n<li>Experience working on Databricks platform is a plus.</li>\n<li>Documentation and white-boarding skills.</li>\n<li>Build skills in technical areas which support the deployment and integration of Databricks-based solutions to complete customer projects.</li>\n<li>Willingness to travel for onsite customer engagements within India.</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_5ceb4835-0f1","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8503068002","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Apache Spark","Kafka","Cloud Native","Data Lakes","Big Data Technologies","Data Engineering","Data Science","Cloud Technology","People Management","Team Leadership"],"x-skills-preferred":["Databricks","GCP","AWS","Azure","Documentation","White-boarding"],"datePosted":"2026-04-18T15:56:03.190Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Remote - India"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Apache Spark, Kafka, Cloud Native, Data Lakes, Big Data Technologies, Data Engineering, Data Science, Cloud Technology, People Management, Team Leadership, Databricks, GCP, AWS, Azure, Documentation, White-boarding"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_0036f074-845"},"title":"Resident Solutions Architect - Financial Services","description":"<p>As a Senior Big Data Solutions Architect (Sr Resident Solutions Architect) in our Professional Services team, you will work with clients on short to medium term customer engagements on their big data challenges using the Databricks platform.</p>\n<p>You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>RSAs are billable and know how to complete projects according to specification with excellent 
customer service.</p>\n<p>You will report to the regional Manager/Lead.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>Work on a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases</li>\n<li>Work with engagement managers to scope variety of professional services work with input from the customer</li>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n<li>Consult on architecture and design; bootstrap hands-on projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks.</li>\n<li>Provide an escalated level of support for customer operational issues.</li>\n<li>Work with the Databricks technical team, Project Manager, Architect and Customer team to ensure the technical components of the engagement are delivered to meet customer&#39;s needs.</li>\n<li>Work with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement specific product and support issues.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>9+ years experience in data engineering, data platforms &amp; analytics</li>\n<li>Comfortable writing code in either Python or Scala</li>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Apache Spark™ runtime internals</li>\n<li>Familiarity with CI/CD for production deployments</li>\n<li>Working knowledge of MLOps</li>\n<li>Capable of design and deployment of highly performant end-to-end data architectures</li>\n<li>Experience with technical project delivery - managing scope and timelines.</li>\n<li>Documentation and 
white-boarding skills.</li>\n<li>Experience working with clients and managing conflicts.</li>\n<li>Experience in building scalable streaming and batch solutions using cloud-native components</li>\n<li>Travel to customers up to 20% of the time</li>\n</ul>\n<p>Nice to have: Databricks Certification</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_0036f074-845","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8456966002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 USD","x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","design and deployment of highly performant end-to-end data architectures","technical project delivery","documentation and white-boarding skills","client management"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:55:41.870Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Boston, Massachusetts"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & analytics, Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, design and deployment of highly performant end-to-end data architectures, technical project delivery, documentation and white-boarding skills, client 
management","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_8317ba42-502"},"title":"Senior Technical Solutions Engineer (Platform)","description":"<p>We are seeking a highly skilled Frontline Senior Technical Solutions Engineer with over 7+ years of experience to join our Platform Support team.</p>\n<p>This role is pivotal in delivering exceptional support for our Databricks Data Intelligence platform, addressing complex technical challenges, and ensuring the seamless operation of our data solutions.</p>\n<p>As a frontline engineer, you will be the primary point of contact for critical issues, working closely with both internal teams and customers to resolve high-impact problems and drive platform improvements.</p>\n<p>Key Responsibilities:</p>\n<ul>\n<li>Frontline Support: Serve as the primary technical point of contact for escalated issues related to the Databricks Data Intelligence platform. Provide expert-level troubleshooting, diagnostics, and resolution for complex problems affecting system performance and reliability.</li>\n</ul>\n<ul>\n<li>Customer Interaction: Engage with customers directly to understand their technical issues and requirements. Provide timely, clear, and actionable solutions to ensure high levels of customer satisfaction.</li>\n</ul>\n<ul>\n<li>Incident Management: Lead the resolution of high-priority incidents, coordinating with various teams to address and mitigate issues swiftly. 
Conduct thorough root cause analyses and develop preventive measures to avoid recurrence.</li>\n</ul>\n<ul>\n<li>Collaboration: Work closely with engineering, product management, and DevOps teams to share insights, identify recurring issues, and drive improvements to the Databricks Data Intelligence platform.</li>\n</ul>\n<ul>\n<li>Documentation and Knowledge Sharing: Create and maintain detailed documentation on support procedures, known issues, and solutions. Contribute to internal knowledge bases and create training materials to assist other support engineers.</li>\n</ul>\n<ul>\n<li>Performance Monitoring: Monitor and analyze platform performance metrics to identify potential issues before they impact customers. Implement optimizations and enhancements to improve platform stability and efficiency.</li>\n</ul>\n<ul>\n<li>Platform Upgrades: Manage and oversee the deployment of Databricks Data Intelligence platform upgrades and patches, ensuring minimal disruption to services and maintaining system integrity.</li>\n</ul>\n<ul>\n<li>Innovation and Improvement: Stay abreast of industry trends and advancements in Databricks technology. Propose and drive initiatives to enhance platform capabilities and support processes.</li>\n</ul>\n<ul>\n<li>Customer Feedback: Collect and analyze customer feedback to drive continuous improvement in support processes and platform features.</li>\n</ul>\n<p>Qualifications:</p>\n<ul>\n<li>Experience: Minimum of 7+ years of hands-on experience in a technical support or engineering role related to Databricks Data Intelligence platform, cloud data platforms, or big data technologies.</li>\n</ul>\n<ul>\n<li>Technical Skills: A deep understanding of Databricks architecture and Apache Spark, along with experience in cloud platforms like AWS, Azure, or GCP, is essential. Strong capabilities in designing and managing data pipelines, distributed computing are required. 
Proficiency in Unix/Linux administration, familiarity with DevOps practices, and skills in log analysis and monitoring tools are also crucial for effective troubleshooting and system optimization.</li>\n</ul>\n<ul>\n<li>Problem-Solving: Demonstrated ability to diagnose and resolve complex technical issues with a strong analytical and methodical approach.</li>\n</ul>\n<ul>\n<li>Communication: Exceptional verbal and written communication skills, with the ability to effectively convey technical information to both technical and non-technical stakeholders.</li>\n</ul>\n<ul>\n<li>Customer Focus: Proven experience in managing high-impact customer interactions and ensuring a positive customer experience.</li>\n</ul>\n<ul>\n<li>Collaboration: Ability to work effectively in a team environment, collaborating with engineering, product, and customer-facing teams.</li>\n</ul>\n<ul>\n<li>Education: Bachelor’s degree in Computer Science, Engineering, or a related field. Advanced degree or relevant certifications are highly desirable.</li>\n</ul>\n<p>Preferred Skills:</p>\n<ul>\n<li>Experience with additional big data tools and technologies such as Hadoop, Kafka, or NoSQL databases.</li>\n</ul>\n<ul>\n<li>Familiarity with automation tools and CI/CD pipelines.</li>\n</ul>\n<ul>\n<li>Understanding of data governance and compliance requirements.</li>\n</ul>\n<p>Why Join Us?</p>\n<ul>\n<li>Innovative Environment: Work with cutting-edge technology in a fast-paced, innovative company.</li>\n</ul>\n<ul>\n<li>Career Growth: Opportunities for professional development and career advancement.</li>\n</ul>\n<ul>\n<li>Team Culture: Collaborate with a talented and motivated team dedicated to excellence and continuous improvement.</li>\n</ul>\n<p>PLEASE NOTE: THE ROLE INVOLVES WORKING IN THE EMEA TIMEZONE</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_8317ba42-502","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8041698002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Databricks architecture","Apache Spark","AWS","Azure","GCP","Unix/Linux administration","DevOps practices","log analysis and monitoring tools"],"x-skills-preferred":["Hadoop","Kafka","NoSQL databases","automation tools","CI/CD pipelines","data governance and compliance requirements"],"datePosted":"2026-04-18T15:55:32.901Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Bengaluru, India"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Databricks architecture, Apache Spark, AWS, Azure, GCP, Unix/Linux administration, DevOps practices, log analysis and monitoring tools, Hadoop, Kafka, NoSQL databases, automation tools, CI/CD pipelines, data governance and compliance requirements"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_0a7cad02-cd5"},"title":"Resident Solutions Architect - Manufacturing","description":"<p>As a Resident Solutions Architect (RSA) on our Professional Services team, you will work with customers on short to medium term customer engagements on their big data challenges using the Databricks platform.</p>\n<p>You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>RSAs are billable and know how to complete projects according to specification with excellent customer 
service.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>Handle a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases</li>\n</ul>\n<ul>\n<li>Work with engagement managers to scope variety of professional services work with input from the customer</li>\n</ul>\n<ul>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n</ul>\n<ul>\n<li>Consult on architecture and design; bootstrap or implement customer projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks.</li>\n</ul>\n<ul>\n<li>Provide an escalated level of support for customer operational issues</li>\n</ul>\n<ul>\n<li>Collaborate with the Databricks Technical, Project Manager, Architect and Customer teams to ensure the technical components of the engagement are delivered to meet customer&#39;s needs</li>\n</ul>\n<ul>\n<li>Work with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement specific product and support issues</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>6+ years experience in data engineering, data platforms &amp; analytics</li>\n</ul>\n<ul>\n<li>Comfortable writing code in either Python or Scala</li>\n</ul>\n<ul>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n</ul>\n<ul>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Spark runtime internals</li>\n</ul>\n<ul>\n<li>Familiarity with CI/CD for production deployments</li>\n</ul>\n<ul>\n<li>Working knowledge of MLOps</li>\n</ul>\n<ul>\n<li>Design and deployment of performant end-to-end data architectures</li>\n</ul>\n<ul>\n<li>Experience 
with technical project delivery - managing scope and timelines</li>\n</ul>\n<ul>\n<li>Documentation and white-boarding skills</li>\n</ul>\n<ul>\n<li>Experience working with clients and managing conflicts</li>\n</ul>\n<ul>\n<li>Build skills in technical areas which support the deployment and integration of Databricks-based solutions to complete customer projects</li>\n</ul>\n<ul>\n<li>Ability to travel up to 30% when needed</li>\n</ul>\n<p>Pay Range Transparency Databricks is committed to fair and equitable compensation practices. The pay range(s) for this role is listed below and represents the expected base salary range for non-commissionable roles or on-target earnings for commissionable roles.</p>\n<p>Actual compensation packages are based on several factors that are unique to each candidate, including but not limited to job-related skills, depth of experience, relevant certifications and training, and specific work location.</p>\n<p>Based on the factors above, Databricks anticipated utilizing the full width of the range.</p>\n<p>The total compensation package for this position may also include eligibility for annual performance bonus, equity, and the benefits listed above.</p>\n<p>For more information regarding which range your location is in visit our page here.</p>\n<p>Zone 1 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 2 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 3 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 4 Pay Range $180,656-$248,360 USD</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_0a7cad02-cd5","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8494155002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 USD","x-skills-required":["data engineering","data science","cloud technology","Apache Spark","CI/CD","MLOps","distributed computing","Python","Scala","AWS","Azure","GCP"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:55:20.115Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Philadelphia, Pennsylvania"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data science, cloud technology, Apache Spark, CI/CD, MLOps, distributed computing, Python, Scala, AWS, Azure, GCP","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_134a19a5-1cb"},"title":"Software Engineer, Production Engineering  (London, United Kingdom)","description":"<p>We&#39;re looking for a skilled Software Engineer to join our Production Engineering team. 
As a key member of the team, you will be responsible for ensuring the end-to-end reliability, durability, scalability, and performance of Figma&#39;s products and services.</p>\n<p>Your primary focus will be on building and running complex large-scale services, addressing common operational challenges through better telemetry and tooling, and debugging production issues across services and levels of the stack.</p>\n<p>You will work closely with the engineering team to define standard methodologies and goals around reliability, durability, scalability, and performance, and participate in design reviews and production reviews for new features, products, or infrastructure components.</p>\n<p>In addition, you will plan for the growth of Figma&#39;s infrastructure, operate and maintain AWS Infrastructure, and collaborate with cross-functional teams to identify and prioritize areas for improvement.</p>\n<p>We&#39;re looking for someone with 5+ years of experience operating infrastructure components/services at scale, a proven grasp of Computer Science fundamentals, and a strong interest in distributed systems.</p>\n<p>Experience managing infrastructure services in AWS, Microsoft Azure, or Google Cloud is a plus, as is a demonstrated unwavering commitment to operational security and best practices.</p>\n<p>If you have excellent problem-solving skills, technical communication skills, and a bias for action, we&#39;d love to hear from you.</p>\n<p>At Figma, we celebrate and support our differences, and we&#39;re committed to equal employment opportunities regardless of race, color, ancestry, religion, sex, national origin, sexual orientation, age, citizenship, marital status, disability, gender identity/expression, veteran status, or any other characteristic protected by law.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_134a19a5-1cb","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Figma","sameAs":"https://www.figma.com/","logo":"https://logos.yubhub.co/figma.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/figma/jobs/5781928004","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Computer Science fundamentals","Distributed systems","Infrastructure components/services","AWS","Microsoft Azure","Google Cloud","Operational security","Best practices"],"x-skills-preferred":["Problem-solving skills","Technical communication skills","Bias for action"],"datePosted":"2026-04-18T15:55:06.270Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"London, England"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Computer Science fundamentals, Distributed systems, Infrastructure components/services, AWS, Microsoft Azure, Google Cloud, Operational security, Best practices, Problem-solving skills, Technical communication skills, Bias for action"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_5cad560f-dc3"},"title":"Engineering Manager, Cloud Networking (Brazil)","description":"<p>You will join Airbnb&#39;s mission-driven company dedicated to helping create a world where anyone can belong anywhere. As the first Network engineering lead in Airbnb&#39;s Brazil office, you will be responsible for bootstrapping and growing the networking team in our new São Paulo office.</p>\n<p>Your primary focus will be on delivering an Airbnb network platform that is flexible, efficient, always available, and scales with the needs of the business. 
You will work closely with peers across Cloud Infra, Security, Reliability, and many other partner teams across the company to achieve this goal.</p>\n<p>Key responsibilities include:</p>\n<ul>\n<li>Providing meaningful input to technical designs and direct hands-on contributions to projects in the cloud networking space</li>\n<li>Growing, leading, and managing a small team of talented engineers</li>\n<li>Supporting your team&#39;s professional growth and maintaining high performance through mentorship and coaching</li>\n<li>Working with tech leads, peers, and partners to define and execute on a coherent vision and roadmap for Airbnb&#39;s cloud network infrastructure and related components</li>\n<li>Working with open source communities (e.g. istio) to build the next generation service mesh for all Airbnb back-end services</li>\n<li>Building cross-region gateways and load balancers for global Airbnb services</li>\n<li>Working with external partners and internal engineering and security teams to deliver edge security systems that protect Airbnb services</li>\n<li>Nurturing a culture of technical quality from design, through code review, to production</li>\n<li>Building strong partnership and alignment with teams across engineering</li>\n<li>Nurturing relationships with open source communities and external service partners</li>\n</ul>\n<p>As a successful candidate, you will have a strong background in engineering management, with 2+ years of experience and 8+ years of relevant software development experience in a fast-paced tech environment. You will also have experience with a public cloud provider (AWS, GCP, Azure) and their networking service offerings, as well as experience running large-scale networking systems and software (e.g. proxies, DNS, gateways).</p>\n<p>Additionally, you will have excellent communication skills and the ability to work well with teams across the engineering organization (e.g. reliability, compute, security, etc.). 
You will also have strong problem-solving skills and experience leading teams on-call for production infrastructure.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_5cad560f-dc3","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Airbnb","sameAs":"https://www.airbnb.com/","logo":"https://logos.yubhub.co/airbnb.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/airbnb/jobs/7381450","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Professional fluency in English","2+ years of engineering management experience","8+ years of relevant software development experience in a fast-paced tech environment","Experience with a public cloud provider (AWS, GCP, Azure) and their networking service offerings","Experience running large-scale networking systems and software (e.g. proxies, DNS, gateways)","Experience with Istio service mesh, k8s and cloud native technologies","Excellent communication skills and the ability to work well with teams across the engineering organization","Strong problem-solving skills and experience leading teams on-call for production infrastructure"],"x-skills-preferred":["Experience with open source communities (e.g. 
istio)","Experience building cross-region gateways and load balancers for global services","Experience working with external partners and internal engineering and security teams to deliver edge security systems"],"datePosted":"2026-04-18T15:55:03.519Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Brazil"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Professional fluency in English, 2+ years of engineering management experience, 8+ years of relevant software development experience in a fast-paced tech environment, Experience with a public cloud provider (AWS, GCP, Azure) and their networking service offerings, Experience running large-scale networking systems and software (e.g. proxies, DNS, gateways), Experience with Istio service mesh, k8s and cloud native technologies, Excellent communication skills and the ability to work well with teams across the engineering organization, Strong problem-solving skills and experience leading teams on-call for production infrastructure, Experience with open source communities (e.g. istio), Experience building cross-region gateways and load balancers for global services, Experience working with external partners and internal engineering and security teams to deliver edge security systems"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_57339490-7ab"},"title":"Analytics Consulting Manager","description":"<p>We are seeking an experienced Analytics Consulting Manager to join our team at Komodo Health. 
As an Analytics Consulting Manager, you will be responsible for managing the end-to-end delivery of analytics projects, translating ambiguous business questions into tractable data analysis projects, and interpreting and investigating data and statistical questions that arise through the delivery of client work.</p>\n<p>Key responsibilities include:</p>\n<ul>\n<li>Managing the end-to-end delivery of analytics projects, from scoping and planning to execution and delivery</li>\n<li>Translating ambiguous business questions into tractable data analysis projects</li>\n<li>Interpreting and investigating data and statistical questions that arise through the delivery of client work</li>\n<li>Collaborating with cross-functional teams, including Data Science, Engineering, and Product Management, to ensure alignment on project goals and deliverables</li>\n<li>Serving as the primary point of contact for clients, addressing any concerns or issues that may arise during the project lifecycle</li>\n<li>Monitoring project progress and performance, proactively identifying risks and implementing mitigation strategies as needed</li>\n</ul>\n<p>To be successful in this role, you will need to have a strong background in analytics and project management, with experience leading cross-functional teams and managing client relationships. You will also need to have excellent communication and presentation skills, with the ability to convey complex analytical concepts to non-technical audiences.</p>\n<p>In addition to your technical skills and experience, you will need to be able to integrate AI into your daily work, from summarizing documents to automating workflows and uncovering insights. 
This is a pivotal moment in time, where being first to market with AI transforms industries and sets the bar.</p>\n<p>If you are a motivated and experienced professional looking to join a dynamic team and contribute to the development of cutting-edge analytics solutions, please apply today!</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_57339490-7ab","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Komodo Health","sameAs":"https://www.komodohealth.com/","logo":"https://logos.yubhub.co/komodohealth.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/komodohealth/jobs/8460436002","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$112,000-$165,000 USD (San Francisco Bay Area and New York City), $129,000-$145,000 USD (All Other US Locations)","x-skills-required":["Bachelor's or Master's degree in a quantitative field (e.g., Statistics, Mathematics, Computer Science, Economics)","Minimum of 5 years of experience in healthcare analytics or related field, with a proven track record of delivering analytics solutions to clients","Strong project management skills, with experience leading cross-functional teams and managing client relationships","Excellent communication and presentation skills, with the ability to convey complex analytical concepts to non-technical audiences","Proficiency in data analysis tools and programming languages such as SQL, Python, or R"],"x-skills-preferred":["Experience working in a consulting or professional services environment, preferably within the healthcare industry","Experience with cloud-based analytics platforms such as AWS, Google Cloud Platform, or Azure","Familiarity with healthcare data standards and regulations, such as HIPAA and GDPR","Advanced analytical skills, including predictive modeling, machine learning, and data 
visualization"],"datePosted":"2026-04-18T15:55:00.487Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"United States"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Healthcare","skills":"Bachelor's or Master's degree in a quantitative field (e.g., Statistics, Mathematics, Computer Science, Economics), Minimum of 5 years of experience in healthcare analytics or related field, with a proven track record of delivering analytics solutions to clients, Strong project management skills, with experience leading cross-functional teams and managing client relationships, Excellent communication and presentation skills, with the ability to convey complex analytical concepts to non-technical audiences, Proficiency in data analysis tools and programming languages such as SQL, Python, or R, Experience working in a consulting or professional services environment, preferably within the healthcare industry, Experience with cloud-based analytics platforms such as AWS, Google Cloud Platform, or Azure, Familiarity with healthcare data standards and regulations, such as HIPAA and GDPR, Advanced analytical skills, including predictive modeling, machine learning, and data visualization","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":112000,"maxValue":165000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_86dc459d-a0f"},"title":"Senior Software Engineer, Platform as a Service","description":"<p>We are seeking a technical, hands-on, empathetic senior software engineer to help define and deliver our Platform as a Service (PAAS) mission. As a senior engineer on the PAAS team, you will collaborate with the team to deliver forward-looking, customer-centric tooling. 
Your expertise in building and using best-in-class infrastructure tools will equip our engineering organisation with tools to move quickly and deliver features that bring millions of people together.</p>\n<p>Key responsibilities include:</p>\n<ul>\n<li>Working with customer engineering teams to ensure we’re building solutions that developers love using day-in and day-out</li>\n<li>Collaborating with the Internal Development Experience (IDX) team to ensure an easy path to go from development through staging into production</li>\n<li>Working with the Platform Security team in order to secure every path to production</li>\n<li>Shipping Rust code to YAY, our in-house deployment tooling built around Google Kubernetes Engine and Temporal</li>\n<li>Exposing the full flexibility of Kubernetes for users while abstracting the complexities away</li>\n<li>Building tools to manage the configuration, observability, and scaling characteristics of our infrastructure</li>\n</ul>\n<p>Requirements include:</p>\n<ul>\n<li>5+ years of experience in software development with a focus on tooling, infrastructure, and automation</li>\n<li>Experience working in multi-milestone and even multi-quarter projects</li>\n<li>Expertise and empathy when troubleshooting issues with customer engineering teams</li>\n<li>Expertise using and building upon the primitives of standard cloud infrastructure tooling like Kubernetes, Docker</li>\n<li>Experience developing in cloud-based environments (we use Google Cloud; knowledge of Amazon Web Services and/or Azure also great!)</li>\n<li>Experience with infrastructure-as-code tooling (we use Terraform)</li>\n</ul>\n<p>Bonus points for experience with CI, build, and deployment technologies like Buildkite, Bazel, and Terraform, as well as cloud networking tools like istio, envoy, etc. 
and application observability tools like Datadog and/or Sentry.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_86dc459d-a0f","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Discord","sameAs":"https://discord.com","logo":"https://logos.yubhub.co/discord.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/discord/jobs/8409021002","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$196,000 to $220,500 + equity + benefits","x-skills-required":["Rust","Kubernetes","Docker","Terraform","Google Cloud","Amazon Web Services","Azure","CI/CD","infrastructure-as-code"],"x-skills-preferred":["Buildkite","Bazel","istio","envoy","Datadog","Sentry"],"datePosted":"2026-04-18T15:54:51.444Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco Bay Area"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Rust, Kubernetes, Docker, Terraform, Google Cloud, Amazon Web Services, Azure, CI/CD, infrastructure-as-code, Buildkite, Bazel, istio, envoy, Datadog, Sentry","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":196000,"maxValue":220500,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_f95ac4b6-a7c"},"title":"Software Engineer - Delivery Platform","description":"<p>At Squarespace, we&#39;re reimagining how people bring their ideas to life online. Our Infrastructure Engineering teams are at the heart of that mission --- building the platforms and tooling that let every engineer ship with speed and confidence.</p>\n<p>As a Software Engineer on the Delivery team, you&#39;ll work on the systems that sit between GitHub and production. 
These systems include nearly every Squarespace service, such as CI/CD pipelines, GitOps workflows, and the deployment platform that spans our Kubernetes clusters and regions. If you&#39;re passionate about developer experience, modern deployment tooling, and making other engineers more productive, we want to hear from you.</p>\n<p><strong>Responsibilities</strong></p>\n<ul>\n<li>Build and evolve the platform that ships Squarespace services to production --- CI/CD pipelines, GitOps workflows, and deployment tooling across Kubernetes clusters.</li>\n<li>Increase adoption of modern deployment tooling across high-traffic services</li>\n<li>Design reusable Helm charts, GitOps templates, and standardized rollout/rollback patterns for engineering teams.</li>\n<li>Identify improvements to CI pipeline performance and reliability across the organization.</li>\n<li>Contribute to AI-assisted delivery tooling that helps engineers self-serve and diagnose build failures.</li>\n<li>Develop technical documentation to ensure knowledge sharing and reusability.</li>\n</ul>\n<p><strong>Requirements</strong></p>\n<ul>\n<li>3+ years of backend or platform engineering experience.</li>\n<li>Experience building or improving CI/CD pipelines (e.g., Drone, Jenkins, GitHub Actions, Harness).</li>\n<li>Knowledge of Docker and Kubernetes.</li>\n<li>Familiarity with GitOps tooling such as Argo CD or Flux.</li>\n<li>Proficiency in Go, Python, or Java.</li>\n<li>Experience with Google Cloud, AWS, or Azure.</li>\n<li>Comfortable with Agile methodologies and Git.</li>\n<li>Experience troubleshooting issues with users.</li>\n</ul>\n<p><strong>Benefits &amp; Perks</strong></p>\n<ul>\n<li>A choice between medical plans with an option for 100% covered premiums</li>\n<li>Fertility and adoption benefits</li>\n<li>Access to supplemental insurance plans for additional coverage</li>\n<li>Headspace mindfulness app subscription</li>\n<li>Global Employee Assistance Program</li>\n<li>Retirement benefits with 
employer match</li>\n<li>Flexible paid time off</li>\n<li>12 weeks paid parental leave and family care leave</li>\n<li>Pretax commuter benefit</li>\n<li>Education reimbursement</li>\n<li>Employee donation match to community organizations</li>\n<li>7 Global Employee Resource Groups (ERGs)</li>\n<li>Dog-friendly workplace</li>\n<li>Free lunch and snacks</li>\n<li>Private rooftop</li>\n<li>Hack week twice per year</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_f95ac4b6-a7c","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Squarespace","sameAs":"https://www.squarespace.com/about/careers","logo":"https://logos.yubhub.co/squarespace.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/squarespace/jobs/7789058","x-work-arrangement":"hybrid","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":"$110,500 - $178,250 USD","x-skills-required":["backend or platform engineering experience","CI/CD pipelines","Docker","Kubernetes","GitOps tooling","Go","Python","Java","Google Cloud","AWS","Azure","Agile methodologies","Git"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:54:49.772Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"New York City"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"backend or platform engineering experience, CI/CD pipelines, Docker, Kubernetes, GitOps tooling, Go, Python, Java, Google Cloud, AWS, Azure, Agile methodologies, Git","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":110500,"maxValue":178250,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_a03720f6-bc3"},"title":"Solutions Architect","description":"<p>As a Solutions Architect at 
Databricks, you will partner with our customers to design scalable data architectures using Databricks technology and services.</p>\n<p>You have technical depth and business knowledge and can drive complex technology discussions which express the value of the Databricks platform throughout the sales lifecycle.</p>\n<p>In partnership with our Account Executives, you will engage with our customers&#39; technical leads, including architects, engineers, and operations teams with the goal of establishing yourself as a trusted advisor to achieve tangible outcomes.</p>\n<p>You will work with teams across Databricks and our executive leadership to represent your customer&#39;s needs and build valuable customer engagements and report to the Field Engineering Manager.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>Work with Sales and other essential partners to develop account strategies for your assigned accounts to grow their usage of the platform.</li>\n</ul>\n<ul>\n<li>Establish the Databricks Lakehouse architecture as the standard data architecture for customers through excellent technical account planning.</li>\n</ul>\n<ul>\n<li>Build and present reference architectures and demo applications for prospects to help them understand how Databricks can be used to achieve their goals to land new users and use cases.</li>\n</ul>\n<ul>\n<li>Capture the technical win by consulting on big data architectures, data engineering pipelines, and data science/machine learning projects; prove out the Databricks technology for strategic customer projects; and validate integrations with cloud services and other 3rd party applications.</li>\n</ul>\n<ul>\n<li>Become an expert in, and promote Databricks inspired open-source projects (Spark, Delta Lake, MLflow, and Koalas) across developer communities through meetups, conferences, and webinars.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>5+ years in a customer-facing pre-sales, technical architecture, or consulting role with expertise 
in at least one of the following technologies:</li>\n</ul>\n<ul>\n<li>Big data engineering (Ex: Spark, Hadoop, Kafka)</li>\n</ul>\n<ul>\n<li>Data Warehousing &amp; ETL (Ex: SQL, OLTP/OLAP/DSS)</li>\n</ul>\n<ul>\n<li>Data Science and Machine Learning (Ex: pandas, scikit-learn, HPO)</li>\n</ul>\n<ul>\n<li>Data Applications (Ex: Logs Analysis, Threat Detection, Real-time Systems Monitoring, Risk Analysis and more)</li>\n</ul>\n<ul>\n<li>Experience translating a customer&#39;s business needs to technology solutions, including establishing buy-in with essential customer stakeholders at all levels of the business.</li>\n</ul>\n<ul>\n<li>Experienced at designing, architecting, and presenting data systems for customers and managing the delivery of production solutions of those data architectures.</li>\n</ul>\n<ul>\n<li>Fluent in SQL and database technology.</li>\n</ul>\n<ul>\n<li>Debug and development experience in at least one of the following languages: Python, Scala, Java, or R.</li>\n</ul>\n<ul>\n<li>Desired: Built solutions with public cloud providers such as AWS, Azure, or GCP</li>\n</ul>\n<ul>\n<li>Desired: Degree in a quantitative discipline (Computer Science, Applied Mathematics, Operations Research)</li>\n</ul>\n<ul>\n<li>Travel to customers in your region up to 30% of the time.</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_a03720f6-bc3","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/5898477002","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$164,500-$224,000 CAD","x-skills-required":["Big data engineering","Data Warehousing & ETL","Data Science and Machine Learning","Data Applications","SQL 
and database technology","Python, Scala, Java, or R"],"x-skills-preferred":["Built solutions with public cloud providers such as AWS, Azure, or GCP","Degree in a quantitative discipline (Computer Science, Applied Mathematics, Operations Research)"],"datePosted":"2026-04-18T15:54:41.801Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Toronto, Canada"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Big data engineering, Data Warehousing & ETL, Data Science and Machine Learning, Data Applications, SQL and database technology, Python, Scala, Java, or R, Built solutions with public cloud providers such as AWS, Azure, or GCP, Degree in a quantitative discipline (Computer Science, Applied Mathematics, Operations Research)","baseSalary":{"@type":"MonetaryAmount","currency":"CAD","value":{"@type":"QuantitativeValue","minValue":164500,"maxValue":224000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_fc79e6e5-5c0"},"title":"Resident Solutions Architect - Manufacturing","description":"<p>As a Resident Solutions Architect (RSA) on our Professional Services team, you will work with customers on short to medium term customer engagements on their big data challenges using the Databricks platform.</p>\n<p>You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>RSAs are billable and know how to complete projects according to specification with excellent customer service.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>Handle a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use 
cases</li>\n</ul>\n<ul>\n<li>Work with engagement managers to scope variety of professional services work with input from the customer</li>\n</ul>\n<ul>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n</ul>\n<ul>\n<li>Consult on architecture and design; bootstrap or implement customer projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks.</li>\n</ul>\n<ul>\n<li>Provide an escalated level of support for customer operational issues</li>\n</ul>\n<ul>\n<li>Collaborate with the Databricks Technical, Project Manager, Architect and Customer teams to ensure the technical components of the engagement are delivered to meet customer&#39;s needs</li>\n</ul>\n<ul>\n<li>Work with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement specific product and support issues</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>6+ years experience in data engineering, data platforms &amp; analytics</li>\n</ul>\n<ul>\n<li>Comfortable writing code in either Python or Scala</li>\n</ul>\n<ul>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n</ul>\n<ul>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Spark runtime internals</li>\n</ul>\n<ul>\n<li>Familiarity with CI/CD for production deployments</li>\n</ul>\n<ul>\n<li>Working knowledge of MLOps</li>\n</ul>\n<ul>\n<li>Design and deployment of performant end-to-end data architectures</li>\n</ul>\n<ul>\n<li>Experience with technical project delivery - managing scope and timelines</li>\n</ul>\n<ul>\n<li>Documentation and white-boarding skills</li>\n</ul>\n<ul>\n<li>Experience working with clients and managing conflicts</li>\n</ul>\n<ul>\n<li>Build skills 
in technical areas which support the deployment and integration of Databricks-based solutions to complete customer projects</li>\n</ul>\n<ul>\n<li>Ability to travel up to 30% when needed</li>\n</ul>\n<p>Pay Range Transparency Databricks is committed to fair and equitable compensation practices. The pay range(s) for this role is listed below and represents the expected base salary range for non-commissionable roles or on-target earnings for commissionable roles. Actual compensation packages are based on several factors that are unique to each candidate, including but not limited to job-related skills, depth of experience, relevant certifications and training, and specific work location. Based on the factors above, Databricks anticipates utilizing the full width of the range. The total compensation package for this position may also include eligibility for annual performance bonus, equity, and the benefits listed above.</p>\n<p>For more information regarding which range your location is in visit our page here.</p>\n<p>Zone 1 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 2 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 3 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 4 Pay Range $180,656-$248,360 USD</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_fc79e6e5-5c0","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8494156002","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 USD","x-skills-required":["Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","Data engineering","Data science","Cloud 
technology"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:54:34.838Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Seattle, Washington"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, Data engineering, Data science, Cloud technology","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_c0df50e1-9cd"},"title":"Consultant, Developer Platform","description":"<p>About Us</p>\n<p>At Cloudflare, we are on a mission to help build a better Internet. Today the company runs one of the world’s largest networks that powers millions of websites and other Internet properties for customers ranging from individual bloggers to SMBs to Fortune 500 companies.</p>\n<p>As a Cloud Engineer for Developer Platform, you are an individual contributor working in the post-sales landscape, responsible for the technical execution of solutions and guidance to our customers, following a consultative approach, to get the most value possible from their Cloudflare investment.</p>\n<p>Key Responsibilities:</p>\n<ul>\n<li>Plan and deliver timely and organized services for customers, ensure customers see the full value in Cloudflare’s products and advice on product best practices.</li>\n</ul>\n<ul>\n<li>Gather business and technical requirements, use cases and any other information required to build, migrate and deliver a solution on behalf of the customer and transition the Cloudflare working environment to the customer.</li>\n</ul>\n<ul>\n<li>Produce a Solution Design, HLD, LLD, databuilds, procedures, scripts, test plans, drawings, deployment plan, migration plan, as-builts, and any 
other artifacts necessary to deliver the solution and transition smoothly into the customer’s technical teams.</li>\n</ul>\n<ul>\n<li>Implement changes on behalf of the customer in the Cloudflare environment following the customer’s change management process.</li>\n</ul>\n<ul>\n<li>Troubleshoot implementation issues and collaborate with Customer Support, Engineering and other teams to assist technical escalations.</li>\n</ul>\n<ul>\n<li>Contribute towards the success of the organization through knowledge sharing activities such as contributing to internal and external documentation, answering technical Q&amp;A, and helping to iterate on best practices.</li>\n</ul>\n<p>Support building operational assets like templates, automation scripts, procedures, workflows, etc.</p>\n<p>Requirements:</p>\n<ul>\n<li>3+ years of experience in a customer facing position as a Consultant delivering services.</li>\n</ul>\n<ul>\n<li>Demonstrated experience with:</li>\n</ul>\n<ul>\n<li>Developing serverless code in a CI/CD pipeline using an Agile methodology.</li>\n</ul>\n<ul>\n<li>Layers and protocols of the OSI model, such as TCP/IP, TLS, DNS, HTTP.</li>\n</ul>\n<ul>\n<li>Scripting languages.</li>\n</ul>\n<ul>\n<li>A scripting language (e.g. 
Python, JavaScript, Bash) and a desire to expand those skills.</li>\n</ul>\n<ul>\n<li>Infrastructure as code tools like Terraform.</li>\n</ul>\n<ul>\n<li>Strong experience with APIs.</li>\n</ul>\n<ul>\n<li>CI/CD pipelines using Azure DevOps or Git.</li>\n</ul>\n<ul>\n<li>Implementation and troubleshooting experience, knowledge of tools to troubleshoot, observability, logs, etc.</li>\n</ul>\n<ul>\n<li>Good understanding and knowledge of:</li>\n</ul>\n<ul>\n<li>Internet and Security technologies such as DDoS, Web Application Firewall, Certificates, DNS, CDN, Analytics and Logs.</li>\n</ul>\n<ul>\n<li>Security aspects of an internet property, such as DNS, WAFs, Bot Management, Rate Limiting, (M)TLS, certificates, OWASP.</li>\n</ul>\n<ul>\n<li>Performance aspects of an internet property, such as Speed, Latency, Caching, HTTP/3, TLSv1.3.</li>\n</ul>\n<p>Preferred Qualifications:</p>\n<ul>\n<li>You have worked with a Cybersecurity company or products and have performed migrations using migration tools.</li>\n</ul>\n<ul>\n<li>You have developed application security and performance capabilities.</li>\n</ul>\n<ul>\n<li>Ability to manage a project, work to deadlines, prioritize between competing demands and manage uncertainty.</li>\n</ul>\n<ul>\n<li>The work will be performed in English. Fluency in a second regional European language is a strong advantage.</li>\n</ul>\n<p>What Makes Cloudflare Special?</p>\n<p>We’re not just a highly ambitious, large-scale technology company. We’re a highly ambitious, large-scale technology company with a soul. 
Fundamental to our mission to help build a better Internet is protecting the free and open Internet.</p>\n<p>Project Galileo: Since 2014, we&#39;ve equipped more than 2,400 journalism and civil society organizations in 111 countries with powerful tools to defend themselves against attacks that would otherwise censor their work, technology already used by Cloudflare’s enterprise customers--at no cost.</p>\n<p>Athenian Project: In 2017, we created the Athenian Project to ensure that state and local governments have the highest level of protection and reliability for free, so that their constituents have access to election information and voter registration. Since the project, we&#39;ve provided services to more than 425 local government election websites in 33 states.</p>\n<p>1.1.1.1: We released 1.1.1.1 to help fix the foundation of the Internet by building a faster, more secure and privacy-centric public DNS resolver. This is available publicly for everyone to use - it is the first consumer-focused service Cloudflare has ever released.</p>\n<p>Here’s the deal - we don’t store client IP addresses never, ever. We will continue to abide by our privacy commitment and ensure that no user data is sold to advertisers or used to target consumers.</p>\n<p>Sound like something you’d like to be a part of? We’d love to hear from you!</p>\n<p>This position may require access to information protected under U.S. export control laws, including the U.S. Export Administration Regulations. Please note that any offer of employment may be conditioned on your authorization to receive software or technology controlled under these U.S. export laws without sponsorship for an export license.</p>\n<p>Cloudflare is proud to be an equal opportunity employer. We are committed to providing equal employment opportunity for all people and place great value in both diversity and inclusiveness. 
All qualified applicants will be considered for employment without regard to their, or any other person&#39;s, perceived or actual race, color, religion, sex, gender, gender identity, gender expression, sexual orientation, national origin, ancestry, citizenship, age, physical or mental disability, medical condition, family care status, or any other basis protected by law. We are an AA/Veterans/Disabled Employer. Cloudflare provides reasonable accommodations to qualified individuals</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_c0df50e1-9cd","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Cloudflare","sameAs":"https://www.cloudflare.com/","logo":"https://logos.yubhub.co/cloudflare.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/cloudflare/jobs/7383015","x-work-arrangement":"hybrid","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Developing serverless code in a CI/CD pipeline using an Agile methodology","Layers and protocols of the OSI model, such as TCP/IP, TLS, DNS, HTTP","Scripting languages","Infrastructure as code tools like Terraform","Strong experience with APIs","CI/CD pipelines using Azure DevOps or Git","Implementation and troubleshooting experience, knowledge of tools to troubleshoot, observability, logs, etc","Good understanding and knowledge of Internet and Security technologies such as DDoS, Web Application Firewall, Certificates, DNS, CDN, Analytics and Logs","Security aspects of an internet property, such as DNS, WAFs, Bot Management, Rate Limiting, (M)TLS, certificates, OWASP","Performance aspects of an internet property, such as Speed, Latency, Caching, HTTP/3, TLSv1.3"],"x-skills-preferred":["You have worked with a Cybersecurity company or products and have performed migrations using migration tools","You have developed application security and 
performance capabilities","Ability to manage a project, work to deadlines, prioritize between competing demands and manage uncertainty","The work will be performed in English. Fluency in a second regional European language is a strong advantage"],"datePosted":"2026-04-18T15:54:26.532Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Hybrid"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Developing serverless code in a CI/CD pipeline using an Agile methodology, Layers and protocols of the OSI model, such as TCP/IP, TLS, DNS, HTTP, Scripting languages, Infrastructure as code tools like Terraform, Strong experience with APIs, CI/CD pipelines using Azure DevOps or Git, Implementation and troubleshooting experience, knowledge of tools to troubleshoot, observability, logs, etc, Good understanding and knowledge of Internet and Security technologies such as DDoS, Web Application Firewall, Certificates, DNS, CDN, Analytics and Logs, Security aspects of an internet property, such as DNS, WAFs, Bot Management, Rate Limiting, (M)TLS, certificates, OWASP, Performance aspects of an internet property, such as Speed, Latency, Caching, HTTP/3, TLSv1.3, You have worked with a Cybersecurity company or products and have performed migrations using migration tools, You have developed application security and performance capabilities, Ability to manage a project, work to deadlines, prioritize between competing demands and manage uncertainty, The work will be performed in English. Fluency in a second regional European language is a strong advantage"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_2ace8872-f7e"},"title":"Manager, Backline (Platform)","description":"<p>At Databricks, we are seeking a Manager, Backline (Platform) to join our team. 
As a critical bridge between Engineering and Frontline Support, the Backline Engineering Team handles complex technical issues and escalations across the Apache Spark ecosystem and the Databricks Platform stack. With a strong focus on customer success, we are committed to delivering exceptional customer satisfaction by providing deep technical expertise, proactive issue resolution, and continuous improvements to the platform.</p>\n<p>The Manager, Backline (Platform) will be responsible for:</p>\n<ul>\n<li>Hiring and developing top talent to build an outstanding team</li>\n<li>Mentoring engineers, providing clear feedback, and developing future leaders in the team</li>\n<li>Establishing and maintaining high standards in troubleshooting, automation, and tooling to improve efficiency</li>\n<li>Working closely with Engineering to enhance observability, debugging tools, and automation, reducing escalations</li>\n<li>Collaborating with Frontline Support, Engineering, and Product teams to improve customer escalations and support processes</li>\n<li>Defining a long-term roadmap for Backline, focusing on automation, tool development, bug fixing, and proactive issue resolution</li>\n<li>Taking ownership of high-impact customer escalations by leading critical incident response during Databricks runtime outages and major incidents</li>\n<li>Participating in weekday and weekend on-call rotations, ensuring fast and effective resolution of urgent issues</li>\n</ul>\n<p>We look for candidates with 10-12 years of industry experience, at least 3+ years in a managerial role, and strong technical expertise in one of the following domains: Linux/OS and Network troubleshooting, AWS, Azure, or GCP Cloud and related services, SQL-based database systems, or Python and/or Java-based applications.</p>\n<p>If you are a motivated and experienced professional with a passion for delivering exceptional customer satisfaction, we encourage you to apply for this exciting opportunity.</p>\n<p 
style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_2ace8872-f7e","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/7879639002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Linux/OS and Network troubleshooting","AWS, Azure, or GCP Cloud and related services","SQL-based database systems","Python and/or Java-based applications","Troubleshooting","Automation","Tooling","Observability","Debugging","Collaboration","Leadership"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:54:15.620Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Bengaluru, India"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Linux/OS and Network troubleshooting, AWS, Azure, or GCP Cloud and related services, SQL-based database systems, Python and/or Java-based applications, Troubleshooting, Automation, Tooling, Observability, Debugging, Collaboration, Leadership"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_2a2d718a-f65"},"title":"Senior Software Engineer, AI Platform and Enablement","description":"<p><strong>About the Role</strong></p>\n<p>We&#39;re building a next-generation AI-powered platform and web application for creating audio and video content quickly and easily. This involves developing a revolutionary way to record, transcribe, edit, and mix audio and video on the web using state-of-the-art AI models, a challenge that requires solving complex technical problems. We&#39;re hiring a senior engineer to join our AI Platform and Enablement team. 
The ideal candidate thrives in a fast-moving, high-ownership environment and is comfortable navigating the ambiguity of bringing research work into an established product.</p>\n<p><strong>About the Team</strong></p>\n<p>The team’s objective is to support integrating cutting-edge first-party models (developed by our in-house AI Research team) and third-party/open source AI models into the Descript product.</p>\n<p><strong>Responsibilities</strong></p>\n<ul>\n<li>Build, maintain, and standardize third-party model integrations, including consulting for other engineering teams with AI model integration needs</li>\n</ul>\n<ul>\n<li>Design, implement, and maintain our AI infrastructure supporting our machine learning life cycle, including data ingestion pipelines, training developer experience and infrastructure, evaluation frameworks, and deployments / GPU infrastructure</li>\n</ul>\n<ul>\n<li>Collaborate with Product Managers, Research Engineers, and AI Researchers to understand their infrastructure needs and ensure our AI systems are robust, scalable, and efficient</li>\n</ul>\n<ul>\n<li>Optimise and scale our models and algorithms for efficient inference</li>\n</ul>\n<ul>\n<li>Deploy, monitor, and manage AI models in production</li>\n</ul>\n<p><strong>What You Bring</strong></p>\n<ul>\n<li>Experience in deploying and managing AI models in production</li>\n</ul>\n<ul>\n<li>Experience with the tools of large volume data pipelines like spark, flume, dask, etc.</li>\n</ul>\n<ul>\n<li>Familiarity with cloud platforms (AWS, Google Cloud, Azure) and container technologies (Docker, Kubernetes).</li>\n</ul>\n<ul>\n<li>Knowledge of DevOps and MLOps best practices</li>\n</ul>\n<ul>\n<li>Strong problem-solving abilities and excellent communication skills.</li>\n</ul>\n<p><strong>Benefits</strong></p>\n<ul>\n<li>Generous healthcare package</li>\n</ul>\n<ul>\n<li>401k matching program</li>\n</ul>\n<ul>\n<li>Catered lunches</li>\n</ul>\n<ul>\n<li>Flexible vacation 
time</li>\n</ul>\n<p><strong>Fun fact about me: I love pineapple on pizza.</strong></p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_2a2d718a-f65","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Descript","sameAs":"https://descript.com/","logo":"https://logos.yubhub.co/descript.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/descript/jobs/7580335003","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,000 - $286,000/year","x-skills-required":["Experience in deploying and managing AI models in production","Experience with the tools of large volume data pipelines like spark, flume, dask, etc.","Familiarity with cloud platforms (AWS, Google Cloud, Azure) and container technologies (Docker, Kubernetes)","Knowledge of DevOps and MLOps best practices","Strong problem-solving abilities and excellent communication skills"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:54:12.258Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Experience in deploying and managing AI models in production, Experience with the tools of large volume data pipelines like spark, flume, dask, etc., Familiarity with cloud platforms (AWS, Google Cloud, Azure) and container technologies (Docker, Kubernetes), Knowledge of DevOps and MLOps best practices, Strong problem-solving abilities and excellent communication skills","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180000,"maxValue":286000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_02ba8342-079"},"title":"Specialist Solutions 
Architect - Data Warehousing (Healthcare & Life Sciences)","description":"<p>As a Specialist Solutions Architect (SSA) - Data Warehousing, you will guide customers in their cloud data warehousing transformation with Databricks. You will be in a customer-facing role, working with and supporting Solution Architects, that requires hands-on production experience with large-scale data warehousing technologies and lakehouse architecture.</p>\n<p>The SSA helps customers through evaluations and successful production planning for their business intelligence workloads while aligning their technical roadmap for the Databricks Data Intelligence Platform.</p>\n<p>As a deep go-to-expert reporting to the Specialist Field Engineering Manager, you will continue to strengthen your technical skills through mentorship, learning, and internal training programs and establish yourself in the data warehousing specialty - including performance tuning, data modeling, winning evaluations, architecture design, and production migration planning.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>Provide technical leadership to guide strategic customers to successful cloud transformations on large-scale data warehousing workloads - ranging from evaluation to architecture design to production deployment</li>\n<li>Prove the value of the Databricks Intelligence Platform for customer workloads by architecting production workloads, including end-to-end pipeline load performance testing and optimization</li>\n<li>Become a technical expert in an area such as data warehousing evaluations or helping set up successful workload migrations</li>\n<li>Assist Solution Architects with more advanced aspects of the technical sale including custom proof of concept content, estimating workload sizing and performance, and tuning workloads for production</li>\n<li>Provide tutorials and training to improve community adoption (including hackathons and conference presentations)</li>\n<li>Contribute to the Databricks 
Community</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>5+ years experience in a technical role with expertise in data warehousing - such as query tuning, performance tuning, troubleshooting, data governance, debugging MPP data warehouses or other big data solutions, or migration workloads from EDW other systems</li>\n<li>Experience with design and implementation of data warehousing technologies including relational databases, SQL, data analytics, NoSQL, MPP, OLTP, and OLAP</li>\n<li>Deep Specialty Expertise in at least one of the following areas:</li>\n</ul>\n<p>+ Experience scaling large analytical data workloads in the cloud that are performant and cost-effective \t+ Maintained, extended, or migrated a production data warehouse system to evolve with complex needs, including data modeling, data governance needs, and integration with business intelligence tools \t+ Experience migrating on-premise EDW workloads to the public cloud</p>\n<ul>\n<li>Bachelor&#39;s degree in Computer Science, Information Systems, Engineering, or equivalent experience through work experience</li>\n<li>Production programming experience in SQL and Python, Scala, or Java</li>\n<li>Experience with the AWS, Azure, or GCP clouds</li>\n<li>2 years professional experience with data warehousing and big data technologies (Ex: SQL, Redshift, SAP, Synapse, EMR, OLAP &amp; OLTP workloads)</li>\n<li>2 years customer-facing experience in a pre-sales or post-sales role</li>\n<li>Can meet expectations for technical training and role-specific outcomes within 6 months of hire</li>\n<li>Can travel up to 30% when needed</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_02ba8342-079","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8337429002","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,000-$247,500 USD","x-skills-required":["data warehousing","cloud data warehousing","Databricks","lakehouse architecture","SQL","Python","Scala","Java","AWS","Azure","GCP","data analytics","NoSQL","MPP","OLTP","OLAP"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:54:06.778Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Northeast - United States"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data warehousing, cloud data warehousing, Databricks, lakehouse architecture, SQL, Python, Scala, Java, AWS, Azure, GCP, data analytics, NoSQL, MPP, OLTP, OLAP","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180000,"maxValue":247500,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_b372d3eb-ee1"},"title":"Staff Research Engineer, Applied AI","description":"<p>We are seeking a Staff Research Engineer, Applied AI to lead the development and deployment of novel applications, leveraging Google&#39;s generative AI models.</p>\n<p>This role focuses on rapidly developing new features, and working across partner teams to deliver solutions, and maximize impact for Google and top customers.</p>\n<p>You will be instrumental in translating cutting-edge AI research into real-world products, and demonstrating the capabilities of latest-generation models.</p>\n<p>We are looking for engineers with a strong track 
record of building and shipping AI-powered software, ideally with experience in early-stage environments where they have contributed to scaling products from initial concept to production.</p>\n<p>The ideal candidate will be motivated by the opportunity to drive product &amp; business impact.</p>\n<p>Key responsibilities:</p>\n<ul>\n<li>Harness frontier models to drive real-world high-impact outcomes</li>\n</ul>\n<ul>\n<li>Build evaluations, training data, and infrastructure to support AI deployments and rapid iterations</li>\n</ul>\n<ul>\n<li>Collaborate with researchers and product managers to translate research advancements into tangible product features.</li>\n</ul>\n<ul>\n<li>Contribute to the development of best practices for building and deploying generative AI applications.</li>\n</ul>\n<ul>\n<li>Contribute signal to influence the development of frontier models</li>\n</ul>\n<ul>\n<li>Lead the architecture and development of new products &amp; features from 0 to 1.</li>\n</ul>\n<p>About you:</p>\n<p>In order to set you up for success as a Staff Research Engineer, Applied AI at Google DeepMind, we look for the following skills and experience:</p>\n<p>Required Skills:</p>\n<ul>\n<li>Bachelor&#39;s degree or equivalent practical experience.</li>\n</ul>\n<ul>\n<li>8 years of experience in software development, and with data structures/algorithms.</li>\n</ul>\n<ul>\n<li>5 years of hands-on experience in AI research (e.g. 
RL, finetuning, evals), AI applications, or model deployment</li>\n</ul>\n<ul>\n<li>Proven experience in rapidly developing and shipping software products.</li>\n</ul>\n<ul>\n<li>Deep understanding of software development best practices, including testing &amp; deployment.</li>\n</ul>\n<ul>\n<li>Experience with cloud computing platforms and infrastructure (e.g., Google Cloud Platform, AWS, Azure).</li>\n</ul>\n<ul>\n<li>Substantial experience with machine learning frameworks and libraries such as TensorFlow, PyTorch, Hugging Face, etc.</li>\n</ul>\n<ul>\n<li>Ability to work in a fast-paced environment and adapt to changing priorities.</li>\n</ul>\n<p>Preferred Skills:</p>\n<ul>\n<li>Experience with generative AI research or applications.</li>\n</ul>\n<ul>\n<li>Contributions to open-source projects.</li>\n</ul>\n<ul>\n<li>Experience working in, or founding early stage startups.</li>\n</ul>\n<ul>\n<li>Experience delivering software solutions in a fast-paced, customer-facing environment.</li>\n</ul>\n<p>If you are a passionate machine learning engineer with a drive to build innovative products and a desire to work at the forefront of AI, we encourage you to apply!</p>\n<p>The US base salary range for this full-time position is between $197,000 - $291,000 + bonus + equity + benefits.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_b372d3eb-ee1","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Google DeepMind","sameAs":"https://deepmind.com/","logo":"https://logos.yubhub.co/deepmind.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/deepmind/jobs/7561938","x-work-arrangement":"onsite","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":"$197,000 - $291,000 + bonus + equity + benefits","x-skills-required":["Bachelor's degree or equivalent practical experience","8 years of experience in software 
development, and with data structures/algorithms","5 years of hands-on experience in AI research (e.g. RL, finetuning, evals), AI applications, or model deployment","Proven experience in rapidly developing and shipping software products","Deep understanding of software development best practices, including testing & deployment","Experience with cloud computing platforms and infrastructure (e.g., Google Cloud Platform, AWS, Azure)","Substantial experience with machine learning frameworks and libraries such as TensorFlow, PyTorch, Hugging Face, etc","Ability to work in a fast-paced environment and adapt to changing priorities"],"x-skills-preferred":["Experience with generative AI research or applications","Contributions to open-source projects","Experience working in, or founding early stage startups","Experience delivering software solutions in a fast-paced, customer-facing environment"],"datePosted":"2026-04-18T15:54:04.942Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Mountain View, California, US"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Bachelor's degree or equivalent practical experience, 8 years of experience in software development, and with data structures/algorithms, 5 years of hands-on experience in AI research (e.g. 
RL, finetuning, evals), AI applications, or model deployment, Proven experience in rapidly developing and shipping software products, Deep understanding of software development best practices, including testing & deployment, Experience with cloud computing platforms and infrastructure (e.g., Google Cloud Platform, AWS, Azure), Substantial experience with machine learning frameworks and libraries such as TensorFlow, PyTorch, Hugging Face, etc, Ability to work in a fast-paced environment and adapt to changing priorities, Experience with generative AI research or applications, Contributions to open-source projects, Experience working in, or founding early stage startups, Experience delivering software solutions in a fast-paced, customer-facing environment","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":197000,"maxValue":291000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_c1903386-87b"},"title":"Staff Infrastructure Software Engineer (Kubernetes)","description":"<p>As a member of the infrastructure team, you will design, build, and advance our core infrastructure that allows the engineering team to execute quickly, productively, and securely.</p>\n<p>You will partner with engineers to build dev tools that empower developer workflows and deployment infrastructure.</p>\n<p>Ensure reliability of multi-cloud Kubernetes clusters and pipelines.</p>\n<p>Metrics, logging, analytics, and alerting for performance and security across all endpoints and applications.</p>\n<p>Infrastructure-as-code deployment tooling and supporting services on multiple cloud providers.</p>\n<p>Automate operations and engineering.</p>\n<p>Focus on automation so we can spend energy where it matters.</p>\n<p>Building machine learning infrastructure that enables AI teams to train, test, and deploy on large-scale datasets.</p>\n<p>We are looking for a 
highly skilled engineer with 5+ years of experience in DevOps, Site Reliability Engineering, Production Engineering, or equivalent field.</p>\n<p>Deep proficiency with coding languages such as Golang or Python.</p>\n<p>Deep familiarity with container-related security best practices.</p>\n<p>Production experience working with Kubernetes, and a deep understanding of the Kubernetes ecosystem, including popular open-source tooling such as cert-manager or external-dns.</p>\n<p>Experience with GPU-enabled clusters is a bonus.</p>\n<p>Production experience with Kubernetes templating tools such as Helm or Kustomize.</p>\n<p>Production experience with IAC tools such as Terraform or CloudFormation.</p>\n<p>Production experience working with AWS and services such as IAM, S3, EC2, and EKS.</p>\n<p>Production experience with other cloud providers such as Google Cloud and Azure is a bonus.</p>\n<p>Production experience with database software such as PostgreSQL.</p>\n<p>Experience with GitOps tooling such as Flux or Argo.</p>\n<p>Experience with CI/CD such as GitHub Actions.</p>\n<p>Perks and benefits include paid parental leave, monthly health and wellness allowance, and PTO.</p>\n<p>Compensation includes a base salary, equity, and a variety of benefits.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_c1903386-87b","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Cresta","sameAs":"https://www.cresta.ai/","logo":"https://logos.yubhub.co/cresta.ai.png"},"x-apply-url":"https://job-boards.greenhouse.io/cresta/jobs/4535898008","x-work-arrangement":"remote","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Golang","Python","Kubernetes","cert-manager","external-dns","GPU-enabled clusters","Helm","Kustomize","Terraform","CloudFormation","AWS","IAM","S3","EC2","EKS","Google 
Cloud","Azure","PostgreSQL","GitOps","Flux","Argo","CI/CD","GitHub Actions"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:53:57.717Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Germany (Remote)"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Golang, Python, Kubernetes, cert-manager, external-dns, GPU-enabled clusters, Helm, Kustomize, Terraform, CloudFormation, AWS, IAM, S3, EC2, EKS, Google Cloud, Azure, PostgreSQL, GitOps, Flux, Argo, CI/CD, GitHub Actions"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_26212e9e-5a8"},"title":"Infrastructure Engineer/SRE","description":"<p>We&#39;re seeking an experienced Infrastructure Engineer/SRE to join our engineering team. As a key member of our infrastructure team, you will be responsible for designing, building, and advancing our core infrastructure that allows the engineering team to execute quickly, productively, and securely.</p>\n<p>As a collaborative but highly autonomous working environment, each member has a defined role with clear expectations, as well as the freedom to pursue projects they find interesting.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Partner with engineers to build dev tools that empower developer workflows and deployment infrastructure.</li>\n<li>Ensure reliability of multi-cloud Kubernetes clusters and pipelines.</li>\n<li>Metrics, logging, analytics, and alerting for performance and security across all endpoints and applications.</li>\n<li>Infrastructure-as-code deployment tooling and supporting services on multiple cloud providers.</li>\n<li>Automate operations and engineering. 
Focus on automation so we can spend energy where it matters.</li>\n<li>Building machine learning infrastructure that enables AI teams to train, test, and deploy on large-scale datasets.</li>\n</ul>\n<p>What we are looking for:</p>\n<ul>\n<li>5+ years experience in DevOps, Site Reliability Engineering, Production Engineering, or equivalent field.</li>\n<li>Deep proficiency with coding languages such as Golang or Python.</li>\n<li>Deep familiarity with container-related security best practices.</li>\n<li>Production experience working with Kubernetes, and a deep understanding of the Kubernetes ecosystem, including popular open-source tooling such as cert-manager or external-dns.</li>\n<li>Experience with GPU-enabled clusters is a bonus.</li>\n<li>Production experience with Kubernetes templating tools such as Helm or Kustomize.</li>\n<li>Production experience with IAC tools such as Terraform or CloudFormation.</li>\n<li>Production experience working with AWS and services such as IAM, S3, EC2, and EKS.</li>\n<li>Production experience with other cloud providers such as Google Cloud and Azure is a bonus.</li>\n<li>Production experience with database software such as PostgreSQL.</li>\n<li>Experience with GitOps tooling such as Flux or Argo.</li>\n<li>Experience with CI/CD such as GitHub Actions.</li>\n</ul>\n<p>Perks &amp; Benefits:</p>\n<ul>\n<li>We offer Cresta employees a variety of medical benefits designed to fit your stage of life.</li>\n<li>Flexible vacation time to promote a healthy work-life blend.</li>\n<li>Paid parental leave to support you and your family.</li>\n</ul>\n<p>Compensation for this position includes a base salary, equity, and a variety of benefits. 
Actual base salaries will be based on candidate-specific factors, including experience, skillset, and location, and local minimum pay requirements as applicable.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_26212e9e-5a8","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Cresta","sameAs":"https://www.cresta.ai/","logo":"https://logos.yubhub.co/cresta.ai.png"},"x-apply-url":"https://job-boards.greenhouse.io/cresta/jobs/5113847008","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Golang","Python","Kubernetes","cert-manager","external-dns","GPU-enabled clusters","Helm","Kustomize","Terraform","CloudFormation","AWS","IAM","S3","EC2","EKS","Google Cloud","Azure","PostgreSQL","GitOps","Flux","Argo","CI/CD","GitHub Actions"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:53:55.875Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Australia (Remote)"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Golang, Python, Kubernetes, cert-manager, external-dns, GPU-enabled clusters, Helm, Kustomize, Terraform, CloudFormation, AWS, IAM, S3, EC2, EKS, Google Cloud, Azure, PostgreSQL, GitOps, Flux, Argo, CI/CD, GitHub Actions"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_47272a53-cc9"},"title":"Engineering Manager - Databricks SQL Control Plane","description":"<p>We are seeking an Engineering Manager to spearhead the development of a new service and architecture for the next generation of our product, Databricks SQL Control Plane. 
As an Engineering Manager, you will lead a team of talented software engineers to build the next-generation low-latency, multi-tenanted, cloud native data warehousing system. Your responsibilities will include growing the team by hiring strong engineering talent, leading and participating in technical, product and design discussions relating to cloud native database systems, managing and operating a highly available service in the cloud, growing leaders on the team by providing coaching, mentorship and growth opportunities, playing a key role in defining the product and engineering roadmap for the team, partnering with other engineering and product leaders on planning, prioritization and staffing, and creating a culture of excellence on the team while leading with empathy.</p>\n<p>Key requirements for this role include 5+ years experience working in database systems, data processing or related domains, experience building highly available cloud services on AWS, GCP or Azure, building, growing and managing high performance teams, experience defining and meeting SLOs for highly available systems, ability to attract and hire engineers who meet the Databricks hiring standards, and comfort working cross-functionally with product management and partners to build products that drive user growth.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_47272a53-cc9","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com/","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8472398002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$190,000-$261,250 USD","x-skills-required":["database systems","data processing","cloud native database systems","highly available cloud 
services","AWS","GCP","Azure","team management","leadership development","product roadmap development","cross-functional collaboration"],"x-skills-preferred":["Rust"],"datePosted":"2026-04-18T15:53:48.071Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Mountain View, California"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"database systems, data processing, cloud native database systems, highly available cloud services, AWS, GCP, Azure, team management, leadership development, product roadmap development, cross-functional collaboration, Rust","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":190000,"maxValue":261250,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_bc54ed6c-ca0"},"title":"Full-Stack Engineer, Core Services (Senior Level)","description":"<p>We&#39;re looking for a Full-Stack Engineer to join our Core Services team. As a senior-level engineer, you&#39;ll design, build, and optimise the core systems and management platforms that power the Instabase platform.</p>\n<p>This is a high-impact role for a &#39;product-minded engineer&#39;. In our Core Services team, we treat our platform as a product. Because we operate with a lean team, you will have end-to-end ownership: from writing Product Requirement Documents (PRDs) and building the high-performance backend services and scalable infrastructure that support them.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Full Stack Development: You will function as a product-minded engineer for our internal platform. 
This involves architecting secure infrastructure (Kubernetes, Docker) and backend services (Go, Python, PostgresDB), while also building the frontend interfaces (React, TS) to support features.</li>\n</ul>\n<ul>\n<li>Developer Experience: Create the internal platforms and dashboards that improve developer velocity, reliability, and observability across the entire organisation.</li>\n</ul>\n<ul>\n<li>Technical Leadership: Act as a technical leader who mentors junior engineers, contributes to the entire infrastructure codebase, and identifies root causes for critical system issues.</li>\n</ul>\n<p>About you:</p>\n<ul>\n<li>Education: BS, MS, or PhD in Computer Science, or equivalent experience in a technical field such as Physics or Mathematics.</li>\n</ul>\n<ul>\n<li>Experience: 5+ years of professional software development experience with a strong foundation in CS fundamentals.</li>\n</ul>\n<ul>\n<li>Backend Expertise: Proficiency in Go and Python, with a deep understanding of building scalable backend services and APIs.</li>\n</ul>\n<ul>\n<li>Frontend Expertise: Strong experience with React, TypeScript, and JavaScript for building complex, data-rich web applications.</li>\n</ul>\n<ul>\n<li>Infrastructure &amp; Orchestration: Proficiency with Docker, Kubernetes, and cloud infrastructure (AWS, GCP, or Azure).</li>\n</ul>\n<ul>\n<li>Product Thinking &amp; UI Design: You are comfortable functioning as your own PM and Designer and write technical specs (PRDs) to define how users interact with infrastructure.</li>\n</ul>\n<ul>\n<li>Communication: Excellent communication skills to represent technical and product decisions to the wider engineering team.</li>\n</ul>\n<p>Good to have:</p>\n<ul>\n<li>Experience with React Native for mobile or cross-platform applications.</li>\n</ul>\n<ul>\n<li>Prior experience in a startup environment where you handled multi-functional responsibilities (Dev, PM, and Design).</li>\n</ul>\n<p>Compensation: The base salary range for this role 
is $190,000 to $205,000 + bonus, equity and US benefits.</p>\n<p>US Benefits:</p>\n<ul>\n<li>Flexible PTO: Because life is better when you actually live it!</li>\n</ul>\n<ul>\n<li>Comprehensive Coverage: Top-notch medical, dental, and vision insurance.</li>\n</ul>\n<ul>\n<li>401(k) with Matching: We’ve got your back for a secure future.</li>\n</ul>\n<ul>\n<li>Parental Leave &amp; Fertility Benefits: Supporting you in growing your family, your way.</li>\n</ul>\n<ul>\n<li>Therapy Sessions Covered: Mental health matters, 10 free sessions through Samata Health.</li>\n</ul>\n<ul>\n<li>Wellness Stipend: For gym memberships, fitness tech, or whatever keeps you thriving.</li>\n</ul>\n<ul>\n<li>Lunch on Us: Enjoy a lunch credit when you&#39;re in the office.</li>\n</ul>\n<p>#LI-Hybrid</p>\n<p>Instabase is an Equal Opportunity Employer. Qualified applicants will receive consideration for employment without regard to race, color, religion, sex, sexual orientation, gender perception or identity, national origin, age, marital status, protected veteran status, or disability status.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_bc54ed6c-ca0","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Instabase","sameAs":"https://www.instabase.com/","logo":"https://logos.yubhub.co/instabase.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/instabase/jobs/8186577002","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$190,000 to $205,000 + bonus, equity and US benefits","x-skills-required":["Go","Python","PostgresDB","Kubernetes","Docker","React","TypeScript","JavaScript","Cloud infrastructure (AWS, GCP, or Azure)"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:53:35.809Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, 
CA"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Go, Python, PostgresDB, Kubernetes, Docker, React, TypeScript, JavaScript, Cloud infrastructure (AWS, GCP, or Azure)","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":190000,"maxValue":205000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_34a50895-413"},"title":"Senior Technical Solutions Engineer - Platform (Greater China Region)","description":"<p>As a Senior Technical Solutions Engineer, you will provide technical support for Databricks Platform related issues and resolve any challenges involving the Databricks unified analytics platform.</p>\n<p>You will assist customers in their Databricks journey and provide them with the guidance and knowledge that they need to accomplish value and achieve their strategic goals using our products.</p>\n<p>They will look to you for answers to everything from basic technical questions to complex architectural scenarios spanning across the entire Big Data ecosystem.</p>\n<p>You will report to the Senior Manager of Technical Solutions.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Troubleshoot and resolve complex customer issues related to Databricks platform</li>\n<li>Provide best practices support for custom-built solutions developed by Databricks customers</li>\n<li>Deliver suggestions for improving performance in customer-specific environments</li>\n<li>Assist with issues around third-party integrations with Databricks environment</li>\n<li>Demonstrate and coordinate with engineering and escalation teams to achieve resolution of customer issues and requests</li>\n<li>Participate in the creation and maintenance of company documentation and knowledge articles</li>\n<li>Be a true proponent of customer advocacy</li>\n<li>Strengthen your AWS/Azure and Databricks platform expertise 
through learning and internal training programs</li>\n<li>Participate in weekend and weekday on call rotation</li>\n</ul>\n<p>Requirements:</p>\n<ul>\n<li>4+ years experience designing, building, testing, and maintaining Python/Java/Scala based applications</li>\n<li>Expert level knowledge in python is desired</li>\n<li>Strong experience with SQL-based database is required</li>\n<li>Linux/Unix administration skills</li>\n<li>Hands-on experience with AWS, Azure or GCP</li>\n<li>Experience with &quot;Distributed Big Data Computing&quot; environment</li>\n<li>Technical degree or the equivalent experience</li>\n<li>Proficiency in Mandarin and English is a must as this role serves clients based in the Greater China Region and involves direct customer communications in Mandarin</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_34a50895-413","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com/","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8407891002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Python","Java","Scala","SQL","Linux/Unix administration","AWS","Azure","GCP","Distributed Big Data Computing"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:53:29.197Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Singapore"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, Java, Scala, SQL, Linux/Unix administration, AWS, Azure, GCP, Distributed Big Data Computing"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_bec4e006-74f"},"title":"Consultant, Developer 
Platform","description":"<p>About the role: Cloudflare provides advisory and hands-on-keyboard implementation and migration services for enterprise customers. As a Consultant for Developer Platform, you are an individual contributor working in the post-sales landscape, responsible for the technical execution of solutions and guidance to our customers, following a consultative approach, to get the most value possible from their Cloudflare investment.</p>\n<p>You are an expert in Developer Platform products or equivalent and will focus on building and deploying serverless applications with scale, performance, security and reliability leveraging: Workers, Workers KV, Workers AI, D1, R2, Images, and many other products.</p>\n<p>This position has working hours Monday to Friday 09:00 a.m. to 06:00 p.m. Occasionally, we support our customers during the weekends for specific changes that need to be done outside of their business hours. Travel is expected to be around 40%.</p>\n<p>Experience might include a combination of the skills below:</p>\n<ul>\n<li>Plan and deliver timely and organized services for customers, ensure customers see the full value in Cloudflare’s products and advice on product best practices.</li>\n<li>Gather business and technical requirements, use cases and any other information required to build, migrate and deliver a solution on behalf of the customer and transition the Cloudflare working environment to the customer.</li>\n<li>Produce a Solution Design, HLD, LLD, databuilds, procedures, scripts, test plans, drawings, deployment plan, migration plan, as-builts, and any other artifacts necessary to deliver the solution and transition smoothly into the customer’s technical teams.</li>\n<li>Implement changes on behalf of the customer in the Cloudflare environment following the customer’s change management process.</li>\n<li>Proven experience with Cloudflare or similar with Workers, Javascript/Typescript and Workers APIs.</li>\n<li>Troubleshoot 
implementation issues and collaborate with Customer Support, Engineering and other teams to assist technical escalations.</li>\n<li>Contribute towards the success of the organization through knowledge sharing activities such as contributing to internal and external documentation, answering technical Q&amp;A, and helping to iterate on best practices.</li>\n</ul>\n<p>Support building operational assets like templates, automation scripts, procedures, workflows, etc.</p>\n<p>Experience might include a combination of the skills below:</p>\n<ul>\n<li>3+ years of experience in a customer facing position as a Consultant delivering services.</li>\n<li>Demonstrated experience with:</li>\n</ul>\n<p>Developing serverless code in a CI/CD pipeline using an Agile methodology. Layers and protocols of the OSI model, such as TCP/IP, TLS, DNS, HTTP Scripting languages A scripting language (e.g. Python, JavaScript, Bash) and a desire to expand those skills. Infrastructure as code tools like Terraform. Strong experience with APIs. CI/CD pipelines using Azure DevOps or Git. Implementation and troubleshooting experience, knowledge of tools to troubleshoot, observability, logs, etc. Good understanding and knowledge of:</p>\n<p>Internet and Security technologies such as DDoS, Web Application Firewall, Certificates, DNS, CDN, Analytics and Logs. Security aspects of an internet property, such as DNS, WAFs, Bot Management, Rate Limiting, (M)TLS, certificates, OWASP. Performance aspects of an internet property, such as Speed, Latency, Caching, HTTP/3, TLSv1.3.</p>\n<p>Strong advantage if:</p>\n<p>You have worked with a Cybersecurity company or products and have performed migrations using migration tools. You have developed application security and performance capabilities. Ability to manage a project, work to deadlines, prioritize between competing demands and manage uncertainty.</p>\n<p>The work will be performed in English. 
Fluency in a second regional European language is a strong advantage.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_bec4e006-74f","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Cloudflare","sameAs":"https://www.cloudflare.com/","logo":"https://logos.yubhub.co/cloudflare.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/cloudflare/jobs/7383013","x-work-arrangement":"hybrid","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Developing serverless code in a CI/CD pipeline using an Agile methodology","Layers and protocols of the OSI model, such as TCP/IP, TLS, DNS, HTTP","Scripting languages","Infrastructure as code tools like Terraform","Strong experience with APIs","CI/CD pipelines using Azure DevOps or Git","Implementation and troubleshooting experience, knowledge of tools to troubleshoot, observability, logs, etc","Good understanding and knowledge of Internet and Security technologies such as DDoS, Web Application Firewall, Certificates, DNS, CDN, Analytics and Logs","Security aspects of an internet property, such as DNS, WAFs, Bot Management, Rate Limiting, (M)TLS, certificates, OWASP","Performance aspects of an internet property, such as Speed, Latency, Caching, HTTP/3, TLSv1.3"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:53:29.137Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Hybrid"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Developing serverless code in a CI/CD pipeline using an Agile methodology, Layers and protocols of the OSI model, such as TCP/IP, TLS, DNS, HTTP, Scripting languages, Infrastructure as code tools like Terraform, Strong experience with APIs, CI/CD pipelines using Azure DevOps or Git, Implementation and troubleshooting experience, 
knowledge of tools to troubleshoot, observability, logs, etc, Good understanding and knowledge of Internet and Security technologies such as DDoS, Web Application Firewall, Certificates, DNS, CDN, Analytics and Logs, Security aspects of an internet property, such as DNS, WAFs, Bot Management, Rate Limiting, (M)TLS, certificates, OWASP, Performance aspects of an internet property, such as Speed, Latency, Caching, HTTP/3, TLSv1.3"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_0e8e8a8f-db0"},"title":"Staff Software Engineer - Node.js  (JavaScript or TypeScript)","description":"<p><strong>Job Title\\nStaff Software Engineer - Node.js (JavaScript or TypeScript)\\n\\n## Company Overview\\nOkta is a developer-friendly identity platform that simplifies authentication and authorization for applications.\\n\\n## Role Description\\nWe are hiring for a new team within Core Identity, the Engineering organization entrusted with the very heart of the Auth0 application. Our teams own the authentication pipeline, identity protocols, user sessions, and all the fundamental concepts and foundational elements that underpin our entire product.\\n\\nAs a Staff Engineer for this new team, named Core Frontier, you will lead at the vital intersection of deep product innovation and the global customer experience. 
Your mission is to ensure that the sophisticated features developed across the Core Identity organization (such as Native to Web, Cross-App Access and Custom Token Exchange) are hardened, scaled, and seamlessly integrated into the Auth0 ecosystem.\\n\\n## Responsibilities\\n<em> Be a founding Staff member of this new team in Bengaluru, setting the technical bar and engineering culture for our growing presence in the region and working in collaboration with our global teams in Europe and North America.\\n</em> Lead the design and delivery of innovative features that extend the capabilities of Auth0’s platform to help our customers innovate around the world securely and delightfully.\\n<em> Take ownership of the strategic technical quality, security, reliability, and scalability of our systems. You&#39;ll drive architectural improvements and advocate for engineering best practices.\\n</em> Identify architectural gaps in our current &quot;frontier&quot; and advocate for long-term improvements that benefit the entire Core Identity organization.\\n<em> Thrive in a highly collaborative and cross-functional environment, working with talented engineers and partners across Product, Security, Design, Architecture and QA to deliver features that delight our customers and ensure a unified technical vision.\\n</em> Deepen or gain expertise in identity, security, and modern cloud technologies (AWS, Azure) while working on distributed systems at scale.\\n<em> Mentor other engineers and contribute to our culture of technical excellence and continuous improvement.\\n</em> Participate in an on-call rotation to ensure our critical services remain healthy and reliable.\\n\\n## Requirements\\n<em> 8+ years of professional software development experience, or equivalent.\\n</em> Proficiency in designing and building services with Node.js (JavaScript or TypeScript).\\n<em> Experience creating and maintaining public and secure APIs, as well as front ends.\\n</em> Experience designing, 
building, and operating distributed systems in a cloud environment (e.g., AWS, Azure).\\n<em> A strong commitment to quality, with experience in various testing strategies (e.g., unit, integration, end-to-end).\\n</em> A proven track record of driving technical alignment across multiple teams and mentoring senior-level individual contributors.\\n<em> A product-oriented mindset, with the ability to understand customer needs and work collaboratively to find effective solutions.\\n\\n## Nice to Have\\n</em> Experience in the Identity and Access Management (IAM) domain.\\n<em> Knowledge of security engineering principles and application security best practices.\\n\\n## What You Can Look Forward To\\n</em> Amazing Benefits\\n<em> Making Social Impact\\n</em> Developing Talent and Fostering Connection + Community at Okta\\n\\n## Okta Experience\\nOkta cultivates a dynamic work environment, providing the best tools, technology and benefits to empower our employees to work productively in a setting that best and uniquely suits their needs.</strong></p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_0e8e8a8f-db0","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Okta","sameAs":"https://www.okta.com/","logo":"https://logos.yubhub.co/okta.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/okta/jobs/7602354","x-work-arrangement":"hybrid","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Node.js","JavaScript","TypeScript","APIs","Frontends","Distributed Systems","Cloud Environment","AWS","Azure","Security","Identity and Access Management"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:53:28.361Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Bengaluru, 
India"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Node.js, JavaScript, TypeScript, APIs, Frontends, Distributed Systems, Cloud Environment, AWS, Azure, Security, Identity and Access Management"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_d50772ab-afe"},"title":"Staff / Senior Software Engineer, Cloud Inference","description":"<p>We are seeking a Staff / Senior Software Engineer to join our Cloud Inference team. The successful candidate will design and build infrastructure that serves Claude across multiple cloud service providers (CSPs), accounting for differences in compute hardware, networking, APIs, and operational models.</p>\n<p>The ideal candidate will have significant software engineering experience, with a strong background in high-performance, large-scale distributed systems serving millions of users. They will also have experience building or operating services on at least one major cloud platform (AWS, GCP, or Azure), with exposure to Kubernetes, Infrastructure as Code or container orchestration.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Design and build infrastructure that serves Claude across multiple CSPs, accounting for differences in compute hardware, networking, APIs, and operational models</li>\n</ul>\n<ul>\n<li>Collaborate with CSP partner engineering teams to resolve operational issues, influence provider roadmaps, and stand up end-to-end serving on new cloud platforms</li>\n</ul>\n<ul>\n<li>Design and evolve CI/CD automation systems, including validation and deployment pipelines, that reliably ship new model versions to millions of users across cloud platforms without regressions</li>\n</ul>\n<ul>\n<li>Design interfaces and tooling abstractions across CSPs that enable cost-effective inference management, scale across providers, and reduce per-platform complexity</li>\n</ul>\n<ul>\n<li>Contribute to capacity 
planning and autoscaling strategies that dynamically match supply with demand across CSP validation and production workloads</li>\n</ul>\n<ul>\n<li>Optimise inference cost and performance across providers, designing workload placement and routing systems that direct requests to the most cost-effective accelerator and region</li>\n</ul>\n<ul>\n<li>Contribute to inference features that must work consistently across all platforms</li>\n</ul>\n<ul>\n<li>Analyse observability data across providers to identify performance bottlenecks, cost anomalies, and regressions, and drive remediation based on real-world production workloads</li>\n</ul>\n<p>Requirements:</p>\n<ul>\n<li>Significant software engineering experience, with a strong background in high-performance, large-scale distributed systems serving millions of users</li>\n</ul>\n<ul>\n<li>Experience building or operating services on at least one major cloud platform (AWS, GCP, or Azure), with exposure to Kubernetes, Infrastructure as Code or container orchestration</li>\n</ul>\n<ul>\n<li>Strong interest in inference</li>\n</ul>\n<ul>\n<li>Thrive in cross-functional collaboration with both internal teams and external partners</li>\n</ul>\n<ul>\n<li>Are a fast learner who can quickly ramp up on new technologies, hardware platforms, and provider ecosystems</li>\n</ul>\n<ul>\n<li>Are highly autonomous and self-driven, taking ownership of problems end-to-end with a bias toward flexibility and high-impact work</li>\n</ul>\n<ul>\n<li>Pick up slack, even when it goes outside your job description</li>\n</ul>\n<p>Preferred skills:</p>\n<ul>\n<li>Direct experience working with CSP partner teams to scale infrastructure or products across multiple platforms, navigating differences in networking, security, privacy, billing, and managed service offerings</li>\n</ul>\n<ul>\n<li>A background in building platform-agnostic tooling or abstraction layers that work across cloud providers</li>\n</ul>\n<ul>\n<li>Hands-on experience with 
capacity management, cost optimisation, or resource planning at scale across heterogeneous environments</li>\n</ul>\n<ul>\n<li>Strong familiarity with LLM inference optimisation, batching, caching, and serving strategies</li>\n</ul>\n<ul>\n<li>Experience with Machine learning infrastructure including GPUs, TPUs, Trainium, or other AI accelerators</li>\n</ul>\n<ul>\n<li>Background designing and building CI/CD systems that automate deployment and validation across cloud environments</li>\n</ul>\n<ul>\n<li>Solid understanding of multi-region deployments, geographic routing, and global traffic management</li>\n</ul>\n<ul>\n<li>Proficiency in Python or Rust</li>\n</ul>\n<p>Salary Range: $300,000-$485,000 USD</p>\n<p>Experience Level: Staff</p>\n<p>Employment Type: Full-time</p>\n<p>Workplace Type: Hybrid</p>\n<p>Category: Engineering</p>\n<p>Industry: Technology</p>\n<p>Required Skills:</p>\n<ul>\n<li>High-performance, large-scale distributed systems</li>\n</ul>\n<ul>\n<li>Cloud computing (AWS, GCP, Azure)</li>\n</ul>\n<ul>\n<li>Kubernetes</li>\n</ul>\n<ul>\n<li>Infrastructure as Code</li>\n</ul>\n<ul>\n<li>Container orchestration</li>\n</ul>\n<ul>\n<li>Inference</li>\n</ul>\n<ul>\n<li>Cross-functional collaboration</li>\n</ul>\n<ul>\n<li>Autonomy and self-driven</li>\n</ul>\n<ul>\n<li>Platform-agnostic tooling</li>\n</ul>\n<ul>\n<li>Capacity management</li>\n</ul>\n<ul>\n<li>Cost optimisation</li>\n</ul>\n<ul>\n<li>Resource planning</li>\n</ul>\n<ul>\n<li>LLM inference optimisation</li>\n</ul>\n<ul>\n<li>Machine learning infrastructure</li>\n</ul>\n<ul>\n<li>CI/CD systems</li>\n</ul>\n<ul>\n<li>Multi-region deployments</li>\n</ul>\n<ul>\n<li>Geographic routing</li>\n</ul>\n<ul>\n<li>Global traffic management</li>\n</ul>\n<ul>\n<li>Python</li>\n</ul>\n<ul>\n<li>Rust</li>\n</ul>\n<p>Preferred Skills:</p>\n<ul>\n<li>Direct experience working with CSP partner teams</li>\n</ul>\n<ul>\n<li>Building platform-agnostic tooling</li>\n</ul>\n<ul>\n<li>Hands-on experience with 
capacity management</li>\n</ul>\n<ul>\n<li>Strong familiarity with LLM inference optimisation</li>\n</ul>\n<ul>\n<li>Experience with Machine learning infrastructure</li>\n</ul>\n<ul>\n<li>Background designing and building CI/CD systems</li>\n</ul>\n<ul>\n<li>Solid understanding of multi-region deployments</li>\n</ul>\n<ul>\n<li>Proficiency in Python or Rust</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_d50772ab-afe","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5107466008","x-work-arrangement":"hybrid","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":"$300,000-$485,000 USD","x-skills-required":["high-performance, large-scale distributed systems","cloud computing (AWS, GCP, Azure)","kubernetes","infrastructure as code","container orchestration","inference","cross-functional collaboration","autonomy and self-driven","platform-agnostic tooling","capacity management","cost optimisation","resource planning","llm inference optimisation","machine learning infrastructure","ci/cd systems","multi-region deployments","geographic routing","global traffic management","python","rust"],"x-skills-preferred":["direct experience working with csp partner teams","building platform-agnostic tooling","hands-on experience with capacity management","strong familiarity with llm inference optimisation","experience with machine learning infrastructure","background designing and building ci/cd systems","solid understanding of multi-region deployments","proficiency in python or rust"],"datePosted":"2026-04-18T15:53:24.048Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA | Seattle, 
WA"}},"employmentType":"FULL_TIME","occupationalCategory":"engineering","industry":"technology","skills":"high-performance, large-scale distributed systems, cloud computing (AWS, GCP, Azure), kubernetes, infrastructure as code, container orchestration, inference, cross-functional collaboration, autonomy and self-driven, platform-agnostic tooling, capacity management, cost optimisation, resource planning, llm inference optimisation, machine learning infrastructure, ci/cd systems, multi-region deployments, geographic routing, global traffic management, python, rust, direct experience working with csp partner teams, building platform-agnostic tooling, hands-on experience with capacity management, strong familiarity with llm inference optimisation, experience with machine learning infrastructure, background designing and building ci/cd systems, solid understanding of multi-region deployments, proficiency in python or rust","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":300000,"maxValue":485000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_7a3f562b-768"},"title":"Senior Staff Software Engineer, API","description":"<p>About Anthropic\\n\\nAnthropic&#39;s mission is to create reliable, interpretable, and steerable AI systems. We want AI to be safe and beneficial for our users and for society as a whole.\\n\\nAbout the role\\n\\nAnthropic is seeking an exceptional Senior Staff Software Engineer to join the Claude Developer Platform team and serve as the senior-most individual contributor across API Engineering. Since launch, the Claude API has seen rapid growth and adoption by companies of all sizes to build AI applications with our industry-leading models. 
The API serves as the primary channel for safely and broadly distributing AI&#39;s benefits across all sectors of the economy.\\n\\nThis role sets the technical direction for the systems that make Claude accessible to developers, enterprises, and partners at scale. You will operate at the intersection of technical strategy and execution, partnering closely with Research, Inference, Platform, Infrastructure, and Safeguards to ensure the Claude API is reliable, capable, and positioned to grow with Anthropic&#39;s ambitions.\\n\\nResponsibilities\\n\\n- Define and drive multi-year technical strategy for the Claude API, setting direction across API Core, Capabilities, Knowledge, Distributability, and Agents.\\n\\n- Identify and personally lead the highest-complexity, highest-impact engineering initiatives spanning multiple teams.\\n\\n- Serve as the primary technical decision-maker for major architectural decisions with org-wide scope.\\n\\n- Partner with Research to evaluate and integrate frontier capabilities; work with Inference and Platform for reliable delivery at scale; collaborate with Infrastructure and Safeguards for reliability, security, and responsible deployment.\\n\\n- Mentor and develop Staff-level engineers across the org.\\n\\n- Drive alignment across Product, GTM, Safety, and beyond while proactively identifying and addressing systemic technical risks.\\n\\nYou may be a good fit if you:\\n\\n- Have 12+ years of engineering experience with a clear track record operating at Staff or Senior Staff level.\\n\\n- Have demonstrably shaped technical strategy for large-scale API or distributed systems platforms.\\n\\n- Drive the highest-leverage technical outcomes without formal authority, you lead through influence, quality of thinking, and trust.\\n\\n- Have deep expertise in distributed systems and API architecture, and are effective writing design docs, making architectural calls, and coding in critical paths.\\n\\n- Are highly effective across org 
boundaries, you build trust with Research, Inference, Infrastructure, Safeguards, and business stakeholders alike.\\n\\n- Bring strong product instincts and a craftsperson&#39;s approach to API design; you communicate clearly with both technical and non-technical audiences.\\n\\nTechnical Stack\\n\\n- Languages: Python, TypeScript\\n\\n- Frameworks: FastAPI, React\\n\\n- Infrastructure: GCP, Kubernetes, Cloud Run, AWS, Azure\\n\\n- Databases: PostgreSQL (AlloyDB), Vector Stores, Firestore\\n\\n- Tools: Feature Flagging, Prometheus, Grafana, Datadog\\n\\nDeadline to apply: None. Applications will be reviewed on a rolling basis.\\n\\nLocation Preference: Preference will be given to candidates based in New York or the San Francisco Bay Area as these positions are part of an SF- or NY-based team.\\n\\nThe annual compensation range for this role is listed below.\\n\\nFor sales roles, the range provided is the role’s On Target Earnings (&quot;OTE&quot;) range, meaning that the range includes both the sales commissions/sales bonuses target and annual base salary for the role.\\n\\nAnnual Salary: $405,000-$485,000 USD\\n\\n</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_7a3f562b-768","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anthropic","sameAs":"https://www.anthropic.com/","logo":"https://logos.yubhub.co/anthropic.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/anthropic/jobs/5134895008","x-work-arrangement":"hybrid","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":"$405,000-$485,000 USD","x-skills-required":["Python","TypeScript","FastAPI","React","GCP","Kubernetes","Cloud Run","AWS","Azure","PostgreSQL","Vector Stores","Firestore","Feature 
Flagging","Prometheus","Grafana","Datadog"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:53:15.123Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco, CA | New York City, NY"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, TypeScript, FastAPI, React, GCP, Kubernetes, Cloud Run, AWS, Azure, PostgreSQL, Vector Stores, Firestore, Feature Flagging, Prometheus, Grafana, Datadog","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":405000,"maxValue":485000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_61460f7d-087"},"title":"Associate Solutions Engineer","description":"<p>About Us</p>\n<p>At Cloudflare, we are on a mission to help build a better Internet. Today the company runs one of the world’s largest networks that powers millions of websites and other Internet properties for customers ranging from individual bloggers to SMBs to Fortune 500 companies.</p>\n<p>The Cloudflare Associate Solution Engineering Program is a 12-month rotational experience designed to launch your career in pre-sales engineering. 
You&#39;ll combine technical depth, customer problem-solving, and business acumen to make Cloudflare&#39;s technology accessible and valuable for customers across Asia-Pacific.</p>\n<p>Responsibilities</p>\n<ul>\n<li>Shadow customer calls and technical deep-dives with Enterprise and Strategic accounts</li>\n<li>Build and deliver product demonstrations tailored to customer use cases (web security, performance, serverless computing)</li>\n<li>Participate in workshops on Cloudflare technologies: Workers, Zero Trust, DNS, DDoS mitigation, WAF</li>\n<li>Collaborate with Sales, Product, and Engineering teams to solve customer technical questions</li>\n<li>Document customer requirements and translate them into solution architectures</li>\n<li>Rotate between GCR, ANZ, and ASEAN customer teams every 4 months</li>\n<li>Contribute to internal tooling, demo environments, or solution accelerators</li>\n</ul>\n<p>Requirements</p>\n<ul>\n<li>Have graduated within the past 2 years (or have equivalent demonstrated technical experience through boot camps, self-study, or professional work)</li>\n<li>Can explain core networking concepts (e.g., how DNS resolution works, what happens when you visit a URL, difference between TCP/UDP)</li>\n<li>Are available to start in July 2026 and commit to 12 months including regional rotations</li>\n<li>Communicate fluently in English (written and verbal)</li>\n<li>Can manage multiple concurrent projects with competing deadlines</li>\n<li>Are authorized to work without sponsorship</li>\n</ul>\n<p>Nice to Have</p>\n<ul>\n<li>Internship or project experience in a customer-facing, consulting, or technical sales environment</li>\n<li>Proficiency in Mandarin, Cantonese, or Bahasa Indonesia (for serving regional customers)</li>\n<li>Scripting skills in Python, JavaScript, Bash, or similar</li>\n<li>Hands-on experience with web technologies: HTML/CSS/JS, HTTP APIs, or cloud platforms (AWS/GCP/Azure)</li>\n<li>Demonstrated ownership of technical projects 
(GitHub portfolio, conference talks, open-source contributions)</li>\n</ul>\n<p>Technologies you&#39;ll work with:</p>\n<ul>\n<li>Cloudflare&#39;s edge network</li>\n<li>Workers (serverless)</li>\n<li>Zero Trust security</li>\n<li>DNS/CDN</li>\n<li>DDoS mitigation</li>\n<li>WAF</li>\n<li>API Gateway</li>\n<li>R2 storage</li>\n<li>Stream</li>\n<li>Images</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_61460f7d-087","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Cloudflare","sameAs":"https://www.cloudflare.com/","logo":"https://logos.yubhub.co/cloudflare.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/cloudflare/jobs/7817971","x-work-arrangement":"hybrid","x-experience-level":"entry","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Cloudflare's edge network","Workers (serverless)","Zero Trust security","DNS/CDN","DDoS mitigation","WAF","API Gateway","R2 storage","Stream","Images"],"x-skills-preferred":["Python","JavaScript","Bash","HTML/CSS/JS","HTTP APIs","cloud platforms (AWS/GCP/Azure)"],"datePosted":"2026-04-18T15:52:46.368Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Hybrid"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Cloudflare's edge network, Workers (serverless), Zero Trust security, DNS/CDN, DDoS mitigation, WAF, API Gateway, R2 storage, Stream, Images, Python, JavaScript, Bash, HTML/CSS/JS, HTTP APIs, cloud platforms (AWS/GCP/Azure)"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_e9ea7ddb-b7d"},"title":"Director, Field Engineering","description":"<p>We are looking for a Director, Field Engineering in the Benelux to join our world-class hyper-growth organisation.</p>\n<p>In this role, you will lead 
first-line Managers and teams of pre-sales Solutions Architects focusing on complex accounts, helping to drive our expansion in the Benelux across various industries.</p>\n<p>Your experience in partnering with sales organisations will help to grow consumption, whilst coaching new sales and pre-sales team members to work together and raise the bar to best in class.</p>\n<p>You will guide your team and be involved with opportunities to enhance your team&#39;s effectiveness.</p>\n<p>Key responsibilities include:</p>\n<ul>\n<li>Hire and manage first-line Managers and a growing team of technical pre-sales Solutions Architects</li>\n<li>Build a collaborative culture within a rapid-growth team</li>\n<li>Support increased return on investment of Solutions Architect involvement in sales cycles</li>\n<li>Create trust-based relationships with customers for the long term and understand category-specific landscapes and trends</li>\n<li>Promote a solution and value-based selling field-engineering organisation</li>\n</ul>\n<p>To be successful in this role, you will need:</p>\n<ul>\n<li>5+ years of second-line leadership experience, manager of managers with teams of 20+ individuals</li>\n<li>Relevant high-growth enterprise software pre-sales success with senior-level tenure at a reputable software company, with experience of the EMEA region</li>\n<li>Ability to elevate the engagement with a track record of driving large transactions and high growth customers</li>\n<li>Proven leadership ability to influence, develop, and empower your team to achieve objectives with a team approach</li>\n<li>Proven track record of transformational success and delivery of customer value</li>\n<li>Track record of building strong ecosystems of lucrative customer relationships and cross-functional partnerships</li>\n<li>Experience in complex strategic accounts generating +$5M ARR</li>\n<li>Knowledgeable in and passionate about data-driven decisions, AI, and Cloud software models</li>\n<li>Great at 
instituting processes for technical field members to improve efficiency</li>\n</ul>\n<p>Required skills include:</p>\n<ul>\n<li>Leadership</li>\n<li>Sales</li>\n<li>Pre-sales</li>\n<li>Solutions architecture</li>\n<li>Customer relationship management</li>\n<li>Data-driven decision making</li>\n<li>AI</li>\n<li>Cloud software</li>\n</ul>\n<p>Preferred skills include:</p>\n<ul>\n<li>Programming languages (e.g. Python, Java)</li>\n<li>Data analysis tools (e.g. SQL, Tableau)</li>\n<li>Cloud platforms (e.g. AWS, Azure)</li>\n</ul>\n<p>If you are a motivated and experienced professional looking to take on a challenging role, please submit your application.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_e9ea7ddb-b7d","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8304674002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Leadership","Sales","Pre-sales","Solutions architecture","Customer relationship management","Data-driven decision making","AI","Cloud software"],"x-skills-preferred":["Programming languages (e.g. Python, Java)","Data analysis tools (e.g. SQL, Tableau)","Cloud platforms (e.g. AWS, Azure)"],"datePosted":"2026-04-18T15:52:35.246Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Amsterdam, Netherlands"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Leadership, Sales, Pre-sales, Solutions architecture, Customer relationship management, Data-driven decision making, AI, Cloud software, Programming languages (e.g. Python, Java), Data analysis tools (e.g. 
SQL, Tableau), Cloud platforms (e.g. AWS, Azure)"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_a02999d2-33b"},"title":"Staff Software Engineer - Backend","description":"<p>At Databricks, we are enabling data teams to solve the world&#39;s toughest problems by building and running the world&#39;s best data and AI infrastructure platform. As a software engineer with a backend focus, you will work with your team to build infrastructure and products for the Databricks platform at scale.</p>\n<p>The impact you&#39;ll have is significant, spanning many domains across our essential service platforms. You might work on challenges such as:</p>\n<ul>\n<li>Distributed systems, at-scale service architecture and monitoring, workflow orchestration, and developer experience.</li>\n</ul>\n<ul>\n<li>Delivering reliable and high-performance services and client libraries for storing and accessing humongous amounts of data on cloud storage backends, e.g., AWS S3, Azure Blob Store.</li>\n</ul>\n<ul>\n<li>Building reliable, scalable services, e.g., Scala, Kubernetes, and data pipelines, e.g., Spark, Databricks, to power the pricing infrastructure that serves millions of cluster-hours per day and develop product features that empower customers to easily view and control platform usage.</li>\n</ul>\n<p>What we look for in a candidate includes:</p>\n<ul>\n<li>A Bachelor&#39;s degree (or higher) in Computer Science, or a related field.</li>\n</ul>\n<ul>\n<li>7+ years of production-level experience in one of: Java, Scala, C++, or similar languages.</li>\n</ul>\n<ul>\n<li>Experience developing large-scale distributed systems.</li>\n</ul>\n<ul>\n<li>Experience working on a SaaS platform or with Service-Oriented Architectures.</li>\n</ul>\n<ul>\n<li>Good knowledge of SQL.</li>\n</ul>\n<p>Benefits at Databricks include comprehensive benefits and perks that meet the needs of all employees. 
For specific details on the benefits offered in your region, please click here.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_a02999d2-33b","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com/","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/7984907002","x-work-arrangement":"onsite","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Java","Scala","C++","SQL","distributed systems","at-scale service architecture and monitoring","workflow orchestration","developer experience","cloud storage backends","AWS S3","Azure Blob Store","Kubernetes","Spark","Databricks"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:51:34.292Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Berlin, Germany"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Java, Scala, C++, SQL, distributed systems, at-scale service architecture and monitoring, workflow orchestration, developer experience, cloud storage backends, AWS S3, Azure Blob Store, Kubernetes, Spark, Databricks"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_22375926-26e"},"title":"Senior IT Systems Engineer","description":"<p>We&#39;re seeking a strategic thinker and proven problem-solver with deep expertise in modern IT ecosystems. As a Sr. 
IT Systems Engineer, you&#39;ll drive automation, mature enterprise workforce identity and access management (IAM), and architect scalable, secure SaaS integrations.</p>\n<p><strong>Responsibilities:</strong></p>\n<ul>\n<li>Lead the design, implementation, administration, and optimization of core SaaS platforms including Okta, Google Workspace, Slack, Atlassian, and other IT tools.</li>\n<li>Own end-to-end support, monitoring, troubleshooting, and performance tuning of applications, systems, and their complex interconnections, ensuring high availability, security, and seamless user experience.</li>\n<li>Help architect and advance our workforce Identity and Access Management program, including configuration of Single Sign-On (SSO), lifecycle management, provisioning/deprovisioning, access governance, and policy enforcement.</li>\n<li>Serve as the subject matter expert (SME) providing strategic technical guidance to support business expansion, system scalability, and infrastructure maturity.</li>\n<li>Drive cross-functional knowledge sharing by authoring, maintaining, and evolving comprehensive IT documentation, runbooks, and architecture diagrams.</li>\n<li>Proactively identify gaps, risks, and opportunities in the environment; lead initiatives to enhance security posture, operational efficiency, and resilience, prioritizing automation of manual/repetitive processes.</li>\n<li>Evaluate emerging technologies, IAM trends, and automation platforms; develop business cases and lead proof-of-concepts or adoption recommendations.</li>\n<li>Mentor junior engineers and collaborate with cross-functional teams to align IT capabilities with organisational goals.</li>\n</ul>\n<p><strong>Basic Qualifications:</strong></p>\n<ul>\n<li>8+ years of hands-on experience administering and optimising a broad portfolio of SaaS applications in a hybrid and high-growth environment, with advanced proficiency in our core stack: Okta (including Advanced Server Access &amp; Workflows), Google 
Workspace, Slack Enterprise, Atlassian, etc.</li>\n<li>4+ years of deep experience with n8n, Okta Workflows and/or other leading iPaaS/automation platforms (e.g., Workato, Zapier, BetterCloud, custom integrations).</li>\n<li>Expert-level knowledge of IAM principles and protocols: SSO, SAML, OIDC, OAuth 2.0, SCIM, JIT provisioning, SWA, RBAC, ABAC, and access governance best practices.</li>\n<li>Strong experience designing and working with APIs for custom integrations, data flows, and automation.</li>\n<li>Proficiency in scripting and automation for monitoring, alerting, and operational efficiency (e.g., Google Apps Manager (GAM), Python, Bash, PowerShell, Terraform, or similar); experience building custom solutions is highly valued.</li>\n<li>Solid working knowledge and administrative experience in Azure, AWS, and/or GCP cloud platforms.</li>\n<li>Exceptional analytical and troubleshooting skills with a proven track record of resolving sophisticated, cross-system incidents under pressure.</li>\n<li>Demonstrated ability to deliver measurable business impact, own key deliverables, and drive projects to completion in fast-paced environments with competing priorities.</li>\n<li>Comfortable adapting to dynamic requirements, handling time-sensitive escalations, and participating in on-call rotation.</li>\n<li>Track record of success as a Senior IT Systems Engineer or equivalent in a fast-moving corporate or tech environment.</li>\n<li>Okta certifications (e.g., Okta Certified Professional / Administrator / Consultant) strongly preferred; other relevant certifications (Google Workspace) are a plus.</li>\n<li>Bachelor’s degree in Information Technology, Computer Science, or a related field preferred (or equivalent demonstrated experience) is a plus.</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_22375926-26e","directApply":true,"hiringOrganization":{"@type":"Organization","name":"xAI","sameAs":"https://www.xai.com","logo":"https://logos.yubhub.co/xai.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/xai/jobs/5071895007","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$184,000 - $276,000 USD","x-skills-required":["Okta","Google Workspace","Slack","Atlassian","n8n","Okta Workflows","iPaaS/automation platforms","IAM principles and protocols","APIs for custom integrations","data flows","automation","scripting and automation","monitoring","alerting","operational efficiency","Azure","AWS","GCP cloud platforms","analytical and troubleshooting skills"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:51:33.231Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Palo Alto, CA"}},"employmentType":"FULL_TIME","occupationalCategory":"IT","industry":"Technology","skills":"Okta, Google Workspace, Slack, Atlassian, n8n, Okta Workflows, iPaaS/automation platforms, IAM principles and protocols, APIs for custom integrations, data flows, automation, scripting and automation, monitoring, alerting, operational efficiency, Azure, AWS, GCP cloud platforms, analytical and troubleshooting skills","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":184000,"maxValue":276000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_9b10d521-d50"},"title":"Senior Software Engineer, Infrastructure","description":"<p>We are looking for a Senior Software Engineer to join our Network Infrastructure team. As a member of this team, you will be working with talented engineers on cutting-edge technologies of cloud-native network stack from Layer 3 to Layer 7. 
You will contribute to key infrastructure components that connect all Airbnb users and services across the globe.</p>\n<p>You will have the chance to define and influence large infrastructure initiatives such as global traffic load balancing and disaster recovery, next-gen service mesh, cross-region gateways, and edge security. Airbnb is a member of the Cloud Native Computing Foundation (CNCF) end-user community, and we work closely with the open-source community (e.g., k8s, istio) and peer companies to tackle cloud-native engineering challenges at scale.</p>\n<p>In this role, you will:</p>\n<ul>\n<li>Work with open-source communities (e.g., istio) to build the next-generation service mesh for all Airbnb back-end services;</li>\n<li>Build cross-region gateways and load balancers for global Airbnb services;</li>\n<li>Work with external partners and internal engineering and security teams to deliver edge security systems that protect Airbnb services;</li>\n<li>Design the multi-region network architecture on public clouds and build software and operation tools to manage Airbnb&#39;s production network;</li>\n<li>Work with product and engineering teams to optimize the network performance for Airbnb services;</li>\n</ul>\n<p>You will be a full-cycle developer with strong ownership and experience building and operating high-scale, distributed systems across the full software life cycle. You will have excellent communication skills and the ability to work well within a team and with teams across the engineering teams.</p>\n<p>You will be passionate about efficiency, availability, technical quality, and system quality. 
You will have led a team that is on-call for production infrastructure before.</p>\n<p>If you are passionate about building scalable and reliable systems, and you want to make an impact on the industry and open-source communities, then we want to hear from you!</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_9b10d521-d50","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Airbnb","sameAs":"https://www.airbnb.com/","logo":"https://logos.yubhub.co/airbnb.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/airbnb/jobs/7391864","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Virtual network architecture on public cloud providers (e.g., AWS, GCP, Azure)","Network service offerings (e.g., VPC, Security Group, PrivateLink and related products.)","Large-scale networking systems and software (e.g., Edge proxies, DNS, CDN, network gateways)","Istio, Envoy","Full-cycle development","Communication skills","Team leadership"],"x-skills-preferred":["Cloud-native engineering","Open-source community","Peer companies","Cloud Native Computing Foundation (CNCF)"],"datePosted":"2026-04-18T15:51:22.468Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Remote - Brazil"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Virtual network architecture on public cloud providers (e.g., AWS, GCP, Azure), Network service offerings (e.g., VPC, Security Group, PrivateLink and related products.), Large-scale networking systems and software (e.g., Edge proxies, DNS, CDN, network gateways), Istio, Envoy, Full-cycle development, Communication skills, Team leadership, Cloud-native engineering, Open-source community, Peer companies, Cloud Native Computing 
Foundation (CNCF)"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_1125d83c-1eb"},"title":"Staff Software Engineer - Backend","description":"<p>As a Staff Software Engineer with a backend focus, you will work closely with your team and product management to prioritise, design, implement, test, and operate micro-services for the Databricks platform and product.</p>\n<p>This involves writing software in Scala/Java, building data pipelines (Apache Spark, Apache Kafka), integrating with third-party applications, and interacting with cloud APIs (AWS, Azure, CloudFormation, Terraform).</p>\n<p>You will be part of a team that builds highly technical products that fulfil real, important needs in the world. We constantly push the boundaries of data and AI technology, while simultaneously operating with the resilience, security and scale that is critical to making customers successful on our platform.</p>\n<p>Our engineering teams build one of the largest scale software platforms. The fleet consists of millions of virtual machines, generating terabytes of logs and processing exabytes of data per day.</p>\n<p>We run thousands of Kubernetes clusters across all regions and orchestrate millions of VMs on a daily basis.</p>\n<p>Competencies:</p>\n<ul>\n<li>BS/MS/PhD in Computer Science, or a related field</li>\n<li>10+ years of production level experience in one of: Java, Scala, C++, or similar language</li>\n<li>Comfortable working towards a multi-year vision with incremental deliverables</li>\n<li>Experience in architecting, developing, deploying, and operating large scale distributed systems</li>\n<li>Experience working on a SaaS platform or with Service-Oriented Architectures</li>\n<li>Good knowledge of SQL</li>\n<li>Experience with software security and systems that handle sensitive data</li>\n<li>Experience with cloud technologies, e.g. 
AWS, Azure, GCP, Docker, Kubernetes</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_1125d83c-1eb","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/6779233002","x-work-arrangement":"onsite","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":"$182,400-$247,000 USD","x-skills-required":["Java","Scala","C++","Apache Spark","Apache Kafka","Cloud APIs","AWS","Azure","CloudFormation","Terraform","SQL","Software security","Cloud technologies"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:51:07.479Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Bellevue, Washington"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Java, Scala, C++, Apache Spark, Apache Kafka, Cloud APIs, AWS, Azure, CloudFormation, Terraform, SQL, Software security, Cloud technologies","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":182400,"maxValue":247000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_62900fcd-562"},"title":"Security Engineer - Offensive Security","description":"<p>As an Offensive Security Engineer on the Proactive Threat team at Stripe, you will simulate the tactics, techniques, and procedures (TTPs) of real-world adversaries to uncover security risks across Stripe&#39;s products and infrastructure.</p>\n<p>You&#39;ll conduct hands-on penetration testing, lead red team engagements, and collaborate with blue team counterparts to validate and improve detection and response capabilities. 
Your work will directly influence how Stripe builds, ships, and secures financial infrastructure used by millions of businesses worldwide.</p>\n<p>Responsibilities:</p>\n<p>Conduct comprehensive penetration tests across web applications, APIs, cloud environments (AWS/GCP/Azure), mobile applications, and internal infrastructure.</p>\n<p>Plan and execute red team engagements that emulate the TTPs of cyber and criminal threat actors targeting financial services, including initial access, lateral movement, persistence, and data exfiltration scenarios.</p>\n<p>Perform assumed-breach and objective-based assessments to test detection and response capabilities in coordination with defensive teams.</p>\n<p>Partner with detection engineering, threat intelligence, and incident response teams to validate security controls, identify coverage gaps, and improve detection fidelity.</p>\n<p>Contribute adversary tradecraft insights to inform detection rule development, threat hunting hypotheses, and incident response playbooks.</p>\n<p>Support incident investigations by providing offensive expertise, log analysis, and root cause analysis when required.</p>\n<p>Design, develop, and maintain custom offensive tools, scripts, and automation frameworks to enhance assessment efficiency and coverage.</p>\n<p>Build internal platforms and workflows that enable scalable, repeatable offensive operations.</p>\n<p>Contribute to internal security tooling repositories and champion engineering best practices within the team.</p>\n<p>Automate repetitive testing tasks, payload generation, and reporting workflows using modern development practices.</p>\n<p>Produce clear, actionable reports that communicate technical findings, business risk, and remediation guidance to both technical and non-technical stakeholders.</p>\n<p>Act as a subject-matter expert and primary point of contact for stakeholder teams engaged in offensive security programs and Stripe-wide security initiatives.</p>\n<p>Lead offensive 
security projects end-to-end, mentor junior team members, and foster a culture of continuous learning and knowledge sharing.</p>\n<p>Stay current with emerging threats, vulnerabilities, and attack techniques; share research internally and contribute to the broader security community.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_62900fcd-562","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Stripe","sameAs":"https://stripe.com/","logo":"https://logos.yubhub.co/stripe.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/stripe/jobs/7820898","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Python","Go","Web application security","Cloud platforms (AWS, Azure, or GCP)","Offensive tooling (Burp Suite, Cobalt Strike, Mythic, Sliver, BloodHound)","Adversary tradecraft and frameworks (MITRE ATT&CK)","Excellent written and verbal communication skills"],"x-skills-preferred":["Experience conducting offensive security in fintech, financial services, or other highly regulated environments","Background in vulnerability research, exploit development, or CVE discovery","Experience collaborating with threat intelligence, detection engineering, or incident response teams (purple team operations)","Familiarity with big data and log analysis tools (Splunk, Databricks, PySpark, osquery, etc.) 
for threat hunting or investigative support","Proficiency with AI/LLM-assisted development tools (e.g., Claude Code, Cursor, GitHub Copilot) and experience applying them to offensive security workflows"],"datePosted":"2026-04-18T15:51:01.913Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Ireland"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, Go, Web application security, Cloud platforms (AWS, Azure, or GCP), Offensive tooling (Burp Suite, Cobalt Strike, Mythic, Sliver, BloodHound), Adversary tradecraft and frameworks (MITRE ATT&CK), Excellent written and verbal communication skills, Experience conducting offensive security in fintech, financial services, or other highly regulated environments, Background in vulnerability research, exploit development, or CVE discovery, Experience collaborating with threat intelligence, detection engineering, or incident response teams (purple team operations), Familiarity with big data and log analysis tools (Splunk, Databricks, PySpark, osquery, etc.) for threat hunting or investigative support, Proficiency with AI/LLM-assisted development tools (e.g., Claude Code, Cursor, GitHub Copilot) and experience applying them to offensive security workflows"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_ba0a936c-9b5"},"title":"Partner Solution Architect (pre-sales)","description":"<p>We are looking for a Partner Solutions Architect to lead technical strategy and enablement for our ecosystem in the ANZ region. This is a hands-on builder role. 
You will be responsible for ensuring our partners are not only articulating Elastic&#39;s value but are technically capable of architecting, building, and validating complex solutions.</p>\n<p>As a Partner Solutions Architect, you will:</p>\n<ul>\n<li>Own Technical Engagement Plans (TEPs) for focus partners, establishing long-term technical roadmaps at the CTO and Practice Lead level.</li>\n<li>Guide partners through high-stakes Technical Validation cycles, ensuring Elastic solutions are built to best-practice standards.</li>\n<li>Lead &#39;one-to-many&#39; technical &#39;Build-a-thons&#39; and hands-on laboratory sessions that empower partner engineers to lead their own implementations.</li>\n<li>Build deep relationships with partner pre-sales teams to guide them through the &#39;how-to&#39; of complex Search AI, Observability, and Security architectures at the configuration level.</li>\n<li>Collaborate on &#39;design wins&#39; by developing repeatable technical blueprints.</li>\n</ul>\n<p>To be successful in this role, you will require:</p>\n<ul>\n<li>Direct, hands-on experience with the Elastic Stack (ELK) or similar distributed search/analytics technologies (e.g., OpenSearch, Solr, Splunk, Datadog).</li>\n<li>8+ years of experience in technical roles.</li>\n<li>Proven ability to design and build technical prototypes, ingest complex datasets, and optimize search/indexing performance.</li>\n<li>Hands-on experience with Kubernetes, Docker, and Infrastructure as Code (Terraform) on AWS, Azure, or GCP.</li>\n<li>3+ years in a partner-facing role, with a focus on building technical practices and enabling third-party engineering teams.</li>\n<li>The ability to translate deep technical capabilities into scalable partner-led solutions.</li>\n</ul>\n<p>If you are a motivated and experienced professional with a passion for technology and partnership development, we encourage you to apply for this exciting opportunity.</p>\n<p 
style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_ba0a936c-9b5","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Elastic","sameAs":"https://www.elastic.co/","logo":"https://logos.yubhub.co/elastic.co.png"},"x-apply-url":"https://job-boards.greenhouse.io/elastic/jobs/7757097","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Elastic Stack (ELK)","OpenSearch","Solr","Splunk","Datadog","Kubernetes","Docker","Infrastructure as Code (Terraform)","AWS","Azure","GCP"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:51:00.609Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Sydney, Australia"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Elastic Stack (ELK), OpenSearch, Solr, Splunk, Datadog, Kubernetes, Docker, Infrastructure as Code (Terraform), AWS, Azure, GCP"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_f329c77b-25d"},"title":"Senior Solutions Engineer- West Coast","description":"<p>We are looking for a Senior Solutions Engineer to join our team on the West Coast. As a Senior Solutions Engineer, you will be responsible for collaborating with account executives to develop and execute territory and account strategies to maximise the Okta opportunity in those accounts.</p>\n<p>Your duties will include conducting research and discovery to understand customer requirements and communicating the business value of solving technology problems using cloud technology. 
You will also be responsible for executing the delivery of POCs for customers with complex use cases, collaborating with other Okta engineering teams as needed.</p>\n<p>To be successful in this role, you will need to have a strong understanding of Identity &amp; Access Management (IAM) and experience with cloud platforms such as AWS, Azure, and GCP. You will also need to be an elite communicator and able to identify, map, and manage multiple personas.</p>\n<p>This is a hybrid role, requiring in-person onboarding and travel to an office in the U.S. during the first week of employment. The OTE range for this position is between $192,000-$323,000 USD, depending on location and experience.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_f329c77b-25d","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Okta","sameAs":"https://www.okta.com/","logo":"https://logos.yubhub.co/okta.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/okta/jobs/7592210","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$192,000-$323,000 USD","x-skills-required":["pre-sales engineering experience","Identity & Access Management (IAM)","cloud platforms (AWS, Azure, GCP)","elite communication skills","ability to identify, map, and manage multiple personas"],"x-skills-preferred":["web development (JavaScript, HTML, frontend frameworks)","mobile development (iOS, Android)","backend development (Java, C#, Node.js, Python, PHP, Ruby)"],"datePosted":"2026-04-18T15:50:55.983Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Arizona"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"pre-sales engineering experience, Identity & Access Management (IAM), cloud platforms (AWS, Azure, GCP), elite communication skills, 
ability to identify, map, and manage multiple personas, web development (JavaScript, HTML, frontend frameworks), mobile development (iOS, Android), backend development (Java, C#, Node.js, Python, PHP, Ruby)","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":192000,"maxValue":323000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_44ca68dc-996"},"title":"Senior Software Engineer - Fullstack","description":"<p>We are seeking a Senior Software Engineer - Fullstack to join our team. As a Full Stack software engineer, you will work with your team and product management to make insights from data simple. We are looking for engineers that are customer obsessed, who can take on the full scope of the product and user experience beyond the technical implementation. You&#39;ll set the foundation for how we build robust, scalable and delightful products.</p>\n<p>Some example experiences you&#39;ll create for our customers to achieve the full project lifecycle from loading data, visualizing results, creating statistical models, and deploying as production artifacts include:</p>\n<p>Simple workflows to create, configure, and manage large-scale compute clusters, networks and data sources. Create, deploy, test, and upgrade complex data pipelines with powerful features to visualize data graphs. Seamless onboarding and management for all members of an organisation to become data-driven. Provide a great SQL-centric data exploration and dashboarding experience on Databricks. An interactive environment for collaborative data projects at massive scale with an easy path to production.</p>\n<p>What we look for:</p>\n<p>5+ years of experience with HTML, CSS, and JavaScript. Passion for user experience and design and a deep understanding of front-end architecture. Comfortable working towards a multi-year vision with incremental deliverables. 
Motivated by delivering customer value. Experience with modern JavaScript frameworks (e.g., React, Angular, or VueJs/Ember). 5+ years of experience with server-side web technologies (e.g., Node.js, Java, Python, Scala, C#, C++, Go). Good knowledge of SQL. Experience with cloud technologies, e.g. AWS, Azure, GCP, Docker, or Kubernetes. Experience developing large-scale distributed systems.</p>\n<p>Pay Range Transparency Databricks is committed to fair and equitable compensation practices. The pay range(s) for this role is listed below and represents the expected salary range for non-commissionable roles or on-target earnings for commissionable roles. Actual compensation packages are based on several factors that are unique to each candidate, including but not limited to job-related skills, depth of experience, relevant certifications and training, and specific work location. Based on the factors above, Databricks anticipates utilizing the full width of the range. The total compensation package for this position may also include eligibility for annual performance bonus, equity, and the benefits listed above.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_44ca68dc-996","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/6544403002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$157,700-$213,800 USD","x-skills-required":["HTML","CSS","JavaScript","Node.js","Java","Python","Scala","C#","C++","Go","SQL","AWS","Azure","GCP","Docker","Kubernetes"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:50:52.491Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Seattle, 
Washington"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"HTML, CSS, JavaScript, Node.js, Java, Python, Scala, C#, C++, Go, SQL, AWS, Azure, GCP, Docker, Kubernetes","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":157700,"maxValue":213800,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_eda5b2b8-a68"},"title":"Senior Solutions Architect - AI/BI","description":"<p>We are seeking a Senior Solutions Architect - AI/BI to join our Field Engineering team in London. The successful candidate will be responsible for executing on Databricks&#39; strategic Product Operating Model, providing enhanced focus on earlier stage, highly prioritized product lines to establish product market fit and set the course for rapid revenue growth.</p>\n<p>As a Senior Solutions Architect - AI/BI, you will work in partnership with direct account teams to jointly engage clients, foster necessary relationships, position in-depth the specific product line, and provide compelling reasons for clients to adopt and grow the usage of the given product.</p>\n<p>Key responsibilities include:</p>\n<ul>\n<li>Collaborating with GTM leadership and account teams to design and execute high-impact engagement strategies across your territory.</li>\n<li>Serving as a trusted advisor, expert Solutions Architect, and champion, building technical credibility with stakeholders to drive product adoption and vision.</li>\n<li>Enabling clients at scale through workshops and developing customer-facing collateral that helps increase technical knowledge and thought leadership.</li>\n<li>Influencing product roadmap by translating field-derived, data-driven insights into strategic recommendations for Product and Engineering teams.</li>\n</ul>\n<p>To succeed in this role, you will need:</p>\n<ul>\n<li>6+ years in a 
customer-facing, pre-sales or consulting role influencing technical executives, driving high-level data strategy and product adoption.</li>\n<li>Proven ability to co-plan large territories with Account Executives and operate in a highly coordinated, cross-functional effort across GTM and R&amp;D teams.</li>\n<li>Experience collaborating with Global System Integrators (GSIs) and third-party consulting organizations to drive customer outcomes.</li>\n<li>Proficient in programming, debugging, and problem-solving using SQL and Python.</li>\n<li>Hands-on experience building solutions within major public cloud environments (AWS, Azure, or GCP).</li>\n<li>Broad experience (in two or more) and understanding across the fields of data engineering, data warehousing, AI, ML, governance, transactional systems, app development, and streaming.</li>\n</ul>\n<p>If you are a motivated and experienced professional with a passion for data and AI, we encourage you to apply for this exciting opportunity.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_eda5b2b8-a68","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8407183002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Experience in designing and delivering cloud-based Data Visualisation and Analytics Solutions","Ability to advise customers in lakehouse analytics architecture","Certification and/or demonstrated competence in data visualisation and analytics systems along with one of Azure, AWS or GCP cloud providers","Demonstrated competence in the Lakehouse architecture including hands-on experience with Apache Spark, Python and 
SQL"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:50:38.084Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"London, United Kingdom"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Experience in designing and delivering cloud-based Data Visualisation and Analytics Solutions, Ability to advise customers in lakehouse analytics architecture, Certification and/or demonstrated competence in data visualisation and analytics systems along with one of Azure, AWS or GCP cloud providers, Demonstrated competence in the Lakehouse architecture including hands-on experience with Apache Spark, Python and SQL"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_ee84f5eb-f18"},"title":"Senior Solutions Specialist","description":"<p>As a Senior Solutions Specialist at Okta, you will be part of the presales team that delivers sales presentations and product demonstrations to educate customers on the best ways to implement Okta identity and access management solutions.</p>\n<p>You will report to the Senior Manager in the OFCTO organization.</p>\n<p>Key responsibilities include:</p>\n<ul>\n<li>Strategic Customer Engagement: Support strategic customer conversations aligning with technical and engineering leaders and stakeholders. Deliver on emerging trends, industry best practices, and identity-driven digital transformation. Assist in preparing executive-level messaging and participate in strategic discussions to align Okta&#39;s solutions with customer business outcomes.</li>\n</ul>\n<ul>\n<li>Technical Leadership: Provide strategic and technical guidance to Solutions Engineering teams across both the Okta and Auth0 identity platforms. 
Collaborate with the Global Office of Field CTO office team to deliver actionable insights from the field, validate customer needs, and identify recurring trends for the Okta Product &amp; Engineering teams.</li>\n</ul>\n<ul>\n<li>Field Enablement and Collaboration: Contribute to frameworks, tools, and content to support Solutions Engineering teams in executing effectively. Partner with sales leadership to identify and address critical business opportunities and challenges. Drive cross-functional collaboration to ensure seamless execution of global initiatives.</li>\n</ul>\n<ul>\n<li>Market Influence: Represent Okta as a thought leader in the identity and security space. Influence industry standards and participate in relevant technical advisory boards.</li>\n</ul>\n<ul>\n<li>Innovation and Strategy: Partner closely with OFCTO teams as an advocate for customer-driven innovation, market trends, and GTM insights to provide input to influence product teams. Support, derive, and champion strategic initiatives that enhance Okta&#39;s differentiation and business impact.</li>\n</ul>\n<p>Position Requirements:</p>\n<ul>\n<li>Identity &amp; Directory Services Mastery: Deep Infrastructure Knowledge: Expert-level understanding of Active Directory (AD) and Azure AD (Entra ID) or similar, including Group Policy Objects (GPOs), Kerberos, and OIDC/SAML protocols.</li>\n</ul>\n<ul>\n<li>Multi-IdP Ecosystems: Proficiency in managing and securing diverse Identity Providers (IdPs) like Okta, Ping Identity, and Google Workspace.</li>\n</ul>\n<ul>\n<li>Hybrid Identity: Experience managing the complexities of identity synchronization between on-premises environments and the cloud.</li>\n</ul>\n<ul>\n<li>Device Identity &amp; Access Management Mastery: Passwordless Expertise: Deep, hands-on knowledge of FIDO2/WebAuthn, Passkeys, and implementing passwordless authentication strategies using solutions like Okta FastPass.</li>\n</ul>\n<ul>\n<li>Endpoint &amp; Device Context: Strong 
understanding of Desktop MFA, device registration, and leveraging device posture signals from MDM/UEM (e.g., Jamf, Intune, Workspace ONE) and EDR (e.g., CrowdStrike) platforms to enforce risk-based access policies.</li>\n</ul>\n<ul>\n<li>PKI &amp; Certificate Management: Familiarity with certificate-based authentication and its role in establishing device trust.</li>\n</ul>\n<ul>\n<li>Identity Threat Detection &amp; Response (ITDR): Threat Landscape Knowledge: A thorough understanding of modern identity attack vectors, including phishing, token theft, MFA bypass techniques, and lateral movement.</li>\n</ul>\n<ul>\n<li>Behavioral Analysis: Experience with User and Entity Behavior Analytics (UEBA) and the ability to interpret threat signals to detect and respond to suspicious activity.</li>\n</ul>\n<ul>\n<li>Security Ecosystem Integration: Proficiency in designing solutions that integrate identity platforms with SIEM (e.g., Splunk, Sentinel) and SOAR tools for automated threat response.</li>\n</ul>\n<ul>\n<li>Strong communication and presentation skills, with experience contributing to technical events.</li>\n</ul>\n<p>Travel expectation: up to 40% for customer meetings, industry events, and internal off-sites.</p>\n<p>This is a remote position.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_ee84f5eb-f18","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Okta","sameAs":"https://www.okta.com/","logo":"https://logos.yubhub.co/okta.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/okta/jobs/7769595","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$215,000-$323,000 USD (OTE range for candidates located in the San Francisco Bay area)","x-skills-required":["Active Directory (AD)","Azure AD (Entra ID)","Group Policy Objects (GPOs)","Kerberos","OIDC/SAML 
protocols","FIDO2/WebAuthn","Passkeys","Okta FastPass","Desktop MFA","Device registration","MDM/UEM (e.g., Jamf, Intune, Workspace ONE)","EDR (e.g., CrowdStrike)","Certificate-based authentication","PKI & Certificate Management","Identity Threat Detection & Response (ITDR)","User and Entity Behavior Analytics (UEBA)","SIEM (e.g., Splunk, Sentinel)","SOAR tools"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:50:06.588Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Chicago, Illinois; Los Angeles, California; New York, New York; San Francisco, California; Washington, DC"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Active Directory (AD), Azure AD (Entra ID), Group Policy Objects (GPOs), Kerberos, OIDC/SAML protocols, FIDO2/WebAuthn, Passkeys, Okta FastPass, Desktop MFA, Device registration, MDM/UEM (e.g., Jamf, Intune, Workspace ONE), EDR (e.g., CrowdStrike), Certificate-based authentication, PKI & Certificate Management, Identity Threat Detection & Response (ITDR), User and Entity Behavior Analytics (UEBA), SIEM (e.g., Splunk, Sentinel), SOAR tools","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":215000,"maxValue":323000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_245a7b5f-cac"},"title":"Staff Software Engineer (Infrastructure)","description":"<p>At Databricks, we are building and running the world&#39;s best data and AI infrastructure platform so our customers can use deep data insights to improve their business.</p>\n<p>As a Staff Software Engineer at Databricks India, you can get to work across various domains, including backend infrastructure, distributed systems, at-scale service architecture and monitoring, workflow orchestration, and developer experience.</p>\n<p>Our 
Infrastructure Backend teams span many domains across our essential service platforms. For instance, you might work on challenges such as:</p>\n<ul>\n<li>Problems that span from product to infrastructure including: distributed systems, at-scale service architecture and monitoring, workflow orchestration, and developer experience.</li>\n</ul>\n<ul>\n<li>Deliver reliable and high performance services and client libraries for storing and accessing humongous amount of data on cloud storage backends, e.g., AWS S3, Azure Blob Store.</li>\n</ul>\n<ul>\n<li>Build reliable, scalable services, e.g. Scala, Kubernetes, and data pipelines, e.g. Apache Spark, Databricks, to power the pricing infrastructure that serves millions of cluster-hours per day and develop product features that empower customers to easily view and control platform usage.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>BS (or higher) in Computer Science, or a related field</li>\n</ul>\n<ul>\n<li>12+ years of production level experience in one of: Python, Java, Scala, C++, or similar language</li>\n</ul>\n<ul>\n<li>6+ years experience developing large-scale distributed systems from scratch</li>\n</ul>\n<ul>\n<li>Experience working on a SaaS platform or with Service-Oriented Architectures</li>\n</ul>\n<ul>\n<li>Experience working on Infrastructure related projects is a plus</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_245a7b5f-cac","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/7648674002","x-work-arrangement":"onsite","x-experience-level":"staff","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Python","Java","Scala","C++","AWS S3","Azure Blob 
Store","Kubernetes","Apache Spark","Databricks"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:50:04.399Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Bengaluru, India"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Python, Java, Scala, C++, AWS S3, Azure Blob Store, Kubernetes, Apache Spark, Databricks"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_85f1f87e-70f"},"title":"Resident Solutions Architect - Financial Services","description":"<p>As a Senior Big Data Solutions Architect (Sr Resident Solutions Architect) in our Professional Services team, you will work with clients on short to medium term customer engagements on their big data challenges using the Databricks platform.</p>\n<p>You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>RSAs are billable and know how to complete projects according to specification with excellent customer service.</p>\n<p>You will report to the regional Manager/Lead.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>You will work on a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases</li>\n</ul>\n<ul>\n<li>Work with engagement managers to scope variety of professional services work with input from the customer</li>\n</ul>\n<ul>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n</ul>\n<ul>\n<li>Consult on architecture and design; bootstrap hands-on projects which leads to a customers&#39; successful 
understanding, evaluation and adoption of Databricks.</li>\n</ul>\n<ul>\n<li>Provide an escalated level of support for customer operational issues.</li>\n</ul>\n<ul>\n<li>You will work with the Databricks technical team, Project Manager, Architect and Customer team to ensure the technical components of the engagement are delivered to meet customer&#39;s needs.</li>\n</ul>\n<ul>\n<li>Work with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement specific product and support issues.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>9+ years experience in data engineering, data platforms &amp; analytics</li>\n</ul>\n<ul>\n<li>Comfortable writing code in either Python or Scala</li>\n</ul>\n<ul>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n</ul>\n<ul>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Apache Spark™ runtime internals</li>\n</ul>\n<ul>\n<li>Familiarity with CI/CD for production deployments</li>\n</ul>\n<ul>\n<li>Working knowledge of MLOps</li>\n</ul>\n<ul>\n<li>Capable of design and deployment of highly performant end-to-end data architectures</li>\n</ul>\n<ul>\n<li>Experience with technical project delivery - managing scope and timelines.</li>\n</ul>\n<ul>\n<li>Documentation and white-boarding skills.</li>\n</ul>\n<ul>\n<li>Experience working with clients and managing conflicts.</li>\n</ul>\n<ul>\n<li>Experience in building scalable streaming and batch solutions using cloud-native components</li>\n</ul>\n<ul>\n<li>Travel to customers up to 20% of the time</li>\n</ul>\n<p>Nice to have: Databricks Certification</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_85f1f87e-70f","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8461327002","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 USD","x-skills-required":["data engineering","data platforms & analytics","Python","Scala","Cloud ecosystems (AWS, Azure, GCP)","Apache Spark","CI/CD for production deployments","MLOps","end-to-end data architectures","technical project delivery","documentation and white-boarding skills","client management"],"x-skills-preferred":["Databricks Certification"],"datePosted":"2026-04-18T15:49:55.028Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Austin, Texas"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data platforms & analytics, Python, Scala, Cloud ecosystems (AWS, Azure, GCP), Apache Spark, CI/CD for production deployments, MLOps, end-to-end data architectures, technical project delivery, documentation and white-boarding skills, client management, Databricks Certification","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_f509d407-0b5"},"title":"Senior Security Engineer, Incident Response","description":"<p>You will be an individual contributor on the security Incident Response (IR) team at Databricks, reporting to the regional IR manager. 
Your responsibilities will include conducting security analysis and forensics, responding to high-priority alerts, and contributing to automations and agentic capabilities. You will be a security multiplier and help the team scale security incident response at Databricks.</p>\n<p>Responsibilities:</p>\n<ul>\n<li>Respond to incidents as part of a distributed 24x7 operations and on-call schedule.</li>\n<li>Triage and respond to security events and alerts, ensuring quick and effective containment.</li>\n<li>Contribute to security investigations, conducting analysis and forensics across a range of data sources to determine the timeline and impact of security events.</li>\n<li>Build automations, including leveraging AI and agentic platforms, to deliver autonomous capabilities, expedite your work, and scale the impact of the team.</li>\n<li>Communicate technical decisions through design docs and tech talks, and mentor junior security responders via security guidance, design reviews, and code reviews.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>Bachelor&#39;s Degree AND 4+ years experience in Incident Response work OR Master&#39;s Degree AND 2+ years experience.</li>\n<li>Strong cloud security background in at least 1 of AWS, GCP, or Azure, and working knowledge of the others.</li>\n<li>Knowledge of AI/LLM and agentic capabilities, including effective prompting and use of MCP, agents, and agent skills.</li>\n<li>Broad security subject matter expertise.</li>\n<li>Expertise in few core IR skills (DFIR, Reverse Engineering, Traditional Network Security, Storage and access security, Sandboxing, Compute security, etc.).</li>\n<li>Experience with Enterprise Security and SaaS applications.</li>\n<li>Working knowledge of a SIEM and SOAR.</li>\n<li>Experience building Incident Response Tooling and scripting language skills.</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a 
href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_f509d407-0b5","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8026632002","x-work-arrangement":"remote","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["cloud security","AWS","GCP","Azure","AI/LLM","agentic capabilities","security subject matter expertise","DFIR","Reverse Engineering","Traditional Network Security","Storage and access security","Sandboxing","Compute security","Enterprise Security","SaaS applications","SIEM","SOAR","Incident Response Tooling","scripting language skills"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:49:52.742Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Amsterdam, Netherlands; Berlin, Germany; London, United Kingdom; Remote - Denmark; Remote - France; Remote - Germany; Remote - Italy; Remote - Spain; Remote - Sweden"}},"jobLocationType":"TELECOMMUTE","employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"cloud security, AWS, GCP, Azure, AI/LLM, agentic capabilities, security subject matter expertise, DFIR, Reverse Engineering, Traditional Network Security, Storage and access security, Sandboxing, Compute security, Enterprise Security, SaaS applications, SIEM, SOAR, Incident Response Tooling, scripting language skills"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_f4ec68a8-fb9"},"title":"Manager, Enterprise Security Engineering","description":"<p>We&#39;re seeking a security-focused leader to build and scale world-class defensive controls protecting the infrastructure that supports our defence technology products.</p>\n<p>As a Manager, Enterprise Security 
Engineering, you will lead a high-performing team of security engineers, set technical direction, and establish clear standards for engineering excellence and ownership. You will define and execute the security roadmap for infrastructure, remote access/ZTNA, endpoint, and M&amp;A, and design and implement security controls across cloud, production, and corporate infrastructure.</p>\n<p>Key responsibilities include:</p>\n<ul>\n<li>Building, mentoring, and growing a high-performing team of security engineers</li>\n<li>Setting technical direction and establishing clear standards for engineering excellence and ownership</li>\n<li>Partnering in hiring, performance management, and career development</li>\n<li>Defining and executing the security roadmap for infrastructure, remote access/ZTNA, endpoint, and M&amp;A</li>\n<li>Designing and implementing security controls across cloud, production, and corporate infrastructure</li>\n<li>Developing tools and systems to improve security posture and operational efficiency</li>\n<li>Conducting security architecture and design reviews for systems and applications</li>\n<li>Partnering across infrastructure, IT, product, and security teams to reduce risk while enabling velocity</li>\n</ul>\n<p>Requirements include:</p>\n<ul>\n<li>Ability to work autonomously, take ownership of projects, and collaborate across teams</li>\n<li>Demonstrated ability to translate ambiguous requirements into clear technical roadmaps and delivered outcomes</li>\n<li>Have participated in or supported incident response events</li>\n<li>Strong programming ability in one or more general-purpose languages (Python, Go, Rust, etc)</li>\n<li>Experience with one or more infrastructure as code languages (e.g., Terraform, AWS CDK) in a production capacity</li>\n<li>Experience conducting security architecture or design reviews around custom business applications</li>\n<li>Strong understanding of modern attack vectors and defensive mitigation 
strategies</li>\n<li>Experience working with cloud platforms and deploying applications through CI/CD pipelines</li>\n<li>Experience implementing security controls across endpoints, corporate cloud environments, and internal infrastructure</li>\n<li>Eligible to obtain and maintain a U.S. TS clearance</li>\n</ul>\n<p>Preferred qualifications include:</p>\n<ul>\n<li>Experience building bespoke solutions in high-growth and high-complexity environments</li>\n<li>Experience with AWS, Azure, or GCP security ecosystem and tooling</li>\n<li>Strong experience with Linux operating systems</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_f4ec68a8-fb9","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Anduril","sameAs":"https://www.andurilindustries.com/","logo":"https://logos.yubhub.co/andurilindustries.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/andurilindustries/jobs/5070618007","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$166,000-$220,000 USD","x-skills-required":["security engineering","infrastructure as code","cloud security","endpoint security","M&A security","incident response","security architecture","CI/CD pipelines","Linux operating systems"],"x-skills-preferred":["AWS security ecosystem","Azure security ecosystem","GCP security ecosystem","containerization"],"datePosted":"2026-04-18T15:49:51.732Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Costa Mesa, California, United States"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"security engineering, infrastructure as code, cloud security, endpoint security, M&A security, incident response, security architecture, CI/CD pipelines, Linux operating systems, AWS security ecosystem, Azure security ecosystem, 
GCP security ecosystem, containerization","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":166000,"maxValue":220000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_ffd169d9-40b"},"title":"Resident Solutions Architect - Communications, Media, Entertainment & Games","description":"<p>As a Resident Solutions Architect in our Professional Services team, you will work with clients on short to medium term customer engagements on their big data challenges using the Databricks platform.</p>\n<p>You will provide data engineering, data science, and cloud technology projects which require integrating with client systems, training, and other technical tasks to help customers to get most value out of their data.</p>\n<p>RSAs are billable and know how to complete projects according to specification with excellent customer service.</p>\n<p>You will report to the regional Manager/Lead.</p>\n<p>The impact you will have:</p>\n<ul>\n<li>You will work on a variety of impactful customer technical projects which may include designing and building reference architectures, creating how-to&#39;s and productionalizing customer use cases</li>\n</ul>\n<ul>\n<li>Work with engagement managers to scope variety of professional services work with input from the customer</li>\n</ul>\n<ul>\n<li>Guide strategic customers as they implement transformational big data projects, 3rd party migrations, including end-to-end design, build and deployment of industry-leading big data and AI applications</li>\n</ul>\n<ul>\n<li>Consult on architecture and design; bootstrap or implement customer projects which leads to a customers&#39; successful understanding, evaluation and adoption of Databricks.</li>\n</ul>\n<ul>\n<li>Provide an escalated level of support for customer operational issues.</li>\n</ul>\n<ul>\n<li>You will work with the Databricks technical team, 
Project Manager, Architect and Customer team to ensure the technical components of the engagement are delivered to meet customer&#39;s needs.</li>\n</ul>\n<ul>\n<li>Work with Engineering and Databricks Customer Support to provide product and implementation feedback and to guide rapid resolution for engagement specific product and support issues.</li>\n</ul>\n<p>What we look for:</p>\n<ul>\n<li>6+ years experience in data engineering, data platforms &amp; analytics</li>\n</ul>\n<ul>\n<li>Comfortable writing code in either Python or Scala</li>\n</ul>\n<ul>\n<li>Working knowledge of two or more common Cloud ecosystems (AWS, Azure, GCP) with expertise in at least one</li>\n</ul>\n<ul>\n<li>Deep experience with distributed computing with Apache Spark™ and knowledge of Spark runtime internals</li>\n</ul>\n<ul>\n<li>Familiarity with CI/CD for production deployments</li>\n</ul>\n<ul>\n<li>Working knowledge of MLOps</li>\n</ul>\n<ul>\n<li>Design and deployment of performant end-to-end data architectures</li>\n</ul>\n<ul>\n<li>Experience with technical project delivery - managing scope and timelines.</li>\n</ul>\n<ul>\n<li>Documentation and white-boarding skills.</li>\n</ul>\n<ul>\n<li>Experience working with clients and managing conflicts.</li>\n</ul>\n<ul>\n<li>Build skills in technical areas which support the deployment and integration of Databricks-based solutions to complete customer projects.</li>\n</ul>\n<ul>\n<li>Travel to customers 20% of the time</li>\n</ul>\n<p>Pay Range Transparency Databricks is committed to fair and equitable compensation practices. 
The pay range(s) for this role is listed below and represents the expected base salary range for non-commissionable roles or on-target earnings for commissionable roles.</p>\n<p>Actual compensation packages are based on several factors that are unique to each candidate, including but not limited to job-related skills, depth of experience, relevant certifications and training, and specific work location.</p>\n<p>Based on the factors above, Databricks anticipated utilizing the full width of the range.</p>\n<p>The total compensation package for this position may also include eligibility for annual performance bonus, equity, and the benefits listed above.</p>\n<p>For more information regarding which range your location is in visit our page here.</p>\n<p>Zone 1 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 2 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 3 Pay Range $180,656-$248,360 USD</p>\n<p>Zone 4 Pay Range $180,656-$248,360 USD</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_ffd169d9-40b","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Databricks","sameAs":"https://databricks.com","logo":"https://logos.yubhub.co/databricks.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/databricks/jobs/8461239002","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$180,656-$248,360 USD","x-skills-required":["data engineering","data science","cloud technology","Apache Spark","CI/CD","MLOps","data platforms & analytics","Python","Scala","AWS","Azure","GCP"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:49:46.649Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Atlanta, Georgia"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"data engineering, data science, cloud technology, Apache 
Spark, CI/CD, MLOps, data platforms & analytics, Python, Scala, AWS, Azure, GCP","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":180656,"maxValue":248360,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_bcabf282-157"},"title":"Technical Support Engineer - Federal (Night Shift)","description":"<p>We are looking for a performance-driven Sr. Federal Technical Support Engineer to join our team. As a Senior Federal Technical Support Engineer, you will be part of a frontline team supporting the identity infrastructure of the U.S. Federal Government. You will manage customer issues from initial contact through troubleshooting and root cause identification to final resolution. You will act as a bridge between the customer and the company, ensuring a deep understanding of business impacts and driving timely problem resolution. You will consistently meet or exceed KPIs related to response quality, timeliness, and the overall customer experience. You will serve as the primary point of contact for internal and external stakeholders to ensure issues are resolved as expediently as possible. You will partner with the Engineering team to collect detailed information and document bugs for product issues impacting the customer base.</p>\n<p>In this role, you will have a deep specialization in Identity and Access Management (IAM) and FedRAMP High/Moderate environments. You will have hands-on experience supporting IAM solutions, including deep familiarity with protocols such as SAML, SSO, LDAP, and WS-Federation. You will have advanced knowledge of Active Directory, Entra ID (Azure AD), and Okta. You will be an expert in troubleshooting synchronization errors, managing complex group membership logic, and overseeing cross-platform identity lifecycle management. 
You will have experience supporting major SaaS applications, including Office 365, Google Workspace, Salesforce, and Workday. You will have proven ability to isolate and resolve network-layer impediments. You will be skilled in leveraging diagnostic utilities such as Wireshark, Fiddler, and DNS lookup tools to identify root causes. You will have excellent relationship management skills with the ability to remain calm, composed, and articulate during high-pressure customer situations. You will be a quick study with the ability to master new technologies rapidly in a fast-paced environment. You will have strong analytical and organizational skills; comfortable working both as a collaborative teammate and an independent contributor with minimal supervision. You will have a genuine passion for solving complex problems and advocating for customer success.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_bcabf282-157","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Okta","sameAs":"https://www.okta.com/","logo":"https://logos.yubhub.co/okta.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/okta/jobs/7819794","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$114,000-$157,300 USD","x-skills-required":["Identity & Access Management (IAM)","FedRAMP High/Moderate environments","SAML","SSO","LDAP","WS-Federation","Active Directory","Entra ID (Azure AD)","Okta","Troubleshooting synchronization errors","Managing complex group membership logic","Overseeing cross-platform identity lifecycle management","Supporting major SaaS applications","Office 365","Google Workspace","Salesforce","Workday","Network-layer impediments","Diagnostic utilities","Wireshark","Fiddler","DNS lookup 
tools"],"x-skills-preferred":[],"datePosted":"2026-04-18T15:49:38.572Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Bellevue, Washington; Chicago, Illinois; Washington, DC"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Identity & Access Management (IAM), FedRAMP High/Moderate environments, SAML, SSO, LDAP, WS-Federation, Active Directory, Entra ID (Azure AD), Okta, Troubleshooting synchronization errors, Managing complex group membership logic, Overseeing cross-platform identity lifecycle management, Supporting major SaaS applications, Office 365, Google Workspace, Salesforce, Workday, Network-layer impediments, Diagnostic utilities, Wireshark, Fiddler, DNS lookup tools","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":114000,"maxValue":157300,"unitText":"YEAR"}}}]}