{"version":"0.1","company":{"name":"YubHub","url":"https://yubhub.co","jobsUrl":"https://yubhub.co/jobs/skill/gpu-hardware"},"x-facet":{"type":"skill","slug":"gpu-hardware","display":"Gpu Hardware","count":5},"x-feed-size-limit":100,"x-feed-sort":"enriched_at desc","x-feed-notice":"This feed contains at most 100 jobs (the most recently enriched). For the full corpus, use the paginated /stats/by-facet endpoint or /search.","x-generator":"yubhub-xml-generator","x-rights":"Free to redistribute with attribution: \"Data by YubHub (https://yubhub.co)\"","x-schema":"Each entry in `jobs` follows https://schema.org/JobPosting. YubHub-native raw fields carry `x-` prefix.","jobs":[{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_a561c761-1f3"},"title":"Manager, Bare Metal Support Engineering","description":"<p>The Customer Experience (CX) Organisation at CoreWeave is dedicated to ensuring every client running AI workloads at scale has a seamless, reliable, and high-performance experience.</p>\n<p>As a Manager of Bare Metal Support Engineering, you&#39;ll be at the centre of ensuring our dedicated infrastructure remains stable, reliable, and performant. You&#39;ll lead daily support operations, triage incidents, drive escalations, and ensure that hardware is monitored, maintained, and delivered effectively for our clients.</p>\n<p>Key responsibilities include:</p>\n<ul>\n<li>Leading a skilled team responsible for maintaining and optimising physical infrastructure across multiple client environments.</li>\n<li>Building, developing, and leading a dedicated Infrastructure Support team focused on supporting key infrastructure, handling escalations, and ensuring smooth hardware operations.</li>\n<li>Overseeing the resolution of infrastructure-related incidents, escalation management, and collaborating with internal teams to deliver effective solutions.</li>\n<li>Improving support processes to enhance efficiency and reduce downtime, ensuring the infrastructure meets client expectations.</li>\n</ul>\n<p>The ideal candidate will have 5+ years of experience leading teams responsible for infrastructure support, data centre operations, or physical compute environments. They should be hands-on with Linux system administration and command-line tools, familiar with hardware-level diagnostics, troubleshooting, and replacement, and have experience working with high-performance rack-scale hardware.</p>\n<p>In addition to the required skills, preferred skills include experience managing infrastructure support teams in high-growth or rapidly evolving environments, proven ability to develop and implement operational processes that scale with business needs, and strong familiarity with server and GPU hardware lifecycle management.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_a561c761-1f3","directApply":true,"hiringOrganization":{"@type":"Organization","name":"CoreWeave","sameAs":"https://www.coreweave.com","logo":"https://logos.yubhub.co/coreweave.com.png"},"x-apply-url":"https://job-boards.greenhouse.io/coreweave/jobs/4649055006","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$170,000 to $240,000 SGD","x-skills-required":["Linux system administration","Command-line tools","Hardware-level diagnostics","Troubleshooting and replacement","High-performance rack-scale hardware"],"x-skills-preferred":["Managing infrastructure support teams","Developing and implementing operational processes","Server and GPU hardware lifecycle management"],"datePosted":"2026-04-18T15:45:59.370Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Singapore"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Linux system administration, Command-line tools, Hardware-level diagnostics, Troubleshooting and replacement, High-performance rack-scale hardware, Managing infrastructure support teams, Developing and implementing operational processes, Server and GPU hardware lifecycle management","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":170000,"maxValue":240000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_f8883394-0fc"},"title":"Solutions Architect, AI and ML","description":"<p>We are looking for an experienced Cloud Solution Architect to help assist customers with adoption of GPU hardware and Software, as well as building and deploying Machine Learning (ML) , Deep Learning (DL), data analytics solutions on various Cloud Computing Platforms.</p>\n<p>As a Solutions Architect, you will engage directly with developers, researchers, and data scientists with some of NVIDIA’s most strategic technology customers as well as work directly with business and engineering teams on product strategy.</p>\n<p><strong>Key Responsibilities:</strong></p>\n<ul>\n<li>Help cloud customers craft, deploy, and maintain scalable, GPU-accelerated inference pipelines on cloud ML services and Kubernetes for large language models (LLMs) and generative AI workloads.</li>\n<li>Enhance performance tuning using TensorRT/TensorRT-LLM, vLLM, Dynamo, and Triton Inference Server to improve GPU utilization and model efficiency.</li>\n<li>Collaborate with multi-functional teams (engineering, product) and offer technical mentorship to cloud customers implementing AI inference at scale.</li>\n<li>Build custom PoCs for solution that address customer’s critical business needs applying NVIDIA hardware and software technology</li>\n<li>Partner with Sales Account Managers or Developer Relations Managers to identify and secure new business opportunities for NVIDIA products and solutions for ML/DL and other software solutions</li>\n<li>Prepare and deliver technical content to customers including presentations about purpose-built solutions, workshops about NVIDIA products and solutions, etc.</li>\n<li>Conduct regular technical customer meetings for project/product roadmap, feature discussions, and intro to new technologies. Establish close technical ties to the customer to facilitate rapid resolution of customer issues</li>\n</ul>\n<p><strong>Requirements:</strong></p>\n<ul>\n<li>BS/MS/PhD in Electrical/Computer Engineering, Computer Science, Statistics, Physics, or other Engineering fields or equivalent experience.</li>\n<li>3+ Years in Solutions Architecture with a proven track record of moving AI inference from POC to production in cloud computing environments including AWS, GCP, or Azure</li>\n<li>3+ years of hands-on experience with Deep Learning frameworks such as PyTorch and TensorFlow</li>\n<li>Excellent knowledge of the theory and practice of LLM and DL inference</li>\n<li>Strong fundamentals in programming, optimizations, and software design, especially in Python</li>\n<li>Experience with containerization and orchestration technologies like Docker and Kubernetes, monitoring, and observability solutions for AI deployments</li>\n<li>Knowledge of Inference technologies - NVIDIA NIM, TensorRT-LLM, Dynamo, Triton Inference Server, vLLM, etc</li>\n<li>Proficiency in problem-solving and debugging skills in GPU environments</li>\n<li>Excellent presentation, communication and collaboration skills</li>\n</ul>\n<p><strong>Nice to Have:</strong></p>\n<ul>\n<li>AWS, GCP or Azure Professional Solution Architect Certification.</li>\n<li>Experience optimizing and deploying large MoE LLMs at scale</li>\n<li>Active contributions to open-source AI inference projects (e.g., vLLM, TensorRT-LLM Dynamo, SGLang, Triton or similar)</li>\n<li>Experience with Multi-GPU Multi-node Inference technologies like Tensor Parallelism/Expert Parallelism, Disaggregated Serving, LWS, MPI, EFA/Infiniband, NVLink/PCIe, etc</li>\n<li>Experience in developing and integrating monitoring and alerting solutions using Prometheus, Grafana, and NVIDIA DCGM and GPU performance Analysis and tools like NVIDIA Nsight Systems</li>\n</ul>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_f8883394-0fc","directApply":true,"hiringOrganization":{"@type":"Organization","name":"NVIDIA","sameAs":"https://nvidia.wd5.myworkdayjobs.com","logo":"https://logos.yubhub.co/nvidia.com.png"},"x-apply-url":"https://nvidia.wd5.myworkdayjobs.com/en-US/NVIDIAExternalCareerSite/job/US-WA-Redmond/Solutions-Architect--AI-and-ML_JR2005988-1","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["Cloud Solution Architecture","GPU hardware and Software","Machine Learning (ML)","Deep Learning (DL)","Data Analytics","Cloud Computing Platforms","Kubernetes","TensorRT","TensorRT-LLM","vLLM","Dynamo","Triton Inference Server","Python","Containerization","Orchestration","Monitoring","Observability","Inference technologies","NVIDIA NIM","Problem-solving","Debugging","GPU environments"],"x-skills-preferred":["AWS","GCP","Azure","Professional Solution Architect Certification","Large MoE LLMs","Open-source AI inference projects","Multi-GPU Multi-node Inference technologies","Monitoring and alerting solutions","Prometheus","Grafana","NVIDIA DCGM","GPU performance Analysis","NVIDIA Nsight Systems"],"datePosted":"2026-03-09T20:45:22.711Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Redmond, CA, Santa Clara, Seattle"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Cloud Solution Architecture, GPU hardware and Software, Machine Learning (ML), Deep Learning (DL), Data Analytics, Cloud Computing Platforms, Kubernetes, TensorRT, TensorRT-LLM, vLLM, Dynamo, Triton Inference Server, Python, Containerization, Orchestration, Monitoring, Observability, Inference technologies, NVIDIA NIM, Problem-solving, Debugging, GPU environments, AWS, GCP, Azure, Professional Solution Architect Certification, Large MoE LLMs, Open-source AI inference projects, Multi-GPU Multi-node Inference technologies, Monitoring and alerting solutions, Prometheus, Grafana, NVIDIA DCGM, GPU performance Analysis, NVIDIA Nsight Systems"},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_9f9ededf-ecb"},"title":"Software Engineer, Frontier Clusters Infrastructure","description":"<p><strong>Software Engineer, Frontier Clusters Infrastructure</strong></p>\n<p><strong>Location</strong></p>\n<p>San Francisco</p>\n<p><strong>Employment Type</strong></p>\n<p>Full time</p>\n<p><strong>Department</strong></p>\n<p>Scaling</p>\n<p><strong>Compensation</strong></p>\n<ul>\n<li>$230K – $490K • Offers Equity</li>\n</ul>\n<p>The base pay offered may vary depending on multiple individualized factors, including market location, job-related knowledge, skills, and experience. If the role is non-exempt, overtime pay will be provided consistent with applicable laws. In addition to the salary range listed above, total compensation also includes generous equity, performance-related bonus(es) for eligible employees, and the following benefits.</p>\n<ul>\n<li>Medical, dental, and vision insurance for you and your family, with employer contributions to Health Savings Accounts</li>\n</ul>\n<ul>\n<li>Pre-tax accounts for Health FSA, Dependent Care FSA, and commuter expenses (parking and transit)</li>\n</ul>\n<ul>\n<li>401(k) retirement plan with employer match</li>\n</ul>\n<ul>\n<li>Paid parental leave (up to 24 weeks for birth parents and 20 weeks for non-birthing parents), plus paid medical and caregiver leave (up to 8 weeks)</li>\n</ul>\n<ul>\n<li>Paid time off: flexible PTO for exempt employees and up to 15 days annually for non-exempt employees</li>\n</ul>\n<ul>\n<li>13+ paid company holidays, and multiple paid coordinated company office closures throughout the year for focus and recharge, plus paid sick or safe time (1 hour per 30 hours worked, or more, as required by applicable state or local law)</li>\n</ul>\n<ul>\n<li>Mental health and wellness support</li>\n</ul>\n<ul>\n<li>Employer-paid basic life and disability coverage</li>\n</ul>\n<ul>\n<li>Annual learning and development stipend to fuel your professional growth</li>\n</ul>\n<ul>\n<li>Daily meals in our offices, and meal delivery credits as eligible</li>\n</ul>\n<ul>\n<li>Relocation support for eligible employees</li>\n</ul>\n<ul>\n<li>Additional taxable fringe benefits, such as charitable donation matching and wellness stipends, may also be provided.</li>\n</ul>\n<p>More details about our benefits are available to candidates during the hiring process.</p>\n<p>This role is at-will and OpenAI reserves the right to modify base pay and other compensation components at any time based on individual performance, team or company results, or market conditions.</p>\n<p><strong>About the Team</strong></p>\n<p>The Frontier Systems team at OpenAI builds, launches, and supports the largest supercomputers in the world that OpenAI uses for its most cutting edge model training.</p>\n<p>We take data center designs, turn them into real, working systems and build any software needed for running large-scale frontier model trainings.</p>\n<p>Our mission is to bring up, stabilize and keep these hyperscale supercomputers reliable and efficient during the training of the frontier models.</p>\n<p><strong>About the Role</strong></p>\n<p>We are looking for engineers to operate the next generation of compute clusters that power OpenAI’s frontier research.</p>\n<p>This role blends distributed systems engineering with hands-on infrastructure work on our largest datacenters. You will scale Kubernetes clusters to massive scale, automate bare-metal bring-up, and build the software layer that hides the complexity of a magnitude of nodes across multiple data centers.</p>\n<p>You will work at the intersection of hardware and software, where speed and reliability are critical. Expect to manage fast-moving operations, quickly diagnose and fix issues when things are on fire, and continuously raise the bar for automation and uptime.</p>\n<p><strong>In this role, you will:</strong></p>\n<ul>\n<li>Spin up and scale large Kubernetes clusters, including automation for provisioning, bootstrapping, and cluster lifecycle management</li>\n</ul>\n<ul>\n<li>Build software abstractions that unify multiple clusters and present a seamless interface to training workloads</li>\n</ul>\n<ul>\n<li>Own node bring-up from bare metal through firmware upgrades, ensuring fast, repeatable deployment at massive scale</li>\n</ul>\n<ul>\n<li>Improve operational metrics such as reducing cluster restart times (e.g., from hours to minutes) and accelerating firmware or OS upgrade cycles</li>\n</ul>\n<ul>\n<li>Integrate networking and hardware health systems to deliver end-to-end reliability across servers, switches, and data center infrastructure</li>\n</ul>\n<ul>\n<li>Develop monitoring and observability systems to detect issues early and keep clusters stable under extreme load</li>\n</ul>\n<p><strong>You might thrive in this role if you:</strong></p>\n<ul>\n<li>Have deep experience operating or scaling Kubernetes clusters or similar container orchestration systems in high-growth or hyperscale environments</li>\n</ul>\n<ul>\n<li>Bring strong programming or scripting skills (Python, Go, or similar) and familiarity with Infrastructure-as-Code tools such as Terraform or CloudFormation</li>\n</ul>\n<ul>\n<li>Are comfortable with bare-metal Linux environments, GPU hardware, and large-scale networking</li>\n</ul>\n<ul>\n<li>Enjoy solving fast-moving, high-impact operational problems and building automation to eliminate manual work</li>\n</ul>\n<ul>\n<li>Can balance careful engineering with the urgency of keeping mission-critical systems running</li>\n</ul>\n<p><strong>Qualifications</strong></p>\n<ul>\n<li>Experience as an infrastructure, systems, or distributed systems engineer in large-scale or high-availability environments</li>\n</ul>\n<ul>\n<li>Strong knowledge of Kubernetes internals, cluster scaling patterns, and containerized workloads</li>\n</ul>\n<ul>\n<li>Proficiency in cloud infrastructure concepts (compute, networking, storage, security) and in automating cluster or data center operations</li>\n</ul>\n<p>_Bonus: background with GPU workloads, firmware management, or high-performance computing_</p>\n<p><strong>About OpenAI</strong></p>\n<p>OpenAI is an AI research and deployment company dedicated to ensuring that general-purpose artificial intelligence benefits all of humanity. We push the boundaries of the capabilities of AI systems and seek to safely deploy them to the world through our products. AI is an extremely powerful tool that must be created with safety and human needs at its core, and to achieve our mission, we must encompass and value the many different perspectives, voices, and experiences that form the full spectrum of humanity.</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_9f9ededf-ecb","directApply":true,"hiringOrganization":{"@type":"Organization","name":"OpenAI","sameAs":"https://jobs.ashbyhq.com","logo":"https://logos.yubhub.co/openai.com.png"},"x-apply-url":"https://jobs.ashbyhq.com/openai/770d5c3f-4e72-4b49-aec4-d444e8ad7a64","x-work-arrangement":"onsite","x-experience-level":"mid","x-job-type":"full-time","x-salary-range":"$230K – $490K • Offers Equity","x-skills-required":["Kubernetes","Python","Go","Terraform","CloudFormation","Linux","GPU hardware","Large-scale networking"],"x-skills-preferred":["Infrastructure-as-Code","Cloud infrastructure concepts","Containerized workloads","Distributed systems engineering"],"datePosted":"2026-03-06T18:30:45.275Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Kubernetes, Python, Go, Terraform, CloudFormation, Linux, GPU hardware, Large-scale networking, Infrastructure-as-Code, Cloud infrastructure concepts, Containerized workloads, Distributed systems engineering","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":230000,"maxValue":490000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_33c8b32c-a06"},"title":"Site Reliability Engineer, Frontier Systems Infrastructure","description":"<p><strong>Site Reliability Engineer, Frontier Systems Infrastructure</strong></p>\n<p><strong>Location</strong></p>\n<p>San Francisco</p>\n<p><strong>Employment Type</strong></p>\n<p>Full time</p>\n<p><strong>Department</strong></p>\n<p>Scaling</p>\n<p><strong>Compensation</strong></p>\n<ul>\n<li>$255K – $490K • Offers Equity</li>\n</ul>\n<p>The base pay offered may vary depending on multiple individualized factors, including market location, job-related knowledge, skills, and experience. If the role is non-exempt, overtime pay will be provided consistent with applicable laws. In addition to the salary range listed above, total compensation also includes generous equity, performance-related bonus(es) for eligible employees, and the following benefits.</p>\n<ul>\n<li>Medical, dental, and vision insurance for you and your family, with employer contributions to Health Savings Accounts</li>\n</ul>\n<ul>\n<li>Pre-tax accounts for Health FSA, Dependent Care FSA, and commuter expenses (parking and transit)</li>\n</ul>\n<ul>\n<li>401(k) retirement plan with employer match</li>\n</ul>\n<ul>\n<li>Paid parental leave (up to 24 weeks for birth parents and 20 weeks for non-birthing parents), plus paid medical and caregiver leave (up to 8 weeks)</li>\n</ul>\n<ul>\n<li>Paid time off: flexible PTO for exempt employees and up to 15 days annually for non-exempt employees</li>\n</ul>\n<ul>\n<li>13+ paid company holidays, and multiple paid coordinated company office closures throughout the year for focus and recharge, plus paid sick or safe time (1 hour per 30 hours worked, or more, as required by applicable state or local law)</li>\n</ul>\n<ul>\n<li>Mental health and wellness support</li>\n</ul>\n<ul>\n<li>Employer-paid basic life and disability coverage</li>\n</ul>\n<ul>\n<li>Annual learning and development stipend to fuel your professional growth</li>\n</ul>\n<ul>\n<li>Daily meals in our offices, and meal delivery credits as eligible</li>\n</ul>\n<ul>\n<li>Relocation support for eligible employees</li>\n</ul>\n<ul>\n<li>Additional taxable fringe benefits, such as charitable donation matching and wellness stipends, may also be provided.</li>\n</ul>\n<p>More details about our benefits are available to candidates during the hiring process.</p>\n<p>This role is at-will and OpenAI reserves the right to modify base pay and other compensation components at any time based on individual performance, team or company results, or market conditions.</p>\n<p><strong>About the Team</strong></p>\n<p>The Frontier Systems team at OpenAI builds, launches, and supports the largest supercomputers in the world that OpenAI uses for its most cutting edge model training.</p>\n<p>We take data center designs, turn them into real, working systems and build any software needed for running large-scale frontier model trainings.</p>\n<p>Our mission is to bring up, stabilize and keep these hyperscale supercomputers reliable and efficient during the training of the frontier models.</p>\n<p><strong>About the Role</strong></p>\n<p>We are looking for engineers to operate the next generation of compute clusters that power OpenAI’s frontier research.</p>\n<p>This role blends distributed systems engineering with hands-on infrastructure work on our largest datacenters. You will scale Kubernetes clusters to massive scale, automate bare-metal bring-up, and build the software layer that hides the complexity of a magnitude of nodes across multiple data centers.</p>\n<p>You will work at the intersection of hardware and software, where speed and reliability are critical. Expect to manage fast-moving operations, quickly diagnose and fix issues when things are on fire, and continuously raise the bar for automation and uptime.</p>\n<p><strong>In this role, you will:</strong></p>\n<ul>\n<li>Spin up and scale large Kubernetes clusters, including automation for provisioning, bootstrapping, and cluster lifecycle management</li>\n</ul>\n<ul>\n<li>Build software abstractions that unify multiple clusters and present a seamless interface to training workloads</li>\n</ul>\n<ul>\n<li>Own node bring-up from bare metal through firmware upgrades, ensuring fast, repeatable deployment at massive scale</li>\n</ul>\n<ul>\n<li>Improve operational metrics such as reducing cluster restart times (e.g., from hours to minutes) and accelerating firmware or OS upgrade cycles</li>\n</ul>\n<ul>\n<li>Integrate networking and hardware health systems to deliver end-to-end reliability across servers, switches, and data center infrastructure</li>\n</ul>\n<ul>\n<li>Develop monitoring and observability systems to detect issues early and keep clusters stable under extreme load</li>\n</ul>\n<ul>\n<li>Be expected to execute at the same level as a software engineer</li>\n</ul>\n<p><strong>You might thrive in this role if you:</strong></p>\n<ul>\n<li>Have deep experience operating or scaling Kubernetes clusters or similar container orchestration systems in high-growth or hyperscale environments</li>\n</ul>\n<ul>\n<li>Bring strong programming or scripting skills (Python, Go, or similar) and familiarity with Infrastructure-as-Code tools such as Terraform or CloudFormation</li>\n</ul>\n<ul>\n<li>Are comfortable with bare-metal Linux environments, GPU hardware, and large-scale networking</li>\n</ul>\n<ul>\n<li>Enjoy solving fast-moving, high-impact operational problems and building automation to eliminate manual work</li>\n</ul>\n<ul>\n<li>Can balance careful engineering with the urgency of keeping mission-critical systems running</li>\n</ul>\n<p><strong>Qualifications</strong></p>\n<ul>\n<li>Experience as an infrastructure, systems, or distributed systems engineer in large-scale or high-availability environments</li>\n</ul>\n<ul>\n<li>Strong knowledge of Kubernetes internals, cluster scaling patterns, and containerized workloads</li>\n</ul>\n<ul>\n<li>Proficiency in cloud infrastructure concepts (compute, networking, storage, security) and in automating cluster or data center operations</li>\n</ul>\n<p>_Bonus: background with GPU workloads, firmware management, or high-performance computing_</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_33c8b32c-a06","directApply":true,"hiringOrganization":{"@type":"Organization","name":"OpenAI","sameAs":"https://jobs.ashbyhq.com","logo":"https://logos.yubhub.co/openai.com.png"},"x-apply-url":"https://jobs.ashbyhq.com/openai/ad2cf782-15a4-48c7-9133-1788e3f33bbb","x-work-arrangement":"onsite","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":"$255K – $490K • Offers Equity","x-skills-required":["Kubernetes","Python","Go","Terraform","CloudFormation","Linux","GPU hardware","Large-scale networking"],"x-skills-preferred":["Infrastructure-as-Code","Container orchestration","Distributed systems engineering","Cloud infrastructure concepts"],"datePosted":"2026-03-06T18:29:28.224Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"San Francisco"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"Kubernetes, Python, Go, Terraform, CloudFormation, Linux, GPU hardware, Large-scale networking, Infrastructure-as-Code, Container orchestration, Distributed systems engineering, Cloud infrastructure concepts","baseSalary":{"@type":"MonetaryAmount","currency":"USD","value":{"@type":"QuantitativeValue","minValue":255000,"maxValue":490000,"unitText":"YEAR"}}},{"@context":"https://schema.org","@type":"JobPosting","identifier":{"@type":"PropertyValue","name":"YubHub","value":"job_6a472088-02e"},"title":"Senior Rendering Software Engineer - Frostbite","description":"<p>We are looking for a hybrid remote/in-office Senior Software Engineer to join our Rendering team at our Orlando, FL studio and help shape the future of graphics for our EA titles. You and your team will work with our game teams to understand their rendering needs, ensuring the Frostbite engine empowers their success and amplifies their creative visions.</p>\n<p><strong>What you&#39;ll do</strong></p>\n<p>Work closely with game team engineers and the Frostbite rendering team to enable and deliver new rendering features Optimize solutions on multiple platforms to ensure CPU, GPU, and memory performance Collaborate with our game team partners to broaden our understanding of their technical landscape, including both opportunities and limitations Remain up-to-date with the latest hardware and domain advancements in real-time rendering Help enhance artist workflows to increase the content creation quality and efficiency</p>\n<p><strong>What you need</strong></p>\n<p>Strong C++ knowledge with at least 7 years of professional programming experience in real-time rendering Experience working in rendering for multiple shipped AAA titles on current-generation consoles (PS5, XBSX, PC) Significant experience using one or more low-level graphics APIs (e.g. DX12, Vulkan, Metal) Strong knowledge of modern CPU/GPU hardware architectures Significant experience with Graphics Debugging Tools (e.g. Pix, Renderdoc, Razor) Deep understanding of memory management and multi-threading with related debugging and optimization experience</p>\n<p style=\"margin-top:24px;font-size:13px;color:#666;\">XML job scraping automation by <a href=\"https://yubhub.co\">YubHub</a></p>","url":"https://yubhub.co/jobs/job_6a472088-02e","directApply":true,"hiringOrganization":{"@type":"Organization","name":"Electronic Arts","sameAs":"https://jobs.ea.com","logo":"https://logos.yubhub.co/jobs.ea.com.png"},"x-apply-url":"https://jobs.ea.com/en_US/careers/JobDetail/Senior-Software-Engineer-Rendering/212242","x-work-arrangement":"hybrid","x-experience-level":"senior","x-job-type":"full-time","x-salary-range":null,"x-skills-required":["C++","real-time rendering","low-level graphics APIs","modern CPU/GPU hardware architectures","Graphics Debugging Tools","memory management","multi-threading"],"x-skills-preferred":[],"datePosted":"2026-02-03T12:03:29.051Z","jobLocation":{"@type":"Place","address":{"@type":"PostalAddress","addressLocality":"Orlando, Florida"}},"employmentType":"FULL_TIME","occupationalCategory":"Engineering","industry":"Technology","skills":"C++, real-time rendering, low-level graphics APIs, modern CPU/GPU hardware architectures, Graphics Debugging Tools, memory management, multi-threading"}]}