@techreport{iza:izadps:dp18517,
  author      = {Irlenbusch, Bernd and Rau, Holger A. and Rilke, Rainer Michael},
  title       = {Human--{AI} Evaluation and Gender Transparency: Application Decisions in Competitive Hiring},
  year        = {2026},
  month       = apr,
  institution = {Institute of Labor Economics (IZA)},
  address     = {Bonn},
  type        = {IZA Discussion Paper},
  number      = {18517},
  url         = {https://www.iza.org/publications/dp18517},
  abstract    = {We study how human versus LLM-based evaluation and gender transparency shape entry into competitive jobs. In a preregistered online experiment, participants first complete a Niederle and Vesterlund (2007) tournament task to measure competitive preferences, then prepare text-based job applications and decide whether to apply under each of four evaluation regimes---human only, LLM only, and two hybrid human-in-the-loop configurations---while gender disclosure is randomized between subjects. LLM involvement reduces application rates, with stronger effects for women than men, including under hybrid designs. Effects are driven by non-competitive candidates; non-competitive women, the group most exposed to AI-induced deterrence, receive the strongest objective evaluations under pure AI assessment across all subgroups, yet are systematically underconfident and apply least often. Competitive men persistently apply and exhibit overconfidence-driven adverse selection, whereas competitive women show resilience to AI-induced deterrence while remaining well-calibrated under AI evaluation and exhibiting positive self-selection across regimes. We find no effects of gender transparency.},
  keywords    = {AI hiring; LLMs; algorithm aversion; gender differences},
}