@techreport{iza:izadps:dp16293,
  author      = {Leib, Margarita and Köbis, Nils and Rilke, Rainer Michael and Hagens, Marloes and Irlenbusch, Bernd},
  title       = {Corrupted by Algorithms? How {AI}-Generated and Human-Written Advice Shape (Dis)Honesty},
  year        = {2023},
  month       = jul,
  institution = {Institute of Labor Economics (IZA)},
  address     = {Bonn},
  type        = {IZA Discussion Paper},
  number      = {16293},
  url         = {https://www.iza.org/index.php/publications/dp16293},
  abstract    = {Artificial Intelligence (AI) increasingly becomes an indispensable advisor. New ethical concerns arise if AI persuades people to behave dishonestly. In an experiment, we study how AI advice (generated by a Natural-Language-processing algorithm) affects (dis)honesty, compare it to equivalent human advice, and test whether transparency about advice source matters. We find that dishonesty-promoting advice increases dishonesty, whereas honesty-promoting advice does not increase honesty. This is the case for both AI and human advice. Algorithmic transparency, a commonly proposed policy to mitigate AI risks, does not affect behaviour. The findings mark the first steps towards managing AI advice responsibly.},
  keywords    = {behavioural ethics;machine behaviour;Artificial Intelligence;advice},
}