@inbook{4592a7bc5c9e429ba51db86dea1df16b,
title = "Algorithmic sentencing: Drawing lessons from human factors research",
abstract = "Researchers in the field of “human factors” have long been aware that when humans devolve certain of their functions to technology, the transfer from human to machine can restructure more than the division of labor between them: humans’ perceptions of themselves and their abilities may also change. Such findings are relevant to the use of algorithmic and data-driven technologies, but whether they hold up in the specific context of recidivism risk assessment is only beginning to be considered. This chapter describes and analyzes some pertinent human factors results, and assesses the extent to which they pose a problem for the use of algorithms in the sentencing of offenders. While the findings from human factors research are themselves robust, they do not seem to translate neatly to the judicial sphere. The incentives, objectives, and ideologies of sentencing judges appear to upset the usual pattern of results seen in many other domains of human factors research.",
keywords = "automation bias, automation complacency, human factors, human-computer interaction, sentencing",
author = "John Zerilli",
year = "2021",
doi = "10.1093/oso/9780197539538.003.0009",
language = "English",
isbn = "9780197539538",
series = "Studies in Penal Theory and Philosophy",
publisher = "Oxford University Press",
pages = "165--183",
editor = "Ryberg, Jesper and Roberts, {Julian V.}",
booktitle = "Sentencing and Artificial Intelligence",
address = "United States",
}