@inproceedings{7b47f147b1284d5980db7cf3a715f2ea,
title = "Handling inconsistent and uncertain legal reasoning for AI vehicles design",
abstract = "As AI products continue to evolve, legal problems are increasingly emerging for the engineers who design them. For example, if the aim is to build an autonomous vehicle (AV) that adheres to current laws, should we give it the ability to ignore a red traffic light in an emergency, or is this merely an excuse we permit humans to make? The paper argues that some of the changes brought by AVs are best understood as necessitating a revision of law{\textquoteright}s ontology. Current laws are often ambiguous, inconsistent or undefined when it comes to technologies that make use of AI. Engineers would benefit from decision support tools that provide them with legal advice and guidance on their design decisions. This research aims at exploring a new representation of legal ontology by importing argumentation theory and constructing a trustworthy legal decision system. While the ideas are generally applicable to AI products, our initial focus has been on AVs.",
author = "Yiwei Lu and Yuhui Lin and Burkhard Schafer and Andrew Ireland and Lachlan Urquhart and Zhe Yu",
year = "2023",
month = may,
day = "20",
doi = "10.48550/arXiv.2305.12203",
language = "English",
series = "Logic in Computing",
publisher = "ArXiv",
pages = "76--89",
editor = "Satoh, {Ken} and Georg Borges and Schweighofer, {Erich}",
booktitle = "Proceedings of the International Workshop on Methodologies for Translating Legal Norms into Formal Representations (LN2FR 2022)",
}