Search | arXiv e-print repository
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1–17 of 17 results for author: <span class="mathjax">Habli, I</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&query=Habli%2C+I">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Habli, I"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Habli%2C+I&terms-0-field=author&size=50&order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Habli, I"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.16220">arXiv:2406.16220</a> <span> [<a href="https://arxiv.org/pdf/2406.16220">pdf</a>, <a href="https://arxiv.org/format/2406.16220">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Learning Run-time Safety Monitors for Machine Learning Components </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Vardal%2C+O">Ozan Vardal</a>, <a href="/search/cs?searchtype=author&query=Hawkins%2C+R">Richard Hawkins</a>, <a href="/search/cs?searchtype=author&query=Paterson%2C+C">Colin Paterson</a>, <a href="/search/cs?searchtype=author&query=Picardi%2C+C">Chiara Picardi</a>, <a href="/search/cs?searchtype=author&query=Omeiza%2C+D">Daniel Omeiza</a>, <a href="/search/cs?searchtype=author&query=Kunze%2C+L">Lars Kunze</a>, <a href="/search/cs?searchtype=author&query=Habli%2C+I">Ibrahim Habli</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.16220v1-abstract-short" style="display: inline;"> For machine learning components used as part of autonomous systems (AS) in carrying out critical tasks it is crucial that assurance of the models can be maintained in the face of post-deployment changes (such as changes in the operating environment of the system). A critical part of this is to be able to monitor when the performance of the model at runtime (as a result of changes) poses a safety r… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.16220v1-abstract-full').style.display = 'inline'; document.getElementById('2406.16220v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.16220v1-abstract-full" style="display: none;"> For machine learning components used as part of autonomous systems (AS) in carrying out critical tasks it is crucial that assurance of the models can be maintained in the face of post-deployment changes (such as changes in the operating environment of the system). A critical part of this is to be able to monitor when the performance of the model at runtime (as a result of changes) poses a safety risk to the system. This is a particularly difficult challenge when ground truth is unavailable at runtime. 
   In this paper we introduce a process for creating safety monitors for ML components through the use of degraded datasets and machine learning. The safety monitor is deployed to the AS in parallel to the ML component to provide a prediction of the safety risk associated with the model output. We demonstrate the viability of our approach through initial experiments using publicly available speed sign datasets.
   Submitted 23 June, 2024; originally announced June 2024.
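   The abstract describes the approach only at a high level. As a rough illustration of the idea, the sketch below trains a secondary "monitor" model on deliberately degraded inputs, labelled by whether the primary model still classifies them correctly; `primary` (any scikit-learn-style classifier) and `degrade` (a corruption function) are hypothetical stand-ins, and the paper's actual process may differ substantially.

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier

def build_monitor_dataset(primary, X, y, degrade, levels=(0.0, 0.25, 0.5)):
    """Degrade inputs at several severities and label each case by whether
    the primary model still classifies it correctly (1 = unsafe)."""
    features, risk = [], []
    for level in levels:
        X_deg = degrade(X, level)                 # e.g. noise/blur at this severity
        proba = primary.predict_proba(X_deg)
        preds = primary.predict(X_deg)
        # Monitor inputs: the primary model's sorted confidence profile,
        # available at runtime even when ground truth is not.
        features.append(np.sort(proba, axis=1)[:, ::-1])
        risk.append((preds != y).astype(int))
    return np.vstack(features), np.concatenate(risk)

def train_monitor(primary, X, y, degrade):
    X_mon, y_mon = build_monitor_dataset(primary, X, y, degrade)
    return RandomForestClassifier(n_estimators=200, random_state=0).fit(X_mon, y_mon)

# Deployed in parallel with the ML component:
#   risk = monitor.predict_proba(confidence_profile)[:, 1]
#   escalate to the AS safety function when risk exceeds a set threshold.
```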
2. arXiv:2406.09029 [cs.CY] — https://arxiv.org/abs/2406.09029
   Fair by design: A sociotechnical approach to justifying the fairness of AI-enabled systems across the lifecycle
   Authors: Marten H. L. Kaas, Christopher Burr, Zoe Porter, Berk Ozturk, Philippa Ryan, Michael Katell, Nuala Polo, Kalle Westerling, Ibrahim Habli
   Abstract: Fairness is one of the most commonly identified ethical principles in existing AI guidelines, and the development of fair AI-enabled systems is required by new and emerging AI regulation. But most approaches to addressing the fairness of AI-enabled systems are limited in scope in two significant ways: their substantive content focuses on statistical measures of fairness, and they do not emphasize the need to identify and address fairness considerations across the whole AI lifecycle. Our contribution is to present an assurance framework and tool that enable a practical and transparent method for widening the scope of fairness considerations across the AI lifecycle and for moving the discussion beyond mere statistical notions of fairness towards a richer, context-dependent analysis. To illustrate this approach, we first describe and then apply the framework of Trustworthy and Ethical Assurance (TEA) to an AI-enabled clinical diagnostic support system (CDSS) whose purpose is to help clinicians predict the risk of developing hypertension in patients with Type 2 diabetes, a context in which several fairness considerations arise (e.g., discrimination against patient subgroups). This is supplemented by an open-source tool and a fairness considerations map to help facilitate reasoning about the fairness of AI-enabled systems in a participatory way. In short, by using a shared framework for identifying, documenting and justifying fairness considerations, and then using this deliberative exercise to structure an assurance case, research on AI fairness becomes reusable and generalizable for others in the ethical AI community and for sharing best practices for achieving fairness and equity in digital health and healthcare in particular.
   Submitted 13 June, 2024; originally announced June 2024.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 2 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.15236">arXiv:2403.15236</a> <span> [<a href="https://arxiv.org/pdf/2403.15236">pdf</a>, <a href="https://arxiv.org/format/2403.15236">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.jss.2024.112034">10.1016/j.jss.2024.112034 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> ACCESS: Assurance Case Centric Engineering of Safety-critical Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Wei%2C+R">Ran Wei</a>, <a href="/search/cs?searchtype=author&query=Foster%2C+S">Simon Foster</a>, <a href="/search/cs?searchtype=author&query=Mei%2C+H">Haitao Mei</a>, <a href="/search/cs?searchtype=author&query=Yan%2C+F">Fang Yan</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+R">Ruizhe Yang</a>, <a href="/search/cs?searchtype=author&query=Habli%2C+I">Ibrahim Habli</a>, <a href="/search/cs?searchtype=author&query=O%27Halloran%2C+C">Colin O'Halloran</a>, <a href="/search/cs?searchtype=author&query=Tudor%2C+N">Nick Tudor</a>, <a href="/search/cs?searchtype=author&query=Kelly%2C+T">Tim Kelly</a>, <a href="/search/cs?searchtype=author&query=Nemouchi%2C+Y">Yakoub Nemouchi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.15236v2-abstract-short" style="display: inline;"> Assurance cases are used to communicate and assess confidence in critical system properties such as safety and security. Historically, assurance cases have been manually created documents, which are evaluated by system stakeholders through lengthy and complicated processes. In recent years, model-based system assurance approaches have gained popularity to improve the efficiency and quality of syst… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.15236v2-abstract-full').style.display = 'inline'; document.getElementById('2403.15236v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.15236v2-abstract-full" style="display: none;"> Assurance cases are used to communicate and assess confidence in critical system properties such as safety and security. Historically, assurance cases have been manually created documents, which are evaluated by system stakeholders through lengthy and complicated processes. In recent years, model-based system assurance approaches have gained popularity to improve the efficiency and quality of system assurance activities. This becomes increasingly important, as systems becomes more complex, it is a challenge to manage their development life-cycles, including coordination of development, verification and validation activities, and change impact analysis in inter-connected system assurance artifacts. 
3. arXiv:2403.15236 [cs.SE] — https://arxiv.org/abs/2403.15236 — DOI: 10.1016/j.jss.2024.112034
   ACCESS: Assurance Case Centric Engineering of Safety-critical Systems
   Authors: Ran Wei, Simon Foster, Haitao Mei, Fang Yan, Ruizhe Yang, Ibrahim Habli, Colin O'Halloran, Nick Tudor, Tim Kelly, Yakoub Nemouchi
   Abstract: Assurance cases are used to communicate and assess confidence in critical system properties such as safety and security. Historically, assurance cases have been manually created documents, which are evaluated by system stakeholders through lengthy and complicated processes. In recent years, model-based system assurance approaches have gained popularity to improve the efficiency and quality of system assurance activities. This becomes increasingly important as systems become more complex: it is a challenge to manage their development life-cycles, including coordination of development, verification and validation activities, and change impact analysis in inter-connected system assurance artifacts. Moreover, as Robotics and Autonomous Systems (RAS) are adopted into society, there is a need for assurance cases that support evolution during the operational life of the system, to enable continuous assurance in the face of an uncertain environment. In this paper, we contribute ACCESS (Assurance Case Centric Engineering of Safety-critical Systems), an engineering methodology, together with its tool support, for the development of safety-critical systems around evolving model-based assurance cases. We show how model-based system assurance cases can trace to heterogeneous engineering artifacts (e.g. system architectural models, system safety analysis, system behaviour models, etc.), and how formal methods can be integrated during the development process. We demonstrate how assurance cases can be automatically evaluated both at development time and at runtime. We apply our approach to a case study based on an Autonomous Underwater Vehicle (AUV).
   Submitted 16 April, 2024; v1 submitted 22 March, 2024; originally announced March 2024.
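   The abstract does not detail ACCESS's metamodel, but the notion of an assurance case that can be "automatically evaluated both at development time and at runtime" can be sketched as a toy claim structure whose evidence is an executable check; the class names and the battery example are hypothetical, not taken from the paper or its tooling.

```python
from dataclasses import dataclass, field
from typing import Callable, List

@dataclass
class Claim:
    """Toy GSN-style claim: it holds when its own evidence check passes
    and all of its sub-claims hold, so the case can be re-run automatically."""
    text: str
    check: Callable[[], bool] = lambda: True
    subclaims: List["Claim"] = field(default_factory=list)

    def holds(self) -> bool:
        return self.check() and all(c.holds() for c in self.subclaims)

def battery_level() -> float:
    return 0.83  # stand-in for live AUV telemetry

case = Claim("AUV mission is acceptably safe", subclaims=[
    Claim("Power reserves meet the abort-and-surface budget",
          check=lambda: battery_level() > 0.2),
])
print(case.holds())  # True while telemetry satisfies the check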
4. arXiv:2401.09459 [cs.CY, cs.AI] — https://arxiv.org/abs/2401.09459
   What's my role? Modelling responsibility for AI-based safety-critical systems
   Authors: Philippa Ryan, Zoe Porter, Joanna Al-Qaddoumi, John McDermid, Ibrahim Habli
   Abstract: AI-Based Safety-Critical Systems (AI-SCS) are being increasingly deployed in the real world. These can pose a risk of harm to people and the environment. Reducing that risk is an overarching priority during development and operation. As more AI-SCS become autonomous, a layer of risk management via human intervention has been removed. Following an accident, it will be important to identify causal contributions and the different responsible actors behind those, both to learn from mistakes and to prevent similar future events. Many authors have commented on the "responsibility gap", where it is difficult for developers and manufacturers to be held responsible for the harmful behaviour of an AI-SCS. This is due to the complex development cycle for AI, uncertainty in AI performance, and the dynamic operating environment. A human operator can become a "liability sink", absorbing blame for the consequences of AI-SCS outputs they were not responsible for creating and may not even understand. This cross-disciplinary paper considers different senses of responsibility (role, moral, legal and causal), and how they apply in the context of AI-SCS safety. We use a core concept (Actor(A) is responsible for Occurrence(O)) to create role-responsibility models, producing a practical method to capture responsibility relationships and provide clarity on the previously identified responsibility issues. Our paper demonstrates the approach with two examples: a retrospective analysis of the fatal collision in Tempe, Arizona involving an autonomous vehicle, and a safety-focused predictive role-responsibility analysis for an AI-based diabetes co-morbidity predictor. In both examples our primary focus is on safety, aiming to reduce unfair or disproportionate blame being placed on operators or developers. We present a discussion and avenues for future research.
   Submitted 30 December, 2023; originally announced January 2024.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">22 pages, 7 figures, 2 tables</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.0; K.4.0 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.02608">arXiv:2308.02608</a> <span> [<a href="https://arxiv.org/pdf/2308.02608">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Unravelling Responsibility for AI </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Porter%2C+Z">Zoe Porter</a>, <a href="/search/cs?searchtype=author&query=Ryan%2C+P">Philippa Ryan</a>, <a href="/search/cs?searchtype=author&query=Morgan%2C+P">Phillip Morgan</a>, <a href="/search/cs?searchtype=author&query=Al-Qaddoumi%2C+J">Joanna Al-Qaddoumi</a>, <a href="/search/cs?searchtype=author&query=Twomey%2C+B">Bernard Twomey</a>, <a href="/search/cs?searchtype=author&query=McDermid%2C+J">John McDermid</a>, <a href="/search/cs?searchtype=author&query=Habli%2C+I">Ibrahim Habli</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.02608v2-abstract-short" style="display: inline;"> It is widely acknowledged that we need to establish where responsibility lies for the outputs and impacts of AI-enabled systems. But without a clear and precise understanding of what "responsibility" means, deliberations about where responsibility lies will be, at best, unfocused and incomplete and, at worst, misguided. To address this concern, this paper draws upon central distinctions in philoso… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.02608v2-abstract-full').style.display = 'inline'; document.getElementById('2308.02608v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.02608v2-abstract-full" style="display: none;"> It is widely acknowledged that we need to establish where responsibility lies for the outputs and impacts of AI-enabled systems. But without a clear and precise understanding of what "responsibility" means, deliberations about where responsibility lies will be, at best, unfocused and incomplete and, at worst, misguided. To address this concern, this paper draws upon central distinctions in philosophy and law to clarify the concept of responsibility for AI for policymakers, practitioners, researchers and students from non-philosophical and non-legal backgrounds. Taking the three-part formulation "Actor A is responsible for Occurrence O," the paper unravels the concept of responsibility to clarify that there are different possibilities of who is responsible for AI, the senses in which they are responsible, and aspects of events they are responsible for. 
5. arXiv:2308.02608 [cs.AI, cs.CY, cs.RO] — https://arxiv.org/abs/2308.02608
   Unravelling Responsibility for AI
   Authors: Zoe Porter, Philippa Ryan, Phillip Morgan, Joanna Al-Qaddoumi, Bernard Twomey, John McDermid, Ibrahim Habli
   Abstract: It is widely acknowledged that we need to establish where responsibility lies for the outputs and impacts of AI-enabled systems. But without a clear and precise understanding of what "responsibility" means, deliberations about where responsibility lies will be, at best, unfocused and incomplete and, at worst, misguided. To address this concern, this paper draws upon central distinctions in philosophy and law to clarify the concept of responsibility for AI for policymakers, practitioners, researchers and students from non-philosophical and non-legal backgrounds. Taking the three-part formulation "Actor A is responsible for Occurrence O," the paper unravels the concept of responsibility to clarify that there are different possibilities of who is responsible for AI, the senses in which they are responsible, and the aspects of events they are responsible for. Criteria and conditions for fitting attributions of responsibility in the core senses (causal responsibility, role-responsibility, liability responsibility and moral responsibility) are articulated to promote an understanding of when responsibility attributions would be inappropriate or unjust. The analysis is presented with a graphical notation to facilitate informal diagrammatic reasoning and discussion about specific cases. It is illustrated by application to a scenario of a fatal collision between an autonomous AI-enabled ship and a traditional, crewed vessel at sea.
   Submitted 8 May, 2024; v1 submitted 4 August, 2023; originally announced August 2023.

6. arXiv:2305.14182 [cs.CY] — https://arxiv.org/abs/2305.14182 — DOI: 10.1145/3597512.3599713
   Ethics in conversation: Building an ethics assurance case for autonomous AI-enabled voice agents in healthcare
   Authors: Marten H. L. Kaas, Zoe Porter, Ernest Lim, Aisling Higham, Sarah Khavandi, Ibrahim Habli
   Abstract: The deployment and use of AI systems should be both safe and broadly ethically acceptable. The principles-based ethics assurance argument pattern is one proposal in the AI ethics landscape that seeks to support and achieve that aim. The purpose of this argument pattern or framework is to structure reasoning about, and to communicate and foster confidence in, the ethical acceptability of uses of specific real-world AI systems in complex socio-technical contexts. This paper presents the interim findings of a case study applying this ethics assurance framework to the use of Dora, an AI-based telemedicine system, to assess its viability and usefulness as an approach. The case study process to date has revealed some of the positive ethical impacts of the Dora platform, as well as unexpected insights and areas to prioritise for evaluation, such as risks to the frontline clinician, particularly in respect of clinician autonomy. The ethics assurance argument pattern offers a practical framework not just for identifying issues to be addressed, but also for starting to construct solutions in the form of adjustments to the distribution of benefits, risks and constraints on human autonomy that could reduce ethical disparities across affected stakeholders. Though many challenges remain, this research represents a step towards the development and use of safe and ethically acceptable AI systems and, ideally, a shift towards more comprehensive and inclusive evaluations of AI systems in general.
   Submitted 23 May, 2023; originally announced May 2023.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">19 pages, 3 figures, 1 table, pre-print of paper for Trustworthy Autonomous Systems conference</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> TAS 2023: Proceedings of the First International Symposium on Trustworthy Autonomous Systems </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2302.10292">arXiv:2302.10292</a> <span> [<a href="https://arxiv.org/pdf/2302.10292">pdf</a>, <a href="https://arxiv.org/format/2302.10292">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-031-40953-0_28">10.1007/978-3-031-40953-0_28 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> AERoS: Assurance of Emergent Behaviour in Autonomous Robotic Swarms </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Abeywickrama%2C+D+B">Dhaminda B. Abeywickrama</a>, <a href="/search/cs?searchtype=author&query=Wilson%2C+J">James Wilson</a>, <a href="/search/cs?searchtype=author&query=Lee%2C+S">Suet Lee</a>, <a href="/search/cs?searchtype=author&query=Chance%2C+G">Greg Chance</a>, <a href="/search/cs?searchtype=author&query=Winter%2C+P+D">Peter D. Winter</a>, <a href="/search/cs?searchtype=author&query=Manzini%2C+A">Arianna Manzini</a>, <a href="/search/cs?searchtype=author&query=Habli%2C+I">Ibrahim Habli</a>, <a href="/search/cs?searchtype=author&query=Windsor%2C+S">Shane Windsor</a>, <a href="/search/cs?searchtype=author&query=Hauert%2C+S">Sabine Hauert</a>, <a href="/search/cs?searchtype=author&query=Eder%2C+K">Kerstin Eder</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2302.10292v1-abstract-short" style="display: inline;"> The behaviours of a swarm are not explicitly engineered. Instead, they are an emergent consequence of the interactions of individual agents with each other and their environment. This emergent functionality poses a challenge to safety assurance. The main contribution of this paper is a process for the safety assurance of emergent behaviour in autonomous robotic swarms called AERoS, following the g… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2302.10292v1-abstract-full').style.display = 'inline'; document.getElementById('2302.10292v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2302.10292v1-abstract-full" style="display: none;"> The behaviours of a swarm are not explicitly engineered. Instead, they are an emergent consequence of the interactions of individual agents with each other and their environment. This emergent functionality poses a challenge to safety assurance. 
   The main contribution of this paper is AERoS, a process for the safety assurance of emergent behaviour in autonomous robotic swarms, following the guidance on the Assurance of Machine Learning for use in Autonomous Systems (AMLAS). We explore our proposed process using a case study centred on a robot swarm operating a public cloakroom.
   Submitted 20 February, 2023; originally announced February 2023.
   Comments: 12 pages, 11 figures.
   ACM Class: D.2.1; I.2.11

8. arXiv:2209.00421 [cs.LG, cs.SE] — https://arxiv.org/abs/2209.00421
   Review of the AMLAS Methodology for Application in Healthcare
   Authors: Shakir Laher, Carla Brackstone, Sara Reis, An Nguyen, Sean White, Ibrahim Habli
   Abstract: In recent years, the number of machine learning (ML) technologies gaining regulatory approval for healthcare has increased significantly, allowing them to be placed on the market.
   However, the regulatory frameworks applied to them were originally devised for traditional software, which has largely rule-based behaviour, in contrast to the data-driven, learnt behaviour of ML. As the frameworks are in the process of reformation, there is a need to proactively assure the safety of ML to prevent patient safety from being compromised. The Assurance of Machine Learning for use in Autonomous Systems (AMLAS) methodology was developed by the Assuring Autonomy International Programme based on well-established concepts in system safety. This review has appraised the methodology by consulting ML manufacturers to understand whether it converges with or diverges from their current safety assurance practices, whether there are gaps and limitations in its structure, and whether it is fit for purpose when applied to the healthcare domain. Through this work we offer the view that there is clear utility for AMLAS as a safety assurance methodology when applied to healthcare machine learning technologies, although the development of healthcare-specific supplementary guidance would benefit those implementing the methodology.
   Submitted 1 September, 2022; originally announced September 2022.

9. arXiv:2208.00853 [cs.SE, eess.SY] — https://arxiv.org/abs/2208.00853
   Guidance on the Safety Assurance of Autonomous Systems in Complex Environments (SACE)
   Authors: Richard Hawkins, Matt Osborne, Mike Parsons, Mark Nicholson, John McDermid, Ibrahim Habli
   Abstract: Autonomous systems (AS) are systems that have the capability to take decisions free from direct human control. AS are increasingly being considered for adoption in applications where their behaviour may cause harm, such as autonomous driving, medical applications or domestic environments.
   For such applications, being able to ensure and demonstrate (assure) the safe operation of the AS is crucial for adoption. This can be particularly challenging where AS operate in complex and changing real-world environments. Establishing justified confidence in the safety of AS requires the creation of a compelling safety case. This document introduces a methodology for the Safety Assurance of Autonomous Systems in Complex Environments (SACE). SACE comprises a set of safety case patterns and a process for (1) systematically integrating safety assurance into the development of the AS and (2) generating the evidence base for explicitly justifying the acceptable safety of the AS.
   Submitted 1 August, 2022; originally announced August 2022.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> D.2.0 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2203.15370">arXiv:2203.15370</a> <span> [<a href="https://arxiv.org/pdf/2203.15370">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/s43681-023-00297-2">10.1007/s43681-023-00297-2 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> A Principles-based Ethics Assurance Argument Pattern for AI and Autonomous Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Porter%2C+Z">Zoe Porter</a>, <a href="/search/cs?searchtype=author&query=Habli%2C+I">Ibrahim Habli</a>, <a href="/search/cs?searchtype=author&query=McDermid%2C+J">John McDermid</a>, <a href="/search/cs?searchtype=author&query=Kaas%2C+M">Marten Kaas</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2203.15370v4-abstract-short" style="display: inline;"> An assurance case is a structured argument, typically produced by safety engineers, to communicate confidence that a critical or complex system, such as an aircraft, will be acceptably safe within its intended context. Assurance cases often inform third party approval of a system. One emerging proposition within the trustworthy AI and autonomous systems (AI/AS) research community is to use assuran… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.15370v4-abstract-full').style.display = 'inline'; document.getElementById('2203.15370v4-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2203.15370v4-abstract-full" style="display: none;"> An assurance case is a structured argument, typically produced by safety engineers, to communicate confidence that a critical or complex system, such as an aircraft, will be acceptably safe within its intended context. Assurance cases often inform third party approval of a system. One emerging proposition within the trustworthy AI and autonomous systems (AI/AS) research community is to use assurance cases to instil justified confidence that specific AI/AS will be ethically acceptable when operational in well-defined contexts. This paper substantially develops the proposition and makes it concrete. It brings together the assurance case methodology with a set of ethical principles to structure a principles-based ethics assurance argument pattern. The principles are justice, beneficence, non-maleficence, and respect for human autonomy, with the principle of transparency playing a supporting role. The argument pattern, shortened to the acronym PRAISE, is described. 
    The objective of the proposed PRAISE argument pattern is to provide a reusable template for individual ethics assurance cases, by which engineers, developers, operators, or regulators could justify, communicate, or challenge a claim about the overall ethical acceptability of the use of a specific AI/AS in a given socio-technical context. We apply the pattern to the hypothetical use case of an autonomous robo-taxi service in a city centre.
    Submitted 6 June, 2023; v1 submitted 29 March, 2022; originally announced March 2022.
    Journal ref: AI and Ethics 2023

11. arXiv:2203.05830 [cs.RO, eess.SY] — https://arxiv.org/abs/2203.05830
    Analysing Ultra-Wide Band Positioning for Geofencing in a Safety Assurance Context
    Authors: Victoria Hodge, Richard Hawkins, James Hilder, Ibrahim Habli
    Abstract: There is a desire to move towards more flexible and automated factories. To enable this, we need to assure the safety of these dynamic factories. This safety assurance must be achieved in a manner that does not unnecessarily constrain the systems and thus negate the benefits of flexibility and automation. We previously developed a modular safety assurance approach, using safety contracts, as a way to achieve this.
    In this case study we show how this approach can be applied to Autonomous Guided Vehicles (AGV) operating as part of a dynamic factory, and why it is necessary. We empirically evaluate commercial, indoor fog/edge localisation technology to provide geofencing for hazardous areas in a laboratory. The experiments determine how factors such as AGV speeds, tag transmission timings, control software and AGV capabilities affect the ability of the AGV to stop outside the hazardous areas. We describe how this approach could be used to create a safety case for the AGV operation.
    Submitted 11 March, 2022; originally announced March 2022.
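    The abstract lists AGV speed and tag transmission timing among the factors studied; a worst-case stopping-distance bound shows why those factors size the geofence buffer. The formula and numbers below are a back-of-envelope sketch, not measurements from the paper.

```python
def worst_case_stop(v, update_period, latency, decel):
    """Distance travelled after crossing a geofence boundary: up to one
    positioning-update period plus control latency at full speed, then a
    constant-deceleration braking phase."""
    return v * (update_period + latency) + v ** 2 / (2 * decel)

# e.g. 1.5 m/s AGV, 100 ms UWB update period, 50 ms control latency, 0.8 m/s^2 braking
print(f"hazard buffer must exceed {worst_case_stop(1.5, 0.10, 0.05, 0.8):.2f} m")  # 1.63 m
```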
arXiv:2109.00520 (https://arxiv.org/abs/2109.00520) [pdf, other] cs.LG (Machine Learning), cs.AI (Artificial Intelligence)
The Role of Explainability in Assuring Safety of Machine Learning in Healthcare
Authors: Yan Jia, John McDermid, Tom Lawton, Ibrahim Habli
Abstract: Established approaches to assuring safety-critical systems and software are difficult to apply to systems employing ML, where there is no clear, pre-defined specification against which to assess validity. This problem is exacerbated by the "opaque" nature of ML, where the learnt model is not amenable to human scrutiny. Explainable AI (XAI) methods have been proposed to tackle this issue by producing human-interpretable representations of ML models which can help users to gain confidence and build trust in the ML system. However, little work explicitly investigates the role of explainability for safety assurance in the context of ML development. This paper identifies ways in which XAI methods can contribute to the safety assurance of ML-based systems. It then uses a concrete ML-based clinical decision support system, concerning the weaning of patients from mechanical ventilation, to demonstrate how XAI methods can be employed to produce evidence to support safety assurance. The results are also represented in a safety argument to show where, and in what way, XAI methods can contribute to a safety case. Overall, we conclude that XAI methods have a valuable role in the safety assurance of ML-based systems in healthcare, but that they are not sufficient in themselves to assure safety.
Submitted 5 May, 2022; v1 submitted 1 September, 2021; originally announced September 2021.
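As a flavour of the kind of evidence an XAI method can produce, the sketch below applies SHAP feature attributions, one widely used XAI technique and not necessarily the paper's exact method, to a stand-in classifier. The feature names, data and model are invented; the point is the final step, where attributions are summarised into an artefact a clinical reviewer could inspect before it is cited in a safety argument.

```python
# Illustrative sketch only: SHAP attributions as assurance evidence that a
# model relies on clinically plausible features. The model, features and
# data are hypothetical stand-ins, not the paper's ventilation-weaning system.
import numpy as np
import shap
from sklearn.ensemble import RandomForestClassifier

rng = np.random.default_rng(0)
features = ["resp_rate", "tidal_volume", "spo2", "sedation_score"]
X = rng.normal(size=(500, len(features)))
# Synthetic label: outcome driven mainly by resp_rate and spo2.
y = ((X[:, 0] - X[:, 2] + rng.normal(scale=0.5, size=500)) > 0).astype(int)

model = RandomForestClassifier(n_estimators=100, random_state=0).fit(X, y)
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(X)

# shap's return shape varies by version: a per-class list in older releases,
# a single array (possibly with a trailing class axis) in newer ones.
vals = np.abs(np.asarray(shap_values[1] if isinstance(shap_values, list)
                         else shap_values))
importance = vals.mean(axis=0)
if importance.ndim > 1:
    importance = importance[:, -1]

# A reviewer can check this ranking against clinical expectation.
for name, imp in sorted(zip(features, importance), key=lambda t: -t[1]):
    print(f"{name:15s} {imp:.3f}")
```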
arXiv:2102.01564 (https://arxiv.org/abs/2102.01564) [pdf, other] cs.LG (Machine Learning), cs.AI (Artificial Intelligence)
Guidance on the Assurance of Machine Learning in Autonomous Systems (AMLAS)
Authors: Richard Hawkins, Colin Paterson, Chiara Picardi, Yan Jia, Radu Calinescu, Ibrahim Habli
Abstract: Machine Learning (ML) is now used in a range of systems with results that are reported to exceed, under certain conditions, human performance. Many of these systems, in domains such as healthcare, automotive and manufacturing, exhibit high degrees of autonomy and are safety-critical. Establishing justified confidence in ML forms a core part of the safety case for these systems. In this document we introduce a methodology for the Assurance of Machine Learning for use in Autonomous Systems (AMLAS). AMLAS comprises a set of safety case patterns and a process for (1) systematically integrating safety assurance into the development of ML components and (2) generating the evidence base for explicitly justifying the acceptable safety of these components when integrated into autonomous system applications.
Submitted 2 February, 2021; originally announced February 2021.
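Safety case patterns are ultimately structured arguments, and it can help to see one rendered as data. The sketch below is a generic, invented illustration of a claims-plus-evidence tree with a trivial completeness check; it does not reproduce AMLAS's actual patterns, and all claim text is hypothetical.

```python
# Minimal sketch of a machine-checkable safety-argument structure of the
# kind that safety case patterns instantiate. Claim wording is invented
# for illustration; it is not AMLAS's pattern content.
from dataclasses import dataclass, field

@dataclass
class Claim:
    text: str
    evidence: list[str] = field(default_factory=list)   # leaf support
    subclaims: list["Claim"] = field(default_factory=list)

    def unsupported(self) -> list[str]:
        """Return leaf claims that cite no evidence."""
        if not self.subclaims:
            return [] if self.evidence else [self.text]
        return [t for c in self.subclaims for t in c.unsupported()]

argument = Claim(
    "ML component is acceptably safe in its system context",
    subclaims=[
        Claim("Safety requirements are valid for the operating domain",
              evidence=["hazard analysis report"]),
        Claim("Learnt model satisfies its ML safety requirements",
              evidence=["test results on representative data"]),
        Claim("Deployed model is monitored for distribution shift"),  # gap
    ],
)
print("Unsupported claims:", argument.unsupported())
```

The deliberate gap in the third subclaim shows the kind of incompleteness such a check surfaces before an argument is put forward.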
arXiv:2101.05620 (https://arxiv.org/abs/2101.05620) [pdf] cs.LG (Machine Learning), cs.CY (Computers and Society)
A Framework for Assurance of Medication Safety using Machine Learning
Authors: Yan Jia, Tom Lawton, John McDermid, Eric Rojas, Ibrahim Habli
Abstract: Medication errors continue to be the leading cause of avoidable patient harm in hospitals. This paper sets out a framework to assure medication safety that combines machine learning and safety engineering methods. It uses safety analysis to proactively identify potential causes of medication error, based on expert opinion. As healthcare is now data-rich, it is possible to augment safety analysis with machine learning to discover actual causes of medication error from the data, and to identify where they deviate from what was predicted in the safety analysis. Combining these two views has the potential to enable the risk of medication errors to be managed proactively and dynamically. We apply the framework to a case study involving thoracic surgery, e.g. oesophagectomy, where errors in giving beta-blockers can be critical to controlling atrial fibrillation. This case study combines a HAZOP-based safety analysis method known as SHARD with Bayesian network structure learning and process mining to produce the analysis results, showing the potential of the framework for ensuring patient safety, and for transforming the way that safety is managed in complex healthcare environments.
Submitted 11 January, 2021; originally announced January 2021.
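The "discover actual causes from data" step can be pictured with a small structure-learning example. The sketch below assumes pgmpy's HillClimbSearch/BicScore API (which varies across pgmpy versions) and uses entirely synthetic variables around beta-blocker administration; it illustrates the technique, not the paper's analysis.

```python
# Illustrative sketch (hypothetical data and variables): learning a Bayesian
# network structure from event data, one ingredient of such a framework.
# Assumes pgmpy's HillClimbSearch/BicScore API.
import numpy as np
import pandas as pd
from pgmpy.estimators import HillClimbSearch, BicScore

rng = np.random.default_rng(1)
n = 1000
# Hypothetical binary variables around beta-blocker administration.
prescribed = rng.integers(0, 2, n)
administered = (prescribed & (rng.random(n) > 0.1)).astype(int)  # ~10% omissions
afib = ((1 - administered) & (rng.random(n) > 0.5)).astype(int)
data = pd.DataFrame({"prescribed": prescribed,
                     "administered": administered,
                     "atrial_fibrillation": afib})

# Score-based search for a DAG that fits the data.
dag = HillClimbSearch(data).estimate(scoring_method=BicScore(data))

# In the framework's spirit: compare the learnt edges with the causal links
# predicted by the SHARD analysis; unexpected edges flag deviations to probe.
print(sorted(dag.edges()))
```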
arXiv:2005.08381 (https://arxiv.org/abs/2005.08381) [pdf] cs.CY (Computers and Society)
DOI: 10.1136/bmjhci-2020-100165
Enhancing Covid-19 Decision-Making by Creating an Assurance Case for Simulation Models
Authors: Ibrahim Habli, Rob Alexander, Richard Hawkins, Mark Sujan, John McDermid, Chiara Picardi, Tom Lawton
Abstract: Simulation models have been informing the COVID-19 policy-making process. These models, therefore, have a significant influence on the risk of societal harms. But how clearly are the underlying modelling assumptions and limitations communicated so that decision-makers can readily understand them? When making claims about risk in safety-critical systems, it is common practice to produce an assurance case: a structured argument, supported by evidence, whose aim is to assess how confident we should be in our risk-based decisions. We argue that any COVID-19 simulation model that is used to guide critical policy decisions would benefit from being supported with such a case, explaining how, and to what extent, the evidence from the simulation can be relied on to substantiate policy conclusions. This would enable a critical review of the implicit assumptions and inherent uncertainty in modelling, and would give the overall decision-making process greater transparency and accountability.
Submitted 17 May, 2020; originally announced May 2020.
Comments: 6 pages and 2 figures
Journal ref: BMJ Health & Care Informatics 2020;27:e100165
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2005.08381v1-abstract-full').style.display = 'none'; document.getElementById('2005.08381v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 May, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">6 pages and 2 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> BMJ Health & Care Informatics 2020;27:e100165 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1703.06350">arXiv:1703.06350</a> <span> [<a href="https://arxiv.org/pdf/1703.06350">pdf</a>, <a href="https://arxiv.org/format/1703.06350">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> </div> <p class="title is-5 mathjax"> Engineering Trustworthy Self-Adaptive Software with Dynamic Assurance Cases </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Calinescu%2C+R">Radu Calinescu</a>, <a href="/search/cs?searchtype=author&query=Weyns%2C+D">Danny Weyns</a>, <a href="/search/cs?searchtype=author&query=Gerasimou%2C+S">Simos Gerasimou</a>, <a href="/search/cs?searchtype=author&query=Iftikhar%2C+M+U">M. Usman Iftikhar</a>, <a href="/search/cs?searchtype=author&query=Habli%2C+I">Ibrahim Habli</a>, <a href="/search/cs?searchtype=author&query=Kelly%2C+T">Tim Kelly</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1703.06350v2-abstract-short" style="display: inline;"> Building on concepts drawn from control theory, self-adaptive software handles environmental and internal uncertainties by dynamically adjusting its architecture and parameters in response to events such as workload changes and component failures. Self-adaptive software is increasingly expected to meet strict functional and non-functional requirements in applications from areas as diverse as manuf… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1703.06350v2-abstract-full').style.display = 'inline'; document.getElementById('1703.06350v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1703.06350v2-abstract-full" style="display: none;"> Building on concepts drawn from control theory, self-adaptive software handles environmental and internal uncertainties by dynamically adjusting its architecture and parameters in response to events such as workload changes and component failures. Self-adaptive software is increasingly expected to meet strict functional and non-functional requirements in applications from areas as diverse as manufacturing, healthcare and finance. To address this need, we introduce a methodology for the systematic ENgineering of TRUstworthy Self-adaptive sofTware (ENTRUST). 
ENTRUST uses a combination of (1) design-time and runtime modelling and verification, and (2) industry-adopted assurance processes to develop trustworthy self-adaptive software and assurance cases arguing the suitability of the software for its intended application. To evaluate the effectiveness of our methodology, we present a tool-supported instance of ENTRUST and its use to develop proof-of-concept self-adaptive software for embedded and service-based systems from the oceanic monitoring and e-finance domains, respectively. The experimental results show that ENTRUST can be used to engineer self-adaptive software systems in different application domains and to generate dynamic assurance cases for these systems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1703.06350v2-abstract-full').style.display = 'none'; document.getElementById('1703.06350v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 November, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 March, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2017. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">29 pages, 24 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> D.2.11; D.2.18; D.2.4.e; D.2 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1404.6802">arXiv:1404.6802</a> <span> [<a href="https://arxiv.org/pdf/1404.6802">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> </div> <p class="title is-5 mathjax"> Formalism of Requirements for Safety-Critical Software: Where Does the Benefit Come From? </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Habli%2C+I">Ibrahim Habli</a>, <a href="/search/cs?searchtype=author&query=Rae%2C+A">Andrew Rae</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1404.6802v1-abstract-short" style="display: inline;"> Safety and assurance standards often rely on the principle that requirements errors can be minimised by expressing the requirements more formally. Although numerous case studies have shown that the act of formalising previously informal requirements finds requirements errors, this principle is really just a hypothesis. An industrially persuasive causal relationship between formalisation and better… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1404.6802v1-abstract-full').style.display = 'inline'; document.getElementById('1404.6802v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1404.6802v1-abstract-full" style="display: none;"> Safety and assurance standards often rely on the principle that requirements errors can be minimised by expressing the requirements more formally. 
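Self-adaptive software of the kind ENTRUST targets is typically organised as a monitor-analyse-plan-execute (MAPE) control loop. The toy loop below is only a sketch of that control structure, with an invented requirement and adaptation rule; ENTRUST's actual tool-supported instance couples such a loop to design-time and runtime verification, which is not reproduced here.

```python
# Illustrative MAPE-loop sketch (hypothetical requirement and adaptation
# rule). The loop switches a toy service configuration when the observed
# failure rate drifts above a threshold requirement.
import random

REQUIREMENT = 0.05  # hypothetical: failure rate must stay below 5%

def monitor(window):           # Monitor: summarise runtime evidence
    return sum(window) / len(window)

def analyse(failure_rate):     # Analyse: is the requirement at risk?
    return failure_rate > REQUIREMENT

def plan(current_replicas):    # Plan: choose a new configuration
    return current_replicas + 1

replicas, window = 1, []
for step in range(200):
    # Execute: toy model in which failures shrink as replicas are added.
    window.append(1 if random.random() < 0.12 / replicas else 0)
    window = window[-50:]
    if len(window) == 50 and analyse(monitor(window)):
        replicas = plan(replicas)
        window = []            # re-evaluate under the new configuration
        print(f"step {step}: adapted to {replicas} replicas")
```

In a dynamic assurance case, each adaptation would be accompanied by re-verified evidence that the new configuration still satisfies the requirements, rather than the bare threshold test used here.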
arXiv:1404.6802 (https://arxiv.org/abs/1404.6802) [pdf] cs.SE (Software Engineering)
Formalism of Requirements for Safety-Critical Software: Where Does the Benefit Come From?
Authors: Ibrahim Habli, Andrew Rae
Abstract: Safety and assurance standards often rely on the principle that requirements errors can be minimised by expressing the requirements more formally. Although numerous case studies have shown that the act of formalising previously informal requirements finds requirements errors, this principle is really just a hypothesis: an industrially persuasive causal relationship between formalisation and better requirements has yet to be established. We describe multiple competing explanations for this hypothesis, in terms of the levels of precision, re-formulation, expertise, effort and automation that are typically associated with formalising requirements. We then propose an experiment to distinguish between these explanations, without necessarily excluding the possibility that none of them is correct.
Submitted 27 April, 2014; originally announced April 2014.
Comments: EDCC-2014, AESSCS 2014; keywords: software, formal methods, safety, certification
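To make the formalisation hypothesis concrete: writing an informal requirement formally forces latent ambiguities into the open. The example below is invented, not from the paper; it gives a bounded-response reading of an informal alarm requirement and checks it mechanically over finite traces.

```python
# Illustrative sketch (invented requirement), showing what formalisation buys.
#
# Informal: "The alarm shall sound when pressure is high."
#   Ambiguities: within how long? while high, or once per episode?
#
# One formalisation, stated informally in bounded-response LTL style:
#   G (pressure_high -> F<=2 alarm)
# i.e. whenever pressure is high, the alarm sounds within 2 time steps.

def satisfies_bounded_response(trace, bound=2):
    """trace: list of (pressure_high, alarm) booleans, one pair per step."""
    for i, (high, _) in enumerate(trace):
        if high and not any(alarm for _, alarm in trace[i:i + bound + 1]):
            return False
    return True

ok_trace  = [(False, False), (True, False), (True, True), (False, False)]
bad_trace = [(True, False), (False, False), (False, False), (False, False)]
print(satisfies_bounded_response(ok_trace))   # True
print(satisfies_bounded_response(bad_trace))  # False: alarm never sounded
```

Whether the error-finding benefit comes from the precision of the formula, the re-formulation effort, or the ability to run checks like this one is exactly the question the proposed experiment is designed to separate.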
class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>