ICSE 2025 - Journal-first Papers

Sat 26 April - Sun 4 May 2025, Ottawa, Ontario, Canada
Venue: Rogers Centre, Ottawa (formerly Shaw Centre)

ICSE, the IEEE/ACM International Conference on Software Engineering, is the premier software engineering conference. It will be held April 27 - May 3, 2025 in Ottawa. Core conference days will be Wednesday April 30 to Friday May 2. 2025 marks the 50th anniversary of ICSE, which was first held in 1975! ICSE provides a forum where researchers, practitioners, and educators gather together to present and discuss research results, innovations, trends, experiences and issues in the field of software engineering. Check the menu above for the program overview (and especially the keynotes, workshop ...

Attending
- Venue: Rogers Centre, Ottawa (formerly Shaw Centre)
- Registration
- Hotels
- Visa and Travel Authorization
- Travelling to Ottawa
- Travel Support
- Getting Around Ottawa
- Food at ICSE 2025
- Social Media
- Fun Activities
- Code of Conduct
- Equity, Diversity, and Inclusion Plan
- Sustainability

Sponsors
- ICSE 2025 Sponsors
- Applying to Be a Sponsor of ICSE 2025

Program
- Program Overview
- Keynotes
- Panels
  - Panel: Escaped from the Lab! Does ICSE Research Make a Difference?
  - Panel: The Future of Software Engineering Beyond the Hype of AI
- Receptions and Banquet
- SWEBOK Summit
- Tutorials
- Networking Events
- Meetings and BoF Events
- Technical Briefings
- Recreational Activities
- Submitting to ICSE 2025: Q&A
- CyBOK BoF
- IEEE TSE 50th Anniversary
- EU Horizon Program Session
- Tentative Main Conference Presentation Schedule

Tracks: ICSE 2025
- Main Plenaries
- Research Track
- SE in Practice (SEIP)
- SE in Society (SEIS)
- New Ideas and Emerging Results (NIER)
- Journal-first Papers
- Demonstrations
- Artifact Evaluation
- Industry Challenge Track
- Software Engineering Education
- Posters
- Doctoral Symposium
- Shadow PC
- Workshops
- Tutorials and Technical Briefings
- New Faculty Symposium
- Symposium on Software Engineering in the Global South (SEiGS)
- SRC - ACM Student Research Competition
- Social and Networking
- Meetings and BoFs
- Student Mentoring Workshop (SMeW)
- Student Volunteers

Tracks: Co-hosted Conferences
- AST
- CAIN: Research and Experience Papers; Doctoral Symposium; Industry Talks; Posters; Scope
- CHASE: Research Track; Journal First and Journal Fast; Doctoral and Early Career Symposium (DECS)
- CSEE&T: Software Engineering Education
- FORGE: Research Papers; Industry Papers; Data and Benchmarking; Tutorials; Keynotes
- FormaliSE: Research Track; Artifact Track
- ICPC: Research Track; Early Research Achievements (ERA); Journal First; Replications and Negative Results (RENE); Tool Demonstration; Vaclav Rajlich Early Career Award
- ICSR
- MOBILESoft: Research Track; App Track
- MSR: Technical Papers; Data and Tool Showcase Track; Industry Track; Mining Challenge; Registered Reports; Vision and Reflection; Tutorials; Junior PC; Keynotes; MSR Awards; FOSS Award
- SEAMS: Research Track; Artifact Track
- TechDebt: Technical Papers; Industry Track; Journal First; Junior Program Committee

Tracks: Workshops
- AIOps: AI for Cloud Service
- APR: Automated Program Repair
- BotSE: Bots in SE
- DeepTest: Deep Learning <-> Testing
- Designing: Software Design
- EnCyCriS: Cybersecurity of Critical Systems
- FTW: Flaky Tests Workshop
- FinanSE: SE Challenges in Financial Firms
- GAS: Games and SE
- GE@ICSE: Gender Equality
- GI: Genetic Improvement
- GREENS: Green and Sustainable Software
- Gamify: Gamification in Development + V&V
- IDE: Integrated Development Environments
- IWSiB
- LLM4Code: Large Language Models for Code
- MO2RE: Multi-Disciplinary Requirements Engineering
- NLBSE: Natural Language Based SE
- NSE: Neuro-Symbolic SE
- Q-SE: Quantum SE
- RAIE: Responsible AI Engineering
- RAISE: Requirements Engineering for AI-Powered Software
- RoSE: Robotics Software Engineering
- SATrends: Software Architecture Trends
- SBFT: Search-Based and Fuzz Testing
- SE4ADS: SE for Autonomous Driving Systems
- SERP4IoT: SE for the Internet of Things
- SESoS: Systems of Systems and Ecosystems
- STATIC: Advancing Static Analysis
- SVM: Software Vulnerability Management
- WETSEB: SE for Blockchain
- WSESE: Empirical Studies in SE

Organization
- ICSE 2025 Organizing Committee and per-track committees (Research Track; SEIP; SEIS; NIER; Journal-first Papers; Demonstrations; Artifact Evaluation; Industry Challenge Track; Posters; Doctoral Symposium; Shadow PC; Workshops; Tutorials and Technical Briefings; New Faculty Symposium; SEiGS; SRC; SMeW; Student Volunteers)
- Organizing, program, and steering committees for each co-hosted conference and workshop
- People Index of contributors
href="https://conf.researchr.org/committee/icse-2025/svm-2025-papers-program-committee" class="navigate">Program Committee</a></li></ul></li><li class="dropdown dropdown-submenu "><a href="#" data-toggle="dropdown" class="dropdown-toggle"><strong>WETSEB</strong></a><ul class="block dropdown-menu"><li class="block small"><a href="https://conf.researchr.org/home/icse-2025/wetseb-2025" class="navigate"><span class="glyphicon glyphicon-home"></span> N/A - check homepage</a></li></ul></li><li class="dropdown dropdown-submenu "><a href="#" data-toggle="dropdown" class="dropdown-toggle"><strong>WSESE</strong></a><ul class="block dropdown-menu"><li class="block"><a href="https://conf.researchr.org/committee/icse-2025/wsese-2025-papers-organizing-committee" class="navigate">Organizing Committee</a></li><li class="block"><a href="https://conf.researchr.org/committee/icse-2025/wsese-2025-papers-web-chair" class="navigate">Web Chair</a></li><li class="block"><a href="https://conf.researchr.org/committee/icse-2025/wsese-2025-papers-proceedings-chair" class="navigate">Proceedings Chair</a></li><li class="block"><a href="https://conf.researchr.org/committee/icse-2025/wsese-2025-papers-program-committee" class="navigate">Program Committee</a></li></ul></li></ul></div></div></ul></li><li class="block"><a href="https://conf.researchr.org/search/icse-2025//all" class="navigate"><span class="glyphicon glyphicon-search"></span><span class="hidden-sm"> Search</span></a></li><li class="dropdown"><a href="#" data-toggle="dropdown" class="dropdown-toggle"><span class="hidden-sm">Series <span class="caret"></span></span><span title="Series" class="visible-sm"><span class="glyphicon glyphicon-repeat"></span> <span class="caret"></span></span></a><ul class="block dropdown-menu"><li class="block"><a href="https://conf.researchr.org/series/icse" class="navigate"><span class="glyphicon glyphicon-home"></span> Series</a></li><li class="block divider"></li><li class="block"><a href="https://conf.researchr.org/home/icse-2026" class="navigate">ICSE 2026</a></li><li class="block"><span class="text-muted" style="margin-left: 2px;"><span class="glyphicon glyphicon-hand-right"></span> ICSE 2025</span></li><li class="block"><a href="https://conf.researchr.org/home/icse-2024" class="navigate">ICSE 2024</a></li><li class="block"><a href="https://conf.researchr.org/home/icse-2023" class="navigate">ICSE 2023</a></li><li class="block"><a href="https://conf.researchr.org/home/icse-2022" class="navigate">ICSE 2022</a></li><li class="block"><a href="https://2021.icse-conferences.org" class="navigate">ICSE 2021</a></li><li class="block"><a href="https://2020.icse-conferences.org" class="navigate">ICSE 2020</a></li><li class="block"><a href="https://2019.icse-conferences.org" class="navigate">ICSE 2019</a></li><li class="block"><a href="https://conf.researchr.org/home/icse-2018" class="navigate">* ICSE 2018 *</a></li></ul></li></ul><div class="navbar-right"><ul class="block nav navbar-nav"><li class="block"><a href="https://conf.researchr.org/signin/icse-2025/https%3A%5Es%5Esconf.researchr.org%5Estrack%5Esicse-2025%5Esicse-2025-journal-first-papers" rel="nofollow" class="navigate">Sign in</a></li><li class="block"><a href="https://conf.researchr.org/signup/icse-2025" class="navigate">Sign up</a></li></ul></div></div></div></div><div id="content" class="container"><div class="page-header"><span class="text-muted small"><span class="glyphicon glyphicon-home"></span> <a href="https://conf.researchr.org/home/icse-2025" class="navigate">ICSE 
## Call for Contributions

ICSE has formed partnerships with prestigious software engineering journals to incorporate journal-first papers into the ICSE program. Through this initiative, authors of journal-first papers accepted in the partnering journals are invited to present their work at ICSE, providing an opportunity for the authors to engage directly with the community and offering ICSE attendees an additional dimension to the research track program.

The journals that support the journal-first model as partners with ICSE are:

- IEEE Transactions on Software Engineering (IEEE TSE),
- ACM Transactions on Software Engineering and Methodology (ACM TOSEM),
- Empirical Software Engineering (EMSE).

### Scope

A submission to the ICSE 2025 call for journal-first paper presentations must adhere to the following criteria:

- The associated journal paper must have been accepted by a journal from the above list no earlier than November 1st, 2023 and no later than October 10th, 2024.
- The paper is in the scope of the conference.
- The paper does not exclusively report a secondary study, e.g., systematic reviews, mapping studies, or surveys.
- The paper reports completely new research results and/or presents novel contributions that significantly extend and were not previously reported in prior work.
  - The paper does not extend prior work solely with additional proofs or algorithms (or other such details presented for completeness), additional empirical results, or minor enhancements or variants of the results presented in the prior work.
  - As a rough guide, a journal-first paper should have at least 70% new content over and above the content of previous publications. As such, the expectation is that an extension of a full 8-10 page conference or workshop paper would not be deemed a journal-first paper.
- The paper has not been presented at, and is not under consideration for, journal-first programs of other conferences.

### How to Submit

The authors of any paper that meets the above criteria are invited to submit a (maximum) one-page presentation proposal consisting of the paper's title, the paper's authors, an extended abstract, and a pointer to the original journal paper at the journal's Web site.
If the journal paper is related to or builds on previously published work (such as a tool demo or a poster), then the proposal must clearly and explicitly justify why the paper should be considered a journal-first paper.

The template to use is the [IEEE conference proceedings template, specified in the IEEE Conference Proceedings Formatting Guidelines](https://www.ieee.org/conferences/publishing/templates.html) (title in 24pt font and full text in 10pt type; LaTeX users must use \documentclass[10pt,conference]{IEEEtran} without including the compsoc or compsocconf options). Note that these submissions will *not* be published; the specified format simply gives all submissions a consistent look to facilitate the selection process.

By submitting your article to an IEEE publication, you acknowledge that you and your co-authors are subject to all IEEE Publications Policies.

Submission site: https://icse2025-jf.hotcrp.com/

Submissions must not exceed 1 page.

### Evaluation and Selection

Authors will be invited to present their paper at ICSE 2025 after a check that the paper satisfies the criteria listed above. As the papers have already been reviewed and accepted by the journals, they will not be reviewed again for technical content. If an exceptionally high number of submissions is received, not all papers will be selected. Priority will be given to papers that:

- increase opportunities for authors who might not otherwise attend ICSE; in particular, priority will be given to papers whose specified presenter is not presenting other journal-first or main research track papers;
- best fit the technical program, offering a balance across the conference topics: preference will be given to topics that are under-represented in the other tracks;
- would be ineligible as a journal-first presentation at the next SE3 conference (ICSE/FSE/ASE), because their acceptance date precedes the next conference's window of journal acceptance dates for journal-first presentations.

If there is further need to select among papers with the same priority, they will be selected randomly. However, we will do our best to avoid this situation.

### Important Dates

- Journal First Submissions Deadline: 21 October 2024
- Journal First Acceptance Notification: 10 December 2024
- Submissions close at 23:59 AoE (Anywhere on Earth, UTC-12)

### Conference Attendance Expectation

If a submission is accepted for the journal-first program, the specified presenter must register for and attend the full 3-day technical conference and present the paper. The presentation is expected to be delivered in person, unless this is impossible due to travel limitations (related to, e.g., health, visa, or COVID-19 prevention). Each journal-first presentation will be scheduled in a session with topically related Technical Track, NIER, SEIP, and/or SEIS papers. The journal-first manuscripts are published through the journals and will not be part of the ICSE proceedings.
The journal-first papers will be listed in the conference program.

## Accepted Papers

The following papers published in journals have been accepted for presentation in the ICSE 2025 Journal First Track, subject to an author registering to attend the conference.

##### Hao Li, Gopi Krishnan Rajbahadur, Cor-Paul Bezemer, "Studying the Impact of TensorFlow and PyTorch Bindings on Machine Learning Software Quality"

**Abstract:** Bindings for machine learning frameworks (such as TensorFlow and PyTorch) allow developers to integrate a framework's functionality using a programming language different from the framework's default language (usually Python). In this paper, we study the impact of using TensorFlow and PyTorch bindings in C#, Rust, Python, and JavaScript on software quality in terms of correctness (training and test accuracy) and time cost (training and inference time) when training and performing inference on five widely used deep learning models. Our experiments show that a model can be trained in one binding and used for inference in another binding for the same framework without losing accuracy. Our study is the first to show that using a non-default binding can help improve machine learning software quality from the time-cost perspective compared to the default Python binding while still achieving the same level of correctness.

Tags: "Testing and Quality", "AI for SE"

##### Jaeseong Lee, Simin Chen, Austin Mordahl, Cong Liu, Wei Yang, Shiyi Wei, "Automated Testing Linguistic Capabilities of NLP Models"

**Abstract:** Natural language processing (NLP) has gained widespread adoption in the development of real-world applications. However, the black-box nature of neural networks in NLP applications poses a challenge when evaluating their performance, let alone ensuring it. Recent research has proposed testing techniques to enhance the trustworthiness of NLP-based applications. However, most existing work uses a single, aggregated metric (i.e., accuracy), which makes it difficult for users to assess NLP model performance on fine-grained aspects, such as linguistic capabilities (LCs). To address this limitation, we present ALiCT, an automated testing technique for validating NLP applications based on their LCs. ALiCT takes user-specified LCs as inputs and produces a diverse test suite with test oracles for each given LC. We evaluate ALiCT on two widely adopted NLP tasks, sentiment analysis and hate speech detection, in terms of diversity, effectiveness, and consistency. Using Self-BLEU and syntactic diversity metrics, our findings reveal that ALiCT generates test cases that are 190% and 2213% more diverse in semantics and syntax, respectively, compared to those produced by state-of-the-art techniques. In addition, ALiCT is capable of producing a larger number of NLP model failures in 22 out of 25 LCs over the two NLP applications.

Tags: "Formal methods", "Testing and Quality"
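The Self-BLEU diversity metric cited above is easy to picture: each generated sentence is scored with BLEU against all the others as references, and a lower average means a more diverse test suite. The sketch below is a minimal illustration with NLTK, not the ALiCT implementation; the example sentences are made up.

```python
# Minimal Self-BLEU sketch (illustrative only; not the ALiCT implementation).
# Requires NLTK: pip install nltk
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

def self_bleu(sentences):
    """Average BLEU of each sentence against all others as references.

    Lower Self-BLEU indicates a more diverse set of generated test cases.
    """
    smooth = SmoothingFunction().method1
    tokenized = [s.split() for s in sentences]
    scores = []
    for i, hypothesis in enumerate(tokenized):
        references = tokenized[:i] + tokenized[i + 1:]
        scores.append(sentence_bleu(references, hypothesis,
                                    smoothing_function=smooth))
    return sum(scores) / len(scores)

suite = [
    "the movie was surprisingly good",
    "this film is a complete disappointment",
    "the movie was surprisingly good indeed",
]
print(f"Self-BLEU: {self_bleu(suite):.3f}")
```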
##### SayedHassan Khatoonabadi, Ahmad Abdellatif, Diego Elias Costa, Emad Shihab, "Predicting the First Response Latency of Maintainers and Contributors in Pull Requests"

**Abstract:** The success of a Pull Request (PR) depends on the responsiveness of the maintainers and the contributor during the review process. Being aware of the expected waiting times can lead to better interactions and managed expectations for both the maintainers and the contributor. In this paper, we propose a machine-learning approach to predict the first response latency of the maintainers following the submission of a PR, and the first response latency of the contributor after receiving the first response from the maintainers. We curate a dataset of 20 large and popular open-source projects on GitHub and extract 21 features to characterize projects, contributors, PRs, and review processes. Using these features, we then evaluate seven types of classifiers to identify the best-performing models. We also conduct permutation feature importance and SHAP analyses to understand the importance and the impact of different features on the predicted response latencies. We find that our CatBoost models are the most effective for predicting the first response latencies of both maintainers and contributors. Compared to a dummy classifier that always returns the majority class, these models achieved an average improvement of 29% in AUC-ROC and 51% in AUC-PR for maintainers, as well as 39% in AUC-ROC and 89% in AUC-PR for contributors across the studied projects. The results indicate that our models can aptly predict the first response latencies using the selected features. We also observe that PRs submitted earlier in the week, containing an average number of commits, and with concise descriptions are more likely to receive faster first responses from the maintainers. Similarly, PRs with a lower first response latency from maintainers, that received the maintainers' first response earlier in the week, and that contain an average number of commits tend to receive faster first responses from the contributors. Additionally, contributors with a higher acceptance rate and a history of timely responses in the project are likely to both obtain and provide faster first responses. Moreover, we show the effectiveness of our approach in a cross-project setting. Finally, we discuss key guidelines for maintainers, contributors, and researchers to help facilitate the PR review process.

Tags: "Prog Comprehension/Reeng/Maint", "AI for SE", "Open Source"
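The evaluation style described above (classifier, AUC, permutation feature importance) is straightforward to reproduce with scikit-learn. The sketch below uses GradientBoostingClassifier as a stand-in for CatBoost and synthetic data in place of the curated GitHub features; everything here is illustrative, not the authors' pipeline.

```python
# Illustrative sketch: train a boosted classifier, report AUC-ROC, and run
# permutation feature importance. Synthetic data; GradientBoostingClassifier
# is a stand-in for the CatBoost models used in the paper.
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.inspection import permutation_importance
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 4))     # e.g., #commits, description length, ...
y = (X[:, 0] + 0.5 * X[:, 1] + rng.normal(scale=0.5, size=500) > 0).astype(int)

X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
model = GradientBoostingClassifier(random_state=0).fit(X_tr, y_tr)

print("AUC-ROC:", roc_auc_score(y_te, model.predict_proba(X_te)[:, 1]))
imp = permutation_importance(model, X_te, y_te, n_repeats=10, random_state=0)
print("Permutation importances:", imp.importances_mean)
```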
##### Zachary Karas, Aakash Bansal, Yifan Zhang, Toby Jia-Jun Li, Collin McMillan, Yu Huang, "A Tale of Two Comprehensions? Analyzing Student Programmer Attention During Code Summarization"

**Abstract:** Code summarization is the task of creating short, natural language descriptions of source code. It is an important part of code comprehension and a powerful method of documentation. Previous work has made progress in identifying where programmers focus in code as they write their own summaries (i.e., Writing). However, there is currently a gap in studying programmers' attention as they read code with pre-written summaries (i.e., Reading). As a result, it is currently unknown how these two forms of code comprehension, Reading and Writing, compare. Also, there is a limited understanding of programmer attention with respect to program semantics. We address these shortcomings with a human eye-tracking study (n = 27) comparing Reading and Writing. We examined programmers' attention with respect to fine-grained program semantics, including their attention sequences (i.e., scan paths). We find distinctions in programmer attention across the comprehension tasks, similarities in reading patterns between them, and differences mediated by demographic factors. This can help guide code comprehension in both computer science education and automated code summarization. Furthermore, we mapped programmers' gaze data onto the Abstract Syntax Tree to explore another representation of human attention. We find that visual behavior on this structure is not always consistent with that on source code.

Tags: "User experience", "Education"
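Mapping gaze data onto an AST, as the study above does, can be approximated for Python code with the standard ast module: attribute each fixation's (line, column) coordinate to the innermost AST node whose source span covers it. The sketch below is a simplified illustration with a hypothetical fixation, not the authors' tooling.

```python
# Simplified sketch: attribute a gaze fixation (line, col) to the innermost
# AST node covering it. Python's ast module records source spans on nodes.
import ast

SOURCE = """def add(a, b):
    total = a + b
    return total
"""

def node_at(tree, line, col):
    """Return the innermost AST node whose source span contains (line, col)."""
    covering = []
    for node in ast.walk(tree):
        if getattr(node, "lineno", None) is None:
            continue
        start = (node.lineno, node.col_offset)
        end = (node.end_lineno, node.end_col_offset)
        if start <= (line, col) < end:
            covering.append((start, (-end[0], -end[1]), node))
    if not covering:
        return None
    covering.sort()               # innermost = latest start, then earliest end
    return covering[-1][2]

tree = ast.parse(SOURCE)
# Hypothetical fixation on line 2, column 12 (the "a" in "a + b").
print(type(node_at(tree, 2, 12)).__name__)   # -> Name
```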
##### Miguel Setúbal, Tayana Conte, Marcos Kalinowski, Allysson Allex Araújo, "Investigating the Online Recruitment and Selection Journey of Novice Software Engineers: Anti-patterns and Recommendations"

**Abstract:** The growing software development market has increased the demand for qualified professionals in Software Engineering (SE). To this end, companies must enhance their Recruitment and Selection (R&S) processes to maintain high-quality teams, including opening opportunities for beginners, such as trainees and interns. However, given the various judgments and sociotechnical factors involved, this complex R&S process poses a challenge for recent graduates seeking to enter the market. This paper aims to identify a set of anti-patterns and recommendations for early-career SE professionals concerning R&S processes. Under an exploratory and qualitative methodological approach, we conducted six online Focus Groups with 18 recruiters with experience in R&S in the software industry. After completing our qualitative analysis, we identified 12 anti-patterns and 31 actionable recommendations regarding the hiring process focused on entry-level SE professionals. The identified anti-patterns encompass behavioral and technical dimensions innate to R&S processes. These findings provide a rich opportunity for reflection in the SE industry and offer valuable guidance for early-career candidates and organizations. From an academic perspective, this work also raises awareness of the intersection of Human Resources and SE, an area with considerable potential to be expanded in the context of cooperative and human aspects of SE.

Tags: "Human/Social"

##### Wenwei Gu, Jinyang Liu, Zhuangbin Chen, Jianping Zhang, Yuxin Su, Jiazhen Gu, Cong Feng, Zengyin Yang, Yongqiang Yang, Michael Lyu, "Identifying Performance Issues in Cloud Service Systems Based on Relational-Temporal Features"

**Abstract:** Cloud systems, typically comprised of various components (e.g., microservices), are susceptible to performance issues, which may cause service-level agreement violations and financial losses. Identifying performance issues is thus of paramount importance for cloud vendors. In current practice, crucial metrics, i.e., key performance indicators (KPIs), are monitored periodically to provide insight into the operational status of components. Identifying performance issues is often formulated as an anomaly detection problem, which is tackled by analyzing each metric independently. However, this approach overlooks the complex dependencies existing among cloud components. Some graph neural network-based methods take both temporal and relational information into account; however, they struggle to identify the correlation violations in metrics that indicate underlying performance issues. Furthermore, the large number of components in a cloud system results in a vast array of noisy metrics. This complexity renders it impractical for engineers to fully comprehend the correlations, making it challenging to identify performance issues accurately. To address these limitations, we propose Identifying Performance Issues based on Relational-Temporal Features (ISOLATE), a learning-based approach that leverages both the relational and temporal features of metrics to identify performance issues. In particular, it adopts a graph neural network with attention to characterize the relations among metrics and extracts long-term and multi-scale temporal patterns using a GRU and a convolution network, respectively. The learned graph attention weights can be further used to localize the correlation-violated metrics. Moreover, to relieve the impact of noisy data, ISOLATE utilizes a positive-unlabeled learning strategy that tags pseudo-labels based on a small portion of confirmed negative examples. Extensive evaluation on both public and industrial datasets shows that ISOLATE outperforms all baseline models with a 0.945 F1-score and a 0.920 Hit rate@3. The ablation study also demonstrates the effectiveness of the relational-temporal features and the PU-learning strategy. Furthermore, we share success stories of leveraging ISOLATE to identify performance issues in Huawei Cloud, demonstrating its value in practice.

Tags: "Testing and Quality", "Design/Architecture"
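The "correlation violation" idea above can be pictured with a far simpler baseline than ISOLATE's graph attention: track the rolling correlation between two KPIs that normally move together and flag windows where it collapses. The sketch below uses NumPy and synthetic KPIs with an injected violation; it illustrates the intuition only, not the paper's method.

```python
# Toy "correlation violation" detector between two KPIs that normally move
# together (e.g., request rate and CPU usage). Synthetic data; not ISOLATE.
import numpy as np

rng = np.random.default_rng(1)
load = rng.normal(100, 5, size=300)
cpu = 0.8 * load + rng.normal(0, 1, size=300)   # normally tracks load
cpu[200:240] = rng.normal(80, 1, size=40)       # injected violation window

WINDOW = 30
for start in range(0, len(load) - WINDOW, WINDOW):
    w = slice(start, start + WINDOW)
    r = np.corrcoef(load[w], cpu[w])[0, 1]
    flag = "  <-- correlation violation" if r < 0.5 else ""
    print(f"t={start:3d}..{start + WINDOW:3d}  corr={r:+.2f}{flag}")
```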
##### Saurabhsingh Rajput, Tim Widmayer, Ziyuan Shang, Maria Kechagia, Federica Sarro, Tushar Sharma, "Enhancing Energy-Awareness in Deep Learning through Fine-Grained Energy Measurement"

**Abstract:** With the increasing usage, scale, and complexity of Deep Learning (DL) models, their rapidly growing energy consumption has become a critical concern. Promoting green development and energy awareness at different granularities is the need of the hour to limit the carbon emissions of DL systems. However, the lack of standard and repeatable tools to accurately measure and optimize energy consumption at fine granularity (e.g., at the API level) hinders progress in this area. This paper introduces FECoM (Fine-grained Energy Consumption Meter), a framework for fine-grained DL energy consumption measurement. FECoM enables researchers and developers to profile DL APIs from an energy perspective. FECoM addresses the challenges of fine-grained energy measurement using static instrumentation while considering factors such as computational load and temperature stability. We assess FECoM's capability for fine-grained energy measurement for one of the most popular open-source DL frameworks, namely TensorFlow. Using FECoM, we also investigate the impact of parameter size and execution time on energy consumption, enriching our understanding of TensorFlow APIs' energy profiles. Furthermore, we elaborate on the considerations and challenges involved in designing and implementing a fine-grained energy measurement tool. This work will facilitate further advances in DL energy measurement and the development of energy-aware practices for DL systems.

Tags: "Testing and Quality", "AI for SE"
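API-level instrumentation of the kind FECoM performs can be pictured with a simple wrapper: intercept a call, read a counter before and after, and attribute the delta to that API. The sketch below is conceptual only: it wraps dynamically rather than via static instrumentation, and it uses wall-clock time as a stand-in for the hardware energy counters (e.g., RAPL) a real meter would read. All names are illustrative.

```python
# Conceptual sketch of API-level profiling. FECoM instruments calls
# statically and reads energy counters; here we wrap dynamically and use
# wall-clock time as a stand-in measurement.
import functools
import time

def measured(fn):
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()      # a real meter samples energy here
        result = fn(*args, **kwargs)
        elapsed = time.perf_counter() - start
        print(f"{fn.__name__}: {elapsed * 1e3:.2f} ms")
        return result
    return wrapper

@measured
def api_call(n):
    # Pure-Python stand-in for a DL API call such as a matrix multiply.
    return sum(i * i for i in range(n))

api_call(1_000_000)
```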
##### Emanuela Guglielmi, Gabriele Bavota, Rocco Oliveto, Simone Scalabrino, "Automatic Identification of Game Stuttering via Gameplay Videos Analysis"

**Abstract:** Modern video games are extremely complex software systems and, as such, they might suffer from several types of post-release issues. A particularly insidious issue is constituted by drops in the frame rate (i.e., stuttering events), which might have a negative impact on the user experience. Stuttering events are frequently documented in the millions of hours of gameplay videos shared by players on platforms such as Twitch or YouTube. From the developers' perspective, these videos represent a free source of documented "testing activities". However, especially for popular games, the quantity and length of these videos make their manual inspection impractical. We introduce HASTE, an approach for the automatic detection of stuttering events in gameplay videos that can be exploited to generate candidate bug reports. HASTE first splits a given video into visually coherent slices, with the goal of filtering out those that do not represent actual gameplay (e.g., navigating the game settings). Then, it identifies the subset of pixels in the video frames that actually show the game in action, excluding additional on-screen elements such as the YouTube channel logo, on-screen chats, etc. In this way, HASTE can exploit state-of-the-art image similarity metrics to identify candidate stuttering events, namely subsequent frames that are almost identical in the pixels depicting the game. We evaluate the different steps behind HASTE on a total of 105 videos, showing that it can correctly extract video slices with 76% precision, and can correctly identify the slices related to gameplay with recall and precision higher than 77%. Overall, HASTE achieves 71% recall and 89% precision for the identification of stuttering events in gameplay videos.

Tags: "Analysis", "Testing and Quality", "Games"
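The core signal HASTE relies on, near-identical consecutive frames, can be approximated with any image similarity measure. A minimal sketch with NumPy follows, using synthetic "frames" and a plain mean absolute pixel difference instead of the state-of-the-art metrics the paper employs.

```python
# Toy stuttering detector: flag runs of consecutive frames that are almost
# identical. Synthetic frames; HASTE works on real video with stronger metrics.
import numpy as np

rng = np.random.default_rng(0)
frames = [rng.integers(0, 256, size=(72, 128), dtype=np.uint8)]
for t in range(1, 60):
    if 20 <= t < 26:                     # simulate a stutter: frozen frame
        frames.append(frames[-1].copy())
    else:
        frames.append(rng.integers(0, 256, size=(72, 128), dtype=np.uint8))

THRESHOLD = 1.0                          # mean absolute pixel difference
for t in range(1, len(frames)):
    diff = np.abs(frames[t].astype(int) - frames[t - 1].astype(int)).mean()
    if diff < THRESHOLD:
        print(f"candidate stutter between frames {t - 1} and {t}")
```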
##### Jiho Shin, Hadi Hemmati, Moshi Wei, Song Wang, "Assessing Evaluation Metrics for Neural Test Oracle Generation"

**Abstract:** Recently, deep learning models have shown promising results in test oracle generation. Neural Oracle Generation (NOG) models are commonly evaluated using static (automatic) metrics which are mainly based on the textual similarity of the output, e.g., BLEU, ROUGE-L, METEOR, and Accuracy. However, these textual similarity metrics may not reflect the testing effectiveness of the generated oracle within a test suite, which is often measured by dynamic (execution-based) test adequacy metrics such as code coverage and mutation score. In this work, we revisit existing oracle generation studies plus GPT-3.5 to empirically investigate the current standing of their performance in textual similarity and test adequacy metrics. Specifically, we train and run four state-of-the-art test oracle generation models on seven textual similarity and two test adequacy metrics for our analysis. We apply two different correlation analyses between these two different sets of metrics. Surprisingly, we found no significant correlation between the textual similarity metrics and test adequacy metrics. For instance, GPT-3.5 on the jackrabbit-oak project had the highest performance on all seven textual similarity metrics among the studied NOGs; however, it had the lowest test adequacy metrics compared to all the studied NOGs. We further conducted a qualitative analysis to explore the reasons behind our observations. We found that oracles with high textual similarity metrics but low test adequacy metrics tend to have complex or multiple chained method invocations within the oracle's parameters, making them hard for the model to generate completely and affecting the test adequacy metrics. On the other hand, oracles with low textual similarity metrics but high test adequacy metrics tend to call different assertion types or a different method that functions similarly to the ones in the ground truth. Overall, this work complements prior studies on test oracle generation with an extensive performance evaluation on textual similarity and test adequacy metrics and provides guidelines for better assessment of deep learning applications in software test generation in the future.

Tags: "Testing and Quality", "AI for SE"
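Once per-model scores exist, a correlation analysis of the kind described above takes a few lines. The sketch below uses SciPy with made-up numbers; the paper's two analyses are not necessarily these exact tests.

```python
# Illustrative correlation check between a textual similarity metric and a
# test adequacy metric across models/projects (made-up numbers).
from scipy.stats import kendalltau, spearmanr

bleu =           [42.1, 38.7, 55.3, 60.2, 47.9]   # textual similarity
mutation_score = [0.31, 0.45, 0.28, 0.22, 0.40]   # test adequacy

rho, p_rho = spearmanr(bleu, mutation_score)
tau, p_tau = kendalltau(bleu, mutation_score)
print(f"Spearman rho={rho:+.2f} (p={p_rho:.3f})")
print(f"Kendall  tau={tau:+.2f} (p={p_tau:.3f})")
```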
##### Zhe Yu, Joymallya Chakraborty, Tim Menzies, "FairBalance: How to Achieve Equalized Odds With Data Pre-processing"

**Abstract:** This research seeks to benefit the software engineering community by providing a simple yet effective pre-processing approach to achieve equalized odds fairness in machine learning software. Fairness issues have attracted increasing attention since machine learning software is increasingly used for high-stakes and high-risk decisions. It is the responsibility of all software developers to make their software accountable by ensuring that the machine learning software does not perform differently on different sensitive demographic groups, thereby satisfying equalized odds. Unlike prior work that either optimizes for an equalized-odds-related metric during the learning process like a black box, or manipulates the training data following some intuition, this work studies the root cause of the violation of equalized odds and how to tackle it. We found that equalizing the class distribution in each demographic group with sample weights is a necessary condition for achieving equalized odds without modifying the normal training process. In addition, an important partial condition for equalized odds (zero average odds difference) can be guaranteed when the class distributions are weighted to be not only equal but also balanced (1:1). Based on these analyses, we propose FairBalance, a pre-processing algorithm which balances the class distribution in each demographic group by assigning calculated weights to the training data. On eight real-world datasets, our empirical results show that, at low computational overhead, FairBalance can significantly improve equalized odds without much, if any, damage to utility. FairBalance also outperforms existing state-of-the-art approaches in terms of equalized odds. To facilitate reuse, reproduction, and validation, we made our scripts available at https://github.com/hil-se/FairBalance.

Tags: "Testing and Quality", "AI for SE"
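The weighting idea is concrete enough to sketch: give each (group, class) cell a weight so that within every demographic group the classes carry equal total weight. Below is one plausible scheme (weights inversely proportional to cell frequency); see the authors' repository for the actual FairBalance algorithm.

```python
# Minimal sketch of balancing class distributions per demographic group via
# sample weights (one plausible scheme; not necessarily FairBalance itself).
# Each (group, class) cell gets weight 1 / cell_count, so within every group
# the classes carry equal total weight.
from collections import Counter

groups = ["A", "A", "A", "A", "B", "B"]
labels = [ 1,   1,   1,   0,   1,   0 ]

cell_counts = Counter(zip(groups, labels))
weights = [1.0 / cell_counts[(g, y)] for g, y in zip(groups, labels)]

for g, y, w in zip(groups, labels, weights):
    print(f"group={g} label={y} weight={w:.2f}")
# Within group A: class 1 total = 3 * (1/3) = 1.0, class 0 total = 1.0.
```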
##### Jon Ayerdi, Asier Iriarte, Pablo Valle, Ibai Roman, Miren Illarramendi, Aitor Arrieta, "MarMot: Metamorphic Runtime Monitoring of Autonomous Driving Systems"

**Abstract:** Autonomous Driving Systems (ADSs) are complex Cyber-Physical Systems (CPSs) that must ensure safety even in uncertain conditions. Modern ADSs often employ Deep Neural Networks (DNNs), which may not produce correct results in every possible driving scenario. Thus, an approach to estimate the confidence of an ADS at runtime is necessary to prevent potentially dangerous situations. In this paper we propose MarMot, an online monitoring approach for ADSs based on Metamorphic Relations (MRs), which are properties of a system that hold among multiple inputs and the corresponding outputs. Using domain-specific MRs, MarMot estimates the uncertainty of the ADS at runtime, allowing the identification of anomalous situations that are likely to cause faulty behavior of the ADS, such as driving off the road. We perform an empirical assessment of MarMot with five different MRs, using two different subject ADSs: a small-scale physical ADS and a simulated ADS. Our evaluation encompasses the identification of both external anomalies, e.g., fog, as well as internal anomalies, e.g., faulty DNNs due to mislabeled training data. Our results show that MarMot can identify up to 65% of the external anomalies and 100% of the internal anomalies in the physical ADS, and up to 54% of the external anomalies and 88% of the internal anomalies in the simulated ADS. With these results, MarMot outperforms or is comparable to other state-of-the-art approaches, including SelfOracle, Ensemble, and MC Dropout-based ADS monitors.

Tags: "Real-Time", "Analysis"
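A metamorphic relation for a steering model might state, for example, that horizontally mirroring the camera frame should approximately negate the predicted steering angle. The toy monitor below implements that single hypothetical MR with a stand-in "model"; it is not one of MarMot's five MRs.

```python
# Toy metamorphic runtime check (one hypothetical MR, not MarMot's):
# mirroring the input frame should approximately negate the steering angle.
import numpy as np

def steering_model(frame):
    # Stand-in for a DNN: steer toward the brighter half of the image.
    left, right = np.split(frame, 2, axis=1)
    return float(right.mean() - left.mean()) / 255.0

def mr_violation(frame, tolerance=0.05):
    original = steering_model(frame)
    mirrored = steering_model(frame[:, ::-1])
    return abs(original + mirrored) > tolerance  # expect mirrored == -original

frame = np.tile(np.linspace(0, 255, 64), (32, 1))  # brighter on the right
print("steering:", steering_model(frame))
print("MR violated:", mr_violation(frame))
```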
##### Baharin A. Jodat, Abhishek Chandar, Shiva Nejati, Mehrdad Sabetzadeh, "Test Generation Strategies for Building Failure Models and Explaining Spurious Failures"

**Abstract:** Test inputs fail not only when the system under test is faulty but also when the inputs are invalid or unrealistic. Failures resulting from invalid or unrealistic test inputs are spurious. Avoiding spurious failures improves the effectiveness of testing in exercising the main functions of a system, particularly for compute-intensive (CI) systems where a single test execution takes significant time. In this article, we propose to build failure models for inferring interpretable rules on test inputs that cause spurious failures. We examine two alternative strategies for building failure models: (1) machine learning (ML)-guided test generation and (2) surrogate-assisted test generation. ML-guided test generation infers boundary regions that separate passing and failing test inputs and samples test inputs from those regions. Surrogate-assisted test generation relies on surrogate models to predict labels for test inputs instead of exercising all the inputs. We propose a novel surrogate-assisted algorithm that uses multiple surrogate models simultaneously and dynamically selects the prediction from the most accurate model. We empirically evaluate the accuracy of failure models inferred based on surrogate-assisted and ML-guided test generation algorithms. Using case studies from the domains of cyber-physical systems and networks, we show that our proposed surrogate-assisted approach generates failure models with an average accuracy of 83%, significantly outperforming ML-guided test generation and two baselines. Further, our approach learns failure-inducing rules that identify genuine spurious failures as validated against domain knowledge.

Tags: "Testing and Quality", "AI for SE"
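The "pick the currently most accurate surrogate" idea can be sketched in a few lines: keep a running accuracy estimate per surrogate on inputs that were actually executed, and route each new prediction through the current leader. The sketch below is illustrative only; the paper's algorithm also decides when to fall back to real executions, which is simplified here to a fixed schedule.

```python
# Sketch of dynamic selection among surrogate models: trust the surrogate
# with the best running accuracy on inputs we actually executed.
import random

random.seed(0)
true_fail = lambda x: x > 0.7                    # hidden ground truth

surrogates = {
    "coarse": lambda x: x > 0.5,
    "fine":   lambda x: x > 0.69,
}
hits = {name: 1 for name in surrogates}          # Laplace-style initialization
trials = {name: 2 for name in surrogates}

for step in range(200):
    x = random.random()
    best = max(surrogates, key=lambda n: hits[n] / trials[n])
    prediction = surrogates[best](x)             # cheap surrogate prediction
    if step % 5 == 0:                            # occasionally run the real,
        actual = true_fail(x)                    # expensive test execution
        for name, model in surrogates.items():
            trials[name] += 1
            hits[name] += (model(x) == actual)

for name in surrogates:
    print(f"{name}: est. accuracy {hits[name] / trials[name]:.2f}")
```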
##### Wachiraphan Charoenwet, Patanamon Thongtanunam, Van-Thuan Pham, Christoph Treude, "Toward Effective Secure Code Reviews: An Empirical Study of Security-Related Coding Weaknesses"

**Abstract:** Identifying security issues early is encouraged to reduce the latent negative impacts on software systems. Code review is a widely used method that allows developers to manually inspect modified code, catching security issues during the software development cycle. However, existing code review studies often focus on known vulnerabilities, neglecting coding weaknesses, which can introduce real-world security issues that are more visible through code review. The practices of code reviews in identifying such coding weaknesses are not yet fully investigated. To better understand this, we conducted an empirical case study in two large open-source projects, OpenSSL and PHP. Based on 135,560 code review comments, we found that reviewers raised security concerns in 35 out of 40 coding weakness categories. Surprisingly, some coding weaknesses related to past vulnerabilities, such as memory errors and resource management, were discussed less often than the vulnerabilities. Developers attempted to address raised security concerns in many cases (39%-41%), but a substantial portion was merely acknowledged (30%-36%), and some went unfixed due to disagreements about solutions (18%-20%). This highlights that coding weaknesses can slip through code review even when identified. Our findings suggest that reviewers can identify various coding weaknesses leading to security issues during code reviews. However, these results also reveal shortcomings in current code review practices, indicating the need for more effective mechanisms or support for increasing awareness of security issue management in code reviews.

Tags: "Security", "User experience"

##### Fang Liu, Zhiyi Fu, Ge Li, Zhi Jin, Hui Liu, Yiyang Hao, Li Zhang, "Non-Autoregressive Line-Level Code Completion"

**Abstract:** Software developers frequently use code completion tools to accelerate software development by suggesting the following code elements. Researchers usually employ AutoRegressive (AR) decoders to complete code sequences in a left-to-right, token-by-token fashion. To improve the accuracy and efficiency of code completion, we argue that tokens within a code statement have the potential to be predicted concurrently. In this article, we first conduct an empirical study to analyze the dependency among the target tokens in line-level code completion. The results suggest that it is potentially practical to generate all statement tokens in parallel. To this end, we introduce SANAR, a simple and effective syntax-aware non-autoregressive model for line-level code completion. To further improve the quality of the generated code, we propose an adaptive and syntax-aware sampling strategy to boost the model's performance. The experimental results obtained from two widely used datasets indicate that our model outperforms state-of-the-art code completion approaches of similar model size by a considerable margin, and is faster than these models with up to a 9x speed-up. Moreover, the extensive results additionally demonstrate that the enhancements achieved by SANAR become even more pronounced with larger model sizes, highlighting their significance.

Tags: "IDEs", "AI for SE"
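The autoregressive versus non-autoregressive distinction above is a matter of control flow: AR decoding makes one model call per token, while NAR decoding fills every position in a single call. The toy below contrasts the two loops over a made-up per-position probability table; no real model is involved, and SANAR's syntax-aware sampling is not represented.

```python
# Toy contrast of autoregressive (AR) vs. non-autoregressive (NAR) decoding
# over a made-up per-position probability table (no real model involved).
# probs[i][t] = P(token t at position i); here independent per position.
probs = [
    {"x": 0.9, "y": 0.1},
    {"=": 1.0},
    {"y": 0.6, "x": 0.4},
    {"+": 0.8, "=": 0.2},
    {"1": 0.7, "y": 0.3},
]

# AR: one position per step (len(probs) sequential "model calls").
ar = []
for i in range(len(probs)):
    ar.append(max(probs[i], key=probs[i].get))

# NAR: all positions in a single step (one parallel "model call").
nar = [max(p, key=p.get) for p in probs]

print("AR :", " ".join(ar))
print("NAR:", " ".join(nar))
```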
##### Huizi Hao, Kazi Amit Hasan, Hong Qin, Marcos Macedo, Yuan Tian, Steven Ding, Ahmed E. Hassan, "An Empirical Study on Developers' Shared Conversations with ChatGPT in GitHub Pull Requests and Issues"

**Abstract:** ChatGPT has significantly impacted software development practices, providing substantial assistance to developers in various tasks, including coding, testing, and debugging. Despite its widespread adoption, the impact of ChatGPT as an assistant in collaborative coding remains largely unexplored. In this paper, we analyze a dataset of 210 and 370 developers' shared conversations with ChatGPT in GitHub pull requests (PRs) and issues, respectively. We manually examined the content of the conversations and characterized the dynamics of the sharing behavior, i.e., understanding the rationale behind the sharing, identifying the locations where the conversations were shared, and determining the roles of the developers who shared them. Our main observations are: (1) Developers seek ChatGPT's assistance across 16 types of software engineering inquiries. In conversations shared in both PRs and issues, the most frequently encountered inquiry categories include code generation, conceptual questions, how-to guides, issue resolution, and code review. (2) Developers frequently engage with ChatGPT via multi-turn conversations, where each prompt can fulfill various roles, such as unveiling initial or new tasks, iterative follow-up, and prompt refinement. Multi-turn conversations account for 33.2% of the conversations shared in PRs and 36.9% in issues. (3) In collaborative coding, developers leverage shared conversations with ChatGPT to facilitate their role-specific contributions, whether as authors of PRs or issues, code reviewers, or collaborators on issues. Our work serves as a first step towards understanding the dynamics between developers and ChatGPT in collaborative software development and opens up new directions for future research on the topic.

Tags: "Human/Social", "Process", "AI for SE"

##### Sallam Abualhaija, Fatma Basak Aydemir, Fabiano Dalpiaz, Davide Dell'Anna, Alessio Ferrari, Xavier Franch, Davide Fucci, "Replication in Requirements Engineering: the NLP for RE Case"

**Abstract:** Natural language processing (NLP) techniques have been widely applied in the requirements engineering (RE) field to support tasks such as classification and ambiguity detection. Despite its empirical vocation, RE research has given limited attention to the replication of NLP for RE studies. Replication is hampered by several factors, including the context specificity of the studies, the heterogeneity of the tasks involving NLP, the tasks' inherent hairiness, and, in turn, the heterogeneous reporting structure. To address these issues, we propose a new artifact, referred to as ID-Card, whose goal is to provide a structured summary of research papers emphasizing replication-relevant information. We construct the ID-Card through a structured, iterative process based on design science. In this article: (i) we report on hands-on experiences of replication; (ii) we review the state of the art and extract replication-relevant information; (iii) we identify, through focus groups, challenges across two typical dimensions of replication: data annotation and tool reconstruction; and (iv) we present the concept and structure of the ID-Card to mitigate the identified challenges. This study aims to create awareness of replication in NLP for RE. We propose an ID-Card that is intended to foster study replication but can also be used in other contexts, e.g., for educational purposes.

Tags: "Requirements", "AI for SE"

##### Partha Chakraborty, Krishna Kanth Arumugam, Mahmoud Alfadel, Mei Nagappan, Shane McIntosh, "Revisiting the Performance of Deep Learning-Based Vulnerability Detection on Realistic Datasets"

**Abstract:** The impact of software vulnerabilities on everyday software systems is concerning. Although deep learning-based models have been proposed for vulnerability detection, their reliability remains a significant concern. While prior evaluations of such models report impressive recall/F1 scores of up to 99%, we find that these models underperform in practical scenarios, particularly when evaluated on entire codebases rather than only the fixing commit. In this paper, we introduce a comprehensive dataset (Real-Vul) designed to accurately represent real-world scenarios for evaluating vulnerability detection models. We evaluate the DeepWukong, LineVul, ReVeal, and IVDetect vulnerability detection approaches and observe a surprisingly significant drop in performance, with precision declining by up to 95 percentage points and F1 scores dropping by up to 91 percentage points. A closer inspection reveals a substantial overlap in the embeddings generated by the models for vulnerable and uncertain samples (non-vulnerable or vulnerability not reported yet), which likely explains why we observe such a large increase in the quantity and rate of false positives. Additionally, we observe fluctuations in model performance based on vulnerability characteristics (e.g., vulnerability types and severity). For example, the studied models achieve 26 percentage points better F1 scores when vulnerabilities are related to information leaks or code injection rather than path resolution or predictable return values. Our results highlight the substantial performance gap that still needs to be bridged before deep learning-based vulnerability detection is ready for deployment in practical settings. We dive deeper into why models underperform in realistic settings, and our investigation reveals overfitting as a key issue. We address this by introducing an augmentation technique, potentially improving performance by up to 30%. We contribute (a) an approach to creating a dataset that future research can use to improve the practicality of model evaluation; (b) Real-Vul, a comprehensive dataset that adheres to this approach; and (c) empirical evidence that deep learning-based models struggle to perform in a real-world setting.

Tags: "Security", "Testing and Quality", "AI for SE"

##### Lukas Schulte, Benjamin Ledel, Steffen Herbold, "Studying the explanations for the automated prediction of bug and non-bug issues using LIME and SHAP"

**Abstract:** [Context] The identification of bugs within issues reported to an issue tracking system is crucial for triage. Machine learning models have shown promising results for this task. However, we have only limited knowledge of how such models identify bugs. Explainable AI methods like LIME and SHAP can be used to increase this knowledge. [Objective] We want to understand whether explainable AI provides explanations that are reasonable to us as humans and align with our assumptions about the model's decision-making. We also want to know whether the quality of predictions is correlated with the quality of explanations. [Methods] We conduct a study in which we rate LIME and SHAP explanations based on their quality in explaining the outcome of an issue type prediction model. For this, we rate the quality of the explanations, i.e., whether they align with our expectations and help us understand the underlying machine learning model. [Results] We found that both LIME and SHAP give reasonable explanations and that correct predictions are well explained. Further, we found that SHAP outperforms LIME due to lower ambiguity and higher contextuality, which can be attributed to the ability of the deep SHAP variant to capture sentence fragments. [Conclusion] We conclude that the model finds explainable signals for both bugs and non-bugs. Also, we recommend that research dealing with the quality of explanations for classification tasks report and investigate rater agreement, since the rating of explanations is highly subjective.

Tags: "Testing and Quality", "AI for SE"
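Generating LIME explanations for an issue-type classifier, as in the study above, takes only a few lines with off-the-shelf libraries. The sketch below trains a toy TF-IDF plus logistic regression pipeline on four invented issue titles and explains one prediction; the study's actual models, dataset, and its SHAP analysis differ.

```python
# Sketch of explaining an issue-type classifier with LIME (toy corpus).
# Requires: pip install lime scikit-learn
from lime.lime_text import LimeTextExplainer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

texts = [
    "app crashes with null pointer exception on startup",
    "crash when saving file, stack trace attached",
    "please add dark mode support",
    "feature request: export report as pdf",
]
labels = [1, 1, 0, 0]                    # 1 = bug, 0 = non-bug

pipeline = make_pipeline(TfidfVectorizer(), LogisticRegression())
pipeline.fit(texts, labels)

explainer = LimeTextExplainer(class_names=["non-bug", "bug"])
explanation = explainer.explain_instance(
    "editor crashes with an exception when opening large files",
    pipeline.predict_proba, num_features=5)
print(explanation.as_list())             # word -> contribution weight
```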
Though commonly employed in industry, manual exploratory testing of web application GUIs has been shown to be mundane and expensive. Gamification applied to that kind of testing activity has the potential to overcome its limitations, though no empirical research has explored this area yet. [Goal] Collect preliminary insights on how gamification, when performed by novice testers, affects the effectiveness, efficiency, test case realism, and user experience in exploratory testing of web applications. [Method] Common gamification features augment an existing exploratory testing tool: Final Score with Leaderboard, Injected Bugs, Progress Bar, and Exploration Highlights. The original tool and the gamified version are then compared in an experiment involving 144 participants. User experience is elicited using the Technology Acceptance Model (TAM) questionnaire instrument. [Results] Statistical analysis identified several significant differences in metrics representing test effectiveness and efficiency, showing improved coverage when tests were developed with gamification. Additionally, user experience is improved with gamification. [Conclusions] Gamification of exploratory testing has a tangible effect on how testers create test cases for web applications. While the results are mixed, the effects are mostly beneficial and warrant more research. Further research should aim at confirming the presented results in the context of state-of-the-art testing tools and real-world development environments.</p>&nbsp;Tags: "Testing and Quality", "User experience" &nbsp;<br> &nbsp;<br> <h5> Danniell Hu, Priscila Santiesteban, Madeline Endres, Westley Weimer, "Towards a Cognitive Model of Dynamic Debugging: Does Identifier Construction Matter?" </h5> <p><b>Abstract:</b> Debugging is a vital and time-consuming process in software engineering. Recently, researchers have begun using neuroimaging to understand the cognitive bases of programming tasks by measuring patterns of neural activity. While exciting, prior studies have only examined small sub-steps in isolation, such as comprehending a method without writing any code or writing a method from scratch without reading any already-existing code. We propose a simple multi-stage debugging model in which programmers transition between Task Comprehension, Fault Localization, Code Editing, Compiling, and Output Comprehension activities. We conduct a human study of n = 28 participants using a combination of functional near-infrared spectroscopy and standard coding measurements (e.g., time taken, tests passed, etc.). Critically, we find that our proposed debugging stages are both neurally and behaviorally distinct. To the best of our knowledge, this is the first neurally-justified cognitive model of debugging. At the same time, there is significant interest in understanding how programmers from different backgrounds, such as those grappling with challenges in English prose comprehension, are impacted by code features when debugging. We use our cognitive model of debugging to investigate the role of one such feature: identifier construction. Specifically, we investigate how features of identifier construction impact neural activity while debugging by participants with and without reading difficulties. While we find significant differences in cognitive load as a function of morphology and expertise, we do not find significant differences in end-to-end programming outcomes (e.g., time, correctness, etc.).
This nuanced result suggests that prior findings on the cognitive importance of identifier naming in isolated sub-steps may not generalize to end-to-end debugging. Finally, in a result relevant to broadening participation in computing, we find no behavioral outcome differences for participants with reading difficulties.</p>&nbsp;Tags: "Testing and Quality" &nbsp;<br> &nbsp;<br> <h5> Emanuele Iannone, Giulia Sellitto, Emanuele Iaccarino, Filomena Ferrucci, Andrea De Lucia, Fabio Palomba, "Early and Realistic Exploitability Prediction of Just-Disclosed Software Vulnerabilities: How Reliable Can It Be?" </h5> <p><b>Abstract:</b> With the rate of discovered and disclosed vulnerabilities escalating, researchers have been experimenting with machine learning to predict whether a vulnerability will be exploited. Existing solutions leverage information unavailable when a CVE is created, making them unsuitable just after the disclosure. This paper experiments with early exploitability prediction models driven exclusively by the initial CVE record, i.e., the original description and the linked online discussions. Leveraging NVD and Exploit Database, we evaluate 72 prediction models trained using six traditional machine learning classifiers, four feature representation schemas, and three data balancing algorithms. We also experiment with five pre-trained large language models (LLMs). The models leverage seven different corpora made by combining three data sources, i.e., CVE description, Security Focus, and BugTraq. The models are evaluated in a realistic, time-aware fashion by removing the training and test instances that cannot be labeled “neutral” with sufficient confidence. The validation reveals that CVE descriptions and Security Focus discussions are the best data to train on. Pre-trained LLMs do not show the expected performance, requiring further pre-training in the security domain. We distill new research directions, identify possible room for improvement, and envision automated systems assisting security experts in assessing exploitability.</p>&nbsp;Tags: "Testing and Quality", "Security" &nbsp;<br> &nbsp;<br> <h5> Taijara Santana, Paulo Silveira Neto, Eduardo Almeida, Iftekhar Ahmed, "Bug Analysis in Jupyter Notebook Projects: An Empirical Study" </h5> <p><b>Abstract:</b> Computational notebooks, such as Jupyter, have been widely adopted by data scientists to write code for analyzing and visualizing data. Despite their growing adoption and popularity, few studies have investigated Jupyter development challenges from the practitioners’ point of view. This article presents a systematic study of bugs and challenges that Jupyter practitioners face through a large-scale empirical investigation. We mined 14,740 commits from 105 GitHub open source projects with Jupyter Notebook code. Next, we analyzed 30,416 StackOverflow posts, which gave us insights into bugs that practitioners face when developing Jupyter Notebook projects. Then, we conducted 19 interviews with data scientists to uncover more details about Jupyter bugs and to gain insight into Jupyter developers’ challenges. Finally, to validate the study results and proposed taxonomy, we conducted a survey with 91 data scientists.
We highlight bug categories, their root causes, and the challenges that Jupyter practitioners face.</p>&nbsp;Tags: "Testing and Quality", "AI for SE", "User experience" &nbsp;<br> &nbsp;<br> <h5> Peixun Long, Jianjun Zhao, "Testing Multi-Subroutine Quantum Programs: From Unit Testing to Integration Testing" </h5> <p><b>Abstract:</b> Quantum computing has emerged as a promising field with the potential to revolutionize various domains by harnessing the principles of quantum mechanics. As quantum hardware and algorithms continue to advance, developing high-quality quantum software has become crucial. However, testing quantum programs poses unique challenges due to the distinctive characteristics of quantum systems and the complexity of multi-subroutine programs. This article addresses the specific testing requirements of multi-subroutine quantum programs. We begin by investigating critical properties through a survey of existing quantum libraries, providing insights into the challenges of testing these programs. Building upon this understanding, we focus on testing criteria and techniques based on the whole testing process perspective, spanning from unit testing to integration testing. We delve into various aspects, including IO analysis, quantum relation checking, structural testing, behavior testing, integration of subroutine pairs, and test case generation. We also introduce novel testing principles and criteria to guide the testing process. We conduct comprehensive testing on typical quantum subroutines, including diverse mutants and randomized inputs, to evaluate our proposed approach. The analysis of failures provides valuable insights into the effectiveness of our testing methodology. Additionally, we present case studies on representative multi-subroutine quantum programs, demonstrating the practical application and effectiveness of our proposed testing principles and criteria.</p>&nbsp;Tags: "Testing and Quality", "Quantum" &nbsp;<br> &nbsp;<br> <h5> Da Song, Xuan Xie, Jiayang Song, Derui Zhu, Yuheng Huang, Felix Juefei-Xu, Lei Ma, "LUNA: A Model-Based Universal Analysis Framework for Large Language Models" </h5> <p><b>Abstract:</b> Over the past decade, Artificial Intelligence (AI) has achieved great success and is being used in a wide range of academic and industrial fields. More recently, Large Language Models (LLMs) have made rapid advancements that have propelled AI to a new level, enabling and empowering even more diverse applications and industrial domains with intelligence, particularly in areas like software engineering and natural language processing. Nevertheless, a number of emerging trustworthiness concerns exhibited in LLMs, e.g., robustness and hallucination, have recently received much attention; without properly addressing them, the widespread adoption of LLMs could be greatly hindered in practice. The distinctive characteristics of LLMs, such as the self-attention mechanism, extremely large neural network scale, and autoregressive generation usage contexts, differ from classic AI software based on Convolutional Neural Networks and Recurrent Neural Networks and present new challenges for quality analysis. Up to the present, universal and systematic analysis techniques for LLMs are still lacking, despite urgent industrial demand across diverse domains.
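</p><p><b>Illustrative sketch.</b> One family of analysis techniques, including the framework introduced next, builds a discrete abstract model over a model's internal states; a minimal sketch, assuming toy hidden-state traces and k-means abstraction (an illustration of the general idea, not LUNA's exact construction methods):</p>
<pre><code># Cluster per-step hidden states into abstract states, then estimate
# a transition matrix (a DTMC-style abstract model) from the traces.
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
traces = [rng.normal(size=(20, 16)) for _ in range(5)]  # toy hidden states

k = 4
kmeans = KMeans(n_clusters=k, n_init=10, random_state=0).fit(np.vstack(traces))

transitions = np.zeros((k, k))
for trace in traces:
    states = kmeans.predict(trace)
    for s, t in zip(states[:-1], states[1:]):
        transitions[s, t] += 1

row_sums = transitions.sum(axis=1, keepdims=True)
dtmc = transitions / np.maximum(row_sums, 1)  # per-state transition probabilities
print(dtmc.round(2))
</code></pre><p>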
Towards bridging such a gap, in this paper, we initiate an early exploratory study and propose a universal analysis framework for LLMs, named LUNA, which is designed to be general and extensible and enables versatile analysis of LLMs from multiple quality perspectives in a human-interpretable manner. In particular, we first leverage the data from desired trustworthiness perspectives to construct an abstract model as an auxiliary analysis asset and proxy, which is empowered by various abstract model construction methods built into LUNA. To assess the quality of the abstract model, we collect and define a number of evaluation metrics, aiming at both the abstract model level and the semantics level. Then, the semantics, i.e., the degree to which the LLM satisfies the trustworthiness perspective, is bound to the abstract model, enriching it and enabling more detailed analysis applications for diverse purposes, e.g., abnormal behavior detection. To better understand the potential usefulness of our analysis framework LUNA, we conduct a large-scale evaluation, the results of which demonstrate that 1) the abstract model has the potential to distinguish normal and abnormal behavior in LLMs, 2) LUNA is effective for real-world analysis of LLMs in practice, and the hyperparameter settings influence its performance, and 3) different evaluation metrics correlate differently with the analysis performance. In order to encourage further studies in the quality assurance of LLMs, we have made all code and detailed experimental results available on the supplementary website of this paper https://sites.google.com/view/llm-luna.</p>&nbsp;Tags: "AI for SE" &nbsp;<br> &nbsp;<br> <h5> Roman Haas, Raphael Nömmer, Elmar Juergens, Sven Apel, "Optimization of Automated and Manual Software Tests in Industrial Practice: A Survey and Historical Analysis" </h5> <p><b>Abstract:</b> Context: Both automated and manual software testing are widely applied in practice. While being essential for project success and software quality, they are very resource-intensive, thus motivating the pursuit of optimization. Goal: We aim to understand to what extent test optimization techniques for automated testing from the field of test case selection, prioritization, and test suite minimization can be applied to manual testing processes in practice. Method: We have studied the automated and manual testing process of five industrial study subjects from five different domains with different technological backgrounds and assessed the costs and benefits of test optimization techniques in industrial practice. In particular, we have carried out a cost–benefit analysis of two language-agnostic optimization techniques (test impact analysis and Pareto testing, a technique we introduce in this paper) on 2,622 real-world failures from our subjects' histories. Results: Both techniques maintain most of the fault detection capability while significantly reducing the test runtime. For automated testing, optimized test suites detect, on average, 80% of failures, while saving 66% of execution time, as compared to an 81% failure detection rate for manual test suites and an average time saving of 43%. We observe an average speedup in the time to first failure of around a factor of 49 compared to a random test ordering.
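</p><p><b>Illustrative sketch.</b> The kind of ordering that produces such speedups can be sketched as a generic cost–benefit prioritization over historical failure data (illustrative only; not the paper's exact test impact analysis or Pareto testing techniques):</p>
<pre><code># Rank tests by historical failure rate per unit runtime, so that the
# most "failure-dense" tests run first and failures surface sooner.
tests = [  # (name, past_failures, past_runs, runtime_seconds)
    ("t_login",   1, 100, 2.0),
    ("t_export", 12, 100, 30.0),
    ("t_search",  6, 100, 5.0),
    ("t_billing", 2, 100, 1.0),
]

def priority(t):
    name, fails, runs, runtime = t
    return (fails / runs) / runtime   # expected failures per second of testing

ordered = sorted(tests, key=priority, reverse=True)
print([t[0] for t in ordered])        # e.g. ['t_billing', 't_search', ...]
</code></pre><p>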
Conclusion: Our results suggest that optimization techniques from automated testing can be transferred to manual testing in industrial practice, resulting in lower test execution time and much lower time-to-feedback, but this comes with process-related limitations and requirements for successful implementation. All study subjects implemented one of our test optimization techniques in their processes, which demonstrates the practical impact of our findings.</p>&nbsp;Tags: "Testing and Quality", "Business" &nbsp;<br> &nbsp;<br> <h5> Maryam Masoudian, Heqing Huang, Morteza Amini, Charles Zhang, "Mole: Efficient Crash Reproduction in Android Applications With Enforcing Necessary UI Events" </h5> <p><b>Abstract:</b> To improve the quality of Android apps, developers use automated debugging and testing solutions to determine whether previously found crashes are reproducible. However, existing GUI fuzzing solutions for Android apps struggle to reproduce crashes efficiently based solely on a crash stack trace. This trace provides the location in the app where the crash occurs. GUI fuzzing solutions currently in use rely on heuristics to generate UI events. Unfortunately, these events are often not directed at exploring the app's UI event space toward a specific code location. Hence, they generate numerous events unrelated to the crash, leading to an event explosion. To address this issue, a precise static UI model of widgets and screens can greatly enhance the efficiency of a fuzzing tool in its search. Building such a model requires considering all possible combinations of event sequences on widgets since the execution order of events is not statically determined. However, this approach presents scalability challenges in complex apps with many widgets. In this paper, we propose a directed fuzzing solution to reduce an app's event domain to the events necessary to trigger a crash. Our insight is that the dependencies between widgets in their visual presentation and attribute states provide valuable information for precisely identifying the events that trigger a crash. We propose an attribute-sensitive reachability analysis (ASRA) to track dependent widgets in reachable paths to the crash point and distinguish events by their relevance to the crash reproduction process. With instrumentation, we inject code to prune irrelevant events, reducing the event domain to be searched at run time. We used four well-known fuzzing tools, Monkey, Ape, Stoat, and FastBot2, to assess the impact of our solution in decreasing the crash reproduction time and increasing the possibility of reproducing a crash. Our results show that the success ratio of reproducing a crash has increased for one-fourth of crashes. In addition, crash reproduction becomes at least 2x faster on average. A Wilcoxon–Mann–Whitney test shows that this improvement is significant when our tool is used compared to the baseline and to an attribute-insensitive reachability analysis.</p>&nbsp;Tags: "Testing and Quality" &nbsp;<br> &nbsp;<br> <h5> Md Ahasanuzzaman, Gustavo A. Oliva, Ahmed E. Hassan, "Using Knowledge Units of Programming Languages to Recommend Reviewers for Pull Requests: An Empirical Study" </h5> <p><b>Abstract:</b> Determining the right code reviewer for a given code change requires understanding the characteristics of the changed code, identifying the skills of each potential reviewer (expertise profile), and finding a good match between the two.
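</p><p><b>Illustrative sketch.</b> That matching step can be pictured as comparing an expertise-profile vector against a vector describing the change; a toy sketch with hypothetical knowledge-unit counts (the paper's actual KU detection from certification-exam topics is described next, not reproduced here):</p>
<pre><code># Cosine similarity between a change's knowledge-unit (KU) vector and
# each reviewer's accumulated KU profile from past commits.
import numpy as np

KUS = ["concurrency", "io", "generics", "collections"]
profiles = {                                 # KU counts from commit history
    "alice": np.array([12.0, 1.0, 7.0, 3.0]),
    "bob":   np.array([0.0, 9.0, 1.0, 8.0]),
}
change = np.array([3.0, 0.0, 2.0, 0.0])      # KUs detected in the pull request

def cosine(a, b):
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))

ranking = sorted(profiles, key=lambda d: cosine(profiles[d], change),
                 reverse=True)
print(ranking)   # reviewers ordered by expertise match, e.g. ['alice', 'bob']
</code></pre><p>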
To facilitate this task, we design a code reviewer recommender that operates on the knowledge units (KUs) of a programming language. We define a KU as a cohesive set of key capabilities that are offered by one or more building blocks of a given programming language. We operationalize our KUs using certification exams for the Java programming language. We detect KUs from 10 actively maintained Java projects from GitHub, spanning 290K commits and 65K pull requests (PRs). We generate developer expertise profiles based on the detected KUs. We use these KU-based expertise profiles to build a code reviewer recommender (KUREC). We compare KUREC’s performance to that of seven baseline recommenders. KUREC ranked first along with the top-performing baseline recommender (RF) in a Scott-Knott ESD analysis of recommendation accuracy (KUREC’s median top-5 accuracy is 0.84 and its median MAP@5 is 0.51). From a practical standpoint, we highlight that KUREC’s performance is more stable (lower interquartile range) than that of RF, thus making it more consistent and potentially more trustworthy. We also design three new recommenders by combining KUREC with our baseline recommenders. These new combined recommenders outperform both KUREC and the individual baselines. Finally, we evaluate how reasonable the recommendations from KUREC and the combined recommenders are when they deviate from the ground truth. We observe that KUREC is the recommender with the highest percentage of reasonable recommendations (63.4%). Overall, we conclude that KUREC and one of the combined recommenders (e.g., AD_HYBRID) are superior to the baseline recommenders that we studied. Future work in the area should thus (i) consider KU-based recommenders as baselines and (ii) experiment with combined recommenders.</p>&nbsp;Tags: "Testing and Quality" &nbsp;<br> &nbsp;<br> <h5> Bentley Oakes, Michalis Famelis, Houari Sahraoui, "Building Domain-Specific Machine Learning Workflows: A Conceptual Framework for the State-of-the-Practice" </h5> <p><b>Abstract:</b> Domain experts are increasingly employing machine learning to solve their domain-specific problems. This article presents to software engineering researchers the six key challenges that a domain expert faces in addressing their problem with a computational workflow and the underlying executable implementation. These challenges arise out of our conceptual framework, which presents the “route” of transformations that a domain expert may choose to take while developing their solution. To ground our conceptual framework in the state of the practice, this article discusses a selection of available textual and graphical workflow systems and their support for the transformations described in our framework. Example studies from the literature in various domains are also examined to highlight the tools used by the domain experts as well as a classification of the domain specificity and machine learning usage of their problem, workflow, and implementation. The state of the practice informs our discussion of the six key challenges, where we identify which challenges and transformations are not sufficiently addressed by available tools.
We also suggest possible research directions for software engineering researchers to increase the automation of these tools and disseminate best-practice techniques between software engineering and various scientific domains.</p>&nbsp;Tags: "SE for AI" &nbsp;<br> &nbsp;<br> <h5> Anda Liang, Emerson Murphy-Hill, Westley Weimer, Yu Huang, "A Controlled Experiment in Age and Gender Bias When Reading Technical Articles in Software Engineering" </h5> <p><b>Abstract:</b> Online platforms and communities are a critical part of modern software engineering, yet are often affected by human biases. While previous studies investigated human biases and their potential harms to the efficiency and fairness of online communities, they have mainly focused on open source and Q&amp;A platforms, such as GitHub and Stack Overflow, but overlooked the audience-focused online platforms for delivering programming and SE-related technical articles, where millions of software engineering practitioners share, seek out, and learn from high-quality software engineering articles (i.e., technical articles for SE). Furthermore, most of the previous work has revealed gender and race bias, but we have little knowledge about the effect of age on software engineering practice. In this paper, we investigate the effect of authors’ demographic information (gender and age) on the evaluation of technical articles on software engineering and potential behavioral differences among participants. We conducted a survey-based and controlled human study and collected responses from 540 participants to investigate developers’ evaluation of technical articles for software engineering. By controlling the gender and age of the author profiles of technical articles for SE, we found that raters tend to have more positive content-depth evaluations for younger male authors than for older male authors, and that male participants conduct technical article evaluations faster than female participants, consistent with prior study findings. Surprisingly, unlike in other software engineering evaluation activities (e.g., code review, pull requests), we did not find a significant effect of author gender on the evaluation outcome of technical articles in SE.</p>&nbsp;Tags: "Human/Social" &nbsp;<br> &nbsp;<br> <h5> Chengjie Lu, Shaukat Ali, Tao Yue, "EpiTESTER: Testing Autonomous Vehicles with Epigenetic Algorithm and Attention Mechanism" </h5> <p><b>Abstract:</b> Testing autonomous vehicles (AVs) under various environmental scenarios that lead the vehicles to unsafe situations is challenging. Given the infinite possible environmental scenarios, it is essential to find critical scenarios efficiently. To this end, we propose a novel testing method, named EpiTESTER, by taking inspiration from epigenetics, which enables species to adapt to sudden environmental changes. In particular, EpiTESTER adopts gene silencing as its epigenetic mechanism, which regulates gene expression to prevent the expression of a certain gene, and the probability of gene expression is dynamically computed as the environment changes. Given different data modalities (e.g., images, lidar point clouds) in the context of AVs, EpiTESTER benefits from a multi-modal fusion transformer to extract high-level feature representations from environmental factors. Next, it calculates probabilities based on these features with the attention mechanism.
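</p><p><b>Illustrative sketch.</b> The way such per-gene probabilities can gate a search operator is sketched below, with fixed toy values standing in for the attention-derived probabilities (an illustration of gene silencing in a genetic search, not the authors' implementation):</p>
<pre><code># A gene is only "expressed" (eligible for mutation) with its computed
# expression probability; silenced genes pass through unchanged.
import random

random.seed(1)
genome = [0.3, 0.8, 0.1, 0.5]          # toy environment parameters in [0, 1]
expression_p = [0.9, 0.1, 0.7, 0.4]    # per-gene expression probabilities

def mutate(genome, expression_p, sigma=0.1):
    child = []
    for gene, p in zip(genome, expression_p):
        if p > random.random():        # gene expressed: allow mutation
            gene = min(1.0, max(0.0, gene + random.gauss(0, sigma)))
        child.append(gene)             # otherwise the gene is silenced
    return child

print(mutate(genome, expression_p))
</code></pre><p>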
To assess the cost-effectiveness of EpiTESTER, we compare it with a probabilistic search algorithm (Simulated Annealing, SA), a classical genetic algorithm (GA) (i.e., without any epigenetic mechanism implemented), and EpiTESTER with equal probability for each gene. We evaluate EpiTESTER with six initial environments from CARLA, an open-source simulator for autonomous driving research, and two end-to-end AV controllers, Interfuser and TCP. Our results show that EpiTESTER achieved promising performance in identifying critical scenarios compared to the baselines, indicating that applying epigenetic mechanisms is a good option for solving practical problems.</p>&nbsp;Tags: "Testing and Quality" &nbsp;<br> &nbsp;<br> <h5> Diego Clerissi, Giovanni Denaro, Marco Mobilio, Leonardo Mariani, "Guess the State: Exploiting Determinism to Improve GUI Exploration Efficiency" </h5> <p><b>Abstract:</b> Many automatic Web testing techniques generate test cases by analyzing the GUI of the Web applications under test, aiming to exercise sequences of actions that are similar to the ones that testers could manually execute. However, the efficiency of the test generation process is severely limited by the cost of analyzing the content of the GUI screens after executing each action. In this paper, we introduce an inference component, Sibilla, which accumulates knowledge about the behavior of the GUI after each action. Sibilla enables the test generators to reuse the results computed for GUI screens that recur multiple times during the test generation process, thus improving the efficiency of Web testing techniques. We evaluated Sibilla with Web testing techniques based on three different GUI exploration strategies (Random, Depth-first, and Q-learning) and nine target systems, observing reductions from 22% to 96% of the test generation time.</p>&nbsp;Tags: "Testing and Quality" &nbsp;<br> &nbsp;<br> <h5> Zhimin Zhao, Yihao Chen, Abdul Ali Bangash, Bram Adams, Ahmed E. Hassan, "An Empirical Study of Challenges in Machine Learning Asset Management" </h5> <p><b>Abstract:</b> [Context] In machine learning (ML) applications, assets include not only the ML models themselves, but also the datasets, algorithms, and deployment tools that are essential in the development, training, and implementation of these models. Efficient management of ML assets is critical to ensure optimal resource utilization, consistent model performance, and a streamlined ML development lifecycle. This practice contributes to faster iterations, adaptability, reduced time from model development to deployment, and the delivery of reliable and timely outputs. [Objective] Despite research on ML asset management, there is still a significant knowledge gap on operational challenges, such as model versioning, data traceability, and collaboration issues, faced by asset management tool users. These challenges are crucial because they could directly impact the efficiency, reproducibility, and overall success of machine learning projects. Our study aims to bridge this empirical gap by analyzing user experience, feedback, and needs from Q&amp;A posts, shedding light on the real-world challenges they face and the solutions they have found. [Method] We examine 15,065 Q&amp;A posts from multiple developer discussion platforms, including Stack Overflow, tool-specific forums, and GitHub/GitLab. Using a mixed-method approach, we classify the posts into knowledge inquiries and problem inquiries.
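</p><p><b>Illustrative sketch.</b> The topic-extraction step described next uses BERTopic; as a lightweight, runnable stand-in, the sketch below groups toy inquiry posts into themes with TF-IDF plus NMF (the same idea with simpler machinery; the posts are hypothetical):</p>
<pre><code># Extract themes from inquiry posts and inspect the top terms per theme.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF

posts = [
    "conda environment breaks after upgrading tensorflow",
    "pip dependency conflict when installing torch",
    "mlflow model serving returns 500 after deployment",
    "docker image for model deployment keeps growing",
    "how to version datasets alongside model checkpoints",
    "tracking experiment metadata across training runs",
]

tfidf = TfidfVectorizer(stop_words="english")
X = tfidf.fit_transform(posts)
nmf = NMF(n_components=2, random_state=0).fit(X)

terms = tfidf.get_feature_names_out()
for i, comp in enumerate(nmf.components_):
    top = [terms[j] for j in comp.argsort()[-4:][::-1]]
    print(f"topic {i}:", ", ".join(top))
</code></pre><p>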
We then apply BERTopic to extract challenge topics and compare their prevalence. Finally, we use the open card sorting approach to summarize solutions from solved inquiries, then cluster them with BERTopic, and analyze the relationship between challenges and solutions. [Results] We identify 133 distinct topics in ML asset management-related inquiries, grouped into 16 macro-topics, with software environment and dependency, model deployment and service, and model creation and training emerging as the most discussed. Additionally, we identify 79 distinct solution topics, classified under 18 macro-topics, with software environment and dependency, feature and component development, and file and directory management as the most proposed. [Conclusions] This study highlights critical areas within ML asset management that need further exploration, particularly around prevalent macro-topics identified as pain points for ML practitioners, emphasizing the need for collaborative efforts between academia, industry, and the broader research community.</p>&nbsp;Tags: "SE for AI" &nbsp;<br> &nbsp;<br> <h5> Iren Mazloomzadeh, Gias Uddin, Foutse Khomh, Ashkan Sami, "Reputation Gaming in Crowd Technical Knowledge Sharing" </h5> <p><b>Abstract:</b> Stack Overflow’s incentive system awards users reputation scores to ensure quality. The decentralized nature of the forum may make the incentive system prone to manipulation. This paper offers, for the first time, a comprehensive study of the reported types of reputation manipulation scenarios that might be exercised in Stack Overflow, and of the prevalence of such reputation gamers, through a qualitative study of 1,697 posts from meta Stack Exchange sites. We found four different types of reputation fraud scenarios, such as voting rings where communities form to upvote each other repeatedly on similar posts. We developed algorithms that enable platform managers to automatically identify these suspicious reputation gaming scenarios for review. The first algorithm identifies isolated/semi-isolated communities in which reputation fraud is likely to occur through mutual collaboration. The second algorithm looks for sudden, unusually large jumps in users’ reputation scores. We evaluated the performance of our algorithms by examining the reputation history dashboards of Stack Overflow users on the Stack Overflow website. We observed that around 60-80% of users flagged as suspicious by our algorithms experienced reductions in their reputation scores by Stack Overflow.</p>&nbsp;Tags: "Human/Social" &nbsp;<br> &nbsp;<br> <h5> Hanying Shao, Zishuo Ding, Weiyi Shang, Jinqiu Yang, Nikolaos Tsantalis, "Towards Effectively Testing Machine Translation Systems from White-Box Perspectives" </h5> <p><b>Abstract:</b> Neural Machine Translation (NMT) has experienced significant growth over the last decade. Despite these advancements, machine translation systems still face various issues. In response, metamorphic testing approaches have been introduced for testing machine translation systems. Such approaches involve token replacement, where a single token in the original source sentence is substituted to create mutants. By comparing the translations of mutants with the original translation, potential bugs in the translation systems can be detected. However, the selection of tokens for replacement in the original sentence remains an intriguing problem, deserving further exploration in testing approaches.
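</p><p><b>Illustrative sketch.</b> A common white-box heuristic for this selection problem, and the general idea behind the gradient-based approach introduced next, is to rank tokens by the gradient of the model's loss with respect to their embeddings; a minimal sketch with a toy model (a stand-in, not a real NMT system):</p>
<pre><code># Tokens whose embedding gradients have the largest norm are the most
# sensitive candidates for replacement.
import torch

torch.manual_seed(0)
vocab, dim = 50, 8
emb = torch.nn.Embedding(vocab, dim)
head = torch.nn.Linear(dim, vocab)         # toy next-token predictor

tokens = torch.tensor([3, 17, 5, 42])      # a toy "source sentence"
x = emb(tokens)                            # (4, dim)
x.retain_grad()                            # keep gradients on the embeddings
logits = head(x)
loss = torch.nn.functional.cross_entropy(logits, torch.tensor([1, 2, 3, 4]))
loss.backward()

saliency = x.grad.norm(dim=1)              # one score per token
print(saliency.argsort(descending=True))   # most vulnerable tokens first
</code></pre><p>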
To address this problem, we design two white-box approaches to identify vulnerable tokens in the source sentence, whose perturbation is most likely to induce translation bugs for a translation system. The first approach, named GRI, utilizes the GRadient Information to identify the vulnerable tokens for replacement, and our second approach, named WALI, uses Word ALignment Information to locate the vulnerable tokens. We evaluate the proposed approaches on a Transformer-based translation system with the News Commentary dataset and 200 English sentences extracted from CNN articles. The results show that both GRI and WALI can effectively generate high-quality test cases for revealing translation bugs. Specifically, our approaches always outperform state-of-the-art automatic machine translation testing approaches in two respects: (1) under a certain testing budget (i.e., number of executed test cases), both GRI and WALI can reveal a larger number of bugs than baseline approaches, and (2) when given a predefined testing goal (i.e., number of detected bugs), our approaches always require fewer testing resources (i.e., a reduced number of test cases to execute).</p>&nbsp;Tags: "Testing and Quality" &nbsp;<br> &nbsp;<br> <h5> Belinda Schantong, Norbert Siegmund, Janet Siegmund, "Toward a Theory on Programmer’s Block Inspired by Writer’s Block" </h5> <p><b>Abstract:</b> [Context] Programmer’s block, akin to writer’s block, is a phenomenon where capable programmers struggle to create code. Despite anecdotal evidence, no scientific studies have explored the relationship between programmer’s block and writer’s block. [Objective] The primary objective of this study is to investigate the presence of blocks during programming and their potential causes. [Method] We conducted semi-structured interviews with experienced programmers to capture their processes, the problems they face, and potential causes. Subsequently, we analyzed the responses through the lens of writing. [Results] We found that, among the problems programmers encounter during programming, several display strong similarities to writer’s block. Moreover, when investigating possible causes of such blocks, we found a strong relationship between programming and writing activities as well as typical writing strategies employed by programmers. [Conclusions] Strong similarities between programming and writing challenges, processes, and strategies confirm the existence of programmer’s block with similar causes to writer’s block. Thus, strategies from writing used to resolve blocks should be applicable in programming, helping developers to overcome phases of being stuck. Research at the intersection of both areas could lead to productivity gains through reduced developer downtimes.</p>&nbsp;Tags: "Human/Social", "Process" &nbsp;<br> &nbsp;<br> <h5> Neelam Tjikhoeri, Lauren Olson, Emitza Guzman, "Best ends by the best means: ethical concerns in app reviews" </h5> <p><b>Abstract:</b> This work analyzes ethical concerns found in users’ app store reviews. We performed this study because ethical concerns in mobile applications (apps) are widespread, pose severe threats to end users and society, and lack systematic analysis and methods for detection and classification. In addition, app store reviews allow practitioners to collect users’ perspectives, crucial for identifying software flaws, from a geographically distributed and large-scale audience.
For our analysis, we collected five million user reviews, developed a set of ethical concerns representative of user preferences, and manually labeled a sample of these reviews. We found that (1) users most frequently report ethical concerns about censorship, identity theft, and safety; (2) user reviews with ethical concerns are longer, more popular, and rated lower; and (3) there is high automation potential for the classification and filtering of these reviews. Our results highlight the relevance of using app store reviews for the systematic consideration of ethical concerns during software evolution.</p>&nbsp;Tags: "Human/Social" &nbsp;<br> &nbsp;<br> <h5> Bianca Trinkenreich, Fabio Santos, Klaas-Jan Stol, "Predicting Attrition among Software Professionals: Antecedents and Consequences of Burnout and Engagement" </h5> <p><b>Abstract:</b> In this study of burnout and engagement, we address three major themes. First, we offer a review of prior studies of burnout among IT professionals and link these studies to the Job Demands-Resources (JD-R) model. Informed by the JD-R model, we identify three factors that are organizational job resources, and posit that these (a) increase engagement, and (b) decrease burnout. Second, we extend the JD-R model by considering software professionals’ intention to stay as a consequence of these two affective states, burnout and engagement. Third, we focus on the importance of factors for intention to stay, and actual retention behavior. We use a unique dataset of over 13,000 respondents at one global IT organization, enriched with employment status 90 days after the initial survey. Leveraging partial least squares structural equation modeling and machine learning, we find that the data mostly support our theoretical model, with some variation across different subgroups of respondents. An importance-performance map analysis suggests that managers may wish to focus on interventions regarding burnout as a predictor of intention to leave. The machine learning model suggests that engagement and opportunities to learn are the two most important factors explaining whether software professionals leave an organization.</p>&nbsp;Tags: "Human/Social" &nbsp;<br> &nbsp;<br> <h5> Ricardo Caldas, Juan Antonio Pinera Garcia, Matei Schiopu, Patrizio Pelliccione, Genaína Nunes Rodrigues, Thorsten Berger, "Runtime Verification and Field-based Testing for ROS-based Robotic Systems" </h5> <p><b>Abstract:</b> Robotic systems are becoming pervasive and adopted in increasingly many domains, such as manufacturing, healthcare, and space exploration. To this end, software engineering has emerged as a crucial discipline for building maintainable and reusable robotic systems. The field of robotics software engineering research has received increasing attention, fostering autonomy as a fundamental goal. However, robotics developers still struggle to achieve this goal, given that simulation cannot realistically emulate real-world phenomena. Robots also need to operate in unpredictable and uncontrollable environments, which require safe and trustworthy self-adaptation capabilities implemented in software. Typical techniques to address these challenges are runtime verification, field-based testing, and mitigation techniques that enable fail-safe solutions. However, there is no clear guidance to architect ROS-based systems to enable and facilitate runtime verification and field-based testing.
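</p><p><b>Illustrative sketch.</b> As a minimal, framework-agnostic illustration of the runtime-verification side, the sketch below checks a bounded-response property over a timestamped message trace (a real ROS deployment would subscribe to topics instead of iterating a recorded trace; the property and trace are hypothetical):</p>
<pre><code># Monitor the property "every command is acknowledged within 1.0 s".
def monitor(trace, deadline=1.0):
    pending, violations = [], []
    for t, msg in trace:
        # commands that have waited longer than the deadline are violations
        late = [p for p in pending if t - p > deadline]
        violations += late
        pending = [p for p in pending if p not in late]
        if msg == "cmd":
            pending.append(t)
        elif msg == "ack" and pending:
            pending.pop(0)              # oldest command acknowledged
    return violations

trace = [(0.0, "cmd"), (0.4, "ack"), (1.0, "cmd"), (2.6, "ack")]
print(monitor(trace))                   # [1.0]: the second ack arrived too late
</code></pre><p>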
This paper aims to fill this gap by providing guidelines that can help developers and quality assurance (QA) teams when developing, verifying, or testing their robots in the field. These guidelines are carefully tailored to address the challenges and requirements of testing robotics systems in real-world scenarios. We (i) conducted a literature review on studies addressing runtime verification and field-based testing for robotic systems, (ii) mined ROS-based application repositories, and (iii) validated the guidelines’ applicability, clarity, and usefulness via two questionnaires with 55 answers overall. We contribute 20 guidelines: 8 for developers and 12 for QA teams, formulated for researchers and practitioners in robotic software engineering. Finally, we map our guidelines to open challenges in runtime verification and field-based testing for ROS-based systems, and we outline promising research directions in the field. Guidelines website and replication package: https://ros-rvft.github.io.</p>&nbsp;Tags: "Testing and Quality" &nbsp;<br> &nbsp;<br> <h5> Daniel Ramos, Ines Lynce, Vasco Manquinho, Ruben Martins, Claire Le Goues, "BatFix: Repairing language model-based transpilation" </h5> <p><b>Abstract:</b> To keep up with changes in requirements, frameworks, and coding practices, software organizations might need to migrate code from one language to another. Source-to-source migration, or transpilation, is often a complex, manual process. Transpilation requires expertise both in the source and target language, making it highly laborious and costly. Language models for code generation and transpilation are becoming increasingly popular. However, despite capturing code structure well, code generated by language models is often spurious and contains subtle problems. We propose BatFix, a novel approach that augments language models for transpilation by leveraging program repair and synthesis to fix the code generated by these models. BatFix takes as input the original program, the target program generated by the machine translation model, and a set of test cases, and outputs a repaired program that passes all test cases. Experimental results show that our approach is agnostic to language models and programming languages. BatFix can locate bugs spanning multiple lines and synthesize patches for syntax and semantic bugs for programs migrated from Java to C++ and Python to C++ from multiple language models, including OpenAI’s Codex.</p>&nbsp;Tags: "Testing and Quality", "Prog Comprehension/Reeng/Maint" &nbsp;<br> &nbsp;<br> <h5> Nimmi Rsshinika Weeraddana, Mahmoud Alfadel, Shane McIntosh, "Characterizing Timeout Builds in Continuous Integration" </h5> <p><b>Abstract:</b> Compute resources that enable Continuous Integration (CI, i.e., the automatic build and test cycle applied to the change sets that development teams produce) are a shared commodity that organizations need to manage. To prevent (erroneous) builds from consuming a large amount of resources, CI service providers often impose a time limit. CI builds that exceed the time limit are automatically terminated. While imposing a time limit helps to prevent abuse of the service, builds that time out (a) consume the maximum amount of resources that a CI service is willing to provide and (b) leave CI users without an indication of whether the change set will pass or fail the CI process. Therefore, understanding timeout builds and the factors that contribute to them is important for improving the stability and quality of a CI service.
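</p><p><b>Illustrative sketch.</b> A model of the kind examined next can be sketched as a classifier over build-history features (the feature values below are hypothetical toy data; the study's model uses far richer characteristics):</p>
<pre><code># Predict whether a build will time out from simple history features.
from sklearn.linear_model import LogisticRegression

# features: [previous_build_timed_out, queued_minutes, past_timeout_rate]
X = [[1, 12.0, 0.30], [0, 1.5, 0.02], [1, 8.0, 0.25],
     [0, 2.0, 0.05], [1, 15.0, 0.40], [0, 0.5, 0.01]]
y = [1, 0, 1, 0, 1, 0]                       # did this build time out?

model = LogisticRegression().fit(X, y)
print(model.predict_proba([[1, 10.0, 0.2]])[0][1])   # estimated P(timeout)
</code></pre><p>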
In this paper, we investigate the prevalence of timeout builds and the characteristics associated with them. By analyzing a curated dataset of 936 projects that adopt the CircleCI service and report at least one timeout build, we find that the median duration of a timeout build (19.7 minutes) is more than five times that of a build that produces a pass or fail result (3.4 minutes). To better understand the factors contributing to timeout builds, we model timeout builds using characteristics of project build history, build queued time, timeout tendency, size, and author experience based on data collected from 105,663 CI builds. Our model demonstrates a discriminatory power that vastly surpasses that of a random predictor (Area Under the Receiver Operating characteristic Curve, i.e., AUROC = 0.939) and is highly stable in its performance (AUROC optimism = 0.0001). Moreover, our model reveals that the build history and timeout tendency features are strong indicators of timeout builds, with the timeout status of the most recent build accounting for the largest proportion of the explanatory power. A longitudinal analysis of the incidence of timeout builds (i.e., a study conducted over a period of time) indicates that 64.03% of timeout builds occur consecutively. In such cases, it takes a median of 24 hours before a build that passes or fails occurs. Our results imply that CI providers should exploit build history to anticipate timeout builds.</p>&nbsp;Tags: "Process", "Testing and Quality" &nbsp;<br> &nbsp;<br> <h5> Xinyi Wang, Asmar Muqeet, Tao Yue, Shaukat Ali, Paolo Arcaini, "Test Case Minimization with Quantum Annealing" </h5> <p><b>Abstract:</b> Quantum annealers are specialized quantum computers for solving combinatorial optimization problems with special quantum computing characteristics, e.g., superposition and entanglement. Theoretically, quantum annealers can outperform classical computers. However, current quantum annealers are constrained by a limited number of qubits and cannot demonstrate quantum advantages. Nonetheless, research is needed to develop novel mechanisms to formulate combinatorial optimization problems for quantum annealing (QA). However, QA applications in software engineering remain unexplored. Thus, we propose BootQA, the very first effort at solving test case minimization (TCM) problems on classical software with QA. We provide a novel TCM formulation for QA and utilize bootstrap sampling to optimize the qubit usage. We also implemented our TCM formulation in three other optimization processes: simulated annealing (SA), QA without problem decomposition, and QA with an existing D-Wave problem decomposition strategy, and conducted an empirical evaluation with three real-world TCM datasets. Results show that BootQA outperforms QA without problem decomposition and QA with the existing decomposition strategy in terms of effectiveness. Moreover, BootQA’s effectiveness is similar to SA. Finally, BootQA has higher efficiency in terms of time when solving large TCM problems than the other three optimization processes.</p>&nbsp;Tags: "Testing and Quality", "Quantum" &nbsp;<br> &nbsp;<br> <h5> Partha Chakraborty, Mahmoud Alfadel, Mei Nagappan, "RLocator: Reinforcement Learning for Bug Localization" </h5> <p><b>Abstract:</b> Software developers spend a significant portion of time fixing bugs in their projects. To streamline this process, bug localization approaches have been proposed to identify the source code files that are likely responsible for a particular bug.
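</p><p><b>Illustrative sketch.</b> The classic similarity-based baseline referenced next can be sketched as ranking files by textual similarity to the bug report (toy file contents and report below):</p>
<pre><code># Rank source files by TF-IDF cosine similarity to a bug report.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

files = {
    "auth.py":  "login session token verify password",
    "cache.py": "evict entry ttl expire memory store",
    "http.py":  "request response header timeout retry",
}
report = "user cannot login, password verification fails"

vec = TfidfVectorizer()
matrix = vec.fit_transform(list(files.values()) + [report])
scores = cosine_similarity(matrix[-1], matrix[:-1]).ravel()
ranking = sorted(zip(files, scores), key=lambda p: -p[1])
print(ranking)   # auth.py should rank first
</code></pre><p>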
Prior work proposed several similarity-based machine-learning techniques for bug localization. Despite significant advances in these techniques, they do not directly optimize the evaluation measures. We argue that directly optimizing evaluation measures can positively contribute to the performance of bug localization approaches. Therefore, in this paper, we utilize Reinforcement Learning (RL) techniques to directly optimize the ranking metrics. We propose RLocator, a Reinforcement Learning-based bug localization approach. We formulate RLocator using a Markov Decision Process (MDP) to optimize the evaluation measures directly. We present the technique and experimentally evaluate it based on a benchmark dataset of 8,316 bug reports from six highly popular Apache projects. The results of our evaluation reveal that RLocator achieves a Mean Reciprocal Rank (MRR) of 0.62, a Mean Average Precision (MAP) of 0.59, and a Top 1 score of 0.46. We compare RLocator with three state-of-the-art bug localization tools, FLIM, BugLocator, and BL-GAN. Our evaluation reveals that RLocator outperforms all three approaches by a substantial margin, with improvements of 38.3% in MAP, 36.73% in MRR, and 23.68% in the Top K metric. These findings highlight that directly optimizing evaluation measures considerably contributes to performance improvements on the bug localization problem.</p>&nbsp;Tags: "Testing and Quality", "AI for SE" &nbsp;<br> &nbsp;<br> <h5> Nima Shiri harzevili, Mohammad Mahdi Mohajer, Moshi Wei, Hung Viet Pham, Song Wang, "History-Driven Fuzzing for Deep Learning Libraries" </h5> <p><b>Abstract:</b> Recently, many Deep Learning (DL) fuzzers have been proposed for API-level testing of DL libraries. However, they either perform unguided input generation (e.g., not considering the relationship between API arguments when generating inputs) or only support a limited set of corner-case test inputs. Furthermore, many developer APIs crucial for library development remain untested, as they are typically not well documented and lack clear usage guidelines, unlike end-user APIs. This makes them a more challenging target for automated testing. To fill this gap, we propose a novel fuzzer named Orion, which combines guided test input generation and corner-case test input generation based on a set of fuzzing heuristic rules constructed from historical data known to trigger critical issues in the underlying implementation of DL APIs. To extract the fuzzing heuristic rules, we first conduct an empirical study on the root cause analysis of 376 vulnerabilities in two of the most popular DL libraries, PyTorch and TensorFlow. We then construct the fuzzing heuristic rules based on the root causes of the extracted historical vulnerabilities. Using these fuzzing heuristic rules, Orion generates corner-case test inputs for API-level fuzzing. In addition, we extend the seed collection of existing studies to include test inputs for developer APIs. Our evaluation shows that Orion reports 135 vulnerabilities in the latest releases of TensorFlow and PyTorch, 76 of which were confirmed by the library developers. Among the 76 confirmed vulnerabilities, 69 were previously unknown, and 7 have already been fixed. The rest are awaiting further confirmation. For end-user APIs, Orion detected 45.58% and 90% more vulnerabilities in TensorFlow and PyTorch, respectively, compared to the state-of-the-art conventional fuzzer, DeepRel.
Compared to the state-of-the-art LLM-based DL fuzzer, AtlasFuz, Orion detected 13.63% more vulnerabilities in TensorFlow and 18.42% more vulnerabilities in PyTorch. Regarding developer APIs, Orion stands out by detecting 117% more vulnerabilities in TensorFlow and 100% more vulnerabilities in PyTorch compared to FreeFuzz, the most relevant fuzzer designed for developer APIs.</p>&nbsp;Tags: "Testing and Quality", "AI for SE", "Security" &nbsp;<br> &nbsp;<br> <h5> Xu Yang, Gopi Krishnan Rajbahadur, Dayi Lin, Shaowei Wang, Zhen Ming (Jack) Jiang, "SimClone: Detecting Tabular Data Clones using Value Similarity" </h5> <p><b>Abstract:</b> Data clones are defined as multiple copies of the same data among datasets. The presence of data clones between datasets can cause issues such as difficulties in managing data assets and data license violations when using datasets with clones to build AI software. However, detecting data clones is not trivial. The majority of prior studies in this area rely on structural information to detect data clones (e.g., font size, column header). However, tabular datasets used to build AI software are typically stored without any structural information. In this paper, we propose a novel method called SimClone for data clone detection in tabular datasets without relying on structural information. SimClone utilizes value similarity for data clone detection. We also propose a visualization approach as a part of our SimClone method to help locate the exact position of the cloned data between a dataset pair. Our results show that SimClone outperforms the current state-of-the-art method by at least 20% in terms of both F1-score and AUC. In addition, SimClone’s visualization component helps identify the exact location of the data clone in a dataset with a Precision@10 value of 0.80 in the top 20 true positive predictions.</p>&nbsp;Tags: "Prog Comprehension/Reeng/Maint", "Testing and Quality" &nbsp;<br> &nbsp;<br> <h5> Carolin Brandt, Ali Khatami, Mairieli Wessel, Andy Zaidman, "Shaken, Not Stirred. How Developers Like Their Amplified Tests" </h5> <p><b>Abstract:</b> Test amplification makes systematic changes to existing, manually written tests to provide tests complementary to an automated test suite. We consider developer-centric test amplification, where the developer explores, judges and edits the amplified tests before adding them to their maintained test suite. However, it is as yet unclear which kinds of selection and editing steps developers take before including an amplified test into the test suite. In this paper, we conduct an open source contribution study, amplifying tests of open source Java projects from GitHub. We report which deficiencies we observe in the amplified tests while manually filtering and editing them to open 39 pull requests with amplified tests. We present a detailed analysis of the maintainers’ feedback regarding proposed changes, requested information, and expressed judgment. Our observations provide a basis for practitioners to take an informed decision on whether to adopt developer-centric test amplification.
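</p><p><b>Illustrative sketch.</b> As a toy illustration of the underlying idea (not the specific amplification tool used in the study), the sketch below perturbs a seed test's inputs and turns the observed outputs into candidate regression assertions, which a developer would then judge and edit:</p>
<pre><code># Amplify a seed test by varying its input and capturing current behaviour.
def slugify(s):                       # unit under test (hypothetical)
    return "-".join(s.lower().split())

def amplify(seed_input):
    variants = [seed_input.upper(), f"  {seed_input}  ", seed_input + "!"]
    for v in variants:
        observed = slugify(v)         # record the current output
        print(f'def test_slugify_{abs(hash(v)) % 1000}():\n'
              f'    assert slugify({v!r}) == {observed!r}\n')

amplify("Hello World")                # prints candidate amplified tests
</code></pre><p>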
As several of the edits we observe are based on the developer’s understanding of the amplified test, we conjecture that developer-centric test amplification should invest in supporting developers in understanding the amplified tests.</p>&nbsp;Tags: "Human/Social" &nbsp;<br> &nbsp;<br> <h5> Stephen John Warnett, Uwe Zdun, "On the Understandability of MLOps System Architectures" </h5> <p><b>Abstract:</b> Machine Learning Operations (MLOps) is the practice of streamlining and optimising the machine learning (ML) workflow, from development to deployment, using DevOps (software development and IT operations) principles and ML-specific activities. Architectural descriptions of MLOps systems often consist of informal textual descriptions and informal graphical system diagrams that vary considerably in consistency, quality, detail, and content. Such descriptions only sometimes follow standards or schemata and may be hard to understand. We aimed to investigate informal textual descriptions and informal graphical MLOps system architecture representations and compare them with semi-formal MLOps system diagrams for those systems. We report on a controlled experiment with sixty-three participants investigating the understandability of MLOps system architecture descriptions based on informal and semi-formal representations. The results indicate that the understandability (quantified by task correctness) of MLOps system descriptions is significantly greater using supplementary semi-formal MLOps system diagrams, that using semi-formal MLOps system diagrams does not significantly increase task duration (and thus hinder understanding), and that task correctness is only significantly correlated with task duration when semi-formal MLOps system diagrams are provided.</p>&nbsp;Tags: "Prog Comprehension/Reeng/Maint" &nbsp;<br> &nbsp;<br> <h5> Zhenyang Xu, Yongqiang Tian, Mengxiao Zhang, Jiarui Zhang, Puzhuo Liu, Yu Jiang, Chengnian Sun, "T-Rec: Fine-Grained Language-Agnostic Program Reduction Guided by Lexical Syntax" </h5> <p><b>Abstract:</b> Program reduction strives to eliminate bug-irrelevant code elements from a bug-triggering program, so that (1) a smaller and more straightforward bug-triggering program can be obtained, and (2) the difference among duplicates (i.e., different programs that trigger the same bug) can be minimized or even eliminated. With such reduction and canonicalization functionality, program reduction facilitates debugging for software, especially language toolchains, such as compilers, interpreters, and debuggers. While many program reduction techniques have been proposed, most of them (especially the language-agnostic ones) overlooked the potential reduction opportunities hidden within tokens. Therefore, their capabilities in terms of reduction and canonicalization are significantly restricted. To fill this gap, we propose T-Rec, a fine-grained language-agnostic program reduction technique guided by lexical syntax. Instead of treating tokens as atomic and irreducible components, T-Rec introduces a fine-grained reduction process that leverages the lexical syntax of programming languages to effectively explore the reduction opportunities in tokens. Through comprehensive evaluations with diverse benchmark suites, we demonstrate that T-Rec significantly improves the reduction and canonicalization capability of two existing language-agnostic program reducers (i.e., Perses and Vulcan).
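</p><p><b>Illustrative sketch.</b> The within-token opportunity that T-Rec targets can be illustrated by a greedy character-level reduction against a bug oracle (the oracle below is a toy stand-in; T-Rec's actual algorithm is guided by lexical syntax rather than blind character removal):</p>
<pre><code># Greedily drop characters from a token while the program still "fails".
def still_fails(program):             # stand-in bug oracle
    return "index" in program and "[" in program

def reduce_token(program, token):
    reduced = token
    i = 0
    while i &lt; len(reduced):
        candidate = reduced[:i] + reduced[i + 1:]
        if candidate and still_fails(program.replace(token, candidate)):
            reduced = candidate       # keep the smaller variant
        else:
            i += 1                    # this character is needed; move on
    return program.replace(token, reduced)

print(reduce_token("arr[index_counter] = 0", "index_counter"))
# -> "arr[index] = 0": the token shrinks while the bug still triggers
</code></pre><p>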
T-Rec enables Perses and Vulcan to further eliminate 1,294 and 1,315 duplicates, respectively, in a benchmark suite containing 3,796 test cases that trigger 46 unique bugs. Additionally, T-Rec reduces the results of Perses and Vulcan by up to 65.52% and 53.73% of bytes, respectively, on our multi-lingual benchmark suite.</p>&nbsp;Tags: "Analysis" &nbsp;<br> &nbsp;<br> <h5> Yuanjie Xia, Lizhi Liao, Jinfu Chen, Heng Li, Weiyi Shang, "Reducing the Length of Field-replay Based Load Testing" </h5> <p><b>Abstract:</b> As software systems continuously grow in size and complexity, performance- and load-related issues have become more common than functional issues. Load testing is usually performed before software releases to ensure that the software system can still provide quality service under a certain load. Therefore, one of the common challenges of load testing is to design realistic workloads that can represent the actual workload in the field. In particular, one of the most widely adopted and intuitive approaches is to directly replay the field workloads in the load testing environment. However, replaying a lengthy field workload, e.g., 48 hours, is rather resource- and time-consuming, and sometimes even infeasible for large-scale software systems that adopt a rapid release cycle. On the other hand, replaying a short duration of the field workloads may still result in unrealistic load testing. In this work, we propose an automated approach to reduce the length of load testing that is driven by replaying the field workloads. The intuition of our approach is: if the measured performance associated with a particular system behaviour is already stable, we can skip subsequent testing of this system behaviour to reduce the length of the field workloads. In particular, our approach first clusters execution logs that are generated during the system runtime to identify similar system behaviours during the field workloads. Then, we use statistical methods to determine whether the measured performance associated with a system behaviour has been stable. We evaluate our approach on three open-source projects (i.e., OpenMRS , TeaStore , and Apache James ). The results show that our approach can significantly reduce the length of field workloads while the workloads after reduction remain representative of the original set of workloads. More importantly, the load testing results obtained by replaying the workloads after the reduction have a high correlation and similar trends with those of the original workloads. Practitioners can leverage our approach to perform realistic field-replay based load testing while saving the needed resources and time. Our approach sheds light on future research that aims to reduce the cost of load testing for large-scale software systems.</p>&nbsp;Tags: "Testing and Quality", "Analysis" &nbsp;<br> &nbsp;<br> <h5> Jinjing Shi, ZiMeng Xiao, Heyuan Shi, Yu Jiang, Xuelong LI, "QuanTest: Entanglement-Guided Testing of Quantum Neural Network Systems" </h5> <p><b>Abstract:</b> A Quantum Neural Network (QNN) combines Deep Learning (DL) principles with the fundamental theory of quantum mechanics to achieve machine learning tasks with quantum acceleration. Recently, QNN systems have been found to manifest robustness issues similar to classical DL systems. There is an urgent need for ways to test their correctness and security. However, QNN systems differ significantly from traditional quantum software and classical DL systems, posing critical challenges for QNN testing.
These challenges include the inapplicability of traditional quantum software testing methods to QNN systems due to differences in programming paradigms and decision logic representations, the dependence of quantum test sample generation on perturbation operators, and the absence of effective information in quantum neurons. In this paper, we propose QuanTest, a quantum entanglement-guided adversarial testing framework to uncover potential erroneous behaviors in QNN systems. We design a quantum entanglement adequacy criterion to quantify the entanglement acquired by the input quantum states from the QNN system, along with two similarity metrics to measure the proximity of generated quantum adversarial examples to the original inputs. Subsequently, QuanTest formulates the problem of generating test inputs that maximize the quantum entanglement adequacy and capture incorrect behaviors of the QNN system as a joint optimization problem and solves it in a gradient-based manner to generate quantum adversarial examples. Experimental results demonstrate that QuanTest possesses the capability to capture erroneous behaviors in QNN systems (generating 67.48%-96.05% more high-quality test samples than random noise under the same perturbation size constraints). The entanglement-guided approach proves effective in adversarial testing, generating more adversarial examples (with a maximum increase of 21.32%).</p>&nbsp;Tags: "SE for AI", "Quantum" &nbsp;<br> &nbsp;<br> <h5> Han Wang, Sijia Yu, Chunyang Chen, Burak Turhan, Xiaodong Zhu, "Beyond Accuracy: An Empirical Study on Unit Testing in Open-source Deep Learning Projects" </h5> <p><b>Abstract:</b> Deep Learning (DL) models have rapidly advanced, focusing on achieving high performance through testing model accuracy and robustness. However, it is unclear whether DL projects, as software systems, are tested thoroughly or functionally correct when there is a need to treat and test them like other software systems. Therefore, we empirically study the unit tests in open-source DL projects, analyzing 9,129 projects from GitHub. We find that: (1) unit-tested DL projects correlate positively with open-source project metrics and have a higher acceptance rate of pull requests; (2) 68% of the sampled DL projects are not unit tested at all; (3) the layer and utilities (utils) of DL models have the most unit tests. Based on these findings and previous research outcomes, we built a mapping taxonomy between unit tests and faults in DL projects. We discuss the implications of our findings for developers and researchers and highlight the need for unit testing in open-source DL projects to ensure their reliability and stability. The study contributes to this community by raising awareness of the importance of unit testing in DL projects and encouraging further research in this area.</p>&nbsp;Tags: "SE for AI", "Testing and Quality" &nbsp;<br> &nbsp;<br> <h5> Michael Fu, Van Nguyen, Kla Tantithamthavorn, Dinh Phung, Trung Le, "Vision Transformer Inspired Automated Vulnerability Repair" </h5> <p><b>Abstract:</b> Recently, automated vulnerability repair (AVR) approaches have been widely adopted to combat increasing software security issues. In particular, transformer-based encoder-decoder models achieve competitive results. While vulnerable programs may only consist of a few vulnerable code areas that need repair, existing AVR approaches lack a mechanism guiding their model to pay more attention to vulnerable code areas during repair generation.
In this article, we propose a novel vulnerability repair framework inspired by Vision Transformer-based approaches for object detection in the computer vision domain. Similar to the object queries used to locate objects in object detection, we introduce and leverage vulnerability queries (VQs) to locate vulnerable code areas and then suggest their repairs. In particular, we leverage the cross-attention mechanism to achieve the cross-match between VQs and their corresponding vulnerable code areas. To strengthen our cross-match and generate more accurate vulnerability repairs, we propose to learn a novel vulnerability mask (VM) and integrate it into the decoders’ cross-attention, which makes our VQs pay more attention to vulnerable code areas during repair generation. In addition, we incorporate our VM into the encoders’ self-attention to learn embeddings that emphasize the vulnerable areas of a program. Through an extensive evaluation using 5,417 real-world vulnerabilities, our approach outperforms all of the automated vulnerability repair baseline methods by 2.68% to 32.33%. Additionally, our analysis of the cross-attention map of our approach confirms the design rationale of our VM and its effectiveness. Finally, our survey study with 71 software practitioners highlights the significance and usefulness of AI-generated vulnerability repairs in the realm of software security. The training code and pre-trained models are available at https://github.com/awsm-research/VQM.</p>&nbsp;Tags: "Testing and Quality", "Analysis" &nbsp;<br> &nbsp;<br> <h5> Jon Ayerdi, Valerio Terragni, Gunel Jahangirova, Aitor Arrieta, Paolo Tonella, "GenMorph: Automatically Generating Metamorphic Relations via Genetic Programming" </h5> <p><b>Abstract:</b> Metamorphic testing is a popular approach that aims to alleviate the oracle problem in software testing. At the core of this approach are Metamorphic Relations (MRs), specifying properties that hold among multiple test inputs and corresponding outputs. Deriving MRs is mostly a manual activity, since their automated generation is a challenging and largely unexplored problem. This paper presents GenMorph, a technique to automatically generate MRs for Java methods that involve inputs and outputs that are boolean, numerical, or ordered sequences. GenMorph uses an evolutionary algorithm to search for effective test oracles, i.e., oracles that trigger no false alarms and expose software faults in the method under test. The proposed search algorithm is guided by two fitness functions that measure the number of false alarms and the number of missed faults for the generated MRs. Our results show that GenMorph generates effective MRs for 18 out of 23 methods (mutation score &gt; 20%). Furthermore, it can increase Randoop's fault detection capability in 7 out of 23 methods, and Evosuite's in 14 out of 23 methods.
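</p>
<p>The two fitness signals can be illustrated with a minimal sketch (hypothetical encoding, not GenMorph's actual MR representation): an MR is modelled as a predicate over a source input/output pair and a follow-up input/output pair, scored by counting false alarms on the correct method and missed faults on mutants:</p>
<pre><code>def fitness(mr, method, inputs, transform, mutants):
    """Return (false_alarms, missed_faults) for a candidate MR."""
    false_alarms = sum(
        not mr(x, method(x), transform(x), method(transform(x)))
        for x in inputs  # the MR must hold on the correct method
    )
    missed_faults = sum(
        all(mr(x, m(x), transform(x), m(transform(x))) for x in inputs)
        for m in mutants  # the MR should fail on faulty variants
    )
    return false_alarms, missed_faults

# Example MR for abs(): negating the input must not change the output.
mr = lambda x, y, fx, fy: y == fy
print(fitness(mr, abs, [-2, 0, 3], lambda x: -x, [lambda x: x]))  # (0, 0)
</code></pre>
<p>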
When compared with AutoMR, a state-of-the-art MR generator, GenMorph also showed better fault detection capability in 9 out of 10 methods.</p>&nbsp;Tags: "Testing and Quality" &nbsp;<br> &nbsp;<br> <h5> Jorge Melegati, Kieran Conboy, Daniel Graziotin, "Qualitative Surveys in Software Engineering Research: Definition, Critical Review, and Guidelines" </h5> <p><b>Abstract:</b> Qualitative surveys are emerging as a popular research method in software engineering (SE), particularly as many aspects of the field are increasingly socio-technical and thus concerned with the subtle, social, and often ambiguous issues that are not amenable to a simple quantitative survey. While many argue that qualitative surveys play a vital role amongst the diverse range of methods employed in SE, a number of shortcomings inhibit their use and value. First, there is a lack of clarity as to what defines a qualitative survey and what features differentiate it from other methods. There is also an absence of a clear set of principles and guidelines for its execution, and what does exist is very inconsistent and sometimes contradictory. These issues undermine the perceived reliability and rigour of this method. Researchers are unsure about how to ensure reliability and rigour when designing qualitative surveys, and reviewers are unsure how these should be evaluated. In this paper, we present a systematic mapping study to identify how qualitative surveys have been employed in SE research to date. This paper proposes a set of principles, based on a multidisciplinary review of qualitative surveys and capturing some of the commonalities of the diffuse approaches found. These principles can be used by researchers when choosing whether to do a qualitative survey or not. They can then be used to design their study. The principles can also be used by editors and reviewers to judge the quality and rigour of qualitative surveys. It is hoped that this will result in more widespread use of the method and also more effective and evidence-based reviews of studies that use these methods in the future.</p>&nbsp;Tags: "Research Methods" &nbsp;<br> &nbsp;<br> <h5> Luca Giamattei, Matteo Biagiola, Roberto Pietrantuono, Stefano Russo, Paolo Tonella, "Reinforcement Learning for Online Testing of Autonomous Driving Systems: a Replication and Extension Study" </h5> <p><b>Abstract:</b> In a recent study, Reinforcement Learning (RL), used in combination with many-objective search, was shown to outperform alternative techniques (random search and many-objective search) for online testing of Deep Neural Network-enabled systems. The empirical evaluation of these techniques was conducted on a state-of-the-art Autonomous Driving System (ADS). This work is a replication and extension of that empirical study. Our replication shows that RL does not outperform pure random test generation in a comparison conducted under the same settings as the original study, but with no confounding factor coming from the way collisions are measured. Our extension aims at eliminating some of the possible reasons for the poor performance of RL observed in our replication: (1) the presence of reward components providing contrasting feedback to the RL agent; (2) the usage of an RL algorithm (Q-learning) that requires discretization of an intrinsically continuous state space. Results show that our new RL agent is able to converge to an effective policy that outperforms random search.
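</p>
<p>For readers unfamiliar with the discretization issue mentioned above, the textbook tabular Q-learning update looks as follows (a generic sketch, not the replicated study's agent); note that a continuous driving state must first be bucketed, which discards information:</p>
<pre><code>from collections import defaultdict

ALPHA, GAMMA = 0.1, 0.99
Q = defaultdict(float)  # maps (state_bucket, action) to a value estimate

def discretize(state, step=0.5):
    """Bucket a continuous state vector; information is lost here."""
    return tuple(round(v / step) for v in state)

def q_update(state, action, reward, next_state, actions):
    s, s2 = discretize(state), discretize(next_state)
    best_next = max(Q[(s2, a)] for a in actions)
    Q[(s, action)] += ALPHA * (reward + GAMMA * best_next - Q[(s, action)])

q_update((0.12, -1.3), "steer_left", -0.5, (0.31, -1.1),
         ["steer_left", "steer_right"])
print(dict(Q))
</code></pre>
<p>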
Results also highlight other possible improvements, which open the way to further investigation of how best to leverage RL for online ADS testing.</p>&nbsp;Tags: "SE for AI", "Testing and Quality" &nbsp;<br> &nbsp;<br> <h5> Jiannan Wang, Hung Viet Pham, Qi Li, Lin Tan, Yu Guo, Adnan Aziz, Erik Meijer, "D3: Differential Testing of Distributed Deep Learning with Model Generation" </h5> <p><b>Abstract:</b> Deep Learning (DL) techniques have been widely deployed in many application domains. The growth of DL models’ size and complexity demands distributed training of DL models. Since DL training is complex, software implementing distributed DL training is error-prone. Thus, it is crucial to test distributed deep learning software to improve its reliability and quality. To address this issue, we propose D3, a differential testing technique that leverages a distributed equivalence rule we created to test distributed deep learning software. The rationale is that the same model trained with the same model input under different distributed settings should produce equivalent prediction output within certain thresholds. Differing outputs indicate potential bugs in the distributed deep learning software. D3 automatically generates a diverse set of distributed settings, DL models, and model input to test distributed deep learning software. Our evaluation on two of the most popular DL libraries, i.e., PyTorch and TensorFlow, shows that D3 detects 21 bugs, including 12 previously unknown bugs.</p>&nbsp;Tags: "SE for AI", "Testing and Quality" &nbsp;<br> &nbsp;<br> <h5> David N. Palacio, Alejandro Velasco, Nathan Cooper, Alvaro Rodriguez, Kevin Moran, Denys Poshyvanyk, "Toward a Theory of Causation for Interpreting Neural Code Models" </h5> <p><b>Abstract:</b> Neural Language Models of Code, or Neural Code Models (NCMs), are rapidly progressing from research prototypes to commercial developer tools. As such, understanding the capabilities and limitations of such models is becoming critical. However, the abilities of these models are typically measured using automated metrics that often only reveal a portion of their real-world performance. While, in general, the performance of NCMs appears promising, currently much is unknown about how such models arrive at decisions. To this end, this paper introduces do-code, a post hoc interpretability method specific to NCMs that is capable of explaining model predictions. do-code is based upon causal inference to enable programming language-oriented explanations. While the theoretical underpinnings of do-code are extensible to exploring different model properties, we provide a concrete instantiation that aims to mitigate the impact of spurious correlations by grounding explanations of model behavior in properties of programming languages. To demonstrate the practical benefit of do-code, we illustrate the insights that our framework can provide by performing a case study on two popular deep learning architectures and ten NCMs. The results of this case study illustrate that our studied NCMs are sensitive to changes in code syntax. All our NCMs, except for the BERT-like model, statistically learn to predict tokens related to blocks of code (e.g., brackets, parentheses, semicolons) with less confounding bias as compared to other programming language constructs.
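</p>
<p>The causal flavor of such an analysis can be sketched as follows (hypothetical interface; the paper grounds interventions in structural causal models rather than this simple average): intervene on one code property, here a variable renaming, and measure how much the model's per-token confidence shifts:</p>
<pre><code>def intervention_effect(model, program, transform):
    """Mean absolute change in per-token confidence under do(transform)."""
    base = model(program)                  # list of per-token confidences
    treated = model(transform(program))
    n = min(len(base), len(treated))
    return sum(abs(b - t) for b, t in zip(base[:n], treated[:n])) / n

# Toy stand-ins: a fake "model" and an intervention renaming a variable.
fake_model = lambda src: [0.9 if ch in "{};()" else 0.5 for ch in src]
rename = lambda src: src.replace("x", "counter")
print(intervention_effect(fake_model, "int x = 0; { x++; }", rename))
</code></pre>
<p>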
These insights demonstrate the potential of do-code as a useful method to detect and facilitate the elimination of confounding bias in NCMs.</p>&nbsp;Tags: "AI for SE" &nbsp;<br> &nbsp;<br> <h5> Chidera Biringa, Gokhan Kul, "PACE: A Program Analysis Framework for Continuous Performance Prediction" </h5> <p><b>Abstract:</b> Software development teams establish elaborate continuous integration pipelines containing automated test cases to accelerate the development process of software. Automated tests help to verify the correctness of code modifications, decreasing the response time to changing requirements. However, when software teams do not track the performance impact of pending modifications, they may need to spend considerable time refactoring existing code. This article presents PACE, a program analysis framework that provides continuous feedback on the performance impact of pending code updates. We design performance microbenchmarks by mapping the execution time of functional test cases given a code update. We map microbenchmarks to code stylometry features and feed them to predictors for performance predictions. Our experiments achieved strong results in predicting code performance, outperforming the current state of the art by 75% on neural-represented code stylometry features.</p>&nbsp;Tags: "Analysis", "Prog Comprehension/Reeng/Maint" &nbsp;<br> &nbsp;<br> <h5> Costanza Alfieri, Juri Di Rocco, Paola Inverardi, Phuong T. Nguyen, "Exploring User Privacy Awareness on GitHub: An Empirical Study" </h5> <p><b>Abstract:</b> GitHub provides developers with a practical way to distribute source code and collaboratively work on common projects. To enhance account security and privacy, GitHub allows its users to manage access permissions, review audit logs, and enable two-factor authentication. However, despite these efforts, the platform still faces various issues related to the privacy of its users. This paper presents an empirical study delving into the GitHub ecosystem. Our focus is on investigating the utilization of privacy settings on the platform and identifying various types of sensitive information disclosed by users. Leveraging a dataset comprising 6,132 developers, we report and analyze their activities by means of comments on pull requests. Our findings indicate an active engagement by users with the available privacy settings on GitHub. Notably, we observe the disclosure of different forms of private information within pull request comments. This observation has prompted our exploration into sensitivity detection using a large language model and BERT, to pave the way for a personalized privacy assistant. Our work provides insights into the utilization of existing privacy protection tools, such as privacy settings, along with their inherent limitations. Essentially, we aim to advance research in this field by providing both the motivation for creating such privacy protection tools and a proposed methodology for personalizing them.</p>&nbsp;Tags: "Human/Social", "Security" &nbsp;<br> &nbsp;<br> <h5> Mohammad Hossein Amini, Shervin Naseri, Shiva Nejati, "Evaluating the Impact of Flaky Simulators on Testing Autonomous Driving Systems" </h5> <p><b>Abstract:</b> Simulators are widely used to test Autonomous Driving Systems (ADS), but their potential flakiness can lead to inconsistent test results.
We investigate test flakiness in simulation-based testing of ADS by addressing two key questions: (1) How do flaky ADS simulations impact automated testing that relies on randomized algorithms? and (2) Can machine learning (ML) effectively identify flaky ADS tests while decreasing the required number of test reruns? Our empirical results, obtained from two widely-used open-source ADS simulators and five diverse ADS test setups, show that test flakiness in ADS is a common occurrence and can significantly impact the test results obtained by randomized algorithms. Further, our ML classifiers effectively identify flaky ADS tests using only a single test run, achieving F1-scores of 85%, 82% and 96% for three different ADS test setups. Our classifiers significantly outperform our non-ML baseline, which requires executing tests at least twice, by 31%, 21%, and 13% in F1-score performance, respectively. We conclude with a discussion on the scope, implications and limitations of our study. We provide our complete replication package in a GitHub repository (GitHub paper 2023).</p>&nbsp;Tags: "SE for AI", "Testing and Quality" &nbsp;<br> &nbsp;<br> <h5> Junxiao Han, Jiahao Zhang, David Lo, Xin Xia, Shuiguang Deng, Minghui Wu, "Understanding Newcomers' Onboarding Process in Deep Learning Projects" </h5> <p><b>Abstract:</b> Attracting and retaining newcomers are critical for the sustainable development of Open Source Software (OSS) projects. Considerable efforts have been made to help newcomers identify and overcome barriers in the onboarding process. However, fewer studies focus on newcomers’ activities before their successful onboarding. Given the rising popularity of deep learning (DL) techniques, we wonder what the onboarding process of DL newcomers is, and whether there are commonalities or differences in the onboarding process for DL and non-DL newcomers. Therefore, we report a study to understand the growth trends of DL and non-DL newcomers, mine DL and non-DL newcomers’ activities before their successful onboarding (i.e., past activities), and explore the relationships between newcomers’ past activities and their first commit patterns and retention rates. By analyzing 20 DL projects with 9,191 contributors and 20 non-DL projects with 9,839 contributors, and conducting email surveys with contributors, we derived the following findings: 1) DL projects have attracted and retained more newcomers than non-DL projects. 2) Compared to non-DL newcomers, DL newcomers encounter more deployment, documentation, and version issues before their successful onboarding. 3) DL newcomers statistically require more time to successfully onboard compared to non-DL newcomers, and DL newcomers with more past activities (e.g., issues, issue comments, and watch) are prone to submit an intensive first commit (i.e., a commit with many source code and documentation files being modified). Based on the findings, we shed light on the onboarding process for DL and non-DL newcomers, highlight future research directions, and provide practical suggestions to newcomers, researchers, and projects.</p>&nbsp;Tags: "Human/Social", "Process", "Open Source" &nbsp;<br> &nbsp;<br> <h5> Matteo Biagiola, Paolo Tonella, "Boundary State Generation for Testing and Improvement of Autonomous Driving Systems" </h5> <p><b>Abstract:</b> Recent advances in Deep Neural Networks (DNNs) and sensor technologies are enabling autonomous driving systems (ADSs) with an ever-increasing level of autonomy.
However, assessing their dependability remains a critical concern. State-of-the-art ADS testing approaches modify the controllable attributes of a simulated driving environment until the ADS misbehaves. In such approaches, environment instances in which the ADS is successful are discarded, despite the possibility that they could contain hidden driving conditions in which the ADS may misbehave. In this paper, we present GenBo (GENerator of BOundary state pairs), a novel test generator for ADS testing. GenBo mutates the driving conditions of the ego vehicle (position, velocity and orientation), collected in a failure-free environment instance, and efficiently generates challenging driving conditions at the behavior boundary (i.e., where the model starts to misbehave) in the same environment instance. We use such boundary conditions to augment the initial training dataset and retrain the DNN model under test. Our evaluation results show that the retrained model has, on average, up to 3× higher success rate on a separate set of evaluation tracks with respect to the original DNN model.</p>&nbsp;Tags: "SE for AI", "Testing and Quality" &nbsp;<br> &nbsp;<br> <h5> Junjie Li, Jinqiu Yang, "Tracking the Evolution of Static Code Warnings: The State-of-the-Art and a Better Approach" </h5> <p><b>Abstract:</b> Static bug detection tools help developers detect problems in the code, including bad programming practices and potential defects. Recent efforts to integrate static bug detectors in modern software development workflows, such as in code review and continuous integration, have been shown to better motivate developers to fix the reported warnings on the fly. A proper mechanism to track the evolution of the reported warnings can better support such integration. Moreover, tracking the static code warnings will benefit many downstream software engineering tasks, such as learning the fix patterns for automated program repair, and learning which warnings are of more interest, so they can be prioritized automatically. In addition, tracking tools enable developers to concentrate on the most recent and actionable static warnings rather than being overwhelmed by the thousands of warnings from the entire project, which in turn enhances the utilization of static analysis tools. Hence, precisely tracking the warnings reported by static bug detectors is critical to further improving their utilization. In this paper, we study the effectiveness of the state-of-the-art (SOTA) solution in tracking static code warnings and propose a better solution based on our analysis of the insufficiency of the SOTA solution. In particular, we examined over 2,000 commits in four large-scale open-source systems (i.e., JClouds, Kafka, Spring-boot, and Guava) and crafted a dataset of 3,451 static code warnings by two static bug detectors (i.e., Spotbugs and PMD). We manually uncovered the ground-truth evolution status of the static warnings: persistent, removed (fix), removed (non-fix), and newly-introduced. Upon manual analysis, we identified the main reasons behind the insufficiency of the SOTA solution. Furthermore, we propose StaticTracker to track static warnings over software development history.
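</p>
<p>A minimal sketch of location-insensitive warning matching between two commits conveys the core difficulty (hypothetical representation; StaticTracker's actual matching also handles code movement and refactorings): warnings are matched on rule, file, and a snippet hash rather than on their unstable line numbers:</p>
<pre><code>import hashlib

def warning_key(warning):
    digest = hashlib.sha1(warning["snippet"].strip().encode()).hexdigest()
    return (warning["rule"], warning["file"], digest)

def diff_warnings(old, new):
    old_keys = {warning_key(w) for w in old}
    new_keys = {warning_key(w) for w in new}
    return {
        "persistent": old_keys &amp; new_keys,
        "removed": old_keys - new_keys,  # fix or non-fix: needs inspection
        "newly-introduced": new_keys - old_keys,
    }

w1 = {"rule": "NP_NULL_ON_SOME_PATH", "file": "A.java", "snippet": "x.foo();"}
w2 = dict(w1, snippet="if (x != null) x.foo();")
print({k: len(v) for k, v in diff_warnings([w1], [w2]).items()})
</code></pre>
<p>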
Our evaluation shows that StaticTracker significantly improves the tracking precision, i.e., from 64.4% to 90.3% for the evolution statuses combined (removed (fix), removed (non-fix), and newly-introduced).</p>&nbsp;Tags: "Analysis", "Prog Comprehension/Reeng/Maint" &nbsp;<br> &nbsp;<br> <h5> Youssef Esseddiq Ouatiti, Mohammed Sayagh, Noureddine Kerzazi, Bram Adams, Ahmed E. Hassan, "The impact of Concept drift and Data leakage on Log Level Prediction Models" </h5> <p><b>Abstract:</b> Developers insert logging statements to collect information about the execution of their systems. Along with a logging framework (e.g., Log4j), practitioners can decide which log statement to print or suppress by tagging each log line with a log level. Since picking the right log level for a new logging statement is not straightforward, machine learning models for log level prediction (LLP) were proposed by prior studies. While these models show good performance, they are still subject to the context in which they are applied, specifically to the way practitioners decide on log levels in different phases of the development history of their projects (e.g., debugging vs. testing). For example, OpenStack developers interchangeably increased/decreased the verbosity of their logs across the history of the project in response to code changes (e.g., before vs. after fixing a new bug). Thus, the manifestation of these changing log verbosity choices across time can lead to concept drift and data leakage issues for LLP models. In this paper, we empirically quantify the impact of data leakage and concept drift on the performance and interpretability of LLP models in three large open-source systems. Additionally, we compare the performance and interpretability of several time-aware approaches to tackle time-related issues. We observe that both shallow and deep-learning-based models suffer from both time-related issues. We also observe that training a model on just a window of the historical data (i.e., a contextual model) outperforms models that are trained on the whole historical data (i.e., all-knowing models) in the case of our shallow LLP model. Finally, we observe that contextual models exhibit a different (even contradictory) model interpretability, with a (very) weak correlation between the rankings of important features for the pairs of contextual models we compared. Our findings suggest that data leakage and concept drift should be taken into consideration for LLP models. We also invite practitioners to include the size of the historical window as an additional hyperparameter to tune a suitable contextual model instead of leveraging all-knowing models.</p>&nbsp;Tags: "AI for SE" &nbsp;<br> &nbsp;<br> <h5> Delano Oliveira, Reydne Santos, Benedito de Oliveira, Martin Monperrus, Fernando Castor, Fernanda Madeiral, "Understanding Code Understandability Improvements in Code Reviews" </h5> <p><b>Abstract:</b> [Motivation] Code understandability plays a crucial role in software development, as developers spend between 58% and 70% of their time reading source code. Improving code understandability can lead to enhanced productivity and save maintenance costs. [Problem] Experimental studies aim to establish what makes code more or less understandable in a controlled setting, but ignore that what makes code easier to understand in the real world also depends on extraneous elements such as project culture and guidelines, and developers’ background.
Not accounting for the influence of these factors may lead to results that are sound but have little external validity. [Objective] This study aims to investigate how developers improve code understandability during software development through code review comments. Its basic assumption is that code reviewers are specialists in code quality within a project. [Method and Results] We manually analyzed 2,401 code review comments from Java open-source projects on GitHub and found that over 42% of all comments focus on improving code understandability, demonstrating the significance of this aspect in code reviews. We further explored a subset of 385 comments related to code understandability and identified eight categories of code understandability concerns, such as incomplete or inadequate code documentation, bad identifiers, and unnecessary code. Among the suggestions to improve code understandability, 83.9% were accepted and integrated into the codebase. Among these, only two (less than 1%) ended up being reverted later. We also identified types of patches that improve code understandability, ranging from simple changes (e.g., removing unused code) to more context-dependent improvements (e.g., replacing a method call chain with an existing API). Finally, we evaluated the ability of four well-known linters to flag the identified code understandability issues. These linters cover less than 30% of these issues, although some of them could be easily added as new rules. [Implications] Our findings motivate and provide practical insight for the construction of tools to make code more understandable, e.g., understandability improvements are rarely reverted and thus can be used as reliable training data for specialized ML-based tools. This is also supported by our dataset, which can be used to train such models. Finally, our findings can also serve as a basis to develop evidence-based code style guides. [Data Availability] Our data is publicly available at https://codeupcrc.github.io.</p>&nbsp;Tags: "Prog Comprehension/Reeng/Maint" &nbsp;<br> &nbsp;<br> <h5> Aniruddhan Murali, Mahmoud Alfadel, Meiyappan Nagappan, Meng Xu, Chengnian Sun, "AddressWatcher: Sanitizer-Based Localization of Memory Leak Fixes" </h5> <p><b>Abstract:</b> Memory leak bugs are a major problem in C/C++ programs. They occur when memory objects are not deallocated. Developers need to manually deallocate these objects to prevent memory leaks. As such, several techniques have been proposed to automatically fix memory leaks. Although proposed approaches have merit in automatically fixing memory leaks, they present limitations. Static-based approaches attempt to trace the complete semantics of a memory object across all paths. However, they have scalability-related challenges when the target program has a large number of paths (path explosion). On the other hand, dynamic approaches can spell out the precise semantics of a memory object only on a single execution path (they do not consider multiple execution paths). In this paper, we complement prior approaches by designing and implementing a novel framework named AddressWatcher. AddressWatcher allows the semantics of a memory object to be tracked on multiple execution paths. AddressWatcher accomplishes this by using a leak database that allows one to store and compare different execution paths of a leak over several test cases. Also, AddressWatcher performs lightweight instrumentation during compile time that is utilized during the program execution to watch and track memory leak read/writes.
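</p>
<p>The leak database idea can be sketched in a few lines (illustrative Python with invented site names; AddressWatcher itself instruments C/C++ binaries via sanitizers): each execution contributes the allocation site of a leaked object and the last site that used it, and merging observations across test cases constrains where a free could safely be placed:</p>
<pre><code>from collections import defaultdict

# Maps an allocation site to the set of last-access sites observed so far.
leak_db = defaultdict(set)

def record_execution(alloc_site, last_access_site):
    leak_db[alloc_site].add(last_access_site)

record_execution("parse.c:120", "parse.c:188")  # test case 1
record_execution("parse.c:120", "parse.c:188")  # test case 2, same path
record_execution("parse.c:120", "main.c:57")    # test case 3, new path

# A suggested free() must come after every observed last access.
print(leak_db["parse.c:120"])
</code></pre>
<p>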
We conduct an evaluation of AddressWatcher over five popular packages, namely binutils, openssh, tmux, openssl and git. In 23 out of 50 real-world memory leak bugs, AddressWatcher correctly points to a free location to fix the memory leak. Finally, we submitted 25 Pull Requests across 12 popular OSS repositories using AddressWatcher suggestions. Among these, 21 were merged, leading to 5 open issues being addressed. In fact, our critical fix prompted a new version release for the calc repository, a program used to find large primes. Furthermore, our contributions through these PRs sparked intense discussions and appreciation in various repositories such as coturn, h2o, and radare2.</p>&nbsp;Tags: "Analysis" &nbsp;<br> &nbsp;<br> <h5> Amir M. Ebrahimi, Bram Adams, Gustavo A. Oliva, Ahmed E. Hassan, "A Large-Scale Exploratory Study on the Proxy Pattern in Ethereum" </h5> <p><b>Abstract:</b> The proxy pattern is a well-known design pattern with numerous use cases in several sectors of the software industry (e.g., network applications, microservices, and IoT). As such, the use of the proxy pattern is also a common approach in the development of complex decentralized applications (DApps) on the Ethereum blockchain. A contract that implements the proxy pattern (proxy contract) acts as a layer between the clients and the target contract, enabling greater flexibility (e.g., data validation checks) and upgradeability (e.g., online smart contract replacement with zero downtime) in DApp development. Despite the importance of proxy contracts, little is known about (i) how their prevalence changed over time, (ii) the ways in which developers integrate proxies in the design of DApps, and (iii) what proxy types are being most commonly leveraged by developers. In this paper, we present a large-scale exploratory study on the use of the proxy pattern in Ethereum. We analyze a dataset of all Ethereum smart contracts as of Sep. 2022 containing 50M smart contracts and 1.6B transactions, and apply both quantitative and qualitative methods in order to (i) determine the prevalence of proxy contracts, (ii) understand the ways they are deployed and integrated into applications, and (iii) uncover the prevalence of different types of proxy contracts. Our findings reveal that 14.2% of all deployed smart contracts are proxy contracts. We show that proxy contracts are being more actively used than non-proxy contracts. Also, the usage of proxy contracts in various contexts, transactions involving proxy contracts, and adoption of proxy contracts by users have shown an upward trend over time, peaking at the end of our study period. They are either deployed through off-chain scripts or on-chain factory contracts, with the former and the latter being employed in 39.1% and 60.9% of identified usage contexts, respectively. We found that while the majority (67.8%) of proxies act as interceptors, 32.2% enable upgradeability. Proxy contracts are typically (79%) implemented based on known reference implementations, with 29.4% being of type ERC-1167, a class of proxies that aims to cheaply reuse and clone contracts’ functionality. Our evaluation shows that our proposed behavioral proxy detection method has a precision and recall of 100% in detecting active proxies.
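</p>
<p>For readers unfamiliar with the pattern, its upgradeability benefit can be sketched in plain Python (illustrative only; on Ethereum this is achieved with delegatecall in Solidity, as in the ERC-1167 minimal proxies mentioned above):</p>
<pre><code>class Proxy:
    """Clients keep one stable reference; the logic behind it can change."""
    def __init__(self, implementation):
        self._impl = implementation
    def upgrade(self, new_implementation):
        self._impl = new_implementation   # swap logic, keep the "address"
    def __getattr__(self, name):
        return getattr(self._impl, name)  # delegate everything else

class V1:
    def withdraw(self):
        return "v1: no checks"

class V2:
    def withdraw(self):
        return "v2: with validation"

p = Proxy(V1())
print(p.withdraw())  # clients call the proxy, not the implementation
p.upgrade(V2())
print(p.withdraw())  # same proxy object, upgraded behaviour: zero downtime
</code></pre>
<p>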
Finally, we derive a set of practical recommendations for developers and introduce open research questions to guide future research on the topic.</p>&nbsp;Tags: "Design/Architecture", "Process" &nbsp;<br> &nbsp;<br> <h5> Xinyi Wang, Shaukat Ali, Tao Yue, Paolo Arcaini, "Quantum Approximate Optimization Algorithm for Test Case Optimization" </h5> <p><b>Abstract:</b> Test case optimization (TCO) reduces the software testing cost while preserving its effectiveness. However, to solve TCO problems for large-scale and complex software systems, substantial computational resources are required. Quantum approximate optimization algorithms (QAOAs) are promising combinatorial optimization algorithms that rely on quantum computational resources, with the potential to offer increased efficiency compared to classical approaches. Several proof-of-concept applications of QAOAs for solving combinatorial problems, such as portfolio optimization, energy optimization in power systems, and job scheduling, have been proposed. Given the lack of investigation into QAOA’s application for TCO problems, and motivated by the computational challenges of TCO problems and the potential of QAOAs, we present IGDec-QAOA to formulate a TCO problem as a QAOA problem and solve it on both ideal and noisy quantum computer simulators, as well as on a real quantum computer. To solve bigger TCO problems that require more qubits than are currently available, we integrate a problem decomposition strategy with the QAOA. We performed an empirical evaluation with five TCO problems and four publicly available industrial datasets from ABB, Google, and Orona to compare various configurations of IGDec-QAOA, assess its decomposition strategy for handling large datasets, and compare its performance with classical algorithms (i.e., Genetic Algorithm (GA) and Random Search). Based on the evaluation results achieved on an ideal simulator, we recommend the best configuration of our approach for TCO problems. Also, we demonstrate that our approach can reach the same effectiveness as GA, outperforming GA in two out of the five test case optimization problems we studied. In addition, we observe that, on the noisy simulator, IGDec-QAOA achieved similar performance to that from the ideal simulator. Finally, we also demonstrate the feasibility of IGDec-QAOA on a real quantum computer in the presence of noise.</p>&nbsp;Tags: "Testing and Quality", "Quantum" &nbsp;<br> &nbsp;<br> <h5> Alison Fernandez Blanco, Araceli Queirolo Cordova, Alexandre Bergel, Juan Pablo Sandoval Alcocer, "Asking and Answering Questions During Memory Profiling" </h5> <p><b>Abstract:</b> The software engineering community has produced numerous tools, techniques, and methodologies for practitioners to analyze and optimize memory usage during software execution. However, little is known about the actual needs of programmers when analyzing memory behavior and how they use tools to address those needs. We conducted an exploratory study (i) to understand what a programmer needs to know when analyzing memory behavior and (ii) to understand how a programmer finds that information with current tools. From our observations, we provide a catalog of 34 questions programmers ask themselves when analyzing memory behavior. We also report a detailed analysis of how some tools are used to answer these questions and the difficulties participants face during the process.
Finally, we present four recommendations to guide researchers and developers in designing, evaluating, and improving memory behavior analysis tools.</p>&nbsp;Tags: "User experience", "Prog Comprehension/Reeng/Maint" &nbsp;<br> &nbsp;<br> <h5> Zeyang Ma, Shouvick Mondal, Tse-Hsun (Peter) Chen, Haoxiang Zhang, Ahmed E. Hassan, "VulNet: Towards improving vulnerability management in the Maven ecosystem" </h5> <p><b>Abstract:</b> Developers rely on software ecosystems such as Maven to manage and reuse external libraries (i.e., dependencies). Due to the complexity of the used dependencies, developers may face challenges in choosing which library to use and whether they should upgrade or downgrade a library. One important factor that affects this decision is the number of potential vulnerabilities in a library and its dependencies. Therefore, state-of-the-art platforms such as Maven Repository (MVN) and Open Source Insights (OSI) help developers in making such a decision by presenting vulnerability information associated with every dependency. In this paper, we first conduct an empirical study to understand how the two platforms, MVN and OSI, present and categorize vulnerability information. We found that these two platforms may either overestimate or underestimate the number of associated vulnerabilities in a dependency, and they lack prioritization mechanisms on which dependencies are more likely to cause an issue. Hence, we propose a tool named VulNet to address the limitations we found in MVN and OSI. Through an evaluation of 19,886 versions of the top 200 popular libraries, we find VulNet includes 90.5% and 65.8% of the dependencies that were omitted by MVN and OSI, respectively. VulNet also helps reduce 27% of potentially unreachable or less impactful vulnerabilities listed by OSI in test dependencies. Finally, our user study with 24 participants gave VulNet an average rating of 4.5/5 in presenting and prioritizing vulnerable dependencies, compared to 2.83 (MVN) and 3.14 (OSI).</p>&nbsp;Tags: "Process", "Security" &nbsp;<br> &nbsp;<br> <h5> Aurora Papotti, Ranindya Paramitha, Fabio Massacci, "On the acceptance by code reviewers of candidate security patches suggested by Automated Program Repair tools" </h5> <p><b>Abstract:</b> [Objective] We investigated whether (possibly wrong) security patches suggested by Automated Program Repair (APR) tools for real-world projects are recognized by human reviewers. We also investigated whether knowing that a patch was produced by an allegedly specialized tool changes the decision of human reviewers. [Method] We perform an experiment with Master students in Computer Science. In the first phase, using a balanced design, we present human reviewers with a combination of patches proposed by APR tools for different vulnerabilities and ask them to adopt or reject the proposed patches. In the second phase, we tell participants that some of the proposed patches were generated by security-specialized tools (even if the tool was actually a ‘normal’ APR tool) and measure whether the human reviewers would change their decision to adopt or reject a patch. [Results] It is easier to identify wrong patches than correct patches, and correct patches are not confused with partially correct patches. Also, patches from APR security tools are adopted more often than patches suggested by generic APR tools, but there is not enough evidence to verify if ‘bogus’ security claims are distinguishable from ‘true security’ claims.
Finally, the number of switches to the patches suggested by the security tool is significantly higher after the security information is revealed, irrespective of correctness. [Limitations] The experiment was conducted in an academic setting and focused on a limited sample of popular APR tools and popular vulnerability types.</p>&nbsp;Tags: "User experience", "Human/Social", "Security" &nbsp;<br> &nbsp;<br> <h5> Matteo Paltenghi, Rahul Pandita, Austin Henley, Albert Ziegler, "Follow-Up Attention: An Empirical Study of Developer and Neural Model Code Exploration" </h5> <p><b>Abstract:</b> Recent neural models of code, such as OpenAI Codex and AlphaCode, have demonstrated remarkable proficiency at code generation due to the underlying attention mechanism. However, it often remains unclear how the models actually process code, and to what extent their reasoning and the way their attention mechanism scans the code matches the patterns of developers. A poor understanding of the model reasoning process limits the way in which current neural models are leveraged today, so far mostly for their raw predictions. To fill this gap, this work studies how the processed attention signal of three open large language models (CodeGen, InCoder and GPT-J) agrees with how developers look at and explore code when each answers the same sensemaking questions about code. Furthermore, we contribute an open-source eye-tracking dataset comprising 92 manually-labeled sessions from 25 developers engaged in sensemaking tasks. We empirically evaluate five heuristics that do not use the attention signal and ten attention-based post-processing approaches applied to the attention signal of CodeGen against our ground truth of developers exploring code, including the novel concept of follow-up attention, which exhibits the highest agreement between model and human attention. Our follow-up attention method can predict the next line a developer will look at with 47% accuracy. This outperforms the baseline prediction accuracy of 42.3%, which uses the session history of other developers to recommend the next line. These results demonstrate the potential of leveraging the attention signal of pre-trained models for effective code exploration.</p>&nbsp;Tags: "Human/Social", "User experience", "AI for SE" &nbsp;<br> &nbsp;<br> <h5> Anwar Ghammam, Rania Khalsi, Marouane Kessentini, Foyzul Hassan, "Efficient Management of Containers for Software Defined Vehicles" </h5> <p><b>Abstract:</b> Containerization technology, such as Docker, is gaining in popularity in newly established software-defined vehicle architectures (SDVA). However, executing those containers can quickly become computationally expensive in constrained environments, given the limited CPU, memory, and energy resources in the Electronic Control Units (ECUs) of SDVA. Consequently, the efficient management of these containers is crucial for enabling the on-demand usage of the applications in the vehicle based on the available resources while considering several constraints and priorities, including failure tolerance, security, safety, and comfort. In this paper, we propose a dynamic software container management approach for constrained environments such as embedded devices/ECUs in SDVA within smart cars. To address the conflicting objectives and constraints within the vehicle, we design a novel search-based approach based on multi-objective optimization. This approach facilitates the allocation, movement, or suspension of containers between ECUs in the cluster.
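</p>
<p>To give a flavor of the underlying optimization problem, here is a toy single-objective sketch with hypothetical objectives and weights (the paper uses multi-objective search over richer constraints such as safety and failure tolerance): a container-to-ECU placement is first checked for feasibility, then scored:</p>
<pre><code>def feasible(placement, containers, ecus):
    """Check CPU/memory capacity of every ECU under a placement."""
    load = {e: {"cpu": 0, "mem": 0} for e in ecus}
    for c, e in placement.items():
        load[e]["cpu"] += containers[c]["cpu"]
        load[e]["mem"] += containers[c]["mem"]
    return all(load[e]["cpu"] &lt;= ecus[e]["cpu"] and
               load[e]["mem"] &lt;= ecus[e]["mem"] for e in ecus)

def score(placement, containers, w_priority=0.5, w_energy=0.5):
    """Higher is better: value delivered minus energy cost."""
    prio = sum(containers[c]["priority"] for c in placement)
    energy = sum(containers[c]["energy"] for c in placement)
    return w_priority * prio - w_energy * energy

containers = {"adas": {"cpu": 2, "mem": 1, "energy": 3, "priority": 10},
              "media": {"cpu": 1, "mem": 2, "energy": 2, "priority": 4}}
ecus = {"ecu1": {"cpu": 2, "mem": 2}, "ecu2": {"cpu": 2, "mem": 2}}
placement = {"adas": "ecu1", "media": "ecu2"}
print(feasible(placement, containers, ecus), score(placement, containers))
</code></pre>
<p>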
Collaborating with our industry partner, Ford Motor Company, we evaluate our approach using different real-world software-defined scenarios. These scenarios involve using heterogeneous clusters of ECU devices in vehicles based on real-world software containers and use-case studies from the automotive industry. The experimental results demonstrate that our scheduler outperforms existing scheduling algorithms, including the default Docker scheduler (Spread) commonly used in automotive applications. Our proposed scheduler exhibits superior performance in terms of energy and resource cost efficiency. Specifically, it achieves a 35% reduction in energy consumption in power-saving mode compared to the scheduler employed by Ford Motor Company. Additionally, our scheduler effectively distributes workload among the ECUs in the cluster, minimizing resource usage, and dynamically adjusts to the real-time requirements and constraints of the car environment. This work will serve as a fundamental building block in the automotive industry to efficiently manage software containers in smart vehicles considering constraints and priorities in the real world.</p>&nbsp;Tags: "Prog Comprehension/Reeng/Maint" &nbsp;<br> &nbsp;<br> <h5> Hao Li, Cor-Paul Bezemer, "Bridging the Language Gap: An Empirical Study of Bindings for Open Source Machine Learning Libraries Across Software Package Ecosystems" </h5> <p><b>Abstract:</b> Open source machine learning (ML) libraries enable developers to integrate advanced ML functionality into their own applications. However, popular ML libraries, such as TensorFlow, are not available natively in all programming languages and software package ecosystems. Hence, developers who wish to use an ML library which is not available in their programming language or ecosystem of choice may need to resort to using a so-called binding library (or binding). Bindings provide support across programming languages and package ecosystems for reusing a host library. For example, the Keras .NET binding provides support for the Keras library in the NuGet (.NET) ecosystem even though the Keras library was written in Python. In this paper, we collect 2,436 cross-ecosystem bindings for 546 ML libraries across 13 software package ecosystems by using an approach called BindFind, which can automatically identify bindings and link them to their host libraries. Furthermore, we conduct an in-depth study of 133 cross-ecosystem bindings and their development for 40 popular open source ML libraries. Our findings reveal that the majority of ML library bindings are maintained by the community, with npm being the most popular ecosystem for these bindings. Our study also indicates that most bindings cover only a limited range of the host library’s releases, often experience considerable delays in supporting new releases, and have widespread technical lag. Our findings highlight key factors to consider for developers integrating bindings for ML libraries and open avenues for researchers to further investigate bindings in software package ecosystems.</p>&nbsp;Tags: "Prog Comprehension/Reeng/Maint" &nbsp;<br> &nbsp;<br> <h5> Michel Maes Bermejo, Alexander Serebrenik, Micael Gallego Carrillo, Francisco Gortázar Bellas, Gregorio Robles, Jesus M.
Gonzalez-Barahona, "Hunting bugs: Towards an automated approach to identifying which change caused a bug through regression testing" </h5> <p><b>Abstract:</b> [Context] Finding code changes that introduced bugs is important both for practitioners and researchers, but doing it precisely is a manual, effort-intensive process. The perfect test method is a theoretical construct aimed at detecting Bug-Introducing Changes (BIC) through a theoretical perfect test. This perfect test always fails if the bug is present, and passes otherwise. [Objective] To explore a possible automatic operationalization of the perfect test method. [Method] To use regression tests as substitutes for the perfect test. For this, we transplant the regression tests to past snapshots of the code, and use them to identify the BIC, on a well-known collection of bugs from the Defects4J dataset. [Results] From 809 bugs in the dataset, when running our operationalization of the perfect test method, the BIC was identified precisely for 95 of them, and in the remaining 4 cases a list of candidates including the BIC was provided. [Conclusions] We demonstrate that the operationalization of the perfect test method through regression tests is feasible and can be completely automated in practice when tests can be transplanted and run in past snapshots of the code. Given that implementing regression tests when a bug is fixed is considered a good practice, when developers follow it, they can effortlessly detect bug-introducing changes by using our operationalization of the perfect test method.</p>&nbsp;Tags: "Testing and Quality" &nbsp;<br> &nbsp;<br> <h5> Matteo Biagiola, Andrea Stocco, Vincenzo Riccio, Paolo Tonella, "Two is Better Than One: Digital Siblings to Improve Autonomous Driving Testing" </h5> <p><b>Abstract:</b> Simulation-based testing represents an important step to ensure the reliability of autonomous driving software. In practice, when companies rely on third-party general-purpose simulators, either for in-house or outsourced testing, the generalizability of testing results to real autonomous vehicles is at stake. In this paper, we enhance simulation-based testing by introducing the notion of digital siblings: a multi-simulator approach that tests a given autonomous vehicle on multiple general-purpose simulators that are built with different technologies and operate collectively as an ensemble in the testing process. We exemplify our approach on a case study focused on testing the lane-keeping component of an autonomous vehicle. We use two open-source simulators as digital siblings, and we empirically compare such a multi-simulator approach against a digital twin of a physical scaled autonomous vehicle on a large set of test cases. Our approach requires generating and running test cases for each individual simulator, in the form of sequences of road points. Then, test cases are migrated between simulators, using feature maps to characterize the exercised driving conditions. Finally, the joint predicted failure probability is computed, and a failure is reported only in cases of agreement among the siblings. Our empirical evaluation shows that the ensemble failure predictor by the digital siblings is superior to each individual simulator at predicting the failures of the digital twin.
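</p>
<p>The agreement rule lends itself to a very small sketch (hypothetical aggregation; the paper computes the joint probability over feature-map cells): each sibling yields a failure probability for the same test case, and a failure is reported only when the siblings agree:</p>
<pre><code>def siblings_verdict(probs, threshold=0.5):
    """Report a failure only when all sibling simulators agree."""
    joint = 1.0
    for p in probs:
        joint *= p  # simplifying independence assumption
    failure = all(p &gt;= threshold for p in probs)
    return {"joint_failure_probability": joint, "failure": failure}

print(siblings_verdict([0.8, 0.7]))  # both siblings predict failure
print(siblings_verdict([0.8, 0.2]))  # disagreement: no failure reported
</code></pre>
<p>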
We discuss the findings of our case study and detail how our approach can help researchers interested in automated testing of autonomous driving software.</p>&nbsp;Tags: "SE for AI", "Testing and Quality" &nbsp;<br> &nbsp;<br> <h5> Farshad Kazemi, Maxime Lamothe, Shane McIntosh, "Characterizing the Prevalence, Distribution, and Duration of Stale Reviewer Recommendations" </h5> <p><b>Abstract:</b> The appropriate assignment of reviewers is a key factor in determining the value that organizations can derive from code review. While inappropriate reviewer recommendations can hinder the benefits of the code review process, identifying these assignments is challenging. Stale reviewers, i.e., those who no longer contribute to the project, are one type of reviewer recommendation that is certainly inappropriate. Understanding and minimizing this type of recommendation can thus enhance the benefits of the code review process. While recent work demonstrates the existence of stale reviewers, to the best of our knowledge, attempts have yet to be made to characterize and mitigate them. In this paper, we study their prevalence and potential effects. We then propose and assess a strategy to mitigate stale recommendations in existing code reviewer recommendation (CRR) tools. By applying five code reviewer recommendation approaches (LearnRec, RetentionRec, cHRev, Sofia, and WLRRec) to three thriving open-source systems with 5,806 contributors, we observe that, on average, 12.59% of incorrect recommendations are stale due to developer turnover; however, fewer stale recommendations are made when the recency of contributions is considered by the recommendation objective function. We also investigate which reviewers appear in stale recommendations and observe that the top reviewers account for a considerable proportion of stale recommendations. For instance, in 15.31% of cases, the top-3 reviewers account for at least half of the stale recommendations. Finally, we study how long stale reviewers linger after the candidate leaves the project, observing that contributors who left the project 7.7 years ago are still suggested to review change sets. Based on our findings, we propose separating the reviewer contribution recency from the other factors that are used by the CRR objective function to filter out developers who have not contributed during a specified duration. By evaluating this strategy with different intervals, we assess the potential impact of this choice on the recommended reviewers. The proposed filter reduces the staleness of recommendations, i.e., the Staleness Reduction Ratio (SRR) improves by 21.44%–92.39%. Yet since the strategy may increase active reviewer workload, careful project-specific exploration of the impact of the cut-off setting is crucial.</p>&nbsp;Tags: "Human/Social", "Process" &nbsp;<br> &nbsp;<br> <h5> Ahcheong Lee, Youngseok Choi, Shin Hong, Yunho Kim, Kyutae Cho, Moonzoo Kim, "ZigZagFuzz: Interleaved Fuzzing of Program Options and Files" </h5> <p><b>Abstract:</b> Command-line options (e.g., -l, -F, -R for ls) given to a command-line program can significantly alter the behavior of the program. Thus, fuzzing not only file input but also program options can improve test coverage and bug detection. In this paper, we propose ZigZagFuzz, which achieves higher test coverage and detects more bugs than state-of-the-art fuzzers by separately mutating program options and file inputs in an iterative, interleaving manner. ZigZagFuzz applies the following three core ideas.
First, to utilize the different characteristics of the program option domain and the file input domain, ZigZagFuzz separates phases of mutating program options from those of mutating file inputs and performs two distinct mutation strategies on the two different domains. Second, to reach deep segments of a target program that are accessed through an interleaving sequence of program option checks and file input checks, ZigZagFuzz continuously interleaves phases of mutating program options with phases of mutating file inputs. Finally, to improve fuzzing performance further, ZigZagFuzz periodically shrinks the input corpus by removing similar test inputs based on their function coverage. The experimental results on 20 real-world programs show that ZigZagFuzz improves test coverage and detects 1.9 to 10.6 times more bugs than state-of-the-art fuzzers that mutate program options, such as AFL++-argv, AFL++-all, Eclipser, CarpetFuzz, ConfigFuzz, and POWER. We have reported the new bugs detected by ZigZagFuzz, and the original developers confirmed our bug reports.</p>&nbsp;Tags: "Testing and Quality", "Analysis" &nbsp;<br> &nbsp;<br> <h5> Xin Tan, Xinyue Lv, Jing Jiang, Li Zhang, "Understanding Real-time Collaborative Programming: a Study of Visual Studio Live Share" </h5> <p><b>Abstract:</b> Real-time collaborative programming (RCP) entails developers working simultaneously, regardless of their geographic locations. RCP differs from traditional asynchronous online programming methods, such as Git or SVN, where developers work independently and update the codebase at separate times. Although various real-time code collaboration tools (e.g., Visual Studio Live Share, Code with Me, and Replit) have kept emerging in recent years, none of the existing studies explicitly focuses on a deep understanding of the processes or experiences associated with RCP. To this end, we combine interviews and an e-mail survey with the users of Visual Studio Live Share, aiming to understand (i) the scenarios, (ii) the requirements, and (iii) the challenges when developers participate in RCP. We find that developers participate in RCP in 18 different scenarios belonging to six categories, e.g., pair programming, group debugging, and code review. However, existing users’ attitudes toward the usefulness of the current RCP tools in these scenarios were significantly more negative than the expectations of potential users. As for the requirements, the most critical category is live editing, followed by the need for sharing terminals to enable hosts and guests to run commands and see the results, as well as focusing and following, which involves “following” the host’s edit location and “focusing” the guests’ attention on the host with a notification. Under these categories, we identify 17 requirements, but most of them are not well supported by current tools. In terms of challenges, we identify 19 challenges belonging to seven categories. The most severe category of challenges is lagging, followed by permissions and conflicts. The above findings indicate that the current RCP tools and even the collaborative environment need to be improved greatly and urgently.
Xin Tan, Xinyue Lv, Jing Jiang, Li Zhang, "Understanding Real-time Collaborative Programming: a Study of Visual Studio Live Share"

Abstract: Real-time collaborative programming (RCP) entails developers working simultaneously, regardless of their geographic locations. RCP differs from traditional asynchronous collaboration, mediated by tools such as Git or SVN, where developers work independently and update the codebase at separate times. Although various real-time code collaboration tools (e.g., Visual Studio Live Share, Code with Me, and Replit) have kept emerging in recent years, no existing study explicitly focuses on a deep understanding of the processes or experiences associated with RCP. To this end, we combine interviews and an e-mail survey with users of Visual Studio Live Share, aiming to understand (i) the scenarios, (ii) the requirements, and (iii) the challenges when developers participate in RCP. We find that developers participate in RCP in 18 different scenarios belonging to six categories, e.g., pair programming, group debugging, and code review. However, existing users' attitudes toward the usefulness of current RCP tools in these scenarios were significantly more negative than the expectations of potential users. As for the requirements, the most critical category is live editing, followed by the need to share terminals so that hosts and guests can run commands and see the results, and by focusing and following, which involves "following" the host's edit location and "focusing" the guests' attention on the host with a notification. Under these categories, we identify 17 requirements, most of which are not well supported by current tools. In terms of challenges, we identify 19 challenges belonging to seven categories. The most severe category is lagging, followed by permissions and conflicts. These findings indicate that current RCP tools, and the collaborative environment more broadly, urgently need substantial improvement. Based on these findings, we discuss recommendations for different stakeholders, including practitioners, tool designers, and researchers.

Tags: "Human/Social", "Process"

Sarah Fakhoury, Aaditya Naik, Georgios Sakkas, Saikat Chakraborty, Shuvendu Lahiri, "LLM-Based Test-Driven Interactive Code Generation: User Study and Empirical Evaluation"

Abstract: Large language models (LLMs) have shown great potential in automating significant aspects of coding by producing natural code from informal natural language (NL) intent. However, because NL is informal, it does not lend itself easily to checking that the generated code correctly satisfies the user intent. In this paper, we propose TiCoder, a novel interactive workflow for guided intent clarification (i.e., partial formalization) through tests, to support the generation of more accurate code suggestions. Through a mixed-methods user study with 15 programmers, we present an empirical evaluation of the workflow's effectiveness in improving code generation accuracy. We find that participants using the proposed workflow are significantly more likely to correctly evaluate AI-generated code, and report significantly less task-induced cognitive load. Furthermore, we test the potential of the workflow at scale with four different state-of-the-art LLMs on two Python datasets, using an idealized proxy for user feedback. We observe an average absolute improvement of 45.97% in pass@1 code generation accuracy across both datasets and all LLMs within 5 user interactions, in addition to the automatic generation of accompanying unit tests.

Tags: "AI for SE", "Prog Comprehension/Reeng/Maint"
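The workflow can be read as a candidate-pruning loop: the model proposes code, the system proposes a test that splits the candidates, and the user's accept/reject answer discards the inconsistent ones. A minimal sketch, where generate_candidates, propose_test, and user_approves are hypothetical stand-ins for an LLM backend and a human (or simulated) user, not TiCoder's API:

    # Sketch of a TiCoder-style test-driven interaction loop; all callbacks
    # are assumed, and the pass/fail harness is a toy.
    def passes(candidate_src, test_src):
        """Toy harness: execute a candidate and a test in one namespace."""
        env = {}
        try:
            exec(candidate_src, env)
            exec(test_src, env)
            return True
        except Exception:
            return False

    def ticoder_loop(intent, generate_candidates, propose_test, user_approves,
                     max_rounds=5):
        candidates = generate_candidates(intent)     # e.g., N code suggestions
        approved = []
        for _ in range(max_rounds):
            if len(candidates) <= 1:
                break
            test = propose_test(intent, candidates)  # a test that splits the set
            if user_approves(test):                  # "yes, this matches my intent"
                approved.append(test)
                candidates = [c for c in candidates if passes(c, test)]
            else:
                candidates = [c for c in candidates if not passes(c, test)]
        return candidates, approved                  # surviving code + unit tests

Each accepted test doubles as a regression test, which is how the workflow yields accompanying unit tests as a by-product.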
Deepika Tiwari, Martin Monperrus, Benoit Baudry, "Mimicking Production Behavior With Generated Mocks"

Abstract: Mocking allows testing program units in isolation. A developer who writes tests with mocks faces two challenges: designing realistic interactions between a unit and its environment, and understanding the expected impact of these interactions on the behavior of the unit. In this paper, we propose to monitor an application in production to generate tests that mimic realistic execution scenarios through mocks. Our approach operates in three phases. First, we instrument a set of target methods for which we want to generate tests, as well as the methods that they invoke, which we refer to as mockable method calls. Second, in production, we collect data about the context in which target methods are invoked, as well as the parameters and the returned value for each mockable method call. Third, offline, we analyze the production data to generate test cases with realistic inputs and mock interactions. The approach is automated and implemented in an open-source tool called RICK. We evaluate our approach with three real-world, open-source Java applications. RICK monitors the invocation of 128 methods in production across the three applications and captures their behavior. Based on this captured data, RICK generates test cases that include realistic initial states and test inputs, as well as mocks and stubs. All the generated test cases are executable, and 52.4% of them successfully mimic the complete execution context of the target methods observed in production. The mock-based oracles are also effective at detecting regressions within the target methods, complementing each other in their fault-finding ability. We interview five industry developers, who confirm the relevance of using production observations to design mocks and stubs. Our experimental findings clearly demonstrate the feasibility and added value of generating mocks from production interactions.

Tags: "Testing and Quality", "Prog Comprehension/Reeng/Maint"
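The capture/replay split at the heart of the approach is easy to illustrate. RICK itself targets Java; the Python analogue below is a sketch only, using a decorator as stand-in instrumentation and unittest.mock for the offline stub:

    # Illustrative production capture and mock replay in the spirit of RICK;
    # the recording format and helper names are assumptions, not the tool.
    import functools
    from unittest.mock import MagicMock

    RECORDINGS = []   # (callee name, args, kwargs, return value) from "production"

    def record_mockable(fn):
        """Capture phase: log every invocation of a mockable dependency."""
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            result = fn(*args, **kwargs)
            RECORDINGS.append((fn.__name__, args, kwargs, result))
            return result
        return wrapper

    def build_stub(name):
        """Offline phase: replay recorded returns through a mock object."""
        replay = {(a, tuple(sorted(k.items()))): r
                  for n, a, k, r in RECORDINGS if n == name}
        stub = MagicMock(name=name)
        # Unrecorded argument combinations raise KeyError, surfacing an
        # interaction that was never observed in production.
        stub.side_effect = lambda *a, **k: replay[(a, tuple(sorted(k.items())))]
        return stub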
Xueqi Yang, Mariusz Jakubowski, Li Kang, Haojie Yu, Tim Menzies, "SparseCoder: Advancing Source Code Analysis with Sparse Attention and Learned Token Pruning"

Abstract: As software projects rapidly evolve, software artifacts become more complex and the defects hidden within them become harder to identify. Emerging Transformer-based approaches, though achieving remarkable performance, struggle with long code sequences because their self-attention mechanism scales quadratically with sequence length. This paper introduces SparseCoder, an approach that incorporates sparse attention and a learned token pruning (LTP) method, adapted from natural language processing, to address this limitation. Compared to previous state-of-the-art models (CodeBERT, RoBERTa, and CodeT5), our experiments demonstrate that SparseCoder can handle significantly longer input sequences: at least twice as long, within the limits of our hardware resources and data statistics. Additionally, SparseCoder is four times faster than other methods in runtime, achieving a 50% reduction in floating-point operations (FLOPs) with a negligible performance drop of less than 1% compared to Transformers using sparse attention (Sparse Atten). Plotting the FLOPs of model inference against token length reveals that SparseCoder scales linearly, whereas other methods, including the state-of-the-art model CodeT5, scale quadratically. Moreover, SparseCoder enhances interpretability by visualizing non-trivial tokens layer-wise.

Tags: "AI for SE", "Analysis"

Shandler Mason, Sandeep Kaur Kuttal, "Diversity's Double-Edged Sword: Analyzing Race's Effect on Remote Pair Programming Interactions"

Abstract: Remote pair programming is widely used in software development, but no research has examined how race affects these interactions between developers. We embarked on this study due to the historical underrepresentation of Black developers in the tech industry, where White developers comprise the majority. Our study involved 24 experienced developers, forming 12 gender-balanced same- and mixed-race pairs. Pairs collaborated on a programming task using the think-aloud method, followed by individual retrospective interviews. Our findings revealed elevated productivity scores for mixed-race pairs, with no differences in code quality between same- and mixed-race pairs. Mixed-race pairs excelled in task distribution, shared decision-making, and role exchange, but encountered communication challenges, discomfort, and anxiety, shedding light on the complexity of diversity dynamics. Our study emphasizes race's impact on remote pair programming and underscores the need for diverse tools and methods to address racial disparities in collaboration.

Tags: "Process", "Human/Social"

Yuxia Zhang, Zhiqing Qiu, Klaas-Jan Stol, Wenhui Zhu, Jiaxin Zhu, Yingchen Tian, Hui Liu, "Automatic Commit Message Generation: A Critical Review and Directions for Future Work"

Abstract: Commit messages are critical for code comprehension and software maintenance. Writing a high-quality message requires skill and effort. To support developers and reduce their effort on this task, several approaches have been proposed to automatically generate commit messages. Despite the promising performance reported, we have identified three significant and prevalent threats in these automated approaches: 1) the datasets used to train and evaluate them contain a considerable amount of 'noise'; 2) current approaches only consider commits of a limited diff size; and 3) current approaches can only generate the subject of a commit message, not the message body. The first limitation may let models 'learn' inappropriate messages during training and can also inflate performance results in evaluation. The other two threats considerably weaken the practical usability of these approaches. Further, with the rapid emergence of large language models (LLMs) that show superior performance on many software engineering tasks, it is worth asking: can LLMs address the challenges of long diffs and whole-message generation? This article first reports the results of an empirical study assessing the impact of these three threats on the performance of state-of-the-art automatic commit message generators. We collected commit data from the 1,000 most-starred Java projects on GitHub and systematically removed noisy commits with bot-submitted or meaningless messages. We then compared the performance of four representative state-of-the-art approaches before and after the removal of noisy messages, and with different lengths of commit diffs. We also conducted a qualitative survey with developers to investigate their perspectives on generating only message subjects. Finally, we evaluate the performance of two representative LLMs, namely UniXcoder and ChatGPT, in generating more practical commit messages. The results demonstrate that generating commit messages is of great practical value, that considerable work is needed to mature the current state of the art, and that LLMs are an avenue worth exploring to address the current limitations. Our analyses provide insights for future work to achieve better performance in practice.

Tags: "Prog Comprehension/Reeng/Maint"
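Whole-message generation with an LLM, as studied above, reduces in practice to prompting with the diff and asking for a subject plus body. A minimal sketch; llm_complete is a hypothetical client function, and the prompt wording and truncation limit are assumptions rather than the paper's setup:

    # Illustrative prompt construction for whole-message (subject + body)
    # commit message generation; not the evaluation harness from the paper.
    def build_commit_prompt(diff_text, max_diff_chars=12000):
        diff = diff_text[:max_diff_chars]   # long diffs are a known pain point
        return (
            "Write a commit message for the change below.\n"
            "Line 1: an imperative subject under 72 characters.\n"
            "Then a blank line, then a body explaining what changed and why.\n\n"
            f"Diff:\n{diff}\n"
        )

    # message = llm_complete(build_commit_prompt(diff))  # hypothetical LLM call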
Zimin Chen, Sen Fang, Martin Monperrus, "SUPERSONIC: Learning to Generate Source Code Optimizations in C/C++"

Abstract: Software optimization refines programs for resource efficiency while preserving functionality. Traditionally, it is a process carried out by developers and compilers. This paper introduces a third option: automated optimization at the source code level. We present Supersonic, a neural approach that targets minor source code modifications for optimization. Using a seq2seq model, Supersonic is trained on C/C++ program pairs (x_t, x_{t+1}), where x_{t+1} is an optimized version of x_t, and outputs a diff. Supersonic's performance is benchmarked against OpenAI's GPT-3.5-Turbo and GPT-4 on competitive programming tasks. The experiments show that Supersonic not only outperforms both models on the code optimization task but also minimizes the extent of the change, with a model more than 600x smaller than GPT-3.5-Turbo and 3700x smaller than GPT-4.

Tags: "Analysis"

Fernando Uyaguari, Silvia T. Acuña, John W. Castro, Davide Fucci, Oscar Dieste, Sira Vegas, "Relevant information in TDD experiment reporting"

Abstract: Experiments are a commonly used research method in software engineering (SE). Researchers report their experiments following detailed guidelines. However, at least in the field of test-driven development (TDD), researchers do not specify how they operationalized the response variables and, in particular, the measurement process. This article has three aims: (i) identify the response variable operationalization components in TDD experiments that study external quality; (ii) study their influence on the experimental results; and (iii) determine whether experiment reports describe the measurement process components that have an impact on the results. We used a two-part sequential mixed-methods design. The first part adopts a quantitative approach, applying a statistical analysis of the impact of the operationalization components on the experimental results. The second part follows with a qualitative approach, applying a systematic mapping study (SMS). The test suites, intervention types, and measurers influence the measurements and the results of the statistical analysis of TDD experiments in SE. The test suites have a major impact on both the measurements and the results of the experiments. The intervention type has less impact on the results than on the measurements. While the measurers have an impact on the measurements, this does not transfer to the experimental results. On the other hand, the results of our SMS confirm that TDD experiments do not usually report the test suites, the test case generation method, or the details of how external quality was measured. A measurement protocol should be used to ensure that the measurements made by different measurers are similar. It is necessary to report the test cases, the experimental task, and the intervention type in order to reproduce the measurements and statistical analyses, as well as to replicate experiments and build dependable families of experiments.

Tags: "Process"
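The test-suite effect reported above is concrete once external quality is operationalized as, say, the fraction of acceptance tests a solution passes: the same program scores differently under different suites. A toy illustration under that assumed operationalization; the subject program and suites are invented:

    # Toy illustration of response-variable operationalization in TDD
    # experiments: "external quality" as the fraction of tests passed.
    # The suites and subject program are invented examples.
    def external_quality(program, suite):
        passed = sum(1 for test in suite if test(program))
        return passed / len(suite)

    fizzbuzz = lambda n: ("FizzBuzz" if n % 15 == 0 else
                          "Fizz" if n % 3 == 0 else
                          "Buzz" if n % 5 == 0 else str(n))

    suite_a = [lambda p: p(3) == "Fizz", lambda p: p(5) == "Buzz"]
    suite_b = [lambda p: p(15) == "FizzBuzz", lambda p: p(0) == "0"]  # stricter edge cases

    print(external_quality(fizzbuzz, suite_a))  # 1.0
    print(external_quality(fizzbuzz, suite_b))  # 0.5 -- same program, different score

This is why the authors call for reporting the test cases themselves, not just the guideline-mandated design details.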
href="https://conf.researchr.org/profile/icse-2025/haipengcai" class="navigate"><div class="media"><span class="pull-left"><img alt="Haipeng Cai" src="https://conf.researchr.org/getProfileImage/haipengcai/d102c27a-1806-4b65-8bb9-a6c1f60b1f99/micro-avatar.jpg?1725566912000" class="outputimage media-object thumbnail no-bottom-margin"/></span><div class="media-body"><h5 class="media-heading">Haipeng Cai<span class="pull-right"><small>Journal First Co-Chair</small></span></h5><h5 class="media-heading"><span class="text-black">University at Buffalo, SUNY</span></h5><h5 class="media-heading"><small>United States</small></h5></div></div></a></li></ul></div></div></div><div id="messages-placeholder" class="alert alert-warning" style="display:none;"><a data-dismiss="alert" class="close">x</a><em>Tue 25 Feb 20:13</em></div></div><div id="notifications-ph" class="webdsl-placeholder"></div><div id="event-modal-loader" class="webdsl-placeholder"><form name="form_131600131703c411e65b13378d08eb1f6672b5a0259" id="form_131600131703c411e65b13378d08eb1f6672b5a0259" action="https://conf.researchr.org/eventDetailsModalByAjaxConferenceEdition" accept-charset="UTF-8" method="POST" class="hidden"><input type="hidden" name="form_131600131703c411e65b13378d08eb1f6672b5a0259" value="1" /><input type="hidden" name="context" value="icse-2025" /><input name="ae03f7f6f951d515a297b161e922205d" type="text" value="" class="inputString form-control event-id-input"/><button style="position: absolute; left: -9999px; width: 1px; height: 1px;" onclick='javascript:serverInvoke("https://conf.researchr.org/eventDetailsModalByAjaxConferenceEdition","eventDetailsModalByAjaxConferenceEdition_ia0_3c411e65b13378d08eb1f6672b5a0259", [{"name":"context", "value":"icse-2025"},],"form_131600131703c411e65b13378d08eb1f6672b5a0259", this.nextSibling, false,"event-modal-loader"); return false;'></button><a submitid="eventDetailsModalByAjaxConferenceEdition_ia0_3c411e65b13378d08eb1f6672b5a0259" href="javascript:void(0)" onclick="javascript:loadImageElem=this;$(this.previousSibling).click()" id="load-modal-action"></a></form></div><div id="event-star-form" class="webdsl-placeholder"><form name="form_509860938088b48fd14544d4239b498a2cf339e02b" id="form_509860938088b48fd14544d4239b498a2cf339e02b" action="https://conf.researchr.org/eventStarByAjaxConferenceEdition" accept-charset="UTF-8" method="POST" class="hidden"><input type="hidden" name="form_509860938088b48fd14544d4239b498a2cf339e02b" value="1" /><input type="hidden" name="context" value="icse-2025" /><input name="a0b55aa29cf9431a9461b359872014e3" type="text" value="" class="inputString form-control event-id-input"/><button style="position: absolute; left: -9999px; width: 1px; height: 1px;" onclick='javascript:serverInvoke("https://conf.researchr.org/eventStarByAjaxConferenceEdition","eventStarByAjaxConferenceEdition_ia0_88b48fd14544d4239b498a2cf339e02b", [{"name":"context", "value":"icse-2025"},],"form_509860938088b48fd14544d4239b498a2cf339e02b", this.nextSibling, false,"event-star-form"); return false;'></button><a submitid="eventStarByAjaxConferenceEdition_ia0_88b48fd14544d4239b498a2cf339e02b" href="javascript:void(0)" onclick="javascript:loadImageElem=this;$(this.previousSibling).click()" id="star-event-action"></a></form></div><div id="event-modals" class="webdsl-placeholder"></div><script type="text/javascript">(function(){ var post_process_function = function(n){ var node=(n&&n.nodeType === 1)?n:document; addEventModalLoadOnClick(node); addStarredEventOnClick(node); }; var 
original_post_process_func = ajax_post_process; ajax_post_process = function(){ original_post_process_func.apply(this,arguments); post_process_function.apply(this,arguments); }; $(document).ready( post_process_function ); })(); </script><footer class="footer"><div class="container"><div class="footer-box"><div class="row"><div class="col-sm-3"><h3><a href="https://conf.researchr.org/home/icse-2025" class="navigate"><span class="glyphicon glyphicon-home"></span> ICSE 2025</a></h3><div><a href="https://conf.researchr.org/contact/icse-2025" class="navigate"><span class="glyphicon glyphicon-envelope"></span> contact form</a></div><hr/>using <a href="https://conf.researchr.org" class="navigate">conf.researchr.org</a> (<a href="http://yellowgrass.org/roadmap/conf.researchr.org" class="navigate">v1.69.0</a>)<br/><small><a href="https://conf.researchr.org/support/icse-2025" target="_blank" class="navigate"><span class="glyphicon glyphicon-question-sign"></span> Support page</a></small><br/><small></small></div><div class="col-sm-5"><div class="row"><div class="col-sm-6"><h4>Tracks</h4><a href="https://conf.researchr.org/track/icse-2025/icse-2025-main-icse-plenaries" class="navigate">Main Plenaries</a><br/><a href="https://conf.researchr.org/track/icse-2025/icse-2025-research-track" class="navigate">Research Track</a><br/><a href="https://conf.researchr.org/track/icse-2025/icse-2025-software-engineering-in-practice" class="navigate">SE In Practice (SEIP)</a><br/><a href="https://conf.researchr.org/track/icse-2025/icse-2025-software-engineering-in-society" class="navigate">SE in Society (SEIS)</a><br/><a href="https://conf.researchr.org/track/icse-2025/icse-2025-nier" class="navigate">New Ideas and Emerging Results (NIER)</a><br/><a href="https://conf.researchr.org/track/icse-2025/icse-2025-journal-first-papers" class="navigate">Journal-first Papers</a><br/><a href="https://conf.researchr.org/track/icse-2025/icse-2025-demonstrations" class="navigate">Demonstrations</a><br/><a href="https://conf.researchr.org/track/icse-2025/icse-2025-artifact-evaluation" class="navigate">Artifact Evaluation</a><br/><a href="https://conf.researchr.org/track/icse-2025/icse-2025-industry-challenge-track" class="navigate">Industry Challenge Track</a><br/><a href="https://conf.researchr.org/track/icse-2025/icse-2025-software-engineering-education" class="navigate">Software Engineering Education</a><br/><a href="https://conf.researchr.org/track/icse-2025/icse-2025-posters" class="navigate">Posters</a><br/><a href="https://conf.researchr.org/track/icse-2025/icse-2025-doctoral-symposium" class="navigate">Doctoral Symposium</a><br/><a href="https://conf.researchr.org/track/icse-2025/icse-2025-shadow-research-track-program-committee" class="navigate">Shadow PC</a><br/><a href="https://conf.researchr.org/track/icse-2025/icse-2025-workshops" class="navigate">Workshops</a><br/><a href="https://conf.researchr.org/track/icse-2025/icse-2025-tutorials" class="navigate">Tutorials and Technical Briefings</a><br/><a href="https://conf.researchr.org/track/icse-2025/icse-2025-new-faculty-symposium" class="navigate">New Faculty Symposium</a><br/><a href="https://conf.researchr.org/track/icse-2025/icse-2025-symposium-on-software-engineering-in-the-global-south" class="navigate">Symposium on Software Engineering in the Global South (SEiGS)</a><br/><a href="https://conf.researchr.org/track/icse-2025/icse-2025-SRC" class="navigate">SRC - ACM Student Research Competition</a><br/><a 
href="https://conf.researchr.org/track/icse-2025/icse-2025-social-and-networking" class="navigate">Social And Networking</a><br/><a href="https://conf.researchr.org/track/icse-2025/icse-2025-meetings-and-bofs" class="navigate">Meetings and BOFs</a><br/><a href="https://conf.researchr.org/track/icse-2025/icse-2025-smew---student-mentoring-workshop" class="navigate">Student Mentoring Workshop (SMeW)</a><br/><a href="https://conf.researchr.org/track/icse-2025/icse-2025-student-volunteers" class="navigate">Student Volunteers</a></div><div class="col-sm-6"><h4>Co-hosted Conferences</h4><a href="https://conf.researchr.org/home/ast-2025" class="navigate">AST 2025</a><br/><a href="https://conf.researchr.org/home/cain-2025" class="navigate">CAIN 2025</a><br/><a href="https://conf.researchr.org/home/chase-2025" class="navigate">CHASE 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/cseet-2025" class="navigate">CSEE&amp;T 2025</a><br/><a href="https://conf.researchr.org/home/forge-2025" class="navigate">FORGE 2025</a><br/><a href="https://2025.formalise.org" class="navigate">FormaliSE 2025</a><br/><a href="https://conf.researchr.org/home/icpc-2025" class="navigate">ICPC 2025</a><br/><a href="https://conf.researchr.org/home/icsr-2025" class="navigate">ICSR 2025</a><br/><a href="https://conf.researchr.org/home/mobilesoft-2025" class="navigate">MOBILESoft 2025</a><br/><a href="https://2025.msrconf.org" class="navigate">MSR 2025</a><br/><a href="https://conf.researchr.org/home/seams-2025" class="navigate">SEAMS 2025</a><br/><a href="https://conf.researchr.org/home/TechDebt-2025" class="navigate">TechDebt 2025</a><br/><h4>Workshops</h4><a href="https://conf.researchr.org/home/icse-2025/aiops-2025" class="navigate">AIOps 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/apr-2025" class="navigate">APR 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/botse-2025" class="navigate">BotSE 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/deeptest-2025" class="navigate">DeepTest 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/designing-2025" class="navigate">Designing 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/encycris-2025" class="navigate">EnCyCriS 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/ftw-2025" class="navigate">FTW 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/finanse-2025" class="navigate">FinanSE 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/gas-2025" class="navigate">GAS 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/ge-icse-2025" class="navigate">GE@ICSE 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/gi-2025" class="navigate">GI 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/greens-2025" class="navigate">GREENS 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/gamify-2025" class="navigate">Gamify 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/ide-2025" class="navigate">IDE 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/iwsib-2025" class="navigate">IWSiB 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/llm4code-2025" class="navigate">LLM4Code 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/mo2re-2025" class="navigate">MO2RE 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/nlbse-2025" class="navigate">NLBSE 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/nse-2025" class="navigate">NSE 
2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/q-se-2025" class="navigate">Q-SE 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/raie-2025" class="navigate">RAIE 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/raise-2025" class="navigate">RAISE 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/rose-2025" class="navigate">RoSE 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/satrends-2025" class="navigate">SATrends 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/sbft-2025" class="navigate">SBFT 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/se4ads-2025" class="navigate">SE4ADS 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/serp4iot-2025" class="navigate">SERP4IoT 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/sesos-2025" class="navigate">SESoS</a><br/><a href="https://conf.researchr.org/home/icse-2025/static-2025" class="navigate">STATIC 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/svm-2025" class="navigate">SVM 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/wetseb-2025" class="navigate">WETSEB 2025</a><br/><a href="https://conf.researchr.org/home/icse-2025/wsese-2025" class="navigate">WSESE 2025</a></div></div></div><div class="col-sm-2"><h4>Attending</h4><a href="https://conf.researchr.org/venue/icse-2025/icse-2025-venue" class="navigate">Venue: Rogers Centre, Ottawa (formerly Shaw Centre)</a><br><a href="https://conf.researchr.org/attending/icse-2025/registration" class="navigate">Registration</a><br><a href="https://conf.researchr.org/attending/icse-2025/Hotels" class="navigate">Hotels</a><br><a href="https://conf.researchr.org/attending/icse-2025/visa-and-travel-authorization" class="navigate">Visa and Travel Authorization Information for ICSE 2025 and its Co-Located Events</a><br><a href="https://conf.researchr.org/attending/icse-2025/Travelling" class="navigate">Travelling to Ottawa</a><br><a href="https://conf.researchr.org/attending/icse-2025/travel-support" class="navigate">Travel Support</a><br><a href="https://conf.researchr.org/attending/icse-2025/gettingAroundOttawa" class="navigate">Getting Around Ottawa</a><br><a href="https://conf.researchr.org/attending/icse-2025/food" class="navigate">Food at ICSE2025</a><br><a href="https://conf.researchr.org/attending/icse-2025/social-media" class="navigate">Social media</a><br><a href="https://conf.researchr.org/attending/icse-2025/fun+activities" class="navigate">Fun Activities</a><br><a href="https://conf.researchr.org/attending/icse-2025/Code+of+Conduct" class="navigate">Code of Conduct for ICSE 2025 and its Co-Located Events</a><br><a href="https://conf.researchr.org/attending/icse-2025/Diversity+and+Inclusion+Plan" class="navigate">Equity, Diversity, and Inclusion Plan for ICSE 2025</a><br><a href="https://conf.researchr.org/attending/icse-2025/Sustainability" class="navigate">Sustainability</a></div><div class="col-sm-2"><script> // Skrink the carrousel const carDivs = document.getElementsByClassName("carousel"); if(carDivs.length > 0) carDivs[0].setAttribute("style", "transform:scale(0.5); height:210px;"); // Insert the steering committee link above the other committee links in the org. 
navbar menu const steeringCommitteeURL = 'http://www.icse-conferences.org/committee.html'; const menuItemHTML = '<li><a href="' + steeringCommitteeURL + '" class="navigate" target="_blank">ICSE Steering Committee</a></li>'; $('#navigationbar').find('a[href*="/committee/"]').first().closest('.dropdown-menu ul').prepend( menuItemHTML ); // Insert 47th International Conference on Software Engineering const fullName = "<span class=\"fitifsmall\">"+ "<b>ICSE 2025<br/><span class=\"cutifsmall\">47th International Conference<br/>on Software Engineering</span></b></span>"+ "<span class=\"datewithlogo\"><br/><span class=\"cutifsmall\">Sun</span> 27 April - <span class=\"cutifsmall\">Sat</span> 3 May 2025<br/>Ottawa<span class=\"cutifsmall\">, Ontario, Canada</span></span>"; var brandElements = document.getElementsByClassName("brand-text"); for (var i = 0; i < brandElements.length; i++) { brandElements[i].innerHTML = fullName; } const logobarDiv = document.getElementsByClassName('logobar')[0]; logobarDiv.addEventListener('click', () => { window.location.assign("https://conf.researchr.org/home/icse-2025"); }); // Insert sponsors in Navbar var sponsorLogos=document.createElement("span"); logobarDiv.insertBefore(sponsorLogos,logobarDiv.firstChild); sponsorLogos.style="float: right;vertical-align: top; position:absolute; right: 0px; top: 5px"; sponsorLogos.innerHTML="<a href=\"https://tc.computer.org/tcse/\"><img height=\"24px\" style=\"vertical-align:top\" "+ "src=\"https://conf.researchr.org/getLogo/5d74ba38-698f-46af-9929-3dc0700e6142?1698376867000\" alt=\"TCSE logo\" /></a>"+ "&nbsp;<br/>&nbsp;"+ "<a href=\"https://www2.sigsoft.org/\"><img height=\"24px\" style=\"vertical-align:top\" "+ "src=\"https://conf.researchr.org/getLogo/673e6c15-1ebb-413d-80e6-00c17a7a120a?1698376919000\" alt=\"Sigsoft logo\" /></a>"; sponsorLogos.innerHTML += "<br/>" + "<a href='https://conf.researchr.org/attending/icse-2025/Sustainability'><img src='https://conf.researchr.org/getImage/icse-2025/orig/IEEE-sustainability-badge-R1-125.png' alt='Sustainability badge' " + "style='display: block; margin: 0 auto; height: 44px;' /></a>"; // Fix colcated conference menu labels var tomaybefix = Array.from(document.getElementsByClassName("dropdown-toggle")); var tomaybealsofix = Array.from(document.getElementsByClassName("navigate")); var tofix = tomaybefix.concat(tomaybealsofix); for(var ii = 0; ii < tofix.length; ii++) { var subs = tofix[ii].childNodes; len2 = subs.length; for (var jj = 0; jj < len2; jj++) { // console.log("Found tag "+subs[jj].tagName); if(subs[jj].tagName=="STRONG") { // console.log("Fixing it "+subs[jj]); if(subs[jj].innerText == "CHASE") { subs[jj].insertAdjacentHTML('afterend'," Cooperative and Human Aspects of SE");} else if(subs[jj].innerText == "MSR") { subs[jj].insertAdjacentHTML('afterend'," Mining Software Repositories");} else if(subs[jj].innerText == "SEAMS") { subs[jj].insertAdjacentHTML('afterend'," Adaptive And Self-Managing Systems");} else if(subs[jj].innerText == "ICPC") { subs[jj].insertAdjacentHTML('afterend'," Program Comprehension");} else if(subs[jj].innerText == "FORGE") { subs[jj].insertAdjacentHTML('afterend'," AI Foundation Models and SE");} else if(subs[jj].innerText == "AST") { subs[jj].insertAdjacentHTML('afterend'," Automation of Software Test");} else if(subs[jj].innerText == "ICSR") { subs[jj].insertAdjacentHTML('afterend'," Software Reuse");} else if(subs[jj].innerText == "MOBILESoft") { subs[jj].insertAdjacentHTML('afterend'," Mobile SE and Systems");} else if(subs[jj].innerText 
== "TechDebt") { subs[jj].insertAdjacentHTML('afterend'," Technical Debt");} else if(subs[jj].innerText == "CAIN") { subs[jj].insertAdjacentHTML('afterend'," SE for Artificial Intelligence");} else if(subs[jj].innerText == "FormalISE") { subs[jj].insertAdjacentHTML('afterend'," Formal Methods");} //else if(subs[jj].innerText == "CSEE&T") { // subs[jj].insertAdjacentHTML('afterend'," Software Engineering Education");} else if(subs[jj].innerText == "IWSiB") { subs[jj].insertAdjacentHTML('afterend'," Software-Intensive Business");} } } } </script> <p><a href="https://www.facebook.com/icseconf" target="socialfb" title="News about ICSE on Facebook (please follow us!)."><img src="https://umple.org/files/FB-logo.jpg" alt="Facebook logo" height=20/>&nbsp;ICSE on Facebook </a>&nbsp; <br/><br/> <a href="https://x.com/ICSEconf" target="socialx" title="News about ICSE on X (formerly Twitter –please follow us!)."><img src="https://cdn.cms-twdigitalassets.com/content/dam/about-twitter/x/brand-toolkit/logo-black.png.twimg.2560.png" alt="X logo" height=20/>&nbsp;ICSE on X</a> <br/><br/> <img src='https://conf.researchr.org/getImage/icse-2025/orig/IEEE-sustainability-badge-R1-125.png' alt='Sustainability badge' style='display: block; margin: 4 auto; height: 50px;' /> </div></div></div></div></footer></div><script type="text/javascript">(function(){ var post_process_function = function(n){ var node=(n&&n.nodeType === 1)?n:document; let defaultplacement = $(document).scrollLeft() > 100 ? 'auto left' : 'auto top'; $(node).find('[title]').each( function(i,el){ var $e=$(el);$e.tooltip({placement: function(tt, elem){ var attval = elem.getAttribute('data-placement'); return attval ? attval : defaultplacement; }, sanitize: false, container: 'body' }) } ).on('show.bs.tooltip', function () { let el = this; while (el && window.getComputedStyle(el).position !== 'fixed') { el = el.parentElement; } if(el) $(this).data('bs.tooltip').options.container = el;});$('.tooltip.fade.in, .ui-tooltip-content').remove(); }; var original_post_process_func = ajax_post_process; ajax_post_process = function(){ original_post_process_func.apply(this,arguments); post_process_function.apply(this,arguments); }; $(document).ready( post_process_function ); })(); </script><script type="text/javascript">jQuery(document).ready(function($) { $(".clickable-row").click(function() { var href = $(this).attr("href"); if( window.location.href.indexOf( href ) < 0 ){ if ($(this).hasClass('new-window') ){ window.open( href ); } else { window.document.location = href; } } }); }); </script></body></html>

Pages: 1 2 3 4 5 6 7 8 9 10