Search | arXiv e-print repository
Showing 1–11 of 11 results for author: Kim, S G

Searching in archive cs. Search in all archives: https://arxiv.org/search/?searchtype=author&query=Kim%2C+S+G

Results are sorted by announcement date (newest first), 50 results per page.
1. arXiv:2411.18049 [pdf, other] (cs.HC)
Understanding the Impact of Spatial Immersion in Web Data Stories
Authors: Seon Gyeom Kim, Juhyeong Park, Yutaek Song, Donggun Lee, Yubin Lee, Ryan Rossi, Jane Hoffswell, Eunyee Koh, Tak Yeon Lee
Abstract: An increasing number of web articles engage the reader with the feeling of being immersed in the data space. However, the exact characteristics of spatial immersion in the context of visual storytelling remain vague. For example, what are the common design patterns of data stories with spatial immersion? How do they affect the reader's experience? To gain a deeper understanding of the subject, we collected 23 distinct data stories with spatial immersion, and identified six design patterns: cinematic camera shots and transitions, intuitive data representations, realism, naturally moving elements, direct manipulation of camera or visualization, and dynamic dimension. Subsequently, we designed four data stories and conducted a crowdsourced user study comparing three design variations (static, animated, and immersive). Our results suggest that data stories with the design patterns for spatial immersion are more interesting and persuasive than static or animated ones, but no single condition was deemed more understandable or trustworthy.
Submitted 26 November, 2024; originally announced November 2024.

2. arXiv:2411.07451 [pdf, other] (cs.HC, cs.AI, cs.LG)
Optimizing Data Delivery: Insights from User Preferences on Visuals, Tables, and Text
Authors: Reuben Luera, Ryan Rossi, Franck Dernoncourt, Alexa Siu, Sungchul Kim, Tong Yu, Ruiyi Zhang, Xiang Chen, Nedim Lipka, Zhehao Zhang, Seon Gyeom Kim, Tak Yeon Lee
Abstract: In this work, we research user preferences to see a chart, table, or text given a question asked by the user. This enables us to understand when it is best to show a chart, table, or text to the user for the specific question. For this, we conduct a user study where users are shown a question and asked what they would prefer to see, and we use the data to establish that a user's personal traits do influence the data outputs that they prefer. Understanding how user characteristics impact a user's preferences is critical to creating data tools with a better user experience. Additionally, we investigate to what degree an LLM can be used to replicate a user's preference with and without user preference data. Overall, these findings have significant implications pertaining to the development of data tools and the replication of human preferences using LLMs. Furthermore, this work demonstrates the potential use of LLMs to replicate user preference data, which has major implications for future user modeling and personalization research.
Submitted 11 November, 2024; originally announced November 2024.

3. arXiv:2406.05270 [pdf] (physics.med-ph, cs.CV, cs.LG, eess.IV)
fastMRI Breast: A publicly available radial k-space dataset of breast dynamic contrast-enhanced MRI
Authors: Eddy Solomon, Patricia M. Johnson, Zhengguo Tan, Radhika Tibrewala, Yvonne W. Lui, Florian Knoll, Linda Moy, Sungheon Gene Kim, Laura Heacock
Abstract: This data curation work introduces the first large-scale dataset of radial k-space and DICOM data for breast DCE-MRI acquired in diagnostic breast MRI exams. Our dataset includes case-level labels indicating patient age, menopause status, lesion status (negative, benign, and malignant), and lesion type for each case. The public availability of this dataset and accompanying reconstruction code will support research and development of fast and quantitative breast image reconstruction and machine learning methods.
Submitted 7 June, 2024; originally announced June 2024.

4. arXiv:2201.06451 [pdf] (cs.HC)
Point & Select: Designing an Interaction Technique for Inputting Surrounding Point of Interests in Driving Context
Authors: Jaehoon Pyun, Younggeol Cho, Seon Gyeom Kim, Woohun Lee
Abstract: We propose an interaction technique called "Point & Select." It enables a driver to directly enter a point of interest (POI) into the in-vehicle infotainment system while driving in a city. Point & Select enables the driver to directly indicate with a finger, identify, adjust (if required), and finally confirm the POI on the screen by using buttons on the steering wheel. Based on a comparative evaluation of two conditions (driving-only and driving with input-task) on a simulator, we demonstrated the feasibility of the interaction in the driving context from the perspective of driver performance and interaction usability at speeds of 30, 50, and 70 km/h. Although the interaction usage and speed partially affected the driver's mental load, all the participants drove at an acceptable level in each condition. They carried out the task successfully with a success rate of 96.9% and a task completion time of 1.82 seconds on average.
Submitted 17 January, 2022; originally announced January 2022.
Comments: 17 pages. ACM Class: H.5.2

5. arXiv:2009.09282 [pdf, other] (eess.IV, cs.CV, cs.LG)
Reducing false-positive biopsies with deep neural networks that utilize local and global information in screening mammograms
Authors: Nan Wu, Zhe Huang, Yiqiu Shen, Jungkyu Park, Jason Phang, Taro Makino, S. Gene Kim, Kyunghyun Cho, Laura Heacock, Linda Moy, Krzysztof J. Geras
Abstract: Breast cancer is the most common cancer in women, and hundreds of thousands of unnecessary biopsies are done around the world at a tremendous cost. It is crucial to reduce the rate of biopsies that turn out to be benign tissue. In this study, we build deep neural networks (DNNs) to classify biopsied lesions as being either malignant or benign, with the goal of using these networks as second readers serving radiologists to further reduce the number of false-positive findings. We enhance the performance of DNNs that are trained to learn from small image patches by integrating global context, provided in the form of saliency maps learned from the entire image, into their reasoning, similar to how radiologists consider global context when evaluating areas of interest. Our experiments are conducted on a dataset of 229,426 screening mammography exams from 141,473 patients. We achieve an AUC of 0.8 on a test set consisting of 464 benign and 136 malignant lesions.
Submitted 19 September, 2020; originally announced September 2020.

6. arXiv:2002.07613 [pdf, other] (cs.CV, cs.LG, eess.IV, stat.ML)
An interpretable classifier for high-resolution breast cancer screening images utilizing weakly supervised localization
Authors: Yiqiu Shen, Nan Wu, Jason Phang, Jungkyu Park, Kangning Liu, Sudarshini Tyagi, Laura Heacock, S. Gene Kim, Linda Moy, Kyunghyun Cho, Krzysztof J. Geras
Abstract: Medical images differ from natural images in significantly higher resolutions and smaller regions of interest. Because of these differences, neural network architectures that work well for natural images might not be applicable to medical image analysis. In this work, we extend the globally-aware multiple instance classifier, a framework we proposed to address these unique properties of medical images. This model first uses a low-capacity, yet memory-efficient, network on the whole image to identify the most informative regions. It then applies another higher-capacity network to collect details from chosen regions. Finally, it employs a fusion module that aggregates global and local information to make a final prediction. While existing methods often require lesion segmentation during training, our model is trained with only image-level labels and can generate pixel-level saliency maps indicating possible malignant findings. We apply the model to screening mammography interpretation: predicting the presence or absence of benign and malignant lesions. On the NYU Breast Cancer Screening Dataset, consisting of more than one million images, our model achieves an AUC of 0.93 in classifying breasts with malignant findings, outperforming ResNet-34 and Faster R-CNN. Compared to ResNet-34, our model is 4.1x faster for inference while using 78.4% less GPU memory. Furthermore, we demonstrate, in a reader study, that our model surpasses radiologist-level AUC by a margin of 0.11. The proposed model is available online: https://github.com/nyukat/GMIC.
Submitted 13 February, 2020; originally announced February 2020.
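The abstract above outlines a concrete three-part architecture: a low-capacity network scans the whole image for informative regions, a higher-capacity network examines only those regions, and a fusion module combines both streams. The authors' actual implementation is linked above (https://github.com/nyukat/GMIC); the PyTorch sketch below is not that code, just a minimal illustration of the global-local pattern, with every layer size, the patch size, and the top-k crop heuristic chosen arbitrarily.

```python
# Illustrative sketch of the global-local pattern described above.
# NOT the authors' GMIC code (see https://github.com/nyukat/GMIC);
# all module sizes and the top-k selection heuristic are arbitrary.
import torch
import torch.nn as nn

class GlobalLocalClassifier(nn.Module):
    def __init__(self, k=4, patch=64):
        super().__init__()
        self.k, self.patch = k, patch
        # Low-capacity global network: coarse saliency map over the whole image.
        self.global_net = nn.Sequential(
            nn.Conv2d(1, 8, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(8, 1, 3, stride=2, padding=1),          # 1/4 resolution
        )
        # Higher-capacity local network, applied only to the selected patches.
        self.local_net = nn.Sequential(
            nn.Conv2d(1, 32, 3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(32, 16),
        )
        # Fusion module: combines global evidence with aggregated local features.
        self.fusion = nn.Linear(16 + 1, 1)

    def forward(self, x):                                     # x: (B, 1, H, W)
        saliency = self.global_net(x)                         # (B, 1, H/4, W/4)
        B, _, h, w = saliency.shape
        global_score = saliency.view(B, -1).mean(dim=1, keepdim=True)
        idx = saliency.view(B, -1).topk(self.k, dim=1).indices
        local_feats = []
        for b in range(B):                                    # crop top-k patches
            feats = []
            for i in idx[b].tolist():
                cy, cx = (i // w) * 4, (i % w) * 4            # back to input coords
                y0 = max(0, min(cy - self.patch // 2, x.size(2) - self.patch))
                x0 = max(0, min(cx - self.patch // 2, x.size(3) - self.patch))
                crop = x[b:b + 1, :, y0:y0 + self.patch, x0:x0 + self.patch]
                feats.append(self.local_net(crop))
            local_feats.append(torch.stack(feats, 1).mean(1)) # aggregate patches
        local = torch.cat(local_feats, 0)                     # (B, 16)
        logit = self.fusion(torch.cat([local, global_score], 1))
        return torch.sigmoid(logit)                           # image-level probability

model = GlobalLocalClassifier()
prob = model(torch.randn(2, 1, 256, 256))                     # illustrative input
```

As in the abstract, only image-level labels would be needed to train such a model end to end, since the patch locations come from the learned saliency map rather than from segmentation annotations.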
7. arXiv:1908.00615 [pdf, other] (eess.IV, cs.CV, stat.ML)
Improving localization-based approaches for breast cancer screening exam classification
Authors: Thibault Févry, Jason Phang, Nan Wu, S. Gene Kim, Linda Moy, Kyunghyun Cho, Krzysztof J. Geras
Abstract: We trained and evaluated a localization-based deep CNN for breast cancer screening exam classification on over 200,000 exams (over 1,000,000 images). Our model achieves an AUC of 0.919 in predicting malignancy in patients undergoing breast cancer screening, reducing the error rate of the baseline (Wu et al., 2019a) by 23%. In addition, the model generates bounding boxes for benign and malignant findings, providing interpretable predictions.
Submitted 1 August, 2019; originally announced August 2019.
Comments: MIDL 2019 [arXiv:1907.08612]. Report number: MIDL/2019/ExtendedAbstract/HyxoAR_AK4

8. arXiv:1907.13057 [pdf, other] (eess.IV, cs.CV, cs.LG, stat.ML)
Screening Mammogram Classification with Prior Exams
Authors: Jungkyu Park, Jason Phang, Yiqiu Shen, Nan Wu, S. Gene Kim, Linda Moy, Kyunghyun Cho, Krzysztof J. Geras
Abstract: Radiologists typically compare a patient's most recent breast cancer screening exam to their previous ones in making informed diagnoses. To reflect this practice, we propose new neural network models that compare pairs of screening mammograms from the same patient. We train and evaluate our proposed models on over 665,000 pairs of images (over 166,000 pairs of exams). Our best model achieves an AUC of 0.866 in predicting malignancy in patients who underwent breast cancer screening, reducing the error rate of the corresponding baseline.
Submitted 30 July, 2019; originally announced July 2019.
Comments: MIDL 2019 [arXiv:1907.08612]. Report number: MIDL/2019/ExtendedAbstract/HkgCdUaMq4

9. arXiv:1903.08297 [pdf, other] (cs.LG, cs.CV, stat.ML)
Deep Neural Networks Improve Radiologists' Performance in Breast Cancer Screening
Authors: Nan Wu, Jason Phang, Jungkyu Park, Yiqiu Shen, Zhe Huang, Masha Zorin, Stanisław Jastrzębski, Thibault Févry, Joe Katsnelson, Eric Kim, Stacey Wolfson, Ujas Parikh, Sushma Gaddam, Leng Leng Young Lin, Kara Ho, Joshua D. Weinstein, Beatriu Reig, Yiming Gao, Hildegard Toth, Kristine Pysarenko, Alana Lewin, Jiyon Lee, Krystal Airola, Eralda Mema, Stephanie Chung, et al. (7 additional authors not shown)
Abstract: We present a deep convolutional neural network for breast cancer screening exam classification, trained and evaluated on over 200,000 exams (over 1,000,000 images). Our network achieves an AUC of 0.895 in predicting whether there is a cancer in the breast, when tested on the screening population. We attribute the high accuracy of our model to a two-stage training procedure, which allows us to use a very high-capacity patch-level network to learn from pixel-level labels alongside a network learning from macroscopic breast-level labels. To validate our model, we conducted a reader study with 14 readers, each reading 720 screening mammogram exams, and found our model to be as accurate as experienced radiologists when presented with the same data. Finally, we show that a hybrid model, averaging the probability of malignancy predicted by a radiologist with a prediction of our neural network, is more accurate than either of the two separately. To better understand our results, we conduct a thorough analysis of our network's performance on different subpopulations of the screening population, model design, training procedure, errors, and properties of its internal representations.
Submitted 19 March, 2019; originally announced March 2019.
Comments: MIDL 2019 [arXiv:1907.08612]. Report number: MIDL/2019/ExtendedAbstract/SkxYez76FE
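The "hybrid model" in the abstract above is plain probability averaging, worth seeing in numbers; the values in this two-line illustration are invented.

```python
# Hybrid prediction as described in the abstract above: average the
# radiologist's estimated probability of malignancy with the network's.
# The numbers are invented purely for illustration.
p_radiologist, p_network = 0.30, 0.62
p_hybrid = (p_radiologist + p_network) / 2
print(p_hybrid)  # 0.46
```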
10. arXiv:1711.03674 [pdf, other] (cs.CV, cs.LG, stat.ML)
Breast density classification with deep convolutional neural networks
Authors: Nan Wu, Krzysztof J. Geras, Yiqiu Shen, Jingyi Su, S. Gene Kim, Eric Kim, Stacey Wolfson, Linda Moy, Kyunghyun Cho
Abstract: Breast density classification is an essential part of breast cancer screening. Although a lot of prior work considered this problem as a task for learning algorithms, to our knowledge, all of them used small and not clinically realistic data both for training and evaluation of their models. In this work, we explore the limits of this task with a data set coming from over 200,000 breast cancer screening exams. We use this data to train and evaluate a strong convolutional neural network classifier. In a reader study, we find that our model can perform this task comparably to a human expert.
Submitted 9 November, 2017; originally announced November 2017.

11. arXiv:1703.07047 [pdf, other] (cs.CV, cs.LG, stat.ML)
High-Resolution Breast Cancer Screening with Multi-View Deep Convolutional Neural Networks
Authors: Krzysztof J. Geras, Stacey Wolfson, Yiqiu Shen, Nan Wu, S. Gene Kim, Eric Kim, Laura Heacock, Ujas Parikh, Linda Moy, Kyunghyun Cho
Abstract: Advances in deep learning for natural images have prompted a surge of interest in applying similar techniques to medical images. The majority of the initial attempts focused on replacing the input of a deep convolutional neural network with a medical image, which does not take into consideration the fundamental differences between these two types of images. Specifically, fine details are necessary for detection in medical images, unlike in natural images, where coarse structures matter most. This difference makes it inadequate to use the existing network architectures developed for natural images, because they work on heavily downscaled images to reduce the memory requirements; this hides details necessary to make accurate predictions. Additionally, a single exam in medical imaging often comes with a set of views which must be fused in order to reach a correct conclusion. In our work, we propose to use a multi-view deep convolutional neural network that handles a set of high-resolution medical images. We evaluate it on large-scale mammography-based breast cancer screening (BI-RADS prediction) using 886,000 images. We focus on investigating the impact of the training set size and image size on the prediction accuracy. Our results highlight that performance increases with the size of the training set, and that the best performance can only be achieved using the original resolution. In the reader study, performed on a random subset of the test set, we confirmed the efficacy of our model, which achieved performance comparable to a committee of radiologists when presented with the same data.
Submitted 27 June, 2018; v1 submitted 21 March, 2017; originally announced March 2017.
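The multi-view fusion described in this last abstract (one convolutional column per view, with per-view representations combined before classification) is easy to sketch. The snippet below is an illustrative PyTorch pattern, not the paper's architecture: the four views follow standard screening mammography (left/right craniocaudal and mediolateral oblique), and all layer sizes and the class count are arbitrary choices.

```python
# Illustrative multi-view CNN: one convolutional column per screening view,
# features concatenated before a shared classifier. Not the paper's
# architecture; every size here is arbitrary.
import torch
import torch.nn as nn

def make_column():
    # Per-view feature extractor. Weights are independent per view here;
    # sharing them across views would be an equally valid design choice.
    return nn.Sequential(
        nn.Conv2d(1, 16, 3, stride=2, padding=1), nn.ReLU(),
        nn.Conv2d(16, 32, 3, stride=2, padding=1), nn.ReLU(),
        nn.AdaptiveAvgPool2d(1), nn.Flatten(),               # -> (B, 32)
    )

class MultiViewCNN(nn.Module):
    def __init__(self, n_views=4, n_classes=3):
        super().__init__()
        self.columns = nn.ModuleList(make_column() for _ in range(n_views))
        self.classifier = nn.Linear(32 * n_views, n_classes)

    def forward(self, views):            # list of n_views (B, 1, H, W) tensors
        feats = [col(v) for col, v in zip(self.columns, views)]
        return self.classifier(torch.cat(feats, dim=1))

model = MultiViewCNN()
views = [torch.randn(2, 1, 256, 256) for _ in range(4)]  # one tensor per view
logits = model(views)                   # (2, 3), e.g. BI-RADS categories
```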