Search | arXiv e-print repository
Showing 1–11 of 11 results for author: Baek, J

Searching in archive eess; sorted by announcement date (newest first), 50 results per page.
1. arXiv:2411.06738 [pdf, other]
   Subjects: eess.IV (Image and Video Processing)
   Title: 360-Degree Video Super Resolution and Quality Enhancement Challenge: Methods and Results
   Authors: Ahmed Telili, Wassim Hamidouche, Ibrahim Farhat, Hadi Amirpour, Christian Timmerer, Ibrahim Khadraoui, Jiajie Lu, The Van Le, Jeonneung Baek, Jin Young Lee, Yiying Wei, Xiaopeng Sun, Yu Gao, JianCheng Huangl, Yujie Zhong
   Abstract: Omnidirectional (360-degree) video is rapidly gaining popularity due to advancements in immersive technologies like virtual reality (VR) and extended reality (XR). However, real-time streaming of such videos, especially in live mobile scenarios like unmanned aerial vehicles (UAVs), is challenged by limited bandwidth and strict latency constraints. Traditional methods, such as compression and adaptive resolution, help but often compromise video quality and introduce artifacts that degrade the viewer experience. Additionally, the unique spherical geometry of 360-degree video presents challenges not encountered in traditional 2D video. To address these issues, we initiated the 360-degree Video Super Resolution and Quality Enhancement Challenge. This competition encourages participants to develop efficient machine learning solutions to enhance the quality of low-bitrate compressed 360-degree videos, with two tracks focusing on 2x and 4x super-resolution (SR). In this paper, we outline the challenge framework, detailing the two competition tracks and highlighting the SR solutions proposed by the top-performing models. We assess these models within a unified framework, considering quality enhancement, bitrate gain, and computational efficiency. This challenge aims to drive innovation in real-time 360-degree video streaming, improving the quality and accessibility of immersive visual experiences.
   Submitted 11 November, 2024; originally announced November 2024.
   Comments: 14 pages, 9 figures
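The abstract flags the spherical geometry of 360-degree video as a complication that flat-frame metrics miss. A standard spherical-aware fidelity measure for equirectangular frames is WS-PSNR, which weights each pixel's squared error by the cosine of its latitude; the paper does not state that this is the challenge's scoring metric, so the sketch below is illustrative only.

```python
import numpy as np

def ws_psnr(ref: np.ndarray, dist: np.ndarray, max_val: float = 255.0) -> float:
    """WS-PSNR for a pair of single-channel equirectangular frames (H x W).
    Pixels near the poles cover less solid angle, so their error is down-weighted."""
    h, w = ref.shape
    rows = np.arange(h).reshape(-1, 1)
    weight = np.broadcast_to(np.cos((rows + 0.5 - h / 2) * np.pi / h), (h, w))
    err = ref.astype(np.float64) - dist.astype(np.float64)
    wmse = np.sum(weight * err ** 2) / np.sum(weight)
    return 10.0 * np.log10(max_val ** 2 / wmse)
```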
2. arXiv:2404.02143 [pdf]
   Subjects: physics.med-ph (Medical Physics); eess.SP (Signal Processing)
   Title: Multiparametric quantification and visualization of liver fat using ultrasound
   Authors: Jihye Baek, Ahmed El Kaffas, Aya Kamaya, Kenneth Hoyt, Kevin J. Parker
   Abstract: Objectives: Several ultrasound measures have shown promise for assessment of steatosis compared to traditional B-scan; however, clinicians may be required to integrate information across the parameters. Here, we propose an integrated multiparametric approach, enabling simple clinical assessment of key information from combined ultrasound parameters. Methods: We measured 13 parameters related to ultrasound and shear wave elastography in 30 human subjects under a study of liver fat. The 13 individual measures are assessed for their predictive value using independent magnetic resonance imaging-derived proton density fat fraction (MRI-PDFF) measurements as a reference standard. In addition, a comprehensive and fine-grained analysis is made of all possible combinations of subsets of these parameters to determine whether any subset can be efficiently combined to predict fat fraction. Results: We found that as few as four key parameters related to ultrasound propagation are sufficient to generate a linear multiparametric parameter with a correlation against MRI-PDFF values of greater than 0.93. This optimal combination was found to have a classification area under the curve (AUC) approaching 1.0 when applying a threshold for separating steatosis grade zero from higher classes. Furthermore, a strategy is developed for applying local estimates of fat content as a color overlay to produce a visual impression of the extent and distribution of fat within the liver. Conclusion: In principle, this approach can be applied to most clinical ultrasound systems to provide the clinician and patient with a rapid and inexpensive estimate of liver fat content.
   Submitted 2 April, 2024; originally announced April 2024.
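The reported result (four ultrasound-propagation parameters combining linearly to r > 0.93 against MRI-PDFF) comes from an exhaustive search over parameter subsets. A minimal sketch of that kind of search with a least-squares fit; the array names and data files are hypothetical, and the paper's actual 13 parameters are not enumerated here.

```python
from itertools import combinations
import numpy as np

def best_linear_subset(X: np.ndarray, y: np.ndarray, k: int = 4):
    """Exhaustively search k-parameter subsets, fit least squares, and return
    the subset whose fitted combination correlates best with the reference y
    (here: MRI-PDFF fat fraction)."""
    best_idx, best_r, best_coef = None, -1.0, None
    for idx in combinations(range(X.shape[1]), k):
        A = np.column_stack([X[:, idx], np.ones(len(y))])  # add intercept
        coef, *_ = np.linalg.lstsq(A, y, rcond=None)
        r = np.corrcoef(A @ coef, y)[0, 1]
        if r > best_r:
            best_idx, best_r, best_coef = idx, r, coef
    return best_idx, best_r, best_coef

# Hypothetical usage: 30 subjects x 13 ultrasound/SWE parameters.
# X = np.load("us_parameters.npy"); y = np.load("mri_pdff.npy")
# subset, r, coef = best_linear_subset(X, y, k=4)
```

With 13 candidate parameters there are only 715 four-parameter subsets, so brute force is cheap here.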
3. arXiv:2304.04027 [pdf, other]
   Subjects: eess.IV (Image and Video Processing); cs.CV (Computer Vision and Pattern Recognition); cs.LG (Machine Learning)
   Title: NeBLa: Neural Beer-Lambert for 3D Reconstruction of Oral Structures from Panoramic Radiographs
   Authors: Sihwa Park, Seongjun Kim, Doeyoung Kwon, Yohan Jang, In-Seok Song, Seung Jun Baek
   Abstract: Panoramic radiography (panoramic X-ray, PX) is a widely used imaging modality for dental examination. However, PX only provides a flattened 2D image, lacking a 3D view of the oral structure. In this paper, we propose NeBLa (Neural Beer-Lambert) to estimate 3D oral structures from real-world PX. NeBLa tackles full 3D reconstruction for varying subjects (patients), where each reconstruction is based only on a single panoramic image. We create an intermediate representation called simulated PX (SimPX) from 3D cone-beam computed tomography (CBCT) data, based on the Beer-Lambert law of X-ray rendering and the rotational principles of PX imaging. SimPX aims not only to simulate PX truthfully, but also to facilitate the reverting process back to 3D data. We propose a novel neural model based on ray tracing which exploits both global and local input features to convert SimPX to 3D output. At inference, a real PX image is translated to a SimPX-style image with semantic regularization, and the translated image is processed by the generation module to produce high-quality outputs. Experiments show that NeBLa outperforms the prior state of the art in reconstruction tasks both quantitatively and qualitatively. Unlike prior methods, NeBLa does not require any prior information such as the shape of dental arches, nor a matched PX-CBCT dataset for training, which is difficult to obtain in clinical practice. Our code is available at https://github.com/sihwa-park/nebla.
   Submitted 6 February, 2024; v1 submitted 8 April, 2023; originally announced April 2023.
   Comments: 18 pages, 16 figures. Accepted to AAAI 2024.
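SimPX renders simulated panoramic projections from CBCT volumes via the Beer-Lambert law, under which intensity decays exponentially with the attenuation integrated along each ray: I = I0 * exp(-∫ μ dl). A minimal sketch of that attenuation step; the voxel sampling is simplified and the function name is mine, while the paper's projection geometry additionally follows the rotational principles of PX imaging.

```python
import numpy as np

def beer_lambert_render(mu: np.ndarray, ray_points: np.ndarray,
                        step: float, i0: float = 1.0) -> float:
    """Attenuate one ray through a voxel grid of attenuation coefficients.
    mu: (X, Y, Z) attenuation volume (e.g., from CBCT).
    ray_points: (N, 3) integer voxel indices sampled along the ray."""
    samples = mu[ray_points[:, 0], ray_points[:, 1], ray_points[:, 2]]
    optical_depth = np.sum(samples) * step   # approximate the line integral
    return i0 * np.exp(-optical_depth)       # Beer-Lambert law
```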
4. arXiv:2207.06560 [pdf]
   Subjects: eess.IV (Image and Video Processing); cs.CV (Computer Vision and Pattern Recognition); physics.med-ph (Medical Physics)
   Title: Improving the diagnosis of breast cancer based on biophysical ultrasound features utilizing machine learning
   Authors: Jihye Baek, Avice M. O'Connell, Kevin J. Parker
   Abstract: The improved diagnostic accuracy of ultrasound breast examinations remains an important goal. In this study, we propose a biophysical feature based machine learning method for breast cancer detection, to improve the performance beyond a benchmark deep learning algorithm and, furthermore, to provide a color overlay visual map of the probability of malignancy within a lesion. This overall framework is termed disease-specific imaging. Previously, 150 breast lesions were segmented and classified utilizing a modified fully convolutional network and a modified GoogLeNet, respectively. In this study, multiparametric analysis was performed within the contoured lesions. Features were extracted from ultrasound radiofrequency, envelope, and log-compressed data based on biophysical and morphological models. A support vector machine with a Gaussian kernel constructed a nonlinear hyperplane, and we calculated the distance between the hyperplane and the data point of each feature in multiparametric space. The distance can quantitatively assess a lesion and suggest the probability of malignancy, which is color coded and overlaid onto B-mode images. Training and evaluation were performed on in vivo patient data. The overall accuracy for the most common types and sizes of breast lesions in our study exceeded 98.0% for classification, with an area under the receiver operating characteristic curve of 0.98, which is more precise than the performance of radiologists and a deep learning system. Further, the correlation between the probability and BI-RADS enables a quantitative guideline to predict breast cancer. We therefore anticipate that the proposed framework can help radiologists achieve more accurate and convenient breast cancer classification and detection.
   Submitted 13 July, 2022; originally announced July 2022.
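The malignancy score is the distance of a lesion's feature vector from a Gaussian-kernel SVM hyperplane in multiparametric space. A minimal scikit-learn sketch of that scoring; the placeholder data and the use of Platt-scaled probabilities are my assumptions, standing in for the paper's feature extraction and probability mapping.

```python
import numpy as np
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline

# Placeholder data: (n_lesions, n_features) biophysical features,
# y: 0 = benign, 1 = malignant.
X = np.random.rand(150, 13)
y = np.random.randint(0, 2, 150)

clf = make_pipeline(StandardScaler(), SVC(kernel="rbf", probability=True))
clf.fit(X, y)

distance = clf.decision_function(X)        # signed distance to the hyperplane
p_malignant = clf.predict_proba(X)[:, 1]   # probability for the color overlay
```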
5. arXiv:2104.02895 [pdf, other]
   Subjects: eess.IV (Image and Video Processing); cs.CV (Computer Vision and Pattern Recognition); cs.LG (Machine Learning)
   DOI: 10.1007/978-3-030-67070-2_12
   Title: PyNET-CA: Enhanced PyNET with Channel Attention for End-to-End Mobile Image Signal Processing
   Authors: Byung-Hoon Kim, Joonyoung Song, Jong Chul Ye, JaeHyun Baek
   Abstract: Reconstructing an RGB image from RAW data obtained with a mobile device involves a number of image signal processing (ISP) tasks, such as demosaicing and denoising. Deep neural networks have shown promising results over hand-crafted ISP algorithms on solving these tasks separately, or even replacing the whole reconstruction process with one model. Here, we propose PyNET-CA, an end-to-end mobile ISP deep learning algorithm for RAW-to-RGB reconstruction. The model enhances PyNET, a recently proposed state-of-the-art model for mobile ISP, and improves its performance with a channel attention and subpixel reconstruction module. We demonstrate the performance of the proposed method with comparative experiments and results from the AIM 2020 learned smartphone ISP challenge. The source code of our implementation is available at https://github.com/egyptdj/skyb-aim2020-public.
   Submitted 6 April, 2021; originally announced April 2021.
   Comments: ECCV 2020 AIM workshop accepted version
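PyNET-CA's additions are a channel attention module and a subpixel reconstruction module. A minimal PyTorch sketch of a squeeze-and-excitation-style channel attention block and a PixelShuffle upsampler; the layer sizes are illustrative, and the authors' exact block structure is in the linked repository.

```python
import torch
import torch.nn as nn

class ChannelAttention(nn.Module):
    """Reweight feature channels by globally pooled statistics (SE-style)."""
    def __init__(self, channels: int, reduction: int = 16):
        super().__init__()
        self.gate = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),                       # squeeze to (B, C, 1, 1)
            nn.Conv2d(channels, channels // reduction, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels // reduction, channels, 1),
            nn.Sigmoid(),                                  # per-channel weights in (0, 1)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * self.gate(x)

# Subpixel reconstruction: expand channels, then rearrange to 2x spatial size.
upsample = nn.Sequential(nn.Conv2d(64, 64 * 4, 3, padding=1), nn.PixelShuffle(2))
```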
6. arXiv:2011.06983 [pdf, other]
   Subjects: eess.SY (Systems and Control)
   Title: A Secure Distributed Ledger for Transactive Energy: The Electron Volt Exchange (EVE) Blockchain
   Authors: Shammya Saha, Nikhil Ravi, Kari Hreinsson, Jaejong Baek, Anna Scaglione, Nathan G. Johnson
   Abstract: The adoption of blockchain for transactive energy has gained significant momentum, as it allows mutually non-trusting agents to trade energy services in a trustless energy market. Research to date has assumed that the built-in Byzantine fault tolerance in recording transactions in a ledger is sufficient to ensure integrity. Such work must be extended to address security gaps, including random bilateral transactions that do not guarantee reliable and efficient market operation, and market participants having incentives to cheat when reporting actual production/consumption figures. Work herein introduces the Electron Volt Exchange framework with the following characteristics: 1) a distributed protocol for pricing and scheduling prosumers' production/consumption while keeping constraints and bids private, and 2) a distributed algorithm to prevent theft that verifies prosumers' compliance with scheduled transactions using information from grid sensors (such as smart meters) and mitigates the impact of false data injection attacks. The flexibility and robustness of the approach are demonstrated through simulation and implementation using Hyperledger Fabric.
   Submitted 13 November, 2020; originally announced November 2020.
   Comments: Accepted for Applied Energy
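The theft-prevention algorithm checks prosumers' scheduled transactions against grid-sensor readings. A minimal sketch of the core compliance test; the tolerance value and data layout are hypothetical, and the paper's version is distributed and additionally hardened against false data injection.

```python
def flag_noncompliant(scheduled: dict, metered: dict, tol_kwh: float = 0.5) -> list:
    """Return prosumer IDs whose smart-meter reading deviates from the
    scheduled transaction by more than a tolerance, flagging them for audit."""
    return [pid for pid, sched in scheduled.items()
            if abs(metered.get(pid, 0.0) - sched) > tol_kwh]

# flag_noncompliant({"p1": 3.2, "p2": 1.0}, {"p1": 3.1, "p2": 2.4})  -> ["p2"]
```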
href="/search/eess?searchtype=author&query=Li%2C+C">Chenghua Li</a>, <a href="/search/eess?searchtype=author&query=Leng%2C+C">Cong Leng</a>, <a href="/search/eess?searchtype=author&query=Pan%2C+Z">Zhihong Pan</a>, <a href="/search/eess?searchtype=author&query=Li%2C+B">Baopu Li</a> , et al. (14 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2011.04994v1-abstract-short" style="display: inline;"> This paper reviews the second AIM learned ISP challenge and provides the description of the proposed solutions and results. The participating teams were solving a real-world RAW-to-RGB mapping problem, where to goal was to map the original low-quality RAW images captured by the Huawei P20 device to the same photos obtained with the Canon 5D DSLR camera. The considered task embraced a number of com… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.04994v1-abstract-full').style.display = 'inline'; document.getElementById('2011.04994v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2011.04994v1-abstract-full" style="display: none;"> This paper reviews the second AIM learned ISP challenge and provides the description of the proposed solutions and results. The participating teams were solving a real-world RAW-to-RGB mapping problem, where to goal was to map the original low-quality RAW images captured by the Huawei P20 device to the same photos obtained with the Canon 5D DSLR camera. The considered task embraced a number of complex computer vision subtasks, such as image demosaicing, denoising, white balancing, color and contrast correction, demoireing, etc. The target metric used in this challenge combined fidelity scores (PSNR and SSIM) with solutions' perceptual results measured in a user study. The proposed solutions significantly improved the baseline results, defining the state-of-the-art for practical image signal processing pipeline modeling. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.04994v1-abstract-full').style.display = 'none'; document.getElementById('2011.04994v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 November, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published in ECCV 2020 Workshops (Advances in Image Manipulation), https://data.vision.ee.ethz.ch/cvl/aim20/</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2007.10581">arXiv:2007.10581</a> <span> [<a href="https://arxiv.org/pdf/2007.10581">pdf</a>, <a href="https://arxiv.org/format/2007.10581">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/JIOT.2020.3009540">10.1109/JIOT.2020.3009540 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Heterogeneous Task Offloading and Resource Allocations via Deep Recurrent Reinforcement Learning in Partial Observable Multi-Fog Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&query=Baek%2C+J">Jungyeon Baek</a>, <a href="/search/eess?searchtype=author&query=Kaddoum%2C+G">Georges Kaddoum</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2007.10581v1-abstract-short" style="display: inline;"> As wireless services and applications become more sophisticated and require faster and higher-capacity networks, there is a need for an efficient management of the execution of increasingly complex tasks based on the requirements of each application. In this regard, fog computing enables the integration of virtualized servers into networks and brings cloud services closer to end devices. In contra… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2007.10581v1-abstract-full').style.display = 'inline'; document.getElementById('2007.10581v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2007.10581v1-abstract-full" style="display: none;"> As wireless services and applications become more sophisticated and require faster and higher-capacity networks, there is a need for an efficient management of the execution of increasingly complex tasks based on the requirements of each application. In this regard, fog computing enables the integration of virtualized servers into networks and brings cloud services closer to end devices. In contrast to the cloud server, the computing capacity of fog nodes is limited and thus a single fog node might not be capable of computing-intensive tasks. In this context, task offloading can be particularly useful at the fog nodes by selecting the suitable nodes and proper resource management while guaranteeing the Quality-of-Service (QoS) requirements of the users. 
8. arXiv:2007.10581 [pdf, other]
   Subjects: cs.DC (Distributed, Parallel, and Cluster Computing); cs.MA (Multiagent Systems); eess.SP (Signal Processing)
   DOI: 10.1109/JIOT.2020.3009540
   Title: Heterogeneous Task Offloading and Resource Allocations via Deep Recurrent Reinforcement Learning in Partial Observable Multi-Fog Networks
   Authors: Jungyeon Baek, Georges Kaddoum
   Abstract: As wireless services and applications become more sophisticated and require faster and higher-capacity networks, there is a need for efficient management of the execution of increasingly complex tasks based on the requirements of each application. In this regard, fog computing enables the integration of virtualized servers into networks and brings cloud services closer to end devices. In contrast to the cloud server, the computing capacity of fog nodes is limited, and thus a single fog node might not be capable of computing-intensive tasks. In this context, task offloading can be particularly useful at the fog nodes by selecting suitable nodes and managing resources properly while guaranteeing the quality-of-service (QoS) requirements of the users. This paper studies the design of a joint task offloading and resource allocation control for heterogeneous service tasks in multi-fog-node systems. This problem is formulated as a partially observable stochastic game, in which each fog node cooperates to maximize the aggregated local rewards while the nodes have access only to local observations. To deal with partial observability, we apply a deep recurrent Q-network (DRQN) approach to approximate the optimal value functions. The solution is then compared to deep Q-network (DQN) and deep convolutional Q-network (DCQN) approaches to evaluate the performance of different neural networks. Moreover, to guarantee the convergence and accuracy of the neural network, an adjusted exploration-exploitation method is adopted. Numerical results show that the proposed algorithm can achieve a higher average success rate and lower average overflow than baseline methods.
   Submitted 20 July, 2020; originally announced July 2020.
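Under partial observability, each fog node conditions its Q-values on the history of local observations, which a recurrent network summarizes in its hidden state. A minimal PyTorch sketch of such a DRQN; the dimensions are illustrative and the replay/training loop is omitted.

```python
import torch
import torch.nn as nn

class DRQN(nn.Module):
    """Recurrent Q-network: maps a local observation sequence to per-action values."""
    def __init__(self, obs_dim: int, n_actions: int, hidden: int = 128):
        super().__init__()
        self.encoder = nn.Linear(obs_dim, hidden)
        self.lstm = nn.LSTM(hidden, hidden, batch_first=True)
        self.q_head = nn.Linear(hidden, n_actions)

    def forward(self, obs_seq: torch.Tensor, state=None):
        # obs_seq: (batch, time, obs_dim) of a node's local observations
        z = torch.relu(self.encoder(obs_seq))
        z, state = self.lstm(z, state)    # hidden state carries a belief over time
        return self.q_head(z), state      # Q-values: (batch, time, n_actions)
```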
9. arXiv:2005.08770 [pdf, other]
   Subjects: eess.SY (Systems and Control); eess.SP (Signal Processing)
   Title: Optimal Charging Method for Effective Li-ion Battery Life Extension Based on Reinforcement Learning
   Authors: Minho Kim, Jongchan Baek, Soohee Han
   Abstract: A reinforcement learning-based optimal charging strategy is proposed for Li-ion batteries to extend battery life and ensure end-user convenience. Unlike most previous studies, which do not reflect real-world scenarios well, in this work end users can set the charge time flexibly according to their own situation rather than minimizing the charge time; this is made possible by soft actor-critic (SAC), one of the state-of-the-art reinforcement learning algorithms. In this way, the battery is more likely to extend its life without inconveniencing end users. The amount of aging is calculated quantitatively based on an accurate electrochemical battery model and is directly minimized in the optimization procedure with SAC. Once offline learning is completed, SAC can deal not only with the flexible charge time but also with battery-model parameters that vary with aging; this is not the case for previous studies, in which time-consuming optimization has to be repeated for each battery model with a given set of parameter values. The validation results show that the proposed method can both extend battery life effectively and ensure end-user convenience.
   Submitted 18 May, 2020; originally announced May 2020.
   Comments: 11 pages, 9 figures
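The optimization directly penalizes model-based aging at each step while honoring the user-chosen charge deadline. A minimal sketch of a per-step reward with that structure; the weights, the SOC target, and the simple shortfall penalty are my placeholders, not the paper's electrochemical formulation.

```python
def charging_reward(aging_increment: float, soc: float, t: float, t_end: float,
                    soc_target: float = 0.8,
                    w_age: float = 1.0, w_miss: float = 10.0) -> float:
    """Penalize aging at every step; at the user-chosen end time, additionally
    penalize any shortfall from the requested state of charge."""
    r = -w_age * aging_increment          # aging from the battery model
    if t >= t_end:                        # deadline set flexibly by the end user
        r -= w_miss * max(0.0, soc_target - soc)
    return r
```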
href="/search/eess?searchtype=author&query=Lu%2C+W">Wen Lu</a>, <a href="/search/eess?searchtype=author&query=Liu%2C+J">Jing Liu</a>, <a href="/search/eess?searchtype=author&query=Yoon%2C+K">Kwangjin Yoon</a>, <a href="/search/eess?searchtype=author&query=Jeon%2C+T">Taegyun Jeon</a>, <a href="/search/eess?searchtype=author&query=Akita%2C+K">Kazutoshi Akita</a>, <a href="/search/eess?searchtype=author&query=Ooba%2C+T">Takeru Ooba</a>, <a href="/search/eess?searchtype=author&query=Ukita%2C+N">Norimichi Ukita</a>, <a href="/search/eess?searchtype=author&query=Luo%2C+Z">Zhipeng Luo</a>, <a href="/search/eess?searchtype=author&query=Yao%2C+Y">Yuehan Yao</a>, <a href="/search/eess?searchtype=author&query=Xu%2C+Z">Zhenyu Xu</a>, <a href="/search/eess?searchtype=author&query=He%2C+D">Dongliang He</a> , et al. (38 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2005.01056v1-abstract-short" style="display: inline;"> This paper reviews the NTIRE 2020 challenge on perceptual extreme super-resolution with focus on proposed solutions and results. The challenge task was to super-resolve an input image with a magnification factor 16 based on a set of prior examples of low and corresponding high resolution images. The goal is to obtain a network design capable to produce high resolution results with the best percept… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2005.01056v1-abstract-full').style.display = 'inline'; document.getElementById('2005.01056v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2005.01056v1-abstract-full" style="display: none;"> This paper reviews the NTIRE 2020 challenge on perceptual extreme super-resolution with focus on proposed solutions and results. The challenge task was to super-resolve an input image with a magnification factor 16 based on a set of prior examples of low and corresponding high resolution images. The goal is to obtain a network design capable to produce high resolution results with the best perceptual quality and similar to the ground truth. The track had 280 registered participants, and 19 teams submitted the final results. They gauge the state-of-the-art in single image super-resolution. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2005.01056v1-abstract-full').style.display = 'none'; document.getElementById('2005.01056v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 May, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">CVPRW 2020</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1905.10488">arXiv:1905.10488</a> <span> [<a href="https://arxiv.org/pdf/1905.10488">pdf</a>, <a href="https://arxiv.org/format/1905.10488">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> GAN2GAN: Generative Noise Learning for Blind Denoising with Single Noisy Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&query=Cha%2C+S">Sungmin Cha</a>, <a href="/search/eess?searchtype=author&query=Park%2C+T">Taeeon Park</a>, <a href="/search/eess?searchtype=author&query=Kim%2C+B">Byeongjoon Kim</a>, <a href="/search/eess?searchtype=author&query=Baek%2C+J">Jongduk Baek</a>, <a href="/search/eess?searchtype=author&query=Moon%2C+T">Taesup Moon</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1905.10488v5-abstract-short" style="display: inline;"> We tackle a challenging blind image denoising problem, in which only single distinct noisy images are available for training a denoiser, and no information about noise is known, except for it being zero-mean, additive, and independent of the clean image. In such a setting, which often occurs in practice, it is not possible to train a denoiser with the standard discriminative training or with the r… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1905.10488v5-abstract-full').style.display = 'inline'; document.getElementById('1905.10488v5-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1905.10488v5-abstract-full" style="display: none;"> We tackle a challenging blind image denoising problem, in which only single distinct noisy images are available for training a denoiser, and no information about noise is known, except for it being zero-mean, additive, and independent of the clean image. In such a setting, which often occurs in practice, it is not possible to train a denoiser with the standard discriminative training or with the recently developed Noise2Noise (N2N) training; the former requires the underlying clean image for the given noisy image, and the latter requires two independently realized noisy image pair for a clean image. To that end, we propose GAN2GAN (Generated-Artificial-Noise to Generated-Artificial-Noise) method that first learns a generative model that can 1) simulate the noise in the given noisy images and 2) generate a rough, noisy estimates of the clean images, then 3) iteratively trains a denoiser with subsequently synthesized noisy image pairs (as in N2N), obtained from the generative model. 
11. arXiv:1905.10488 [pdf, other]
    Subjects: eess.IV (Image and Video Processing); cs.CV (Computer Vision and Pattern Recognition); cs.LG (Machine Learning)
    Title: GAN2GAN: Generative Noise Learning for Blind Denoising with Single Noisy Images
    Authors: Sungmin Cha, Taeeon Park, Byeongjoon Kim, Jongduk Baek, Taesup Moon
    Abstract: We tackle a challenging blind image denoising problem, in which only single distinct noisy images are available for training a denoiser and no information about the noise is known, except that it is zero-mean, additive, and independent of the clean image. In such a setting, which often occurs in practice, it is not possible to train a denoiser with standard discriminative training or with the recently developed Noise2Noise (N2N) training; the former requires the underlying clean image for each given noisy image, and the latter requires an independently realized noisy image pair for each clean image. To that end, we propose the GAN2GAN (Generated-Artificial-Noise to Generated-Artificial-Noise) method, which first learns a generative model that can 1) simulate the noise in the given noisy images and 2) generate rough, noisy estimates of the clean images, then 3) iteratively trains a denoiser with subsequently synthesized noisy image pairs (as in N2N) obtained from the generative model. We show that a denoiser trained with our GAN2GAN method achieves impressive denoising performance on both synthetic and real-world datasets in the blind denoising setting: it almost matches the performance of standard discriminatively-trained or N2N-trained models that have more information than ours, and it significantly outperforms the recent baseline for the same setting, e.g., Noise2Void, and a more conventional yet strong one, BM3D. The official code of our method is available at https://github.com/csm9493/GAN2GAN.
    Submitted 4 July, 2021; v1 submitted 24 May, 2019; originally announced May 2019.
    Comments: ICLR 2021 camera-ready version
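Step 3 of GAN2GAN trains the denoiser Noise2Noise-style: two independent draws from the learned noise generator are added to a generated rough clean estimate, and one noisy composite becomes the regression target for the other. A minimal PyTorch sketch of that update; the denoiser and noise_gen interfaces are hypothetical stand-ins for the learned models.

```python
import torch
import torch.nn.functional as F

def gan2gan_step(denoiser, noise_gen, clean_est, optimizer):
    """One N2N-style update on a synthesized noisy pair.
    clean_est: rough clean estimates from the generative model, (B, C, H, W).
    noise_gen: callable returning an independent noise sample of a given shape."""
    n1, n2 = noise_gen(clean_est.shape), noise_gen(clean_est.shape)
    noisy_in, noisy_tgt = clean_est + n1, clean_est + n2   # independent pair
    loss = F.mse_loss(denoiser(noisy_in), noisy_tgt)       # target is itself noisy
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
```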
href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>