CINXE.COM

Search | arXiv e-print repository

<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;21 of 21 results for author: <span class="mathjax">Jin, P</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/eess" aria-role="search"> Searching in archive <strong>eess</strong>. <a href="/search/?searchtype=author&amp;query=Jin%2C+P">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Jin, P"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Jin%2C+P&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Jin, P"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.00619">arXiv:2502.00619</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.00619">pdf</a>, <a href="https://arxiv.org/format/2502.00619">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Distribution-aware Fairness Learning in Medical Image Segmentation From A Control-Theoretic Perspective </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Oh%2C+Y">Yujin Oh</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P">Pengfei Jin</a>, <a href="/search/eess?searchtype=author&amp;query=Park%2C+S">Sangjoon Park</a>, <a href="/search/eess?searchtype=author&amp;query=Kim%2C+S">Sekeun Kim</a>, <a href="/search/eess?searchtype=author&amp;query=Yoon%2C+S">Siyeop Yoon</a>, <a href="/search/eess?searchtype=author&amp;query=Kim%2C+K">Kyungsang Kim</a>, <a href="/search/eess?searchtype=author&amp;query=Kim%2C+J+S">Jin Sung Kim</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+X">Xiang Li</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+Q">Quanzheng Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.00619v1-abstract-short" style="display: inline;"> Ensuring fairness in medical image segmentation is critical due to biases in imbalanced clinical data acquisition caused by demographic attributes (e.g., age, sex, race) and clinical factors (e.g., disease severity). To address these challenges, we introduce Distribution-aware Mixture of Experts (dMoE), inspired by optimal control theory. We provide a comprehensive analysis of its underlying mecha&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.00619v1-abstract-full').style.display = 'inline'; document.getElementById('2502.00619v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.00619v1-abstract-full" style="display: none;"> Ensuring fairness in medical image segmentation is critical due to biases in imbalanced clinical data acquisition caused by demographic attributes (e.g., age, sex, race) and clinical factors (e.g., disease severity). To address these challenges, we introduce Distribution-aware Mixture of Experts (dMoE), inspired by optimal control theory. We provide a comprehensive analysis of its underlying mechanisms and clarify dMoE&#39;s role in adapting to heterogeneous distributions in medical image segmentation. Furthermore, we integrate dMoE into multiple network architectures, demonstrating its broad applicability across diverse medical image analysis tasks. By incorporating demographic and clinical factors, dMoE achieves state-of-the-art performance on two 2D benchmark datasets and a 3D in-house dataset. Our results highlight the effectiveness of dMoE in mitigating biases from imbalanced distributions, offering a promising approach to bridging control theory and medical image segmentation within fairness learning paradigms. The source code will be made available. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.00619v1-abstract-full').style.display = 'none'; document.getElementById('2502.00619v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 3 figures, 9 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.03143">arXiv:2410.03143</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.03143">pdf</a>, <a href="https://arxiv.org/format/2410.03143">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> ECHOPulse: ECG controlled echocardio-grams video generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Li%2C+Y">Yiwei Li</a>, <a href="/search/eess?searchtype=author&amp;query=Kim%2C+S">Sekeun Kim</a>, <a href="/search/eess?searchtype=author&amp;query=Wu%2C+Z">Zihao Wu</a>, <a href="/search/eess?searchtype=author&amp;query=Jiang%2C+H">Hanqi Jiang</a>, <a href="/search/eess?searchtype=author&amp;query=Pan%2C+Y">Yi Pan</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P">Pengfei Jin</a>, <a href="/search/eess?searchtype=author&amp;query=Song%2C+S">Sifan Song</a>, <a href="/search/eess?searchtype=author&amp;query=Shi%2C+Y">Yucheng Shi</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+T">Tianming Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+Q">Quanzheng Li</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+X">Xiang Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.03143v2-abstract-short" style="display: inline;"> Echocardiography (ECHO) is essential for cardiac assessments, but its video quality and interpretation heavily relies on manual expertise, leading to inconsistent results from clinical and portable devices. ECHO video generation offers a solution by improving automated monitoring through synthetic data and generating high-quality videos from routine health data. However, existing models often face&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.03143v2-abstract-full').style.display = 'inline'; document.getElementById('2410.03143v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.03143v2-abstract-full" style="display: none;"> Echocardiography (ECHO) is essential for cardiac assessments, but its video quality and interpretation heavily relies on manual expertise, leading to inconsistent results from clinical and portable devices. ECHO video generation offers a solution by improving automated monitoring through synthetic data and generating high-quality videos from routine health data. However, existing models often face high computational costs, slow inference, and rely on complex conditional prompts that require experts&#39; annotations. To address these challenges, we propose ECHOPULSE, an ECG-conditioned ECHO video generation model. ECHOPULSE introduces two key advancements: (1) it accelerates ECHO video generation by leveraging VQ-VAE tokenization and masked visual token modeling for fast decoding, and (2) it conditions on readily accessible ECG signals, which are highly coherent with ECHO videos, bypassing complex conditional prompts. To the best of our knowledge, this is the first work to use time-series prompts like ECG signals for ECHO video generation. ECHOPULSE not only enables controllable synthetic ECHO data generation but also provides updated cardiac function information for disease monitoring and prediction beyond ECG alone. Evaluations on three public and private datasets demonstrate state-of-the-art performance in ECHO video generation across both qualitative and quantitative measures. Additionally, ECHOPULSE can be easily generalized to other modality generation tasks, such as cardiac MRI, fMRI, and 3D CT generation. Demo can seen from \url{https://github.com/levyisthebest/ECHOPulse_Prelease}. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.03143v2-abstract-full').style.display = 'none'; document.getElementById('2410.03143v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.14977">arXiv:2408.14977</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.14977">pdf</a>, <a href="https://arxiv.org/format/2408.14977">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> LN-Gen: Rectal Lymph Nodes Generation via Anatomical Features </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Guo%2C+W">Weidong Guo</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+H">Hantao Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Wan%2C+S">Shouhong Wan</a>, <a href="/search/eess?searchtype=author&amp;query=Zou%2C+B">Bingbing Zou</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+W">Wanqin Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P">Peiquan Jin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.14977v1-abstract-short" style="display: inline;"> Accurate segmentation of rectal lymph nodes is crucial for the staging and treatment planning of rectal cancer. However, the complexity of the surrounding anatomical structures and the scarcity of annotated data pose significant challenges. This study introduces a novel lymph node synthesis technique aimed at generating diverse and realistic synthetic rectal lymph node samples to mitigate the reli&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.14977v1-abstract-full').style.display = 'inline'; document.getElementById('2408.14977v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.14977v1-abstract-full" style="display: none;"> Accurate segmentation of rectal lymph nodes is crucial for the staging and treatment planning of rectal cancer. However, the complexity of the surrounding anatomical structures and the scarcity of annotated data pose significant challenges. This study introduces a novel lymph node synthesis technique aimed at generating diverse and realistic synthetic rectal lymph node samples to mitigate the reliance on manual annotation. Unlike direct diffusion methods, which often produce masks that are discontinuous and of suboptimal quality, our approach leverages an implicit SDF-based method for mask generation, ensuring the production of continuous, stable, and morphologically diverse masks. Experimental results demonstrate that our synthetic data significantly improves segmentation performance. Our work highlights the potential of diffusion model for accurately synthesizing structurally complex lesions, such as lymph nodes in rectal cancer, alleviating the challenge of limited annotated data in this field and aiding in advancements in rectal cancer diagnosis and treatment. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.14977v1-abstract-full').style.display = 'none'; document.getElementById('2408.14977v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.04162">arXiv:2407.04162</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.04162">pdf</a>, <a href="https://arxiv.org/format/2407.04162">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Measurement Embedded Schr枚dinger Bridge for Inverse Problems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Wang%2C+Y">Yuang Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P">Pengfei Jin</a>, <a href="/search/eess?searchtype=author&amp;query=Yoon%2C+S">Siyeop Yoon</a>, <a href="/search/eess?searchtype=author&amp;query=Tivnan%2C+M">Matthew Tivnan</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+Q">Quanzheng Li</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+L">Li Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Wu%2C+D">Dufan Wu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.04162v1-abstract-short" style="display: inline;"> Score-based diffusion models are frequently employed as structural priors in inverse problems. However, their iterative denoising process, initiated from Gaussian noise, often results in slow inference speeds. The Image-to-Image Schr枚dinger Bridge (I$^2$SB), which begins with the corrupted image, presents a promising alternative as a prior for addressing inverse problems. In this work, we introduc&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.04162v1-abstract-full').style.display = 'inline'; document.getElementById('2407.04162v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.04162v1-abstract-full" style="display: none;"> Score-based diffusion models are frequently employed as structural priors in inverse problems. However, their iterative denoising process, initiated from Gaussian noise, often results in slow inference speeds. The Image-to-Image Schr枚dinger Bridge (I$^2$SB), which begins with the corrupted image, presents a promising alternative as a prior for addressing inverse problems. In this work, we introduce the Measurement Embedded Schr枚dinger Bridge (MESB). MESB establishes Schr枚dinger Bridges between the distribution of corrupted images and the distribution of clean images given observed measurements. Based on optimal transport theory, we derive the forward and backward processes of MESB. Through validation on diverse inverse problems, our proposed approach exhibits superior performance compared to existing Schr枚dinger Bridge-based inverse problems solvers in both visual quality and quantitative metrics. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.04162v1-abstract-full').style.display = 'none'; document.getElementById('2407.04162v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 2 figures, Neurips preprint</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.06069">arXiv:2403.06069</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.06069">pdf</a>, <a href="https://arxiv.org/format/2403.06069">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Implicit Image-to-Image Schrodinger Bridge for Image Restoration </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Wang%2C+Y">Yuang Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Yoon%2C+S">Siyeop Yoon</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P">Pengfei Jin</a>, <a href="/search/eess?searchtype=author&amp;query=Tivnan%2C+M">Matthew Tivnan</a>, <a href="/search/eess?searchtype=author&amp;query=Song%2C+S">Sifan Song</a>, <a href="/search/eess?searchtype=author&amp;query=Chen%2C+Z">Zhennong Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Hu%2C+R">Rui Hu</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+L">Li Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+Q">Quanzheng Li</a>, <a href="/search/eess?searchtype=author&amp;query=Chen%2C+Z">Zhiqiang Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Wu%2C+D">Dufan Wu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.06069v2-abstract-short" style="display: inline;"> Diffusion-based models are widely recognized for their effectiveness in image restoration tasks; however, their iterative denoising process, which begins from Gaussian noise, often results in slow inference speeds. The Image-to-Image Schr枚dinger Bridge (I$^2$SB) presents a promising alternative by starting the generative process from corrupted images and leveraging training techniques from score-b&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.06069v2-abstract-full').style.display = 'inline'; document.getElementById('2403.06069v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.06069v2-abstract-full" style="display: none;"> Diffusion-based models are widely recognized for their effectiveness in image restoration tasks; however, their iterative denoising process, which begins from Gaussian noise, often results in slow inference speeds. The Image-to-Image Schr枚dinger Bridge (I$^2$SB) presents a promising alternative by starting the generative process from corrupted images and leveraging training techniques from score-based diffusion models. In this paper, we introduce the Implicit Image-to-Image Schr枚dinger Bridge (I$^3$SB) to further accelerate the generative process of I$^2$SB. I$^3$SB reconfigures the generative process into a non-Markovian framework by incorporating the initial corrupted image into each step, while ensuring that the marginal distribution aligns with that of I$^2$SB. This allows for the direct use of the pretrained network from I$^2$SB. Extensive experiments on natural images, human face images, and medical images validate the acceleration benefits of I$^3$SB. Compared to I$^2$SB, I$^3$SB achieves the same perceptual quality with fewer generative steps, while maintaining equal or improved fidelity to the ground truth. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.06069v2-abstract-full').style.display = 'none'; document.getElementById('2403.06069v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">23 pages, 8 figures, submitted to Pattern Recognition</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.04148">arXiv:2401.04148</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2401.04148">pdf</a>, <a href="https://arxiv.org/format/2401.04148">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Online Test-Time Adaptation of Spatial-Temporal Traffic Flow Forecasting </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Guo%2C+P">Pengxin Guo</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P">Pengrong Jin</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+Z">Ziyue Li</a>, <a href="/search/eess?searchtype=author&amp;query=Bai%2C+L">Lei Bai</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+Y">Yu Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.04148v1-abstract-short" style="display: inline;"> Accurate spatial-temporal traffic flow forecasting is crucial in aiding traffic managers in implementing control measures and assisting drivers in selecting optimal travel routes. Traditional deep-learning based methods for traffic flow forecasting typically rely on historical data to train their models, which are then used to make predictions on future data. However, the performance of the traine&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.04148v1-abstract-full').style.display = 'inline'; document.getElementById('2401.04148v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.04148v1-abstract-full" style="display: none;"> Accurate spatial-temporal traffic flow forecasting is crucial in aiding traffic managers in implementing control measures and assisting drivers in selecting optimal travel routes. Traditional deep-learning based methods for traffic flow forecasting typically rely on historical data to train their models, which are then used to make predictions on future data. However, the performance of the trained model usually degrades due to the temporal drift between the historical and future data. To make the model trained on historical data better adapt to future data in a fully online manner, this paper conducts the first study of the online test-time adaptation techniques for spatial-temporal traffic flow forecasting problems. To this end, we propose an Adaptive Double Correction by Series Decomposition (ADCSD) method, which first decomposes the output of the trained model into seasonal and trend-cyclical parts and then corrects them by two separate modules during the testing phase using the latest observed data entry by entry. In the proposed ADCSD method, instead of fine-tuning the whole trained model during the testing phase, a lite network is attached after the trained model, and only the lite network is fine-tuned in the testing process each time a data entry is observed. Moreover, to satisfy that different time series variables may have different levels of temporal drift, two adaptive vectors are adopted to provide different weights for different time series variables. Extensive experiments on four real-world traffic flow forecasting datasets demonstrate the effectiveness of the proposed ADCSD method. The code is available at https://github.com/Pengxin-Guo/ADCSD. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.04148v1-abstract-full').style.display = 'none'; document.getElementById('2401.04148v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.13539">arXiv:2309.13539</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2309.13539">pdf</a>, <a href="https://arxiv.org/format/2309.13539">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> MediViSTA: Medical Video Segmentation via Temporal Fusion SAM Adaptation for Echocardiography </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Kim%2C+S">Sekeun Kim</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P">Pengfei Jin</a>, <a href="/search/eess?searchtype=author&amp;query=Chen%2C+C">Cheng Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Kim%2C+K">Kyungsang Kim</a>, <a href="/search/eess?searchtype=author&amp;query=Lyu%2C+Z">Zhiliang Lyu</a>, <a href="/search/eess?searchtype=author&amp;query=Ren%2C+H">Hui Ren</a>, <a href="/search/eess?searchtype=author&amp;query=Kim%2C+S">Sunghwan Kim</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+Z">Zhengliang Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Zhong%2C+A">Aoxiao Zhong</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+T">Tianming Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+X">Xiang Li</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+Q">Quanzheng Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2309.13539v5-abstract-short" style="display: inline;"> Despite achieving impressive results in general-purpose semantic segmentation with strong generalization on natural images, the Segment Anything Model (SAM) has shown less precision and stability in medical image segmentation. In particular, the original SAM architecture is designed for 2D natural images and is therefore not support to handle three-dimensional information, which is particularly im&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.13539v5-abstract-full').style.display = 'inline'; document.getElementById('2309.13539v5-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2309.13539v5-abstract-full" style="display: none;"> Despite achieving impressive results in general-purpose semantic segmentation with strong generalization on natural images, the Segment Anything Model (SAM) has shown less precision and stability in medical image segmentation. In particular, the original SAM architecture is designed for 2D natural images and is therefore not support to handle three-dimensional information, which is particularly important for medical imaging modalities that are often volumetric or video data. In this paper, we introduce MediViSTA, a parameter-efficient fine-tuning method designed to adapt the vision foundation model for medical video, with a specific focus on echocardiographic segmentation. To achieve spatial adaptation, we propose a frequency feature fusion technique that injects spatial frequency information from a CNN branch. For temporal adaptation, we integrate temporal adapters within the transformer blocks of the image encoder. Using a fine-tuning strategy, only a small subset of pre-trained parameters is updated, allowing efficient adaptation to echocardiographic data. The effectiveness of our method has been comprehensively evaluated on three datasets, comprising two public datasets and one multi-center in-house dataset. Our method consistently outperforms various state-of-the-art approaches without using any prompts. Furthermore, our model exhibits strong generalization capabilities on unseen datasets, surpassing the second-best approach by 2.15\% in Dice and 0.09 in temporal consistency. The results demonstrate the potential of MediViSTA to significantly advance echocardiographical video segmentation, offering improved accuracy and robustness in cardiac assessment applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.13539v5-abstract-full').style.display = 'none'; document.getElementById('2309.13539v5-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.08283">arXiv:2308.08283</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.08283">pdf</a>, <a href="https://arxiv.org/format/2308.08283">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> CARE: A Large Scale CT Image Dataset and Clinical Applicable Benchmark Model for Rectal Cancer Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+H">Hantao Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Guo%2C+W">Weidong Guo</a>, <a href="/search/eess?searchtype=author&amp;query=Qiu%2C+C">Chenyang Qiu</a>, <a href="/search/eess?searchtype=author&amp;query=Wan%2C+S">Shouhong Wan</a>, <a href="/search/eess?searchtype=author&amp;query=Zou%2C+B">Bingbing Zou</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+W">Wanqin Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P">Peiquan Jin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.08283v1-abstract-short" style="display: inline;"> Rectal cancer segmentation of CT image plays a crucial role in timely clinical diagnosis, radiotherapy treatment, and follow-up. Although current segmentation methods have shown promise in delineating cancerous tissues, they still encounter challenges in achieving high segmentation precision. These obstacles arise from the intricate anatomical structures of the rectum and the difficulties in perfo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.08283v1-abstract-full').style.display = 'inline'; document.getElementById('2308.08283v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.08283v1-abstract-full" style="display: none;"> Rectal cancer segmentation of CT image plays a crucial role in timely clinical diagnosis, radiotherapy treatment, and follow-up. Although current segmentation methods have shown promise in delineating cancerous tissues, they still encounter challenges in achieving high segmentation precision. These obstacles arise from the intricate anatomical structures of the rectum and the difficulties in performing differential diagnosis of rectal cancer. Additionally, a major obstacle is the lack of a large-scale, finely annotated CT image dataset for rectal cancer segmentation. To address these issues, this work introduces a novel large scale rectal cancer CT image dataset CARE with pixel-level annotations for both normal and cancerous rectum, which serves as a valuable resource for algorithm research and clinical application development. Moreover, we propose a novel medical cancer lesion segmentation benchmark model named U-SAM. The model is specifically designed to tackle the challenges posed by the intricate anatomical structures of abdominal organs by incorporating prompt information. U-SAM contains three key components: promptable information (e.g., points) to aid in target area localization, a convolution module for capturing low-level lesion details, and skip-connections to preserve and recover spatial information during the encoding-decoding process. To evaluate the effectiveness of U-SAM, we systematically compare its performance with several popular segmentation methods on the CARE dataset. The generalization of the model is further verified on the WORD dataset. Extensive experiments demonstrate that the proposed U-SAM outperforms state-of-the-art methods on these two datasets. These experiments can serve as the baseline for future research and clinical application development. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.08283v1-abstract-full').style.display = 'none'; document.getElementById('2308.08283v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.15388">arXiv:2307.15388</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2307.15388">pdf</a>, <a href="https://arxiv.org/format/2307.15388">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Geophysics">physics.geo-ph</span> </div> </div> <p class="title is-5 mathjax"> An Empirical Study of Large-Scale Data-Driven Full Waveform Inversion </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P">Peng Jin</a>, <a href="/search/eess?searchtype=author&amp;query=Feng%2C+Y">Yinan Feng</a>, <a href="/search/eess?searchtype=author&amp;query=Feng%2C+S">Shihang Feng</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+H">Hanchen Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Chen%2C+Y">Yinpeng Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Consolvo%2C+B">Benjamin Consolvo</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+Z">Zicheng Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Lin%2C+Y">Youzuo Lin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.15388v2-abstract-short" style="display: inline;"> This paper investigates the impact of big data on deep learning models to help solve the full waveform inversion (FWI) problem. While it is well known that big data can boost the performance of deep learning models in many tasks, its effectiveness has not been validated for FWI. To address this gap, we present an empirical study that investigates how deep learning models in FWI behave when trained&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.15388v2-abstract-full').style.display = 'inline'; document.getElementById('2307.15388v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.15388v2-abstract-full" style="display: none;"> This paper investigates the impact of big data on deep learning models to help solve the full waveform inversion (FWI) problem. While it is well known that big data can boost the performance of deep learning models in many tasks, its effectiveness has not been validated for FWI. To address this gap, we present an empirical study that investigates how deep learning models in FWI behave when trained on OpenFWI, a collection of large-scale, multi-structural, synthetic datasets published recently. In particular, we train and evaluate the FWI models on a combination of 10 2D subsets in OpenFWI that contain 470K pairs of seismic data and velocity maps in total. Our experiments demonstrate that training on the combined dataset yields an average improvement of 13.03% in MAE, 7.19% in MSE and 1.87% in SSIM compared to each split dataset, and an average improvement of 28.60%, 21.55% and 8.22% in the leave-one-out generalization test. We further demonstrate that model capacity needs to scale in accordance with data size for optimal improvement, where our largest model yields an average improvement of 20.06%, 13.39% and 0.72% compared to the smallest one. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.15388v2-abstract-full').style.display = 'none'; document.getElementById('2307.15388v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.13314">arXiv:2305.13314</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.13314">pdf</a>, <a href="https://arxiv.org/format/2305.13314">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Geophysics">physics.geo-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Auto-Linear Phenomenon in Subsurface Imaging </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Feng%2C+Y">Yinan Feng</a>, <a href="/search/eess?searchtype=author&amp;query=Chen%2C+Y">Yinpeng Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P">Peng Jin</a>, <a href="/search/eess?searchtype=author&amp;query=Feng%2C+S">Shihang Feng</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+Z">Zicheng Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Lin%2C+Y">Youzuo Lin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.13314v3-abstract-short" style="display: inline;"> Subsurface imaging involves solving full waveform inversion (FWI) to predict geophysical properties from measurements. This problem can be reframed as an image-to-image translation, with the usual approach being to train an encoder-decoder network using paired data from two domains: geophysical property and measurement. A recent seminal work (InvLINT) demonstrates there is only a linear mapping be&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.13314v3-abstract-full').style.display = 'inline'; document.getElementById('2305.13314v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.13314v3-abstract-full" style="display: none;"> Subsurface imaging involves solving full waveform inversion (FWI) to predict geophysical properties from measurements. This problem can be reframed as an image-to-image translation, with the usual approach being to train an encoder-decoder network using paired data from two domains: geophysical property and measurement. A recent seminal work (InvLINT) demonstrates there is only a linear mapping between the latent spaces of the two domains, and the decoder requires paired data for training. This paper extends this direction by demonstrating that only linear mapping necessitates paired data, while both the encoder and decoder can be learned from their respective domains through self-supervised learning. This unveils an intriguing phenomenon (named Auto-Linear) where the self-learned features of two separate domains are automatically linearly correlated. Compared with existing methods, our Auto-Linear has four advantages: (a) solving both forward and inverse modeling simultaneously, (b) applicable to different subsurface imaging tasks and achieving markedly better results than previous methods, (c)enhanced performance, especially in scenarios with limited paired data and in the presence of noisy data, and (d) strong generalization ability of the trained encoder and decoder. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.13314v3-abstract-full').style.display = 'none'; document.getElementById('2305.13314v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2304.07143">arXiv:2304.07143</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2304.07143">pdf</a>, <a href="https://arxiv.org/format/2304.07143">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TIV.2024.3409468">10.1109/TIV.2024.3409468 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Car-Following Models: A Multidisciplinary Review </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+T">Tianya Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=D.%2C+P">Ph. D.</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P+J">Peter J. Jin</a>, <a href="/search/eess?searchtype=author&amp;query=D.%2C+P">Ph. D.</a>, <a href="/search/eess?searchtype=author&amp;query=McQuade%2C+S+T">Sean T. McQuade</a>, <a href="/search/eess?searchtype=author&amp;query=D.%2C+P">Ph. D.</a>, <a href="/search/eess?searchtype=author&amp;query=Bayen%2C+A">Alexandre Bayen</a>, <a href="/search/eess?searchtype=author&amp;query=D.%2C+P">Ph. D.</a>, <a href="/search/eess?searchtype=author&amp;query=Piccoli%2C+B">Benedetto Piccoli</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2304.07143v5-abstract-short" style="display: inline;"> Car-following (CF) algorithms are crucial components of traffic simulations and have been integrated into many production vehicles equipped with Advanced Driving Assistance Systems (ADAS). Insights from the model of car-following behavior help us understand the causes of various macro phenomena that arise from interactions between pairs of vehicles. Car-following models encompass multiple discipli&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.07143v5-abstract-full').style.display = 'inline'; document.getElementById('2304.07143v5-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2304.07143v5-abstract-full" style="display: none;"> Car-following (CF) algorithms are crucial components of traffic simulations and have been integrated into many production vehicles equipped with Advanced Driving Assistance Systems (ADAS). Insights from the model of car-following behavior help us understand the causes of various macro phenomena that arise from interactions between pairs of vehicles. Car-following models encompass multiple disciplines, including traffic engineering, physics, dynamic system control, cognitive science, machine learning, and reinforcement learning. This paper presents an extensive survey that highlights the differences, complementarities, and overlaps among microscopic traffic flow and control models based on their underlying principles and design logic. It reviews representative algorithms, ranging from theory-based kinematic models, Psycho-Physical Models, and Adaptive cruise control models to data-driven algorithms like Reinforcement Learning (RL) and Imitation Learning (IL). The manuscript discusses the strengths and limitations of these models and explores their applications in different contexts. This review synthesizes existing researches across different domains to fill knowledge gaps and offer guidance for future research by identifying the latest trends in car following models and their applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.07143v5-abstract-full').style.display = 'none'; document.getElementById('2304.07143v5-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">IEEE Transactions on Intelligent Vehicles</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2204.13731">arXiv:2204.13731</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2204.13731">pdf</a>, <a href="https://arxiv.org/format/2204.13731">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Geophysics">physics.geo-ph</span> </div> </div> <p class="title is-5 mathjax"> An Intriguing Property of Geophysics Inversion </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Feng%2C+Y">Yinan Feng</a>, <a href="/search/eess?searchtype=author&amp;query=Chen%2C+Y">Yinpeng Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Feng%2C+S">Shihang Feng</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P">Peng Jin</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+Z">Zicheng Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Lin%2C+Y">Youzuo Lin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2204.13731v2-abstract-short" style="display: inline;"> Inversion techniques are widely used to reconstruct subsurface physical properties (e.g., velocity, conductivity) from surface-based geophysical measurements (e.g., seismic, electric/magnetic (EM) data). The problems are governed by partial differential equations (PDEs) like the wave or Maxwell&#39;s equations. Solving geophysical inversion problems is challenging due to the ill-posedness and high com&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.13731v2-abstract-full').style.display = 'inline'; document.getElementById('2204.13731v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2204.13731v2-abstract-full" style="display: none;"> Inversion techniques are widely used to reconstruct subsurface physical properties (e.g., velocity, conductivity) from surface-based geophysical measurements (e.g., seismic, electric/magnetic (EM) data). The problems are governed by partial differential equations (PDEs) like the wave or Maxwell&#39;s equations. Solving geophysical inversion problems is challenging due to the ill-posedness and high computational cost. To alleviate those issues, recent studies leverage deep neural networks to learn the inversion mappings from measurements to the property directly. In this paper, we show that such a mapping can be well modeled by a very shallow (but not wide) network with only five layers. This is achieved based on our new finding of an intriguing property: a near-linear relationship between the input and output, after applying integral transform in high dimensional space. In particular, when dealing with the inversion from seismic data to subsurface velocity governed by a wave equation, the integral results of velocity with Gaussian kernels are linearly correlated to the integral of seismic data with sine kernels. Furthermore, this property can be easily turned into a light-weight encoder-decoder network for inversion. The encoder contains the integration of seismic data and the linear transformation without need for fine-tuning. The decoder only consists of a single transformer block to reverse the integral of velocity. Experiments show that this interesting property holds for two geophysics inversion problems over four different datasets. Compared to much deeper InversionNet, our method achieves comparable accuracy, but consumes significantly fewer parameters. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.13731v2-abstract-full').style.display = 'none'; document.getElementById('2204.13731v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 April, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2202.11377">arXiv:2202.11377</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2202.11377">pdf</a>, <a href="https://arxiv.org/format/2202.11377">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Multi-scale Sparse Representation-Based Shadow Inpainting for Retinal OCT Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Tang%2C+Y">Yaoqi Tang</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+Y">Yufan Li</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+H">Hongshan Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+J">Jiaxuan Li</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P">Peiyao Jin</a>, <a href="/search/eess?searchtype=author&amp;query=Gan%2C+Y">Yu Gan</a>, <a href="/search/eess?searchtype=author&amp;query=Ling%2C+Y">Yuye Ling</a>, <a href="/search/eess?searchtype=author&amp;query=Su%2C+Y">Yikai Su</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2202.11377v1-abstract-short" style="display: inline;"> Inpainting shadowed regions cast by superficial blood vessels in retinal optical coherence tomography (OCT) images is critical for accurate and robust machine analysis and clinical diagnosis. Traditional sequence-based approaches such as propagating neighboring information to gradually fill in the missing regions are cost-effective. But they generate less satisfactory outcomes when dealing with la&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.11377v1-abstract-full').style.display = 'inline'; document.getElementById('2202.11377v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2202.11377v1-abstract-full" style="display: none;"> Inpainting shadowed regions cast by superficial blood vessels in retinal optical coherence tomography (OCT) images is critical for accurate and robust machine analysis and clinical diagnosis. Traditional sequence-based approaches such as propagating neighboring information to gradually fill in the missing regions are cost-effective. But they generate less satisfactory outcomes when dealing with larger missing regions and texture-rich structures. Emerging deep learning-based methods such as encoder-decoder networks have shown promising results in natural image inpainting tasks. However, they typically need a long computational time for network training in addition to the high demand on the size of datasets, which makes it difficult to be applied on often small medical datasets. To address these challenges, we propose a novel multi-scale shadow inpainting framework for OCT images by synergically applying sparse representation and deep learning: sparse representation is used to extract features from a small amount of training images for further inpainting and to regularize the image after the multi-scale image fusion, while convolutional neural network (CNN) is employed to enhance the image quality. During the image inpainting, we divide preprocessed input images into different branches based on the shadow width to harvest complementary information from different scales. Finally, a sparse representation-based regularizing module is designed to refine the generated contents after multi-scale feature aggregation. Experiments are conducted to compare our proposal versus both traditional and deep learning-based techniques on synthetic and real-world shadows. Results demonstrate that our proposed method achieves favorable image inpainting in terms of visual quality and quantitative metrics, especially when wide shadows are presented. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.11377v1-abstract-full').style.display = 'none'; document.getElementById('2202.11377v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2201.04756">arXiv:2201.04756</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2201.04756">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1155/2022/2771085">10.1155/2022/2771085 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Roadside Lidar Vehicle Detection and Tracking Using Range And Intensity Background Subtraction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+T">Tianya Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P+J">Peter J. Jin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2201.04756v5-abstract-short" style="display: inline;"> In this paper, we developed the solution of roadside LiDAR object detection using a combination of two unsupervised learning algorithms. The 3D point clouds are firstly converted into spherical coordinates and filled into the elevation-azimuth matrix using a hash function. After that, the raw LiDAR data were rearranged into new data structures to store the information of range, azimuth, and intens&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.04756v5-abstract-full').style.display = 'inline'; document.getElementById('2201.04756v5-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2201.04756v5-abstract-full" style="display: none;"> In this paper, we developed the solution of roadside LiDAR object detection using a combination of two unsupervised learning algorithms. The 3D point clouds are firstly converted into spherical coordinates and filled into the elevation-azimuth matrix using a hash function. After that, the raw LiDAR data were rearranged into new data structures to store the information of range, azimuth, and intensity. Then, the Dynamic Mode Decomposition method is applied to decompose the LiDAR data into low-rank backgrounds and sparse foregrounds based on intensity channel pattern recognition. The Coarse Fine Triangle Algorithm (CFTA) automatically finds the dividing value to separate the moving targets from static background according to range information. After intensity and range background subtraction, the foreground moving objects will be detected using a density-based detector and encoded into the state-space model for tracking. The output of the proposed solution includes vehicle trajectories that can enable many mobility and safety applications. The method was validated at both path and point levels and outperformed the state-of-the-art. In contrast to the previous methods that process directly on the scattered and discrete point clouds, the dynamic classification method can establish the less sophisticated linear relationship of the 3D measurement data, which captures the spatial-temporal structure that we often desire. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.04756v5-abstract-full').style.display = 'none'; document.getElementById('2201.04756v5-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 12 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Journal of Advanced Transportation, 2022 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2111.02926">arXiv:2111.02926</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2111.02926">pdf</a>, <a href="https://arxiv.org/format/2111.02926">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> OpenFWI: Large-Scale Multi-Structural Benchmark Datasets for Seismic Full Waveform Inversion </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Deng%2C+C">Chengyuan Deng</a>, <a href="/search/eess?searchtype=author&amp;query=Feng%2C+S">Shihang Feng</a>, <a href="/search/eess?searchtype=author&amp;query=Wang%2C+H">Hanchen Wang</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+X">Xitong Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P">Peng Jin</a>, <a href="/search/eess?searchtype=author&amp;query=Feng%2C+Y">Yinan Feng</a>, <a href="/search/eess?searchtype=author&amp;query=Zeng%2C+Q">Qili Zeng</a>, <a href="/search/eess?searchtype=author&amp;query=Chen%2C+Y">Yinpeng Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Lin%2C+Y">Youzuo Lin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2111.02926v6-abstract-short" style="display: inline;"> Full waveform inversion (FWI) is widely used in geophysics to reconstruct high-resolution velocity maps from seismic data. The recent success of data-driven FWI methods results in a rapidly increasing demand for open datasets to serve the geophysics community. We present OpenFWI, a collection of large-scale multi-structural benchmark datasets, to facilitate diversified, rigorous, and reproducible&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.02926v6-abstract-full').style.display = 'inline'; document.getElementById('2111.02926v6-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2111.02926v6-abstract-full" style="display: none;"> Full waveform inversion (FWI) is widely used in geophysics to reconstruct high-resolution velocity maps from seismic data. The recent success of data-driven FWI methods results in a rapidly increasing demand for open datasets to serve the geophysics community. We present OpenFWI, a collection of large-scale multi-structural benchmark datasets, to facilitate diversified, rigorous, and reproducible research on FWI. In particular, OpenFWI consists of 12 datasets (2.1TB in total) synthesized from multiple sources. It encompasses diverse domains in geophysics (interface, fault, CO2 reservoir, etc.), covers different geological subsurface structures (flat, curve, etc.), and contains various amounts of data samples (2K - 67K). It also includes a dataset for 3D FWI. Moreover, we use OpenFWI to perform benchmarking over four deep learning methods, covering both supervised and unsupervised learning regimes. Along with the benchmarks, we implement additional experiments, including physics-driven methods, complexity analysis, generalization study, uncertainty quantification, and so on, to sharpen our understanding of datasets and methods. The studies either provide valuable insights into the datasets and the performance, or uncover their current limitations. We hope OpenFWI supports prospective research on FWI and inspires future open-source efforts on AI for science. All datasets and related information can be accessed through our website at https://openfwi-lanl.github.io/ <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.02926v6-abstract-full').style.display = 'none'; document.getElementById('2111.02926v6-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This manuscript has been accepted by NeurIPS 2022 dataset and benchmark track</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.07584">arXiv:2110.07584</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2110.07584">pdf</a>, <a href="https://arxiv.org/format/2110.07584">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Geophysics">physics.geo-ph</span> </div> </div> <p class="title is-5 mathjax"> Unsupervised Learning of Full-Waveform Inversion: Connecting CNN and Partial Differential Equation in a Loop </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P">Peng Jin</a>, <a href="/search/eess?searchtype=author&amp;query=Zhang%2C+X">Xitong Zhang</a>, <a href="/search/eess?searchtype=author&amp;query=Chen%2C+Y">Yinpeng Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Huang%2C+S+X">Sharon Xiaolei Huang</a>, <a href="/search/eess?searchtype=author&amp;query=Liu%2C+Z">Zicheng Liu</a>, <a href="/search/eess?searchtype=author&amp;query=Lin%2C+Y">Youzuo Lin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.07584v2-abstract-short" style="display: inline;"> This paper investigates unsupervised learning of Full-Waveform Inversion (FWI), which has been widely used in geophysics to estimate subsurface velocity maps from seismic data. This problem is mathematically formulated by a second order partial differential equation (PDE), but is hard to solve. Moreover, acquiring velocity map is extremely expensive, making it impractical to scale up a supervised&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.07584v2-abstract-full').style.display = 'inline'; document.getElementById('2110.07584v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.07584v2-abstract-full" style="display: none;"> This paper investigates unsupervised learning of Full-Waveform Inversion (FWI), which has been widely used in geophysics to estimate subsurface velocity maps from seismic data. This problem is mathematically formulated by a second order partial differential equation (PDE), but is hard to solve. Moreover, acquiring velocity map is extremely expensive, making it impractical to scale up a supervised approach to train the mapping from seismic data to velocity maps with convolutional neural networks (CNN). We address these difficulties by integrating PDE and CNN in a loop, thus shifting the paradigm to unsupervised learning that only requires seismic data. In particular, we use finite difference to approximate the forward modeling of PDE as a differentiable operator (from velocity map to seismic data) and model its inversion by CNN (from seismic data to velocity map). Hence, we transform the supervised inversion task into an unsupervised seismic data reconstruction task. We also introduce a new large-scale dataset OpenFWI, to establish a more challenging benchmark for the community. Experiment results show that our model (using seismic data alone) yields comparable accuracy to the supervised counterpart (using both seismic data and velocity map). Furthermore, it outperforms the supervised model when involving more seismic data. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.07584v2-abstract-full').style.display = 'none'; document.getElementById('2110.07584v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.13251">arXiv:2102.13251</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2102.13251">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1155/2021/5590572">10.1155/2021/5590572 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Robust Kalman filter-based dynamic state estimation of natural gas pipeline networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Chen%2C+L">Liang Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P">Peng Jin</a>, <a href="/search/eess?searchtype=author&amp;query=Yang%2C+J">Jing Yang</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+Y">Yang Li</a>, <a href="/search/eess?searchtype=author&amp;query=Song%2C+Y">Yi Song</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2102.13251v1-abstract-short" style="display: inline;"> To obtain the accurate transient states of the big scale natural gas pipeline networks under the bad data and non-zero mean noises conditions, a robust Kalman filter-based dynamic state estimation method is proposed using the linearized gas pipeline transient flow equations in this paper. Firstly, the dynamic state estimation model is built. Since the gas pipeline transient flow equations are less&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.13251v1-abstract-full').style.display = 'inline'; document.getElementById('2102.13251v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.13251v1-abstract-full" style="display: none;"> To obtain the accurate transient states of the big scale natural gas pipeline networks under the bad data and non-zero mean noises conditions, a robust Kalman filter-based dynamic state estimation method is proposed using the linearized gas pipeline transient flow equations in this paper. Firstly, the dynamic state estimation model is built. Since the gas pipeline transient flow equations are less than the states, the boundary conditions are used as supplementary constraints to predict the transient states. To increase the measurement redundancy, the zero mass flow rate constraints at the sink nodes are taken as virtual measurements. Secondly, to ensure the stability under bad data condition, the robust Kalman filter algorithm is proposed by introducing a time-varying scalar matrix to regulate the measurement error variances correctly according to the innovation vector at every time step. At last, the proposed method is applied to a 30-node gas pipeline networks in several kinds of measurement conditions. The simulation shows that the proposed robust dynamic state estimation can decrease the effects of bad data and achieve better estimating results. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.13251v1-abstract-full').style.display = 'none'; document.getElementById('2102.13251v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by Mathematical Problems in Engineering</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Mathematical Problems in Engineering 2021 (2021) 5590572 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.04799">arXiv:2102.04799</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2102.04799">pdf</a>, <a href="https://arxiv.org/format/2102.04799">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Multi-scale GCN-assisted two-stage network for joint segmentation of retinal layers and disc in peripapillary OCT images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Li%2C+J">Jiaxuan Li</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P">Peiyao Jin</a>, <a href="/search/eess?searchtype=author&amp;query=Zhu%2C+J">Jianfeng Zhu</a>, <a href="/search/eess?searchtype=author&amp;query=Zou%2C+H">Haidong Zou</a>, <a href="/search/eess?searchtype=author&amp;query=Xu%2C+X">Xun Xu</a>, <a href="/search/eess?searchtype=author&amp;query=Tang%2C+M">Min Tang</a>, <a href="/search/eess?searchtype=author&amp;query=Zhou%2C+M">Minwen Zhou</a>, <a href="/search/eess?searchtype=author&amp;query=Gan%2C+Y">Yu Gan</a>, <a href="/search/eess?searchtype=author&amp;query=He%2C+J">Jiangnan He</a>, <a href="/search/eess?searchtype=author&amp;query=Ling%2C+Y">Yuye Ling</a>, <a href="/search/eess?searchtype=author&amp;query=Su%2C+Y">Yikai Su</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2102.04799v1-abstract-short" style="display: inline;"> An accurate and automated tissue segmentation algorithm for retinal optical coherence tomography (OCT) images is crucial for the diagnosis of glaucoma. However, due to the presence of the optic disc, the anatomical structure of the peripapillary region of the retina is complicated and is challenging for segmentation. To address this issue, we developed a novel graph convolutional network (GCN)-ass&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.04799v1-abstract-full').style.display = 'inline'; document.getElementById('2102.04799v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.04799v1-abstract-full" style="display: none;"> An accurate and automated tissue segmentation algorithm for retinal optical coherence tomography (OCT) images is crucial for the diagnosis of glaucoma. However, due to the presence of the optic disc, the anatomical structure of the peripapillary region of the retina is complicated and is challenging for segmentation. To address this issue, we developed a novel graph convolutional network (GCN)-assisted two-stage framework to simultaneously label the nine retinal layers and the optic disc. Specifically, a multi-scale global reasoning module is inserted between the encoder and decoder of a U-shape neural network to exploit anatomical prior knowledge and perform spatial reasoning. We conducted experiments on human peripapillary retinal OCT images. The Dice score of the proposed segmentation network is 0.820$\pm$0.001 and the pixel accuracy is 0.830$\pm$0.002, both of which outperform those from other state-of-the-art techniques. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.04799v1-abstract-full').style.display = 'none'; document.getElementById('2102.04799v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1912.05027">arXiv:1912.05027</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1912.05027">pdf</a>, <a href="https://arxiv.org/format/1912.05027">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> SpineNet: Learning Scale-Permuted Backbone for Recognition and Localization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Du%2C+X">Xianzhi Du</a>, <a href="/search/eess?searchtype=author&amp;query=Lin%2C+T">Tsung-Yi Lin</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P">Pengchong Jin</a>, <a href="/search/eess?searchtype=author&amp;query=Ghiasi%2C+G">Golnaz Ghiasi</a>, <a href="/search/eess?searchtype=author&amp;query=Tan%2C+M">Mingxing Tan</a>, <a href="/search/eess?searchtype=author&amp;query=Cui%2C+Y">Yin Cui</a>, <a href="/search/eess?searchtype=author&amp;query=Le%2C+Q+V">Quoc V. Le</a>, <a href="/search/eess?searchtype=author&amp;query=Song%2C+X">Xiaodan Song</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1912.05027v3-abstract-short" style="display: inline;"> Convolutional neural networks typically encode an input image into a series of intermediate features with decreasing resolutions. While this structure is suited to classification tasks, it does not perform well for tasks requiring simultaneous recognition and localization (e.g., object detection). The encoder-decoder architectures are proposed to resolve this by applying a decoder network onto a b&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1912.05027v3-abstract-full').style.display = 'inline'; document.getElementById('1912.05027v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1912.05027v3-abstract-full" style="display: none;"> Convolutional neural networks typically encode an input image into a series of intermediate features with decreasing resolutions. While this structure is suited to classification tasks, it does not perform well for tasks requiring simultaneous recognition and localization (e.g., object detection). The encoder-decoder architectures are proposed to resolve this by applying a decoder network onto a backbone model designed for classification tasks. In this paper, we argue encoder-decoder architecture is ineffective in generating strong multi-scale features because of the scale-decreased backbone. We propose SpineNet, a backbone with scale-permuted intermediate features and cross-scale connections that is learned on an object detection task by Neural Architecture Search. Using similar building blocks, SpineNet models outperform ResNet-FPN models by ~3% AP at various scales while using 10-20% fewer FLOPs. In particular, SpineNet-190 achieves 52.5% AP with a MaskR-CNN detector and achieves 52.1% AP with a RetinaNet detector on COCO for a single model without test-time augmentation, significantly outperforms prior art of detectors. SpineNet can transfer to classification tasks, achieving 5% top-1 accuracy improvement on a challenging iNaturalist fine-grained dataset. Code is at: https://github.com/tensorflow/tpu/tree/master/models/official/detection. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1912.05027v3-abstract-full').style.display = 'none'; document.getElementById('1912.05027v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 December, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">CVPR 2020</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1808.06602">arXiv:1808.06602</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1808.06602">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optimization and Control">math.OC</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.apenergy.2016.06.075">10.1016/j.apenergy.2016.06.075 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Optimized Hierarchical Power Oscillations Control for Distributed Generation Under Unbalanced Conditions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P">Peng Jin</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+Y">Yang Li</a>, <a href="/search/eess?searchtype=author&amp;query=Li%2C+G">Guoqing Li</a>, <a href="/search/eess?searchtype=author&amp;query=Chen%2C+Z">Zhe Chen</a>, <a href="/search/eess?searchtype=author&amp;query=Zhai%2C+X">Xiaojuan Zhai</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1808.06602v1-abstract-short" style="display: inline;"> Control structures have critical influences on converter-interfaced distributed generations (DG) under unbalanced conditions. Most of previous works focus on suppressing active power oscillations and ripples of DC bus voltage. In this paper, the relationship between amplitudes of the active power oscillations and the reactive power oscillations are firstly deduced and the hierarchical control of D&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1808.06602v1-abstract-full').style.display = 'inline'; document.getElementById('1808.06602v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1808.06602v1-abstract-full" style="display: none;"> Control structures have critical influences on converter-interfaced distributed generations (DG) under unbalanced conditions. Most of previous works focus on suppressing active power oscillations and ripples of DC bus voltage. In this paper, the relationship between amplitudes of the active power oscillations and the reactive power oscillations are firstly deduced and the hierarchical control of DG is proposed to reduce power oscillations. The hierarchical control consists of primary and secondary levels. Current references are generated in primary control level and the active power oscillations can be suppressed by a dual current controller. Secondary control reduces the active power and reactive power oscillations simultaneously by optimal model aiming for minimum amplitudes of oscillations. Simulation results show that the proposed secondary control with less injecting negative-sequence current than traditional control methods can effectively limit both active power and reactive power oscillations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1808.06602v1-abstract-full').style.display = 'none'; document.getElementById('1808.06602v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 August, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by Applied Energy</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Applied Energy 194 (2017) 343-352 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1801.07871">arXiv:1801.07871</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1801.07871">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Snapshot light-field laryngoscope </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/eess?searchtype=author&amp;query=Zhu%2C+S">Shuaishuai Zhu</a>, <a href="/search/eess?searchtype=author&amp;query=Jin%2C+P">Peng Jin</a>, <a href="/search/eess?searchtype=author&amp;query=Liang%2C+R">Rongguang Liang</a>, <a href="/search/eess?searchtype=author&amp;query=Gao%2C+L">Liang Gao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1801.07871v1-abstract-short" style="display: inline;"> The convergence of recent advances in optical fabrication and digital processing yields a new generation of imaging technology: light-field cameras, which bridge the realms of applied mathematics, optics, and high-performance computing. Herein for the first time, we introduce the paradigm of light-field imaging into laryngoscopy. The resultant probe can image the three-dimensional (3D) shape of vo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1801.07871v1-abstract-full').style.display = 'inline'; document.getElementById('1801.07871v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1801.07871v1-abstract-full" style="display: none;"> The convergence of recent advances in optical fabrication and digital processing yields a new generation of imaging technology: light-field cameras, which bridge the realms of applied mathematics, optics, and high-performance computing. Herein for the first time, we introduce the paradigm of light-field imaging into laryngoscopy. The resultant probe can image the three-dimensional (3D) shape of vocal folds within a single camera exposure. Furthermore, to improve the spatial resolution, we developed an image fusion algorithm, providing a simple solution to a long-standing problem in light-field imaging. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1801.07871v1-abstract-full').style.display = 'none'; document.getElementById('1801.07871v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 January, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages, 6 figures, 1 table</span> </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10