<!-- NOTE(review): removed pre-doctype scrape artifacts (a stray "CINXE.COM" marker and a duplicated page title); bare text before <!DOCTYPE html> is a parse error and forces quirks-mode rendering -->

<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<link rel="dns-prefetch" href="//www.peeref.com">
<link rel="dns-prefetch" href="//peeref-open.s3.amazonaws.com">
<link rel="dns-prefetch" href="//pagead2.googlesyndication.com">
<link rel="dns-prefetch" href="//www.w3.org">
<link rel="dns-prefetch" href="//doi.org">
<link rel="dns-prefetch" href="//www.linkedin.com">
<link rel="dns-prefetch" href="//twitter.com">
<link rel="dns-prefetch" href="//api.qrserver.com">
<link rel="dns-prefetch" href="//www.facebook.com">
<link rel="dns-prefetch" href="//www.googletagmanager.com">
<title>Lightweight Prompt Learning Implicit Degradation Estimation Network for Blind Super Resolution - Peeref</title>
<meta http-equiv="X-UA-Compatible" content="IE=edge, chrome=1">
<meta name="keywords" content="manuscript, research article, comment, full text">
<meta name="description" content="Lightweight Prompt Learning Implicit Degradation Estimation Network for Blind Super Resolution">
<!-- NOTE(review): collapsed duplicated viewport metas; dropped the variant with maximum-scale=1.0/user-scalable=no, which disables pinch-zoom (WCAG 1.4.4 failure) -->
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta property="og:locale" content="en">
<meta property="og:title" content="Lightweight Prompt Learning Implicit Degradation Estimation Network for Blind Super Resolution - Peeref">
<meta property="og:description" content="Lightweight Prompt Learning Implicit Degradation Estimation Network for Blind Super Resolution">
<meta property="og:url" content="https://www.peeref.com/works/83628583">
<meta property="og:site_name" content="Peeref">
<meta property="og:image" content="">
<meta name="csrf-token" content="UrgOpgrANshX9rKYqtdeMndS4cb0TWBYN89ZoT5N">
<meta http-equiv="Cache-Control" content="no-transform">
<meta http-equiv="Cache-Control" content="no-siteapp">
<link rel="shortcut icon" href="https://www.peeref.com/favicon.ico">
<link
href="https://peeref-open.s3.amazonaws.com/css/frameworks.bd380c8996823ba04442.css" rel="stylesheet"> <link href="https://peeref-open.s3.amazonaws.com/css/app.3cfd9992ceaf352e0491.css" rel="stylesheet"> <script> if(window.self !== window.top) { location.href = '/static/html/forbidden.html'; } </script> <script> window.Config = { token: "UrgOpgrANshX9rKYqtdeMndS4cb0TWBYN89ZoT5N", url: "https://www.peeref.com/works/83628583", lang: "en", prefix: "", isMobile: "", assetDomain: "https://peeref-open.s3.amazonaws.com/" }; </script> <script async src="https://pagead2.googlesyndication.com/pagead/js/adsbygoogle.js?client=ca-pub-6244001236711438" crossorigin="anonymous"></script> </head> <body> <div id="app"> <div class="bg-warning position-relative w-100 text-center py-2" style="top: 0; z-index: 100000;"> <strong class="text-danger">Peeref no longer supports Internet Explorer. </strong> <a href="/static/html/browser.html">Please upgrade your browser.</a> </div> <nav id="nav" class="navbar navbar-default navbar-static-top navbar-inverse"> <div class="container"> <div class="navbar-header"> <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#app-navbar-collapse" aria-expanded="false"> <span class="icon-bar"></span> <span class="icon-bar"></span> <span class="icon-bar"></span> </button> <a class="navbar-brand p-3" href="https://www.peeref.com"> <svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="108px" viewBox="0 0 1052 302"><g fill="#FFF"><path d="M227.6,92.9c5.2,13,17.9,22.1,32.7,22.1c19.4,0,35.2-15.8,35.2-35.2s-15.8-35.2-35.2-35.2 c-19.4,0-35.2,15.8-35.2,35.2c0,2.3,0.2,4.5,0.6,6.7l-98.1,24.6L154,59c3.2,1.1,6.6,1.8,10.1,1.8c16.4,0,29.7-13.3,29.7-29.7c0-16.4-13.3-29.7-29.7-29.7s-29.7,13.3-29.7,29.7c0,10.5,5.4,19.7,13.6,25l-27.9,55.1L90.5,94.2c1.4-3.2,2.1-6.8,2.1-10.6 c0-14.9-12.1-27-27-27c-14.9,0-27,12.1-27,27c0,14.9,12.1,27,27,27c8.8,0,16.6-4.2,21.6-10.7l28.5,16.3l-62.2,49.9 
c-5.6-5.3-13.1-8.6-21.4-8.6c-17.1,0-30.9,13.8-30.9,30.9c0,17.1,13.8,30.9,30.9,30.9S63,205.4,63,188.4c0-6.3-1.9-12.3-5.2-17.2 l60-48.1l-7.5,106.4c-19.1,0.6-34.4,16.2-34.4,35.5c0,19.6,15.9,35.5,35.5,35.5c19.6,0,35.5-15.9,35.5-35.5c0-17.7-13-32.4-30-35.1 l7.6-107.5l87,67.9c-4.1,6.1-6.4,13.5-6.4,21.4c0,21.4,17.4,38.8,38.8,38.8s38.8-17.4,38.8-38.8c0-21.4-17.4-38.8-38.8-38.8 c-11.1,0-21.2,4.7-28.2,12.2L129,117.6L227.6,92.9z"></path></g><g fill="#FFF"><path d="M368.2,232.3V70.6h52.4c19.9,0,32.8,0.8,38.8,2.4c9.3,2.4,17,7.7,23.3,15.8c6.2,8.1,9.4,18.6,9.4,31.5c0,9.9-1.8,18.3-5.4,25c-3.6,6.8-8.2,12.1-13.7,15.9c-5.6,3.9-11.2,6.4-16.9,7.7c-7.8,1.5-19.1,2.3-33.9,2.3h-21.3v61H368.2z M400.8,98v45.9h17.9c12.9,0,21.5-0.8,25.8-2.5c4.3-1.7,7.7-4.3,10.2-7.9c2.5-3.6,3.7-7.8,3.7-12.6c0-5.9-1.7-10.7-5.2-14.6c-3.5-3.8-7.8-6.2-13.1-7.2c-3.9-0.7-11.7-1.1-23.5-1.1H400.8z"></path><path d="M586.5,195l30.9,5.2c-4,11.3-10.2,19.9-18.8,25.9c-8.6,5.9-19.3,8.9-32.2,8.9c-20.4,0-35.4-6.7-45.2-20c-7.7-10.7-11.6-24.1-11.6-40.4c0-19.4,5.1-34.6,15.2-45.6c10.1-11,23-16.5,38.5-16.5c17.4,0,31.2,5.8,41.3,17.3c10.1,11.5,14.9,29.1,14.5,52.9h-77.7c0.2,9.2,2.7,16.3,7.5,21.5c4.8,5.1,10.7,7.7,17.9,7.7c4.9,0,8.9-1.3,12.2-4S584.8,200.9,586.5,195z M588.3,163.7c-0.2-9-2.5-15.8-6.9-20.5c-4.4-4.7-9.8-7-16.1-7c-6.8,0-12.4,2.5-16.8,7.4c-4.4,4.9-6.6,11.6-6.5,20.1H588.3z"></path><path d="M712.2,195l30.9,5.2c-4,11.3-10.2,19.9-18.8,25.9c-8.6,5.9-19.3,8.9-32.2,8.9c-20.4,0-35.4-6.7-45.2-20c-7.7-10.7-11.6-24.1-11.6-40.4c0-19.4,5.1-34.6,15.2-45.6c10.1-11,23-16.5,38.5-16.5c17.4,0,31.2,5.8,41.3,17.3c10.1,11.5,14.9,29.1,14.5,52.9H667c0.2,9.2,2.7,16.3,7.5,21.5c4.8,5.1,10.7,7.7,17.9,7.7c4.9,0,8.9-1.3,12.2-4S710.5,200.9,712.2,195z M713.9,163.7c-0.2-9-2.5-15.8-6.9-20.5c-4.4-4.7-9.8-7-16.1-7c-6.8,0-12.4,2.5-16.8,7.4c-4.4,4.9-6.6,11.6-6.5,20.1H713.9z"></path><path 
d="M799.6,232.3h-31V115.2h28.8v16.7c4.9-7.9,9.4-13.1,13.3-15.6c3.9-2.5,8.4-3.8,13.4-3.8c7.1,0,13.9,1.9,20.4,5.8l-9.6,27c-5.2-3.4-10.1-5.1-14.6-5.1c-4.3,0-8,1.2-11,3.6c-3,2.4-5.4,6.7-7.1,13c-1.7,6.3-2.6,19.3-2.6,39.3V232.3z"></path><path d="M925.7,195l30.9,5.2c-4,11.3-10.2,19.9-18.8,25.9c-8.6,5.9-19.3,8.9-32.2,8.9c-20.4,0-35.4-6.7-45.2-20c-7.7-10.7-11.6-24.1-11.6-40.4c0-19.4,5.1-34.6,15.2-45.6c10.1-11,23-16.5,38.5-16.5c17.4,0,31.2,5.8,41.3,17.3c10.1,11.5,14.9,29.1,14.5,52.9h-77.7c0.2,9.2,2.7,16.3,7.5,21.5c4.8,5.1,10.7,7.7,17.9,7.7c4.9,0,8.9-1.3,12.2-4S924,200.9,925.7,195z M927.5,163.7c-0.2-9-2.5-15.8-6.9-20.5c-4.4-4.7-9.8-7-16.1-7c-6.8,0-12.4,2.5-16.8,7.4c-4.4,4.9-6.6,11.6-6.5,20.1H927.5z"></path><path d="M970,115.2h17.2v-8.8c0-9.9,1-17.2,3.1-22.1c2.1-4.9,6-8.8,11.6-11.9c5.6-3.1,12.7-4.6,21.3-4.6c8.8,0,17.5,1.3,25.9,4l-4.2,21.6c-4.9-1.2-9.7-1.8-14.2-1.8c-4.5,0-7.7,1-9.7,3.1c-1.9,2.1-2.9,6.1-2.9,12.1v8.3h23.2v24.4h-23.2v92.8h-31v-92.8H970V115.2z"></path></g></svg> </a> </div> <ul class="collapse navbar-collapse mb-0" id="app-navbar-collapse"> <ul class="nav navbar-nav"> <li class="dropdown dropdown-hover"> <a href="javascript:">Publish <span class="caret"></span></a> <ul class="dropdown-menu dropdown-menu-inverse"> <li><a href="https://www.peeref.com/posters">Posters</a></li> <li><a href="https://www.peeref.com/abstracts">Abstracts</a></li> </ul> </li> <li class="dropdown dropdown-hover"> <a href="javascript:">Discuss <span class="caret"></span></a> <ul class="dropdown-menu dropdown-menu-inverse"> <li><a href="https://www.peeref.com/hubs">Hubs</a></li> <li><a href="https://www.peeref.com/questions">Questions</a></li> </ul> </li> <li><a href="https://www.peeref.com/works">Papers</a></li> <li><a href="https://www.peeref.com/webinars">Webinars</a></li> <li><a href="https://www.peeref.com/funding">Funding</a></li> <li class="dropdown dropdown-hover"> <a href="javascript:">Publishing Tools <span class="caret"></span></a> <ul class="dropdown-menu dropdown-menu-inverse"> 
<li><a href="https://www.peeref.com/journals">Journals</a></li> <li><a href="https://www.peeref.com/connect">Connect</a></li> <li><a href="https://www.peeref.com/e-collections">Collections</a></li> <li><a href="https://www.peeref.com/reviewer">Reviewers</a></li> </ul> </li> <li class="dropdown dropdown-hover"> <a href="javascript:">Events <span class="caret"></span></a> <ul class="dropdown-menu dropdown-menu-inverse"> <li><a href="https://www.peeref.com/poster-competition">Poster Challenger</a></li> <li><a href="https://www.peeref.com/webinar-competition">Video Presentation</a></li> <li><a href="https://www.peeref.com/artwork-competition">Art of Science</a></li> <li><a href="https://www.peeref.com/events/reviewer-roulette">Reviewer Roulette</a></li> </ul> </li> </ul> <ul id="auth-menu" class="nav navbar-nav navbar-right" style="display: inline-flex; align-items: center;"> <li> <a href="https://www.peeref.com/login"> <strong>My Account</strong> </a> </li> </ul> <ul class="nav navbar-nav navbar-right" style="display: inline-flex; align-items: center; margin-left: 20px;"> <li id="language" class="d-none d-xl-inline-flex"> <a href="javascript:"> <div class="current"> <i class="ivu-icon ivu-icon-md-globe"></i> EN </div> </a> <div class="selection"> <a rel="alternate" hreflang="en" href="https://www.peeref.com/works/83628583" > <span>English</span> </a> <a rel="alternate" hreflang="zh" href="https://www.peeref.com/zh/works/83628583" > <span>中文</span> </a> </div> </li> </ul> </ul> </div> </nav> <main> <div id="top-info-banner" class="container-fluid mb-0"> <div class="container"> <div class="d-flex align-items-center" style="margin-top: 30px;"> <span class="text-white"> <strong class="f18">☆</strong> <span class="f16">4.7</span> </span> <span class="mx-3"></span> <span class="tag">Article</span> </div> <h1 class="title title-for-article"> Lightweight Prompt Learning Implicit Degradation Estimation Network for Blind Super Resolution </h1> <div class="help-links-left"> <p 
class="pub-info"> IEEE TRANSACTIONS ON IMAGE PROCESSING (2024) </p> </div> </div> </div> <div id="article-sticky-navbar"> <div class="container"> <div class="d-flex justify-content-between flex-wrap flex-md-nowrap"> <div class="d-flex align-items-center mb-2"> <ul class="nav nav-underline f16 font-weight-bold"> <li class="active"> <a href="javascript:;"> Overview </a> </li> <li class=""> <a href="https://www.peeref.com/works/83628583/comments"> Write a Review </a> </li> </ul> </div> <div class="d-flex align-items-center justify-content-md-end flex-wrap flex-md-nowrap"> <div class="mr-3 mt-3 mt-md-0 flex-shrink-0"> <a href="https://doi.org/10.1109/TIP.2024.3442613" target="_blank" class="btn btn-warning btn-circle"> <i class="ivu-icon ivu-icon-md-copy f16"></i> <strong>Get Full Text</strong> </a> </div> <div class="mr-3 mt-3 mt-md-0 flex-shrink-0"> <a href="https://www.peeref.com/works/83628583/add-to-collection" class="btn btn-success btn-circle"> <strong>Add to Collection</strong> </a> </div> <div class="mr-3 mt-3 mt-md-0 flex-shrink-0"> <button class="btn btn-success btn-circle" id="reading-btn"> <strong>Further Reading</strong> </button> </div> <div class="flex-shrink-0 mt-3 mt-md-0"> <div class="dropdown"> <button class="font-weight-bold f24 ivu-btn ivu-btn-default ivu-btn-circle ivu-btn-large ivu-btn-icon-only dropdown-toggle" data-toggle="dropdown"> <i class="ivu-icon ivu-icon-md-more"></i> </button> <ul class="dropdown-menu dropdown-menu-right"> <li> <a href="#" data-target="#export-citation" data-toggle="modal"> <i class="ivu-icon ivu-icon-md-quote text-muted mr-1"></i> Export Citation </a> </li> <li> <a href="#" data-target="#share-paper" data-toggle="modal"> <i class="ivu-icon ivu-icon-md-share-alt text-muted mr-1"></i> Share Paper </a> </li> <li> <a href="https://www.peeref.com/works/83628583/references"> <i class="ivu-icon ivu-icon-md-list text-muted mr-1"></i> References </a> </li> </ul> </div> </div> </div> </div> </div> </div> <div 
id="article-details" class="container"> <div class="col-md-4 px-0 pr-md-3"> <div class="f15 panel-box rounded shadow-none border"> <div class="mb-3 pb-3"> <h4 class="mt-0">Journal</h4> <div class="f16"> <h5 class="title f16"> <a href="https://www.peeref.com/journals/3390/ieee-transactions-on-image-processing"> IEEE TRANSACTIONS ON IMAGE PROCESSING </a> </h5> <span> Volume 33, Issue -, Pages 4556-4567 </span> </div> </div> <div class="mb-3 pb-3"> <h4 class="mt-0">Publisher</h4> <div class="f16"> <h5 class="title f16 text-primary"> IEEE-INST ELECTRICAL ELECTRONICS ENGINEERS INC </h5> <div class="my-2"> DOI: 10.1109/TIP.2024.3442613 </div> </div> </div> <div class="mb-3 pb-3"> <h4 class="mt-0">Keywords</h4> <div class="f16"> Degradation; Kernel; Wiener filters; Deconvolution; Superresolution; Estimation; Image reconstruction; Image super-resolution; Wiener filter; implicit degradation estimation; prompt learning </div> </div> <div class="mb-3 pb-3"> <h4 class="mt-0">Categories</h4> <div class="f16"> <span class="d-block"> <a href="https://www.peeref.com/works/list?category=Computer+Science%2C+Artificial+Intelligence" target="_blank" class="text-dark btn btn-link p-0 text-left"> Computer Science, Artificial Intelligence </a> </span> <span class="d-block"> <a href="https://www.peeref.com/works/list?category=Engineering%2C+Electrical+%26+Electronic" target="_blank" class="text-dark btn btn-link p-0 text-left"> Engineering, Electrical &amp; Electronic </a> </span> </div> </div> <div class="mb-3 pb-3"> <h4 class="mt-0">Funding</h4> <div class="f16"> <ol class=""> <li>European Union-NextGenerationEU</li> </ol> </div> </div> </div> <div class="f15 panel-box rounded shadow-none border"> <h4 class="mt-0 text-center">Ask authors/readers for more resources</h4> <div class="requests"> <div class="requests-item"> <div class="icon"> <img src="https://peeref-open.s3.amazonaws.com/images/file.png" alt=""> </div> <h4>Protocol</h4> <p> <a 
href="https://www.peeref.com/works/83628583/resource" class="btn btn-outline-primary btn-sm"> Community support </a> </p> </div> <div class="requests-item"> <div class="icon"> <img src="https://peeref-open.s3.amazonaws.com/images/experiment.png" alt=""> </div> <h4>Reagent</h4> <p> <a href="https://www.peeref.com/works/83628583/resource" class="btn btn-outline-primary btn-sm"> Community support </a> </p> </div> </div> </div> </div> <div class="col-md-8 px-0 pl-md-3"> <div id="article-summary-panel" class="mb-4"> <ul class="nav nav-tabs" style="list-style: none; padding-left: 0;"> <li class="active"> <a href="#ai_summary" data-toggle="tab" class="summary-tab mx-0 f16 text-dark"> <strong>Automated Summary</strong> <strong class="text-danger ml-1"><i>New</i></strong> </a> </li> <li class=""> <a href="#raw_abstract" data-toggle="tab" class="abstract-tab mx-0 f16 text-dark"> <strong>Abstract</strong> </a> </li> </ul> <div class="tab-content border border-top-0"> <div id="ai_summary" class="tab-pane active"> <div class="summary-panel panel-box mb-0 rounded shadow-none"> <div class="f16">This paper proposes a novel blind image super-resolution method that implicitly learns the degradation kernel through a lightweight architecture and a novel loss component, and performs efficient deconvolution using a learnable Wiener filter. In addition, a novel degradation-conditioned prompt layer is proposed, which exploits the estimated kernel to guide the focus on discriminative contextual information, thereby improving the reconstruction effect. 
Experimental results show that this method achieves good performance under different degradation settings, and the number of parameters/FLOPs is significantly reduced.</div> </div> </div> <div id="raw_abstract" class="tab-pane"> <div class="abstract-panel panel-box mb-0 rounded shadow-none"> <div class="f16">Blind image super-resolution (SR) aims to recover a high-resolution (HR) image from its low-resolution (LR) counterpart under the assumption of unknown degradations. Many existing blind SR methods rely on supervising ground-truth kernels referred to as explicit degradation estimators. However, it is very challenging to obtain the ground-truths for different degradation kernels. Moreover, most of these methods rely on heavy backbone networks, which demand extensive computational resources. Implicit degradation estimators do not require the availability of ground truth kernels, but they see a significant performance gap with the explicit degradation estimators due to such missing information. We present a novel approach that significantly narrows such a gap by means of a lightweight architecture that implicitly learns the degradation kernel with the help of a novel loss component. The kernel is exploited by a learnable Wiener filter that performs efficient deconvolution in the Fourier domain by deriving a closed-form solution. Inspired by prompt-based learning, we also propose a novel degradation-conditioned prompt layer that exploits the estimated kernel to drive the focus on the discriminative contextual information that guides the reconstruction process in recovering the latent HR image. Extensive experiments under different degradation settings demonstrate that our model, named PL-IDENet, yields PSNR and SSIM improvements of more than 0.4 dB and 1.3%, and 1.4 dB and 4.8% to the best implicit and explicit blind-SR method, respectively. 
These results are achieved while maintaining a substantially lower number of parameters/FLOPs (i.e., 25% and 68% fewer parameters than best implicit and explicit methods, respectively).</div> </div> </div> </div> </div> <div class="f15 panel-box rounded shadow-none border"> <h4 class="mt-0 heading-count">Authors</h4> <div class="mb-3"> <article-authors tid="83628583" list="[{&quot;name&quot;:&quot;Asif Hussain Khan&quot;,&quot;sequence&quot;:1},{&quot;name&quot;:&quot;Christian Micheloni&quot;,&quot;sequence&quot;:2},{&quot;name&quot;:&quot;Niki Martinel&quot;,&quot;sequence&quot;:3}]" verified="[]" page="work" ></article-authors> </div> <div class="alert alert-warning mb-0"> <h5 class="mt-0 bg-warning text-dark px-3 rounded d-inline-block"> I am an author on this paper </h5> <div class="font-weight-bold f13"> Click your name to claim this paper and add it to your profile. </div> </div> </div> <div class="f15 panel-box rounded shadow-none border"> <h4 class="mt-0 heading-count">Reviews</h4> <div class="d-flex flex-wrap flex-md-nowrap"> <div class="flex-grow-1"> <h4 class="f16"> Primary Rating <a href="javascript:;" data-toggle="tooltip" data-placement="right" title="The primary rating indicates the level of overall quality for the paper."> <i class="ivu-icon ivu-icon-md-help-circle f18 ml-2"></i> </a> </h4> <div class="d-flex flex-wrap flex-md-nowrap align-items-center alert mb-0"> <div class="d-flex align-items-center justify-content-center"> <Rate disabled allow-half value="4.7" style="font-size: 28px;"></Rate> <strong class="f20 m-3" style="color: #f5a623;">4.7</strong> </div> <div class="text-muted mx-4"> Not enough ratings </div> </div> <h4 class="f16"> Secondary Ratings <a href="javascript:;" data-toggle="tooltip" data-placement="right" title="Secondary ratings independently reflect strengths or weaknesses of the paper."> <i class="ivu-icon ivu-icon-md-help-circle f18 ml-2"></i> </a> </h4> <div class="d-flex flex-wrap flex-md-nowrap alert"> <div class="d-flex 
flex-shrink-0 align-items-center mr-3"> <h5 class="my-0">Novelty</h5> <strong class="mx-4">-</strong> </div> <div class="d-flex flex-shrink-0 align-items-center mr-3"> <h5 class="my-0">Significance</h5> <strong class="mx-4">-</strong> </div> <div class="d-flex flex-shrink-0 align-items-center mr-3"> <h5 class="my-0">Scientific rigor</h5> <strong class="mx-4">-</strong> </div> </div> </div> <div class="flex-shrink-0"> <div class="border bg-light py-2 px-4"> <h5 class="mb-1">Rate this paper</h5> <Rate class="f24" @on-change="function(value){ location.href='https://www.peeref.com/works/83628583/comments?rating='+value }"></Rate> </div> </div> </div> </div> <div id="collection" class="f15 panel-box rounded shadow-none border"> <h4 class="mt-0 heading-count">Recommended</h4> <div class="my-3"> <ul class="nav nav-pills border-bottom pb-3" style="list-style: none; padding-left: 0;"> <li class="active"> <a href="#articles_from_related" data-toggle="tab" class="mx-0 f15"> <strong>Related</strong> </a> </li> <li class=""> <a href="#articles_from_authors" data-toggle="tab" class="mx-0 f15"> <strong>From Same Authors</strong> </a> </li> <li class=""> <a href="#articles_from_journal" data-toggle="tab" class="mx-0 f15"> <strong>From Same Journal</strong> </a> </li> </ul> <div class="tab-content"> <div id="articles_from_related" class="tab-pane active"> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/26537329" class="text-dark hover-underline">Toward Real-World Super-Resolution via Adaptive Downsampling Models</a> </h4> <p class="text-ellipsis-2">Sanghyun Son, Jaeha Kim, Wei-Sheng Lai, Ming-Hsuan Yang, Kyoung Mu Lee</p> <div class="d-flex mb-3"> <div class="flex-shrink-0 d-none d-sm-block"> <img src="https://peeref-open.s3.amazonaws.com/storage/images/covers/3411.jpg" alt="" 
class="border mr-3" width="100"> </div> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This study proposes a novel method to simulate an unknown downsampling process without imposing restrictive prior knowledge. By designing a low-frequency loss (LFL) and an adaptive data loss (ADL) in the adversarial training framework, the downsampling model becomes more generalizable and enables more accurate reconstructions. </div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE</span> (2022) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/26537329/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/83860471" class="text-dark hover-underline">Difficulty-Guided Variant Degradation Learning for Blind Image Super-Resolution</a> </h4> <p class="text-ellipsis-2">Jiaxu Leng, Jia Wang, Mengjingcheng Mo, Ji Gan, Wen Lu, Xinbo Gao</p> <div class="d-flex mb-3"> <div class="flex-shrink-0 d-none d-sm-block"> <img src="https://peeref-open.s3.amazonaws.com/storage/images/covers/8837.jpg" alt="" class="border mr-3" width="100"> </div> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This paper proposes a difficulty-guided variant degradation learning network (DDL-BSR) for blind super-resolution, addressing the problems of performance degradation caused by the spatial invariance assumption and the lack of consideration for the human visual system&#039;s attention differences in existing methods. 
The network consists of three modules: reconstruction difficulty prediction, space-variant degradation estimation, and degradation and difficulty-informed reconstruction. By predicting the reconstruction difficulty and estimating the space-variant degradation kernels accordingly, and finally combining the degradation kernels and reconstruction difficulty for super-resolution reconstruction. Experimental results show that DDL-BSR outperforms existing methods on various synthetic datasets, and the generated images have better realism and texture quality. </div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON NEURAL NETWORKS AND LEARNING SYSTEMS</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/83860471/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/83947295" class="text-dark hover-underline">Empowering Real-World Image Super-Resolution With Flexible Interactive Modulation</a> </h4> <p class="text-ellipsis-2">Chong Mou, Xintao Wang, Yanze Wu, Ying Shan, Jian Zhang</p> <div class="d-flex mb-3"> <div class="flex-shrink-0 d-none d-sm-block"> <img src="https://peeref-open.s3.amazonaws.com/storage/images/covers/3411.jpg" alt="" class="border mr-3" width="100"> </div> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This paper proposes a novel interactive image restoration method that enables complex image super-resolution in the real world. 
The method achieves local control and finer-grained, pixel-wise degradation estimation of the image through a metric-learning-based degradation estimation strategy, and also proposes a new metric-augmented loss to further improve performance. Experimental results show that the method performs well in real-world image super-resolution tasks while maintaining a low model complexity. </div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/83947295/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Automation &amp; Control Systems </span> </div> <h4> <a href="https://www.peeref.com/works/26924086" class="text-dark hover-underline">Contrastive Learning for Blind Super-Resolution via A Distortion-Specific Network</a> </h4> <p class="text-ellipsis-2">Xinya Wang, Jiayi Ma, Junjun Jiang</p> <div class="d-flex mb-3"> <div class="flex-shrink-0 d-none d-sm-block"> <img src="https://peeref-open.s3.amazonaws.com/storage/images/covers/10893.jpg" alt="" class="border mr-3" width="100"> </div> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> Previous deep learning-based super-resolution methods rely on predefined degradation processes and may suffer from deterioration when the real degradation is inconsistent. In this paper, we propose a contrastive regularization method that exploits blurry and clear images as negative and positive samples, respectively, to improve blind super-resolution performance. 
We also extract global statistical prior information instead of estimating degradation to capture the distortion characteristics and make our method adaptive to changes in distortions. Experimental results demonstrate that our lightweight CRDNet surpasses state-of-the-art blind super-resolution approaches. </div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE-CAA JOURNAL OF AUTOMATICA SINICA</span> (2023) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/26924086/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Optics </span> </div> <h4> <a href="https://www.peeref.com/works/26685783" class="text-dark hover-underline">Resolution, accuracy and precision in super-resolved microscopy images using SUPPOSe</a> </h4> <p class="text-ellipsis-2">Micaela Toscani, Oscar E. Martinez, Sandra Martinez</p> <div class="d-flex mb-3"> <div class="flex-shrink-0 d-none d-sm-block"> <img src="https://peeref-open.s3.amazonaws.com/storage/images/covers/6393.jpg" alt="" class="border mr-3" width="100"> </div> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This work conducts a performance analysis of the SUPPOSe algorithm, a deconvolution method based on superposition of point sources of equal intensity. By testing with synthetic and experimental images, the accuracy, precision, and resolution of the method are determined under different signal/noise conditions. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">OPTICS AND LASERS IN ENGINEERING</span> (2023) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/26685783/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Geochemistry &amp; Geophysics </span> </div> <h4> <a href="https://www.peeref.com/works/83688071" class="text-dark hover-underline">Unsupervised Degradation Aware and Representation for Real-World Remote Sensing Image Super-Resolution</a> </h4> <p class="text-ellipsis-2">Wen-Zhong Guo, Wu-Ding Weng, Guang-Yong Chen, Jian-Nan Su, Min Gan, C. L. Philip Chen</p> <div class="d-flex mb-3"> <div class="flex-shrink-0 d-none d-sm-block"> <img src="https://peeref-open.s3.amazonaws.com/storage/images/covers/3389.jpg" alt="" class="border mr-3" width="100"> </div> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> In the field of remote sensing, blind super-resolution has attracted attention. Due to the lack of paired data, existing methods use predefined degradation models to synthesize low-resolution images for training and evaluation, but the acquired remote sensing images are often degraded by various factors and require super-resolution reconstruction. Using these images as ground-truth images will limit the model&#039;s ability to restore fine details, resulting in blurry and noisy reconstructions. To address this, an unsupervised degradation-aware network is proposed, which uses natural images as a reference to enable the network to produce clearer reconstructions. 
It is also found that the patchwise discriminator can perceive the degradation type of different regions in the remote sensing image, and a degradation representation module is designed to estimate the degradation information from LR images and guide the network for adaptive restoration. Experiments show that the proposed framework achieves state-of-the-art restoration performance. </div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON GEOSCIENCE AND REMOTE SENSING</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/83688071/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Engineering, Electrical &amp; Electronic </span> </div> <h4> <a href="https://www.peeref.com/works/83183057" class="text-dark hover-underline">Dynamic Degradation Intensity Estimation for Adaptive Blind Super-Resolution: A Novel Approach and Benchmark Dataset</a> </h4> <p class="text-ellipsis-2">Guang-Yong Chen, Wu-Ding Weng, Jian-Nan Su, Min Gan, C. L. Philip Chen</p> <div class="d-flex mb-3"> <div class="flex-shrink-0 d-none d-sm-block"> <img src="https://peeref-open.s3.amazonaws.com/storage/images/covers/3369.jpg" alt="" class="border mr-3" width="100"> </div> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This paper introduces the goal and challenges of blind super-resolution technology, points out the problems of existing methods, proposes a novel degradation intensity estimation module method, and constructs a benchmark dataset closer to the real world. Experimental results show that this method can achieve better image reconstruction results. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/83183057/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Engineering, Electrical &amp; Electronic </span> </div> <h4> <a href="https://www.peeref.com/works/82006963" class="text-dark hover-underline">Arbitrary-Scale Image Super-Resolution via Degradation Perception</a> </h4> <p class="text-ellipsis-2">Wenbo Wan, Zezhu Wang, Zhiyan Wang, Lingchen Gu, Jiande Sun, Qiang Wang</p> <div class="d-flex mb-3"> <div class="flex-shrink-0 d-none d-sm-block"> <img src="https://peeref-open.s3.amazonaws.com/storage/images/covers/10623.jpg" alt="" class="border mr-3" width="100"> </div> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This paper proposes a method that can adaptively handle varying degradations at different scale factors, which consists of two parts: Image Refinement Network (IRN) and Super-Resolution Encoding Guidance Module (SREGM). IRN uses a dynamic convolution method to deal with different degradations under arbitrary scale factors on a pixel-by-pixel basis, solving the spatial invariance problem of the ordinary convolution kernel. SREGM takes the high-resolution pixel space as a reference frame and uses the modelling results as prior information to guide the high-resolution reconstruction. Experiments show that this method achieves good results in the super-resolution of a single image with an arbitrary scale factor. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON COMPUTATIONAL IMAGING</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/82006963/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Geochemistry &amp; Geophysics </span> </div> <h4> <a href="https://www.peeref.com/works/84828076" class="text-dark hover-underline">Jointly RS Image Deblurring and Super-Resolution With Adjustable-Kernel and Multi-Domain Attention</a> </h4> <p class="text-ellipsis-2">Yan Zhang, Pengcheng Zheng, Chengxiao Zeng, Bin Xiao, Zhenghao Li, Xinbo Gao</p> <div class="d-flex mb-3"> <div class="flex-shrink-0 d-none d-sm-block"> <img src="https://peeref-open.s3.amazonaws.com/storage/images/covers/3389.jpg" alt="" class="border mr-3" width="100"> </div> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This paper proposes a dual-branch parallel network named AKMD-Net for the joint RS image deblurring and super-resolution task. The network consists of two main branches: deblurring and SR, and designs PAKB, MDAB, AFF, and AW Loss modules to improve performance. Experimental results show that AKMD-Net achieves state-of-the-art quantitative and qualitative performance on commonly used RS image datasets. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON GEOSCIENCE AND REMOTE SENSING</span> (2025) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/84828076/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Geochemistry &amp; Geophysics </span> </div> <h4> <a href="https://www.peeref.com/works/34833486" class="text-dark hover-underline">Multilayer Degradation Representation-Guided Blind Super-Resolution for Remote Sensing Images</a> </h4> <p class="text-ellipsis-2">Xudong Kang, Jier Li, Puhong Duan, Fuyan Ma, Shutao Li</p> <div class="d-flex mb-3"> <div class="flex-shrink-0 d-none d-sm-block"> <img src="https://peeref-open.s3.amazonaws.com/storage/images/covers/3389.jpg" alt="" class="border mr-3" width="100"> </div> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This paper proposes a multilayer degradation representation-guided blind super-resolution method for remote sensing images, which achieves detail restoration through unsupervised representation learning, degradation-guided deep residual module, and multilayer degradation-aware feature fusion mechanism. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON GEOSCIENCE AND REMOTE SENSING</span> (2022) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/34833486/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Engineering, Electrical &amp; Electronic </span> </div> <h4> <a href="https://www.peeref.com/works/82178453" class="text-dark hover-underline">Dual Circle Contrastive Learning-Based Blind Image Super-Resolution</a> </h4> <p class="text-ellipsis-2">Yajun Qiu, Qiang Zhu, Shuyuan Zhu, Bing Zeng</p> <div class="d-flex mb-3"> <div class="flex-shrink-0 d-none d-sm-block"> <img src="https://peeref-open.s3.amazonaws.com/storage/images/covers/3369.jpg" alt="" class="border mr-3" width="100"> </div> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This paper proposes a blind image super-resolution (BISR) method based on dual circle contrastive learning (DCCL) and high-efficiency modules. The method solves the problems existing in the generation of degradation representation and super-resolution results in existing methods by designing a degradation extraction network and an information distillation module, and improves the performance of BISR. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/82178453/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Engineering, Electrical &amp; Electronic </span> </div> <h4> <a href="https://www.peeref.com/works/84273478" class="text-dark hover-underline">Frequency Generation for Real-World Image Super-Resolution</a> </h4> <p class="text-ellipsis-2">Wenxue Guan, Haobo Li, Dawei Xu, Jiaxin Liu, Shenghua Gong, Jun Liu</p> <div class="d-flex mb-3"> <div class="flex-shrink-0 d-none d-sm-block"> <img src="https://peeref-open.s3.amazonaws.com/storage/images/covers/3369.jpg" alt="" class="border mr-3" width="100"> </div> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This paper proposes a Frequency Separation Network (FSN) for separating low-frequency information and generating high-frequency information to reconstruct high-resolution real-world images quickly and accurately. The FSN uses Gaussian filters as the frequency separation module and an adaptive feature fusion module to aggregate different frequency features. Experiments show that the FSN has superior visual quality and generalization ability in various scenarios. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/84273478/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Geochemistry &amp; Geophysics </span> </div> <h4> <a href="https://www.peeref.com/works/35102366" class="text-dark hover-underline">Deep Joint Estimation Network for Satellite Video Super-Resolution With Multiple Degradations</a> </h4> <p class="text-ellipsis-2">Huan Liu, Yanfeng Gu</p> <div class="d-flex mb-3"> <div class="flex-shrink-0 d-none d-sm-block"> <img src="https://peeref-open.s3.amazonaws.com/storage/images/covers/3389.jpg" alt="" class="border mr-3" width="100"> </div> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This article proposes a deep joint estimation network for satellite video super-resolution (JENSVSR), which can jointly estimate blur kernels and high-resolution frames. By using multiple video frames and feature fusion modules, it achieves better super-resolution results. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON GEOSCIENCE AND REMOTE SENSING</span> (2022) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/35102366/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Information Systems </span> </div> <h4> <a href="https://www.peeref.com/works/23985201" class="text-dark hover-underline">Progressive Residual Learning With Memory Upgrade for Ultrasound Image Blind Super-Resolution</a> </h4> <p class="text-ellipsis-2">Heng Liu, Jianyong Liu, Feng Chen, Caifeng Shan</p> <div class="d-flex mb-3"> <div class="flex-shrink-0 d-none d-sm-block"> <img src="https://peeref-open.s3.amazonaws.com/storage/images/covers/9464.jpg" alt="" class="border mr-3" width="100"> </div> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> In this study, a novel blind super-resolution technique based on deep learning is proposed to reconstruct ultrasound images through a multi-module approach, achieving better performance compared to existing methods. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE JOURNAL OF BIOMEDICAL AND HEALTH INFORMATICS</span> (2022) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/23985201/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 "> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/82252878" class="text-dark hover-underline">FDSR: An Interpretable Frequency Division Stepwise Process Based Single-Image Super-Resolution Network</a> </h4> <p class="text-ellipsis-2">Pengcheng Xu, Qun Liu, Huanan Bao, Ruhui Zhang, Lihua Gu, Guoyin Wang</p> <div class="d-flex mb-3"> <div class="flex-shrink-0 d-none d-sm-block"> <img src="https://peeref-open.s3.amazonaws.com/storage/images/covers/3390.jpg" alt="" class="border mr-3" width="100"> </div> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This study presents an interpretable frequency division SR network that divides the image into different frequencies for reconstruction through a frequency division module and a stepwise reconstruction method, and develops a frequency division loss function to ensure that each ReM operates only at one image frequency, thereby establishing an interpretable framework that visualizes the image reconstruction process. In addition, by reexamining the subpixel layer upsampling process, a displacement generation module is designed, and a new ReM based on interpretable Hessian attention is developed to enhance the network performance. 
Experiments show that the network is superior to the state-of-the-art methods without the frequency division loss, and the addition of the frequency division loss enhances the interpretability and robustness of the network. </div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON IMAGE PROCESSING</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/82252878/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> </div> <div id="articles_from_authors" class="tab-pane "> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/25905955" class="text-dark hover-underline">Visual Object Tracking in First Person Vision</a> </h4> <p class="text-ellipsis-2">Matteo Dunnhofer, Antonino Furnari, Giovanni Maria Farinella, Christian Micheloni</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This paper presents the first systematic investigation of single object tracking in First Person Vision (FPV). The study analyzes the performance of 42 algorithms in FPV and finds that object tracking in FPV poses new challenges to current visual trackers. Despite the difficulties, trackers bring benefits to FPV downstream tasks requiring short-term object tracking. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">INTERNATIONAL JOURNAL OF COMPUTER VISION</span> (2023) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/25905955/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Information Systems </span> </div> <h4> <a href="https://www.peeref.com/works/28067836" class="text-dark hover-underline">Lightweight Implicit Blur Kernel Estimation Network for Blind Image Super-Resolution</a> </h4> <p class="text-ellipsis-2">Asif Hussain Khan, Christian Micheloni, Niki Martinel</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> Blind image super-resolution (Blind-SR) is the process of generating a high-resolution (HR) version of a low-resolution (LR) image with unknown degradation. We propose a lightweight approach based on a deep convolutional neural network (CNN) and a deep super-resolution residual convolutional generative adversarial network to estimate the blur kernel and restore the HR image. The proposed network is end-to-end trainable and performs favorably with respect to existing approaches. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">INFORMATION</span> (2023) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/28067836/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Proceedings Paper </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/34634922" class="text-dark hover-underline">CVGAN: Image Generation with Capsule Vector-VAE</a> </h4> <p class="text-ellipsis-2">Rita Pucci, Christian Micheloni, Gian Luca Foresti, Niki Martinel</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> In this paper, a new model called CV-VAE is proposed based on the VQ-VAE architecture. It replaces the discrete bottleneck represented by the quantization code-book with a capsule layer, addressing the challenge of training deep discrete variable models. The CV-VAE demonstrates successful application of capsules for clustering, reintroducing differentiability in the model bottleneck. The model is trained within the Generative Adversarial Paradigm and shows comparable performance to the original VQGAN and VAE in GAN. Images generated by CVGAN have higher quality and improved interpretability of the training process for the latent representation. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IMAGE ANALYSIS AND PROCESSING, ICIAP 2022, PT I</span> (2022) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/34634922/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Automation &amp; Control Systems </span> </div> <h4> <a href="https://www.peeref.com/works/23520435" class="text-dark hover-underline">Lord of the Rings: Hanoi Pooling and Self-Knowledge Distillation for Fast and Accurate Vehicle Reidentification</a> </h4> <p class="text-ellipsis-2">Niki Martinel, Matteo Dunnhofer, Rita Pucci, Gian Luca Foresti, Christian Micheloni</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> Vehicle reidentification is crucial for intelligent surveillance systems and smart transportation, but it faces challenges such as occlusions, color and illumination changes, and orientation issues. Researchers have proposed an approach to learn robust feature representations without the need for extra-labeled data and increasing computational complexity. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON INDUSTRIAL INFORMATICS</span> (2022) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/23520435/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Engineering, Biomedical </span> </div> <h4> <a href="https://www.peeref.com/works/26468957" class="text-dark hover-underline">Deep convolutional feature details for better knee disorder diagnoses in magnetic resonance images</a> </h4> <p class="text-ellipsis-2">Matteo Dunnhofer, Niki Martinel, Christian Micheloni</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This study proposes a CNN architecture called MRPyrNet for the automatic diagnosis of knee injuries in MRI images. By extracting more relevant features from the knee region, the method improves the diagnostic capability. Experimental results demonstrate that MRPyrNet significantly enhances the diagnostic capability for anterior cruciate ligament tear and meniscal tear compared to baseline methods. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">COMPUTERIZED MEDICAL IMAGING AND GRAPHICS</span> (2022) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/26468957/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Robotics </span> </div> <h4> <a href="https://www.peeref.com/works/23003767" class="text-dark hover-underline">Weakly-Supervised Domain Adaptation of Deep Regression Trackers via Reinforced Knowledge Distillation</a> </h4> <p class="text-ellipsis-2">Matteo Dunnhofer, Niki Martinel, Christian Micheloni</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> The study introduces a new approach to enhance the accuracy of deep regression trackers and apply them to various robotic vision domains. By utilizing weakly-supervised adaptation strategy and knowledge distillation, real-time speed and high accuracy tracking performance are achieved on embedded devices. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE ROBOTICS AND AUTOMATION LETTERS</span> (2021) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/23003767/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/37759211" class="text-dark hover-underline">Deep Pyramidal Pooling With Attention for Person Re-Identification</a> </h4> <p class="text-ellipsis-2">Niki Martinel, Gian Luca Foresti, Christian Micheloni</p> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON IMAGE PROCESSING</span> (2020) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/37759211/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Automation &amp; Control Systems </span> </div> <h4> <a href="https://www.peeref.com/works/6055550" class="text-dark hover-underline">A UAV Video Dataset for Mosaicking and Change Detection From Low-Altitude Flights</a> </h4> <p class="text-ellipsis-2">Danilo Avola, Luigi Cinque, Gian Luca Foresti, Niki Martinel, Daniele Pannone, Claudio Piciarelli</p> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON SYSTEMS MAN CYBERNETICS-SYSTEMS</span> (2020) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" 
href="https://www.peeref.com/works/6055550/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/20277336" class="text-dark hover-underline">Adaptive neural tree exploiting expert nodes to classify high-dimensional data</a> </h4> <p class="text-ellipsis-2">Shadi Abpeikar, Mehdi Ghatee, Gian Luca Foresti, Christian Micheloni</p> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">NEURAL NETWORKS</span> (2020) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/20277336/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 "> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Information Systems </span> </div> <h4> <a href="https://www.peeref.com/works/21105002" class="text-dark hover-underline">Deep interactive encoding with capsule networks for image classification</a> </h4> <p class="text-ellipsis-2">Rita Pucci, Christian Micheloni, Gian Luca Foresti, Niki Martinel</p> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">MULTIMEDIA TOOLS AND APPLICATIONS</span> (2020) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/21105002/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> </div> <div id="articles_from_journal" class="tab-pane "> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> 
Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/83104964" class="text-dark hover-underline">Siamese-DETR for Generic Multi-Object Tracking</a> </h4> <p class="text-ellipsis-2">Qiankun Liu, Yichen Li, Yuqi Jiang, Ying Fu</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This paper proposes a simple and effective method for Generic MOT, namely Siamese-DETR. This method does not require expensive pre-trained language models and fine-grained category annotations, and only requires commonly used detection datasets for training. Different from existing methods, Siamese-DETR leverages the inherent object queries in DETR variants and is optimized through dynamic matching training strategy and query tracking. Experimental results show that Siamese-DETR outperforms existing methods on the GMOT-40 dataset. </div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON IMAGE PROCESSING</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/83104964/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/84191810" class="text-dark hover-underline">A Deep Stochastic Adaptive Fourier Decomposition Network for Hyperspectral Image Classification</a> </h4> <p class="text-ellipsis-2">Chunbo Cheng, Liming Zhang, Hong Li, Lei Dai, Wenjing Cui</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This paper proposes a deep network architecture, SAFDNet, based on the theory of stochastic adaptive Fourier decomposition to 
address the issues of requiring a large number of labeled images and learning a large number of parameters in deep learning classification methods. SAFDNet utilizes the unsupervised feature extraction capability of SAFD, only requiring a small number of labeled images for classifier training, and uses fewer convolution kernels, reducing the number of parameters. Experimental results show that SAFDNet outperforms other deep learning methods in HSI classification. </div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON IMAGE PROCESSING</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/84191810/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/84066494" class="text-dark hover-underline">Exploring the Spectral Prior for Hyperspectral Image Super-Resolution</a> </h4> <p class="text-ellipsis-2">Qian Hu, Xinya Wang, Junjun Jiang, Xiao-Ping Zhang, Jiayi Ma</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This text introduces a novel hyperspectral super-resolution network named SNLSR, which can address two challenges of existing methods in handling hyperspectral data, namely, high-dimensional nature and low information utilization efficiency. The network converts the super-resolution problem into the abundance domain and uses a spatial preserve decomposition network and a spatial spectral attention network to estimate and super-resolve the abundance representation, while using a spectral-wise non-local attention module to mine similar pixels for high-frequency detail recovery. 
Experiments show that this method is superior to other state-of-the-art methods in both visual and metric aspects. </div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON IMAGE PROCESSING</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/84066494/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/83497327" class="text-dark hover-underline">Style Consistency Unsupervised Domain Adaptation Medical Image Segmentation</a> </h4> <p class="text-ellipsis-2">Lang Chen, Yun Bian, Jianbin Zeng, Qingquan Meng, Weifang Zhu, Fei Shi, Chengwei Shao, Xinjian Chen, Dehui Xiang</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This paper proposes a style consistency unsupervised domain adaptation image segmentation method to mitigate the domain shift between source and target domains in different medical imaging modalities. The method improves the integrity and segmentation accuracy of the organs of interest through techniques such as local phase-enhanced style fusion, phase consistency discriminator, style consistency estimation, and style consistency entropy. Experimental results show that the method outperforms existing methods. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON IMAGE PROCESSING</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/83497327/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/84087942" class="text-dark hover-underline">CWSCNet: Channel-Weighted Skip Connection Network for Underwater Object Detection</a> </h4> <p class="text-ellipsis-2">Long Chen, Yunzhou Xie, Yaxin Li, Qi Xu, Junyu Dong</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This paper proposes a novel channel-weighted skip connection network (CWSCNet) for learning multiple hyper-fusion features to improve multi-scale underwater object detection. The channel-weighted skip connection (CWSC) module in CWSCNet can adaptively adjust the importance of different channels, thereby eliminating feature heterogeneity, enhancing the compatibility of different feature maps, and serving as an effective feature selection strategy that enables the network to focus on learning channels with more object-related information. Experimental results show that CWSCNet achieves comparable or state-of-the-art performance in underwater object detection. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON IMAGE PROCESSING</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/84087942/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/83125452" class="text-dark hover-underline">Exploring Video Denoising in Thermal Infrared Imaging: Physics-Inspired Noise Generator, Dataset, and Model</a> </h4> <p class="text-ellipsis-2">Lijing Cai, Xiangyu Dong, Kailai Zhou, Xun Cao</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This research focuses on thermal infrared video denoising, constructs a relevant dataset, and proposes a multi-domain infrared video denoising network model. This model can effectively denoise and can be applied to commercial cameras and downstream visual tasks. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON IMAGE PROCESSING</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/83125452/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/83541320" class="text-dark hover-underline">Learning a Non-Locally Regularized Convolutional Sparse Representation for Joint Chromatic and Polarimetric Demosaicking</a> </h4> <p class="text-ellipsis-2">Yidong Luo, Junchao Zhang, Jianbo Shao, Jiandong Tian, Jiayi Ma</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> The division of focal plane color polarization camera becomes the mainstream in polarimetric imaging, but the current color polarization demosaicking methods have problems. To solve this problem, a non-locally regularized convolutional sparse regularization model is proposed, which can be used to recall more information and convert the CPDM task into an energy function to be solved by ADMM optimization. Experimental results show that this method is superior to the current state-of-the-art methods in terms of quantitative measurement and visual quality. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON IMAGE PROCESSING</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/83541320/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/84258883" class="text-dark hover-underline">AnlightenDiff: Anchoring Diffusion Probabilistic Model on Low Light Image Enhancement</a> </h4> <p class="text-ellipsis-2">Cheuk-Yiu Chan, Wan-Chi Siu, Yuk-Hee Chan, H. Anthony Chan</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This paper presents AnlightenDiff, an anchoring diffusion model for low light image enhancement. The model enhances the low light image to a well-exposed image by iterative refinement and uses an anchoring mechanism to ensure that the enhanced result remains faithful to the input. In addition, a diffusion feature perceptual loss is proposed for the diffusion-based model to utilize different loss functions in the image domain. Experimental results show that AnlightenDiff has a significant effect on low light enhancement and achieves high perceptual quality results. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON IMAGE PROCESSING</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/84258883/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/83090375" class="text-dark hover-underline">Incrementally Adapting Pretrained Model Using Network Prior for Multi-Focus Image Fusion</a> </h4> <p class="text-ellipsis-2">Xingyu Hu, Junjun Jiang, Chenyang Wang, Xianming Liu, Jiayi Ma</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This paper proposes a multi-focus image fusion model that combines the general knowledge of the supervised pre-trained backbone and the external priors optimized on specific test samples to improve the performance of image fusion. The model uses the Incremental Network Prior Adaptation framework to integrate the pre-trained features into a small prior network. Experiments show that this method outperforms existing supervised and unsupervised learning methods on both synthetic and real datasets. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON IMAGE PROCESSING</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/83090375/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/83104963" class="text-dark hover-underline">SemiRS-COC: Semi-Supervised Classification for Complex Remote Sensing Scenes With Cross-Object Consistency</a> </h4> <p class="text-ellipsis-2">Qiang Liu, Jun Yue, Yang Kuang, Weiying Xie, Leyuan Fang</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This paper proposes a novel semi-supervised classification method, SemiRS-COC, for complex RS scenes. It generates reliable object-level pseudo-labels by exploiting the similarity between foreground objects in RS images, effectively addressing the issues of multiple background objects and significant intra-class differences. Experiments show that the proposed method outperforms the state-of-the-art methods on three widely-adopted RS datasets. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON IMAGE PROCESSING</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/83104963/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/84388488" class="text-dark hover-underline">Cross-Scope Spatial-Spectral Information Aggregation for Hyperspectral Image Super-Resolution</a> </h4> <p class="text-ellipsis-2">Shi Chen, Lefei Zhang, Liangpei Zhang</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This passage introduces a cross-scope spatial-spectral Transformer (CST) for hyperspectral image super-resolution. CST effectively captures long-range spatial and spectral similarities by designing cross-attention mechanisms in spatial and spectral dimensions. Experiments show that CST outperforms other methods both quantitatively and visually. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON IMAGE PROCESSING</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/84388488/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/84065540" class="text-dark hover-underline">Dual Consensus Anchor Learning for Fast Multi-View Clustering</a> </h4> <p class="text-ellipsis-2">Yalan Qin, Chuan Qin, Xinpeng Zhang, Guorui Feng</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This paper proposes a Dual consensus Anchor Learning for Fast multi-view clustering (DALF) method, which guarantees the cluster structure correspondence between anchor graph and partition on large-scale multi-view datasets. DALF jointly learns anchors, constructs anchor graphs, and performs partitions under a unified framework with rank constraints imposed on the built Laplacian graphs and orthogonal constraints on the centroid representations. Experiments show that DALF is effective and efficient on different multi-view datasets. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON IMAGE PROCESSING</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/84065540/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/84066282" class="text-dark hover-underline">Learning Transferable Conceptual Prototypes for Interpretable Unsupervised Domain Adaptation</a> </h4> <p class="text-ellipsis-2">Junyu Gao, Xinhong Ma, Changsheng Xu</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This paper proposes an inherently interpretable method, TCPL, for unsupervised domain adaptation. By designing a hierarchical prototypical module and a self-predictive consistent pseudo-label strategy, it can simultaneously interpret and improve the processes of knowledge transfer and decision-making. Experiments show that the method is effective and outperforms existing methods. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON IMAGE PROCESSING</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/84066282/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/83496172" class="text-dark hover-underline">Change Representation and Extraction in Stripes: Rethinking Unsupervised Hyperspectral Image Change Detection With an Untrained Network</a> </h4> <p class="text-ellipsis-2">Bin Yang, Yin Mao, Licheng Liu, Leyuan Fang, Xinxin Liu</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> This paragraph mainly describes the current situation and problems of hyperspectral image change detection methods based on deep learning, and proposes a new unsupervised CD method, StripeCD, which represents and models stripe changes by constructing a new feature space and optimizing modeling, thereby improving the accuracy and reliability of detection. This method performs well on three widely used datasets and has the potential for further research on untrained networks. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON IMAGE PROCESSING</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/83496172/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 "> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Artificial Intelligence </span> </div> <h4> <a href="https://www.peeref.com/works/83961896" class="text-dark hover-underline">CrossDiff: Exploring Self-Supervised Representation of Pansharpening via Cross-Predictive Diffusion Model</a> </h4> <p class="text-ellipsis-2">Yinghui Xing, Litao Qu, Shizhou Zhang, Kai Zhang, Yanning Zhang, Lorenzo Bruzzone</p> <div class="d-flex mb-3"> <div class="p-3 rounded bg-light-blue"> <strong>Summary:</strong> In this paper, we propose a cross-predictive diffusion model named CrossDiff for self-supervised representation learning in pansharpening. The model pre-trains the UNet structure by introducing a cross-predictive pretext task and freezes the encoders in the second stage, training only the fusion head to adapt to the pansharpening task. Experiments show that the model is superior in effectiveness and generalization ability compared to existing methods. 
</div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON IMAGE PROCESSING</span> (2024) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/83961896/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> </div> </div> </div> </div> </div> </div> <div class="modal fade" id="export-citation" tabindex="-1"> <div class="modal-dialog"> <div class="modal-content"> <div class="modal-header"> <button type="button" class="close" data-dismiss="modal"><span>&times;</span></button> <h4 class="modal-title">Export Citation <b class="text-primary"></b></h4> </div> <div class="modal-body"> <div class="my-3 px-4 f16"> <form action="https://www.peeref.com/works/citation/download" method="GET" target="_blank"> <div class="radio"> <label> <input type="radio" name="fileType" value="PlainText" checked> <strong>Plain Text</strong> </label> </div> <div class="radio"> <label> <input type="radio" name="fileType" value="RIS"> <strong>RIS</strong> <span>- Export format compatible with most major reference management software such as Endnote and Zotero</span> </label> </div> <div class="radio"> <label> <input type="radio" name="fileType" value="BibTeX"> <strong>BibTeX</strong> <span>- Export format compatible with LaTeX</span> </label> </div> <input type="hidden" name="articleId" value="83628583"> <div class="mt-lg-4 text-center"> <button class="citation-download-btn btn btn-primary"> Export </button> </div> </form> </div> </div> </div> </div> </div> <div class="modal fade" id="share-paper" tabindex="-1"> <div class="modal-dialog"> <div class="modal-content"> <div class="modal-header"> <button type="button" class="close" data-dismiss="modal"><span>&times;</span></button> <h4 class="modal-title">Share Paper <b class="text-primary"></b></h4> </div> <div class="modal-body"> <div class="my-4"> <div class="social-share"> 
<a class="social-share-item email" href="mailto:?subject=Lightweight Prompt Learning Implicit Degradation Estimation Network for Blind Super Resolution&amp;body=https://www.peeref.com/works/83628583" target="_blank"> <img src="https://peeref-open.s3.amazonaws.com/images/social-share-email.svg" height="16"> </a> <a class="social-share-item linkedin" href="https://www.linkedin.com/shareArticle/?url=https://www.peeref.com/works/83628583" target="_blank"> <img src="https://peeref-open.s3.amazonaws.com/images/social-share-linkedin.svg" height="16"> </a> <a class="social-share-item twitter" href="https://twitter.com/share?url=https://www.peeref.com/works/83628583" target="_blank"> <img src="https://peeref-open.s3.amazonaws.com/images/social-share-twitter.svg" height="16"> </a> <a class="social-share-item wechat" href="javascipt:;" onclick="window.open('https://api.qrserver.com/v1/create-qr-code/?size=154x154&data=https://www.peeref.com/works/83628583', 'newwindow', 'height=300, width=300')"> <img src="https://peeref-open.s3.amazonaws.com/images/social-share-wechat.svg" height="16"> </a> <a class="social-share-item facebook" href="https://www.facebook.com/sharer/sharer.php?u=https://www.peeref.com/works/83628583" target="_blank"> <img src="https://peeref-open.s3.amazonaws.com/images/social-share-facebook.svg" height="16"> </a> </div> </div> </div> </div> </div> </div> <back-top-button></back-top-button> </main> <div class="container"> <div class="footer"> <div class="col-md-10"> <div class="d-flex flex-wrap" style="font-size: 15px;"> <a href="https://www.peeref.com/works" class="mx-3 px-2"> Papers </a> <a href="https://www.peeref.com/posters" class="mx-3 px-2"> Posters </a> <a href="https://www.peeref.com/abstracts" class="mx-3 px-2"> Abstracts </a> <a href="https://www.peeref.com/webinars" class="mx-3 px-2"> Webinars </a> <a href="https://www.peeref.com/questions" class="mx-3 px-2"> Questions </a> <a href="https://www.peeref.com/hubs" class="mx-3 px-2"> Hubs </a> <a 
href="https://www.peeref.com/funding" class="mx-3 px-2"> Funding </a> <a href="https://www.peeref.com/journals" class="mx-3 px-2"> Journals </a> <a href="https://www.peeref.com/connect" class="mx-3 px-2"> Connect </a> <a href="https://www.peeref.com/e-collections" class="mx-3 px-2"> Collections </a> <a href="https://www.peeref.com/reviewer" class="mx-3 px-2"> Reviewers </a> <a href="https://www.peeref.com/about" class="mx-3 px-2"> About Us </a> <a href="https://www.peeref.com/about/faq" class="mx-3 px-2"> FAQs </a> <a href="https://www.peeref.com/mobile-app" class="mx-3 px-2"> Mobile App </a> <a href="https://www.peeref.com/about/privacy" class="mx-3 px-2"> Privacy Policy </a> <a href="https://www.peeref.com/about/terms-of-use" class="mx-3 px-2"> Terms of Use </a> </div> </div> <div class="col-md-2"> <div class="d-flex justify-content-end"> <div class="sns"> <a href="https://www.facebook.com/Peeref-113216326987528" target="_blank"> <i class="ivu-icon ivu-icon-logo-facebook"></i> </a> <a href="https://twitter.com/Peeref1" target="_blank"> <i class="ivu-icon ivu-icon-logo-twitter"></i> </a> </div> </div> </div> </div> <div class="copyright"> © Peeref 2019-2025. All rights reserved. 
</div> </div> </div>
<!-- Bundled application scripts (webpack manifest / vendor / app) -->
<script src="https://peeref-open.s3.amazonaws.com/js/manifest.42bb1b6291289ba30c76.js"></script>
<script src="https://peeref-open.s3.amazonaws.com/js/vendor.362294362b899c82d030.js"></script>
<script src="https://peeref-open.s3.amazonaws.com/js/app.ee6e6ad3c472f6e3f93d.js"></script>
<script>
  // Smooth-scrolls from the "reading" button down to the #collection section,
  // offset by the sticky navbar height plus a 30px margin.
  $(function () {
    $('#reading-btn').on('click', function () {
      const $collection = $('#collection');
      // Guard: jQuery .offset() returns undefined for an empty set, so a
      // missing #collection would otherwise throw a TypeError on .top.
      if (!$collection.length) {
        return false;
      }
      // .height() is undefined for a missing navbar; default to 0 so the
      // scroll target stays a number instead of NaN.
      const navHeight = $('#article-sticky-navbar').height() || 0;
      const scrollTarget = $collection.offset().top - navHeight - 30;
      $('html, body').animate({ scrollTop: scrollTarget }, 400);
      return false; // suppress the default anchor navigation
    });
  });
</script>
<script src="https://peeref-open.s3.amazonaws.com/vendor/MathJax-274/MathJax.js?config=default" async></script>
<!-- Google Analytics (gtag.js) -->
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-152048790-1"></script>
<script>
  window.dataLayer = window.dataLayer || [];
  function gtag(){dataLayer.push(arguments);}
  gtag('js', new Date());
  gtag('config', 'UA-152048790-1');
</script>
<!-- Cookie consent banner; configured entirely via data-* attributes -->
<script id="cookiebanner" src="https://peeref-open.s3.amazonaws.com/js/cookiebanner.min.js" data-position="bottom" data-cookie="_uc_" data-expires="Wed, 31 Dec 2025 00:00:00 GMT" data-font-size="15px" data-close-text="Accept" data-close-precedes="false" data-close-style="border-radius: 5px; margin: 3px 15px; padding: 3px 10px; display: inline-block; font-weight: bold; background: rgb(245, 207, 71); color: rgb(16, 75, 125);" data-linkmsg="" data-moreinfo-decoration="underline" data-moreinfo="https://www.peeref.com/about/privacy" data-bg="rgb(16, 75, 125)" data-link="rgb(255, 255, 255)" data-message="Peeref uses cookies to improve your experience. Please read our &lt;a&gt;Privacy Policy&lt;/a&gt; for more details."> </script>
</body>
</html>

Pages: 1 2 3 4 5 6 7 8 9 10