Light-Guided and Cross-Fusion U-Net for Anti-Illumination Image Super-Resolution - Peeref

Article
IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY (2022)
https://www.peeref.com/works/26530573
href="https://www.peeref.com/works/26530573/comments"> Write a Review </a> </li> </ul> </div> <div class="d-flex align-items-center justify-content-md-end flex-wrap flex-md-nowrap"> <div class="mr-3 mt-3 mt-md-0 flex-shrink-0"> <a href="https://doi.org/10.1109/TCSVT.2022.3194169" target="_blank" class="btn btn-warning btn-circle"> <i class="ivu-icon ivu-icon-md-copy f16"></i> <strong>Get Full Text</strong> </a> </div> <div class="mr-3 mt-3 mt-md-0 flex-shrink-0"> <a href="https://www.peeref.com/works/26530573/add-to-collection" class="btn btn-success btn-circle"> <strong>Add to Collection</strong> </a> </div> <div class="mr-3 mt-3 mt-md-0 flex-shrink-0"> <button class="btn btn-success btn-circle" id="reading-btn"> <strong>Further Reading</strong> </button> </div> <div class="flex-shrink-0 mt-3 mt-md-0"> <div class="dropdown"> <button class="font-weight-bold f24 ivu-btn ivu-btn-default ivu-btn-circle ivu-btn-large ivu-btn-icon-only dropdown-toggle" data-toggle="dropdown"> <i class="ivu-icon ivu-icon-md-more"></i> </button> <ul class="dropdown-menu dropdown-menu-right"> <li> <a href="#" data-target="#export-citation" data-toggle="modal"> <i class="ivu-icon ivu-icon-md-quote text-muted mr-1"></i> Export Citation </a> </li> <li> <a href="#" data-target="#share-paper" data-toggle="modal"> <i class="ivu-icon ivu-icon-md-share-alt text-muted mr-1"></i> Share Paper </a> </li> <li> <a href="https://www.peeref.com/works/26530573/references"> <i class="ivu-icon ivu-icon-md-list text-muted mr-1"></i> References </a> </li> </ul> </div> </div> </div> </div> </div> </div> <div id="article-details" class="container"> <div class="col-md-4 px-0 pr-md-3"> <div class="f15 panel-box rounded shadow-none border"> <div class="mb-3 pb-3"> <h4 class="mt-0">Journal</h4> <div class="f16"> <h5 class="title f16"> <a href="https://www.peeref.com/journals/3369/ieee-transactions-on-circuits-and-systems-for-video-technology"> IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY </a> </h5> <span> Volume 32, Issue 12, Pages 8436-8449 </span> </div> </div> <div class="mb-3 pb-3"> <h4 class="mt-0">Publisher</h4> <div class="f16"> <h5 class="title f16 text-primary"> IEEE-INST ELECTRICAL ELECTRONICS ENGINEERS INC </h5> <div class="my-2"> DOI: 10.1109/TCSVT.2022.3194169 </div> </div> </div> <div class="mb-3 pb-3"> <h4 class="mt-0">Keywords</h4> <div class="f16"> Lighting; Image reconstruction; Image enhancement; Robustness; Interference; Estimation; Superresolution; Image super-resolution; low-light image; anti-illumination; intensity estimation; cross-fusion </div> </div> <div class="mb-3 pb-3"> <h4 class="mt-0">Categories</h4> <div class="f16"> <span class="d-block"> <a href="https://www.peeref.com/works/list?category=Engineering%2C+Electrical+%26+Electronic" target="_blank" class="text-dark btn btn-link p-0 text-left"> Engineering, Electrical &amp; Electronic </a> </span> </div> </div> <div class="mb-3 pb-3"> <h4 class="mt-0">Funding</h4> <div class="f16"> <ol class=""> <li>National Natural Science Foundation of China [51774281]</li> </ol> </div> </div> </div> <div class="f15 panel-box rounded shadow-none border"> <h4 class="mt-0 text-center">Ask authors/readers for more resources</h4> <div class="requests"> <div class="requests-item"> <div class="icon"> <img src="https://peeref-open.s3.amazonaws.com/images/file.png" alt=""> </div> <h4>Protocol</h4> <p> <a href="https://www.peeref.com/works/26530573/resource" class="btn btn-outline-primary btn-sm"> Community support </a> </p> </div> <div class="requests-item"> <div 
class="icon"> <img src="https://peeref-open.s3.amazonaws.com/images/experiment.png" alt=""> </div> <h4>Reagent</h4> <p> <a href="https://www.peeref.com/works/26530573/resource" class="btn btn-outline-primary btn-sm"> Community support </a> </p> </div> </div> </div> </div> <div class="col-md-8 px-0 pl-md-3"> <div id="article-summary-panel" class="mb-4"> <ul class="nav nav-tabs" style="list-style: none; padding-left: 0;"> <li class="active"> <a href="#ai_summary" data-toggle="tab" class="summary-tab mx-0 f16 text-dark"> <strong>Automated Summary</strong> <strong class="text-danger ml-1"><i>New</i></strong> </a> </li> <li class=""> <a href="#raw_abstract" data-toggle="tab" class="abstract-tab mx-0 f16 text-dark"> <strong>Abstract</strong> </a> </li> </ul> <div class="tab-content border border-top-0"> <div id="ai_summary" class="tab-pane active"> <div class="summary-panel panel-box mb-0 rounded shadow-none"> <div class="f16">This paper proposes a novel approach called LCUN for single image super-resolution (SISR), which improves both the texture details and lighting of low-resolution images. LCUN utilizes a U-Net for SISR (SRU) to reconstruct super-resolution (SR) images and introduces an Advanced Fusion Block (AFB) and an Intensity Estimation Unit (IEU) to enhance lighting effects and texture details.</div> </div> </div> <div id="raw_abstract" class="tab-pane "> <div class="abstract-panel panel-box mb-0 rounded shadow-none"> <div class="f16">The learning-based methods for single image super- resolution (SISR) can reconstruct realistic details, but they suffer severe performance degradation for low-light images because of their ignorance of negative effects of illumination, and even produce overexposure for unevenly illuminated images. In this paper, we pioneer an anti-illumination approach toward SISR named Light-guided and Cross-fusion U-Net (LCUN), which can simultaneously improve the texture details and lighting of low-resolution images. In our design, we develop a U-Net for SISR (SRU) to reconstruct super- resolution (SR) images from coarse to fine, effectively suppressing noise and absorbing illuminance information. In particular, the proposed Intensity Estimation Unit (IEU) generates the light intensity map and innovatively guides SRU to adaptively brighten inconsistent illumination. Further, aiming at efficiently utilizing key features and avoiding light interference, an Advanced Fusion Block (AFB) is developed to cross-fuse low-resolution features, reconstructed features and illuminance features in pairs. Moreover, SRU introduces a gate mechanism to dynamically adjust its composition, overcoming the limitations of fixed-scale SR. LCUN is compared with the retrained SISR methods and the combined SISR methods on low-light and uneven-light images. 
Authors
Deqiang Cheng, Liangliang Chen, Chen Lv, Lin Guo, Qiqi Kou

Reviews
Primary Rating: 4.7 (not enough ratings)
Secondary Ratings: Novelty -, Significance -, Scientific rigor -

Recommended

Related
class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Geochemistry &amp; Geophysics </span> </div> <h4> <a href="https://www.peeref.com/works/33563128" class="text-dark hover-underline">Hyperspectral Image Super-Resolution Network Based on Cross-Scale Nonlocal Attention</a> </h4> <p class="text-ellipsis-2">Shuangliang Li, Yugang Tian, Cheng Wang, Hongxian Wu, Shaolan Zheng</p> <div class="d-flex mb-3"> <div class="flex-shrink-0 d-none d-sm-block"> <img src="https://peeref-open.s3.amazonaws.com/storage/images/covers/3389.jpg" alt="" class="border mr-3" width="100"> </div> <div class="p-3 rounded" style="background-color: #e8f3ff;"> <strong>Summary:</strong> Hyperspectral image super-resolution aims to fuse low-spatial-resolution HSI and high-spatial-resolution multispectral/panchromatic image to obtain high-spatial-resolution HSI. Existing fusion methods overlook the significant spectral and spatial resolution difference between the input images. To address this issue, a cross spectral-scale and shift-window-based cross spatial-scale nonlocal attention network (CSSNet) is designed to effectively fuse the input images and maintain consistency in spectral and spatial features. </div> </div> <div class="d-flex justify-content-between"> <p class="font-weight-bold"> <span class="text-primary">IEEE TRANSACTIONS ON GEOSCIENCE AND REMOTE SENSING</span> (2023) </p> <div class="flex-shrink-0"> <a class="btn btn-outline-primary btn-sm" href="https://www.peeref.com/works/33563128/add-to-collection" target="_blank"> <strong>Add to Collection</strong> </a> </div> </div> </div> <div class="my-4 border-bottom"> <div> <span class="d-inline-block badge badge-blue"> Article </span> <span class="d-inline-block badge badge-cyan"> Computer Science, Information Systems </span> </div> <h4> <a href="https://www.peeref.com/works/81765926" class="text-dark hover-underline">Semantic-Aware Guided Low-Light Image Super-Resolution</a> </h4> <p class="text-ellipsis-2">Sheng Ren, Rui Cao, Wenxue Tan, Yayuan Tang</p> <div class="d-flex mb-3"> <div class="flex-shrink-0 d-none d-sm-block"> <img src="https://peeref-open.s3.amazonaws.com/storage/images/covers/10017.jpg" alt="" class="border mr-3" width="100"> </div> <div class="p-3 rounded" style="background-color: #e8f3ff;"> <strong>Summary:</strong> This paper proposes a semantic-aware guided low-light image super-resolution method to address the issues of low brightness and missing details in low-light image super-resolution reconstruction. The method utilizes the prior knowledge of the semantic network module through the semantic perception guided module, self-calibrated light adjustment module, and lightweight super-resolution module, improving the stability and robustness of the output brightness enhancement features and further enhancing the super-resolution reconstruction capability. Experiments show that the proposed method outperforms other methods in both qualitative and quantitative analysis of low-light image super-resolution reconstruction, demonstrating its high efficiency and effectiveness. 
Article · Computer Science, Artificial Intelligence
Low-light image enhancement based on multi-illumination estimation
Xiaomei Feng, Jinjiang Li, Zhen Hua, Fan Zhang
Summary: The study proposes a low-light image enhancement method based on multi-illumination estimation, which effectively eliminates the effects of low light and improves image quality. By deriving multiple exposure correction images and using Laplacian multi-scale fusion, exposure adjustment and image synthesis were achieved.
APPLIED INTELLIGENCE (2021)

Article · Computer Science, Information Systems
MESR: Multistage Enhancement Network for Image Super-Resolution
Detian Huang, Jian Chen
Summary: In this paper, a multi-stage enhancement image network (MESR) is proposed for super-resolution, which can generate reconstructed images with clearer details through two stages of processing, achieving superior performance compared to existing methods.
IEEE ACCESS (2022)

Article · Computer Science, Artificial Intelligence
Self-Supervised Adaptive Illumination Estimation for Low-Light Image Enhancement
Ting Yu, Shuai Wang, Wei Chen, F. Richard Yu, Victor C. M. Leung, Zijian Tian
Summary: This paper proposes a new illumination smoothing loss and a self-supervised adaptive illumination estimation network to balance the structure-preserving and texture-smoothing of illumination maps. Through a local-global adaptive modulation module, adaptive fusion of deep features is achieved. This method can improve image quality under different lighting conditions, and has better performance and generalization ability than other methods.
IEEE TRANSACTIONS ON EMERGING TOPICS IN COMPUTATIONAL INTELLIGENCE (2024)
Article · Computer Science, Software Engineering
Illumination estimation for nature preserving low-light image enhancement
Kavinder Singh, Anil Singh Parihar
Summary: This paper proposes a new approach for estimating illumination in low-light image enhancement. The approach includes three major tasks: estimation of structure-aware initial illumination, refinement of the estimated illumination, and correction of lightness in refined illumination. A novel structure-aware initial illumination estimation method using a multi-scale guided filtering approach is proposed. The algorithm refines the initial estimation by optimizing a new multi-objective function. Additionally, an adaptive illumination adjustment method is proposed for correcting lightness using the estimated illumination. Qualitative and quantitative analysis demonstrates that the proposed algorithm achieves image enhancement with color constancy and preserves natural details. Performance comparison with state-of-the-art algorithms shows the superiority of the proposed algorithm.
VISUAL COMPUTER (2023)
Article · Engineering, Electrical & Electronic
Cross-SRN: Structure-Preserving Super-Resolution Network With Cross Convolution
Yuqing Liu, Qi Jia, Xin Fan, Shanshe Wang, Siwei Ma, Wen Gao
Summary: In this study, a hierarchical feature exploitation network called Cross-SRN is designed to restore low-resolution images to high-resolution images. The network probes and preserves structural information through multi-scale feature fusion, achieving competitive restoration performance and accurate preservation of edge features.
IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY (2022)

Article · Computer Science, Information Systems
PESiT: Progressive Joint Enhancement and Blind Super-Resolution for Low-Light and Low-Resolution Images Under Total Variation Constraints
He Deng, Kai Cheng, Yuqing Li
Summary: PESiT is a novel progressive joint enhancement and SR strategy for simultaneously improving the contrast and resolution of low-light and low-resolution images. It consists of an enhanced multi-scale Retinex module and a blind SR module, and has been experimentally demonstrated on public datasets to be superior in processing low-light and low-resolution images.
IEEE ACCESS (2024)
Article · Computer Science, Software Engineering
Deep Light Field Spatial Super-Resolution Using Heterogeneous Imaging
Yeyao Chen, Gangyi Jiang, Mei Yu, Haiyong Xu, Yo-Sung Ho
Summary: This paper proposes a deep learning-based LF spatial super-resolution method using heterogeneous imaging (LFSSR-HI). The method utilizes an additional high-resolution traditional camera to capture spatial information and enhance the LF image resolution. Experimental results show that the proposed method outperforms existing methods in both qualitative and quantitative comparisons, and preserves more accurate angular consistency.
IEEE TRANSACTIONS ON VISUALIZATION AND COMPUTER GRAPHICS (2023)

Article · Engineering, Electrical & Electronic
Fast and Lightweight Network for Single Frame Structured Illumination Microscopy Super-Resolution
Xi Cheng, Jun Li, Qiang Dai, Zhenyong Fu, Jian Yang
Summary: In this article, a single-frame structured illumination microscopy (SF-SIM) method based on deep learning is proposed. It achieves real-time and robust super-resolution imaging in low-light and short-exposure environments, and is significantly faster than traditional SIM methods while producing similar results. This method is of great importance for the development of microbiology and medicine.
IEEE TRANSACTIONS ON INSTRUMENTATION AND MEASUREMENT (2022)
Article · Geochemistry & Geophysics
Robust Extraction and Super-Resolution of Low-Resolution Flying Airplane From Satellite Video
De-Lei Chen, Lei Zhang, Hua Huang
Summary: This article proposes a novel model for extracting low-resolution airplanes from satellite videos and enhances their resolution using a reflective symmetry shape prior. Experimental results demonstrate the effectiveness of the method.
IEEE TRANSACTIONS ON GEOSCIENCE AND REMOTE SENSING (2022)

Article · Computer Science, Information Systems
Cross View Capture for Stereo Image Super-Resolution
Xiangyuan Zhu, Kehua Guo, Hui Fang, Liang Chen, Sheng Ren, Bin Hu
Summary: This paper proposes a cross view capture network (CVCnet) for stereo image super-resolution by utilizing both global contextual and local features. By designing a cross view block and a cascaded spatial perception module, the proposed method achieves the best performance in stereo image super-resolution tasks while effectively extracting features.
IEEE TRANSACTIONS ON MULTIMEDIA (2022)
Article · Computer Science, Artificial Intelligence
Lightweight Prompt Learning Implicit Degradation Estimation Network for Blind Super Resolution
Asif Hussain Khan, Christian Micheloni, Niki Martinel
Summary: This paper proposes a novel blind image super-resolution method that implicitly learns the degradation kernel through a lightweight architecture and a novel loss component, and performs efficient deconvolution using a learnable Wiener filter. In addition, a novel degradation-conditioned prompt layer is proposed, which exploits the estimated kernel to guide the focus on discriminative contextual information, thereby improving the reconstruction effect. Experimental results show that this method achieves good performance under different degradation settings, and the number of parameters/FLOPs is significantly reduced.
IEEE TRANSACTIONS ON IMAGE PROCESSING (2024)

Article · Computer Science, Information Systems
CTE-Net: Contextual Texture Enhancement Network for Image Super-Resolution
Dong Liu, Xiaofeng Wang, Ruidong Han, Ningning Bai, Jianpeng Hou, Shanmin Pang
Summary: This study proposes a context texture enhancement network to improve the level of texture details in image super-resolution. The network consists of a multi-level feature aggregation module and a context information enhancement module, which enhance the learning ability and feature representation ability of the network by integrating a context attention mechanism and using local binary patterns to guide the feature selection strategy. Experimental results show that the method performs well on benchmark datasets.
IEEE TRANSACTIONS ON MULTIMEDIA (2024)
Article · Engineering, Electrical & Electronic
Multimode Low-Rank Relaxation and Manifold Regularization for Hyperspectral Image Super-Resolution
Fei Ma, Shuai Huo, Siyu Liu, Feixia Yang
Summary: This article proposes a tensor-based super-resolution model that combines multimode low-rank and graph-based manifold regularization to improve the reconstruction performance of hyperspectral images. The model eliminates the non-uniqueness of tensor rank and suppresses minor components by redefining low-rank relaxation and applying manifold regularization to preserve the spatial-spectral structure. Experimental results show that the model has significant effects in improving the spatial resolution and classification accuracy of reconstructed images.
IEEE TRANSACTIONS ON INSTRUMENTATION AND MEASUREMENT (2024)

From Same Authors

Article · Computer Science, Information Systems
Image Super-Resolution Based on Residual Attention and Multi-Scale Feature Fusion
Qiqi Kou, Jiamin Zhao, Deqiang Cheng, Zhen Su, Xingguang Zhu
Summary: This paper proposes a novel super-resolution reconstruction network based on residual attention and multi-scale feature fusion (RAMF). It extracts multi-scale features using a lightweight multi-scale residual module (LMRM) and further cross-connects them to enrich the information of different receptive fields. The network also uses a dense feature fusion structure and a residual spatial attention module (RSAM) for better utilization and retention of high-frequency feature information. Experimental results demonstrate that RAMF achieves better reconstruction effect with fewer parameters, low computational complexity, fast processing speed, and high objective evaluation index.
IEEE ACCESS (2023)
Article · Energy & Fuels
Fast image processing method for coal particle cluster box dimension measurement and its application in diffusion coefficient testing
Jingjing Liu, Ruihang Liu, Haoxiang Zhang, He Jiang, Qiqi Kou, Deqiang Cheng, Jiansheng Qian
Summary: A new method is proposed in this paper to determine the diffusion coefficients of irregular coal particles, and a feasible solution for quantitatively assessing the influence of particle shapes on the diffusion coefficient is provided. The diffusion coefficients can be effectively obtained using image processing techniques and inverse problem-based numerical simulation. The results show that the diffusion coefficient increases with the box dimension of the contour, and the particle shape has a considerable impact on determining the gas diffusion coefficient.
FUEL (2023)

Article · Chemistry, Analytical
Coal Flow Foreign Body Classification Based on ESCBAM and Multi-Channel Feature Fusion
Qiqi Kou, Haohui Ma, Jinyang Xu, He Jiang, Deqiang Cheng
Summary: A novel network based on ESCBAM and multichannel feature fusion is proposed in this paper to address the issues of belt scratching and tearing, coal stacking, and plugging during coal transportation. The proposed method combines multi-channel feature fusion, an information fusion network, and an ESCBAM attention mechanism to achieve effective classification of foreign bodies on the belt conveyor.
SENSORS (2023)
Article · Computer Science, Information Systems
Advertising Image Saliency Prediction Method Based on Score Level Fusion
Qiqi Kou, Ruihang Liu, Chen Lv, He Jiang, Deqiang Cheng
Summary: Currently, most saliency prediction algorithms are designed for natural images and show limitations when applied to advertising images. To address this issue, this paper proposes a saliency prediction algorithm specifically for advertising images. By integrating text candidate regions based on intensity features and an improved MESR algorithm, a two-dimensional text confidence score is obtained. Additionally, a saliency confidence score is obtained from an improved natural image saliency prediction network. These scores are then fused using a score level fusion strategy to generate the final saliency prediction map. Experimental results demonstrate that the proposed model achieves high accuracy, robustness, and real-time performance in advertising images, highlighting its practical and commercial value.
IEEE ACCESS (2023)

Article · Engineering, Electrical & Electronic
Sequences consistency feature learning for video-based person re-identification
Kai Zhao, Deqiang Cheng, Qiqi Kou, Jiahan Li, Ruihang Liu
Summary: This study introduces a novel sequences consistency feature learning framework for video-based person re-identification. By utilizing deep neural networks and sequences consistency loss to learn sequences-invariant features for pedestrians, it effectively reduces intra-class distance and improves performance.
ELECTRONICS LETTERS (2022)
Article · Engineering, Electrical & Electronic
Structure-Preserving and Color-Restoring Up-Sampling for Single Low-Light Image
Liangliang Chen, Lin Guo, Deqiang Cheng, Qiqi Kou
Summary: This study proposes a structure-preserving and color-restoring up-sampling method for single low-light image super-resolution. By distinguishing reflectance and illumination, the limitations of existing methods on low-light images are addressed, and the quality of super-resolution texture is further improved through the reconstruction of gradient information.
IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY (2022)

Article · Computer Science, Information Systems
Locally Regularized Collaborative Representation and an Adaptive Low-Rank Constraint for Single Image Superresolution
Rui Gao, Deqiang Cheng, Liangliang Chen, Qiqi Kou
Summary: A learning-based superresolution reconstruction method is proposed in this study, which improves the quality of recovered images by introducing a local structure prior and a shape-adaptive low-rank constraint, and solving the superresolution model with an iterative optimization algorithm.
MOBILE INFORMATION SYSTEMS (2022)
Article · Engineering, Multidisciplinary
Stereo Matching Algorithm Based on HSV Color Space and Improved Census Transform
Chen Lv, Jiahan Li, Qiqi Kou, Huandong Zhuang, Shoufeng Tang
Summary: The stereo matching algorithm proposed in this study is based on HSV color space and improved census transform, aiming to reduce calculation amount, enhance robustness, and improve matching accuracy by addressing the impact of noise and amplitude distortion. Experimental results demonstrate that the algorithm effectively suppresses noise interference and achieves higher matching accuracy.
MATHEMATICAL PROBLEMS IN ENGINEERING (2021)

Article · Engineering, Electrical & Electronic
Unsupervised Person Re-Identification Based on Measurement Axis
Jiahan Li, Deqiang Cheng, Ruihang Liu, Qiqi Kou, Kai Zhao
Summary: The main focus of unsupervised person re-identification is to cluster unlabeled samples in the target domain, with a particular emphasis on mining deep semantic information and improving the combination of source and target domains. By designing a measurement axis component and implementing a new loss function, the study significantly enhances the network's migration ability, resulting in superior accuracy compared to existing techniques in experimental results across two person re-identification domains.
IEEE SIGNAL PROCESSING LETTERS (2021)
Article · Optics
Content-guided deep residual network for single image super-resolution
Liangliang Chen, Qiqi Kou, Deqiang Cheng, Jie Yao
OPTIK (2020)

Article · Computer Science, Information Systems
Adaptive Diagonal Total-Variation Generative Adversarial Network for Super-Resolution Imaging
Zhang San-You, Cheng De-Qiang, Jiang Dai-Hong, Kou Qi-Qi, Ma Lu
IEEE ACCESS (2020)

From Same Journal

Article · Engineering, Electrical & Electronic
Small Sample Image Segmentation by Coupling Convolutions and Transformers
Hao Qi, Huiyu Zhou, Junyu Dong, Xinghui Dong
Summary: This study proposes a dual-stream small sample image segmentation network, ICCT-UNet, which parallelizes a CNN stream and a Transformer stream, exchanges features within each block, and finally uses a residual fusion module to obtain the overall segmentation. Experiments show that this network performs well on medical and defect images, outperforming or at least comparable to similar products.
IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY (2024)
Article · Engineering, Electrical & Electronic
Learning to Search a Lightweight Generalized Network for Medical Image Fusion
Pan Mu, Guanyao Wu, Jinyuan Liu, Yuduo Zhang, Xin Fan, Risheng Liu
Summary: This study proposes an Auto-searching Light-weighted Multi-source Fusion network, ALMFnet, for medical image fusion. The network combines software and hardware knowledge in a generalized model that extracts and refines multi-source features, and it introduces hardware constraints and segmentation masks to reduce model complexity and preserve important details. Experimental results show that the model outperforms previous methods in both quantitative scores and model complexity.
IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY (2024)

Article · Engineering, Electrical & Electronic
THISNet: Tooth Instance Segmentation on 3D Dental Models via Highlighting Tooth Regions
Pengcheng Li, Chenqiang Gao, Fangcen Liu, Deyu Meng, Yan Yan
Summary: THISNet is a novel method for automatic tooth instance segmentation on 3D dental models. It highlights tooth regions to improve accuracy and integrates global context information. The method is trained end to end, which is simpler and more efficient than multi-stage training. Experimental results show that THISNet has great potential in various dental clinical applications.
IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY (2024)
Article · Engineering, Electrical & Electronic
A Versatile Multimodal Learning Framework for Zero-Shot Emotion Recognition
Fan Qi, Huaiwen Zhang, Xiaoshan Yang, Changsheng Xu
Summary: This study proposes a versatile zero-shot multi-modal emotion recognition framework that refines emotion label embeddings, decomposes modal features, fuses multi-modal representations, and exploits emotion-guided decoders to improve the recognition of unseen emotion labels. It achieves superior performance on multiple datasets.
IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY (2024)
Article · Engineering, Electrical & Electronic
Latency-Aware Neural Architecture Performance Predictor With Query-to-Tier Technique
Bicheng Guo, Lilin Xu, Tao Chen, Peng Ye, Shibo He, Haoyu Liu, Jiming Chen
Summary: Neural Architecture Search (NAS) is a powerful tool for automating the design of effective image and video processing DNNs, and ranking accuracy has been advocated as the basis for an efficient NAS performance predictor. The previous contrastive method addresses the ranking problem by comparing pairs of architectures and predicting their relative performance; however, it only considers the ranking between the two involved architectures and neglects the overall quality distribution of the search space, which may lead to generalization issues. Instead, we propose to let the performance predictor focus on the global quality level of a specific architecture and automatically learn tier embeddings of the entire search space with learnable queries. The proposed method, dubbed Neural Architecture Ranker with Query-to-Tier technique (NARQ2T), globally explores the quality tiers of the search space and classifies each architecture into the tier it belongs to. The predictor thus gains knowledge of the performance distribution of the search space, which helps it generalize its ranking ability to new datasets more easily. Thanks to the encoder-decoder design, the method can predict the latency of the searched model without sacrificing performance prediction. At the same time, the global quality distribution facilitates the search phase by directly sampling candidates according to the statistics of the quality tiers, without training a search algorithm such as reinforcement learning or an evolutionary algorithm, thereby simplifying the NAS pipeline and saving computational overhead. NARQ2T achieves state-of-the-art performance on two widely used NAS research datasets, and extensive experiments validate the effectiveness of the designed method.
IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY (2024)
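The tier-based search idea in this summary can be illustrated with a toy sketch: a stand-in predictor assigns candidate architectures to quality tiers, and the search phase samples directly from the predicted top tier instead of training an RL or evolutionary controller. The features, classifier, and tier count below are all assumptions for illustration, not NARQ2T's actual components.

```python
# Toy tier-based candidate selection in the spirit of the summary above (all names illustrative).
import numpy as np

rng = np.random.default_rng(0)
num_candidates, feat_dim, num_tiers = 200, 16, 5

# Stand-ins for encoded architectures and a trained tier classifier.
arch_features = rng.normal(size=(num_candidates, feat_dim))
tier_weights = rng.normal(size=(feat_dim, num_tiers))

logits = arch_features @ tier_weights
tier_probs = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
predicted_tier = tier_probs.argmax(axis=1)          # tier 0 = best quality tier (by convention here)

# Search phase: sample candidates directly from the predicted top tier,
# instead of training an RL or evolutionary search controller.
top_tier_ids = np.flatnonzero(predicted_tier == 0)
sampled = rng.choice(top_tier_ids, size=min(10, top_tier_ids.size), replace=False)
print("candidates to evaluate:", sampled)
```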
Article · Engineering, Electrical & Electronic
Patch Similarity Self-Knowledge Distillation for Cross-View Geo-Localization
Songlian Li, Min Hu, Xiongwu Xiao, Zhigang Tu
Summary: Cross-view geo-localization is challenging because of differences in scene context and object scale, which existing methods underestimate. This paper proposes a self-distillation framework, PaSS-KD, that provides local and multi-scale knowledge for cross-view image feature extraction and representation, allowing the model to track scene-context changes and sense object-scale changes. Experiments show that the method improves both image retrieval performance and fine-grained localization accuracy.
IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY (2024)

Article · Engineering, Electrical & Electronic
Absolute Pose Estimation With a Known Direction by Motion Decoupling
Yinlong Liu, Guang Chen, Alois Knoll
Summary: This paper presents an absolute pose estimation method based on motion decoupling, which uses a known prior gravity direction to enhance robustness. By exploiting geometric constraints, the rigid pose is decoupled and corresponding algorithms are proposed. To suppress outliers, a branch-and-bound algorithm and global voting are employed. The method can also solve the problem without known 2D-3D correspondences. Experiments demonstrate its feasibility and superiority.
IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY (2024)

Article · Engineering, Electrical & Electronic
Learning a Single Convolutional Layer Model for Low Light Image Enhancement
Yuantong Zhang, Baoxin Teng, Daiqin Yang, Zhenzhong Chen, Haichuan Ma, Gang Li, Wenpeng Ding
Summary: This paper proposes a Single Convolutional Layer Model (SCLM) for low-light image enhancement. It achieves global low-light enhancement through structural re-parameterization and introduces a local adaptation module to handle the varying exposure levels of different image regions. Experiments show that the method outperforms competing approaches while using fewer parameters and lower inference complexity. The code will be made public.
IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY (2024)
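Structural re-parameterization, which the SCLM summary refers to, is easy to demonstrate in isolation: parallel convolution branches used during training can be folded into a single convolution for inference. The PyTorch sketch below shows the generic 3x3 + 1x1 folding trick, not the paper's actual enhancement model.

```python
# Generic structural re-parameterization: fold parallel 3x3 and 1x1 branches into one 3x3 conv.
import torch
import torch.nn.functional as F

cin, cout = 3, 3
w3 = torch.randn(cout, cin, 3, 3)   # 3x3 branch weights
b3 = torch.randn(cout)
w1 = torch.randn(cout, cin, 1, 1)   # 1x1 branch weights
b1 = torch.randn(cout)

x = torch.randn(1, cin, 32, 32)

# Training-time structure: two parallel branches summed together.
y_branches = F.conv2d(x, w3, b3, padding=1) + F.conv2d(x, w1, b1)

# Inference-time structure: pad the 1x1 kernel to 3x3 (centred), add kernels and biases.
w_merged = w3 + F.pad(w1, [1, 1, 1, 1])
b_merged = b3 + b1
y_merged = F.conv2d(x, w_merged, b_merged, padding=1)

print(torch.allclose(y_branches, y_merged, atol=1e-5))  # True: same output, single layer
```

The merged layer is mathematically identical to the multi-branch one, which is how a "single convolutional layer" model can keep training-time capacity while staying cheap at inference.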
Article · Engineering, Electrical & Electronic
Weakly-Supervised Video Anomaly Detection With Snippet Anomalous Attention
Yidan Fan, Yongxin Yu, Wenhuan Lu, Yahong Han
Summary: This paper addresses the detection of abnormal events in untrimmed videos and proposes an anomalous attention mechanism to cope with the lack of frame-wise labels in weakly-supervised video anomaly detection. The mechanism generates snippet-level anomalous attention without pseudo-label supervision, and a multi-branch supervision module that learns different regions of the video further improves detection and anomaly localization accuracy. Experiments show that the method is effective on benchmark datasets.
IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY (2024)

Article · Engineering, Electrical & Electronic
EUICN: An Efficient Underwater Image Compression Network
Mengyao Li, Liquan Shen, Xia Hua, Zhaoyi Tian
Summary: This paper proposes an efficient underwater image compression network (EUICN) that exploits the underwater imaging transmission map (T-map) and the high correlation of underwater images to compress them more efficiently through T-map-based quantization and mixture entropy coding. Experimental results show that EUICN outperforms existing methods in terms of PSNR and MS-SSIM.
IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY (2024)
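As a loose illustration of letting a transmission map steer quantization, the NumPy toy below varies the quantization step with a T-map so that heavily attenuated regions are quantized more coarsely. This is only a guess at the flavor of T-map-guided quantization, not EUICN's actual scheme; the latent and T-map here are synthetic stand-ins.

```python
# Toy spatially varying quantization guided by a transmission map (illustrative only).
import numpy as np

rng = np.random.default_rng(1)
latent = rng.normal(size=(64, 64))               # stand-in for a learned latent
t_map = np.clip(rng.random((64, 64)), 0.1, 1.0)  # synthetic transmission map in (0, 1]

base_step = 0.1
step = base_step / t_map                         # coarser steps where transmission is low
quantized = np.round(latent / step) * step

# Coarser quantization (larger error, fewer bits spent) in low-transmission regions.
err = np.abs(latent - quantized)
print("mean |error|, high transmission:", err[t_map > 0.8].mean())
print("mean |error|, low transmission :", err[t_map < 0.3].mean())
```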
Article · Engineering, Electrical & Electronic
Cross-Level Attentive Feature Aggregation for Change Detection
Guangxing Wang, Gong Cheng, Peicheng Zhou, Junwei Han
Summary: This article focuses on change detection in optical image pairs and proposes a novel approach with two attentive feature aggregation schemes for handling cross-level features. It emphasizes building strong, rich pyramid feature representations for comprehensive matching and differencing, and uses progressive fusion of difference feature pyramids with an upsampling head to achieve stable feature fusion when specifying change regions. Experiments show that the approach achieves state-of-the-art performance.
IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY (2024)

Article · Engineering, Electrical & Electronic
Improving Image-Text Matching With Bidirectional Consistency of Cross-Modal Alignment
Zhe Li, Lei Zhang, Kun Zhang, Yongdong Zhang, Zhendong Mao
Summary: In this paper, we propose a novel bidirectional consistency network (BOOM) for cross-modal alignment, which achieves more accurate cross-modal semantic alignment by imposing explicit consistency constraints in both directions. Based on three aspects reflected by alignment consistency, we design a systematic multi-granularity consistency constraint that corrects the bidirectional cross-modal alignment between words and regions from three perspectives: maximum, distribution, and order. Extensive experiments on two benchmark datasets demonstrate that BOOM achieves state-of-the-art performance.
IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY (2024)
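The notion of bidirectional alignment consistency can be sketched generically: compute word-to-region and region-to-word attention from one similarity matrix and penalize their disagreement. The NumPy snippet below illustrates that general idea under assumed shapes; it is not BOOM's multi-granularity constraint.

```python
# Generic bidirectional cross-modal alignment consistency (illustrative sketch).
import numpy as np

def softmax(x, axis):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

rng = np.random.default_rng(0)
words = rng.normal(size=(6, 32))     # 6 word embeddings (assumed dimensions)
regions = rng.normal(size=(9, 32))   # 9 region embeddings

sim = words @ regions.T              # (words, regions) similarity matrix
w2r = softmax(sim, axis=1)           # each word attends over regions
r2w = softmax(sim, axis=0)           # each region attends over words

# Consistency: the two alignment distributions should agree; penalise the gap.
consistency_loss = np.abs(w2r - r2w).mean()
print(f"bidirectional consistency penalty: {consistency_loss:.4f}")
```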
Article · Engineering, Electrical & Electronic
Online Multi-Scale Classification and Global Feature Modulation for Robust Visual Tracking
Qi Gao, Mingfeng Yin, Xiang Wu, Di Liu, Yuming Bo
Summary: This paper presents a novel online multi-scale classification and global feature modulation method, ATOM+, for robust visual tracking. The method enhances target features, generates reliable matching responses, and fuses spatial context information to improve tracking performance, achieving strong results in complex scenes while running in real time.
IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY (2024)

Article · Engineering, Electrical & Electronic
Satellite Video Object Tracking Based on Location Prompts
Jiahao Wang, Fang Liu, Licheng Jiao, Yingjia Gao, Hao Wang, Lingling Li, Puhua Chen, Xu Liu, Shuo Li
Summary: This paper proposes a method, SVLPNet, that leverages location prompts and refines the feature extractor and bounding-box refinement module to address the challenges of object tracking in satellite videos. The method also integrates motion features to handle illumination variations. Experiments show that SVLPNet has great potential for satellite video object tracking; the source code and raw results will be released online.
IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY (2024)
Article · Engineering, Electrical & Electronic
CKDH: CLIP-Based Knowledge Distillation Hashing for Cross-Modal Retrieval
Jiaxing Li, Wai Keung Wong, Lin Jiang, Xiaozhao Fang, Shengli Xie, Yong Xu
Summary: Deep hashing-based cross-modal retrieval suffers from several problems: an inability to fully capture semantic relevance and coexistent information, the need for a more comprehensive similarity measure, and the lack of a scalable deployment framework. To address them, a CLIP-based knowledge distillation hashing method is proposed that fine-tunes CLIP to extract visual features, uses a graph attention network to enhance text features, introduces a more comprehensive similarity measure to supervise training of the student model, and extracts deep features and generates hash codes through a lightweight network. Experimental results show that this method is superior to several state-of-the-art methods.
IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY (2024)
15px;"> <a href="https://www.peeref.com/webinars" class="mx-3 px-2"> Webinars </a> <a href="https://www.peeref.com/posters" class="mx-3 px-2"> Posters </a> <a href="https://www.peeref.com/questions" class="mx-3 px-2"> Questions </a> <a href="https://www.peeref.com/hubs" class="mx-3 px-2"> Hubs </a> <a href="https://www.peeref.com/funding" class="mx-3 px-2"> Funding </a> <a href="https://www.peeref.com/journals" class="mx-3 px-2"> Journals </a> <a href="https://www.peeref.com/works" class="mx-3 px-2"> Papers </a> <a href="https://www.peeref.com/connect" class="mx-3 px-2"> Connect </a> <a href="https://www.peeref.com/e-collections" class="mx-3 px-2"> Collections </a> <a href="https://www.peeref.com/reviewer" class="mx-3 px-2"> Reviewers </a> <a href="https://www.peeref.com/about" class="mx-3 px-2"> About Us </a> <a href="https://www.peeref.com/about/faq" class="mx-3 px-2"> FAQs </a> <a href="https://www.peeref.com/mobile-app" class="mx-3 px-2"> Mobile App </a> <a href="https://www.peeref.com/about/privacy" class="mx-3 px-2"> Privacy Policy </a> <a href="https://www.peeref.com/about/terms-of-use" class="mx-3 px-2"> Terms of Use </a> </div> </div> <div class="col-md-2"> <div class="d-flex justify-content-end"> <div class="sns"> <a href="https://www.facebook.com/Peeref-113216326987528" target="_blank"> <i class="ivu-icon ivu-icon-logo-facebook"></i> </a> <a href="https://twitter.com/Peeref1" target="_blank"> <i class="ivu-icon ivu-icon-logo-twitter"></i> </a> </div> </div> </div> </div> <div class="copyright"> © Peeref 2019-2024. All rights reserved. </div> </div> </div> <script src="https://peeref-open.s3.amazonaws.com/js/manifest.e1ceb1810ee9bc1c1c1e.js"></script> <script src="https://peeref-open.s3.amazonaws.com/js/vendor.2773c0dd7b2f0df4f857.js"></script> <script src="https://peeref-open.s3.amazonaws.com/js/app.38bf6fe79ed31ff5692d.js"></script> <script> $(function() { $('#reading-btn').click(function() { const navHeight = $("#article-sticky-navbar").height(); const readingAreaTop = $("#collection").offset().top; const readingAreaHeight = readingAreaTop - navHeight - 30; $("html, body").animate( { scrollTop: readingAreaHeight }, 400 ); return false; }) }); </script> <script src="https://peeref-open.s3.amazonaws.com/vendor/MathJax-274/MathJax.js?config=default" async></script> <script async src="https://www.googletagmanager.com/gtag/js?id=UA-152048790-1"></script> <script> window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'UA-152048790-1'); </script> <script type="text/javascript" id="cookiebanner" src="https://peeref-open.s3.amazonaws.com/js/cookiebanner.min.js" data-position="bottom" data-cookie="_uc_" data-expires="Wed, 31 Dec 2025 00:00:00 GMT" data-font-size="15px" data-close-text="Accept" data-close-precedes="false" data-close-style="border-radius: 5px; margin: 3px 15px; padding: 3px 10px; display: inline-block; font-weight: bold; background: rgb(245, 207, 71); color: rgb(16, 75, 125);" data-linkmsg="" data-moreinfo-decoration="underline" data-moreinfo="https://www.peeref.com/about/privacy" data-bg="rgb(16, 75, 125)" data-link="rgb(255, 255, 255)" data-message="Peeref uses cookies to improve your experience. Please read our &lt;a&gt;Privacy Policy&lt;/a&gt; for more details."> </script> </body> </html>
