Deep Residual Learning for Image Recognition: A Survey

Review

Muhammad Shafiq and Zhaoquan Gu

Applied Sciences 2022, 12(18), 8972; https://doi.org/10.3390/app12188972
Published: 7 September 2022

Abstract: Deep Residual Networks have recently been shown to significantly improve the performance of neural networks trained on ImageNet, with results beating all previous methods on this dataset by large margins in the image classification task. However, the meaning of these impressive numbers and their implications for future research are not yet fully understood. In this survey, we explain what Deep Residual Networks are, how they achieve their excellent results, and why their successful implementation in practice represents a significant advance over existing techniques. We also discuss open questions related to residual learning as well as possible applications of Deep Residual Networks beyond ImageNet. Finally, we discuss issues that still need to be resolved before deep residual learning can be applied to more complex problems.

Keywords: deep residual learning for image recognition; deep residual learning; image processing; image recognition
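As a minimal, illustrative sketch of the residual learning idea the survey covers (not code from the paper; the class name, layer sizes, and PyTorch usage are assumptions), a basic residual block computing ReLU(F(x) + x), where F is two 3x3 convolutions and x passes through an identity shortcut, might look like this:

    import torch
    import torch.nn as nn

    class BasicResidualBlock(nn.Module):
        """Minimal residual block: output = ReLU(F(x) + x).

        Illustrative sketch only; names and layer sizes are assumptions,
        not taken from the surveyed paper.
        """
        def __init__(self, channels: int):
            super().__init__()
            self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False)
            self.bn1 = nn.BatchNorm2d(channels)
            self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False)
            self.bn2 = nn.BatchNorm2d(channels)
            self.relu = nn.ReLU(inplace=True)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            identity = x                      # shortcut (identity) connection
            out = self.relu(self.bn1(self.conv1(x)))
            out = self.bn2(self.conv2(out))
            out = out + identity              # residual addition: F(x) + x
            return self.relu(out)

    # Quick shape check on a dummy input
    block = BasicResidualBlock(channels=64)
    y = block(torch.randn(1, 64, 32, 32))
    print(y.shape)  # torch.Size([1, 64, 32, 32])

Because the block only adds its learned residual F(x) to the input, stacking many such blocks keeps an identity path through the network, which is the property usually credited with making very deep models trainable.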
href="/2076-3417/12/18">Issue 18</a> </div> <div class="breadcrumb__element"> <a href="#">10.3390/app12188972</a> </div> </div> </header> <div id="main-content" class=""> <div class="row full-width row-fixed-left-column"> <div id="left-column" class="content__column large-3 medium-3 small-12 columns"> <div class="content__container"> <a href="/journal/applsci"> <img src="https://pub.mdpi-res.com/img/journals/applsci-logo.png?8600e93ff98dbf14" alt="applsci-logo" title="Applied Sciences" style="max-height: 60px; margin: 0 0 0 0;"> </a> <div class="generic-item no-border"> <a class="button button--color button--full-width js-journal-active-only-link js-journal-active-only-submit-link UC_ArticleSubmitButton" href="https://susy.mdpi.com/user/manuscripts/upload?form%5Bjournal_id%5D%3D90" data-disabledmessage="creating new submissions is not possible."> Submit to this Journal </a> <a class="button button--color button--full-width js-journal-active-only-link UC_ArticleReviewButton" href="https://susy.mdpi.com/volunteer/journals/review" data-disabledmessage="volunteering as journal reviewer is not possible."> Review for this Journal </a> <a class="button button--color-inversed button--color-journal button--full-width js-journal-active-only-link UC_ArticleEditIssueButton" href="/journalproposal/sendproposalspecialissue/applsci" data-path="/2076-3417/12/18/8972" data-disabledmessage="proposing new special issue is not possible."> Propose a Special Issue </a> </div> <div class="generic-item link-article-menu show-for-small"> <a href="#" class="link-article-menu show-for-small"> <span class="closed">&#9658;</span> <span class="open" style="display: none;">&#9660;</span> Article Menu </a> </div> <div class="hide-small-down-initially UI_ArticleMenu"> <div class="generic-item"> <h2>Article Menu</h2> </div> <ul class="accordion accordion__menu" data-accordion data-options="multi_expand:true;toggleable: true"> <li class="accordion-navigation"> <a href="#academic_editors" class="accordion__title">Academic Editor</a> <div id="academic_editors" class="content active"> <div class="academic-editor-container " title="Department of Informatics, Systems and Communication, University of Milano-Bicocca, 20126 Milan, Italy"> <div class="sciprofiles-link" style="display: inline-block"><a class="sciprofiles-link__link" href="https://sciprofiles.com/profile/86273?utm_source=mdpi.com&amp;utm_medium=website&amp;utm_campaign=avatar_name" target="_blank" rel="noopener noreferrer"><img class="sciprofiles-link__image" src="/bundles/mdpisciprofileslink/img/unknown-user.png" style="width: auto; height: 16px; border-radius: 50%;"><span class="sciprofiles-link__name">Giancarlo Mauri</span></a></div> </div> </div> </li> <li class="accordion-direct-link"> <a href="/2076-3417/12/18/8972/scifeed_display" data-reveal-id="scifeed-modal" data-reveal-ajax="true">Subscribe SciFeed</a> </li> <li class="accordion-direct-link js-article-similarity-container" style="display: none"> <a href="#" class="js-similarity-related-articles">Recommended Articles</a> </li> <li class="accordion-navigation"> <a href="#related" class="accordion__title">Related Info Link</a> <div id="related" class="content UI_ArticleMenu_RelatedLinks"> <ul> <li class="li-link"> <a href="https://scholar.google.com/scholar?q=Deep%20Residual%20Learning%20for%20Image%20Recognition%3A%20A%20Survey" target="_blank" rel="noopener noreferrer">Google Scholar</a> </li> </ul> </div> </li> <li class="accordion-navigation"> <a href="#authors" class="accordion__title">More by Authors 
Links</a> <div id="authors" class="content UI_ArticleMenu_AuthorsLinks"> <ul class="side-menu-ul"> <li> <a class="expand" onclick='$(this).closest("li").next("div").toggle(); return false;'>on DOAJ</a> </li> <div id="AuthorDOAJExpand" style="display:none;"> <ul class="submenu"> <li class="li-link"> <a href='http://doaj.org/search/articles?source=%7B%22query%22%3A%7B%22query_string%22%3A%7B%22query%22%3A%22%5C%22Muhammad%20Shafiq%5C%22%22%2C%22default_operator%22%3A%22AND%22%2C%22default_field%22%3A%22bibjson.author.name%22%7D%7D%7D' target="_blank" rel="noopener noreferrer">Shafiq, M.</a> <li> </li> <li class="li-link"> <a href='http://doaj.org/search/articles?source=%7B%22query%22%3A%7B%22query_string%22%3A%7B%22query%22%3A%22%5C%22Zhaoquan%20Gu%5C%22%22%2C%22default_operator%22%3A%22AND%22%2C%22default_field%22%3A%22bibjson.author.name%22%7D%7D%7D' target="_blank" rel="noopener noreferrer">Gu, Z.</a> <li> </li> </ul> </div> <li> <a class="expand" onclick='$(this).closest("li").next("div").toggle(); return false;'>on Google Scholar</a> </li> <div id="AuthorGoogleExpand" style="display:none;"> <ul class="submenu"> <li class="li-link"> <a href="https://scholar.google.com/scholar?q=Muhammad%20Shafiq" target="_blank" rel="noopener noreferrer">Shafiq, M.</a> <li> </li> <li class="li-link"> <a href="https://scholar.google.com/scholar?q=Zhaoquan%20Gu" target="_blank" rel="noopener noreferrer">Gu, Z.</a> <li> </li> </ul> </div> <li> <a class="expand" onclick='$(this).closest("li").next("div").toggle(); return false;'>on PubMed</a> </li> <div id="AuthorPubMedExpand" style="display:none;"> <ul class="submenu"> <li class="li-link"> <a href="http://www.pubmed.gov/?cmd=Search&amp;term=Muhammad%20Shafiq" target="_blank" rel="noopener noreferrer">Shafiq, M.</a> <li> </li> <li class="li-link"> <a href="http://www.pubmed.gov/?cmd=Search&amp;term=Zhaoquan%20Gu" target="_blank" rel="noopener noreferrer">Gu, Z.</a> <li> </li> </ul> </div> </ul> </div> </li> </ul> <span style="display:none" id="scifeed_hidden_flag"></span> <span style="display:none" id="scifeed_subscribe_url">/ajax/scifeed/subscribe</span> </div> </div> <div class="content__container responsive-moving-container large medium active hidden" data-id="article-counters"> <div id="counts-wrapper" class="row generic-item no-border" data-equalizer> <div id="js-counts-wrapper__views" class="small-12 hide columns count-div-container"> <a href="#metrics" > <div class="count-div" data-equalizer-watch> <span class="name">Article Views</span> <span class="count view-number"></span> </div> </a> </div> <div id="js-counts-wrapper__citations" class="small-12 columns hide count-div-container"> <a href="#metrics" > <div class="count-div" data-equalizer-watch> <span class="name">Citations</span> <span class="count citations-number Var_ArticleMaxCitations">-</span> </div> </a> </div> </div> </div> <div class="content__container"> <div class="hide-small-down-initially"> <ul class="accordion accordion__menu" data-accordion data-options="multi_expand:true;toggleable: true"> <li class="accordion-navigation"> <a href="#table_of_contents" class="accordion__title">Table of Contents</a> <div id="table_of_contents" class="content active"> <div class="menu-caption" id="html-quick-links-title"></div> </div> </li> </ul> </div> </div> <!-- PubGrade code --> <div id="pbgrd-sky"></div> <script src="https://cdn.pbgrd.com/core-mdpi.js"></script> <style>.content__container { min-width: 300px; }</style> <!-- PubGrade code --> </div> <div id="middle-column" class="content__column 
large-9 medium-9 small-12 columns end middle-bordered"> <div class="middle-column__help"> <div class="middle-column__help__fixed show-for-medium-up"> <span id="js-altmetrics-donut" href="#" target="_blank" rel="noopener noreferrer" style="display: none;"> <span data-badge-type='donut' class='altmetric-embed' data-doi='10.3390/app12188972'></span> <span>Altmetric</span> </span> <a href="#" class="UA_ShareButton" data-reveal-id="main-share-modal" title="Share"> <i class="material-icons">share</i> <span>Share</span> </a> <a href="#" data-reveal-id="main-help-modal" title="Help"> <i class="material-icons">announcement</i> <span>Help</span> </a> <a href="javascript:void(0);" data-reveal-id="cite-modal" data-counterslink = "https://www.mdpi.com/2076-3417/12/18/8972/cite" > <i class="material-icons">format_quote</i> <span>Cite</span> </a> <a href="https://sciprofiles.com/discussion-groups/public/10.3390/app12188972?utm_source=mpdi.com&utm_medium=publication&utm_campaign=discuss_in_sciprofiles" target="_blank" rel="noopener noreferrer" title="Discuss in Sciprofiles"> <i class="material-icons">question_answer</i> <span>Discuss in SciProfiles</span> </a> <a href="#" class="" data-hypothesis-trigger-endorses-tab title="Endorse"> <i data-hypothesis-endorse-trigger class="material-icons" >thumb_up</i> <div data-hypothesis-endorsement-count data-hypothesis-trigger-endorses-tab class="hypothesis-count-container"> ... </div> <span>Endorse</span> </a> <a href="#" data-hypothesis-trigger class="js-hypothesis-open UI_ArticleAnnotationsButton" title="Comment"> <i class="material-icons">textsms</i> <div data-hypothesis-annotation-count class="hypothesis-count-container"> ... </div> <span>Comment</span> </a> </div> <div id="main-help-modal" class="reveal-modal reveal-modal-new" data-reveal aria-labelledby="modalTitle" aria-hidden="true" role="dialog"> <div class="row"> <div class="small-12 columns"> <h2 style="margin: 0;">Need Help?</h2> </div> <div class="small-6 columns"> <h3>Support</h3> <p> Find support for a specific problem in the support section of our website. </p> <a target="_blank" href="/about/contactform" class="button button--color button--full-width"> Get Support </a> </div> <div class="small-6 columns"> <h3>Feedback</h3> <p> Please let us know what you think of our products and services. </p> <a target="_blank" href="/feedback/send" class="button button--color button--full-width"> Give Feedback </a> </div> <div class="small-6 columns end"> <h3>Information</h3> <p> Visit our dedicated information section to learn more about MDPI. 
</p> <a target="_blank" href="/authors" class="button button--color button--full-width"> Get Information </a> </div> </div> <a class="close-reveal-modal" aria-label="Close"> <i class="material-icons">clear</i> </a> </div> </div> <div class="middle-column__main "> <div class="page-highlight"> <style type="text/css"> img.review-status { width: 30px; } </style> <div id="jmolModal" class="reveal-modal" data-reveal aria-labelledby="Captcha" aria-hidden="true" role="dialog"> <h2>JSmol Viewer</h2> <div class="row"> <div class="small-12 columns text-center"> <iframe style="width: 520px; height: 520px;" frameborder="0" id="jsmol-content"></iframe> <div class="content"></div> </div> </div> <a class="close-reveal-modal" aria-label="Close"> <i class="material-icons">clear</i> </a> </div> <div itemscope itemtype="http://schema.org/ScholarlyArticle" id="abstract" class="abstract_div"> <div class="js-check-update-container"></div> <div class="html-content__container content__container content__container__combined-for-large__first" style="overflow: auto; position: inherit;"> <div class='html-profile-nav'> <div class='top-bar'> <div class='nav-sidebar-btn show-for-large-up' data-status='opened' > <i class='material-icons'>first_page</i> </div> <a id="js-button-download" class="button button--color-inversed" style="display: none;" href="/2076-3417/12/18/8972/pdf?version=1662547655" data-name="Deep Residual Learning for Image Recognition: A Survey" data-journal="applsci"> <i class="material-icons custom-download"></i> Download PDF </a> <div class='nav-btn'> <i class='material-icons'>settings</i> </div> <a href="/2076-3417/12/18/8972/reprints" id="js-button-reprints" class="button button--color-inversed"> Order Article Reprints </a> </div> <div class='html-article-menu'> <div class='html-first-step row'> <div class='html-font-family large-6 medium-6 small-12 columns'> <div class='row'> <div class='html-font-label large-4 medium-4 small-12 columns'> Font Type: </div> <div class='large-8 medium-8 small-12 columns'> <span class="html-article-menu-option"><i style='font-family:Arial, Arial, Helvetica, sans-serif;' data-fontfamily='Arial, Arial, Helvetica, sans-serif'>Arial</i></span> <span class="html-article-menu-option"><i style='font-family:Georgia1, Georgia, serif;' data-fontfamily='Georgia1, Georgia, serif'>Georgia</i></span> <span class="html-article-menu-option"><i style='font-family:Verdana, Verdana, Geneva, sans-serif;' data-fontfamily='Verdana, Verdana, Geneva, sans-serif' >Verdana</i></span> </div> </div> </div> <div class='html-font-resize large-6 medium-6 small-12 columns'> <div class='row'> <div class='html-font-label large-4 medium-4 small-12 columns'>Font Size:</div> <div class='large-8 medium-8 small-12 columns'> <span class="html-article-menu-option a1" data-percent="100">Aa</span> <span class="html-article-menu-option a2" data-percent="120">Aa</span> <span class="html-article-menu-option a3" data-percent="160">Aa</span> </div> </div> </div> </div> <div class='row'> <div class='html-line-space large-6 medium-6 small-12 columns'> <div class='row'> <div class='html-font-label large-4 medium-4 small-12 columns' >Line Spacing:</div> <div class='large-8 medium-8 small-12 columns'> <span class="html-article-menu-option a1" data-line-height="1.5em"> <i class="fa">&#xf034;</i> </span> <span class="html-article-menu-option a2" data-line-height="1.8em"> <i class="fa">&#xf034;</i> </span> <span class="html-article-menu-option a3" data-line-height="2.1em"> <i class="fa">&#xf034;</i> </span> </div> </div> 
Open Access | Review

Deep Residual Learning for Image Recognition: A Survey

by Muhammad Shafiq 1,* and Zhaoquan Gu 2,3,*

1 Cyberspace Institute of Advanced Technology, Guangzhou University, Guangzhou 510006, China
2 Department of New Networks, Peng Cheng Laboratory, Shenzhen 518055, China
3 Department of Computer Science and Technology, Harbin Institute of Technology, Shenzhen 518055, China
* Authors to whom correspondence should be addressed.

Appl. Sci. 2022, 12(18), 8972; https://doi.org/10.3390/app12188972

Submission received: 9 August 2022 / Revised: 24 August 2022 / Accepted: 6 September 2022 / Published: 7 September 2022

(This article belongs to the Special Issue AI-Based Image Processing)
href="https://pub.mdpi-res.com/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g008.png?1662547732" title=" <strong>Figure 8</strong><br/> &lt;p&gt;Details of the next eight sections.&lt;/p&gt; "> </a> <a href="https://pub.mdpi-res.com/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g009.png?1662547737" title=" <strong>Figure 9</strong><br/> &lt;p&gt;TDL Methods.&lt;/p&gt; "> </a> <a href="https://pub.mdpi-res.com/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g010.png?1662547727" title=" <strong>Figure 10</strong><br/> &lt;p&gt;Basic Building Block of RDL Types.&lt;/p&gt; "> </a> <a href="https://pub.mdpi-res.com/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g011.png?1662547729" title=" <strong>Figure 11</strong><br/> &lt;p&gt;The basic building block of a ResNet.&lt;/p&gt; "> </a> <a href="https://pub.mdpi-res.com/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g012.png?1662547733" title=" <strong>Figure 12</strong><br/> &lt;p&gt;Reduction in Depth and Width.&lt;/p&gt; "> </a> </div> <a class="button button--color-inversed" href="/2076-3417/12/18/8972/notes">Versions&nbsp;Notes</a> </div> </div> <div class="responsive-moving-container small hidden" data-id="article-counters" style="margin-top: 15px;"></div> <div class="html-dynamic"> <section> <div class="art-abstract art-abstract-new in-tab hypothesis_container"> <p> <div><section class="html-abstract" id="html-abstract"> <h2 id="html-abstract-title">Abstract</h2><b>:</b> <div class="html-p">Deep Residual Networks have recently been shown to significantly improve the performance of neural networks trained on ImageNet, with results beating all previous methods on this dataset by large margins in the image classification task. However, the meaning of these impressive numbers and their implications for future research are not fully understood yet. In this survey, we will try to explain what Deep Residual Networks are, how they achieve their excellent results, and why their successful implementation in practice represents a significant advance over existing techniques. We also discuss some open questions related to residual learning as well as possible applications of Deep Residual Networks beyond ImageNet. Finally, we discuss some issues that still need to be resolved before deep residual learning can be applied on more complex problems.</div> </section> <div id="html-keywords"> <div class="html-gwd-group"><div id="html-keywords-title">Keywords: </div><a href="/search?q=deep+residual+learning+for+image+recognition">deep residual learning for image recognition</a>; <a href="/search?q=deep+residual+learning">deep residual learning</a>; <a href="/search?q=image+processing">image processing</a>; <a href="/search?q=image+recognition">image recognition</a></div> <div> </div> </div> </div> </p> </div> </section> </div> <div class="hypothesis_container"> <ul class="menu html-nav" data-prev-node="#html-quick-links-title"> </ul> <div class="html-body"> <section id='sec1-applsci-12-08972' type='intro'><h2 data-nested='1'> 1. Introduction</h2><div class='html-p'>Deep residual learning is a neural network architecture that was proposed in 2015 by He et al. [<a href="#B1-applsci-12-08972" class="html-bibr">1</a>] The paper <span class='html-italic'>Deep Residual Learning for Image Recognition</span> has been cited many times and is one of the most influential papers in the field of computer vision. 
In this paper, we survey the recent advances in deep residual learning. After discussing what deep residual networks are, we review their properties, including stability and trainability. Next, we discuss some recent applications of deep residual networks. Finally, we provide our thoughts on future research directions in deep residual learning and end with open questions. The original work examined the state of the art in deep learning for image recognition and proposed a new method, deep residual learning, which offers significant improvements over existing methods. The author in [1] provides a detailed overview of the proposed approach and its advantages. The proposed deep residual learning is computationally efficient, as it has a small number of parameters and uses simple backpropagation to keep computation costs low.

The authors of [2] also suggest that applications other than image recognition, such as translation and speech recognition, could benefit from deep residual learning. Similarly, the author of [3] presents comparisons between models with different architectures and finds that deep residual models consistently outperform the others. In addition, the author points out various challenges in applying deep residual learning. For instance, how do we deal with saturation and dropout? How do we deal with tasks such as translation, where less data is available? The author concludes by suggesting future research directions for overcoming these challenges, pointing out that combining deep residual learning with neural architecture search, spatial-domain convolutions, constrained adversarial loss functions, and Gaussian-based generative models deserves further study. Couso (2018) [2] proposes an alternative algorithm that maximizes likelihood rather than the mean squared error currently used. They also suggest studying the proposed model for other computer vision tasks such as face detection, segmentation, and object classification. They conclude by pointing out limitations of deep residual learning: compared to traditional methods, it lacks computational efficiency when dealing with larger datasets and thus cannot scale up quickly enough. However, the authors point out that this problem can be solved by clustering the input data into smaller subsets so that only a subset of the total data needs to be processed during each iteration. Similarly, Feng et al. [4] mention that deep residual learning, like any other unsupervised learning method, requires a large amount of unlabeled data. They performed experiments with a small amount of labeled data but were not able to obtain satisfactory results. The authors end their work by mentioning possible solutions: introducing a few labels (which may require human intervention) or adding a fully supervised component.
They also propose to create a dataset that contains images with predefined metadata and to use the metadata as supervision.

The author in [5] concludes that deep residual learning for image recognition is a promising direction in image recognition. They note that it is computationally efficient, more accurate, and suitable for sparse data representations, and they emphasize that it does not depend on complicated handcrafted features or on the topographic organization of the input data. The detailed workflow of our survey is shown in Figure 1.

They conclude their paper by proposing directions for future research, which include combining deep residual learning with neural architecture search, spatial-domain convolutions, constrained adversarial loss functions, and Gaussian-based generative models. They also mention the need to find a way to minimize the negative effect of data noise in deep residual learning. Mindy Yang et al. [6] propose to create a dataset that contains images with predefined metadata and to use the metadata as supervision. They show that deep residual learning for image recognition is computationally expensive because it is sensitive to high-dimensional data, propose to combine deep residual learning with artificial intelligence techniques such as reinforcement learning, and discuss whether or not deep residual learning will help advance machine learning in general.

The study also notes that deep residual learning is successful because it can take advantage of large amounts of training data without requiring much hand engineering or task-specific feature engineering. The author of [7] proposes to develop a new framework that may be required when using deep residual learning; it is difficult to make assumptions about what such tools should deliver in terms of accuracy gains or performance losses because of their complexity. The author states that deep residuals provide more accurate representations of object boundaries than traditional models and also allow for localization without global context. The basic structure is shown in Figure 2.

Zhu [8] notes that standard convolutional neural networks have a limited number of parameters compared to standard feedforward neural networks, which could explain why these models outperform them on certain tasks such as detection, localization, segmentation, tracking, and classification. The author mentions how difficult it would be to design a different objective function for each desired application. They propose an improved form of standard deep residual learning that combines batches into a single input batch and takes the average gradient across all images.
The idea behind this is the hope that all datasets are similar enough that averaging their gradients improves the gradient quality.

The result of implementing these changes was improved computational efficiency while maintaining good accuracy, which might make this model applicable to real-world applications. The proposed improvements were shown to improve deep residual learning's ability to work well with large amounts of high-dimensional data and thus make it useful for many applications. It would be interesting to see whether these improvements in computing power can be applied to fields other than image recognition. The author then discusses future steps and argues that deep residual learning's potential beyond image recognition needs to be explored.

The author also notes that deep residual learning does better than its counterparts at tasks such as detecting objects, localizing objects, and segmenting images. They conclude that it would be worth exploring ways to decrease computation time through cost reductions at early stages in the training process or by finding some way of leveraging shared computation across processes. Finally, the author notes that deep residual learning has been quite successful because it is able to take advantage of large amounts of data without requiring much engineering time. The countries with the most searches on this topic are shown in Figure 3; China leads in this category.

This paper provides a comprehensive survey of Deep Residual Networks for Image Recognition research and proposes some future research directions.

The rest of the paper is organized as follows: In Section 2, we present more about deep residual networks. We then discuss what image recognition is in Section 3. The recent successes in applying deep residual learning for image recognition are discussed in Section 4. Section 5 deals with image recognition. Next, Section 6 discusses the advantages of DRNs. Similarly, Section 7 discusses current research trends, and Section 8 covers traditional deep learning. Section 9 and Section 10 cover the basic building blocks of ResNets and the reduction in depth and width. Similarly, Section 11 presents evaluation metrics for DRNs. Finally, we conclude with our remarks in Section 12, as shown in Figure 4.

2. What Is a Deep Residual Network?

In 2015, a deep residual network (ResNet) was proposed by the authors in [1] for image recognition. It is a type of convolutional neural network (CNN) in which the input from the previous layer is added to the output of the current layer.
This skip connection makes it easier for the network to learn and results in better performance. The ResNet architecture has been successful in a number of tasks, including image classification, object detection, and semantic segmentation. Additionally, since ResNets are made up of stacked layers, these networks can be made arbitrarily deep for an arbitrary level of spatial representation. There are various reasons for the success of the model: the large receptive fields that capture more information about each pixel in an image; the separation between the localization and classification stages; the computational efficiency at higher levels; the efficient encoding schemes with low-complexity arithmetic operations; and the increased accuracy as features are extracted deeper into the network.

Despite these advantages, current ResNets are computationally very expensive. While modern GPUs can perform over one hundred million operations per second, a commonly used architecture of a fully connected layer with ten million weights takes more than two hours to train. This is why the authors in [9] propose to replace some fully connected layers with stochastic pooling layers and to reduce the filter size from 5 × 5 to 3 × 3.

In summary, deep residual learning for image recognition has been shown to be an effective method for image classification tasks. However, similar architectures have not yet been explored for other computer vision tasks such as semantic segmentation or object detection. Several open problems need further exploration when doing so, including computational efficiency at higher levels and training stability; where to add skip connections; network depth versus complexity; biasing of nonlinearities during training; input preprocessing issues such as batch normalization; data augmentation algorithms for improving accuracy on underrepresented classes (for example, nighttime versus daytime images handled by the same classifier network by exploiting spatio-temporal coherence); the practicality of the architectures; and stability, since small local minima do not have a significant impact on generalization performance because big changes happen early rather than late in training, which would allow concurrent tuning of different regions of parameters instead of completely independent tuning.

The major issue with traditional CNNs is that they have to learn the entire feature map, which means that they need a huge number of parameters. This, in turn, means that they are very expensive to train and also slow to run.

ResNets are a family of neural networks that were proposed as an improvement over traditional CNNs. In particular, ResNets use skip connections (described below), which allow them to be much smaller than traditional CNNs while still achieving similar performance. Skip connections can be used in any neural network architecture, but they are particularly useful for convolutional neural networks because they let the network reuse parts of its feature map between layers in different positions; a minimal sketch of such a residual block is given after this paragraph.
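To make the skip connection concrete, the following is a minimal sketch of a basic residual block in which the block's input is added to the output of its convolutional branch. This is an illustrative PyTorch example, not code from the surveyed papers, and the channel count and layer sizes are assumptions.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class BasicResidualBlock(nn.Module):
    """Minimal residual block: output = F(x) + x, where x enters via a skip connection."""

    def __init__(self, channels: int):
        super().__init__()
        # Two 3x3 convolutions form the residual branch F(x).
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(channels)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.bn2(self.conv2(F.relu(self.bn1(self.conv1(x)))))
        # Skip connection: add the unchanged input to the residual branch output.
        return F.relu(out + x)

# Example usage: a 64-channel feature map keeps its shape through the block.
x = torch.randn(1, 64, 32, 32)
y = BasicResidualBlock(64)(x)
print(y.shape)  # torch.Size([1, 64, 32, 32])
```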
Here we have a simple three-layer convolutional network with two convolutional layers followed by a pooling layer. The input is fed into layer 1, which performs its computation and outputs a feature map (which is just an array of numbers). Layer 2 then performs its own computation on top of layer 1's output and produces another feature map. This process repeats until the final layer is reached.

Aryo Michael and Melki Garonga in 2021 [10] also proposed a new residual network with deep residual learning for image recognition, which integrates element-wise pooling with multi-scale features. Their approach combines depthwise separable convolution and deconvolution operations along with 2 × 2 and 3 × 3 convolutions to form different types of layers. For instance, layer 2 is made up of three layers: one that performs 4 × 4 convolution by zero-padding its input images; another that computes 2 × 2 deconvolution (i.e., the transpose of spatial averaging); and a third that performs 3 × 3 convolution. To reduce computational costs, they replace some fully connected layers with more computationally efficient ones, such as stochastic pooling layers. The authors of a related work proposed a new residual network with deep residual learning for image recognition; this hybrid model incorporates both LSTMs and CNNs, and they show that it outperforms comparable architectures.

To reduce computation costs, the authors in [11] likewise propose replacing some fully connected layers with more computationally efficient ones such as stochastic pooling layers. They propose a new residual network with deep residual learning for image recognition, which is a hybrid model that incorporates both LSTMs and CNNs. This proposed architecture outperforms the ResNet-50 benchmark in terms of top-1 and top-5 error rates on the CIFAR-10 dataset with computational cost comparable to the original ResNet-50. Furthermore, there are still a number of open problems that require more research, such as parallelization for faster execution, using learned representations for transfer learning and sparse networks for reducing memory consumption, and using unsupervised feature extraction techniques to obtain meaningful high-level descriptors and visual representations. Another idea is to use unlabeled and semi-labeled images for learning additional task-specific image descriptors and filters. Finally, deep residual learning for image recognition should be investigated for sequences of more than three frames in video and scene understanding.
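For reference, the top-1 and top-5 error rates reported above measure how often the ground-truth class is, respectively, not the single highest-scoring prediction or not among the five highest-scoring predictions. The snippet below is a small illustrative sketch of that computation, not code from the cited work; the random logits and labels are placeholders.

```python
import torch

def top_k_error(logits: torch.Tensor, targets: torch.Tensor, k: int = 5) -> float:
    """Fraction of samples whose true label is NOT among the top-k predictions."""
    topk = logits.topk(k, dim=1).indices          # (batch, k) class indices
    correct = (topk == targets.unsqueeze(1)).any(dim=1)
    return 1.0 - correct.float().mean().item()

# Toy example with random scores for a 10-class problem (e.g., CIFAR-10).
logits = torch.randn(8, 10)
targets = torch.randint(0, 10, (8,))
print("top-1 error:", top_k_error(logits, targets, k=1))
print("top-5 error:", top_k_error(logits, targets, k=5))
```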
Thus, the proposed architecture outperforms the ResNet-50 benchmark in terms of top-one and top-five error rates on the CIFAR-10 dataset at comparable computational cost. Future work should include investigating how deep residual learning for image recognition might function in higher-level applications such as scene understanding or video processing. This could provide a strong foundation for leveraging deep residual learning for image recognition in future tasks such as human pose estimation. For large-scale learning, deep residual learning for image recognition may be able to speed up supervised learning on heterogeneous datasets where the available data volume and computational power are limited. On a smaller scale, deep residual learning for image recognition may improve real-time responsiveness in autonomous vehicles and help them respond better to dynamic environments that include objects that appear, disappear, or change position.

There is untapped potential in these models, because many applications are based on humans annotating their own photographs with different objects and labels. The development of detectors in machine systems will need human assistance to operate effectively. These detectors need to be trained effectively in order for them to take the place of human labor and assist industry [12] at greater levels. Developments have been proposed in vision methods, including deep residual learning for image recognition, where a model uses both neural networks and long short-term memory recurrent neural networks (LSTMs). Such architectures are necessary because neural networks have traditionally had very limited capability when it comes to sequential information. The capabilities of current machines are mostly found in continuous streams or fast frames, but most action happens over extended periods of time, which does not lend itself well to the traditional algorithms used in vision models. These methods need improvement so that systems do not fall behind the humans who will continue to develop them. Reserves need to be planned for the future, and this requires investment in training and in developing computers that are equipped with sophisticated sensors. If sufficient investment is made in deep residual learning for image recognition, it could solve the problem of autonomous machines that cannot identify features in an environment. Human operators would then only input markers for certain features, making machine systems more intelligent. Machine-learning models could then quickly identify objects and text from the map given to them by humans. Humans could maintain control until the system becomes proficient enough to support its users without supervision, but with a continuous stream of feedback from the AI system. This feedback loop would help refine the AI's objectives and approach to specific problems, which in turn allows for refined solutions to any objective. Machines can always become smarter than humans in some aspect of intelligence, but this should not impede our ability to teach them what we know so that they may learn faster than we ever could alone.

3. What Is Image Recognition?

Image recognition is a field of computer vision that deals with the identification and classification of objects in digital images. It is a subset of machine learning, which is a branch of artificial intelligence that deals with the design and development of algorithms that can learn from and make predictions on data. When you take a picture, it could be an image containing any number of items: dogs, cars, people, and so on; there are countless other things in the world besides these. The goal of image recognition is to assign categories to each one, so that when you upload an image to your social media feed or search Google Images you get back information about what is in it and where else you might find it.
For instance, if you were looking at a photo of someone holding their dog, an image recognition application would recognize that there is a person in the photo and show their name as well as the name of their pet.

Image recognition is a problem within computer vision that refers to automatically detecting and understanding a wide range of objects in images. Computer vision can be seen as an artificial version of human sight or photography. There are several steps involved in image recognition. The first step is usually to convert an image into numbers that computers understand. An image contains hundreds of thousands (if not millions) of colors that are made up of red, green, and blue (RGB) components [13]. These colors are turned into data points that form the vectors we call images; each pixel vector has three values, one for each color channel. To reduce the size of the resulting vector and represent only a few shades of a particular color, methods use either linear or non-linear classifiers to create a predictive model that is able to classify new inputs.
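As a concrete illustration of this first step, the short sketch below loads an image, obtains its red, green, and blue channels as a numeric array, and flattens it into the kind of vector a classifier consumes. This is an illustrative example rather than code from the surveyed papers, and the file name is a placeholder.

```python
import numpy as np
from PIL import Image

# Load an image and convert it to an H x W x 3 array of RGB values in [0, 255].
image = Image.open("example.jpg").convert("RGB")   # "example.jpg" is a placeholder path
pixels = np.asarray(image, dtype=np.float32)       # shape: (height, width, 3)

# Each pixel is a 3-value vector, one value per color channel.
print(pixels.shape, pixels[0, 0])                  # e.g. (480, 640, 3) [r g b]

# Flatten into a single feature vector and rescale to [0, 1] for a classifier.
feature_vector = (pixels / 255.0).reshape(-1)
print(feature_vector.shape)                        # e.g. (921600,)
```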
Image recognition is a problem within computer vision, which refers to automatically detecting and understanding a wide range of objects in images.</div></section><section id='sec4-applsci-12-08972' type=''><h2 data-nested='1'> 4. Deep Residual Learning for Image Recognition</h2><section id='sec4dot1-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 4.1. Deep Residual Learning Image Steganalysis</h4><div class='html-p'>Image steganography is a technique that allows for the hiding of data in the image, and this data can be only visible when the image is modified. Deep residual learning image steganalysis is a technique that enables an attacker to find out what information has been moved in the image and how it has been moved. Deep residual learning image steganalysis is a new method to detect steganography. This method is based on deep residual networks that learn the local patch feature of images. The proposed security system is composed of three stages: pre-processing, feature extraction and classification.</div><div class='html-p'>Why is image steganalysis important? Image steganalysis is important because it can help protect against the unauthorized use of copyrighted material, help uncover inappropriate content hidden within images, and uncover potential security threats. What are some challenges in image steganalysis? The three most challenging aspects of image steganalysis are false positives, obtaining robust features from an image to differentiate between noise and data, and developing models that learn well from few training examples. There are several approaches to address these challenges including active learning approaches for extracting good features from an image; however, these approaches often involve expensive human labor or resources. The false positive rate has been shown to be high enough that many large-scale search engines don’t even bother scanning for them because they’re not worth the cost of storage space or computing power required to weed out all those extraneous pictures they come across while looking for matches. That’s why the creators of TinEye, one such search engine, have suggested that machine learning researchers take this problem more seriously. The goal was to develop techniques to detect any personal information added covertly to an image without altering its appearance. For example, consider an image of someone standing on the beach holding up their child. By examining the pixels in an unaltered version of the picture, you might notice telltale signs that some numbers had been written over their head using pixelation techniques. Detecting these kinds of changes isn’t easy to do by hand, so if you want a computer to do it you need a system capable of detecting very small changes made to individual pixels. Once ADRIAN detects tiny pieces of data embedded inside a photo, it will then highlight those areas with color differences around them and allow investigators to zoom in on precisely where there are visible changes. <a href="#applsci-12-08972-f005" class="html-fig">Figure 5</a> shows the next sections.</div></section><section id='sec4dot2-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 4.2. Deep Residual Learning Image Compression</h4><div class='html-p'>Christian Rathgeb eta al in [<a href="#B15-applsci-12-08972" class="html-bibr">15</a>] studied the effect of image compression on the accuracy of deep learning models. 
They found that image compression can reduce the size of training datasets by up to 90% without any significant loss in accuracy. This is because deep learning models are able to learn the relevant features from data more effectively than shallower models. Image compression can also help speed up training times and reduce the amount of memory required to store training data. They looked at different ways of compressing images for use as inputs to convolutional neural networks (CNNs). They noted that arithmetic coding may provide an improvement over Huffman coding due to its ability to avoid rounding errors. However, it may be challenging for arithmetic coding-based methods to achieve similar results as Huffman coding-based methods when encoding high resolution images due to the large number of non-zero coefficients present within such images.</div><div class='html-p'>The authors in investigated whether human vision or machine vision algorithms could outperform one another when identifying objects in natural images. They performed their experiment using their own dataset of animal photographs they had manually annotated. They used LBP Features and COSINE scale space representations in order to compute similarities between pairs of images. Their experiments revealed that humans perform better than machine vision algorithms in tasks involving object recognition, localization, segmentation, etc., while machine vision performs better than humans in tasks involving detection and pose estimation. Machine vision algorithms are also less computationally expensive.</div><div class='html-p'>The authors in [<a href="#B16-applsci-12-08972" class="html-bibr">16</a>] studied how semantic segmentation can be employed to aid applications in various industries including medicine, self-driving cars, and surveillance. They concluded that computer vision models trained via unsupervised learning are capable of producing more accurate results than models trained via supervised learning. Moreover, it was discovered that training methods based on either small or large minibatches performed better than methods based on medium sized minibatches. Finally, it was found that unsupervised training techniques performed better than supervised ones when utilizing noisy labels for data labeling purposes.</div><div class='html-p'>This study confirms that machine vision models based on unsupervised learning can perform just as well as those based on supervised learning, but have the added benefit of being quicker to train.</div><div class='html-p'>However, how semantic segmentation can be employed to aid applications in various industries including medicine, self-driving cars, and surveillance. They concluded that computer vision models trained via unsupervised learning are capable of producing more accurate results than models trained via supervised learning. Moreover, it was discovered that training methods based on either small or large minibatches performed better than methods based on medium sized minibatches.</div></section><section id='sec4dot3-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 4.3. Deep Residual Learning Image Restoration</h4><div class='html-p'>Image restoration [<a href="#B17-applsci-12-08972" class="html-bibr">17</a>] is the problem of removing a uniform blur from an image. It is well-known that information-theoretic approaches to this problem, based on the concept of a log-likelihood ratio operator, can be modelled well by deep neural networks (DNNs). 
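As a simplified illustration of how residual learning is typically applied to restoration (a generic sketch, not the specific architecture of [<a href="#B17-applsci-12-08972" class="html-bibr">17</a>]), a small network can be trained to predict the degradation itself, and the restored image is obtained by subtracting that predicted residual from the input; the example below assumes simple Gaussian noise: <pre><code>
# Sketch of residual learning for image restoration: the network predicts the
# degradation (here, noise) and the restored image is input minus that residual.
import torch
import torch.nn as nn

class ResidualDenoiser(nn.Module):
    def __init__(self, channels=3, width=32, depth=5):
        super().__init__()
        layers = [nn.Conv2d(channels, width, 3, padding=1), nn.ReLU(inplace=True)]
        for _ in range(depth - 2):
            layers += [nn.Conv2d(width, width, 3, padding=1), nn.ReLU(inplace=True)]
        layers += [nn.Conv2d(width, channels, 3, padding=1)]
        self.body = nn.Sequential(*layers)

    def forward(self, noisy):
        return noisy - self.body(noisy)   # subtract the predicted residual

model = ResidualDenoiser()
clean = torch.rand(4, 3, 64, 64)
noisy = clean + 0.1 * torch.randn_like(clean)
loss = nn.functional.mse_loss(model(noisy), clean)
loss.backward()
</code></pre>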
Recent work has shown that DNNs trained on maximising generalisation performance can also be used to solve this task with remarkable effectiveness. This paper describes an extension of such an architecture; we call it Deep Residual Learning (DRL). DRL uses Riemannian geometry to minimise the cost function and achieve an optimal sparse approximation of the true posterior density.</div><div class='html-p'>The author in [<a href="#B18-applsci-12-08972" class="html-bibr">18</a>] studied the use of deep residual learning for image recognition. They found that it can effectively remove noise and improve the performance of image restoration models. In addition, they showed that deep residual learning can be used to improve the accuracy of image classification models. The authors also demonstrated that deep residual learning can be used to improve the performance of object detection models. Finally, they showed that deep residual learning can be used to improve the accuracy of scene recognition models.</div><div class='html-p'>Similarly, the author in [<a href="#B19-applsci-12-08972" class="html-bibr">19</a>] showed that deep residual learning can be used to improve the accuracy of image retrieval models. Similarly, the author in [<a href="#B20-applsci-12-08972" class="html-bibr">20</a>] showed that deep residual learning can be used to improve the accuracy of video captioning models. The author in pointed out that deep residual learning presents an excellent alternative to traditional deep neural networks. Using deep-residual-learning may lead to improvements on accuracy or speed of inference at run time without compromising other objectives such as throughput or energy efficiency [<a href="#B21-applsci-12-08972" class="html-bibr">21</a>,<a href="#B22-applsci-12-08972" class="html-bibr">22</a>]. Similarly, they suggested that this method could be combined with variational methods to handle data sparsity and label noise. Similarly, deep residual learning presents an excellent alternative to traditional deep neural networks. Similarly, they also noted that using deep-residual-learning may lead to improvements in accuracy or speed of inference at run time without compromising other objectives such as throughput or energy efficiency.</div></section><section id='sec4dot4-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 4.4. Deep Residual Learning Sensing Ct Reconstruction</h4><div class='html-p'>Deep Residual Discriminator (Deep Residual Learning Sensing Ct Reconstruction) [<a href="#B23-applsci-12-08972" class="html-bibr">23</a>,<a href="#B24-applsci-12-08972" class="html-bibr">24</a>] is a deep learning model for helping radiologists classify and identify detections, which are placed in the CT image [<a href="#B25-applsci-12-08972" class="html-bibr">25</a>]. The goal of Deep Residual Discriminator is to improve radiologists’ workflow by reducing the time it takes to scan and produce reports, while also improving detection efficacy.</div><div class='html-p'>The author in [<a href="#B26-applsci-12-08972" class="html-bibr">26</a>] studied the problem of image recognition using deep residual learning. They proposed a method that can be used to achieve high accuracy in image recognition tasks. The proposed method is based on the idea of using deep residual learning to improve the accuracy of image recognition models. The authors showed that their method can achieve better accuracy than the state-of-the-art methods on the ImageNet dataset. 
They also analyzed and evaluated various modifications to the deep residual network, and showed that it outperforms other approaches like dropout and batch normalization. The authors noted that there are some limitations to the study. Another limitation is that they only considered convolutional neural networks, which means that their conclusions may not extend to generative adversarial networks or recurrent neural networks. Nevertheless, their results indicate the promise of deep residual learning as an effective way to improve classification accuracy. For example, when comparing the traditional ReLU layer to ReLU + depthwise separable convolution layer for depth 16, the accuracy improved [<a href="#B27-applsci-12-08972" class="html-bibr">27</a>]. When comparing a combination of all three layers (ReLU + depthwise separable convolution + kernel activation) versus ReLU alone at depths 16 and 32, then the combined layer performed significantly better. The results show that including deep residual layers within a neural network has significant benefits on performance. However, the use of many layers slows down computation speed. Future research should explore ways to train large deep networks faster while still achieving good accuracy.</div><div class='html-p'>In conclusion, deep residual learning was shown to provide promising improvement over standard architectures for image recognition. However, additional research needs to be done before we can make firm conclusions about how much improvement it offers and why this approach might work better than others currently available. A major limitation of this research is that only one type of neural network architecture was investigated, meaning that generalizations to other architectures need to be made cautiously. Also, little data were given regarding potential drawbacks to using deep residual learning for training deep networks such as increases in computational complexity or difficulty scaling up for very large networks. Finally, it would have been helpful if the researchers had examined whether specific optimization techniques were applied better with deep residual learning or without it; unfortunately, no such comparison was made here.</div></section><section id='sec4dot5-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 4.5. Deep Residual Learning Hyperparameter Optimization</h4><div class='html-p'>Deep Residual Learning Hyperparameter Optimization [<a href="#B28-applsci-12-08972" class="html-bibr">28</a>] is a method for optimizing the hyperparameters of a Deep Residual Learning model. When optimizing the parameters of a deep residual network for image recognition, there are many factors to consider. The first is the depth of the network. Deeper networks have more layers and can therefore learn more complex features. Shallower networks, on the other hand, are faster to train and may be more efficient in terms of memory usage. Another important parameter is the width of the network, which refers to the number of neurons in each layer. Wider networks can learn more complex features, but are also more expensive to train. Finally, the learning rate is an important parameter that controls how quickly the network learns from data. A higher learning rate means that the network learns faster, but may also be more likely to overfit on the training data. If we use a standard gradient descent algorithm to optimize the parameters of a neural network such as this one, it is not guaranteed that we will find good local minima. 
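In practice, the trade-offs between depth, width, and learning rate are often explored by brute force. The sketch below is a minimal random search, with a placeholder evaluation function standing in for actually training and validating a residual network at each configuration: <pre><code>
# Sketch of random search over depth, width and learning rate.
# `evaluate` is a placeholder; in practice it would train a residual
# network with the given configuration and return validation accuracy.
import random

def evaluate(depth, width, lr):
    # Placeholder score so the sketch runs; replace with real training.
    return random.random()

search_space = {
    "depth": [18, 34, 50],
    "width": [32, 64, 128],
    "lr": [0.1, 0.01, 0.001],
}

best_score, best_cfg = float("-inf"), None
for _ in range(20):
    cfg = {k: random.choice(v) for k, v in search_space.items()}
    score = evaluate(**cfg)
    if score > best_score:
        best_score, best_cfg = score, cfg

print("best configuration:", best_cfg, "score:", round(best_score, 3))
</code></pre> Such a search still guesses blindly.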
It would be better if we had some way of knowing what global minima might look like before starting optimization so that we could search intelligently instead of randomly guessing where they might be.</div><div class='html-p'>Recent work has attempted to do this by using linear classifiers such as SVM’s or k-nearest neighbors to label different regions in feature space based on whether they corresponded with positive or negative examples respectively. Then, these labeled regions were used as starting points for gradient descent optimization, providing us with potential solutions near these regions instead of random ones. While this technique provides us with valuable insight into where to start optimization, it still suffers from problems because these labels aren’t perfect. For example, the labels are only correct 50% of the time, meaning that our search process is less accurate than we would hope. More research is needed to improve upon this technique and make sure that it gives reliable results every time. However, despite its imperfections, this approach does provide new insights into the problem of hyperparameter optimization for deep networks and may help lead to improved methods in the future. One of the main problems for optimization algorithms is that the gradients become too small when you go down into lower layers. Therefore, all traditional optimizers try to converge at a single minimum when in reality there are multiple local minima. One solution proposed was using non-convex strategies like Bayesian Optimization (BO) and genetic algorithms (GA). In BO you estimate gradients at many different points simultaneously, while GA does not rely on gradient information at all. Since both techniques show promise, further research should explore their performance in practice.</div><div class='html-p'>There are many factors to consider when optimizing the parameters of a deep residual network for image recognition. This paper discussed three key considerations: the depth of the network, width of the network, and learning rate. There is currently much ongoing research exploring novel ways to automate this process; however, no clear winner has emerged yet. There are many issues that need to be addressed, including gradient size and convexity. Other approaches include Bayesian optimization and genetic algorithms.</div></section><section id='sec4dot6-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 4.6. Very Deep Convolutional Network Image Recognition</h4><div class='html-p'>In the past few years, convolutional neural networks (CNNs) have revolutionized image recognition by achieving unparalleled accuracy on benchmark datasets. A key ingredient in this success has been the use of very deep CNNs [<a href="#B29-applsci-12-08972" class="html-bibr">29</a>,<a href="#B30-applsci-12-08972" class="html-bibr">30</a>], which are able to learn rich representations of images.</div><div class='html-p'>Recently, the author in [<a href="#B31-applsci-12-08972" class="html-bibr">31</a>] proposed a deep recursive residual network (DRRN) to address the problem of image super-resolution. The proposed DRRN consists of three stages: (1) the downsizing stage, (2) the upsampling stage, and (3) the reconstruction stage. In the downsizing stage, DRRN uses a convolutional neural network (CNN) to downscale an input image into a fixed size using a single channel. Then, in the upsampling stage, DRRN applies an upsampling layer to generate several intermediate images from the downsampled image. 
Finally, in the reconstruction stage, DRRN uses two CNNs to reconstruct an output image from these intermediate images. However, it is hard to find a suitable dataset for super-resolution: in most cases the input images differ from one another, so the same CNN architecture cannot simply be reused across different resolutions. An architecture is needed that accommodates different resolutions without overfitting to low-resolution images, which is difficult; a ResNet-like architecture built from multiple residual blocks may help achieve good performance here. The problem with transfer learning is that, while it often learns a useful representation from a large amount of data, it does not necessarily learn the best representation for the task at hand. For example, a model transferred from a large generic dataset may not perform as well on face recognition as one that was trained directly on faces. Similarly, the author in [<a href="#B32-applsci-12-08972" class="html-bibr">32</a>] proposes a new method for matching software-generated sketches with face photographs using a very deep convolutional neural network (CNN). This method uses two different types of networks: one network is trained on face photographs and another network is trained on sketches. The two networks are combined into one network by using transfer learning. Their experiments show that their method outperforms other state-of-the-art methods in terms of accuracy and generalization capability.</div><div class='html-p'>Shun Moriya and Chihiro Shibata [<a href="#B33-applsci-12-08972" class="html-bibr">33</a>] propose a novel transfer learning method for very deep CNNs for text classification. Their main contribution is a new evaluation method that compares the proposed transfer learning method with two existing methods, namely fine-tuning and feature handover. They also propose a new model ensemble approach that improves the performance of their models by using the best performing model from each ensemble member as an additional feature. Their experiments on five public datasets show that their approach outperforms previous methods and gives competitive results when compared with other state-of-the-art methods.</div><div class='html-p'>Similarly, Afzal et al. in [<a href="#B34-applsci-12-08972" class="html-bibr">34</a>] present the first investigation of how very deep Convolutional Neural Networks (CNNs) can be used to improve document image classification. They also study how advanced training strategies such as multi-network training and model compression techniques can be combined with very deep CNNs to further improve performance. Their results show that very deep CNNs are able to outperform shallow networks, even when using a relatively small amount of training data. They also find that multi-network training significantly improves performance over single-network training, especially for very deep CNNs. Finally, they demonstrate that model compression techniques such as quantization and binarization can be combined with very deep CNNs to achieve an additional 5% reduction in error rate with only a small loss in accuracy. They highlight that their model achieves state-of-the-art performance in document image classification; however, they do not provide quantitative results to support this claim.
They could easily add some performance metrics (e.g., F1 score) on top of their results, and this would make the paper more convincing. The next five sections are shown in <a href="#applsci-12-08972-f006" class="html-fig">Figure 6</a>.</div></section><section id='sec4dot7-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 4.7. Deep Residual Networks Accelerator on FPGA</h4><div class='html-p'>A recent survey by ImageNet found that deep residual networks (ResNets) have become the state-of-the-art in image recognition [<a href="#B35-applsci-12-08972" class="html-bibr">35</a>]. However, training and deploying these models can be prohibitively expensive. FPGAs [<a href="#B36-applsci-12-08972" class="html-bibr">36</a>] offer a high degree of parallelism and energy efficiency, making them an attractive platform for accelerating deep neural networks. In this paper, we will survey the literature on FPGA-based acceleration of deep residual networks. We will discuss the challenges involved in training and deploying these models on FPGAs, and we will survey the current state-of-the-art in FPGA-based deep learning accelerators. Worthy of note are the paper from Hui Liao et al., which presents a systematic comparison between CPU-based and GPU-based training of ResNets.</div></section><section id='sec4dot8-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 4.8. Resnet Models Image Recognition</h4><div class='html-p'>In 2015, a new deep learning model known as a deep residual network (ResNet) was introduced by researchers at Microsoft [<a href="#B1-applsci-12-08972" class="html-bibr">1</a>]. This model has quickly become the state-of-the-art for image recognition tasks. These networks are now part of Convolutional Neural Networks (CNNs) [<a href="#B37-applsci-12-08972" class="html-bibr">37</a>]. They have been used to achieve world records in object classification and detection in many large-scale competitions such as ImageNet, ILSVRC, COCO and PASCAL VOC. Furthermore, they also achieved competitive results on various 3D shape estimation problems. These models are computationally expensive because they require billions of parameters and need hundreds of millions of training images. Consequently, this has led to some speculation that they will never be used outside academia or research laboratories due to their high computation cost; however, recent developments like adopting modern graphics processing units (GPUs) may bring down these costs significantly in the near future. The first significant attempt to train such models using GPUs came from NVIDIA’s [<a href="#B38-applsci-12-08972" class="html-bibr">38</a>] deep learning framework CUDA [<a href="#B39-applsci-12-08972" class="html-bibr">39</a>] back in 2014. However, Nvidia soon found out that there were limitations of scaling the GPU implementations to larger Resnet architectures [<a href="#B34-applsci-12-08972" class="html-bibr">34</a>,<a href="#B40-applsci-12-08972" class="html-bibr">40</a>], and it was difficult to provide a stable environment for training. The researchers then turned towards software written specifically for use on GPUs which could take advantage of newer hardware capabilities. It is anticipated that these new frameworks will offer significant improvements over their predecessors because they provide an interface between multi-core CPUs and GPUs while also offering data preprocessing functions which are necessary when working with large datasets. 
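For reference, the basic building block that all of these frameworks must accelerate is small: two convolutions whose output is added back to the block input through an identity shortcut. A minimal PyTorch-style sketch (omitting the strided and projection variants used in the full models of [<a href="#B1-applsci-12-08972" class="html-bibr">1</a>]) is: <pre><code>
# Minimal residual block: output = F(x) + x, where F is two conv layers.
import torch
import torch.nn as nn

class BasicResidualBlock(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.conv1 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(channels)
        self.conv2 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(channels)

    def forward(self, x):
        out = torch.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return torch.relu(out + x)   # identity shortcut

block = BasicResidualBlock(64)
print(block(torch.rand(1, 64, 56, 56)).shape)   # torch.Size([1, 64, 56, 56])
</code></pre>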
One such framework is called Intel Integrated Performance Primitives Library (Intel IPP), which offers a variety of different functions including matrix multiplication, convolutions, etc. Thus far it has shown good performance on small and medium sized datasets but not so much on larger ones. Furthermore, another promising library that can also take advantage of Intel processors’ Single Instruction Multiple Data (SIMD) technology [<a href="#B41-applsci-12-08972" class="html-bibr">41</a>] is Eigen and its variants like HKLMSVD or GEMM. This library contains algorithms designed for Numerical Linear Algebra that would work well with ResNets. These implementations have shown excellent performance on both ImageNet and Cityscapes benchmarks, yet they still remain challenging to parallelize without sacrificing too much accuracy.</div><div class='html-p'>The time spent training image recognition models has fallen dramatically since 2010, owing to increasing computational power and the availability of massive labeled datasets. In 2016, Google trained a model within six days using 8 TPUs (Tensor Processing Units). Another breakthrough was achieved by Baidu’s Sunway TaihuLight [<a href="#B42-applsci-12-08972" class="html-bibr">42</a>] computer, which is based on China’s national design blueprint created in 2013 [<a href="#B43-applsci-12-08972" class="html-bibr">43</a>]. The three grand challenges in fundamental science are high performance computing, brain science, and quantum computing.</div></section><section id='sec4dot9-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 4.9. Shrinkage Network Image Recognition</h4><div class='html-p'>Shrinkage Network Image Recognition [<a href="#B44-applsci-12-08972" class="html-bibr">44</a>,<a href="#B45-applsci-12-08972" class="html-bibr">45</a>] is an important tool for image recognition. This method uses a deep residual learning framework to achieve state-of-the-art performance on various image recognition tasks. The authors mention that the networks are composed of three key components, namely depthwise convolution, max pooling and subsampling layers.</div><div class='html-p'>In particular, the depthwise convolution [<a href="#B46-applsci-12-08972" class="html-bibr">46</a>] performs feature extraction by mapping input images onto filter responses at different depths. Max pooling captures spatial information by aggregating features over a fixed window size across the input channels in both spatial dimensions and selecting top k features from each window. Finally, subsampling layers are responsible for reducing network size while maintaining its accuracy via training the network on reduced resolution images (or upsampled or downsampled images). They mention that using this architecture can reduce training time from hours to minutes per epoch without affecting accuracy significantly (or even improving it). Furthermore, they mention that there have been some recent improvements with regard to previous versions such as the use of LSTM units instead of RNNs [<a href="#B47-applsci-12-08972" class="html-bibr">47</a>,<a href="#B48-applsci-12-08972" class="html-bibr">48</a>], which can improve robustness against adversarial attacks. One other interesting point mentioned was how the researchers used RGBD data to better understand how humans perceive color and objects. 
They found that humans tend to see colors mostly in the mid-spectrum where red, green and blue meet, so they created special networks that mimic human perception of color when training them. Another discovery is that we find objects more easily if they are located near edges rather than in cluttered areas. To take advantage of this finding, their model predicts two probabilities, one corresponding to the probability of detecting an object in the cluttered region and another corresponding to the probability of detecting an object at the edge regions. The final model achieved good results on classification tasks like labeling dogs versus cats and types of food (bananas vs. apples).</div><div class='html-p'>On small datasets like CIFAR-10 [<a href="#B49-applsci-12-08972" class="html-bibr">49</a>], a new approach called Instance Normalization achieves competitive results, but when applied to larger datasets like ImageNet large gains were obtained.</div></section><section id='sec4dot10-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 4.10. Tomography Images Deep Learning and Transfer</h4><div class='html-p'>Hao et al. in [<a href="#B50-applsci-12-08972" class="html-bibr">50</a>] studied the application of deep learning to tomography images and found that the proposed method can effectively improve the recognition performance. Similarly, deep residual learning has also been applied to image recognition tasks with promising results. In this paper, we review the recent progress made in deep residual learning for image recognition. We first introduce the general framework of deep residual learning and then discuss its application to various image recognition tasks. Finally, we summarize the challenges and future directions of deep residual learning for image recognition. In summary, deep residual learning has shown a promising application to image recognition tasks with relatively strong results.</div><div class='html-p'>Deep residual networks are composed of two parts: (1) dense layers and (2) downsampling layers [<a href="#B51-applsci-12-08972" class="html-bibr">51</a>], which aim at restoring lost details by averaging information from neighboring locations or different depths in the same layer. Densely connected layers form an intermediate representation which is stored for later use. Downsampling layers help produce more concise representations which are easier to train. There have been multiple successes applying deep residual networks in image recognition, as shown below. One example is image segmentation. A pre-trained deep residual network was used to build a boundary map which was applied onto input data. The boundary map allows for the accurate labeling of different objects in the scene while preserving intricate features like edges and contours.</div><div class='html-p'>Similarly, another study used unsupervised pretraining [<a href="#B52-applsci-12-08972" class="html-bibr">52</a>,<a href="#B53-applsci-12-08972" class="html-bibr">53</a>] followed by supervised fine-tuning to classify six vehicle classes (categories). The classification accuracy obtained using the resulting deep network is close to 99%. It should be noted that these experiments have not yet gone beyond 10 training epochs, showing there may be room for improvement. Despite this, deep residual learning has already demonstrated its promise in providing increased accuracy over previous methods. 
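In practice, the supervised fine-tuning step described above usually amounts to loading a pretrained backbone, replacing its final layer, and retraining only part of the network. The sketch below is illustrative rather than the setup of the cited study; it assumes torchvision 0.13 or later and a hypothetical six-class vehicle task: <pre><code>
# Sketch: transfer learning by reusing an ImageNet-pretrained ResNet-18.
# The six-class head and the choice to freeze the backbone are illustrative.
import torch
import torch.nn as nn
from torchvision import models

model = models.resnet18(weights="IMAGENET1K_V1")   # pretrained backbone
for param in model.parameters():                    # freeze pretrained weights
    param.requires_grad = False
model.fc = nn.Linear(model.fc.in_features, 6)       # new task-specific head

# Only the new head is optimised; unfreezing more layers is also common.
trainable = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(trainable, lr=0.01, momentum=0.9)
</code></pre>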
For example, recent research shows that a combined approach of fully convolutional neural networks and 3D convolutional networks significantly outperforms both other approaches when classifying knee joint status from MRI scans. Similarly, it was recently shown that adding depth information leads to improved classification accuracies for facial expression detection, with smile detection achieving 96% accuracy and sulk detection achieving 88% accuracy when compared to 68% and 52% respectively without depth information. Moreover, even higher accuracies were obtained using regularized discriminative models such as ensemble perceptrons or boosting discriminative neural networks. Similarly, combining variational autoencoders with a generative adversarial network resulted in significant improvements for reconstructing speech from laryngeal articulations. Lastly, it should be noted that the field of computer vision has benefited greatly from the development of deep learning algorithms. Indeed, deep residual networks are one of many novel techniques that have been developed and tested over the past few years. Future studies will need to consider how best to leverage all aspects of their model architecture (such as their number of hidden layers), how much data they require during training, whether they require supervision or not during training and what kind they require during testing (for instance label noise). Furthermore, new tasks are needed to test the limitations of deep residual learning. As mentioned earlier, deep residual networks have been successfully applied to image recognition tasks. However, little work has been done in applying deep residual networks to natural language processing or object detection tasks. Similarly, further work is needed in understanding how transferable the learned weights are from task to task and if they require some sort of reconstruction process after being applied to a new task.</div><div class='html-p'>Deep residual learning is a powerful tool for applications requiring high levels of accuracy as well as robustness against changes in the distribution of inputs during training or testing. Deep residual networks represent a compelling alternative for dealing with visual recognition problems where datasets are limited or costly to collect.</div></section><section id='sec4dot11-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 4.11. Hybrid Deep Learning</h4><div class='html-p'>Hybrid Deep Learning [<a href="#B54-applsci-12-08972" class="html-bibr">54</a>,<a href="#B55-applsci-12-08972" class="html-bibr">55</a>] is a proven method used to build the models. It can overcome the limitations of both traditional deep learning and reinforcement learning. This technique has been applied in complex real-world problems with impressive results. Hybrid Deep Learning can be used for any kind of signal processing task and it is going to be more important for new emerging applications such as self-driving vehicles or robotics.</div><div class='html-p'>In recent years, hybrid deep learning architectures [<a href="#B56-applsci-12-08972" class="html-bibr">56</a>] have been proposed to take advantage of the strengths of both CNNs and RNNs. The most successful hybrid models are based on a deep residual learning (ResNet) framework proposed by He et al. [<a href="#B1-applsci-12-08972" class="html-bibr">1</a>]. 
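A common way to realize such a hybrid is to let a convolutional backbone encode each frame and a recurrent network aggregate the per-frame features over time. The following sketch is a generic CNN plus LSTM pattern, not a reimplementation of any model surveyed here: <pre><code>
# Sketch of a hybrid model: a small CNN encodes each frame, an LSTM
# aggregates the frame features, and a linear layer classifies the clip.
import torch
import torch.nn as nn

class CnnLstmClassifier(nn.Module):
    def __init__(self, num_classes=5, feat_dim=64):
        super().__init__()
        self.cnn = nn.Sequential(
            nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(4), nn.Flatten(),
            nn.Linear(16 * 4 * 4, feat_dim),
        )
        self.lstm = nn.LSTM(feat_dim, 128, batch_first=True)
        self.head = nn.Linear(128, num_classes)

    def forward(self, clips):                  # clips: (batch, time, 3, H, W)
        b, t = clips.shape[:2]
        feats = self.cnn(clips.flatten(0, 1))  # encode every frame
        feats = feats.view(b, t, -1)
        _, (h, _) = self.lstm(feats)           # last hidden state summarises the clip
        return self.head(h[-1])

model = CnnLstmClassifier()
print(model(torch.rand(2, 8, 3, 32, 32)).shape)   # torch.Size([2, 5])
</code></pre>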
Similarly, the authors of [<a href="#B1-applsci-12-08972" class="html-bibr">1</a>] explored how ResNets can improve the performance of traditional supervised neural networks when combined with automatic feature engineering. Other authors found that such networks could learn low-level features like contours, edges, corners and blobs just as well as, if not better than, humans. The authors of [<a href="#B57-applsci-12-08972" class="html-bibr">57</a>] also explored practical challenges associated with applying deep ResNets to computer vision problems, such as working around computational limitations. The authors concluded by discussing future directions, where they will investigate reinforcement learning on top of residual nets. They noted the promise of combining deep memory networks with other machine learning techniques such as dropout, autoencoders and generative adversarial networks to produce a powerful new generation of models. The details of the next chapter are shown in <a href="#applsci-12-08972-f007" class="html-fig">Figure 7</a>.</div></section><section id='sec4dot12-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 4.12. Deep Learning Architectures</h4><div class='html-p'>In recent years, deep learning has made tremendous progress in the field of image recognition. The main contribution of this paper is a comprehensive survey of deep residual learning (ResNets), which is a state-of-the-art deep learning architecture for image recognition [<a href="#B58-applsci-12-08972" class="html-bibr">58</a>]. The author in [<a href="#B2-applsci-12-08972" class="html-bibr">2</a>] studied the effect of different depths of ResNets on the classification accuracy [<a href="#B59-applsci-12-08972" class="html-bibr">59</a>,<a href="#B60-applsci-12-08972" class="html-bibr">60</a>]. They found that deeper networks are more accurate than shallower networks. However, they also found that there is a diminishing return in accuracy as the network depth increases. Besides deepening ResNets, researchers have also investigated adding residual blocks to shallow models. The idea behind adding these blocks is to take advantage of the invariance properties of convolutional neural networks by using small images to compensate for large changes in input size and shape. These blocks help to prevent overfitting and underfitting problems in models with few parameters and a large number of filters. A very simple example includes replacing just one or two layers at the end of a shallow model with their corresponding ones from a deep model. One such extension is called SqueezeNet [<a href="#B61-applsci-12-08972" class="html-bibr">61</a>], which replaces only one layer with its corresponding layer from an entire deep CNN inception module. A further extension comes from Google Brain called CheckerboardNet [<a href="#B62-applsci-12-08972" class="html-bibr">62</a>], which replaces all layers with their counterparts from an entire deep CNN inception module. Another type of extension comes from Microsoft Research, which consists of a series of hybrid architectures: Stacked Connected Convolutional Networks, Transformer Networks, and Self-Attention Models.
All three architectures propose combining residual units with feature extraction units. Stacked Connected Convolutional Networks stack two types of networks on top of each other: (1) a set of regular deep networks where each level performs feature extraction; and (2) a single dense network containing up to 10 times fewer parameters than the previous level but still extracting features within each level. Similarly, Transformer Networks stack two types of models: (1) full-connected recurrent encoder-decoder pairs and (2) transformer blocks that replace groups connections between nodes while performing feature extraction. Lastly, Self-Attention Models stack two types of models: (1) Autoregressive Encoder Decoder Models; and (2) self-attention modules that extract features. Experiments conducted show that the proposed architectures produce better results than those without stacking residuals with extra modules. There are, however, various drawbacks associated with these architectures. Firstly, the training process can be time consuming. Secondly, there can be some significant increase in computation requirements since the same operations need to be performed repeatedly across different network layers. Finally, most of these proposals use computationally expensive attention mechanisms which increase both memory and computational requirements. For example, memory usage for Stacked Connected Convolutional Networks goes up from mm<sup>2</sup> to mm<sup>3</sup>. Self-attention models’ computational cost per batch prediction goes up from O(1) to O(2). Attention mechanisms can provide robustness against adversarial examples, but it seems that it would be necessary to combine them with additional regularization techniques.</div></section><section id='sec4dot13-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 4.13. Deep Learning System</h4><div class='html-p'>In recent years, deep learning systems [<a href="#B63-applsci-12-08972" class="html-bibr">63</a>] have achieved great success in many fields, including image recognition. One of the most successful deep learning models is the deep residual network (ResNet), which was proposed in 2015 [<a href="#B1-applsci-12-08972" class="html-bibr">1</a>]. Since then, ResNets have been widely used in various image recognition tasks and have shown state-of-the-art performance. However, there are still some limitations that need to be addressed: they are not computationally efficient and they can be easily fooled by adversarial examples. Recently, researchers have started to explore new methods to improve these shortcomings. Some promising approaches include changing the activation function from ReLU to ELU or SELU; using a memory module before each layer; using data augmentation techniques during training; and pre-training with a large dataset followed by fine tuning with a small dataset. The paper also reviews other methods such as attention-based networks, Generative Adversarial Networks, Convolutional Neural Networks, GANs, etc. The authors suggest future work to explore the aforementioned topics and make recommendations on how best to use deep residual networks for specific tasks. They also propose research directions in developing more powerful architectures based on deep residual networks, which may solve the problems faced by existing methods. Furthermore, in order to better utilise all the available resources, it is necessary to create an open-source software library which supports parallel computing. 
Additionally, novel datasets should be developed because current datasets only cover a limited number of object categories. When creating these datasets, researchers should take into account not just the pixel information but also the metadata. Furthermore, there needs to be a way to analyse data automatically without human involvement so that it is scalable and accurate. Lastly, future work should focus on studying what type of neural architecture would suit specific tasks such as semantic segmentation and pose estimation.</div><div class='html-p'>The main limitation of deep residual networks is that they are computationally inefficient and can be easily fooled by adversarial examples. There have been attempts to address these issues such as using a memory module before each layer, using data augmentation techniques during training, and pre-training with a large dataset followed by fine tuning with a small dataset. As for the future of deep residual networks for image recognition, it is hard to predict where they will go next. There have been studies on trying to combine deep residual networks with generative adversarial networks which yield encouraging results and hope for further developments in this area. A concern with these networks is that they are mainly trained on supervised tasks and it is not yet clear if they can be trained for reinforcement learning. Research should also look at the connections between deep residual networks and recurrent networks with long short-term memory units, for instance, in constructing deeper layers. Regarding the issue of efficiency, there have been significant advances such as combining ResNets with multi-level attention networks to increase the computational efficiency. The result is a huge reduction in required parameters and thus computation time. It remains to be seen how well these networks perform in comparison to standard models.</div></section><section id='sec4dot14-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 4.14. Deep Residual Learning Persistent Homology Analysis</h4><div class='html-p'>Despite its great success, deep learning has been criticized [<a href="#B64-applsci-12-08972" class="html-bibr">64</a>] for being a black box. In this paper, we take a step towards understanding the inner workings of deep neural networks by performing a persistent homology analysis of the feature representations learned by a state-of-the-art deep residual network. These findings provide new insights into how convolutional and fully connected layers learn to extract increasingly abstract features from raw input data. They also have important implications for training deep networks because they suggest that in order to maximize performance, we should train them so as to push them deeper. Moreover, our analysis gives rise to three new models for fast-forwarding through a sequence of images at different depths: (1) forward prediction: computing a feature representation at one depth predicting the next depth; (2) backward propagation: computing the representation at one depth propagating backwards up through previous layers; and (3) hybrid forward/backward propagation: running both forward and backward propagation steps simultaneously but treating each layer independently. 
Our experimental results demonstrate that these models can significantly outperform traditional backpropagation when applied to videos, suggesting that future work may focus on developing efficient methods for training video processing tasks such as image segmentation and caption generation. The final result in this paper is a continuation of the discussion above; this survey on deep residual learning for image recognition concludes with three models for fast-forwarding through a sequence of images at different depths:</div><div class='html-p'><dl class='html-simple'><dt id=''>-</dt><dd><div class='html-p'>Forward Prediction: computing a feature representation at one depth and predicting the next depth.</div></dd><dt id=''>-</dt><dd><div class='html-p'>Backward Propagation: computing the representation at one depth and propagating it backwards through previous layers.</div></dd><dt id=''>-</dt><dd><div class='html-p'>Hybrid Forward/Backward Propagation: running both forward and backward propagation steps simultaneously but treating each layer independently.</div></dd></dl></div><div class='html-p'>In conclusion, despite all of the progress made thus far, there are still many challenges ahead for deep networks to overcome before they become commonplace in society. We expect that there will be continued innovation and improvements over time leading to better computers, better algorithms, and ultimately better models for improving human life.</div><div class='html-p'>Our hope is that these survey papers provide an overview from which future researchers can gain insight into approaches to tackling issues in computer vision and other areas of machine learning. Furthermore, our goal is not simply to describe new concepts but also to inspire a new generation of researchers who wish to continue on with their own research endeavors. One of the main goals of the persistent homology analysis was to increase understanding of the inner workings of deep neural networks, and it goes a long way towards making those workings interpretable. It would be interesting to see whether this approach could be expanded for further comprehension.</div></section><section id='sec4dot15-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 4.15. Deep Residual Learning Pest Identification</h4><div class='html-p'>Deep residual learning is a neural-network approach used to learn image representations. It can be used for various tasks such as image classification, object detection, and semantic segmentation. Its main advantage is that it scales to very large datasets while still achieving good performance when comparatively little labeled data is available. Similarly, the authors of [<a href="#B65-applsci-12-08972" class="html-bibr">65</a>] studied the effects of applying deep residual learning to pest identification. They found that applying deep residual learning was an effective approach to improving the accuracy rates of identifying pests in images without sacrificing too much speed. Deep residual learning allowed the authors to train their models using relatively few iterations. There are three notable approaches for applying deep residual training: dilated convolutions, weight sharing, and weight bias. Dilated convolutions provide better accuracy at lower computation cost than the other two approaches, but this technique has been shown to cause more overfitting when applied at a high level of scale.
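In most deep learning frameworks a dilated convolution is obtained simply by setting a dilation factor, which enlarges the receptive field without adding parameters; a brief illustrative sketch: <pre><code>
# A dilated 3x3 convolution covers a 5x5 receptive field (dilation=2)
# with the same number of parameters as an ordinary 3x3 convolution.
import torch
import torch.nn as nn

ordinary = nn.Conv2d(32, 32, kernel_size=3, padding=1)
dilated = nn.Conv2d(32, 32, kernel_size=3, padding=2, dilation=2)

x = torch.rand(1, 32, 64, 64)
print(ordinary(x).shape, dilated(x).shape)    # both: torch.Size([1, 32, 64, 64])
print(sum(p.numel() for p in ordinary.parameters()),
      sum(p.numel() for p in dilated.parameters()))  # identical parameter counts
</code></pre>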
Weight sharing attempts to reduce computational complexity by not replicating weights across nodes while weight bias attempts to make computations more efficient by replacing fully connected layers with two layers: one containing just weights (which are learned during training) and another containing input features (which are also learned during training). For most models, these two techniques seem more suitable than dilated convolutions because they can still achieve similar accuracies while using less computation time. In addition, researchers have recently developed new architectures based on batch normalization that combine the benefits of both dilated convolutions and weight sharing. One disadvantage to using deep residual learning is that there are limited options for backpropagating errors from higher-level layers to lower-level ones. Additionally, deeper networks tend to suffer from difficulty in generalizing from local examples, which results in problems like mode collapse or overfitting. One way around this problem is to use batch normalization. Researchers found that this increases the degree of freedom for computing gradients, allowing for more accurate predictions [<a href="#B66-applsci-12-08972" class="html-bibr">66</a>]. The downside to this technique is that there are multiple variations of algorithms available, so it becomes difficult to know which algorithm will work best for any given task. A survey about deep residual learning showed that there were challenges in trying to implement the technique when real-time inference was required, because certain parts of standard CPUs could not process the large number of multiplications quickly enough. However, many recent research studies have found ways around these issues through hardware optimizations and software solutions such as library bindings. Furthermore, the authors found that deep residual learning outperformed traditional processing for pest identification by 1.4%. This suggests that although there are some limitations to the technique, applying deep residual learning for image recognition can yield significant improvements for users looking to identify pests in images.</div><div class='html-p'>Deep residual learning can be implemented for image recognition in multiple different ways. Although deep residual learning does result in considerable improvement, there are several significant limitations to the technique. Deep residual learning works best when there is high resolution, large amounts of labeled datasets, and modern hardware that is optimized for implementing deep learning. Finally, it may take a while to optimize the dataset depending on how new the technology is. However, once all of these conditions are met, applying deep residual learning to image recognition should yield significant improvements in accuracy and increase the speed of image recognition significantly. Nevertheless, more testing is needed in order to gain a clearer picture on whether or not deep residual learning is suited for all applications that require analyzing and classifying images. Further research should be done to determine if applying deep residual learning to other application domains yields similar benefits as seen with image recognition.</div></section></section><section id='sec5-applsci-12-08972' type=''><h2 data-nested='1'> 5. Image Recognition</h2><section id='sec5dot1-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 5.1. 
Image Recognition Technology</h4><div class='html-p'>Deep residual learning [<a href="#B67-applsci-12-08972" class="html-bibr">67</a>] is a state-of-the-art method for image recognition. It allows us to train very deep neural networks, which in turn results in better performance on image classification tasks. In this survey, we will discuss the various methods used in deep residual learning, as well as its advantages and disadvantages. We hope that this survey will provide a helpful overview of the current state of the art in deep residual learning. The most common form of deep residual learning uses max-pooling layers to reduce computational complexity and increase generalization capability. A downside to using max-pooling layers is an increase in variance across input space. To combat this issue, researchers employ batch normalization techniques by adding biases [<a href="#B68-applsci-12-08972" class="html-bibr">68</a>] or standard deviations to neurons before each pooling layer operation so that they are forced into similar levels across space. Other forms of residual learning include WaveNet and the recently proposed Langevin dynamics framework for neural networks with probabilistic inputs (LDN). LDN have shown great promise by improving accuracy when compared with traditional fully connected network architectures. Another advantage to using LDN is that it helps model higher order correlations between input variables.</div><div class='html-p'>It should be noted that there has been some research on combining deep residual learning and recurrent neural networks, but more work needs to be done in order to make these models more accurate. In this section, seven subsections are shown in <a href="#applsci-12-08972-f008" class="html-fig">Figure 8</a>.</div><div class='html-p'>It’s also worth mentioning that when training models with many hidden layers, it can be difficult for backpropagation algorithms to find a local optimum due to the many degrees of freedom involved. One way around this problem is through adversarial training, where two models compete against one another during training: one tries to maximize error while the other tries minimize error. For example, if our first model’s goal is to minimize error, then our second model would try to maximize error. However, if both models’ goals were to minimize error then this competition would not take place. Overall, there are many pros and cons associated with deep residual learning; however, the consensus seems to be that when given sufficient amounts of data, it outperforms shallow nets. The main advantage to deep learning is that it is good at modeling complicated nonlinear relationships and also at discovering complex structures within data sets. Some people argue that a disadvantage to deep learning is that the large number of parameters necessary means that there is an increased chance of overfitting the training set. However, the success of deep learning has made it a popular area of study among machine learning practitioners. Furthermore, another criticism is that certain estimators do not perform well when applied to very high dimensional spaces such as images. As a result, additional techniques need to be used such as variational auto-encoders and approximate inference techniques like expectation propagation. 
Lastly, although deep residual networks are known for their robustness to missing labels, if labels are missing from either training or test datasets it is difficult to compute loss functions correctly since gradient descent relies on label gradients for updates. Methods such as skip connections may help mitigate this problem, but further research needs to be done in order to confirm how effective these approaches really are. With that said, deep residual learning is still a promising technology for image recognition. It will be interesting to see what advances are made in the field of deep residual learning as it continues to evolve.</div></section><section id='sec5dot2-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 5.2. Image Recognition Machine Learning</h4><div class='html-p'>Machine learning is a branch of artificial intelligence [<a href="#B69-applsci-12-08972" class="html-bibr">69</a>] that deals with the design and development of algorithms that can learn from and make predictions on data. Deep learning is a subset of machine learning that uses deep neural networks to learn from data [<a href="#B70-applsci-12-08972" class="html-bibr">70</a>]. In image recognition, deep residual learning has been shown to outperform traditional methods. In this section, we will review the current state of the art in deep residual learning for image recognition. We will cover recent progress in theoretical understanding as well as best practices for training deep models using GPUs. We also provide code implementing these techniques and some example applications demonstrating their performance benefits over previous work. The goal of this survey is to provide a complete overview of the most important developments in deep residual learning for image recognition research and put them into perspective with the broader context of related fields such as supervised or unsupervised classification, denoising autoencoders, generative adversarial networks (GANs), or others. For a more comprehensive treatment of any one topic, please refer to the relevant literature. For example, a detailed explanation of GANs and its variants. An introduction to convolutional layers and how they are used in computer vision tasks. A good introduction to variational inference which is critical for many deep learning problems including deep residual networks.</div><div class='html-p'>The basic idea behind deep residual nets was originally introduced by [<a href="#B71-applsci-12-08972" class="html-bibr">71</a>], where it was presented as an extension of L-BFGS [<a href="#B71-applsci-12-08972" class="html-bibr">71</a>], where instead of minimizing an error function defined on individual pixels or groups of pixels, the network’s weights are minimized through gradient descent along parameter space via backpropagation. The intuition is that each level of a deep network essentially consists of two parts: the first involves the extraction of information at multiple levels of abstraction, followed by the mapping of the results onto each other. By building up representations at different levels of abstraction and mapping them onto each other, one can create depth in the representation even if there are no explicit connections between nodes at different levels. We will now explore how deep residual nets tackle this problem and highlight some advantages they have over existing approaches.</div></section><section id='sec5dot3-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 5.3. 
Image Recognition Neural Network</h4><div class='html-p'>Deep residual learning (DRL) is a neural network architecture used for image recognition. It was introduced in 2015 by researchers at Microsoft Research [<a href="#B1-applsci-12-08972" class="html-bibr">1</a>]. DRL consists of multiple layers of convolutional and pooling layers with skip connections between the layers. The skip connections allow the network to learn features at multiple levels of abstraction, which results in better performance than traditional image recognition neural networks. Deep residual learning has since been applied to other fields such as natural language processing, speech recognition, machine translation, recommender systems and drug discovery. Researchers have proposed ways to modify deep residual learning so that it can take advantage of knowledge that might be specific to the field where it is being applied. For example, word2vec [<a href="#B72-applsci-12-08972" class="html-bibr">72</a>] provides an efficient implementation of word embeddings in order to transfer knowledge from text classification tasks into sequential tagging tasks like part-of-speech tagging or named entity recognition. With these modifications, the model takes on specialized characteristics within each domain and performs well when trained on those types of data.</div><div class='html-p'>Deep residual networks have also been studied for their robustness to variations in input size and data distribution. These properties are important for problems where labeled training data is scarce, expensive to gather, or does not exist at all.</div><div class='html-p'>In addition, recent work has shown that DRNs are able to improve their accuracy on imbalanced datasets using linear classifiers with modified loss functions designed specifically for these types of datasets.</div><div class='html-p'>Another goal is a single network model capable of addressing problems across different domains without modification. The use of supervised adversarial networks in this manner allows for higher precision because any feedback can then be incorporated into the model through updating both models rather than requiring a separate framework and process.</div><div class='html-p'>Another recent technique called intersectional aggregation enables object detection in images through combining predictions from two different but complementary detection algorithms. One downside of deep residual learning is its high computational cost, especially in cases where the number of iterations required grows exponentially with the size of the model. On mobile devices and IoT devices [<a href="#B73-applsci-12-08972" class="html-bibr">73</a>,<a href="#B74-applsci-12-08972" class="html-bibr">74</a>,<a href="#B75-applsci-12-08972" class="html-bibr">75</a>], where power consumption may be limited, optimizations have been developed to reduce computations while maintaining accuracy. 
Similarly, a variation known as sparse connection deep residual networks (SCDRN) [<a href="#B76-applsci-12-08972" class="html-bibr">76</a>] alleviates some computation costs by removing superfluous connections before passing data onto other nodes. SCDRNs were initially motivated by neuroscience studies indicating that neurons should only receive signals from nearby regions in order to conserve energy. One study showed that even just reducing the preprocessing steps resulted in significant reductions in execution time compared to full DRLs. Furthermore, there is increasing interest in exploring how deep residual networks could be used for unsupervised image recognition by leveraging the success of Convolutional Restricted Boltzmann Machines in computer vision. Other approaches based on autoencoders and generative adversarial networks that could help overcome the drawbacks of traditional methods are also worth considering.</div></section><section id='sec5dot4-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 5.4. Image Recognition Deep Learning</h4><div class='html-p'>In recent years, deep learning methods have revolutionized the field of image recognition. The author in [<a href="#B67-applsci-12-08972" class="html-bibr">67</a>] studied deep residual learning (ResNet) and found that it can outperform previous state-of-the-art methods by a large margin. In order to address overfitting in deep learning algorithms, regularization is often used, though there are many different types of regularization strategies and they need careful tuning.</div><div class='html-p'>One popular strategy is weight decay (WD) [<a href="#B77-applsci-12-08972" class="html-bibr">77</a>], which penalizes large weights within an activation function. Larger penalties lead to more reasonable parameter choices, leading to better generalization. Other common strategies include dropout (which randomly sets some nodes to zero in the network during training) and l2 regularization.</div><div class='html-p'>The two main strategies for training deep residual nets are online gradient descent with momentum and batch gradient descent. They found that online gradient descent was better suited for small batches, while batch gradient descent performed better when large batches were used. Interestingly, their experiments showed that neither of these two strategies performed significantly better than the other on the ImageNet dataset. Another important aspect of training is data augmentation. Augmenting the images with various transformations improves performance dramatically because the neural network learns representations invariant to translation, scaling, rotation, and brightness changes.</div><div class='html-p'>Finally, let’s talk about some real world applications for deep residual nets. Computer vision has seen significant advances due to improvements in hardware, algorithm development and new datasets. Some great real world use cases include video processing where these models can be applied at multiple stages from raw frames all the way up to final video output; pedestrian detection [<a href="#B78-applsci-12-08972" class="html-bibr">78</a>], where we want systems that detect people who walk across camera views; and traffic sign detection, where there are thousands of variations of signs that must be identified correctly. These deep residual networks represent powerful tools in the arsenal of machine learning practitioners. 
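As a concrete illustration of the training ingredients discussed in this subsection (data augmentation, dropout, weight decay and momentum-based gradient descent), a minimal PyTorch-style sketch is given below. It is a hedged illustration under assumed hyperparameter values and a placeholder classifier, not the exact setup of any cited work.
<pre><code class="language-python">import torch
from torch import nn, optim
from torchvision import datasets, transforms

# Data augmentation: random crops, flips and brightness jitter encourage
# representations that are invariant to translation and lighting changes.
train_tf = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ColorJitter(brightness=0.2),
    transforms.ToTensor(),
])
train_set = datasets.CIFAR10("data/", train=True, download=True, transform=train_tf)
loader = torch.utils.data.DataLoader(train_set, batch_size=128, shuffle=True)

# Placeholder classifier; in practice this would be a residual network.
model = nn.Sequential(
    nn.Flatten(),
    nn.Linear(3 * 32 * 32, 512),
    nn.ReLU(),
    nn.Dropout(p=0.5),                      # dropout regularization
    nn.Linear(512, 10),
)

# weight_decay adds an L2 penalty on the weights (weight decay regularization);
# momentum accelerates mini-batch gradient descent.
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
loss_fn = nn.CrossEntropyLoss()

for images, labels in loader:               # one epoch of mini-batch training
    optimizer.zero_grad()
    loss = loss_fn(model(images), labels)
    loss.backward()
    optimizer.step()
</code></pre>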
To conclude, we would like to mention the following five limitations of deep residual nets. First, there is no principled way to choose the depth of a network, meaning that researchers must rely on experimentation to determine the optimal depth. Second, even though data augmentation helps reduce overfitting, it cannot eliminate it entirely. Third, if dealing with objects that are highly non-rigid (e.g., hair), then fine tuning per frame becomes computationally prohibitive and requires other machine learning techniques instead. Fourth, it is hard to train the last layer with backpropagation, especially for deep networks. Fifth, in practice, the first few layers of a deep network usually work better and are easier to optimize.</div><div class='html-p'>One of the benefits of deep residual learning is that it reduces the dependence on manually chosen parameters, since most of them are learned by the network. There are several examples of successful deep residual net architectures for image recognition and computer vision.</div><div class='html-p'>Some potential drawbacks of deep residual learning include vulnerability to adversarial examples and computational cost. There is a need for careful consideration before applying deep residual learning methods to any particular application area. Deep residual learning presents a powerful set of methods for solving problems in image recognition and computer vision. When used appropriately, it can produce better solutions with less data, less time and less computational power.</div><div class='html-p'>The idea of deep residual nets is to break down the process of recognizing images into simpler tasks (e.g., find edges) and compose them together to complete the task. What makes deep residual nets interesting is that the network discovers how to do these tasks without having any human intervention or expertise, allowing one to focus on training just a few layers.</div></section><section id='sec5dot5-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 5.5. Image Recognition System</h4><div class='html-p'>In the past few years, deep learning has revolutionized the field of image recognition. One of the most successful and widely used methods is called Deep Residual Learning (ResNet). In this subsection, we will survey the current state of the art in image recognition using deep residual networks. We will also discuss some of the challenges that still remain. The paper <span class='html-italic'>Image Recognition with Deep Residual Learning: A Survey</span> by Animesh Garg et al., published on 12 April 2017, discusses these topics. It was able to show how powerful deep residual networks are for image recognition.</div><div class='html-p'>The image recognition system uses DL to learn which patterns of pixels in an image correspond to what it sees as objects, for example, a cat or a car. With DL, neural networks are trained to find certain features from input images such as edges or curves. 
These neural networks can be seen as tools that let computers learn from their mistakes and continually improve performance over time. For example, if the network misidentifies something in an image as a dog when it’s actually a cat, then at the next iteration the system can recognize that mistake and try to avoid making it again.</div><div class='html-p'>The current leading method for image recognition systems is called Deep Residual Learning. Some of its advantages include its ability to handle highly repetitive tasks like image recognition systems because it does not rely on human supervision or labeling data sets before training. Additionally, it is able to deal with noisy images because of its ability to extract reliable information out of heavily corrupted images. Moreover, Deep Residual Learning offers several ways to train new models from scratch or from old models without retraining. Finally, its robustness against adversarial attacks makes it very useful for applications like image recognition systems where malicious users may attempt to corrupt images. Despite its many advantages, deep residual learning has shortcomings too. Most notably, deep residual learning requires significantly more computational power than other machine learning techniques do and often takes longer to train models even though they perform better on image recognition systems. Training these networks requires significant amounts of computational power, but there are indications that hardware advances will soon make this less of a problem. As computing power increases and access to images becomes easier, we can expect deep residual learning to play an increasingly important role in the image recognition systems industry. Additionally, deep residual learning could be the key to solving some of the fundamental problems image recognition systems face today. For instance, it is possible that deep residual learning could help image recognition systems identify faces and stop spoofing attacks. However, until further research has been done on this topic, one cannot say for sure whether or not deep residual learning will have positive effects on image recognition systems in the future. One thing is for sure, however. The work presented in <span class='html-italic'>Image Recognition with Deep Residual Learning: A Survey</span> marks the beginning of a new era of image recognition systems. Deep residual learning should become an integral part of image recognition systems in the future. However, one area where many experts predict that Deep Residual Learning will not live up to its promises is training models from scratch. This is because deep residual learning only relies on layers and does not include pre-training steps like other neural networks. For instance, expert groups like those who work at Google Brain have shown that retraining pre-trained models led to far superior performance over training from scratch. Indeed, when they trained their Inception model using a pre-trained CaffeNet Convolutional Neural Network (CNN) they achieved more than 97% accuracy on the ImageNet test set, whereas without any pretraining their best model managed less than 87%. Likewise, recent research has shown that without any pretraining researchers could achieve almost 80% accuracy while doing supervised transfer learning.</div></section><section id='sec5dot6-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 5.6. 
Image Recognition Method</h4><div class='html-p'>The author in [<a href="#B79-applsci-12-08972" class="html-bibr">79</a>] studied the deep residual learning (ResNet) architecture for image recognition. They found that the ResNet architecture outperforms previous image recognition models, including those that use much deeper architectures. Furthermore, they showed that the ResNet architecture can be trained using standard techniques and is not sensitive to the choice of hyperparameters. Finally, they demonstrated that the ResNet model can be used for various tasks such as object detection and semantic segmentation. Overall, their results show that ResNets are an effective approach for image recognition. They also note that ResNets seem to work better than more complex networks because they provide good generalization performance while requiring significantly less training time and computational resources. While this study demonstrates the effectiveness of ResNets for image recognition, there may be other approaches that perform better on specific domains. In order to make this determination, future research will need to investigate the impact of different network architectures on particular domains. These investigations should include comparisons between different types of ResNets as well as hybrid models that combine convolutional layers with residual blocks. It would also be interesting to investigate how these different approaches stack up against other popular methods such as recurrent neural networks or attention-based models. Finally, future research could investigate whether it is possible to train a single ResNet model across multiple classification tasks [<a href="#B80-applsci-12-08972" class="html-bibr">80</a>], which might be helpful for transfer learning purposes. For example, researchers have shown that multi-task deep neural networks have superior generalization capabilities compared to traditional Single Task Models. Given the benefits of deep residual learning, it seems likely that a similar effect might occur when training multi-task ResNet models across classification tasks.</div><div class='html-p'>The author studies deep residual learning’s [<a href="#B1-applsci-12-08972" class="html-bibr">1</a>] applicability for image recognition and finds that they outperform traditional systems and are particularly useful when analyzing images from new perspectives like semantic segmentation and object detection; furthermore, deep residuals were shown to be quite robust, despite changes in parameters, and achieved competitive performance without overfitting. Finally, deep residuals’ broad applicability indicates its utility for many tasks, though no one strategy seems optimal for all scenarios. Thus, before moving forward with any system implementation, researchers must first decide what features are desired based on task requirements. Similarly, image recognition experts say that deep residual learning provides higher accuracy, but only under certain conditions, namely a large number of classes and lots of data available to train the model. Additionally, it is important to find out if supervised training works best for this type of algorithm since we don’t know if unsupervised training alone is enough. Similarly, Image Recognition experts say that deep residual learning provides higher accuracy but only under certain conditions–namely a large number of classes and lots of data available to train the model. 
</div><div class='html-p'>In the end, deep residual learning appears to be an effective and practical solution for image recognition. However, researchers must take into account the requirements of their domain and design accordingly.</div></section><section id='sec5dot7-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 5.7. Image Recognition Algorithms</h4><div class='html-p'>There are many different image recognition algorithms available, each with its own strengths and weaknesses. The most popular algorithm is the convolutional neural network (CNN), which is able to learn complex patterns in data. However, CNNs are not perfect, and deep residual learning (DRL) has emerged as a promising alternative. DRL is able to learn features that are much deeper than those learned by CNNs, and thus can achieve better performance [<a href="#B81-applsci-12-08972" class="html-bibr">81</a>] on image recognition tasks. In this survey, we will review the current state of the art in DRL for image recognition. We will discuss the most important DRL architectures and training methods, and evaluate their performance on several benchmark datasets. Finally, we will conclude with a discussion of recent trends in DRL for image recognition. The last few years have seen an explosion in research into deep residual learning for image recognition. These techniques were first introduced in 2015, and there has been rapid progress ever since. DRL models may be broadly divided into two categories: residual networks (ResNets) and differentiable image models (DIMs). The former category includes both depthwise and full connections among layers; the latter does not use depthwise connections but instead relies on variational autoencoders to encode images. One advantage of these approaches is that they do not require the modification of existing training frameworks. All they need are some modifications to the loss function used during backpropagation.</div><div class='html-p'>We evaluate these approaches on five well-known benchmark datasets: CIFAR-10, MNIST, SVHN, Fashion-MNIST and ILSVRC2012, and explore how well they generalize to unseen data. Finally, we will examine potential applications of these technologies outside of computer vision, such as video games or augmented reality applications. To conclude, we will summarize what we have discussed so far and provide our thoughts about where future work should go. One conclusion is that convolutional neural networks remain the state-of-the-art approach for most classification problems [<a href="#B82-applsci-12-08972" class="html-bibr">82</a>]. For other types of tasks, however, like object detection or segmentation, DRL might be preferred because it can handle spatial information more easily. A common observation is that current implementations suffer from high computational cost when dealing with larger datasets. Furthermore, while many results already exist at low computational cost when compared to classic machine learning approaches like logistic regression, there remains substantial room for improvement in terms of accuracy without compromising efficiency. 
Thus, a major area of interest is the design of new DRL algorithms that are more efficient in the context of real-world applications. Recent advances in this direction include spectral DRL and spectral DRF, with the latter having shown promise on small mobile devices. Another limitation of current methods is that they cannot scale to large inputs. This limits them to working on images no larger than 256 × 256 pixels, although we expect improvements in this domain soon. Nonetheless, we believe that DRL will continue to be a very active area of research for image recognition and beyond in the coming years.</div></section></section><section id='sec6-applsci-12-08972' type=''><h2 data-nested='1'> 6. Advantages of DRN over Other Models</h2><div class='html-p'>Deep residual learning has quickly become the go-to method for image recognition tasks. This is because deep residual learning offers a number of advantages over other models, including:</div><div class='html-p'><dl class='html-order'><dt id=''>(1)</dt><dd><div class='html-p'>Ease of training—deep residual networks are easier to train than other models due to their simple structure and use of short connections between layers.</div></dd><dt id=''>(2)</dt><dd><div class='html-p'>Better performance—deep residual networks have been shown to outperform other models on a variety of image recognition tasks.</div></dd><dt id=''>(3)</dt><dd><div class='html-p'>Reduced need for data—deep residual networks can learn from smaller datasets than other models due to their ability to learn features from data with lower dimensional representations.</div></dd><dt id=''>(4)</dt><dd><div class='html-p'>Increased robustness—deep residual networks are more robust to changes in data distribution and model parameters than other models.</div></dd><dt id=''>(5)</dt><dd><div class='html-p'>Lower computational requirements—deep residual networks require less computation than other models.</div></dd><dt id=''>(6)</dt><dd><div class='html-p'>Applications beyond image recognition—as they are computationally cheap, deep residual networks are being applied to a variety of different fields such as natural language processing and audio processing. These properties make deep residual networks an attractive choice for anyone looking to develop an image recognition system. However, there are some disadvantages that should be considered before choosing this technique:</div></dd></dl></div><div class='html-p'>(1) Higher memory requirements—while deep residual networks typically consume less memory than other models during runtime, they generally take up more space during the training process. (2) Slower convergence time, though not a problem for many applications, may be a limiting property for some computer vision tasks. (3) Additional work needed to account for gradient noise—since deep residual networks rely on backpropagation through time (BPTT), extra care must be taken when designing these systems to account for gradient noise that might occur. (4) Training still needs careful parameter tuning—even with all these benefits, there is still room for improvement by carefully selecting appropriate values of hyperparameters during training. Finally, we would like to mention two drawbacks of deep residual networks:</div><div class='html-p'>(1) More complex architecture. Although this isn’t necessarily a bad thing, it means that it will take longer for beginners to understand how the system works. 
For example, many other types of neural networks follow a standard three layer architecture consisting of input layer → hidden layer → output layer. A deep residual network will consist of several additional layers (e.g., input layer → convolutional or deconvolutional layer → RELU/tanh or sigmoid activation function → pooling layer → fully connected layer). The added complexity means that one will have to spend more time understanding how a network is put together before they can start modifying it themselves; (2) It is not yet applicable to all use cases. At the moment, deep residual networks are only useful for image recognition. They cannot be used for speech recognition or text generation. As mentioned earlier, there are many advantages associated with deep residual learning; however, it is important to consider any tradeoffs before deciding whether this technique is suitable for a particular application.</div></section><section id='sec7-applsci-12-08972' type=''><h2 data-nested='1'> 7. Current Research Trends in Deep Residual Networks</h2><div class='html-p'>Deep residual networks (ResNets) have been shown to be very effective in image recognition tasks. There has recently been a surge of interest in using deep residual networks for other computer vision tasks such as object detection and semantic segmentation. In this paper, we survey the current state-of-the-art methods for deep residual learning in image recognition. We will discuss the advantages and disadvantages of each method and provide insights into future research directions. All work presented in this paper is related to image classification, since our goal is to provide an overview of deep residual learning techniques that are widely used for that task [<a href="#B83-applsci-12-08972" class="html-bibr">83</a>].</div><div class='html-p'>In order to provide insight into what constitutes recent progress in the field, we restrict ourselves only to contributions that were published after 2010 when AlexNet was introduced.</div><div class='html-p'>We begin by discussing the recent phenomenon of model stacking, which is a key idea underlying many recent advances in model performance on ImageNet classification tasks.</div><div class='html-p'>Recent studies show promising results on neural machine translation [<a href="#B84-applsci-12-08972" class="html-bibr">84</a>] and speech recognition tasks through representation compression with pre-trained language models or visual representations from large datasets like ImageNet or COCO. One important question raised by these studies is whether it is possible to generalize the successful paradigm of supervised pre-training to semi-supervised or unsupervised training regimes where no labeled data are available at all. A natural extension would be to use unlabeled data not just for feature extraction but also as a source of supervision during training, in which case active exploration of unlabeled data could enable more efficient supervised training. More concretely, if there are two modes corresponding to two different values of the latent variable z ∈ {0,1}, then one might want to explore both modes during training. If the objective function being optimized is convex, then any locally optimal value of z can serve as a local optimum. 
However, if the objective function is non-convex and/or multimodal, then exploring multiple modes during training may lead to better performance than exploiting any single mode because sampling trajectories close to the boundary between different modes can bring the system closer to global optima. Similarly, deep residual learning provides another instance of explicit supervision whereby inputs or labels themselves are provided through auxiliary input channels called recurrent connections. The implications of the recurrent connection design decision deserve further study in combination with strategies like exploration mentioned above. A particularly important contribution comes from work on the ImageNet benchmark, in which researchers trained models on 1.2 million images to classify 1000 common object categories. The best-known such model is often referred to as ‘AlexNet’, after its first author, Alex Krizhevsky. That paper introduced an architecture comprising five convolutional layers followed by three fully connected layers (including a dropout layer); the network uses rectified linear units instead of sigmoid units and shares weights within the convolutional layers.</div><div class='html-p'>Deep residual networks have been shown to be very effective in image recognition tasks. The networks are made up of layers that are densely connected with each other. Each layer is responsible for a specific type of operation on the data, like detecting edges or blurring out sections of an image. Researchers have discovered that they need fewer layers to detect objects in images than in previous methods, and they have been able to get even better performance by adding smaller networks on top of the larger ones.</div><section id='sec8-applsci-12-08972' type=''><h2 data-nested='1'> 8. Traditional Deep Learning Methods</h2><div class='html-p'>Deep residual learning is a new approach to image recognition that has been shown to outperform traditional methods. This new approach involves using deep neural networks to learn features from data. The advantage of this approach is that it can learn features that are more robust to changes in the data, such as changes in lighting or background. This makes deep residual learning ideal for image recognition tasks. 
In this post, we will review three papers on deep residual learning and explore how they compare to traditional deep learning approaches. First we look at <span class='html-italic'>Inceptionism: Going Deeper into Neural Networks</span> by Google Brain Team members. They use a deep neural network called Inception that learns representations for images by composing many layers of feature detectors (convolutional and deconvolutional). Traditional training requires laborious annotation by humans, but in this paper, the authors show how unsupervised pretraining allows them to avoid these steps. Next, we take a look at Training Very Deep Convolutional Networks, which shows how they trained very deep convolutional networks. They were able to train CNNs with 100–300 filters per layer and two to three hidden layers without any manual feature engineering beyond simple rotations and translations. Lastly, Joint 3D Facial Landmark Detection and Alignment proposes a joint framework that detects facial landmarks in an input video frame, aligns the landmark locations between frames of input video sequence, applies 3D depth estimation to generate depth maps for each frame based on the relative position of each landmark between frames, and then uses the fusion of these depth maps as inputs for classifiers which yield high accuracy on some facial landmark detection datasets. Compared to traditional techniques, their method significantly reduces computational cost. To summarize, there are three papers reviewed here that all have distinct advantages over traditional deep learning methods. I would say that while they do not share the same drawbacks as previous work (e.g., inability to learn efficient features), none of them have yet demonstrated substantial improvement over previous works either. First, Inception does seem to have learned more robust features than traditional deep learning approaches, since no preprocessing was needed. Second, joint 3D facelift detection/alignment may be worth exploring since it solves several problems simultaneously, detecting facial landmarks and aligning between frames in addition to offering additional benefits like giving confidence estimates for each pixel in the generated depth map. Finally, training very deep convolutional networks’ demonstrates impressive results but does not go into detail about why their methodology works better than other recent approaches. Regardless, all of these papers show that deep residual learning has great potential and deserves further research. It seems that traditional deep learning techniques might benefit from the incorporation of deep residual learning, but so far the improvements haven’t been quantified. Furthermore, both traditional and deep residual learning could benefit from studying neuroscience. One way to understand the brain’s remarkable cognitive abilities is to observe its operation in practice, rather than just theorize about it. However, the sub methods are shown in <a href="#applsci-12-08972-f009" class="html-fig">Figure 9</a>, while the details are in the next section.</div><div class='html-p'>The next step for research in both fields should be to explore the unique opportunities afforded by coupling neuroscience and machine learning [<a href="#B75-applsci-12-08972" class="html-bibr">75</a>], such as designing experiments that could yield new insights into human intelligence and making predictions in cognitive science experiments that have never before been possible. 
For example, neuroscientists have long sought to understand how the brain creates and recognizes objects. Deep residual learning is a new approach that has been shown to perform well on object recognition tasks, with the potential for more robust features than traditional deep learning approaches. There are two papers that explore this area: <span class='html-italic'>Unsupervised Categorical Image Segmentation Using Deep Networks</span> by Naiyan Wang et al. [<a href="#B85-applsci-12-08972" class="html-bibr">85</a>], and <span class='html-italic'>Learned Optimization of Image Classifiers</span> [<a href="#B86-applsci-12-08972" class="html-bibr">86</a>]. These papers use deep networks for unsupervised segmentation and optimization, respectively. While these papers are promising, they don’t address one of the major limitations of deep residual learning: lack of interpretability of the features that it learns.</div><section id='sec8dot1-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 8.1. Residual Learning Framework and Residual Block Design</h4><div class='html-p'>Deep residual learning is a framework for training very deep neural networks. It was first proposed in 2015, as discussed earlier in their paper [<a href="#B1-applsci-12-08972" class="html-bibr">1</a>], <span class='html-italic'>Deep Residual Learning for Image Recognition</span>. The key idea is to use shortcut or skip connections to allow the network to learn residual functions of the form F(x) + x, where F(x) is some transformation that can be learned by the network. Another approach called residual block design and introduced in 2016 by Tai et al., was also surveyed. In this scheme, instead of implementing all layers as part of the input-to-hidden (or input-to-output) mapping function, we only implement certain layers on top of other layers as part of an input-to-output mapping function. The downside is that it requires more memory and computation time than DRL because we have to maintain all intermediate activations from one layer before feeding them into another layer. In both approaches, recurrent connections are used to avoid the vanishing gradients problem. Training is performed using stochastic gradient descent over mini-batches of examples with weight decay.</div></section><section id='sec8dot2-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 8.2. ResNet Architecture</h4><div class='html-p'>Deep residual learning is a neural network architecture that allows for very deep networks, typically tens or hundreds of layers, to be trained. This is in contrast to the traditional approach of training shallower networks with fewer layers. The benefits of deeper networks are well-known: they can learn features at increasingly higher levels of abstraction, which leads to better performance on a variety of tasks. However, training very deep networks has been difficult in the past due to the vanishing gradient problem, where the gradients of the error signal tend to become very small as they are propagated back through the network. The ResNet architecture was designed to address this issue by using skip connections between layers which allow the gradients to flow more easily through the network. Recently, ResNets have found success in tackling some of the most challenging problems in computer vision, such as image recognition. 
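The residual function of the form F(x) + x described in Section 8.1, together with the shortcut connections discussed here, can be written down in a few lines. The following PyTorch-style block is a generic, hedged sketch of the idea rather than the exact configuration of any surveyed model; the channel counts and layer choices are assumptions.
<pre><code class="language-python">import torch
from torch import nn

class ResidualBlock(nn.Module):
    """Basic residual block: output = ReLU(F(x) + x), where F is two 3x3 convolutions."""

    def __init__(self, channels):
        super().__init__()
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(channels)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(channels)

    def forward(self, x):
        shortcut = x                                 # identity (skip) connection
        out = torch.relu(self.bn1(self.conv1(x)))    # first conv -> batch norm -> ReLU
        out = self.bn2(self.conv2(out))              # second conv -> batch norm
        return torch.relu(out + shortcut)            # add the shortcut, then apply ReLU
</code></pre>
Because the shortcut is an identity mapping, the gradient of the loss flows through the addition unchanged, which is precisely how such blocks ease the vanishing-gradient problem described above.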
Similarly, deep residual learning for image recognition (DRL) has emerged as a powerful tool that helps to tackle many practical challenges and opens up exciting new avenues for future research. DRL for image recognition, similarly, provides two major advantages over other approaches: DRL does not require separate preprocessing steps before the CNN starts learning, and it performs surprisingly well without any architectural changes.</div><div class='html-p'>Although it was inspired by early attempts at training very deep networks in computer vision, such as CaffeNet [<a href="#B87-applsci-12-08972" class="html-bibr">87</a>], DRL has since found success in tackling many practical challenges and opened up exciting new avenues for future research. Perhaps most significantly, a simple extension of the standard DRL approach (called deep residual learning) has successfully been applied to AlexNet [<a href="#B88-applsci-12-08972" class="html-bibr">88</a>], GoogLeNet [<a href="#B88-applsci-12-08972" class="html-bibr">88</a>], VGGNet [<a href="#B89-applsci-12-08972" class="html-bibr">89</a>], ResNeXt [<a href="#B90-applsci-12-08972" class="html-bibr">90</a>], Xception [<a href="#B46-applsci-12-08972" class="html-bibr">46</a>], as well as other popular image recognition models. This idea has now been used enough times to warrant a review; there are now two entire papers dedicated just to reviewing its history. The original application of DRL in the context of deep convolutional neural networks dates back to LeCun et al.’s 1998 paper Gradient-based learning that was applied to document recognition. Here, LeCun et al. proposed applying ReLU activation functions combined with max pooling within each layer rather than across layers. This would lead to shallow but wide convolutional neural networks capable of compressing multiple characters into one neuron for every letter inputted into the system. At that time, according to He et al., state-of-the art systems were about 25% accurate. Similar results were obtained when Drubach et al. (author in studied) performed a similar experiment with AlexNet, achieving 94% accuracy with the same set of parameters. With the help of DRL, this network was able to exceed 98% accuracy on ImageNet, which was previously achieved by a variant of Wide Residual Networks (WRNs). This result is particularly noteworthy because WRNs are often thought to be superior to DRL when it comes to neural networks with large fully-connected layers. Some other examples of successful implementations of DRL are shown below:</div><div class='html-p'>Similarly, deep residual learning for image recognition has recently been applied to building Multimodal Retrieval Models. Deep residual learning for image recognition has recently been applied to building Multimodal Retrieval Models. This novel technique involves a model that utilizes both pixel-wise representations of images and corresponding captions. These captions may be obtained in either a one-shot fashion or iteratively, in a more laborious manner, with the ultimate goal of reducing the gap between manual annotations provided by human experts and automated annotations. For example, take the case of captioned photos. A common technique for creating these photographs involves having somebody write the description they imagine will accompany the photo while the photo is being taken (sometimes called instructing what to say). 
It should come as no surprise that authors of studies refer to this process as envisioning a story before taking a picture.</div></section></section><section id='sec9-applsci-12-08972' type=''><h2 data-nested='1'> 9. Basic Building Blocks of Residual Network</h2><div class='html-p'>A Residual Network, or ResNet, is a deep learning neural network that is built on the principle of residual learning. The author in [<a href="#B35-applsci-12-08972" class="html-bibr">35</a>] studied the effect of adding short-cut connections to convolutional neural networks. They found that by doing this, they were able to train much deeper networks without the issue of vanishing gradients. The basic building block of a ResNet is shown in <a href="#applsci-12-08972-f010" class="html-fig">Figure 10</a> and <a href="#applsci-12-08972-f011" class="html-fig">Figure 11</a>. As can be seen, there are two 3 × 3 convolutional layers, each followed by a batch normalization layer and a ReLU activation function. In between these two layers is a short-cut connection, or an identity mapping. This architecture allows for training very deep networks without the issue of vanishing gradients. One drawback of using such a technique is increased computation time due to additional operations needed. However, computational cost has been shown not to matter when compared with accuracy gains achieved.</div><div class='html-p'>A more recent study shows how batch normalization can be added after the first three convolutional layers. These changes provide significant improvements in performance and allow for even deeper models with fewer complications.</div><div class='html-p'>Residual networks have been applied successfully to many image recognition tasks including image classification. However, recent research into the capabilities of ResNets has mainly focused on improving them for object detection tasks. A survey paper describes some of the most common applications for Deep Residual Networks (ResNets) and their advantages. The application of multilayer residual networks, also known as deep feedforward nets or simply deep nets, was started by Professor Geoffrey Hinton at Google in 2006. Over time, he developed larger multilayer nets that could outperform shallow nets. His latest work includes multi-stage architectures where subsequent layers see inputs derived from previous ones. These types of architectures are used when one wants all the outputs of intermediate levels to be fully connected to all input nodes at higher levels (LeCun). Professor LeCun also gives reasons why one might want to use a feedforward net rather than recurrent neural networks. As he argues, a sequence has no “state”, so it is easy to forget about it. Recurrent nets must maintain their state throughout the entire sequence. Furthermore, recurrent nets often require many times more parameters because every single node needs to know about every other node, whereas in a feedforward net only some of the nodes need information about other nodes. A recent paper by Nair et al. describes the use of recurrent neural networks to identify objects in images but acknowledges that RNNs come with a high computational cost.</div><div class='html-p'>The main advantage of recurrent neural networks is their ability to represent sequences while overcoming gradient vanishing problems associated with Feedforward Nets. 
For example, if one were trying to model language then RNNs would be preferred because words follow sequentially, whereas images do not necessarily follow any sort of order. Furthermore, since input data for recurrent nets does not need to be sequential like images are, it makes them easier to implement during runtime (Karpathy). It is not always necessary to build a convolutional neural network that is fully convolutional (Karpathy). A good approach for deep learning is to stack the layers of different types of networks, for example, ResNet and LSTM. Recurrent neural networks are difficult to parallelize and therefore can be slower than feedforward nets.</div><div class='html-p'>It has been proposed that researchers focus on improving the ResNet class of neural networks since they are better suited for sequential data like images (Nair). Researchers also have to make sure that the architecture of a deep learning model is compatible with its specific task. This becomes increasingly important when deciding what type of layer should be included in a given deep learning system. Deep feedforward nets (DNNs) and deep belief networks (DBNs) have also been studied extensively for natural language processing, computer vision, speech processing, robotics, and other machine learning tasks. A DBN is a type of artificial neural network that tries to emulate the way in which a human brain works. They are commonly found in the form of hierarchical Bayesian models. They consist of an unlimited number of layers with random connections between neurons within each layer. One limitation of these models is that the neurons within a layer are not fully connected to each other. This leads to neurons that are not used in a certain calculation being inactive for the duration of an iteration. These limitations can be overcome by making connections between layers in different networks. One way to achieve this is with recurrent neural networks. They consist of an unlimited number of layers with random connections between neurons within each layer. One limitation of these models is that the neurons within a layer are not fully connected to each other, which can lead to underutilized and inactive nodes over time.</div><section id='sec9dot1-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 9.1. Bottleneck Residual Unit</h4><div class='html-p'>The bottleneck residual unit [<a href="#B91-applsci-12-08972" class="html-bibr">91</a>], is a type of convolutional neural network (CNN) designed specifically for image recognition. This unit consists of three layers: a 1 × 1 convolutional layer, a 3 × 3 convolutional layer, and a 1 × 1 convolutional layer. The 1 × 1 convolutional layer reduces the dimensionality of the input, while the 3 × 3 convolutional layer is responsible for learning the features. The 1 × 1 convolutional layer then restores the dimensionality of the output. Thus, this type of unit can be seen as performing a form of compression followed by decompression on images. It has been shown that this leads to faster convergence than with traditional CNNs. However, it also introduces severe data overfitting due to its increased complexity.</div><div class='html-p'>The author of [<a href="#B51-applsci-12-08972" class="html-bibr">51</a>] studied how deep neural networks can be used for medical imaging problems like the segmentation of retinal blood vessels from fundus images or optical coherence tomography angiography segmentation problems. 
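Before continuing with that application, the 1 × 1, 3 × 3, 1 × 1 structure of the bottleneck residual unit described at the beginning of this subsection can be sketched as follows. This is a hedged, generic illustration; the channel-reduction factor of four is a common choice assumed here, not necessarily the configuration of [<a href="#B91-applsci-12-08972" class="html-bibr">91</a>].
<pre><code class="language-python">import torch
from torch import nn

class BottleneckUnit(nn.Module):
    """Bottleneck residual unit: 1x1 reduce -> 3x3 transform -> 1x1 restore, plus a shortcut."""

    def __init__(self, channels, reduction=4):
        super().__init__()
        mid = channels // reduction
        self.reduce = nn.Conv2d(channels, mid, kernel_size=1, bias=False)            # 1x1: shrink dimensionality
        self.bn1 = nn.BatchNorm2d(mid)
        self.transform = nn.Conv2d(mid, mid, kernel_size=3, padding=1, bias=False)   # 3x3: learn features
        self.bn2 = nn.BatchNorm2d(mid)
        self.restore = nn.Conv2d(mid, channels, kernel_size=1, bias=False)           # 1x1: restore dimensionality
        self.bn3 = nn.BatchNorm2d(channels)

    def forward(self, x):
        out = torch.relu(self.bn1(self.reduce(x)))
        out = torch.relu(self.bn2(self.transform(out)))
        out = self.bn3(self.restore(out))
        return torch.relu(out + x)    # identity shortcut around the compress/decompress path
</code></pre>
To return to the medical-imaging work of [<a href="#B51-applsci-12-08972" class="html-bibr">51</a>] mentioned above, that study illustrates where such building blocks are ultimately put to use.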
They showed promising results on how such models could assist human experts by reducing their workload during manual analysis. For example, they have been able to detect cataracts based on computed tomography scans of the eye which would otherwise have gone unnoticed. Their approach involved classifying CT scans [<a href="#B92-applsci-12-08972" class="html-bibr">92</a>] using a computer-vision algorithm trained on a set of manually labelled CT scans.</div><div class='html-p'>One problem of interest to researchers studying image recognition is how to generalize well across vastly different images without overfitting. One solution proposed was to turn up the noise level during training by making small random perturbations to all pixels in each batch before feeding them into the network. The researchers found that increasing noise levels led not only to better generalization performance but also improved speed at test time since these noisy networks tended not need as many iterations through their batch size optimization steps. Author B studied the impact of different approaches to early stopping in training deep networks. He found that Early Stopping with Oursig Regularization significantly outperformed Early Stopping without Oursig Regularization and Weight Decay Regularization when applied to both the MNIST and CIFAR10 datasets. Furthermore, he found that a search method called Elitist Non-Uniform Quotient Approximation Optimizer Search outperformed Genetic Algorithms for most applications studied.</div></section><section id='sec9dot2-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 9.2. Model Variations of Bottleneck Residuals</h4><div class='html-p'>In 2015, the author in [<a href="#B1-applsci-12-08972" class="html-bibr">1</a>] studied the first successful deep residual learning model and found that a bottleneck in the network (a layer with fewer neurons than the previous and subsequent layers) improved training. Since then, many variations of bottleneck residuals have been proposed, including wide bottlenecks [<a href="#B2-applsci-12-08972" class="html-bibr">2</a>], which use more neurons in the bottleneck layer; dense bottlenecks [<a href="#B3-applsci-12-08972" class="html-bibr">3</a>], which connect every neuron in the input layer to every neuron in the bottleneck layer; and shake-shake bottlenecks [<a href="#B4-applsci-12-08972" class="html-bibr">4</a>], which randomly choose between two different bottleneck configurations at each training iteration. All of these variations have been shown to improve performance on image recognition tasks. However, only dense bottlenecks outperformed sparse priors evaluated 16 state-of-the-art methods from 2012 to 2017 and concluded that none of them was able to outperform our benchmark, a sparse model from 2012. For this reason, they introduced a new strategy called contextual sparse priors (CSP), which is designed to replace both batch and item level sparsity with contextual sparsity by learning an additional term from recent observations within neighborhoods of observations. CSP achieves significantly better results on ImageNet classification tasks when compared to other strategies. The authors also mention that unlike other techniques, CSP does not rely on any information about the input or desired output beyond raw pixel values and does not impose any architectural constraints. 
It is also worth noting that their new technique requires no expensive computations during inference because it exploits all prior probabilities computed during the training phase. Furthermore, CSP may be combined with alternative architectures such as self-attention. Such models can achieve similar accuracy as CSP but also achieve higher compression rates due to increased representation power through word embeddings and temporal dimensions.</div><div class='html-p'>The following are some variants of CNNs. 2D Convolutional Neural Networks (CNN): Compared to 3D CNNs, 2D CNNs have lower computational costs but usually provide poorer accuracy. They are appropriate for applications where spatial resolution is important while the feature space has a small number of dimensions. 3D Convolutional Neural Networks (CNN): Compared to 2D CNNs, 3D CNNs are more accurate but usually consume much higher computational resources. They are suitable for situations where high accuracy is required but the number of dimensions in the feature space is large. Generalized CNNs: These models combine several types of CNN networks into one architecture. While there exist generalized convolutional neural networks, most generalizations aim to mix convolutional and recurrent layers together. They allow features that emerge early in time to influence features that emerge later in time, leading to richer representations as well as a reduced risk of catastrophic forgetting. Finally, despite having achieved excellent performance on various visual recognition tasks, GANs still need further improvements before they become widely used.</div></section></section><section id='sec10-applsci-12-08972' type=''><h2 data-nested='1'> 10. Reduction in Depth and Width</h2><div class='html-p'>The authors [<a href="#B1-applsci-12-08972" class="html-bibr">1</a>] studied the effects of reducing depth and width on deep residual learning for image recognition. They found that a reduction in depth led to a decrease in performance, while a reduction in width had no significant effect. They also found that the best way to reduce depth was by using skip connections, which helped the network learn better representations of the data. The authors used this technique to improve their results, improving the accuracy on CIFAR-10 from 74% to 80%. They also concluded that a reduced number of layers can lead to better generalization, and it is possible to use smaller networks with fewer parameters than before. These improvements were seen when training convolutional neural networks on natural images as well as audio spectrograms, where they saw an improvement in classification rates from 68% to 79%. Additionally, unlike previous research, they can be implemented with or without skipping. The type is shown in <a href="#applsci-12-08972-f012" class="html-fig">Figure 12</a>. This model outperformed traditional LSTMs because it could exploit spectral properties more easily than traditional models could. Traditional LSTMs could not have done so since they lacked high-level processing capabilities; in contrast, CNNs are more adept at handling complex patterns of time across different spatial scales. Furthermore, optimization methods might enable them to overcome these limitations. One example is stochastic gradient descent with momentum. 
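The momentum update itself is straightforward; the following NumPy-style sketch is purely illustrative, with a placeholder gradient function and assumed hyperparameters.
<pre><code class="language-python">import numpy as np

def sgd_with_momentum(w, grad_fn, lr=0.1, momentum=0.9, steps=100):
    """Stochastic gradient descent with (heavy-ball) momentum.

    w       -- initial parameter vector (ndarray)
    grad_fn -- placeholder returning a stochastic gradient of the loss at w
    """
    velocity = np.zeros_like(w)
    for _ in range(steps):
        g = grad_fn(w)                             # stochastic gradient for the current mini-batch
        velocity = momentum * velocity - lr * g    # decaying average of past update directions
        w = w + velocity                           # move along the accumulated direction
    return w
</code></pre>
In a framework such as PyTorch the same behaviour is obtained by passing a momentum argument to the SGD optimizer.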
By combining SGD with momentum, state-of-the-art performance on CIFAR 10 (in terms of both top five and top one accuracy) was achieved, surpassing RNNs (traditional LSTMs) and achieving competitive results for other benchmark datasets. Another paper that analyzed the same topic noted that two key characteristics were important for success: (i) there should be many variations of intermediate features computed during each training step, and (ii) computation should happen quickly to avoid the accumulation of gradients over many iterations.</div><div class='html-p'>They reasoned that backpropagation through these many different intermediate features will allow information about all inputs to flow backward through the network, even if it only flows backward after passing through many subsequent layers. To achieve this requirement, fast linear convolutions were introduced into modern CNN architectures, much like adding gears onto a wheel improves its efficiency. Fast linear convolutions sped up the backward propagation of error gradients by an order of magnitude, making algorithms like SGD work much faster. Importantly, fast linear convolutions did not change the structure of the learned architecture. Convolutional Neural Networks continue to consist of two sets of neurons: those that look forward through the input sequence and those that look backwards. In essence, fast linear convolutions transform input sequences into a fixed length representation called a bottleneck layer.</div><section id='sec10dot1-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 10.1. Spatial Dimension Reduction</h4><div class='html-p'>The impact of different types of spatial dimension reduction on deep residual learning for image recognition. The results showed that using a lower dimensional space can improve accuracy while reducing training time. Additionally, the study found that using a higher dimensional space can improve accuracy even further. However, this comes at the cost of increased training time. Ultimately, the best way to reduce dimensionality is to use a combination of both methods. Using convolutional neural networks (CNNs) with hierarchical fusion algorithms and variational autoencoders (VAEs) yields improved accuracy and less computational complexity when compared to CNNs alone. The authors [<a href="#B33-applsci-12-08972" class="html-bibr">33</a>] proposed a framework that has two main components: a two-stage network structure design and data augmentation.</div><div class='html-p'>The authors investigated how data augmentation affects performance. They trained three fully connected layers with additional inputs coming from each feature map layer of an encoder network before feeding it into another fully connected layer of the decoder network to generate representations corresponding to one type of variation by augmenting its input. They found that there was no significant difference in performance between any variations used in their experiments; thus, they suggest keeping things simple by only adding augmentation information prior to the first fully connected layer or combining multiple types of variations together before or after every encoding or decoding stage. Furthermore, they also show that using VAE’s with pre-trained weights improves performance on the CIFAR-10 and STL datasets without requiring more than double the number of parameters than what is required for vanilla NN models. 
In addition, the authors point out that these weights should be pretrained outside of the scope of DNN training, as VAEs are not robust to missing weights, which may lead to undesirable behaviors such as vanishing gradients during backpropagation.</div></section><section id='sec10dot2-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 10.2. Depth Reduction</h4><div class='html-p'>In 2015, the authors of [<a href="#B1-applsci-12-08972" class="html-bibr">1</a>] studied the impact of depth on accuracy in image recognition tasks. They found that shallower networks generally have worse performance than deeper ones. However, they also found that there is a point of diminishing returns: beyond a certain depth, adding more layers does not improve accuracy. In 2016, the authors of [<a href="#B2-applsci-12-08972" class="html-bibr">2</a>] used this idea to create what is known as a residual network, or ResNet. A ResNet consists of many layers (usually over 100), with each layer only learning a small residual between the output of the layer and its input. This architecture allows very deep networks to be trained without suffering from the issue of vanishing gradients.</div><div class='html-p'>Previously, residual networks consisted of just one hidden layer. However, when the network is being trained to predict weights based on labels, it can sometimes end up trying to reproduce particular patterns seen in the labels rather than doing a good job of predicting weights given other inputs. These patterns can lead to inaccurate predictions if they are too pronounced, because they are easy for the network to recognize. Therefore, the author proposed that residual networks should contain two hidden layers: one with many nodes and another with few. Furthermore, when training such a network so that it predicts properly given other inputs rather than trying to replicate what it sees in its label input, connections should be cut off periodically so that it does not try too hard to reproduce its earlier outputs.</div></section></section><section id='sec11-applsci-12-08972' type=''><h2 data-nested='1'> 11. Evaluation Metrics for DRN</h2><div class='html-p'>The authors of [<a href="#B1-applsci-12-08972" class="html-bibr">1</a>] studied the performance of DRN on four image recognition tasks. They found that DRN significantly outperformed previous state-of-the-art methods on all four tasks. The authors also adopted the top-1 error as the primary evaluation metric for DRN, based on the idea that the top-1 error is a more informative measure of performance than the traditional accuracy metric. The authors found that DRN achieved a top-1 error of only 3.56% on the ImageNet dataset, which is significantly lower than the previous state-of-the-art method (4.82%). In addition, DRN achieves a top-5 error of 9.8%, again much better than the previous state-of-the-art method (17.2%). Finally, DRN has a mean Average Precision of 0.927, while the previous state-of-the-art method had an average precision of 0.648 on the same datasets. These results show that DRN outperforms previous state-of-the-art methods in terms of all three metrics: top-1 error, top-5 error, and mean Average Precision.</div><div class='html-p'>To evaluate how well their models are performing, researchers use various measures such as accuracy or precision/recall. However, these metrics can be misleading if they are applied blindly without considering their underlying assumptions.
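For concreteness, the short, generic PyTorch helper below shows how top-1 and top-5 error are typically computed from a batch of model outputs; it illustrates the metric definitions only and is not the evaluation code used in the surveyed work, and the toy tensors at the end are arbitrary.<pre><code class="language-python">
import torch

def topk_errors(logits, targets, ks=(1, 5)):
    """Return top-k error rates (as fractions) for each k in ks.

    logits:  (batch, num_classes) raw model outputs
    targets: (batch,) integer class labels
    """
    max_k = max(ks)
    _, pred = logits.topk(max_k, dim=1)              # indices of the max_k highest-scoring classes
    correct = pred.eq(targets.unsqueeze(1))          # (batch, max_k) boolean matches
    errors = {}
    for k in ks:
        hits = correct[:, :k].any(dim=1).float().sum()
        errors[f"top{k}"] = 1.0 - hits.item() / targets.size(0)
    return errors

# Toy example: 4 samples, 10 classes (values are random, so the printed errors will vary).
logits = torch.randn(4, 10)
targets = torch.tensor([3, 7, 0, 2])
print(topk_errors(logits, targets))
</code></pre>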
Consider, for example, two image recognition models, Model A and Model B, both reported as 99% accurate, where Model A recognizes every photo correctly, whereas Model B returns no false positives but misses half of the photos it should recognize. It would be wrong to conclude from the reported accuracy alone that Model B is just as good as Model A! As we have seen before, however, measures like top-1 error and top-5 error are better at making such comparisons because false negatives have a greater impact on their values than false positives do. Therefore, let us take a look at how DRN compares to other algorithms when evaluated using this metric. One advantage of evaluating an algorithm using top-1 error instead of traditional accuracy is that one can examine algorithms side by side on an equal footing. For instance, comparing Algorithm A at 90% accuracy with Algorithm B at 98% accuracy, the two figures sound fairly close. Expressed as top-1 error, however, Algorithm A makes ten mistakes per 100 images (10% error) while Algorithm B makes only two (2% error), a fivefold difference, which makes the gap between them much clearer. Indeed, across all four image recognition tasks modeled here, we find that DRN performs much better than the competition according to this metric. On the CIFAR-10 dataset, DRN achieved a top-1 error of 16.3% compared to 23.6% for Mask R-CNN (a popular deep learning architecture). This means that even if we allow Mask R-CNN a higher false positive rate due to its more sophisticated features and complexity, it still has a worse bottom-line result with respect to correctness. Another noteworthy observation is that, besides improving test accuracy and reducing computational cost, deep residual networks also preserve local information from the previous layer. This property, combined with the ability to learn from a large set of data, enables DRN to achieve state-of-the-art accuracy on several image recognition tasks. These improvements come mostly from training deep networks with large amounts of data (data augmentation), which reduces overfitting, but deep residual networks may not always require large amounts of data to achieve similar accuracies; depending on the task, some combinations may actually perform better than others. In the case of mask-based networks, for example, accuracy might be boosted by anywhere from a few percentage points to many by trading top-1 error for top-5 error. Of course, this is a tradeoff with consequences. Some image recognition tasks might not require a low false negative rate and can accept false positives; others might not perform well if the accuracy is less than 97%. For example, consider a self-driving car: if it is programmed to be conservative and never to move when there is any chance of collision with another vehicle, then the car will never drive. That being said, for many applications, top-1 error is more important than top-5 error. In addition to better performance on image recognition tasks, DRN can also be used for the unsupervised pre-training of networks and for classification tasks that require a low false negative rate. They are trained in an end-to-end fashion by optimizing the objective function directly, bypassing many intermediate steps found in traditional approaches.</div></section><section id='sec12-applsci-12-08972' type=''><h2 data-nested='1'> 12. 
Datasets for Deep Residual Learning</h2><div class='html-p'>Deep residual learning is a powerful tool for improving the performance of machine learning models. In this section, we discuss the various datasets that are used to train deep residual networks.</div><section id='sec12dot1-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 12.1. Datasets for Deep Residual Learning</h4><div class='html-p'>There are several datasets that have been used to train deep residual networks. The most common ones are:</div><div class='html-p'>CIFAR-10 dataset [<a href="#B44-applsci-12-08972" class="html-bibr">44</a>,<a href="#B93-applsci-12-08972" class="html-bibr">93</a>]: This is one of the most widely used datasets for image recognition; it contains 60,000 images in total. It consists of images belonging to 10 categories such as airplanes, automobiles, birds, cats, etc. The size of each image is 32 × 32 pixels, with pixel values ranging between 0 and 255. Like CIFAR-100, it is a labeled subset of the 80 Million Tiny Images collection. The last two layers form the fully connected layer, which has 4096 neurons. The first layer is a convolutional layer with seven filters and four kernels. The dataset can be used for training a variety of models such as deep residual networks, ResNet, and DenseNet. The images are organized into two sets: a train set and a test set. The train set contains 50,000 images and is used to train the neural network. The test set contains 10,000 images and is used to evaluate how well a neural network performs on new data.</div></section><section id='sec12dot2-applsci-12-08972' type=''><h4 class='html-italic' data-nested='2'> 12.2. ImageNet</h4><div class='html-p'>ImageNet [<a href="#B94-applsci-12-08972" class="html-bibr">94</a>] is the de facto standard dataset for computer vision research and has been used in many state-of-the-art systems over the past few years. The full database contains over 14 million images organized into more than 20,000 categories (synsets). The ImageNet Large Scale Visual Recognition Challenge (ILSVRC) uses a subset covering 1000 of these categories.</div><div class='html-p'>ILSVRC classification subset: This is the portion of ImageNet most commonly used for image recognition benchmarks. It contains roughly 1.2 million training images, 50,000 validation images, and 100,000 test images belonging to 1000 categories such as butterfly, turtle, etc. The images vary in resolution and are typically resized (for example, to 224 × 224 pixels) before training, with pixel values ranging between 0 and 255.</div></section></section><section id='sec13-applsci-12-08972' type=''><h2 data-nested='1'> 13. Recommendations &amp; Challenges in Deep Residual Learning for Image Recognition</h2><div class='html-p'>Below are some open challenges in deep residual learning for image recognition.</div><ul class='html-bullet'><li><div class='html-p'>The challenge of deep residual learning for image recognition is that the error signal between the input and output of a deep neural network is not strong enough to train the network. This can be overcome by using residual networks, which are a type of deep learning architecture that uses an additional set of shortcut (skip) connections to carry a residual signal in addition to the standard forward pass.</div></li><li><div class='html-p'>Deep residual learning (DRL) has recently emerged as an important approach for image recognition. It generally consists of two stages: a reconstruction stage and a refinement stage. 
The reconstruction stage applies an autoencoder to the input image and reconstructs it using a sparse code in order to preserve salient features in each layer. In contrast, the refinement stage aims to further enhance the reconstructed image with an attention model. To apply DRL to large-scale problems, we need to find ways to reduce the number of parameters required by the network so that it can fit into memory. We also need better ways of training the network so that it can generalize well across different images without overfitting on particular examples.</div></li><li><div class='html-p'>Deep residual learning is a powerful technique for training deep neural networks. It has been applied in many applications, such as image recognition, speech recognition, and natural language processing. However, the training of deep residual networks is far from trivial.</div></li><li><div class='html-p'>Handling large-scale feature maps: Residual blocks are designed to learn representations of intermediate layers. Therefore, the size of the feature maps needs to be large enough to capture useful information from these layers. Traditionally, these high-level layers are trained using CNNs with small kernels (e.g., 3 × 3). This leads to overfitting problems because of insufficient capacity for generalization.</div></li><li><div class='html-p'>Batch normalization: Batch normalization is an important technique for training deep residual networks due to its stability and improved convergence speed. However, it is necessary to carefully design batch normalization parameters according to the network structure and data distribution characteristics so that they achieve good performance without overfitting issues.</div></li><li><div class='html-p'>Training a deep residual network requires a large amount of data for training an individual layer, which limits our ability to use these models on small datasets like those used for object detection and semantic segmentation tasks, where there may be only one or two thousand examples per class at most (e.g., YOLO).</div></li><li><div class='html-p'>The main issue is that the training of a network with a large number of parameters is computationally expensive, which makes it difficult to train a large-scale network using backpropagation. Some approaches have tried to address this problem by using a smaller number of parameters, but this can lead to degraded accuracy.</div></li><li><div class='html-p'>Another issue is that the spatial dimensions of images are usually much larger than the temporal dimensions (e.g., 64 × 64 vs. 1 × 1). The ability to capture higher-order dependencies between pixels in an image (i.e., spatial correlations) is one of the reasons why deep learning has been so successful for image recognition tasks, but it also makes training more challenging because the temporal dimension may not be long enough to capture these dependencies.</div></li><li><div class='html-p'>There is no simple way to incorporate prior knowledge into deep learning models that can significantly improve performance on many tasks, such as semantic segmentation and object detection [<a href="#B20-applsci-12-08972" class="html-bibr">20</a>].</div></li><li><div class='html-p'>Low generalization ability: The main disadvantage of deep residual networks is that they are very sensitive to the choice of features and initializations. 
Therefore, it is difficult to train a deep residual network with high accuracy across different datasets or domains.</div></li><li><div class='html-p'>Inability to handle noise: Deep residual networks are unable to handle noise well because they need some form of regularization to avoid overfitting. Moreover, they do not have any mechanism for learning the underlying structure of images and can therefore be easily confused by small changes such as rotation, translation, or flipping (e.g., flipping an image vertically).</div></li><li><div class='html-p'>Difficulties in training: Training a deep residual network is more challenging than training shallow feedforward networks, since it requires more parameters and can overfit quickly, especially if not enough training data are available.</div></li><li><div class='html-p'>High computational complexity</div></li><li><div class='html-p'>Poor generalization ability in small-scale networks</div></li><li><div class='html-p'>Difficulties in parameter choice</div></li><li><div class='html-p'>Poor performance on multi-class problems</div></li><li><div class='html-p'>Poor performance on large-scale problems</div></li><li><div class='html-p'>Poor performance on low-level vision problems</div></li><li><div class='html-p'>Difficulty of incorporating prior knowledge into the network architecture</div></li><li><div class='html-p'>Difficulty of adapting models to new tasks</div></li></ul><div class='html-p'>The points above also serve as recommendations to the reader and the research community with regard to further research. The literature review of some of the important research is shown in <a href="#applsci-12-08972-t001" class="html-table">Table 1</a>.</div></section><section id='14Conclusions' type=''><h2 data-nested='1'> 14. Conclusions</h2><div class='html-p'>In the past few years, deep neural networks (DNNs) have been widely used in image recognition and other related applications, such as video processing and speech recognition. However, there are still some issues that need to be addressed before they can be applied to more complex problems. In this paper, we present a comprehensive survey of deep residual learning for image recognition. We first review deep residual learning and its applications in image recognition. We then present several successful applications of deep residual learning, including image classification, object detection, and semantic segmentation. Finally, we discuss some issues that still need to be resolved before deep residual learning can be applied to more complex problems. 
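One common, low-cost mitigation for the overfitting and transformation sensitivity noted in Section 13, and a practical way of giving the model more data as suggested below, is data augmentation at training time. The snippet below is a generic torchvision example; the specific transforms and normalization statistics are illustrative choices, not settings taken from the surveyed papers.<pre><code class="language-python">
import torchvision.transforms as T

# Typical CIFAR-style training-time augmentation: small translations via padded random
# crops, horizontal flips, and mild rotations, followed by per-channel normalization
# (the mean/std values are commonly quoted CIFAR-10 statistics).
train_transform = T.Compose([
    T.RandomCrop(32, padding=4),
    T.RandomHorizontalFlip(),
    T.RandomRotation(degrees=10),
    T.ToTensor(),
    T.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2470, 0.2435, 0.2616)),
])

# Evaluation uses only deterministic preprocessing, so reported metrics stay comparable.
test_transform = T.Compose([
    T.ToTensor(),
    T.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2470, 0.2435, 0.2616)),
])
</code></pre>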
However, the performance of DRL can be further improved in the following ways:</div><div class='html-p'><ul class='html-order'><li><div class='html-p'>The first option is to add more layers of neurons to the network.</div></li><li><div class='html-p'>The second is to increase the number of filters in each layer, which helps to obtain a better model for the problem at hand.</div></li><li><div class='html-p'>The third is to increase the number of hidden layers in the network and also the number of neurons in each layer.</div></li><li><div class='html-p'>Another way to improve deep residual learning is to add more data from different images and videos so that the model can be trained on more examples. This makes it easier to learn about new images or videos and yields better predictions for them.</div></li></ul></div></section> </div> <div class="html-back"> <section class='html-notes'><h2 >Author Contributions</h2><div class='html-p'>Writing Original Draft, Writing Reviewing and Editing, Conceptualization, M.S.; Methodology, Z.G.; Funding acquisition, Z.G.; Project administration, Supervision, Z.G. All authors have read and agreed to the published version of the manuscript.</div></section><section class='html-notes'><h2>Funding</h2><div class='html-p'>This work is supported in part by the Major Key Project of PCL (Grant No. PCL2022A03), the National Natural Science Foundation of China (61902082), Guangzhou Science and technology planning project (No. 202102010507), Guangdong Higher Education Innovation Group (2020KCXTD007), and Guangzhou Higher Education Innovation Group (202032854).</div></section><section class='html-notes'><h2 >Institutional Review Board Statement</h2><div class='html-p'>Not applicable.</div></section><section class='html-notes'><h2 >Informed Consent Statement</h2><div class='html-p'>Not applicable.</div></section><section class='html-notes'><h2 >Data Availability Statement</h2><div class='html-p'>Not applicable.</div></section><section class='html-notes'><h2 >Conflicts of Interest</h2><div class='html-p'>The authors declare that they have no conflicts of interest.</div></section><section id='html-references_list'><h2>References</h2><ol class='html-xx'><li id='B1-applsci-12-08972' class='html-x' data-content='1.'>He, K.; Zhang, X.; Ren, S.; Sun, J. Deep residual learning for image recognition. In Proceedings of the 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Las Vegas, NV, USA, 27–30 June 2016; pp. 770–778. [<a href="https://scholar.google.com/scholar_lookup?title=Deep+residual+learning+for+image+recognition&conference=Proceedings+of+the+2016+IEEE+Conference+on+Computer+Vision+and+Pattern+Recognition+(CVPR)&author=He,+K.&author=Zhang,+X.&author=Ren,+S.&author=Sun,+J.&publication_year=2016&pages=770%E2%80%93778&doi=10.1109/CVPR.2016.90" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/CVPR.2016.90" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B2-applsci-12-08972' class='html-x' data-content='2.'>Couso, I.; Dubois, D. A general framework for maximizing likelihood under incomplete data. <span class='html-italic'>Int. J. Approx. Reason.</span> <b>2018</b>, <span class='html-italic'>93</span>, 238–260. 
[<a href="https://scholar.google.com/scholar_lookup?title=A+general+framework+for+maximizing+likelihood+under+incomplete+data&author=Couso,+I.&author=Dubois,+D.&publication_year=2018&journal=Int.+J.+Approx.+Reason.&volume=93&pages=238%E2%80%93260&doi=10.1016/j.ijar.2017.10.030" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1016/j.ijar.2017.10.030" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B3-applsci-12-08972' class='html-x' data-content='3.'>Liang, Y.; Peng, W.; Zheng, Z.-J.; Silvén, O.; Zhao, G. A hybrid quantum–classical neural network with deep residual learning. <span class='html-italic'>Neural Netw.</span> <b>2021</b>, <span class='html-italic'>143</span>, 133–147. [<a href="https://scholar.google.com/scholar_lookup?title=A+hybrid+quantum%E2%80%93classical+neural+network+with+deep+residual+learning&author=Liang,+Y.&author=Peng,+W.&author=Zheng,+Z.-J.&author=Silv%C3%A9n,+O.&author=Zhao,+G.&publication_year=2021&journal=Neural+Netw.&volume=143&pages=133%E2%80%93147&doi=10.1016/j.neunet.2021.05.028&pmid=34139629" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1016/j.neunet.2021.05.028" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>] [<a href="http://www.ncbi.nlm.nih.gov/pubmed/34139629" class='cross-ref' data-typ='pmid' target='_blank' rel='noopener noreferrer'>PubMed</a>]</li><li id='B4-applsci-12-08972' class='html-x' data-content='4.'>Feng, Z.; Nie, D.; Wang, L.; Shen, D. Semi-supervised learning for pelvic MR image segmentation based on multi-task residual fully convolutional networks. In Proceedings of the 2018 IEEE 15th International Symposium on Biomedical Imaging (ISBI 2018), Washington, DC, USA, 4–7 April 2018; pp. 885–888. [<a href="https://scholar.google.com/scholar_lookup?title=Semi-supervised+learning+for+pelvic+MR+image+segmentation+based+on+multi-task+residual+fully+convolutional+networks&conference=Proceedings+of+the+2018+IEEE+15th+International+Symposium+on+Biomedical+Imaging+(ISBI+2018)&author=Feng,+Z.&author=Nie,+D.&author=Wang,+L.&author=Shen,+D.&publication_year=2018&pages=885%E2%80%93888&doi=10.1109/isbi.2018.8363713" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/isbi.2018.8363713" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B5-applsci-12-08972' class='html-x' data-content='5.'>Li, L.; Jin, W.; Huang, Y. Few-shot contrastive learning for image classification and its application to insulator identification. <span class='html-italic'>Appl. Intell.</span> <b>2021</b>, <span class='html-italic'>52</span>, 6148–6163. 
[<a href="https://scholar.google.com/scholar_lookup?title=Few-shot+contrastive+learning+for+image+classification+and+its+application+to+insulator+identification&author=Li,+L.&author=Jin,+W.&author=Huang,+Y.&publication_year=2021&journal=Appl.+Intell.&volume=52&pages=6148%E2%80%936163&doi=10.1007/s10489-021-02769-6&pmid=34764617" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1007/s10489-021-02769-6" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>] [<a href="http://www.ncbi.nlm.nih.gov/pubmed/34764617" class='cross-ref' data-typ='pmid' target='_blank' rel='noopener noreferrer'>PubMed</a>]</li><li id='B6-applsci-12-08972' class='html-x' data-content='6.'>Yang, M.; Thung, G. Classification of Trash for Recyclability Status. <span class='html-italic'>CS229Project Rep.</span> <b>2016</b>, <span class='html-italic'>2016</span>, 3. [<a href="https://scholar.google.com/scholar_lookup?title=Classification+of+Trash+for+Recyclability+Status&author=Yang,+M.&author=Thung,+G.&publication_year=2016&journal=CS229Project+Rep.&volume=2016&pages=3" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>]</li><li id='B7-applsci-12-08972' class='html-x' data-content='7.'>Karar, M.E.; Hemdan, E.E.-D.; Shouman, M.A. Cascaded deep learning classifiers for computer-aided diagnosis of COVID-19 and pneumonia diseases in X-ray scans. <span class='html-italic'>Complex Intell. Syst.</span> <b>2020</b>, <span class='html-italic'>7</span>, 235–247. [<a href="https://scholar.google.com/scholar_lookup?title=Cascaded+deep+learning+classifiers+for+computer-aided+diagnosis+of+COVID-19+and+pneumonia+diseases+in+X-ray+scans&author=Karar,+M.E.&author=Hemdan,+E.E.-D.&author=Shouman,+M.A.&publication_year=2020&journal=Complex+Intell.+Syst.&volume=7&pages=235%E2%80%93247&doi=10.1007/s40747-020-00199-4" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1007/s40747-020-00199-4" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B8-applsci-12-08972' class='html-x' data-content='8.'>Zhu, J.; Chen, H.; Ye, W. A Hybrid CNN–LSTM Network for the Classification of Human Activities Based on Micro-Doppler Radar. <span class='html-italic'>IEEE Access</span> <b>2020</b>, <span class='html-italic'>8</span>, 24713–24720. [<a href="https://scholar.google.com/scholar_lookup?title=A+Hybrid+CNN%E2%80%93LSTM+Network+for+the+Classification+of+Human+Activities+Based+on+Micro-Doppler+Radar&author=Zhu,+J.&author=Chen,+H.&author=Ye,+W.&publication_year=2020&journal=IEEE+Access&volume=8&pages=24713%E2%80%9324720&doi=10.1109/ACCESS.2020.2971064" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/ACCESS.2020.2971064" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B9-applsci-12-08972' class='html-x' data-content='9.'><span class='html-italic'>FPGA Acceleration of Convolutional Neural Networks</span>; Nallatech: Camarillo, CA, USA, 2017.</li><li id='B10-applsci-12-08972' class='html-xx' data-content='10.'>Michael, A.; Garonga, M. Classification model of ‘Toraja’ arabica coffee fruit ripeness levels using convolution neural network approach. <span class='html-italic'>ILKOM J. Ilm.</span> <b>2021</b>, <span class='html-italic'>13</span>, 226–234. 
[<a href="https://scholar.google.com/scholar_lookup?title=Classification+model+of+%E2%80%98Toraja%E2%80%99+arabica+coffee+fruit+ripeness+levels+using+convolution+neural+network+approach&author=Michael,+A.&author=Garonga,+M.&publication_year=2021&journal=ILKOM+J.+Ilm.&volume=13&pages=226%E2%80%93234" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>]</li><li id='B11-applsci-12-08972' class='html-xx' data-content='11.'>Al-Kharraz, M.S.; Elrefaei, L.A.; Fadel, M.A. Automated System for Chromosome Karyotyping to Recognize the Most Common Numerical Abnormalities Using Deep Learning. <span class='html-italic'>IEEE Access</span> <b>2020</b>, <span class='html-italic'>8</span>, 157727–157747. [<a href="https://scholar.google.com/scholar_lookup?title=Automated+System+for+Chromosome+Karyotyping+to+Recognize+the+Most+Common+Numerical+Abnormalities+Using+Deep+Learning&author=Al-Kharraz,+M.S.&author=Elrefaei,+L.A.&author=Fadel,+M.A.&publication_year=2020&journal=IEEE+Access&volume=8&pages=157727%E2%80%93157747&doi=10.1109/ACCESS.2020.3019937" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/ACCESS.2020.3019937" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B12-applsci-12-08972' class='html-xx' data-content='12.'>Avtar, R.; Tripathi, S.; Aggarwal, A.K.; Kumar, P. Population–Urbanization–Energy Nexus: A Review. <span class='html-italic'>Resources</span> <b>2019</b>, <span class='html-italic'>8</span>, 136. [<a href="https://scholar.google.com/scholar_lookup?title=Population%E2%80%93Urbanization%E2%80%93Energy+Nexus:+A+Review&author=Avtar,+R.&author=Tripathi,+S.&author=Aggarwal,+A.K.&author=Kumar,+P.&publication_year=2019&journal=Resources&volume=8&pages=136&doi=10.3390/resources8030136" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.3390/resources8030136" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B13-applsci-12-08972' class='html-xx' data-content='13.'>Brachmann, E.; Rother, C. Visual Camera Re-Localization from RGB and RGB-D Images Using DSAC. <span class='html-italic'>IEEE Trans. Pattern Anal. Mach. Intell.</span> <b>2021</b>, <span class='html-italic'>44</span>, 5847–5865. [<a href="https://scholar.google.com/scholar_lookup?title=Visual+Camera+Re-Localization+from+RGB+and+RGB-D+Images+Using+DSAC&author=Brachmann,+E.&author=Rother,+C.&publication_year=2021&journal=IEEE+Trans.+Pattern+Anal.+Mach.+Intell.&volume=44&pages=5847%E2%80%935865&doi=10.1109/TPAMI.2021.3070754&pmid=33798073" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/TPAMI.2021.3070754" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>] [<a href="http://www.ncbi.nlm.nih.gov/pubmed/33798073" class='cross-ref' data-typ='pmid' target='_blank' rel='noopener noreferrer'>PubMed</a>]</li><li id='B14-applsci-12-08972' class='html-xx' data-content='14.'>Akhand, M.; Roy, S.; Siddique, N.; Kamal, A.S.; Shimamura, T. Facial Emotion Recognition Using Transfer Learning in the Deep CNN. <span class='html-italic'>Electronics</span> <b>2021</b>, <span class='html-italic'>10</span>, 1036. 
[<a href="https://scholar.google.com/scholar_lookup?title=Facial+Emotion+Recognition+Using+Transfer+Learning+in+the+Deep+CNN&author=Akhand,+M.&author=Roy,+S.&author=Siddique,+N.&author=Kamal,+A.S.&author=Shimamura,+T.&publication_year=2021&journal=Electronics&volume=10&pages=1036&doi=10.3390/electronics10091036" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.3390/electronics10091036" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B15-applsci-12-08972' class='html-xx' data-content='15.'>Rathgeb, C.; Bernardo, K.; Haryanto, N.E.; Busch, C. Effects of image compression on face image manipulation detection: A case study on facial retouching. <span class='html-italic'>IET Biom.</span> <b>2021</b>, <span class='html-italic'>10</span>, 342–355. [<a href="https://scholar.google.com/scholar_lookup?title=Effects+of+image+compression+on+face+image+manipulation+detection:+A+case+study+on+facial+retouching&author=Rathgeb,+C.&author=Bernardo,+K.&author=Haryanto,+N.E.&author=Busch,+C.&publication_year=2021&journal=IET+Biom.&volume=10&pages=342%E2%80%93355&doi=10.1049/bme2.12027" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1049/bme2.12027" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B16-applsci-12-08972' class='html-xx' data-content='16.'>Siam, M.; Elkerdawy, S.; Jagersand, M.; Yogamani, S. Deep semantic segmentation for automated driving: Taxonomy, roadmap and challenges. In Proceedings of the 2017 IEEE 20th International Conference on Intelligent Transportation Systems (ITSC), Yokohama, Japan, 16–19 October 2017. [<a href="https://scholar.google.com/scholar_lookup?title=Deep+semantic+segmentation+for+automated+driving:+Taxonomy,+roadmap+and+challenges&conference=Proceedings+of+the+2017+IEEE+20th+International+Conference+on+Intelligent+Transportation+Systems+(ITSC)&author=Siam,+M.&author=Elkerdawy,+S.&author=Jagersand,+M.&author=Yogamani,+S.&publication_year=2017&doi=10.1109/ITSC.2017.8317714" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/ITSC.2017.8317714" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B17-applsci-12-08972' class='html-xx' data-content='17.'>Zhang, K.; Li, Y.; Zuo, W.; Zhang, L.; Van Gool, L.; Timofte, R. Plug-and-Play Image Restoration with Deep Denoiser Prior. <span class='html-italic'>IEEE Trans. Pattern Anal. Mach. Intell.</span> 2021; <span class='html-italic'>early access</span>. [<a href="https://scholar.google.com/scholar_lookup?title=Plug-and-Play+Image+Restoration+with+Deep+Denoiser+Prior&author=Zhang,+K.&author=Li,+Y.&author=Zuo,+W.&author=Zhang,+L.&author=Van+Gool,+L.&author=Timofte,+R.&publication_year=2021&doi=10.1109/TPAMI.2021.3088914" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/TPAMI.2021.3088914" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B18-applsci-12-08972' class='html-xx' data-content='18.'>Sangeetha, V.; Prasad, K.J.R. Deep Residual Learning for Image Recognition Kaiming. <span class='html-italic'>Indian J. Chem.-Sect. B Org. Med. Chem.</span> <b>2006</b>. 
[<a href="https://scholar.google.com/scholar_lookup?title=Deep+Residual+Learning+for+Image+Recognition+Kaiming&author=Sangeetha,+V.&author=Prasad,+K.J.R.&publication_year=2006&journal=Indian+J.+Chem.-Sect.+B+Org.+Med.+Chem." class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>]</li><li id='B19-applsci-12-08972' class='html-xx' data-content='19.'>Cheng, S.; Wang, L.; Du, A. An Adaptive and Asymmetric Residual Hash for Fast Image Retrieval. <span class='html-italic'>IEEE Access</span> <b>2019</b>, <span class='html-italic'>7</span>, 78942–78953. [<a href="https://scholar.google.com/scholar_lookup?title=An+Adaptive+and+Asymmetric+Residual+Hash+for+Fast+Image+Retrieval&author=Cheng,+S.&author=Wang,+L.&author=Du,+A.&publication_year=2019&journal=IEEE+Access&volume=7&pages=78942%E2%80%9378953&doi=10.1109/ACCESS.2019.2922738" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/ACCESS.2019.2922738" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B20-applsci-12-08972' class='html-xx' data-content='20.'>Fujii, T.; Sei, Y.; Tahara, Y.; Orihara, R.; Ohsuga, A. “Never fry carrots without chopping” Generating Cooking Recipes from Cooking Videos Using Deep Learning Considering Previous Process. <span class='html-italic'>Int. J. Netw. Distrib. Comput.</span> <b>2019</b>, <span class='html-italic'>7</span>, 107. [<a href="https://scholar.google.com/scholar_lookup?title=%E2%80%9CNever+fry+carrots+without+chopping%E2%80%9D+Generating+Cooking+Recipes+from+Cooking+Videos+Using+Deep+Learning+Considering+Previous+Process&author=Fujii,+T.&author=Sei,+Y.&author=Tahara,+Y.&author=Orihara,+R.&author=Ohsuga,+A.&publication_year=2019&journal=Int.+J.+Netw.+Distrib.+Comput.&volume=7&pages=107&doi=10.2991/ijndc.k.190710.002" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.2991/ijndc.k.190710.002" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B21-applsci-12-08972' class='html-xx' data-content='21.'>Avtar, R.; Sahu, N.; Aggarwal, A.K.; Chakraborty, S.; Kharrazi, A.; Yunus, A.P.; Dou, J.; Kurniawan, T.A. Exploring Renewable Energy Resources Using Remote Sensing and GIS—A Review. <span class='html-italic'>Resources</span> <b>2019</b>, <span class='html-italic'>8</span>, 149. [<a href="https://scholar.google.com/scholar_lookup?title=Exploring+Renewable+Energy+Resources+Using+Remote+Sensing+and+GIS%E2%80%94A+Review&author=Avtar,+R.&author=Sahu,+N.&author=Aggarwal,+A.K.&author=Chakraborty,+S.&author=Kharrazi,+A.&author=Yunus,+A.P.&author=Dou,+J.&author=Kurniawan,+T.A.&publication_year=2019&journal=Resources&volume=8&pages=149&doi=10.3390/resources8030149" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.3390/resources8030149" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B22-applsci-12-08972' class='html-xx' data-content='22.'>Avtar, R.; Komolafe, A.A.; Kouser, A.; Singh, D.; Yunus, A.P.; Dou, J.; Kumar, P.; Das Gupta, R.; Johnson, B.A.; Minh, H.V.T.; et al. Assessing sustainable development prospects through remote sensing: A review. <span class='html-italic'>Remote Sens. Appl. Soc. Environ.</span> <b>2020</b>, <span class='html-italic'>20</span>, 100402. 
[<a href="https://scholar.google.com/scholar_lookup?title=Assessing+sustainable+development+prospects+through+remote+sensing:+A+review&author=Avtar,+R.&author=Komolafe,+A.A.&author=Kouser,+A.&author=Singh,+D.&author=Yunus,+A.P.&author=Dou,+J.&author=Kumar,+P.&author=Das+Gupta,+R.&author=Johnson,+B.A.&author=Minh,+H.V.T.&publication_year=2020&journal=Remote+Sens.+Appl.+Soc.+Environ.&volume=20&pages=100402&doi=10.1016/j.rsase.2020.100402" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1016/j.rsase.2020.100402" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B23-applsci-12-08972' class='html-xx' data-content='23.'>Fu, Z.; Tseng, H.W.; Vedantham, S.; Karellas, A.; Bilgin, A. A residual dense network assisted sparse view reconstruction for breast computed tomography. <span class='html-italic'>Sci. Rep.</span> <b>2020</b>, <span class='html-italic'>10</span>, 21111. [<a href="https://scholar.google.com/scholar_lookup?title=A+residual+dense+network+assisted+sparse+view+reconstruction+for+breast+computed+tomography&author=Fu,+Z.&author=Tseng,+H.W.&author=Vedantham,+S.&author=Karellas,+A.&author=Bilgin,+A.&publication_year=2020&journal=Sci.+Rep.&volume=10&pages=21111&doi=10.1038/s41598-020-77923-0" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1038/s41598-020-77923-0" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B24-applsci-12-08972' class='html-xx' data-content='24.'>Wu, W.; Hu, D.; Niu, C.; Broeke, L.V.; Butler, A.P.; Cao, P.; Atlas, J.; Chernoglazov, A.; Vardhanabhuti, V.; Wang, G. Deep learning based spectral CT imaging. <span class='html-italic'>Neural Netw.</span> <b>2021</b>, <span class='html-italic'>144</span>, 342–358. [<a href="https://scholar.google.com/scholar_lookup?title=Deep+learning+based+spectral+CT+imaging&author=Wu,+W.&author=Hu,+D.&author=Niu,+C.&author=Broeke,+L.V.&author=Butler,+A.P.&author=Cao,+P.&author=Atlas,+J.&author=Chernoglazov,+A.&author=Vardhanabhuti,+V.&author=Wang,+G.&publication_year=2021&journal=Neural+Netw.&volume=144&pages=342%E2%80%93358&doi=10.1016/j.neunet.2021.08.026&pmid=34560584" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1016/j.neunet.2021.08.026" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>] [<a href="http://www.ncbi.nlm.nih.gov/pubmed/34560584" class='cross-ref' data-typ='pmid' target='_blank' rel='noopener noreferrer'>PubMed</a>]</li><li id='B25-applsci-12-08972' class='html-xx' data-content='25.'>Jalali, Y.; Fateh, M.; Rezvani, M.; Abolghasemi, V.; Anisi, M.H. ResBCDU-Net: A Deep Learning Framework for Lung CT Image Segmentation. <span class='html-italic'>Sensors</span> <b>2021</b>, <span class='html-italic'>21</span>, 268. 
[<a href="https://scholar.google.com/scholar_lookup?title=ResBCDU-Net:+A+Deep+Learning+Framework+for+Lung+CT+Image+Segmentation&author=Jalali,+Y.&author=Fateh,+M.&author=Rezvani,+M.&author=Abolghasemi,+V.&author=Anisi,+M.H.&publication_year=2021&journal=Sensors&volume=21&pages=268&doi=10.3390/s21010268&pmid=33401581" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.3390/s21010268" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>] [<a href="http://www.ncbi.nlm.nih.gov/pubmed/33401581" class='cross-ref' data-typ='pmid' target='_blank' rel='noopener noreferrer'>PubMed</a>]</li><li id='B26-applsci-12-08972' class='html-xx' data-content='26.'>Chalasani, P. Lung CT Image Recognition using Deep Learning Techniques to Detect Lung Cancer. <span class='html-italic'>Int. J. Emerg. Trends Eng. Res.</span> <b>2020</b>, <span class='html-italic'>8</span>, 3575–3579. [<a href="https://scholar.google.com/scholar_lookup?title=Lung+CT+Image+Recognition+using+Deep+Learning+Techniques+to+Detect+Lung+Cancer&author=Chalasani,+P.&publication_year=2020&journal=Int.+J.+Emerg.+Trends+Eng.+Res.&volume=8&pages=3575%E2%80%933579&doi=10.30534/ijeter/2020/113872020" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.30534/ijeter/2020/113872020" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B27-applsci-12-08972' class='html-xx' data-content='27.'>Cui, B.; Dong, X.-M.; Zhan, Q.; Peng, J.; Sun, W. LiteDepthwiseNet: A Lightweight Network for Hyperspectral Image Classification. <span class='html-italic'>IEEE Trans. Geosci. Remote Sens.</span> <b>2021</b>, <span class='html-italic'>60</span>, 1–15. [<a href="https://scholar.google.com/scholar_lookup?title=LiteDepthwiseNet:+A+Lightweight+Network+for+Hyperspectral+Image+Classification&author=Cui,+B.&author=Dong,+X.-M.&author=Zhan,+Q.&author=Peng,+J.&author=Sun,+W.&publication_year=2021&journal=IEEE+Trans.+Geosci.+Remote+Sens.&volume=60&pages=1%E2%80%9315&doi=10.1109/TGRS.2021.3062372" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/TGRS.2021.3062372" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B28-applsci-12-08972' class='html-xx' data-content='28.'>Jafar, A.; Myungho, L. Hyperparameter Optimization for Deep Residual Learning in Image Classification. In Proceedings of the 2020 IEEE International Conference on Autonomic Computing and Self-Organizing Systems Companion (ACSOS-C), Washington, DC, USA, 17–21 August 2020. [<a href="https://scholar.google.com/scholar_lookup?title=Hyperparameter+Optimization+for+Deep+Residual+Learning+in+Image+Classification&conference=Proceedings+of+the+2020+IEEE+International+Conference+on+Autonomic+Computing+and+Self-Organizing+Systems+Companion+(ACSOS-C)&author=Jafar,+A.&author=Myungho,+L.&publication_year=2020&doi=10.1109/ACSOS-C51401.2020.00024" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/ACSOS-C51401.2020.00024" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B29-applsci-12-08972' class='html-xx' data-content='29.'>Qian, Y.; Bi, M.; Tan, T.; Yu, K. Very Deep Convolutional Neural Networks for Noise Robust Speech Recognition. <span class='html-italic'>IEEE/ACM Trans. Audio Speech Lang. 
Process.</span> <b>2016</b>, <span class='html-italic'>24</span>, 2263–2276. [<a href="https://scholar.google.com/scholar_lookup?title=Very+Deep+Convolutional+Neural+Networks+for+Noise+Robust+Speech+Recognition&author=Qian,+Y.&author=Bi,+M.&author=Tan,+T.&author=Yu,+K.&publication_year=2016&journal=IEEE/ACM+Trans.+Audio+Speech+Lang.+Process.&volume=24&pages=2263%E2%80%932276&doi=10.1109/TASLP.2016.2602884" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/TASLP.2016.2602884" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B30-applsci-12-08972' class='html-xx' data-content='30.'>Wang, R.; Tao, D. Training Very Deep CNNs for General Non-Blind Deconvolution. <span class='html-italic'>IEEE Trans. Image Process.</span> <b>2018</b>, <span class='html-italic'>27</span>, 2897–2910. [<a href="https://scholar.google.com/scholar_lookup?title=Training+Very+Deep+CNNs+for+General+Non-Blind+Deconvolution&author=Wang,+R.&author=Tao,+D.&publication_year=2018&journal=IEEE+Trans.+Image+Process.&volume=27&pages=2897%E2%80%932910&doi=10.1109/TIP.2018.2815084&pmid=29993866" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/TIP.2018.2815084" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>] [<a href="http://www.ncbi.nlm.nih.gov/pubmed/29993866" class='cross-ref' data-typ='pmid' target='_blank' rel='noopener noreferrer'>PubMed</a>]</li><li id='B31-applsci-12-08972' class='html-xx' data-content='31.'>Tai, Y.; Yang, J.; Liu, X. Image super-resolution via deep recursive residual network. In Proceedings of the 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Honolulu, HI, USA, 21–26 July 2017. [<a href="https://scholar.google.com/scholar_lookup?title=Image+super-resolution+via+deep+recursive+residual+network&conference=Proceedings+of+the+2017+IEEE+Conference+on+Computer+Vision+and+Pattern+Recognition+(CVPR)&author=Tai,+Y.&author=Yang,+J.&author=Liu,+X.&publication_year=2017&doi=10.1109/CVPR.2017.298" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/CVPR.2017.298" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B32-applsci-12-08972' class='html-xx' data-content='32.'>Galea, C.; Farrugia, R.A. Matching Software-Generated Sketches to Face Photographs With a Very Deep CNN, Morphed Faces, and Transfer Learning. <span class='html-italic'>IEEE Trans. Inf. Forensics Secur.</span> <b>2017</b>, <span class='html-italic'>13</span>, 1421–1431. [<a href="https://scholar.google.com/scholar_lookup?title=Matching+Software-Generated+Sketches+to+Face+Photographs+With+a+Very+Deep+CNN,+Morphed+Faces,+and+Transfer+Learning&author=Galea,+C.&author=Farrugia,+R.A.&publication_year=2017&journal=IEEE+Trans.+Inf.+Forensics+Secur.&volume=13&pages=1421%E2%80%931431&doi=10.1109/TIFS.2017.2788002" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/TIFS.2017.2788002" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B33-applsci-12-08972' class='html-xx' data-content='33.'>Moriya, S.; Shibata, C. Transfer Learning Method for Very Deep CNN for Text Classification and Methods for its Evaluation. 
In Proceedings of the 2018 IEEE 42nd Annual Computer Software and Applications Conference (COMPSAC), Tokyo, Japan, 23–27 July 2018; Volume 2. [<a href="https://scholar.google.com/scholar_lookup?title=Transfer+Learning+Method+for+Very+Deep+CNN+for+Text+Classification+and+Methods+for+its+Evaluation&conference=Proceedings+of+the+2018+IEEE+42nd+Annual+Computer+Software+and+Applications+Conference+(COMPSAC)&author=Moriya,+S.&author=Shibata,+C.&publication_year=2018&doi=10.1109/COMPSAC.2018.10220" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/COMPSAC.2018.10220" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B34-applsci-12-08972' class='html-xx' data-content='34.'>Afzal, M.Z.; Kolsch, A.; Ahmed, S.; Liwicki, M. Cutting the Error by Half: Investigation of Very Deep CNN and Advanced Training Strategies for Document Image Classification. In Proceedings of the 2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR), Kyoto, Japan, 9–15 November 2017. [<a href="https://scholar.google.com/scholar_lookup?title=Cutting+the+Error+by+Half:+Investigation+of+Very+Deep+CNN+and+Advanced+Training+Strategies+for+Document+Image+Classification&conference=Proceedings+of+the+2017+14th+IAPR+International+Conference+on+Document+Analysis+and+Recognition+(ICDAR)&author=Afzal,+M.Z.&author=Kolsch,+A.&author=Ahmed,+S.&author=Liwicki,+M.&publication_year=2017&doi=10.1109/icdar.2017.149" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/icdar.2017.149" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>] [<a href="http://arxiv.org/pdf/1704.03557" target='_blank' rel="noopener noreferrer">Green Version</a>]</li><li id='B35-applsci-12-08972' class='html-xx' data-content='35.'>Bashir, S.M.A.; Wang, Y.; Khan, M.; Niu, Y. A comprehensive review of deep learning-based single image super-resolution. <span class='html-italic'>PeerJ Comput. Sci.</span> <b>2021</b>, <span class='html-italic'>7</span>, e621. [<a href="https://scholar.google.com/scholar_lookup?title=A+comprehensive+review+of+deep+learning-based+single+image+super-resolution&author=Bashir,+S.M.A.&author=Wang,+Y.&author=Khan,+M.&author=Niu,+Y.&publication_year=2021&journal=PeerJ+Comput.+Sci.&volume=7&pages=e621&doi=10.7717/peerj-cs.621&pmid=34322592" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.7717/peerj-cs.621" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>] [<a href="http://www.ncbi.nlm.nih.gov/pubmed/34322592" class='cross-ref' data-typ='pmid' target='_blank' rel='noopener noreferrer'>PubMed</a>]</li><li id='B36-applsci-12-08972' class='html-xx' data-content='36.'>Bao, C.; Xie, T.; Feng, W.; Chang, L.; Yu, C. A Power-Efficient Optimizing Framework FPGA Accelerator Based on Winograd for YOLO. <span class='html-italic'>IEEE Access</span> <b>2020</b>, <span class='html-italic'>8</span>, 94307–94317. 
[<a href="https://scholar.google.com/scholar_lookup?title=A+Power-Efficient+Optimizing+Framework+FPGA+Accelerator+Based+on+Winograd+for+YOLO&author=Bao,+C.&author=Xie,+T.&author=Feng,+W.&author=Chang,+L.&author=Yu,+C.&publication_year=2020&journal=IEEE+Access&volume=8&pages=94307%E2%80%9394317&doi=10.1109/ACCESS.2020.2995330" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/ACCESS.2020.2995330" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B37-applsci-12-08972' class='html-xx' data-content='37.'>Lim, H.K.; Kim, J.B.; Heo, J.S.; Kim, K.; Hong, Y.G.; Han, Y.H. Packet-based network traffic classification using deep learning. In Proceedings of the 2019 International Conference on Artificial Intelligence in Information and Communication (ICAIIC), Okinawa, Japan, 11–13 February 2019. [<a href="https://scholar.google.com/scholar_lookup?title=Packet-based+network+traffic+classification+using+deep+learning&conference=Proceedings+of+the+2019+International+Conference+on+Artificial+Intelligence+in+Information+and+Communication+(ICAIIC)&author=Lim,+H.K.&author=Kim,+J.B.&author=Heo,+J.S.&author=Kim,+K.&author=Hong,+Y.G.&author=Han,+Y.H.&publication_year=2019" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>]</li><li id='B38-applsci-12-08972' class='html-xx' data-content='38.'>Available online: <a href='https://cyberleninka.ru/article/n/reshenie-zadach-vychislitelnoy-gidrodinamiki-s-primeneniem-tehnologii-nvidia-cuda-articlehead-tehnologiya-nvidia-cuda-v-zadachah/viewer' target='_blank' rel="noopener noreferrer" >https://cyberleninka.ru/article/n/reshenie-zadach-vychislitelnoy-gidrodinamiki-s-primeneniem-tehnologii-nvidia-cuda-articlehead-tehnologiya-nvidia-cuda-v-zadachah/viewer</a> (accessed on 2 September 2022).</li><li id='B39-applsci-12-08972' class='html-xx' data-content='39.'>NVIDIA. <span class='html-italic'>Cuda C Best Practices Guide</span>; Nvidia Corp.: Santa Clara, CA, USA, 2015. [<a href="https://scholar.google.com/scholar_lookup?title=Cuda+C+Best+Practices+Guide&author=NVIDIA&publication_year=2015" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>]</li><li id='B40-applsci-12-08972' class='html-xx' data-content='40.'>Yasin, S.; Iqbal, N.; Ali, T.; Draz, U.; Alqahtani, A.; Irfan, M.; Rehman, A.; Glowacz, A.; Alqhtani, S.; Proniewska, K.; et al. Severity Grading and Early Retinopathy Lesion Detection through Hybrid Inception-ResNet Architecture. <span class='html-italic'>Sensors</span> <b>2021</b>, <span class='html-italic'>21</span>, 6933. [<a href="https://scholar.google.com/scholar_lookup?title=Severity+Grading+and+Early+Retinopathy+Lesion+Detection+through+Hybrid+Inception-ResNet+Architecture&author=Yasin,+S.&author=Iqbal,+N.&author=Ali,+T.&author=Draz,+U.&author=Alqahtani,+A.&author=Irfan,+M.&author=Rehman,+A.&author=Glowacz,+A.&author=Alqhtani,+S.&author=Proniewska,+K.&publication_year=2021&journal=Sensors&volume=21&pages=6933&doi=10.3390/s21206933" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.3390/s21206933" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B41-applsci-12-08972' class='html-xx' data-content='41.'>Li, Y.; Xie, P.; Chen, X.; Liu, J.; Yang, B.; Li, S.; Gong, C.; Gan, X.; Xu, H. VBSF: A new storage format for SIMD sparse matrix–vector multiplication on modern processors. 
<span class='html-italic'>J. Supercomput.</span> <b>2019</b>, <span class='html-italic'>76</span>, 2063–2081. [<a href="https://scholar.google.com/scholar_lookup?title=VBSF:+A+new+storage+format+for+SIMD+sparse+matrix%E2%80%93vector+multiplication+on+modern+processors&author=Li,+Y.&author=Xie,+P.&author=Chen,+X.&author=Liu,+J.&author=Yang,+B.&author=Li,+S.&author=Gong,+C.&author=Gan,+X.&author=Xu,+H.&publication_year=2019&journal=J.+Supercomput.&volume=76&pages=2063%E2%80%932081&doi=10.1007/s11227-019-02835-4" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1007/s11227-019-02835-4" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B42-applsci-12-08972' class='html-xx' data-content='42.'>Li, R.; Wu, B.; Ying, M.; Sun, X.; Yang, G. Quantum Supremacy Circuit Simulation on Sunway TaihuLight. <span class='html-italic'>IEEE Trans. Parallel Distrib. Syst.</span> <b>2019</b>, <span class='html-italic'>31</span>, 805–816. [<a href="https://scholar.google.com/scholar_lookup?title=Quantum+Supremacy+Circuit+Simulation+on+Sunway+TaihuLight&author=Li,+R.&author=Wu,+B.&author=Ying,+M.&author=Sun,+X.&author=Yang,+G.&publication_year=2019&journal=IEEE+Trans.+Parallel+Distrib.+Syst.&volume=31&pages=805%E2%80%93816&doi=10.1109/TPDS.2019.2947511" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/TPDS.2019.2947511" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B43-applsci-12-08972' class='html-xx' data-content='43.'>Guarnieri, M. Trailblazers in Electromechanical Computing [Historical]. <span class='html-italic'>IEEE Ind. Electron. Mag.</span> <b>2017</b>, <span class='html-italic'>11</span>, 58–62. [<a href="https://scholar.google.com/scholar_lookup?title=Trailblazers+in+Electromechanical+Computing+[Historical]&author=Guarnieri,+M.&publication_year=2017&journal=IEEE+Ind.+Electron.+Mag.&volume=11&pages=58%E2%80%9362&doi=10.1109/MIE.2017.2694578" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/MIE.2017.2694578" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B44-applsci-12-08972' class='html-xx' data-content='44.'>Li, Y.; Chen, H. Image recognition based on deep residual shrinkage Network. In Proceedings of the 2021 International Conference on Artificial Intelligence and Electromechanical Automation (AIEA), Guangzhou, China, 14–16 May 2021. [<a href="https://scholar.google.com/scholar_lookup?title=Image+recognition+based+on+deep+residual+shrinkage+Network&conference=Proceedings+of+the+2021+International+Conference+on+Artificial+Intelligence+and+Electromechanical+Automation+(AIEA)&author=Li,+Y.&author=Chen,+H.&publication_year=2021&doi=10.1109/AIEA53260.2021.00077" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/AIEA53260.2021.00077" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B45-applsci-12-08972' class='html-xx' data-content='45.'>Yang, Z.; Wu, B.; Wang, Z.; Li, Y.; Feng, H. Image Recognition Based on an Improved Deep Residual Shrinkage Network. <span class='html-italic'>SSRN Electron. J.</span> 2022; <span class='html-italic'>in press</span>. 
[<a href="https://scholar.google.com/scholar_lookup?title=Image+Recognition+Based+on+an+Improved+Deep+Residual+Shrinkage+Network&author=Yang,+Z.&author=Wu,+B.&author=Wang,+Z.&author=Li,+Y.&author=Feng,+H.&publication_year=2022&doi=10.2139/ssrn.4013383" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.2139/ssrn.4013383" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B46-applsci-12-08972' class='html-xx' data-content='46.'>Chollet, F. Xception: Deep learning with depthwise separable convolutions. In Proceedings of the 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Honolulu, HI, USA, 21–26 July 2017; pp. 1251–1258. [<a href="https://scholar.google.com/scholar_lookup?title=Xception:+Deep+learning+with+depthwise+separable+convolutions&conference=Proceedings+of+the+2017+IEEE+Conference+on+Computer+Vision+and+Pattern+Recognition+(CVPR)&author=Chollet,+F.&publication_year=2017&pages=1251%E2%80%931258&doi=10.1109/CVPR.2017.195" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/CVPR.2017.195" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B47-applsci-12-08972' class='html-xx' data-content='47.'>Javed, A.R.; Usman, M.; Rehman, S.U.; Khan, M.U.; Haghighi, M.S. Anomaly Detection in Automated Vehicles Using Multistage Attention-Based Convolutional Neural Network. <span class='html-italic'>IEEE Trans. Intell. Transp. Syst.</span> <b>2020</b>, <span class='html-italic'>22</span>, 4291–4300. [<a href="https://scholar.google.com/scholar_lookup?title=Anomaly+Detection+in+Automated+Vehicles+Using+Multistage+Attention-Based+Convolutional+Neural+Network&author=Javed,+A.R.&author=Usman,+M.&author=Rehman,+S.U.&author=Khan,+M.U.&author=Haghighi,+M.S.&publication_year=2020&journal=IEEE+Trans.+Intell.+Transp.+Syst.&volume=22&pages=4291%E2%80%934300&doi=10.1109/TITS.2020.3025875" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/TITS.2020.3025875" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B48-applsci-12-08972' class='html-xx' data-content='48.'>Zhang, P.; Xue, J.; Lan, C.; Zeng, W.; Gao, Z.; Zheng, N. EleAtt-RNN: Adding Attentiveness to Neurons in Recurrent Neural Networks. <span class='html-italic'>IEEE Trans. Image Process.</span> <b>2019</b>, <span class='html-italic'>29</span>, 1061–1073. [<a href="https://scholar.google.com/scholar_lookup?title=EleAtt-RNN:+Adding+Attentiveness+to+Neurons+in+Recurrent+Neural+Networks&author=Zhang,+P.&author=Xue,+J.&author=Lan,+C.&author=Zeng,+W.&author=Gao,+Z.&author=Zheng,+N.&publication_year=2019&journal=IEEE+Trans.+Image+Process.&volume=29&pages=1061%E2%80%931073&doi=10.1109/TIP.2019.2937724&pmid=31484119" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/TIP.2019.2937724" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>] [<a href="http://www.ncbi.nlm.nih.gov/pubmed/31484119" class='cross-ref' data-typ='pmid' target='_blank' rel='noopener noreferrer'>PubMed</a>]</li><li id='B49-applsci-12-08972' class='html-xx' data-content='49.'>Krizhevsky, A.; Nair, V.; Hinton, G. CIFAR-10 and CIFAR-100 Datasets. 2009. 
Available online: <a href='https://www.cs.toronto.edu/~kriz/cifar.html' target='_blank' rel="noopener noreferrer" >https://www.cs.toronto.edu/~kriz/cifar.html</a> (accessed on 8 August 2022).</li><li id='B50-applsci-12-08972' class='html-xx' data-content='50.'>Jiang, H.; Tang, S.; Liu, W.; Zhang, Y. Deep learning for COVID-19 chest CT (computed tomography) image analysis: A lesson from lung cancer. <span class='html-italic'>Comput. Struct. Biotechnol. J.</span> <b>2021</b>, <span class='html-italic'>19</span>, 1391–1399. [<a href="https://scholar.google.com/scholar_lookup?title=Deep+learning+for+COVID-19+chest+CT+(computed+tomography)+image+analysis:+A+lesson+from+lung+cancer&author=Jiang,+H.&author=Tang,+S.&author=Liu,+W.&author=Zhang,+Y.&publication_year=2021&journal=Comput.+Struct.+Biotechnol.+J.&volume=19&pages=1391%E2%80%931399&doi=10.1016/j.csbj.2021.02.016&pmid=33680351" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1016/j.csbj.2021.02.016" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>] [<a href="http://www.ncbi.nlm.nih.gov/pubmed/33680351" class='cross-ref' data-typ='pmid' target='_blank' rel='noopener noreferrer'>PubMed</a>]</li><li id='B51-applsci-12-08972' class='html-xx' data-content='51.'>Lv, N.; Ma, H.; Chen, C.; Pei, Q.; Zhou, Y.; Xiao, F.; Li, J. Remote Sensing Data Augmentation through Adversarial Training. <span class='html-italic'>IEEE J. Sel. Top. Appl. Earth Obs. Remote Sens.</span> <b>2021</b>, <span class='html-italic'>14</span>, 9318–9333. [<a href="https://scholar.google.com/scholar_lookup?title=Remote+Sensing+Data+Augmentation+through+Adversarial+Training&author=Lv,+N.&author=Ma,+H.&author=Chen,+C.&author=Pei,+Q.&author=Zhou,+Y.&author=Xiao,+F.&author=Li,+J.&publication_year=2021&journal=IEEE+J.+Sel.+Top.+Appl.+Earth+Obs.+Remote+Sens.&volume=14&pages=9318%E2%80%939333&doi=10.1109/JSTARS.2021.3110842" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/JSTARS.2021.3110842" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B52-applsci-12-08972' class='html-xx' data-content='52.'>Ruhang, X. Efficient clustering for aggregate loads: An unsupervised pretraining based method. <span class='html-italic'>Energy</span> <b>2020</b>, <span class='html-italic'>210</span>, 118617. [<a href="https://scholar.google.com/scholar_lookup?title=Efficient+clustering+for+aggregate+loads:+An+unsupervised+pretraining+based+method&author=Ruhang,+X.&publication_year=2020&journal=Energy&volume=210&pages=118617&doi=10.1016/j.energy.2020.118617" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1016/j.energy.2020.118617" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B53-applsci-12-08972' class='html-xx' data-content='53.'>Riviere, M.; Joulin, A.; Mazare, P.-E.; Dupoux, E. Unsupervised Pretraining Transfers Well Across Languages. In Proceedings of the ICASSP 2020—2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), Barcelona, Spain, 4–8 May 2020; pp. 7414–7418. 
[<a href="https://scholar.google.com/scholar_lookup?title=Unsupervised+Pretraining+Transfers+Well+Across+Languages&conference=Proceedings+of+the+ICASSP+2020%E2%80%942020+IEEE+International+Conference+on+Acoustics,+Speech+and+Signal+Processing+(ICASSP)&author=Riviere,+M.&author=Joulin,+A.&author=Mazare,+P.-E.&author=Dupoux,+E.&publication_year=2020&pages=7414%E2%80%937418&doi=10.1109/icassp40776.2020.9054548" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/icassp40776.2020.9054548" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B54-applsci-12-08972' class='html-xx' data-content='54.'>Salur, M.U.; Aydin, I. A Novel Hybrid Deep Learning Model for Sentiment Classification. <span class='html-italic'>IEEE Access</span> <b>2020</b>, <span class='html-italic'>8</span>, 58080–58093. [<a href="https://scholar.google.com/scholar_lookup?title=A+Novel+Hybrid+Deep+Learning+Model+for+Sentiment+Classification&author=Salur,+M.U.&author=Aydin,+I.&publication_year=2020&journal=IEEE+Access&volume=8&pages=58080%E2%80%9358093&doi=10.1109/ACCESS.2020.2982538" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/ACCESS.2020.2982538" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B55-applsci-12-08972' class='html-xx' data-content='55.'>Lu, T.; Du, Y.; Ouyang, L.; Chen, Q.; Wang, X. Android Malware Detection Based on a Hybrid Deep Learning Model. <span class='html-italic'>Secur. Commun. Netw.</span> <b>2020</b>, <span class='html-italic'>2020</span>, 8863617. [<a href="https://scholar.google.com/scholar_lookup?title=Android+Malware+Detection+Based+on+a+Hybrid+Deep+Learning+Model&author=Lu,+T.&author=Du,+Y.&author=Ouyang,+L.&author=Chen,+Q.&author=Wang,+X.&publication_year=2020&journal=Secur.+Commun.+Netw.&volume=2020&pages=8863617&doi=10.1155/2020/8863617" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1155/2020/8863617" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B56-applsci-12-08972' class='html-xx' data-content='56.'>Basit, A.; Zafar, M.; Liu, X.; Javed, A.R.; Jalil, Z.; Kifayat, K. A comprehensive survey of AI-enabled phishing attacks detection techniques. <span class='html-italic'>Telecommun. Syst.</span> <b>2020</b>, <span class='html-italic'>76</span>, 139–154. [<a href="https://scholar.google.com/scholar_lookup?title=A+comprehensive+survey+of+AI-enabled+phishing+attacks+detection+techniques&author=Basit,+A.&author=Zafar,+M.&author=Liu,+X.&author=Javed,+A.R.&author=Jalil,+Z.&author=Kifayat,+K.&publication_year=2020&journal=Telecommun.+Syst.&volume=76&pages=139%E2%80%93154&doi=10.1007/s11235-020-00733-2&pmid=33110340" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1007/s11235-020-00733-2" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>] [<a href="http://www.ncbi.nlm.nih.gov/pubmed/33110340" class='cross-ref' data-typ='pmid' target='_blank' rel='noopener noreferrer'>PubMed</a>]</li><li id='B57-applsci-12-08972' class='html-xx' data-content='57.'>Fang, J.; Sun, Y.; Zhang, Q.; Peng, K.; Li, Y.; Liu, W.; Wang, X. FNA++: Fast Network Adaptation via Parameter Remapping and Architecture Search. <span class='html-italic'>IEEE Trans. Pattern Anal. Mach. 
Intell.</span> <b>2020</b>, <span class='html-italic'>43</span>, 2990–3004. [<a href="https://scholar.google.com/scholar_lookup?title=FNA++:+Fast+Network+Adaptation+via+Parameter+Remapping+and+Architecture+Search&author=Fang,+J.&author=Sun,+Y.&author=Zhang,+Q.&author=Peng,+K.&author=Li,+Y.&author=Liu,+W.&author=Wang,+X.&publication_year=2020&journal=IEEE+Trans.+Pattern+Anal.+Mach.+Intell.&volume=43&pages=2990%E2%80%933004&doi=10.1109/TPAMI.2020.3044416&pmid=33315553" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/TPAMI.2020.3044416" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>] [<a href="http://www.ncbi.nlm.nih.gov/pubmed/33315553" class='cross-ref' data-typ='pmid' target='_blank' rel='noopener noreferrer'>PubMed</a>]</li><li id='B58-applsci-12-08972' class='html-xx' data-content='58.'>Wu, Z.; Pan, S.; Chen, F.; Long, G.; Zhang, C.; Yu, P.S. A Comprehensive Survey on Graph Neural Networks. <span class='html-italic'>IEEE Trans. Neural Netw. Learn. Syst.</span> <b>2021</b>, <span class='html-italic'>32</span>, 4–24. [<a href="https://scholar.google.com/scholar_lookup?title=A+Comprehensive+Survey+on+Graph+Neural+Networks&author=Wu,+Z.&author=Pan,+S.&author=Chen,+F.&author=Long,+G.&author=Zhang,+C.&author=Yu,+P.S.&publication_year=2021&journal=IEEE+Trans.+Neural+Netw.+Learn.+Syst.&volume=32&pages=4%E2%80%9324&doi=10.1109/TNNLS.2020.2978386" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/TNNLS.2020.2978386" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>] [<a href="http://arxiv.org/pdf/1901.00596" target='_blank' rel="noopener noreferrer">Green Version</a>]</li><li id='B59-applsci-12-08972' class='html-xx' data-content='59.'>Huang, G.; Sun, Y.; Liu, Z.; Sedra, D.; Weinberger, K.Q. Deep networks with stochastic depth. In <span class='html-italic'>Lecture Notes in Computer Science (Including Subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)</span>; Springer: Cham, Switzerland, 2016; Volume 9908. [<a href="https://scholar.google.com/scholar_lookup?title=Deep+networks+with+stochastic+depth&author=Huang,+G.&author=Sun,+Y.&author=Liu,+Z.&author=Sedra,+D.&author=Weinberger,+K.Q.&publication_year=2016" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1007/978-3-319-46493-0_39" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B60-applsci-12-08972' class='html-xx' data-content='60.'>Chen, D.; Zhang, W.; Xu, X.; Xing, X. Deep networks with stochastic depth for acoustic modelling. In Proceedings of the 2016 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA), Jeju, Korea, 13–16 December 2016. 
[<a href="https://scholar.google.com/scholar_lookup?title=Deep+networks+with+stochastic+depth+for+acoustic+modelling&conference=Proceedings+of+the+2016+Asia-Pacific+Signal+and+Information+Processing+Association+Annual+Summit+and+Conference+(APSIPA)&author=Chen,+D.&author=Zhang,+W.&author=Xu,+X.&author=Xing,+X.&publication_year=2016&doi=10.1109/APSIPA.2016.7820692" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/APSIPA.2016.7820692" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B61-applsci-12-08972' class='html-xx' data-content='61.'>Koonce, B. SqueezeNet. In <span class='html-italic'>Convolutional Neural Networks with Swift for Tensorflow</span>; Apress: Berkeley, CA, USA, 2021. [<a href="https://scholar.google.com/scholar_lookup?title=SqueezeNet&author=Koonce,+B.&publication_year=2021" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1007/978-1-4842-6168-2" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B62-applsci-12-08972' class='html-xx' data-content='62.'>Bobenko, A.I.; Lutz, C.O.R.; Pottmann, H.; Techter, J. Checkerboard Incircular Nets. In <span class='html-italic'>SpringerBriefs in Mathematics</span>; Springer: Cham, Switzerland, 2021. [<a href="https://scholar.google.com/scholar_lookup?title=Checkerboard+Incircular+Nets&author=Bobenko,+A.I.&author=Lutz,+C.O.R.&author=Pottmann,+H.&author=Techter,+J.&publication_year=2021" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1007/978-3-030-81847-0_8" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B63-applsci-12-08972' class='html-xx' data-content='63.'>Wang, S.; Zha, Y.; Li, W.; Wu, Q.; Li, X.; Niu, M.; Wang, M.; Qiu, X.; Li, H.; Yu, H.; et al. A fully automatic deep learning system for COVID-19 diagnostic and prognostic analysis. <span class='html-italic'>Eur. Respir. J.</span> <b>2020</b>, <span class='html-italic'>56</span>, 2000775. [<a href="https://scholar.google.com/scholar_lookup?title=A+fully+automatic+deep+learning+system+for+COVID-19+diagnostic+and+prognostic+analysis&author=Wang,+S.&author=Zha,+Y.&author=Li,+W.&author=Wu,+Q.&author=Li,+X.&author=Niu,+M.&author=Wang,+M.&author=Qiu,+X.&author=Li,+H.&author=Yu,+H.&publication_year=2020&journal=Eur.+Respir.+J.&volume=56&pages=2000775&doi=10.1183/13993003.00775-2020&pmid=32444412" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1183/13993003.00775-2020" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>] [<a href="http://www.ncbi.nlm.nih.gov/pubmed/32444412" class='cross-ref' data-typ='pmid' target='_blank' rel='noopener noreferrer'>PubMed</a>]</li><li id='B64-applsci-12-08972' class='html-xx' data-content='64.'>Kumar, D.; Taylor, G.W.; Wong, A. Opening the Black Box of Financial AI with CLEAR-Trade: A CLass-Enhanced Attentive Response Approach for Explaining and Visualizing Deep Learning-Driven Stock Market Prediction. <span class='html-italic'>J. Comput. Vis. Imaging Syst.</span> <b>2017</b>, <span class='html-italic'>3</span>. 
[<a href="https://scholar.google.com/scholar_lookup?title=Opening+the+Black+Box+of+Financial+AI+with+CLEAR-Trade:+A+CLass-Enhanced+Attentive+Response+Approach+for+Explaining+and+Visualizing+Deep+Learning-Driven+Stock+Market+Prediction&author=Kumar,+D.&author=Taylor,+G.W.&author=Wong,+A.&publication_year=2017&journal=J.+Comput.+Vis.+Imaging+Syst.&volume=3&doi=10.15353/vsnl.v3i1.166" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.15353/vsnl.v3i1.166" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B65-applsci-12-08972' class='html-xx' data-content='65.'>Cheng, X.; Zhang, Y.; Chen, Y.; Wu, Y.; Yue, Y. Pest identification via deep residual learning in complex background. <span class='html-italic'>Comput. Electron. Agric.</span> <b>2017</b>, <span class='html-italic'>141</span>, 351–356. [<a href="https://scholar.google.com/scholar_lookup?title=Pest+identification+via+deep+residual+learning+in+complex+background&author=Cheng,+X.&author=Zhang,+Y.&author=Chen,+Y.&author=Wu,+Y.&author=Yue,+Y.&publication_year=2017&journal=Comput.+Electron.+Agric.&volume=141&pages=351%E2%80%93356&doi=10.1016/j.compag.2017.08.005" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1016/j.compag.2017.08.005" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B66-applsci-12-08972' class='html-xx' data-content='66.'>He, S.; Jonsson, E.; Mader, C.A.; Martins, J.R.R.A. Aerodynamic Shape Optimization with Time Spectral Flutter Adjoint. In Proceedings of the AIAA Scitech 2019 Forum, San Diego, CA, USA, 7–11 January 2019. [<a href="https://scholar.google.com/scholar_lookup?title=Aerodynamic+Shape+Optimization+with+Time+Spectral+Flutter+Adjoint&conference=Proceedings+of+the+AIAA+Scitech+2019+Forum&author=He,+S.&author=Jonsson,+E.&author=Mader,+C.A.&author=Martins,+J.R.R.A.&publication_year=2019&doi=10.2514/6.2019-0697" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.2514/6.2019-0697" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B67-applsci-12-08972' class='html-xx' data-content='67.'>Wu, S.; Zhong, S.-H.; Liu, Y. Deep residual learning for image steganalysis. <span class='html-italic'>Multimed. Tools Appl.</span> <b>2017</b>, <span class='html-italic'>77</span>, 10437–10453. [<a href="https://scholar.google.com/scholar_lookup?title=Deep+residual+learning+for+image+steganalysis&author=Wu,+S.&author=Zhong,+S.-H.&author=Liu,+Y.&publication_year=2017&journal=Multimed.+Tools+Appl.&volume=77&pages=10437%E2%80%9310453&doi=10.1007/s11042-017-4440-4" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1007/s11042-017-4440-4" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B68-applsci-12-08972' class='html-xx' data-content='68.'>Neupane, D.; Kim, Y.; Seok, J. Bearing Fault Detection Using Scalogram and Switchable Normalization-Based CNN (SN-CNN). <span class='html-italic'>IEEE Access</span> <b>2021</b>, <span class='html-italic'>9</span>, 88151–88166. 
[<a href="https://scholar.google.com/scholar_lookup?title=Bearing+Fault+Detection+Using+Scalogram+and+Switchable+Normalization-Based+CNN+(SN-CNN)&author=Neupane,+D.&author=Kim,+Y.&author=Seok,+J.&publication_year=2021&journal=IEEE+Access&volume=9&pages=88151%E2%80%9388166&doi=10.1109/ACCESS.2021.3089698" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/ACCESS.2021.3089698" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B69-applsci-12-08972' class='html-xx' data-content='69.'>Allegra, A.; Tonacci, A.; Sciaccotta, R.; Genovese, S.; Musolino, C.; Pioggia, G.; Gangemi, S. Machine Learning and Deep Learning Applications in Multiple Myeloma Diagnosis, Prognosis, and Treatment Selection. <span class='html-italic'>Cancers</span> <b>2022</b>, <span class='html-italic'>14</span>, 606. [<a href="https://scholar.google.com/scholar_lookup?title=Machine+Learning+and+Deep+Learning+Applications+in+Multiple+Myeloma+Diagnosis,+Prognosis,+and+Treatment+Selection&author=Allegra,+A.&author=Tonacci,+A.&author=Sciaccotta,+R.&author=Genovese,+S.&author=Musolino,+C.&author=Pioggia,+G.&author=Gangemi,+S.&publication_year=2022&journal=Cancers&volume=14&pages=606&doi=10.3390/cancers14030606&pmid=35158874" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.3390/cancers14030606" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>] [<a href="http://www.ncbi.nlm.nih.gov/pubmed/35158874" class='cross-ref' data-typ='pmid' target='_blank' rel='noopener noreferrer'>PubMed</a>]</li><li id='B70-applsci-12-08972' class='html-xx' data-content='70.'>Kim, E.J.; Brunner, R.J. Star–galaxy classification using deep convolutional neural networks. <span class='html-italic'>Mon. Not. R. Astron. Soc.</span> <b>2016</b>, <span class='html-italic'>464</span>, 4463–4475. [<a href="https://scholar.google.com/scholar_lookup?title=Star%E2%80%93galaxy+classification+using+deep+convolutional+neural+networks&author=Kim,+E.J.&author=Brunner,+R.J.&publication_year=2016&journal=Mon.+Not.+R.+Astron.+Soc.&volume=464&pages=4463%E2%80%934475&doi=10.1093/mnras/stw2672" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1093/mnras/stw2672" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B71-applsci-12-08972' class='html-xx' data-content='71.'>Najafabadi, M.M.; Khoshgoftaar, T.M.; Villanustre, F.; Holt, J. Large-scale distributed L-BFGS. <span class='html-italic'>J. Big Data</span> <b>2017</b>, <span class='html-italic'>4</span>, 22. [<a href="https://scholar.google.com/scholar_lookup?title=Large-scale+distributed+L-BFGS&author=Najafabadi,+M.M.&author=Khoshgoftaar,+T.M.&author=Villanustre,+F.&author=Holt,+J.&publication_year=2017&journal=J.+Big+Data&volume=4&pages=22&doi=10.1186/s40537-017-0084-5" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1186/s40537-017-0084-5" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B72-applsci-12-08972' class='html-xx' data-content='72.'>Church, K.W. Word2Vec. <span class='html-italic'>Nat. Lang. Eng.</span> <b>2016</b>, <span class='html-italic'>23</span>, 155–162. 
[<a href="https://scholar.google.com/scholar_lookup?title=Word2Vec&author=Church,+K.W.&publication_year=2016&journal=Nat.+Lang.+Eng.&volume=23&pages=155%E2%80%93162&doi=10.1017/S1351324916000334" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1017/S1351324916000334" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B73-applsci-12-08972' class='html-xx' data-content='73.'>Shafiq, M.; Tian, Z.; Bashir, A.K.; Jolfaei, A.; Yu, X. Data mining and machine learning methods for sustainable smart cities traffic classification: A survey. <span class='html-italic'>Sustain. Cities Soc.</span> <b>2020</b>, <span class='html-italic'>60</span>, 102177. [<a href="https://scholar.google.com/scholar_lookup?title=Data+mining+and+machine+learning+methods+for+sustainable+smart+cities+traffic+classification:+A+survey&author=Shafiq,+M.&author=Tian,+Z.&author=Bashir,+A.K.&author=Jolfaei,+A.&author=Yu,+X.&publication_year=2020&journal=Sustain.+Cities+Soc.&volume=60&pages=102177&doi=10.1016/j.scs.2020.102177" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1016/j.scs.2020.102177" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B74-applsci-12-08972' class='html-xx' data-content='74.'>Shafiq, M.; Tian, Z.; Bashir, A.K.; Du, X.; Guizani, M. IoT malicious traffic identification using wrapper-based feature selection mechanisms. <span class='html-italic'>Comput. Secur.</span> <b>2020</b>, <span class='html-italic'>94</span>, 101863. [<a href="https://scholar.google.com/scholar_lookup?title=IoT+malicious+traffic+identification+using+wrapper-based+feature+selection+mechanisms&author=Shafiq,+M.&author=Tian,+Z.&author=Bashir,+A.K.&author=Du,+X.&author=Guizani,+M.&publication_year=2020&journal=Comput.+Secur.&volume=94&pages=101863&doi=10.1016/j.cose.2020.101863" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1016/j.cose.2020.101863" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B75-applsci-12-08972' class='html-xx' data-content='75.'>Shafiq, M.; Tian, Z.; Bashir, A.K.; Du, X.; Guizani, M. CorrAUC: A Malicious Bot-IoT Traffic Detection Method in IoT Network Using Machine-Learning Techniques. <span class='html-italic'>IEEE Internet Things J.</span> <b>2020</b>, <span class='html-italic'>8</span>, 3242–3254. [<a href="https://scholar.google.com/scholar_lookup?title=CorrAUC:+A+Malicious+Bot-IoT+Traffic+Detection+Method+in+IoT+Network+Using+Machine-Learning+Techniques&author=Shafiq,+M.&author=Tian,+Z.&author=Bashir,+A.K.&author=Du,+X.&author=Guizani,+M.&publication_year=2020&journal=IEEE+Internet+Things+J.&volume=8&pages=3242%E2%80%933254&doi=10.1109/JIOT.2020.3002255" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/JIOT.2020.3002255" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B76-applsci-12-08972' class='html-xx' data-content='76.'>Jennings, J.M.; Loyd, B.J.; Miner, T.M.; Yang, C.C.; Stevens-Lapsley, J.; Dennis, D.A. A prospective randomized trial examining the use of a closed suction drain shows no influence on strength or function in primary total knee arthroplasty. <span class='html-italic'>Bone Jt. J.</span> <b>2019</b>, <span class='html-italic'>101 B</span>, 84–90. 
[<a href="https://scholar.google.com/scholar_lookup?title=A+prospective+randomized+trial+examining+the+use+of+a+closed+suction+drain+shows+no+influence+on+strength+or+function+in+primary+total+knee+arthroplasty&author=Jennings,+J.M.&author=Loyd,+B.J.&author=Miner,+T.M.&author=Yang,+C.C.&author=Stevens-Lapsley,+J.&author=Dennis,+D.A.&publication_year=2019&journal=Bone+Jt.+J.&volume=101+B&pages=84%E2%80%9390&doi=10.1302/0301-620X.101B7.BJJ-2018-1420.R1" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1302/0301-620X.101B7.BJJ-2018-1420.R1" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B77-applsci-12-08972' class='html-xx' data-content='77.'>Nakamura, K.; Hong, B.-W. Adaptive Weight Decay for Deep Neural Networks. <span class='html-italic'>IEEE Access</span> <b>2019</b>, <span class='html-italic'>7</span>, 118857–118865. [<a href="https://scholar.google.com/scholar_lookup?title=Adaptive+Weight+Decay+for+Deep+Neural+Networks&author=Nakamura,+K.&author=Hong,+B.-W.&publication_year=2019&journal=IEEE+Access&volume=7&pages=118857%E2%80%93118865&doi=10.1109/ACCESS.2019.2937139" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/ACCESS.2019.2937139" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B78-applsci-12-08972' class='html-xx' data-content='78.'>Prashar, D.; Jha, N.; Shafiq, M.; Ahmad, N.; Rashid, M.; Banday, S.A.; Khan, H.U. Blockchain-Based Automated System for Identification and Storage of Networks. <span class='html-italic'>Secur. Commun. Netw.</span> <b>2021</b>, <span class='html-italic'>2021</span>, 6694281. [<a href="https://scholar.google.com/scholar_lookup?title=Blockchain-Based+Automated+System+for+Identification+and+Storage+of+Networks&author=Prashar,+D.&author=Jha,+N.&author=Shafiq,+M.&author=Ahmad,+N.&author=Rashid,+M.&author=Banday,+S.A.&author=Khan,+H.U.&publication_year=2021&journal=Secur.+Commun.+Netw.&volume=2021&pages=6694281&doi=10.1155/2021/6694281" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1155/2021/6694281" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B79-applsci-12-08972' class='html-xx' data-content='79.'>Gu, J.; Wang, Z.; Kuen, J.; Ma, L.; Shahroudy, A.; Shuai, B.; Liu, T.; Wang, X.; Wang, G.; Cai, J.; et al. Recent advances in convolutional neural networks. <span class='html-italic'>Pattern Recognit.</span> <b>2018</b>, <span class='html-italic'>77</span>, 354–377. [<a href="https://scholar.google.com/scholar_lookup?title=Recent+advances+in+convolutional+neural+networks&author=Gu,+J.&author=Wang,+Z.&author=Kuen,+J.&author=Ma,+L.&author=Shahroudy,+A.&author=Shuai,+B.&author=Liu,+T.&author=Wang,+X.&author=Wang,+G.&author=Cai,+J.&publication_year=2018&journal=Pattern+Recognit.&volume=77&pages=354%E2%80%93377&doi=10.1016/j.patcog.2017.10.013" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1016/j.patcog.2017.10.013" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B80-applsci-12-08972' class='html-xx' data-content='80.'>Buolamwini, J.; Gebru, T. Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification. 
In Proceedings of the Conference on Fairness, Accountability and Transparency, PMLR, New York, NY, USA, 23–24 February 2018; Volume 81. [<a href="https://scholar.google.com/scholar_lookup?title=Gender+Shades:+Intersectional+Accuracy+Disparities+in+Commercial+Gender+Classification&conference=Proceedings+of+the+Conference+on+Fairness,+Accountability+and+Transparency,+PMLR&author=Buolamwini,+J.&author=Gebru,+T.&publication_year=2018" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>]</li><li id='B81-applsci-12-08972' class='html-xx' data-content='81.'>Datta, A.; Swamidass, S. Fair-Net: A Network Architecture for Reducing Performance Disparity between Identifiable Sub-populations. In Proceedings of the 14th International Conference on Agents and Artificial Intelligence, Online, 3–5 February 2022; pp. 645–654. [<a href="https://scholar.google.com/scholar_lookup?title=Fair-Net:+A+Network+Architecture+for+Reducing+Performance+Disparity+between+Identifiable+Sub-populations&conference=Proceedings+of+the+14th+International+Conference+on+Agents+and+Artificial+Intelligence&author=Datta,+A.&author=Swamidass,+S.&publication_year=2022&pages=645%E2%80%93654&doi=10.5220/0010877400003116" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.5220/0010877400003116" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B82-applsci-12-08972' class='html-xx' data-content='82.'>Kim, M.P.; Ghorbani, A.; Zou, J. Multiaccuracy: Black-box post-processing for fairness in classification. In Proceedings of the AIES 2019—2019 AAAI/ACM Conference on AI, Ethics, and Society, Honolulu, HI, USA, 27–28 January 2019. [<a href="https://scholar.google.com/scholar_lookup?title=Multiaccuracy:+Black-box+post-processing+for+fairness+in+classification&conference=Proceedings+of+the+AIES+2019%E2%80%942019+AAAI/ACM+Conference+on+AI,+Ethics,+and+Society&author=Kim,+M.P.&author=Ghorbani,+A.&author=Zou,+J.&publication_year=2019&doi=10.1145/3306618.3314287" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1145/3306618.3314287" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B83-applsci-12-08972' class='html-xx' data-content='83.'>Guo, C.; Pleiss, G.; Sun, Y.; Weinberger, K.Q. On calibration of modern neural networks. In Proceedings of the 34th International Conference on Machine Learning, ICML 2017, Sydney, Australia, 6–11 August 2017; Volume 3. [<a href="https://scholar.google.com/scholar_lookup?title=On+calibration+of+modern+neural+networks&conference=Proceedings+of+the+34th+International+Conference+on+Machine+Learning,+ICML+2017&author=Guo,+C.&author=Pleiss,+G.&author=Sun,+Y.&author=Weinberger,+K.Q.&publication_year=2017" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>]</li><li id='B84-applsci-12-08972' class='html-xx' data-content='84.'>Datta, A.; Flynn, N.R.; Swamidass, S.J. Cal-Net: Jointly Learning Classification and Calibration on Imbalanced Binary Classification Tasks. In Proceedings of the 2021 International Joint Conference on Neural Networks (IJCNN), Shenzhen, China, 18–22 July 2021. 
[<a href="https://scholar.google.com/scholar_lookup?title=Cal-Net:+Jointly+Learning+Classification+and+Calibration+on+Imbalanced+Binary+Classification+Tasks&conference=Proceedings+of+the+2021+International+Joint+Conference+on+Neural+Networks+(IJCNN)&author=Datta,+A.&author=Flynn,+N.R.&author=Swamidass,+S.J.&publication_year=2021&doi=10.1109/IJCNN52387.2021.9534411" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/IJCNN52387.2021.9534411" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B85-applsci-12-08972' class='html-xx' data-content='85.'>Li, Y.; Wang, N.; Shi, J.; Hou, X.; Liu, J. Adaptive Batch Normalization for practical domain adaptation. <span class='html-italic'>Pattern Recognit.</span> <b>2018</b>, <span class='html-italic'>80</span>, 109–117. [<a href="https://scholar.google.com/scholar_lookup?title=Adaptive+Batch+Normalization+for+practical+domain+adaptation&author=Li,+Y.&author=Wang,+N.&author=Shi,+J.&author=Hou,+X.&author=Liu,+J.&publication_year=2018&journal=Pattern+Recognit.&volume=80&pages=109%E2%80%93117&doi=10.1016/j.patcog.2018.03.005" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1016/j.patcog.2018.03.005" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B86-applsci-12-08972' class='html-xx' data-content='86.'>Singh, A.K.; Kumar, A.; Mahmud, M.; Kaiser, M.S.; Kishore, A. COVID-19 Infection Detection from Chest X-Ray Images Using Hybrid Social Group Optimization and Support Vector Classifier. <span class='html-italic'>Cogn. Comput.</span> <b>2021</b>. [<a href="https://scholar.google.com/scholar_lookup?title=COVID-19+Infection+Detection+from+Chest+X-Ray+Images+Using+Hybrid+Social+Group+Optimization+and+Support+Vector+Classifier&author=Singh,+A.K.&author=Kumar,+A.&author=Mahmud,+M.&author=Kaiser,+M.S.&author=Kishore,+A.&publication_year=2021&journal=Cogn.+Comput.&doi=10.1007/s12559-021-09848-3&pmid=33688379" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1007/s12559-021-09848-3" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>] [<a href="http://www.ncbi.nlm.nih.gov/pubmed/33688379" class='cross-ref' data-typ='pmid' target='_blank' rel='noopener noreferrer'>PubMed</a>]</li><li id='B87-applsci-12-08972' class='html-xx' data-content='87.'>Sik-Ho, T. Review: AlexNet, CaffeNet—Winner of ILSVRC 2012 (Image Classification). <span class='html-italic'>Medium Note</span>, 9 August 2018. [<a href="https://scholar.google.com/scholar_lookup?title=Review:+AlexNet,+CaffeNet%E2%80%94Winner+of+ILSVRC+2012+(Image+Classification)&author=Sik-Ho,+T.&publication_year=2018" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>]</li><li id='B88-applsci-12-08972' class='html-xx' data-content='88.'>Çınar, A.; Tuncer, S.A. Classification of lymphocytes, monocytes, eosinophils, and neutrophils on white blood cells using hybrid Alexnet-GoogleNet-SVM. <span class='html-italic'>SN Appl. Sci.</span> <b>2021</b>, <span class='html-italic'>3</span>, 503. 
[<a href="https://scholar.google.com/scholar_lookup?title=Classification+of+lymphocytes,+monocytes,+eosinophils,+and+neutrophils+on+white+blood+cells+using+hybrid+Alexnet-GoogleNet-SVM&author=%C3%87%C4%B1nar,+A.&author=Tuncer,+S.A.&publication_year=2021&journal=SN+Appl.+Sci.&volume=3&pages=503&doi=10.1007/s42452-021-04485-9" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1007/s42452-021-04485-9" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B89-applsci-12-08972' class='html-xx' data-content='89.'>Prasetyo, E.; Suciati, N.; Fatichah, C. Multi-level residual network VGGNet for fish species classification. <span class='html-italic'>J. King Saud Univ.-Comput. Inf. Sci.</span> <b>2022</b>, <span class='html-italic'>34</span>, 5286–5295. [<a href="https://scholar.google.com/scholar_lookup?title=Multi-level+residual+network+VGGNet+for+fish+species+classification&author=Prasetyo,+E.&author=Suciati,+N.&author=Fatichah,+C.&publication_year=2022&journal=J.+King+Saud+Univ.-Comput.+Inf.+Sci.&volume=34&pages=5286%E2%80%935295&doi=10.1016/j.jksuci.2021.05.015" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1016/j.jksuci.2021.05.015" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B90-applsci-12-08972' class='html-xx' data-content='90.'>Zhou, T.; Zhao, Y.; Wu, J. ResNeXt and Res2Net Structures for Speaker Verification. In Proceedings of the 2021 IEEE Spoken Language Technology Workshop (SLT), Shenzhen, China, 19–22 January 2021. [<a href="https://scholar.google.com/scholar_lookup?title=ResNeXt+and+Res2Net+Structures+for+Speaker+Verification&conference=Proceedings+of+the+2021+IEEE+Spoken+Language+Technology+Workshop+(SLT)&author=Zhou,+T.&author=Zhao,+Y.&author=Wu,+J.&publication_year=2021&doi=10.1109/SLT48900.2021.9383531" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/SLT48900.2021.9383531" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B91-applsci-12-08972' class='html-xx' data-content='91.'>Finamore, A.; Mellia, M.; Meo, M.; Munafo, M.M.; Di Torino, P.; Rossi, D. Experiences of Internet traffic monitoring with tstat. <span class='html-italic'>IEEE Netw.</span> <b>2011</b>, <span class='html-italic'>25</span>, 8–14. [<a href="https://scholar.google.com/scholar_lookup?title=Experiences+of+Internet+traffic+monitoring+with+tstat&author=Finamore,+A.&author=Mellia,+M.&author=Meo,+M.&author=Munafo,+M.M.&author=Di+Torino,+P.&author=Rossi,+D.&publication_year=2011&journal=IEEE+Netw.&volume=25&pages=8%E2%80%9314&doi=10.1109/MNET.2011.5772055" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1109/MNET.2011.5772055" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>] [<a href="https://core.ac.uk/reader/11424131" target='_blank' rel="noopener noreferrer">Green Version</a>]</li><li id='B92-applsci-12-08972' class='html-xx' data-content='92.'>Aceto, G.; Ciuonzo, D.; Montieri, A.; Pescapé, A. Multi-classification approaches for classifying mobile app traffic. <span class='html-italic'>J. Netw. Comput. Appl.</span> <b>2018</b>, <span class='html-italic'>103</span>, 131–145. 
[<a href="https://scholar.google.com/scholar_lookup?title=Multi-classification+approaches+for+classifying+mobile+app+traffic&author=Aceto,+G.&author=Ciuonzo,+D.&author=Montieri,+A.&author=Pescap%C3%A9,+A.&publication_year=2018&journal=J.+Netw.+Comput.+Appl.&volume=103&pages=131%E2%80%93145&doi=10.1016/j.jnca.2017.11.007" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.1016/j.jnca.2017.11.007" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B93-applsci-12-08972' class='html-xx' data-content='93.'>Feng, H.; Misra, V.; Rubenstein, D. The CIFAR-10 dataset. <span class='html-italic'>Electr. Eng.</span> <b>2007</b>, <span class='html-italic'>35</span>. [<a href="https://scholar.google.com/scholar_lookup?title=The+CIFAR-10+dataset&author=Feng,+H.&author=Misra,+V.&author=Rubenstein,+D.&publication_year=2007&journal=Electr.+Eng.&volume=35" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>]</li><li id='B94-applsci-12-08972' class='html-xx' data-content='94.'>Stanford Vision Lab. <span class='html-italic'>ImageNet Dataset</span>; Stanford Vision Lab, Stanford University: Stanford, CA, USA, 2016. [<a href="https://scholar.google.com/scholar_lookup?title=ImageNet+Dataset&author=Stanford+Vision+Lab.&publication_year=2016" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>]</li></ol></section><section id='FiguresandTable' type='display-objects'><div class="html-fig-wrap" id="applsci-12-08972-f001"> <div class='html-fig_img'> <div class="html-figpopup html-figpopup-link" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f001"> <img alt="Applsci 12 08972 g001 550" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g001.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g001.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g001-550.jpg" /> <a class="html-expand html-figpopup" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f001"></a> </div> </div> <div class="html-fig_description"> <b>Figure 1.</b> Detailed Flowchart. 
<!-- <p><a class="html-figpopup" href="#fig_body_display_applsci-12-08972-f001"> Click here to enlarge figure </a></p> --> </div> </div> <div class="html-fig_show mfp-hide" id ="fig_body_display_applsci-12-08972-f001" > <div class="html-caption" > <b>Figure 1.</b> Detailed Flowchart.</div> <div class="html-img"><img alt="Applsci 12 08972 g001" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g001.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g001.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g001.png" /></div> </div><div class="html-fig-wrap" id="applsci-12-08972-f002"> <div class='html-fig_img'> <div class="html-figpopup html-figpopup-link" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f002"> <img alt="Applsci 12 08972 g002 550" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g002.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g002.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g002-550.jpg" /> <a class="html-expand html-figpopup" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f002"></a> </div> </div> <div class="html-fig_description"> <b>Figure 2.</b> Basic Structure of DRN. <!-- <p><a class="html-figpopup" href="#fig_body_display_applsci-12-08972-f002"> Click here to enlarge figure </a></p> --> </div> </div> <div class="html-fig_show mfp-hide" id ="fig_body_display_applsci-12-08972-f002" > <div class="html-caption" > <b>Figure 2.</b> Basic Structure of DRN.</div> <div class="html-img"><img alt="Applsci 12 08972 g002" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g002.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g002.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g002.png" /></div> </div><div class="html-fig-wrap" id="applsci-12-08972-f003"> <div class='html-fig_img'> <div class="html-figpopup html-figpopup-link" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f003"> <img alt="Applsci 12 08972 g003 550" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g003.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g003.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g003-550.jpg" /> <a class="html-expand html-figpopup" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f003"></a> </div> </div> <div class="html-fig_description"> <b>Figure 3.</b> Most Searches DRN. 
<!-- <p><a class="html-figpopup" href="#fig_body_display_applsci-12-08972-f003"> Click here to enlarge figure </a></p> --> </div> </div> <div class="html-fig_show mfp-hide" id ="fig_body_display_applsci-12-08972-f003" > <div class="html-caption" > <b>Figure 3.</b> Most Searches DRN.</div> <div class="html-img"><img alt="Applsci 12 08972 g003" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g003.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g003.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g003.png" /></div> </div><div class="html-fig-wrap" id="applsci-12-08972-f004"> <div class='html-fig_img'> <div class="html-figpopup html-figpopup-link" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f004"> <img alt="Applsci 12 08972 g004 550" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g004.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g004.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g004-550.jpg" /> <a class="html-expand html-figpopup" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f004"></a> </div> </div> <div class="html-fig_description"> <b>Figure 4.</b> Sections Details. <!-- <p><a class="html-figpopup" href="#fig_body_display_applsci-12-08972-f004"> Click here to enlarge figure </a></p> --> </div> </div> <div class="html-fig_show mfp-hide" id ="fig_body_display_applsci-12-08972-f004" > <div class="html-caption" > <b>Figure 4.</b> Sections Details.</div> <div class="html-img"><img alt="Applsci 12 08972 g004" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g004.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g004.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g004.png" /></div> </div><div class="html-fig-wrap" id="applsci-12-08972-f005"> <div class='html-fig_img'> <div class="html-figpopup html-figpopup-link" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f005"> <img alt="Applsci 12 08972 g005 550" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g005.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g005.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g005-550.jpg" /> <a class="html-expand html-figpopup" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f005"></a> </div> </div> <div class="html-fig_description"> <b>Figure 5.</b> Next Five Topics. 
<!-- <p><a class="html-figpopup" href="#fig_body_display_applsci-12-08972-f005"> Click here to enlarge figure </a></p> --> </div> </div> <div class="html-fig_show mfp-hide" id ="fig_body_display_applsci-12-08972-f005" > <div class="html-caption" > <b>Figure 5.</b> Next Five Topics.</div> <div class="html-img"><img alt="Applsci 12 08972 g005" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g005.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g005.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g005.png" /></div> </div><div class="html-fig-wrap" id="applsci-12-08972-f006"> <div class='html-fig_img'> <div class="html-figpopup html-figpopup-link" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f006"> <img alt="Applsci 12 08972 g006 550" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g006.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g006.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g006-550.jpg" /> <a class="html-expand html-figpopup" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f006"></a> </div> </div> <div class="html-fig_description"> <b>Figure 6.</b> Next Five Topics. <!-- <p><a class="html-figpopup" href="#fig_body_display_applsci-12-08972-f006"> Click here to enlarge figure </a></p> --> </div> </div> <div class="html-fig_show mfp-hide" id ="fig_body_display_applsci-12-08972-f006" > <div class="html-caption" > <b>Figure 6.</b> Next Five Topics.</div> <div class="html-img"><img alt="Applsci 12 08972 g006" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g006.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g006.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g006.png" /></div> </div><div class="html-fig-wrap" id="applsci-12-08972-f007"> <div class='html-fig_img'> <div class="html-figpopup html-figpopup-link" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f007"> <img alt="Applsci 12 08972 g007 550" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g007.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g007.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g007-550.jpg" /> <a class="html-expand html-figpopup" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f007"></a> </div> </div> <div class="html-fig_description"> <b>Figure 7.</b> Next Five Topics. 
<!-- <p><a class="html-figpopup" href="#fig_body_display_applsci-12-08972-f007"> Click here to enlarge figure </a></p> --> </div> </div> <div class="html-fig_show mfp-hide" id ="fig_body_display_applsci-12-08972-f007" > <div class="html-caption" > <b>Figure 7.</b> Next Five Topics.</div> <div class="html-img"><img alt="Applsci 12 08972 g007" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g007.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g007.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g007.png" /></div> </div><div class="html-fig-wrap" id="applsci-12-08972-f008"> <div class='html-fig_img'> <div class="html-figpopup html-figpopup-link" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f008"> <img alt="Applsci 12 08972 g008 550" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g008.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g008.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g008-550.jpg" /> <a class="html-expand html-figpopup" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f008"></a> </div> </div> <div class="html-fig_description"> <b>Figure 8.</b> Details of the next eight sections. <!-- <p><a class="html-figpopup" href="#fig_body_display_applsci-12-08972-f008"> Click here to enlarge figure </a></p> --> </div> </div> <div class="html-fig_show mfp-hide" id ="fig_body_display_applsci-12-08972-f008" > <div class="html-caption" > <b>Figure 8.</b> Details of the next eight sections.</div> <div class="html-img"><img alt="Applsci 12 08972 g008" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g008.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g008.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g008.png" /></div> </div><div class="html-fig-wrap" id="applsci-12-08972-f009"> <div class='html-fig_img'> <div class="html-figpopup html-figpopup-link" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f009"> <img alt="Applsci 12 08972 g009 550" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g009.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g009.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g009-550.jpg" /> <a class="html-expand html-figpopup" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f009"></a> </div> </div> <div class="html-fig_description"> <b>Figure 9.</b> TDL Methods. 
<!-- <p><a class="html-figpopup" href="#fig_body_display_applsci-12-08972-f009"> Click here to enlarge figure </a></p> --> </div> </div> <div class="html-fig_show mfp-hide" id ="fig_body_display_applsci-12-08972-f009" > <div class="html-caption" > <b>Figure 9.</b> TDL Methods.</div> <div class="html-img"><img alt="Applsci 12 08972 g009" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g009.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g009.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g009.png" /></div> </div><div class="html-fig-wrap" id="applsci-12-08972-f010"> <div class='html-fig_img'> <div class="html-figpopup html-figpopup-link" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f010"> <img alt="Applsci 12 08972 g010 550" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g010.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g010.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g010-550.jpg" /> <a class="html-expand html-figpopup" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f010"></a> </div> </div> <div class="html-fig_description"> <b>Figure 10.</b> Basic Building Block of RDL Types. <!-- <p><a class="html-figpopup" href="#fig_body_display_applsci-12-08972-f010"> Click here to enlarge figure </a></p> --> </div> </div> <div class="html-fig_show mfp-hide" id ="fig_body_display_applsci-12-08972-f010" > <div class="html-caption" > <b>Figure 10.</b> Basic Building Block of RDL Types.</div> <div class="html-img"><img alt="Applsci 12 08972 g010" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g010.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g010.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g010.png" /></div> </div><div class="html-fig-wrap" id="applsci-12-08972-f011"> <div class='html-fig_img'> <div class="html-figpopup html-figpopup-link" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f011"> <img alt="Applsci 12 08972 g011 550" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g011.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g011.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g011-550.jpg" /> <a class="html-expand html-figpopup" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f011"></a> </div> </div> <div class="html-fig_description"> <b>Figure 11.</b> The basic building block of a ResNet. 
<!-- <p><a class="html-figpopup" href="#fig_body_display_applsci-12-08972-f011"> Click here to enlarge figure </a></p> --> </div> </div> <div class="html-fig_show mfp-hide" id ="fig_body_display_applsci-12-08972-f011" > <div class="html-caption" > <b>Figure 11.</b> The basic building block of a ResNet.</div> <div class="html-img"><img alt="Applsci 12 08972 g011" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g011.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g011.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g011.png" /></div> </div><div class="html-fig-wrap" id="applsci-12-08972-f012"> <div class='html-fig_img'> <div class="html-figpopup html-figpopup-link" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f012"> <img alt="Applsci 12 08972 g012 550" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g012.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g012.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g012-550.jpg" /> <a class="html-expand html-figpopup" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#fig_body_display_applsci-12-08972-f012"></a> </div> </div> <div class="html-fig_description"> <b>Figure 12.</b> Reduction in Depth and Width. <!-- <p><a class="html-figpopup" href="#fig_body_display_applsci-12-08972-f012"> Click here to enlarge figure </a></p> --> </div> </div> <div class="html-fig_show mfp-hide" id ="fig_body_display_applsci-12-08972-f012" > <div class="html-caption" > <b>Figure 12.</b> Reduction in Depth and Width.</div> <div class="html-img"><img alt="Applsci 12 08972 g012" data-large="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g012.png" data-original="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g012.png" data-lsrc="/applsci/applsci-12-08972/article_deploy/html/images/applsci-12-08972-g012.png" /></div> </div><div class="html-table-wrap" id="applsci-12-08972-t001"> <div class="html-table_wrap_td" > <div class="html-tablepopup html-tablepopup-link" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href='#table_body_display_applsci-12-08972-t001'> <img alt="Table" data-lsrc="https://www.mdpi.com/img/table.png" /> <a class="html-expand html-tablepopup" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/12/18/8972/display" href="#table_body_display_applsci-12-08972-t001"></a> </div> </div> <div class="html-table_wrap_discription"> <b>Table 1.</b> Table of Literature Review. 
</div> </div> <div class="html-table_show mfp-hide " id ="table_body_display_applsci-12-08972-t001" > <div class="html-caption" ><b>Table 1.</b> Table of Literature Review.</div> <table > <thead ><tr ><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Authors</th><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Title</th><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Publication</th><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Year</th><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Publisher</th></tr></thead><tbody ><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Li, et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Image recognition based on deep residual shrinkage Network</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2021 International Conference on Artificial Intelligence and Electromechanical Automation (AIEA)</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2021</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Yang, et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Image Recognition Based on an Improved Deep Residual Shrinkage Network</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Available at SSRN 4013383</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' > </td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' > </td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Kaiser et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Depthwise separable convolutions for neural machine translation</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >arXiv preprint arXiv:1706.03059</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2017</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' > </td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Zhang et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >EleAtt-RNN: Adding attentiveness to neurons in recurrent neural networks</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE Transactions on Image Processing</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2019</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Salur et al.</td><td 
align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >A novel hybrid deep learning model for sentiment classification</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE Access</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2020</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Lu et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Android malware detection based on a hybrid deep learning model</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Security and Communication Networks</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2020</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Hindawi</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Huang et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Deep networks with stochastic depth</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >European conference on computer vision</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2016</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Springer</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Chen et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Deep networks with stochastic depth for acoustic modelling</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2016 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA)</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2016</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Koonce et al.</td><td colspan='2' align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Convolutional Neural Networks with Swift for Tensorflow: Image Recognition and Dataset Categorization</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2021</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Springer</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Neupane et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Bearing Fault Detection Using Scalogram and Switchable Normalization-Based CNN (SN-CNN)</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE Access</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2021</td><td align='center' valign='middle' style='border-bottom:solid thin' 
class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Jafar et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Hyperparameter optimization for deep residual learning in image classification</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2020 IEEE International Conference on Autonomic Computing and Self-Organizing Systems Companion (ACSOS-C)</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2020</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Qian et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Very deep convolutional neural networks for noise robust speech recognition</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE/ACM Transactions on Audio, Speech, and Language Processing</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2016</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Wang et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Training very deep CNNs for general non-blind deconvolution</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE Transactions on Image Processing</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2018</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Tai, Ying et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Image super-resolution via deep recursive residual network</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Proceedings of the IEEE conference on computer vision and pattern recognition</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2017</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' > </td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Galea, Christia et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Matching software-generated sketches to face photographs with a very deep CNN, morphed faces, and transfer learning</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE Transactions on Information Forensics and Security</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2017</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Moriya et al.</td><td 
align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Transfer learning method for very deep CNN for text classification and methods for its evaluation</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2018 IEEE 42nd annual computer software and applications conference (COMPSAC)</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2018</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Afzal et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Cutting the error by half: Investigation of very deep cnn and advanced training strategies for document image classification</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR)</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2017</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Bashir et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >A comprehensive review of deep learning-based single image super-resolution</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >PeerJ Computer Science</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2021</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >PeerJ Inc.</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Bao et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >A power-efficient optimizing framework fpga accelerator based on winograd for yolo</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Ieee Access</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2020</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Yasin et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Severity grading and early retinopathy lesion detection through hybrid inception-ResNet architecture</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Sensors</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2021</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >MDPI</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Rathgeb et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Effects of image compression on face image manipulation detection: A case study on facial retouching</td><td align='center' 
valign='middle' style='border-bottom:solid thin' class='html-align-center' >IET Biometrics</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2021</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Wiley Online Library</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Siam et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Deep semantic segmentation for automated driving: Taxonomy, roadmap and challenges</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2017 IEEE 20th international conference on intelligent transportation systems (ITSC)</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2017</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Zhang et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Plug-and-play image restoration with deep denoiser prior</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2021</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Cheng et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >An adaptive and asymmetric residual hash for fast image retrieval</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE Access</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2019</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Fujii, Tatsuki et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Generating cooking recipes from cooking videos using deep learning considering previous process with video encoding</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Proceedings of the 3rd International Conference on Applications of Intelligent Systems</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2020</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' > </td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Fu, Zhiyang et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >A residual dense network assisted sparse view reconstruction for breast computed tomography</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Scientific reports</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2020</td><td 
align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Nature Publishing Group</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Wu et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Deep learning based spectral CT imaging</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Neural Networks</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2021</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Elsevier</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Jalali et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >ResBCDU-Net: a deep learning framework for lung CT image segmentation</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Sensors</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2021</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >MDPI</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Tekade et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Lung cancer detection and classification using deep learning</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2018 fourth international conference on computing communication control and automation (ICCUBEA)</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2018</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Cui et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >LiteDepthwiseNet: A lightweight network for hyperspectral image classification</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE Transactions on Geoscience and Remote Sensing</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2021</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Feng et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Semi-supervised learning for pelvic MR image segmentation based on multi-task residual fully convolutional networks</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2018 IEEE 15th International Symposium on Biomedical Imaging (ISBI 2018)</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2018</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Li, et al.</td><td align='center' valign='middle' style='border-bottom:solid 
thin' class='html-align-center' >Few-shot contrastive learning for image classification and its application to insulator identification</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Applied Intelligence</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2022</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Springer</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Yang, et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Classification of trash for recyclability status</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >CS229 project report</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2016</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Publisher Name San Francisco, CA, USA</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Karar, et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Cascaded deep learning classifiers for computer-aided diagnosis of COVID-19 and pneumonia diseases in X-ray scans</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Complex &amp; Intelligent Systems</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2021</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Springer</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Zhu, et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >A hybrid CNN–LSTM network for the classification of human activities based on micro-Doppler radar</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE Access</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2020</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Rahman, et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Efficient FPGA acceleration of convolutional neural networks using logical-3D compute array</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2016 Design, Automation &amp; Test in Europe Conference &amp; Exhibition (DATE)</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2016</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Michael, et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Classification model of ‘Toraja’arabica coffee fruit ripeness levels using convolution neural network approach</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' 
>ILKOM Jurnal Ilmiah</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2021</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' > </td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Al-Kharraz, et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Automated system for chromosome karyotyping to recognize the most common numerical abnormalities using deep learning</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE Access</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2020</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Brachmann, et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Visual camera re-localization from RGB and RGB-D images using DSAC</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2021</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IEEE</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Akhand, et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Facial emotion recognition using transfer learning in the deep CNN</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Electronics</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2021</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Multidisciplinary Digital Publishing Institute</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >He, et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Deep residual learning for image recognition</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Proceedings of the IEEE conference on computer vision and pattern recognition</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2016</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' > </td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Couso, et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >A general framework for maximizing likelihood under incomplete data</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >International Journal of Approximate Reasoning</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2018</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Elsevier</td></tr><tr ><td align='center' valign='middle' 
style='border-bottom:solid thin' class='html-align-center' >Liang, et al.</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >A hybrid quantum–classical neural network with deep residual learning</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Neural Networks</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2021</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Elsevier</td></tr></tbody> </table> </div></section><section class='html-fn_group'><table><tr id=''><td></td><td><div class='html-p'><b>Publisher’s Note:</b> MDPI stays neutral with regard to jurisdictional claims in published maps and institutional affiliations.</div></td></tr></table></section> <section id="html-copyright"><br>© 2022 by the authors. Licensee MDPI, Basel, Switzerland. This article is an open access article distributed under the terms and conditions of the Creative Commons Attribution (CC BY) license (<a href='https://creativecommons.org/licenses/by/4.0/' target='_blank' rel="noopener noreferrer" >https://creativecommons.org/licenses/by/4.0/</a>).</section> </div> </div> <div class="additional-content"> <h2><a name="cite"></a>Share and Cite</h2> <div class="social-media-links" style="text-align: left;"> <a href="/cdn-cgi/l/email-protection#9da2bbfcf0eda6eee8fff7f8fee9a0dbeff2f0b8afadd0d9cdd4b8aedcb8afadb8afafd9f8f8edb8afadcff8eef4f9e8fcf1b8afadd1f8fceff3f4f3fab8afadfbf2efb8afadd4f0fcfaf8b8afadcff8fef2faf3f4e9f4f2f3b8aedcb8afaddcb8afadcee8efebf8e4bbece8f2e9a6bbfcf0eda6fff2f9e4a0f5e9e9edeea7b2b2eaeaeab3f0f9edf4b3fef2f0b2aca5acaaa4aaabb8aedcb8addcb8addcd9f8f8edb8afadcff8eef4f9e8fcf1b8afadd1f8fceff3f4f3fab8afadfbf2efb8afadd4f0fcfaf8b8afadcff8fef2faf3f4e9f4f2f3b8aedcb8afaddcb8afadcee8efebf8e4b8addcb8addcdcffeee9effcfee9b8aedcb8afadd9f8f8edb8afadcff8eef4f9e8fcf1b8afadd3f8e9eaf2eff6eeb8afadf5fcebf8b8afadeff8fef8f3e9f1e4b8afadfff8f8f3b8afadeef5f2eaf3b8afade9f2b8afadeef4faf3f4fbf4fefcf3e9f1e4b8afadf4f0edeff2ebf8b8afade9f5f8b8afadedf8effbf2eff0fcf3fef8b8afadf2fbb8afadf3f8e8effcf1b8afadf3f8e9eaf2eff6eeb8afade9effcf4f3f8f9b8afadf2f3b8afadd4f0fcfaf8d3f8e9b8afdeb8afadeaf4e9f5b8afadeff8eee8f1e9eeb8afadfff8fce9f4f3fab8afadfcf1f1b8afadedeff8ebf4f2e8eeb8afadf0f8e9f5f2f9eeb8afadf2f3b8afade9f5f4eeb8afadf9fce9fceef8e9b8afadffe4b8afadf1fceffaf8b8afadf0fceffaf4f3eeb8afadf4f3b8afade9f5f8b8afadf4f0fcfaf8b8afadfef1fceeeef4fbf4fefce9f4f2f3b8afade9fceef6b3b8afadd5f2eaf8ebf8efb8afdeb8afade9f5f8b8afadf0f8fcf3f4f3fab8afadf2fbb8afade9f5f8eef8b8afadf4f0edeff8eeeef4ebf8b8afadf3e8f0fff8efeeb8afadfcf3f9b8afade9f5f8f4efb8afadf4f0edf1f4fefce9f4f2f3eeb8afadfbf2efb8afadfbe8e9e8eff8b8afadeff8eef8fceffef5b8afadfceff8b8afadf3f2e9b8afadfbe8f1f1e4b8afade8f3f9f8efeee9f2f2f9b8afade4f8e9b3b8afadd4f3b8afade9f5f4eeb8afadeee8efebf8e4b8afdeb8afadeaf8b8afadeaf4f1f1b8afade9efe4b8afade9f2b8afadf8e5edf1fcf4f3b8afadeaf5fce9b8afadd9f8f8edb8afadcff8eef4f9e8fcf1b8afadd3f8e9eaf2eff6eeb8afadfceff8b8afdeb8afadf5f2eab8afade9f5f8e4b8afadfcfef5f4f8ebf8b8afade9f5f8f4efb8afadf8e5fef8f1f1f8f3e9b8afadeff8eee8f1e9eeb8afdeb8afadfcf3f9b8afadeaf5e4b8afade9f5f8f4efb8afadeee8fefef8eeeefbe8f1b8afadf4f0edf1f8f0f8f3e9fce9f4f2f3b8afadf4f3b8afadedeffcfee9f4fef8b8afadeff8edeff8eef8f3e9eeb8afadfcb8afadeef4faf3f4fbf4fefcf3e9b8afadfcf9ebfcf3fef8b8afadf2ebf8efb8afadf8e5f4eee9f4f3fab8afade9f8fef5f3f4ece8f8eeb3b8afadcaf8b8afadfcf1eef2b8afadf9f4eefee8eeeeb8afadeef2f0f8b8afadf2edf8f3b8afadece8f8eee9f4f2f3eeb8afa
MDPI and ACS Style
Shafiq, M.; Gu, Z. Deep Residual Learning for Image Recognition: A Survey. Appl. Sci. 2022, 12, 8972. https://doi.org/10.3390/app12188972

AMA Style
Shafiq M, Gu Z. Deep Residual Learning for Image Recognition: A Survey. Applied Sciences. 2022; 12(18):8972. https://doi.org/10.3390/app12188972

Chicago/Turabian Style
Shafiq, Muhammad, and Zhaoquan Gu. 2022. "Deep Residual Learning for Image Recognition: A Survey" Applied Sciences 12, no. 18: 8972. https://doi.org/10.3390/app12188972

APA Style
Shafiq, M., & Gu, Z. (2022). Deep Residual Learning for Image Recognition: A Survey. Applied Sciences, 12(18), 8972. https://doi.org/10.3390/app12188972

Note that from the first issue of 2016, this journal uses article numbers instead of page numbers.
name="articles_ids[]" value="908988"> <input type="hidden" name="export_format_top" value="ris"> <input type="hidden" name="export_submit_top" value=""> </form> <div> Export citation file: <a href="javascript:window.document.forms['export-bibtex'].submit()">BibTeX</a> | <a href="javascript:window.document.forms['export-endnote'].submit()">EndNote</a> | <a href="javascript:window.document.forms['export-ris'].submit()">RIS</a> </div> </div> <div class="small-12 columns"> <div class="in-tab"> <div><b>MDPI and ACS Style</b></div> <p> Shafiq, M.; Gu, Z. Deep Residual Learning for Image Recognition: A Survey. <em>Appl. Sci.</em> <b>2022</b>, <em>12</em>, 8972. https://doi.org/10.3390/app12188972 </p> <div style="display: block"> <b>AMA Style</b><br> <p> Shafiq M, Gu Z. Deep Residual Learning for Image Recognition: A Survey. <em>Applied Sciences</em>. 2022; 12(18):8972. https://doi.org/10.3390/app12188972 </p> <b>Chicago/Turabian Style</b><br> <p> Shafiq, Muhammad, and Zhaoquan Gu. 2022. "Deep Residual Learning for Image Recognition: A Survey" <em>Applied Sciences</em> 12, no. 18: 8972. https://doi.org/10.3390/app12188972 </p> <b>APA Style</b><br> <p> Shafiq, M., & Gu, Z. (2022). Deep Residual Learning for Image Recognition: A Survey. <em>Applied Sciences</em>, <em>12</em>(18), 8972. https://doi.org/10.3390/app12188972 </p> </div> </div> <div class="info-box no-margin"> Note that from the first issue of 2016, this journal uses article numbers instead of page numbers. See further details <a target="_blank" href="https://www.mdpi.com/about/announcements/784">here</a>. </div> </div> </div> <a class="close-reveal-modal" aria-label="Close"> <i class="material-icons">clear</i> </a> </div> </div> </div> </div> </div> </section> <div id="footer"> <div class="journal-info"> <span> <em><a class="Var_JournalInfo" href="/journal/applsci">Appl. 
Sci.</a></em>, EISSN 2076-3417, Published by MDPI </span> <div class="large-right"> <span> <a href="/rss/journal/applsci" class="rss-link">RSS</a> </span> <span> <a href="/journal/applsci/toc-alert">Content Alert</a> </span> </div> </div> <div class="row full-width footer-links" data-equalizer="footer" data-equalizer-mq="small"> <div class="large-2 large-push-4 medium-3 small-6 columns" data-equalizer-watch="footer"> <h3> Further Information </h3> <a href="/apc"> Article Processing Charges </a> <a href="/about/payment"> Pay an Invoice </a> <a href="/openaccess"> Open Access Policy </a> <a href="/about/contact"> Contact MDPI </a> <a href="https://careers.mdpi.com" target="_blank" rel="noopener noreferrer"> Jobs at MDPI </a> </div> <div class="large-2 large-push-4 medium-3 small-6 columns" data-equalizer-watch="footer"> <h3> Guidelines </h3> <a href="/authors"> For Authors </a> <a href="/reviewers"> For Reviewers </a> <a href="/editors"> For Editors </a> <a href="/librarians"> For Librarians </a> <a href="/publishing_services"> For Publishers </a> <a href="/societies"> For Societies </a> <a href="/conference_organizers"> For Conference Organizers </a> </div> <div class="large-2 large-push-4 medium-3 small-6 columns"> <h3> MDPI Initiatives </h3> <a href="https://sciforum.net" target="_blank" rel="noopener noreferrer"> Sciforum </a> <a href="https://www.mdpi.com/books" target="_blank" rel="noopener noreferrer"> MDPI Books </a> <a href="https://www.preprints.org" target="_blank" rel="noopener noreferrer"> Preprints.org </a> <a href="https://www.scilit.net" target="_blank" rel="noopener noreferrer"> Scilit </a> <a href="https://sciprofiles.com?utm_source=mpdi.com&utm_medium=bottom_menu&utm_campaign=initiative" target="_blank" rel="noopener noreferrer"> SciProfiles </a> <a href="https://encyclopedia.pub" target="_blank" rel="noopener noreferrer"> Encyclopedia </a> <a href="https://jams.pub" target="_blank" rel="noopener noreferrer"> JAMS </a> <a href="/about/proceedings"> Proceedings Series </a> </div> <div class="large-2 large-push-4 medium-3 small-6 right-border-large-without columns UA_FooterFollowMDPI"> <h3> Follow MDPI </h3> <a href="https://www.linkedin.com/company/mdpi" target="_blank" rel="noopener noreferrer"> LinkedIn </a> <a href="https://www.facebook.com/MDPIOpenAccessPublishing" target="_blank" rel="noopener noreferrer"> Facebook </a> <a href="https://twitter.com/MDPIOpenAccess" target="_blank" rel="noopener noreferrer"> Twitter </a> </div> <div id="footer-subscribe" class="large-4 large-pull-8 medium-12 small-12 left-border-large columns"> <div class="footer-subscribe__container"> <img class="show-for-large-up" src="https://pub.mdpi-res.com/img/design/mdpi-pub-logo-white-small.png?71d18e5f805839ab?1732286508" alt="MDPI" title="MDPI Open Access Journals" style="height: 50px; margin-bottom: 10px;"> <form id="newsletter" method="POST" action="/subscribe"> <p> Subscribe to receive issue release notifications and newsletters from MDPI journals </p> <select multiple id="newsletter-journal" class="foundation-select" name="journals[]"> <option value="acoustics">Acoustics</option> <option value="amh">Acta Microbiologica Hellenica</option> <option value="actuators">Actuators</option> <option value="admsci">Administrative Sciences</option> <option value="adolescents">Adolescents</option> <option value="arm">Advances in Respiratory Medicine</option> <option value="aerobiology">Aerobiology</option> <option value="aerospace">Aerospace</option> <option value="agriculture">Agriculture</option> 
<option value="agriengineering">AgriEngineering</option> <option value="agrochemicals">Agrochemicals</option> <option value="agronomy">Agronomy</option> <option value="ai">AI</option> <option value="air">Air</option> <option value="algorithms">Algorithms</option> <option value="allergies">Allergies</option> <option value="alloys">Alloys</option> <option value="analytica">Analytica</option> <option value="analytics">Analytics</option> <option value="anatomia">Anatomia</option> <option value="anesthres">Anesthesia Research</option> <option value="animals">Animals</option> <option value="antibiotics">Antibiotics</option> <option value="antibodies">Antibodies</option> <option value="antioxidants">Antioxidants</option> <option value="applbiosci">Applied Biosciences</option> <option value="applmech">Applied Mechanics</option> <option value="applmicrobiol">Applied Microbiology</option> <option value="applnano">Applied Nano</option> <option value="applsci">Applied Sciences</option> <option value="asi">Applied System Innovation</option> <option value="appliedchem">AppliedChem</option> <option value="appliedmath">AppliedMath</option> <option value="aquacj">Aquaculture Journal</option> <option value="architecture">Architecture</option> <option value="arthropoda">Arthropoda</option> <option value="arts">Arts</option> <option value="astronomy">Astronomy</option> <option value="atmosphere">Atmosphere</option> <option value="atoms">Atoms</option> <option value="audiolres">Audiology Research</option> <option value="automation">Automation</option> <option value="axioms">Axioms</option> <option value="bacteria">Bacteria</option> <option value="batteries">Batteries</option> <option value="behavsci">Behavioral Sciences</option> <option value="beverages">Beverages</option> <option value="BDCC">Big Data and Cognitive Computing</option> <option value="biochem">BioChem</option> <option value="bioengineering">Bioengineering</option> <option value="biologics">Biologics</option> <option value="biology">Biology</option> <option value="blsf">Biology and Life Sciences Forum</option> <option value="biomass">Biomass</option> <option value="biomechanics">Biomechanics</option> <option value="biomed">BioMed</option> <option value="biomedicines">Biomedicines</option> <option value="biomedinformatics">BioMedInformatics</option> <option value="biomimetics">Biomimetics</option> <option value="biomolecules">Biomolecules</option> <option value="biophysica">Biophysica</option> <option value="biosensors">Biosensors</option> <option value="biotech">BioTech</option> <option value="birds">Birds</option> <option value="blockchains">Blockchains</option> <option value="brainsci">Brain Sciences</option> <option value="buildings">Buildings</option> <option value="businesses">Businesses</option> <option value="carbon">C</option> <option value="cancers">Cancers</option> <option value="cardiogenetics">Cardiogenetics</option> <option value="catalysts">Catalysts</option> <option value="cells">Cells</option> <option value="ceramics">Ceramics</option> <option value="challenges">Challenges</option> <option value="ChemEngineering">ChemEngineering</option> <option value="chemistry">Chemistry</option> <option value="chemproc">Chemistry Proceedings</option> <option value="chemosensors">Chemosensors</option> <option value="children">Children</option> <option value="chips">Chips</option> <option value="civileng">CivilEng</option> <option value="cleantechnol">Clean Technologies</option> <option value="climate">Climate</option> <option 
value="ctn">Clinical and Translational Neuroscience</option> <option value="clinbioenerg">Clinical Bioenergetics</option> <option value="clinpract">Clinics and Practice</option> <option value="clockssleep">Clocks &amp; Sleep</option> <option value="coasts">Coasts</option> <option value="coatings">Coatings</option> <option value="colloids">Colloids and Interfaces</option> <option value="colorants">Colorants</option> <option value="commodities">Commodities</option> <option value="complications">Complications</option> <option value="compounds">Compounds</option> <option value="computation">Computation</option> <option value="csmf">Computer Sciences &amp; Mathematics Forum</option> <option value="computers">Computers</option> <option value="condensedmatter">Condensed Matter</option> <option value="conservation">Conservation</option> <option value="constrmater">Construction Materials</option> <option value="cmd">Corrosion and Materials Degradation</option> <option value="cosmetics">Cosmetics</option> <option value="covid">COVID</option> <option value="crops">Crops</option> <option value="cryo">Cryo</option> <option value="cryptography">Cryptography</option> <option value="crystals">Crystals</option> <option value="cimb">Current Issues in Molecular Biology</option> <option value="curroncol">Current Oncology</option> <option value="dairy">Dairy</option> <option value="data">Data</option> <option value="dentistry">Dentistry Journal</option> <option value="dermato">Dermato</option> <option value="dermatopathology">Dermatopathology</option> <option value="designs">Designs</option> <option value="diabetology">Diabetology</option> <option value="diagnostics">Diagnostics</option> <option value="dietetics">Dietetics</option> <option value="digital">Digital</option> <option value="disabilities">Disabilities</option> <option value="diseases">Diseases</option> <option value="diversity">Diversity</option> <option value="dna">DNA</option> <option value="drones">Drones</option> <option value="ddc">Drugs and Drug Candidates</option> <option value="dynamics">Dynamics</option> <option value="earth">Earth</option> <option value="ecologies">Ecologies</option> <option value="econometrics">Econometrics</option> <option value="economies">Economies</option> <option value="education">Education Sciences</option> <option value="electricity">Electricity</option> <option value="electrochem">Electrochem</option> <option value="electronicmat">Electronic Materials</option> <option value="electronics">Electronics</option> <option value="ecm">Emergency Care and Medicine</option> <option value="encyclopedia">Encyclopedia</option> <option value="endocrines">Endocrines</option> <option value="energies">Energies</option> <option value="esa">Energy Storage and Applications</option> <option value="eng">Eng</option> <option value="engproc">Engineering Proceedings</option> <option value="entropy">Entropy</option> <option value="environsciproc">Environmental Sciences Proceedings</option> <option value="environments">Environments</option> <option value="epidemiologia">Epidemiologia</option> <option value="epigenomes">Epigenomes</option> <option value="ebj">European Burn Journal</option> <option value="ejihpe">European Journal of Investigation in Health, Psychology and Education</option> <option value="fermentation">Fermentation</option> <option value="fibers">Fibers</option> <option value="fintech">FinTech</option> <option value="fire">Fire</option> <option value="fishes">Fishes</option> <option value="fluids">Fluids</option> <option 
value="foods">Foods</option> <option value="forecasting">Forecasting</option> <option value="forensicsci">Forensic Sciences</option> <option value="forests">Forests</option> <option value="fossstud">Fossil Studies</option> <option value="foundations">Foundations</option> <option value="fractalfract">Fractal and Fractional</option> <option value="fuels">Fuels</option> <option value="future">Future</option> <option value="futureinternet">Future Internet</option> <option value="futurepharmacol">Future Pharmacology</option> <option value="futuretransp">Future Transportation</option> <option value="galaxies">Galaxies</option> <option value="games">Games</option> <option value="gases">Gases</option> <option value="gastroent">Gastroenterology Insights</option> <option value="gastrointestdisord">Gastrointestinal Disorders</option> <option value="gastronomy">Gastronomy</option> <option value="gels">Gels</option> <option value="genealogy">Genealogy</option> <option value="genes">Genes</option> <option value="geographies">Geographies</option> <option value="geohazards">GeoHazards</option> <option value="geomatics">Geomatics</option> <option value="geometry">Geometry</option> <option value="geosciences">Geosciences</option> <option value="geotechnics">Geotechnics</option> <option value="geriatrics">Geriatrics</option> <option value="glacies">Glacies</option> <option value="gucdd">Gout, Urate, and Crystal Deposition Disease</option> <option value="grasses">Grasses</option> <option value="hardware">Hardware</option> <option value="healthcare">Healthcare</option> <option value="hearts">Hearts</option> <option value="hemato">Hemato</option> <option value="hematolrep">Hematology Reports</option> <option value="heritage">Heritage</option> <option value="histories">Histories</option> <option value="horticulturae">Horticulturae</option> <option value="hospitals">Hospitals</option> <option value="humanities">Humanities</option> <option value="humans">Humans</option> <option value="hydrobiology">Hydrobiology</option> <option value="hydrogen">Hydrogen</option> <option value="hydrology">Hydrology</option> <option value="hygiene">Hygiene</option> <option value="immuno">Immuno</option> <option value="idr">Infectious Disease Reports</option> <option value="informatics">Informatics</option> <option value="information">Information</option> <option value="infrastructures">Infrastructures</option> <option value="inorganics">Inorganics</option> <option value="insects">Insects</option> <option value="instruments">Instruments</option> <option value="iic">Intelligent Infrastructure and Construction</option> <option value="ijerph">International Journal of Environmental Research and Public Health</option> <option value="ijfs">International Journal of Financial Studies</option> <option value="ijms">International Journal of Molecular Sciences</option> <option value="IJNS">International Journal of Neonatal Screening</option> <option value="ijpb">International Journal of Plant Biology</option> <option value="ijt">International Journal of Topology</option> <option value="ijtm">International Journal of Translational Medicine</option> <option value="ijtpp">International Journal of Turbomachinery, Propulsion and Power</option> <option value="ime">International Medical Education</option> <option value="inventions">Inventions</option> <option value="IoT">IoT</option> <option value="ijgi">ISPRS International Journal of Geo-Information</option> <option value="J">J</option> <option value="jal">Journal of Ageing and Longevity</option> 
<option value="jcdd">Journal of Cardiovascular Development and Disease</option> <option value="jcto">Journal of Clinical &amp; Translational Ophthalmology</option> <option value="jcm">Journal of Clinical Medicine</option> <option value="jcs">Journal of Composites Science</option> <option value="jcp">Journal of Cybersecurity and Privacy</option> <option value="jdad">Journal of Dementia and Alzheimer&#039;s Disease</option> <option value="jdb">Journal of Developmental Biology</option> <option value="jeta">Journal of Experimental and Theoretical Analyses</option> <option value="jfb">Journal of Functional Biomaterials</option> <option value="jfmk">Journal of Functional Morphology and Kinesiology</option> <option value="jof">Journal of Fungi</option> <option value="jimaging">Journal of Imaging</option> <option value="jintelligence">Journal of Intelligence</option> <option value="jlpea">Journal of Low Power Electronics and Applications</option> <option value="jmmp">Journal of Manufacturing and Materials Processing</option> <option value="jmse">Journal of Marine Science and Engineering</option> <option value="jmahp">Journal of Market Access &amp; Health Policy</option> <option value="jmp">Journal of Molecular Pathology</option> <option value="jnt">Journal of Nanotheranostics</option> <option value="jne">Journal of Nuclear Engineering</option> <option value="ohbm">Journal of Otorhinolaryngology, Hearing and Balance Medicine</option> <option value="jop">Journal of Parks</option> <option value="jpm">Journal of Personalized Medicine</option> <option value="jpbi">Journal of Pharmaceutical and BioTech Industry</option> <option value="jor">Journal of Respiration</option> <option value="jrfm">Journal of Risk and Financial Management</option> <option value="jsan">Journal of Sensor and Actuator Networks</option> <option value="joma">Journal of the Oman Medical Association</option> <option value="jtaer">Journal of Theoretical and Applied Electronic Commerce Research</option> <option value="jvd">Journal of Vascular Diseases</option> <option value="jox">Journal of Xenobiotics</option> <option value="jzbg">Journal of Zoological and Botanical Gardens</option> <option value="journalmedia">Journalism and Media</option> <option value="kidneydial">Kidney and Dialysis</option> <option value="kinasesphosphatases">Kinases and Phosphatases</option> <option value="knowledge">Knowledge</option> <option value="labmed">LabMed</option> <option value="laboratories">Laboratories</option> <option value="land">Land</option> <option value="languages">Languages</option> <option value="laws">Laws</option> <option value="life">Life</option> <option value="limnolrev">Limnological Review</option> <option value="lipidology">Lipidology</option> <option value="liquids">Liquids</option> <option value="literature">Literature</option> <option value="livers">Livers</option> <option value="logics">Logics</option> <option value="logistics">Logistics</option> <option value="lubricants">Lubricants</option> <option value="lymphatics">Lymphatics</option> <option value="make">Machine Learning and Knowledge Extraction</option> <option value="machines">Machines</option> <option value="macromol">Macromol</option> <option value="magnetism">Magnetism</option> <option value="magnetochemistry">Magnetochemistry</option> <option value="marinedrugs">Marine Drugs</option> <option value="materials">Materials</option> <option value="materproc">Materials Proceedings</option> <option value="mca">Mathematical and Computational Applications</option> <option 
value="mathematics">Mathematics</option> <option value="medsci">Medical Sciences</option> <option value="msf">Medical Sciences Forum</option> <option value="medicina">Medicina</option> <option value="medicines">Medicines</option> <option value="membranes">Membranes</option> <option value="merits">Merits</option> <option value="metabolites">Metabolites</option> <option value="metals">Metals</option> <option value="meteorology">Meteorology</option> <option value="methane">Methane</option> <option value="mps">Methods and Protocols</option> <option value="metrics">Metrics</option> <option value="metrology">Metrology</option> <option value="micro">Micro</option> <option value="microbiolres">Microbiology Research</option> <option value="micromachines">Micromachines</option> <option value="microorganisms">Microorganisms</option> <option value="microplastics">Microplastics</option> <option value="minerals">Minerals</option> <option value="mining">Mining</option> <option value="modelling">Modelling</option> <option value="mmphys">Modern Mathematical Physics</option> <option value="molbank">Molbank</option> <option value="molecules">Molecules</option> <option value="mti">Multimodal Technologies and Interaction</option> <option value="muscles">Muscles</option> <option value="nanoenergyadv">Nanoenergy Advances</option> <option value="nanomanufacturing">Nanomanufacturing</option> <option value="nanomaterials">Nanomaterials</option> <option value="ndt">NDT</option> <option value="network">Network</option> <option value="neuroglia">Neuroglia</option> <option value="neurolint">Neurology International</option> <option value="neurosci">NeuroSci</option> <option value="nitrogen">Nitrogen</option> <option value="ncrna">Non-Coding RNA</option> <option value="nursrep">Nursing Reports</option> <option value="nutraceuticals">Nutraceuticals</option> <option value="nutrients">Nutrients</option> <option value="obesities">Obesities</option> <option value="oceans">Oceans</option> <option value="onco">Onco</option> <option value="optics">Optics</option> <option value="oral">Oral</option> <option value="organics">Organics</option> <option value="organoids">Organoids</option> <option value="osteology">Osteology</option> <option value="oxygen">Oxygen</option> <option value="parasitologia">Parasitologia</option> <option value="particles">Particles</option> <option value="pathogens">Pathogens</option> <option value="pathophysiology">Pathophysiology</option> <option value="pediatrrep">Pediatric Reports</option> <option value="pets">Pets</option> <option value="pharmaceuticals">Pharmaceuticals</option> <option value="pharmaceutics">Pharmaceutics</option> <option value="pharmacoepidemiology">Pharmacoepidemiology</option> <option value="pharmacy">Pharmacy</option> <option value="philosophies">Philosophies</option> <option value="photochem">Photochem</option> <option value="photonics">Photonics</option> <option value="phycology">Phycology</option> <option value="physchem">Physchem</option> <option value="psf">Physical Sciences Forum</option> <option value="physics">Physics</option> <option value="physiologia">Physiologia</option> <option value="plants">Plants</option> <option value="plasma">Plasma</option> <option value="platforms">Platforms</option> <option value="pollutants">Pollutants</option> <option value="polymers">Polymers</option> <option value="polysaccharides">Polysaccharides</option> <option value="populations">Populations</option> <option value="poultry">Poultry</option> <option value="powders">Powders</option> 
<option value="proceedings">Proceedings</option> <option value="processes">Processes</option> <option value="prosthesis">Prosthesis</option> <option value="proteomes">Proteomes</option> <option value="psychiatryint">Psychiatry International</option> <option value="psychoactives">Psychoactives</option> <option value="psycholint">Psychology International</option> <option value="publications">Publications</option> <option value="qubs">Quantum Beam Science</option> <option value="quantumrep">Quantum Reports</option> <option value="quaternary">Quaternary</option> <option value="radiation">Radiation</option> <option value="reactions">Reactions</option> <option value="realestate">Real Estate</option> <option value="receptors">Receptors</option> <option value="recycling">Recycling</option> <option value="rsee">Regional Science and Environmental Economics</option> <option value="religions">Religions</option> <option value="remotesensing">Remote Sensing</option> <option value="reports">Reports</option> <option value="reprodmed">Reproductive Medicine</option> <option value="resources">Resources</option> <option value="rheumato">Rheumato</option> <option value="risks">Risks</option> <option value="robotics">Robotics</option> <option value="ruminants">Ruminants</option> <option value="safety">Safety</option> <option value="sci">Sci</option> <option value="scipharm">Scientia Pharmaceutica</option> <option value="sclerosis">Sclerosis</option> <option value="seeds">Seeds</option> <option value="sensors">Sensors</option> <option value="separations">Separations</option> <option value="sexes">Sexes</option> <option value="signals">Signals</option> <option value="sinusitis">Sinusitis</option> <option value="smartcities">Smart Cities</option> <option value="socsci">Social Sciences</option> <option value="siuj">Société Internationale d’Urologie Journal</option> <option value="societies">Societies</option> <option value="software">Software</option> <option value="soilsystems">Soil Systems</option> <option value="solar">Solar</option> <option value="solids">Solids</option> <option value="spectroscj">Spectroscopy Journal</option> <option value="sports">Sports</option> <option value="standards">Standards</option> <option value="stats">Stats</option> <option value="stresses">Stresses</option> <option value="surfaces">Surfaces</option> <option value="surgeries">Surgeries</option> <option value="std">Surgical Techniques Development</option> <option value="sustainability">Sustainability</option> <option value="suschem">Sustainable Chemistry</option> <option value="symmetry">Symmetry</option> <option value="synbio">SynBio</option> <option value="systems">Systems</option> <option value="targets">Targets</option> <option value="taxonomy">Taxonomy</option> <option value="technologies">Technologies</option> <option value="telecom">Telecom</option> <option value="textiles">Textiles</option> <option value="thalassrep">Thalassemia Reports</option> <option value="therapeutics">Therapeutics</option> <option value="thermo">Thermo</option> <option value="timespace">Time and Space</option> <option value="tomography">Tomography</option> <option value="tourismhosp">Tourism and Hospitality</option> <option value="toxics">Toxics</option> <option value="toxins">Toxins</option> <option value="transplantology">Transplantology</option> <option value="traumacare">Trauma Care</option> <option value="higheredu">Trends in Higher Education</option> <option value="tropicalmed">Tropical Medicine and Infectious Disease</option> <option 
value="universe">Universe</option> <option value="urbansci">Urban Science</option> <option value="uro">Uro</option> <option value="vaccines">Vaccines</option> <option value="vehicles">Vehicles</option> <option value="venereology">Venereology</option> <option value="vetsci">Veterinary Sciences</option> <option value="vibration">Vibration</option> <option value="virtualworlds">Virtual Worlds</option> <option value="viruses">Viruses</option> <option value="vision">Vision</option> <option value="waste">Waste</option> <option value="water">Water</option> <option value="wild">Wild</option> <option value="wind">Wind</option> <option value="women">Women</option> <option value="world">World</option> <option value="wevj">World Electric Vehicle Journal</option> <option value="youth">Youth</option> <option value="zoonoticdis">Zoonotic Diseases</option> </select> <input name="email" type="email" placeholder="Enter your email address..." required="required" /> <button class="genericCaptcha button button--dark UA_FooterNewsletterSubscribeButton" type="submit">Subscribe</button> </form> </div> </div> </div> <div id="footer-copyright"> <div class="row"> <div class="columns large-6 medium-6 small-12 text-left"> © 1996-2024 MDPI (Basel, Switzerland) unless otherwise stated </div> <div class="columns large-6 medium-6 small-12 small-text-left medium-text-right large-text-right"> <a data-dropdown="drop-view-disclaimer" aria-controls="drop-view-disclaimer" aria-expanded="false" data-options="align:top; is_hover:true; hover_timeout:2000;"> Disclaimer </a> <div id="drop-view-disclaimer" class="f-dropdown label__btn__dropdown label__btn__dropdown--wide text-left" data-dropdown-content aria-hidden="true" tabindex="-1"> Disclaimer/Publisher’s Note: The statements, opinions and data contained in all publications are solely those of the individual author(s) and contributor(s) and not of MDPI and/or the editor(s). MDPI and/or the editor(s) disclaim responsibility for any injury to people or property resulting from any ideas, methods, instructions or products referred to in the content. </div> <a href="/about/terms-and-conditions"> Terms and Conditions </a> <a href="/about/privacy"> Privacy Policy </a> </div> </div> </div> </div> <div id="cookie-notification" class="js-allow-cookies" style="display: none;"> <div class="columns large-10 medium-10 small-12"> We use cookies on our website to ensure you get the best experience.<br class="show-for-medium-up"/> Read more about our cookies <a href="/about/privacy">here</a>. 
</div> <div class="columns large-2 medium-2 small-12 small-only-text-left text-right"> <a class="button button--default" href="/accept_cookies">Accept</a> </div> </div> </div> <div id="main-share-modal" class="reveal-modal reveal-modal-new reveal-modal-new--small" data-reveal aria-labelledby="modalTitle" aria-hidden="true" role="dialog"> <div class="row"> <div class="small-12 columns"> <h2 style="margin: 0;">Share Link</h2> </div> <div class="small-12 columns"> <div class="social-media-links UA_ShareModalLinks" style="text-align: left;"> <a href="/cdn-cgi/l/email-protection#29160f484459125a5c4b434c4a5d146f5b46440c1b19646d79600c1a680c1b190c1b1b6d4c4c590c1b197b4c5a404d5c48450c1b19654c485b4740474e0c1b194f465b0c1b196044484e4c0c1b197b4c4a464e47405d4046470c1a680c1b19680c1b197a5c5b5f4c500f585c465d120f484459124b464d5014415d5d595a1306065e5e5e07444d5940074a4644061811181e101e1f0c1a680c19680c19686d4c4c590c1b197b4c5a404d5c48450c1b19654c485b4740474e0c1b194f465b0c1b196044484e4c0c1b197b4c4a464e47405d4046470c1a680c1b19680c1b197a5c5b5f4c5023236d4c4c590c1b197b4c5a404d5c48450c1b19674c5d5e465b425a0c1b1941485f4c0c1b195b4c4a4c475d45500c1b194b4c4c470c1b195a41465e470c1b195d460c1b195a404e47404f404a48475d45500c1b194044595b465f4c0c1b195d414c0c1b19594c5b4f465b4448474a4c0c1b19464f0c1b19474c5c5b48450c1b19474c5d5e465b425a0c1b195d5b4840474c4d0c1b1946470c1b196044484e4c674c5d0c1b6a0c1b195e405d410c1b195b4c5a5c455d5a0c1b194b4c485d40474e0c1b194845450c1b19595b4c5f40465c5a0c1b19444c5d41464d5a0c1b1946470c1b195d41405a0c1b194d485d485a4c5d0c1b194b500c1b1945485b4e4c0c1b1944485b4e40475a0c1b1940470c1b195d414c0c1b194044484e4c0c1b194a45485a5a404f404a485d4046470c1b195d485a42070c1b1961465e4c5f4c5b0c1b6a0c1b195d414c0c1b19444c484740474e0c1b19464f0c1b195d414c5a4c0c1b194044595b4c5a5a405f4c0c1b19475c444b4c5b5a0c1b1948474d0c1b195d414c405b0c1b1940445945404a485d4046475a0c1b194f465b0c1b194f5c5d5c5b4c0c1b195b4c5a4c485b4a410c1b19485b4c0c1b1947465d0c1b194f5c4545500c1b195c474d4c5b5a5d46464d0c1b19504c5d070c1b1960470c1b195d41405a0c1b195a5c5b5f4c500c1b6a0c1b195e4c0c1b195e4045450c1b195d5b500c1b195d460c1b194c5159454840470c1b195e41485d0c1b196d4c4c590c1b197b4c5a404d5c48450c1b19674c5d5e465b425a0c1b19485b4c0c1b6a0c1b1941465e0c1b195d414c500c1b19484a41404c5f4c0c1b195d414c405b0c1b194c514a4c45454c475d0c1b195b4c5a5c455d5a0c1b6a0c1b1948474d0c1b195e41500c1b195d414c405b0c1b195a5c4a4a4c5a5a4f5c450c1b19404459454c444c475d485d4046470c1b1940470c1b19595b484a5d404a4c0c1b195b4c595b4c5a4c475d5a0c1b19480c1b195a404e47404f404a48475d0c1b19484d5f48474a4c0c1b19465f4c5b0c1b194c51405a5d40474e0c1b195d4c4a414740585c4c5a070c1b197e4c0c1b1948455a460c1b194d405a4a5c5a5a0c1b195a46444c0c1b1946594c470c1b19585c4c5a5d4046475a0c1b195b4c45485d4c4d0c1b195d460c1b195b4c5a404d5c48450c1b19454c485b4740474e0c1b19485a0c1b195e4c45450c1b19485a0c1b1959465a5a404b454c0c1b1948595945404a485d4046475a0c1b19464f0c1b196d4c4c590c1b197b4c5a404d5c48450c1b19674c5d5e465b425a0c1b194b4c5046474d0c1b196044484e4c674c5d070c1b196f4047484545500c1b6a0c1b195e4c0c1b194d405a4a5c5a5a0c1b195a46444c0c1b19405a5a5c4c5a0c1b195d41485d0c1b195a5d4045450c1b19474c4c4d0c1b195d460c1b194b4c0c1b195b4c5a46455f4c4d0c1b194b4c4f465b4c0c1b194d4c4c590c1b195b4c5a404d5c48450c1b19454c485b4740474e0c1b194a48470c1b194b4c0c1b1948595945404c4d0c1b1946470c1b1944465b4c0c1b194a464459454c510c1b19595b464b454c445a07" title="Email"> <i class="fa fa-envelope-square" style="font-size: 30px;"></i> </a> <a 
href="https://twitter.com/intent/tweet?text=Deep+Residual+Learning+for+Image+Recognition%3A+A+Survey&amp;hashtags=mdpiapplsci&amp;url=https%3A%2F%2Fwww.mdpi.com%2F1817976&amp;via=Applsci" onclick="windowOpen(this.href,600,800); return false" title="Twitter" target="_blank" rel="noopener noreferrer"> <i class="fa fa-twitter-x-square" style="font-size: 30px;"></i> </a> <a href=" http://www.linkedin.com/shareArticle?mini=true&amp;url=https%3A%2F%2Fwww.mdpi.com%2F1817976&amp;title=Deep%20Residual%20Learning%20for%20Image%20Recognition%3A%20A%20Survey%26source%3Dhttps%3A%2F%2Fwww.mdpi.com%26summary%3DDeep%20Residual%20Networks%20have%20recently%20been%20shown%20to%20significantly%20improve%20the%20performance%20of%20neural%20networks%20trained%20on%20ImageNet%2C%20with%20results%20beating%20all%20previous%20methods%20on%20this%20dataset%20by%20large%20margins%20in%20the%20image%20classification%20task.%20However%2C%20%5B...%5D" onclick="windowOpen(this.href,600,800); return false" title="LinkedIn" target="_blank" rel="noopener noreferrer"> <i class="fa fa-linkedin-square" style="font-size: 30px;"></i> </a> <a href="https://www.facebook.com/sharer.php?u=https://www.mdpi.com/1817976" title="facebook" target="_blank" rel="noopener noreferrer"> <i class="fa fa-facebook-square" style="font-size: 30px;"></i> </a> <a href="javascript:void(0);" title="Wechat" data-reveal-id="weixin-share-modal"> <i class="fa fa-weixin-square" style="font-size: 26px;"></i> </a> <a href="http://www.reddit.com/submit?url=https://www.mdpi.com/1817976" title="Reddit" target="_blank" rel="noopener noreferrer"> <i class="fa fa-reddit-square" style="font-size: 30px;"></i> </a> <a href="http://www.mendeley.com/import/?url=https://www.mdpi.com/1817976" title="Mendeley" target="_blank" rel="noopener noreferrer"> <i class="fa fa-mendeley-square" style="font-size: 30px;"></i> </a> <a href="http://www.citeulike.org/posturl?url=https://www.mdpi.com/1817976" title="CiteULike" target="_blank" rel="noopener noreferrer"> <i class="fa fa-citeulike-square" style="font-size: 30px;"></i> </a> </div> </div> <div class="small-9 columns"> <input id="js-clipboard-text" type="text" readonly value="https://www.mdpi.com/1817976" /> </div> <div class="small-3 columns text-left"> <a class="button button--color js-clipboard-copy" data-clipboard-target="#js-clipboard-text">Copy</a> </div> </div> <a class="close-reveal-modal" aria-label="Close"> <i class="material-icons">clear</i> </a> </div> <div id="weixin-share-modal" class="reveal-modal reveal-modal-new" data-reveal aria-labelledby="weixin-share-modal-title" aria-hidden="true" role="dialog"> <div class="row"> <div class="small-12 columns"> <h2 id="weixin-share-modal-title" style="margin: 0;">Share</h2> </div> <div class="small-12 columns"> <div class="weixin-qr-code-section"> <?xml version="1.0" standalone="no"?> <!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"> <svg width="300" height="300" version="1.1" xmlns="http://www.w3.org/2000/svg"> <desc>https://www.mdpi.com/1817976</desc> <g id="elements" fill="black" stroke="none"> <rect x="0" y="0" width="12" height="12" /> <rect x="12" y="0" width="12" height="12" /> <rect x="24" y="0" width="12" height="12" /> <rect x="36" y="0" width="12" height="12" /> <rect x="48" y="0" width="12" height="12" /> <rect x="60" y="0" width="12" height="12" /> <rect x="72" y="0" width="12" height="12" /> <rect x="96" y="0" width="12" height="12" /> <rect x="108" y="0" width="12" height="12" /> <rect x="192" y="0" 
width="12" height="12" /> <rect x="216" y="0" width="12" height="12" /> <rect x="228" y="0" width="12" height="12" /> <rect x="240" y="0" width="12" height="12" /> <rect x="252" y="0" width="12" height="12" /> <rect x="264" y="0" width="12" height="12" /> <rect x="276" y="0" width="12" height="12" /> <rect x="288" y="0" width="12" height="12" /> <rect x="0" y="12" width="12" height="12" /> <rect x="72" y="12" width="12" height="12" /> <rect x="156" y="12" width="12" height="12" /> <rect x="168" y="12" width="12" height="12" /> <rect x="216" y="12" width="12" height="12" /> <rect x="288" y="12" width="12" height="12" /> <rect x="0" y="24" width="12" height="12" /> <rect x="24" y="24" width="12" height="12" /> <rect x="36" y="24" width="12" height="12" /> <rect x="48" y="24" width="12" height="12" /> <rect x="72" y="24" width="12" height="12" /> <rect x="96" y="24" width="12" height="12" /> <rect x="132" y="24" width="12" height="12" /> <rect x="144" y="24" width="12" height="12" /> <rect x="156" y="24" width="12" height="12" /> <rect x="168" y="24" width="12" height="12" /> <rect x="180" y="24" width="12" height="12" /> <rect x="216" y="24" width="12" height="12" /> <rect x="240" y="24" width="12" height="12" /> <rect x="252" y="24" width="12" height="12" /> <rect x="264" y="24" width="12" height="12" /> <rect x="288" y="24" width="12" height="12" /> <rect x="0" y="36" width="12" height="12" /> <rect x="24" y="36" width="12" height="12" /> <rect x="36" y="36" width="12" height="12" /> <rect x="48" y="36" width="12" height="12" /> <rect x="72" y="36" width="12" height="12" /> <rect x="96" y="36" width="12" height="12" /> <rect x="108" y="36" width="12" height="12" /> <rect x="132" y="36" width="12" height="12" /> <rect x="144" y="36" width="12" height="12" /> <rect x="156" y="36" width="12" height="12" /> <rect x="168" y="36" width="12" height="12" /> <rect x="216" y="36" width="12" height="12" /> <rect x="240" y="36" width="12" height="12" /> <rect x="252" y="36" width="12" height="12" /> <rect x="264" y="36" width="12" height="12" /> <rect x="288" y="36" width="12" height="12" /> <rect x="0" y="48" width="12" height="12" /> <rect x="24" y="48" width="12" height="12" /> <rect x="36" y="48" width="12" height="12" /> <rect x="48" y="48" width="12" height="12" /> <rect x="72" y="48" width="12" height="12" /> <rect x="96" y="48" width="12" height="12" /> <rect x="108" y="48" width="12" height="12" /> <rect x="120" y="48" width="12" height="12" /> <rect x="132" y="48" width="12" height="12" /> <rect x="156" y="48" width="12" height="12" /> <rect x="168" y="48" width="12" height="12" /> <rect x="180" y="48" width="12" height="12" /> <rect x="216" y="48" width="12" height="12" /> <rect x="240" y="48" width="12" height="12" /> <rect x="252" y="48" width="12" height="12" /> <rect x="264" y="48" width="12" height="12" /> <rect x="288" y="48" width="12" height="12" /> <rect x="0" y="60" width="12" height="12" /> <rect x="72" y="60" width="12" height="12" /> <rect x="108" y="60" width="12" height="12" /> <rect x="168" y="60" width="12" height="12" /> <rect x="216" y="60" width="12" height="12" /> <rect x="288" y="60" width="12" height="12" /> <rect x="0" y="72" width="12" height="12" /> <rect x="12" y="72" width="12" height="12" /> <rect x="24" y="72" width="12" height="12" /> <rect x="36" y="72" width="12" height="12" /> <rect x="48" y="72" width="12" height="12" /> <rect x="60" y="72" width="12" height="12" /> <rect x="72" y="72" width="12" height="12" /> <rect x="96" y="72" width="12" height="12" /> 
<rect x="120" y="72" width="12" height="12" /> <rect x="144" y="72" width="12" height="12" /> <rect x="168" y="72" width="12" height="12" /> <rect x="192" y="72" width="12" height="12" /> <rect x="216" y="72" width="12" height="12" /> <rect x="228" y="72" width="12" height="12" /> <rect x="240" y="72" width="12" height="12" /> <rect x="252" y="72" width="12" height="12" /> <rect x="264" y="72" width="12" height="12" /> <rect x="276" y="72" width="12" height="12" /> <rect x="288" y="72" width="12" height="12" /> <rect x="108" y="84" width="12" height="12" /> <rect x="132" y="84" width="12" height="12" /> <rect x="0" y="96" width="12" height="12" /> <rect x="12" y="96" width="12" height="12" /> <rect x="24" y="96" width="12" height="12" /> <rect x="36" y="96" width="12" height="12" /> <rect x="72" y="96" width="12" height="12" /> <rect x="96" y="96" width="12" height="12" /> <rect x="168" y="96" width="12" height="12" /> <rect x="180" y="96" width="12" height="12" /> <rect x="192" y="96" width="12" height="12" /> <rect x="204" y="96" width="12" height="12" /> <rect x="240" y="96" width="12" height="12" /> <rect x="252" y="96" width="12" height="12" /> <rect x="264" y="96" width="12" height="12" /> <rect x="288" y="96" width="12" height="12" /> <rect x="0" y="108" width="12" height="12" /> <rect x="24" y="108" width="12" height="12" /> <rect x="84" y="108" width="12" height="12" /> <rect x="108" y="108" width="12" height="12" /> <rect x="144" y="108" width="12" height="12" /> <rect x="156" y="108" width="12" height="12" /> <rect x="192" y="108" width="12" height="12" /> <rect x="204" y="108" width="12" height="12" /> <rect x="228" y="108" width="12" height="12" /> <rect x="276" y="108" width="12" height="12" /> <rect x="0" y="120" width="12" height="12" /> <rect x="12" y="120" width="12" height="12" /> <rect x="48" y="120" width="12" height="12" /> <rect x="60" y="120" width="12" height="12" /> <rect x="72" y="120" width="12" height="12" /> <rect x="96" y="120" width="12" height="12" /> <rect x="228" y="120" width="12" height="12" /> <rect x="0" y="132" width="12" height="12" /> <rect x="12" y="132" width="12" height="12" /> <rect x="24" y="132" width="12" height="12" /> <rect x="48" y="132" width="12" height="12" /> <rect x="60" y="132" width="12" height="12" /> <rect x="84" y="132" width="12" height="12" /> <rect x="96" y="132" width="12" height="12" /> <rect x="156" y="132" width="12" height="12" /> <rect x="168" y="132" width="12" height="12" /> <rect x="192" y="132" width="12" height="12" /> <rect x="204" y="132" width="12" height="12" /> <rect x="216" y="132" width="12" height="12" /> <rect x="252" y="132" width="12" height="12" /> <rect x="264" y="132" width="12" height="12" /> <rect x="0" y="144" width="12" height="12" /> <rect x="24" y="144" width="12" height="12" /> <rect x="36" y="144" width="12" height="12" /> <rect x="72" y="144" width="12" height="12" /> <rect x="84" y="144" width="12" height="12" /> <rect x="108" y="144" width="12" height="12" /> <rect x="144" y="144" width="12" height="12" /> <rect x="156" y="144" width="12" height="12" /> <rect x="180" y="144" width="12" height="12" /> <rect x="192" y="144" width="12" height="12" /> <rect x="204" y="144" width="12" height="12" /> <rect x="216" y="144" width="12" height="12" /> <rect x="228" y="144" width="12" height="12" /> <rect x="240" y="144" width="12" height="12" /> <rect x="264" y="144" width="12" height="12" /> <rect x="276" y="144" width="12" height="12" /> <rect x="288" y="144" width="12" height="12" /> <rect x="60" 
y="156" width="12" height="12" /> <rect x="84" y="156" width="12" height="12" /> <rect x="120" y="156" width="12" height="12" /> <rect x="168" y="156" width="12" height="12" /> <rect x="180" y="156" width="12" height="12" /> <rect x="192" y="156" width="12" height="12" /> <rect x="216" y="156" width="12" height="12" /> <rect x="228" y="156" width="12" height="12" /> <rect x="240" y="156" width="12" height="12" /> <rect x="288" y="156" width="12" height="12" /> <rect x="12" y="168" width="12" height="12" /> <rect x="24" y="168" width="12" height="12" /> <rect x="36" y="168" width="12" height="12" /> <rect x="72" y="168" width="12" height="12" /> <rect x="84" y="168" width="12" height="12" /> <rect x="108" y="168" width="12" height="12" /> <rect x="132" y="168" width="12" height="12" /> <rect x="156" y="168" width="12" height="12" /> <rect x="192" y="168" width="12" height="12" /> <rect x="204" y="168" width="12" height="12" /> <rect x="240" y="168" width="12" height="12" /> <rect x="264" y="168" width="12" height="12" /> <rect x="276" y="168" width="12" height="12" /> <rect x="0" y="180" width="12" height="12" /> <rect x="48" y="180" width="12" height="12" /> <rect x="60" y="180" width="12" height="12" /> <rect x="84" y="180" width="12" height="12" /> <rect x="108" y="180" width="12" height="12" /> <rect x="120" y="180" width="12" height="12" /> <rect x="144" y="180" width="12" height="12" /> <rect x="180" y="180" width="12" height="12" /> <rect x="192" y="180" width="12" height="12" /> <rect x="204" y="180" width="12" height="12" /> <rect x="228" y="180" width="12" height="12" /> <rect x="240" y="180" width="12" height="12" /> <rect x="288" y="180" width="12" height="12" /> <rect x="36" y="192" width="12" height="12" /> <rect x="72" y="192" width="12" height="12" /> <rect x="96" y="192" width="12" height="12" /> <rect x="132" y="192" width="12" height="12" /> <rect x="144" y="192" width="12" height="12" /> <rect x="192" y="192" width="12" height="12" /> <rect x="204" y="192" width="12" height="12" /> <rect x="216" y="192" width="12" height="12" /> <rect x="228" y="192" width="12" height="12" /> <rect x="240" y="192" width="12" height="12" /> <rect x="252" y="192" width="12" height="12" /> <rect x="264" y="192" width="12" height="12" /> <rect x="276" y="192" width="12" height="12" /> <rect x="288" y="192" width="12" height="12" /> <rect x="96" y="204" width="12" height="12" /> <rect x="120" y="204" width="12" height="12" /> <rect x="156" y="204" width="12" height="12" /> <rect x="192" y="204" width="12" height="12" /> <rect x="240" y="204" width="12" height="12" /> <rect x="264" y="204" width="12" height="12" /> <rect x="288" y="204" width="12" height="12" /> <rect x="0" y="216" width="12" height="12" /> <rect x="12" y="216" width="12" height="12" /> <rect x="24" y="216" width="12" height="12" /> <rect x="36" y="216" width="12" height="12" /> <rect x="48" y="216" width="12" height="12" /> <rect x="60" y="216" width="12" height="12" /> <rect x="72" y="216" width="12" height="12" /> <rect x="108" y="216" width="12" height="12" /> <rect x="120" y="216" width="12" height="12" /> <rect x="156" y="216" width="12" height="12" /> <rect x="168" y="216" width="12" height="12" /> <rect x="192" y="216" width="12" height="12" /> <rect x="216" y="216" width="12" height="12" /> <rect x="240" y="216" width="12" height="12" /> <rect x="264" y="216" width="12" height="12" /> <rect x="276" y="216" width="12" height="12" /> <rect x="288" y="216" width="12" height="12" /> <rect x="0" y="228" width="12" 
height="12" /> <rect x="72" y="228" width="12" height="12" /> <rect x="108" y="228" width="12" height="12" /> <rect x="132" y="228" width="12" height="12" /> <rect x="144" y="228" width="12" height="12" /> <rect x="156" y="228" width="12" height="12" /> <rect x="180" y="228" width="12" height="12" /> <rect x="192" y="228" width="12" height="12" /> <rect x="240" y="228" width="12" height="12" /> <rect x="276" y="228" width="12" height="12" /> <rect x="0" y="240" width="12" height="12" /> <rect x="24" y="240" width="12" height="12" /> <rect x="36" y="240" width="12" height="12" /> <rect x="48" y="240" width="12" height="12" /> <rect x="72" y="240" width="12" height="12" /> <rect x="108" y="240" width="12" height="12" /> <rect x="156" y="240" width="12" height="12" /> <rect x="192" y="240" width="12" height="12" /> <rect x="204" y="240" width="12" height="12" /> <rect x="216" y="240" width="12" height="12" /> <rect x="228" y="240" width="12" height="12" /> <rect x="240" y="240" width="12" height="12" /> <rect x="252" y="240" width="12" height="12" /> <rect x="276" y="240" width="12" height="12" /> <rect x="0" y="252" width="12" height="12" /> <rect x="24" y="252" width="12" height="12" /> <rect x="36" y="252" width="12" height="12" /> <rect x="48" y="252" width="12" height="12" /> <rect x="72" y="252" width="12" height="12" /> <rect x="96" y="252" width="12" height="12" /> <rect x="108" y="252" width="12" height="12" /> <rect x="120" y="252" width="12" height="12" /> <rect x="132" y="252" width="12" height="12" /> <rect x="144" y="252" width="12" height="12" /> <rect x="168" y="252" width="12" height="12" /> <rect x="216" y="252" width="12" height="12" /> <rect x="240" y="252" width="12" height="12" /> <rect x="252" y="252" width="12" height="12" /> <rect x="264" y="252" width="12" height="12" /> <rect x="276" y="252" width="12" height="12" /> <rect x="288" y="252" width="12" height="12" /> <rect x="0" y="264" width="12" height="12" /> <rect x="24" y="264" width="12" height="12" /> <rect x="36" y="264" width="12" height="12" /> <rect x="48" y="264" width="12" height="12" /> <rect x="72" y="264" width="12" height="12" /> <rect x="96" y="264" width="12" height="12" /> <rect x="120" y="264" width="12" height="12" /> <rect x="132" y="264" width="12" height="12" /> <rect x="144" y="264" width="12" height="12" /> <rect x="168" y="264" width="12" height="12" /> <rect x="180" y="264" width="12" height="12" /> <rect x="192" y="264" width="12" height="12" /> <rect x="204" y="264" width="12" height="12" /> <rect x="216" y="264" width="12" height="12" /> <rect x="240" y="264" width="12" height="12" /> <rect x="264" y="264" width="12" height="12" /> <rect x="276" y="264" width="12" height="12" /> <rect x="0" y="276" width="12" height="12" /> <rect x="72" y="276" width="12" height="12" /> <rect x="96" y="276" width="12" height="12" /> <rect x="120" y="276" width="12" height="12" /> <rect x="144" y="276" width="12" height="12" /> <rect x="156" y="276" width="12" height="12" /> <rect x="204" y="276" width="12" height="12" /> <rect x="216" y="276" width="12" height="12" /> <rect x="240" y="276" width="12" height="12" /> <rect x="264" y="276" width="12" height="12" /> <rect x="0" y="288" width="12" height="12" /> <rect x="12" y="288" width="12" height="12" /> <rect x="24" y="288" width="12" height="12" /> <rect x="36" y="288" width="12" height="12" /> <rect x="48" y="288" width="12" height="12" /> <rect x="60" y="288" width="12" height="12" /> <rect x="72" y="288" width="12" height="12" /> <rect x="96" y="288" 
width="12" height="12" /> <rect x="132" y="288" width="12" height="12" /> <rect x="144" y="288" width="12" height="12" /> <rect x="156" y="288" width="12" height="12" /> <rect x="228" y="288" width="12" height="12" /> <rect x="240" y="288" width="12" height="12" /> <rect x="252" y="288" width="12" height="12" /> <rect x="264" y="288" width="12" height="12" /> <rect x="276" y="288" width="12" height="12" /> <rect x="288" y="288" width="12" height="12" /> </g> </svg> </div> </div> </div> <a class="close-reveal-modal" aria-label="Close"> <i class="material-icons">clear</i> </a> </div> <a href="#" class="back-to-top"><span class="show-for-medium-up">Back to Top</span><span class="show-for-small">Top</span></a> <script data-cfasync="false" src="/cdn-cgi/scripts/5c5dd728/cloudflare-static/email-decode.min.js"></script><script src="https://pub.mdpi-res.com/assets/js/modernizr-2.8.3.min.js?5227e0738f7f421d?1732286508"></script> <script src="https://pub.mdpi-res.com/assets/js/jquery-1.12.4.min.js?4f252523d4af0b47?1732286508"></script> <script src="https://pub.mdpi-res.com/assets/js/foundation-5.5.3.min.js?6b2ec41c18b29054?1732286508"></script> <script src="https://pub.mdpi-res.com/assets/js/foundation-5.5.3.equalizer.min.js?0f6c549b75ec554c?1732286508"></script> <script src="https://pub.mdpi-res.com/assets/js/jquery.multiselect.js?0edd3998731d1091?1732286508"></script> <script src="https://pub.mdpi-res.com/assets/js/jquery.cycle2.min.js?63413052928f97ee?1732286508"></script> <script> // old browser fix - this way the console log rows won't throw (silent) errors in browsers not supporting console log if (!window.console) window.console = {}; if (!window.console.log) window.console.log = function () { }; var currentJournalNameSystem = "applsci"; $(document).ready(function() { $('select.foundation-select').multiselect({ search: true, minHeight: 130, maxHeight: 130, }); $(document).foundation({ orbit: { timer_speed: 4000, }, reveal: { animation: 'fadeAndPop', animation_speed: 100, } }); $(".chosen-select").each(function(element) { var maxSelected = (undefined !== $(this).data('maxselectedoptions') ? 
$(this).data('maxselectedoptions') : 100); $(this).on('chosen:ready', function(event, data) { var select = $(data.chosen.form_field); if (select.attr('id') === 'journal-browser-volume') { $(data.chosen.dropdown).addClass('UI_JournalBrowser_Volume_Options'); } if (select.attr('id') === 'journal-browser-issue') { $(data.chosen.dropdown).addClass('UI_JournalBrowser_Issue_Options'); } }).chosen({ display_disabled_options: false, disable_search_threshold: 7, max_selected_options: maxSelected, width: "100%" }); }); $(".toEncode").each(function(e) { var oldHref = $(this).attr("href"); var newHref = oldHref.replace('.botdefense.please.enable.javascript.','@'); $(this).attr("href", newHref); if (!$(this).hasClass("emailCaptcha")) { $(this).html(newHref.replace('mailto:', '')); } $(this).removeClass("visibility-hidden"); }); $(document).on('opened.fndtn.reveal', '[data-reveal]', function() { $(document).foundation('equalizer', 'reflow'); }); // fix the images that have tag height / width defined // otherwise the default foundation styles overwrite the tag definitions $("img").each(function() { if ($(this).attr('width') != undefined || $(this).attr('height') != undefined) { $(this).addClass("img-fixed"); } }); $("#basic_search, #advanced_search").submit(function(e) { var searchArguments = false; $(this).find("input,select").not("#search,.search-button").each(function() { if (undefined === $(this).val() || "" === $(this).val()) { $(this).attr('name', null); } else { $(this).attr('name'); searchArguments = true; } }); if (!searchArguments) { window.location = $(this).attr('action'); return false; } }); $(".hide-show-desktop-option").click(function(e) { e.preventDefault(); var parentDiv = $(this).closest("div"); $.ajax({ url: $(this).attr('href'), success: function(msg) { parentDiv.removeClass().hide(); } }); }); $(".generic-toggleable-header").click(function(e) { $(this).toggleClass("active"); $(this).next(".generic-toggleable-content").toggleClass("active"); }); /* * handle whole row as a link if the row contains only one visible link */ $("table.new tr").hover(function() { if ($(this).find("td:visible a").length == 1) { $(this).addClass("single-link"); } }, function() { $(this).removeClass("single-link"); }); $("table.new:not(.table-of-tables)").on("click", "tr.single-link", function(e) { var target = $(e.target); if (!e.ctrlKey && !target.is("a")) { $(this).find("td:visible a")[0].click(); } }); $(document).on("click", ".custom-accordion-for-small-screen-link", function(e) { if ($(this).closest("#basic_search").length > 0) { if ($(".search-container__advanced").first().is(":visible")) { openAdvanced() } } if (Foundation.utils.is_small_only()) { if ($(this).hasClass("active")) { $(this).removeClass("active"); $(this).next(".custom-accordion-for-small-screen-content").addClass("show-for-medium-up"); } else { $(this).addClass("active"); $(this).next(".custom-accordion-for-small-screen-content").removeClass("show-for-medium-up"); $(document).foundation('orbit', 'reflow'); } } if (undefined !== $(this).data("callback")) { var customCallback = $(this).data("callback"); func = window[customCallback]; func(); } }); $(document).on("click", ".js-open-small-search", function(e) { e.preventDefault(); $(this).toggleClass("active").closest(".tab-bar").toggleClass("active"); $(".search-container").toggleClass("hide-for-small-down"); }); $(document).on("click", ".js-open-menu", function(e) { $(".search-container").addClass("hide-for-small-down"); }); $(window).on('resize', function() { 
recalculate_main_browser_position(); recalculate_responsive_moving_containers(); }); updateSearchLabelVisibilities(); recalculate_main_browser_position(); recalculate_responsive_moving_containers(); if (window.document.documentMode == 11) { $("<link/>", { rel: "stylesheet", type: "text/css", href: "https://fonts.googleapis.com/icon?family=Material+Icons"}).appendTo("head"); } }); function recalculate_main_browser_position() { if (Foundation.utils.is_small_only()) { if ($("#js-main-top-container").parent("#js-large-main-top-container").length > 0) { $("#js-main-top-container").appendTo($("#js-small-main-top-container")); } } else { if ($("#js-main-top-container").parent("#js-small-main-top-container").length > 0) { $("#js-main-top-container").appendTo($("#js-large-main-top-container")); } } } function recalculate_responsive_moving_containers() { $(".responsive-moving-container.large").each(function() { var previousParent = $(".responsive-moving-container.active[data-id='"+$(this).data("id")+"']"); var movingContent = previousParent.html(); if (Foundation.utils.is_small_only()) { var currentParent = $(".responsive-moving-container.small[data-id='"+$(this).data("id")+"']"); } else if (Foundation.utils.is_medium_only()) { var currentParent = $(".responsive-moving-container.medium[data-id='"+$(this).data("id")+"']"); } else { var currentParent = $(".responsive-moving-container.large[data-id='"+$(this).data("id")+"']"); } if (previousParent.attr("class") !== currentParent.attr("class")) { currentParent.html(movingContent); previousParent.html(); currentParent.addClass("active"); previousParent.removeClass("active"); } }); } // cookies allowed is checked from a) local storage and b) from server separately so that the footer bar doesn't // get included in the custom page caches function checkCookiesAllowed() { var cookiesEnabled = localStorage.getItem("mdpi_cookies_enabled"); if (null === cookiesEnabled) { $.ajax({ url: "/ajax_cookie_value/mdpi_cookies_accepted", success: function(data) { if (data.value) { localStorage.setItem("mdpi_cookies_enabled", true); checkDisplaySurvey(); } else { $(".js-allow-cookies").show(); } } }); } else { checkDisplaySurvey(); } } function checkDisplaySurvey() { } window.addEventListener('CookiebotOnAccept', function (e) { var CookieDate = new Date; if (Cookiebot.consent.preferences) { CookieDate.setFullYear(CookieDate.getFullYear() + 1); document.cookie = "mdpi_layout_type_v2=mobile; path=/; expires=" + CookieDate.toUTCString() + ";"; $(".js-toggle-desktop-layout-link").css("display", "inline-block"); } }, false); window.addEventListener('CookiebotOnDecline', function (e) { if (!Cookiebot.consent.preferences) { $(".js-toggle-desktop-layout-link").hide(); if ("" === "desktop") { window.location = "/toggle_desktop_layout_cookie"; } } }, false); var hash = $(location).attr('hash'); if ("#share" === hash) { if (1 === $("#main-share-modal").length) { $('#main-share-modal').foundation('reveal', 'open'); } } </script> <script src="https://pub.mdpi-res.com/assets/js/lib.js?f8d3d71b3a772f9d?1732286508"></script> <script src="https://pub.mdpi-res.com/assets/js/mdpi.js?c267ce58392b15da?1732286508"></script> <script>var banners_url = 'https://serve.mdpi.com';</script> <script type='text/javascript' src='https://pub.mdpi-res.com/assets/js/ifvisible.min.js?c621d19ecb761212?1732286508'></script> <script src="https://pub.mdpi-res.com/assets/js/xmltohtml/affix.js?ac4ea55275297c15?1732286508"></script> <script 
src="https://pub.mdpi-res.com/assets/js/clipboard.min.js?3f3688138a1b9fc4?1732286508"></script> <script type="text/javascript"> $(document).ready(function() { var helpFunctions = $(".middle-column__help__fixed"); var leftColumnAffix = $(".left-column__fixed"); var middleColumn = $("#middle-column"); var clone = null; helpFunctions.affix({ offset: { top: function() { return middleColumn.offset().top - 8 - (Foundation.utils.is_medium_only() ? 30 : 0); }, bottom: function() { return $("#footer").innerHeight() + 74 + (Foundation.utils.is_medium_only() ? 0 : 0); } } }); if (leftColumnAffix.length > 0) { clone = leftColumnAffix.clone(); clone.addClass("left-column__fixed__affix"); clone.insertBefore(leftColumnAffix); clone.css('width', leftColumnAffix.outerWidth() + 50); clone.affix({ offset: { top: function() { return leftColumnAffix.offset().top - 30 - (Foundation.utils.is_medium_only() ? 50 : 0); }, bottom: function() { return $("#footer").innerHeight() + 92 + (Foundation.utils.is_medium_only() ? 0 : 0); } } }); } $(window).on("resize", function() { if (clone !== null) { clone.css('width', leftColumnAffix.outerWidth() + 50); } }); new ClipboardJS('.js-clipboard-copy'); }); </script> <script src="https://pub.mdpi-res.com/assets/js/jquery-ui-1.13.2.min.js?1e2047978946a1d2?1732286508"></script> <script src="https://pub.mdpi-res.com/assets/js/slick.min.js?d5a61c749e44e471?1732286508"></script> <script> $(document).ready(function() { $(".link-article-menu").click(function(e) { e.preventDefault(); $(this).find('span').toggle(); $(this).next("div").toggleClass("active"); }); $(".js-similarity-related-articles").click(function(e) { e.preventDefault(); if ('' !== $('#recommended-articles-modal').attr('data-url')) { $('#recommended-articles-modal').foundation('reveal', 'open', $('#recommended-articles-modal').attr('data-url')); } }); $.ajax({ url: "/article/908988/similarity-related/show-link", success: function(result) { if (result.show) { $('#recommended-articles-modal').attr('data-url', result.link); $('.js-article-similarity-container').show(); } } }); $(document).on('opened.fndtn.reveal', '[data-reveal]', function() { var modal = $(this); if (modal.attr('id') === "author-biographies-modal") { modal.find('.multiple-items').slick({ slidesToShow: 1, nextArrow: '<a class="slick-next" href="#"><i class="material-icons">chevron_right</i></a>', prevArrow: '<a class="slick-prev" href="#"><i class="material-icons">chevron_left</i></a>', slidesToScroll: 1, draggable: false, }); modal.find('.multiple-items').slick('refresh'); } }); }); </script> <!-- Twitter universal website tag code --> <script> !function(e,t,n,s,u,a){e.twq||(s=e.twq=function(){s.exe?s.exe.apply(s,arguments):s.queue.push(arguments); },s.version='1.1',s.queue=[],u=t.createElement(n),u.async=!0,u.src='//static.ads-twitter.com/uwt.js', a=t.getElementsByTagName(n)[0],a.parentNode.insertBefore(u,a))}(window,document,'script'); // Insert Twitter Pixel ID and Standard Event data below twq('init','o2pa3'); twq('track','PageView'); </script> <!-- End Twitter universal website tag code --> <script> $(document).ready(function() { $(document).on('keyup', function (e) { if (e.keyCode == 27) { var hElem = $(this).find(".annotator-adder"); if (hElem.length){ hElem.css({'visibility':'hidden'}); } else { document.querySelector("hypothesis-adder").shadowRoot.querySelector(".annotator-adder").style.visibility = "hidden"; } } }); }); </script> <script> window.hypothesisConfig = function () { return { sidebarAppUrl: 'https://commenting.mdpi.com/app.html', 
showHighlights: 'whenSidebarOpen' , openSidebar: false , assetRoot: 'https://commentingres.mdpi.com/hypothesis', services: [{ apiUrl: 'https://commenting.mdpi.com/api/', authority: 'mdpi', grantToken: '', doi: '10.3390/app12188972' }], }; }; </script> <script async id="hypothesis_frame"></script> <script type="text/javascript"> if (-1 !== window.location.href.indexOf("?src=")) { window.history.replaceState({}, '', `${location.pathname}`); } $(document).ready(function() { var scifeedCounter = 0; var search = window.location.search; var mathjaxReady = false; // late image file loading $("img[data-lsrc]").each(function() { $(this).attr("src", $(this).data("lsrc")); }); // late mathjax initialization var head = document.getElementsByTagName("head")[0]; var script = document.createElement("script"); script.type = "text/x-mathjax-config"; script[(window.opera ? "innerHTML" : "text")] = "MathJax.Hub.processSectionDelay = 0;\n" + "MathJax.Hub.Config({\n" + " \"menuSettings\": {\n" + " CHTMLpreview: false\n" + " },\n" + " \"CHTML-preview\":{\n" + " disabled: true\n" + " },\n" + " \"HTML-CSS\": {\n" + " scale: 90,\n" + " availableFonts: [],\n" + " preferredFont: null,\n" + " preferredFonts: null,\n" + " webFont:\"Gyre-Pagella\",\n" + " imageFont:'TeX',\n" + " undefinedFamily:\"'Arial Unicode MS',serif\",\n" + " linebreaks: { automatic: false }\n" + " },\n" + " \"TeX\": {\n" + " extensions: ['noErrors.js'],\n" + " noErrors: {\n" + " inlineDelimiters: [\"\",\"\"],\n" + " multiLine: true,\n" + " style: {\n" + " 'font-size': '90%',\n" + " 'text-align': 'left',\n" + " 'color': 'black',\n" + " 'padding': '1px 3px',\n" + " 'border': '1px solid'\n" + " }\n" + " }\n" + " }\n" + "});\n" + "MathJax.Hub.Register.StartupHook('End', function() {\n" + " refreshMathjaxWidths();\n" + " mathjaxReady = true;\n" + "});\n" + "MathJax.Hub.Startup.signal.Interest(function (message) {\n" + " if (message == 'End') {\n" + " var hypoLink = document.getElementById('hypothesis_frame');\n" + " if (null !== hypoLink) {\n" + " hypoLink.setAttribute('src', 'https://commenting.mdpi.com/embed.js');\n" + " }\n" + " }\n" + "});"; head.appendChild(script); script = document.createElement("script"); script.type = "text/javascript"; script.src = "https://pub.mdpi-res.com/bundles/mathjax/MathJax.js?config=TeX-AMS-MML_HTMLorMML"; head.appendChild(script); // article version checker if (0 === search.indexOf('?type=check_update&version=')) { $.ajax({ url: "/2076-3417/12/18/8972" + "/versioncheck" + search, success: function(result) { $(".js-check-update-container").html(result); } }); } $('#feed_option').click(function() { // tracker if ($('#scifeed_clicked').length<1) { $(this).append('<span style="display:none" id="scifeed_clicked">done</span>'); } $('#feed_data').toggle('slide', { direction: 'up'}, '1000'); // slideToggle(700); OR toggle(700) $("#scifeed_error_msg").html('').hide(); $("#scifeed_notice_msg").html('').hide(); }); $('#feed_option').click(function(event) { setTimeout(function(){ var captchaSection = $("#captchaSection"); captchaSection.removeClass('ui-helper-hidden').find('input').prop('disabled', false); // var img = captchaSection.find('img'); // img.attr('src', img.data('url') + "?" 
+ (new Date()).getTime()); // $(".captcha_reload").trigger("click"); var img = document.getElementById('gregwar_captcha_scifeed'); img.src = '/generate-captcha/gcb_captcha?n=' + (new Date()).getTime(); },800); }); $(document).on('click', '.split_feeds', function() { var name = $( this ).attr('name'); var flag = 1 - ($(this).is(":checked")*1); $('.split_feeds').each(function (index) { if ($( this ).attr('name') !== name) { $(this)[0].checked = flag; } }); }); $(document).on('click', '#scifeed_submit, #scifeed_submit1', function(event) { event.preventDefault(); $(".captcha_reload").trigger("click"); $("#scifeed_error_msg").html(""); $("#scifeed_error_msg").hide(); }); $(document).on('click', '.subscription_toggle', function(event) { if ($(this).val() === 'Create SciFeed' && $('#scifeed_hidden_flag').length>0) { event.preventDefault(); // alert('Here there would be a captcha because user is not logged in'); var captchaSection = $("#captchaSection"); if (captchaSection.hasClass('ui-helper-hidden')) { captchaSection.removeClass('ui-helper-hidden').find('input').prop('disabled', false); var img = captchaSection.find('img'); img.attr('src', img.data('url') + "?" + (new Date()).getTime()); $("#reloadCaptcha").trigger("click"); } } }); $(document).on('click', '.scifeed_msg', function(){ $(this).hide(); }); $(document).on('click', '.article-scilit-search', function(e) { e.preventDefault(); var data = $(".article-scilit-search-data").val(); var dataArray = data.split(';').map(function(keyword) { return "(\"" + keyword.trim() + "\")"; }); var searchQuery = dataArray.join(" OR "); var searchUrl = encodeURI("https://www.scilit.net/articles/search?q="+ searchQuery + "&advanced=1&highlight=1"); var win = window.open(searchUrl, '_blank'); if (win) { win.focus(); } else { window.location(searchUrl); } }); display_stats(); citedCount(); follow_goto(); // Select the node that will be observed for mutations const targetNodes = document.getElementsByClassName('hypothesis-count-container'); // Options for the observer (which mutations to observe) const config = { attributes: false, childList: true, subtree: false }; // Callback function to execute when mutations are observed const callback = function(mutationList, observer) { for(const mutation of mutationList) { if (mutation.type === 'childList') { let node = $(mutation.target); if (parseInt(node.html()) > 0) { node.show(); } } } }; // Create an observer instance linked to the callback function const observer = new MutationObserver(callback); // Start observing the target node for configured mutations for(const targetNode of targetNodes) { observer.observe(targetNode, config); } // Select the node that will be observed for mutations const mathjaxTargetNode = document.getElementById('middle-column'); // Callback function to execute when mutations are observed const mathjaxCallback = function(mutationList, observer) { if (mathjaxReady && typeof(MathJax) !== 'undefined') { refreshMathjaxWidths(); } }; // Create an observer instance linked to the callback function const mathjaxObserver = new ResizeObserver(mathjaxCallback); // Start observing the target node for configured mutations mathjaxObserver.observe(mathjaxTargetNode); }); /* END $(document).ready */ function refreshMathjaxWidths() { let width = ($('.html-body').width()*0.9) + "px"; $('.MathJax_Display').css('max-width', width); $('.MJXc-display').css('max-width', width); } function sendScifeedFrom(form) { if (!$('#scifeed_email').val().trim()) { // empty email alert('Please, provide an email for subscribe 
to this scifeed'); return false; } else if (!$('#captchaSection').hasClass('ui-helper-hidden') && !$('#captchaSection').find('input').val().trim()) { // empty captcha alert('Please, fill the captcha field.'); return false; } else if( ((($('#scifeed_form').find('input:checkbox:checked').length)-($('#split_feeds:checked').length))<1) || ($('#scifeed_kwd_txt').length < 0 && !$('#scifeed_kwd_txt').val().trim()) || ($('#scifeed_author_txt').length<0 &&!$('#scifeed_author_txt').val().trim()) ) { alert('You did not select anything to subscribe'); return false; } else if(($('#scifeed_form').find('input:checkbox:checked').length)-($('#split_feeds2:checked').length)<1){ alert("You did not select anything to subscribe"); return false; } else { var url = $('#scifeed_subscribe_url').html(); var formData = $(form).serializeArray(); $.post(url, formData).done(function (data) { if (JSON.parse(data)) { $('.scifeed_msg').hide(); var res = JSON.parse(data); var successFeeds = 0; var errorFeeds = 0; if (res) { $('.scifeed_msg').html(''); $.each(res, function (index, val) { if (val) { if (val.error) { errorFeeds++; $("#scifeed_error_msg").append(index+' - '+val.error+'<br>'); } if (val.notice) // for successful feed creation { successFeeds++; // $("#scifeed_notice_msg").append(index+' - '+val.notice+'<br>'); $("#scifeed_notice_msg").append('<li>'+index+'</li>'); } } }); if (successFeeds>0) { text = $('#scifeed_notice_msg').html(); text = 'The following feed'+(successFeeds>1?'s have':' has')+ ' been sucessfully created:<br><ul>'+ text + '</ul>' +($('#scifeed_hidden_flag').length>0 ? 'You are not logged in, so you probably need to validate '+ (successFeeds>1?'them':' it')+'.<br>' :'' ) +'Please check your email'+(successFeeds>1?'s':'')+' for more details.'; //(successFeeds>1?' for each of them':'')+'.<br>'; $("#scifeed_notice_msg").html(text); $("#scifeed_notice_msg").show(); } if (errorFeeds>0) { $("#scifeed_error_msg").show();; } } $("#feed_data").hide(); } }); } } function follow_goto() { var hashStr = location.hash.replace("#",""); if(typeof hashStr !== 'undefined') { if( hashStr == 'supplementary') { document.getElementById('suppl_id').scrollIntoView(); } if( hashStr == 'citedby') { document.getElementById('cited_id').scrollIntoView(); } } } function cited() { $("#framed_div").toggle('fast', function(){ if ($(this).css('display') != 'none') { var loaded = document.getElementById("loaded"); if(loaded.innerHTML == "No") { // Load Xref result var container = document.getElementById("framed_div"); // This replace the content container.innerHTML = "<img src=\"https://pub.mdpi-res.com/img/loading_circle.gif?9a82694213036313?1732286508\" height=\"20\" width=\"20\" alt=\"Processing...\" style=\"vertical-align:middle; margin-right:0.6em;\">"; var url = "/citedby/10.3390%252Fapp12188972/90"; $.post(url, function(result) { if (result.success) { container.innerHTML = result.view; } loaded.innerHTML = "Yes"; }); } } return true; // for not going at the beginning of the page... }) return true; // for not going at the beginning of the page... 
} function detect_device() { // Added by Bastien (18/08/2014): based on the http://detectmobilebrowsers.com/ detector var check = false; (function(a){if(/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows (ce|phone)|xda|xiino/i.test(a)||/1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\-2|po(ck|rt|se)|prox|psio|pt\-g|qa\-a|qc(07|12|21|32|60|\-[2-7]|i\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\-|oo|p\-)|sdk\/|se(c(\-|0|1)|47|mc|nd|ri)|sgh\-|shar|sie(\-|m)|sk\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\-|v\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\-|tdg\-|tel(i|m)|tim\-|t\-mo|to(pl|sh)|ts(70|m\-|m3|m5)|tx\-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\-|your|zeto|zte\-/i.test(a.substr(0,4)))check = true})(navigator.userAgent||navigator.vendor||window.opera); return check; } function display_stats(){ $("#article_stats_div").toggle(); return false; } /* * Cited By Scopus */ function citedCount(){ $("#framed_div_cited_count").toggle('fast', function(){ if ($(this).css('display') != 'none') { var loaded = document.getElementById("loaded_cite_count"); // to load only once the result! if(loaded.innerHTML == "No") { // Load Xref result var d = document.getElementById("framed_div_cited_count"); // This replace the content d.innerHTML = "<img src=\"https://pub.mdpi-res.com/img/loading_circle.gif?9a82694213036313?1732286508\" height=\"20\" width=\"20\" alt=\"Processing...\" style=\"vertical-align:middle; margin-right:0.6em;\">"; $.ajax({ method : "POST", url : "/cite-count/10.3390%252Fapp12188972", success : function(data) { if (data.succ) { d.innerHTML = data.view; loaded.innerHTML = "Yes"; follow_goto(); } } }); } } // end else return true; // for not going at the beginning of the page... }) return true; // for not going at the beginning of the page... 
