Comparing Vision Transformers and Convolutional Neural Networks for Image Classification: A Literature Review

José Maurício, Inês Domingues and Jorge Bernardino

Applied Sciences 2023, 13(9), 5521; https://doi.org/10.3390/app13095521 (Review, published 28 April 2023)

Abstract

Transformers are models that implement a mechanism of self-attention, individually weighting the importance of each part of the input data. Their use in image classification is still somewhat limited, since researchers have so far favoured Convolutional Neural Networks (CNNs) for vision tasks, while transformers have mainly targeted Natural Language Processing (NLP). This paper therefore presents a literature review of the differences between Vision Transformers (ViT) and CNNs. We review the state of the art that applies the two architectures to image classification and attempt to identify the factors that may influence their performance, based on the datasets used, image size, number of target classes, hardware, and the architectures evaluated together with their top results. The objective of this work is to identify which architecture is better for image classification and under what conditions. The paper also describes the importance of the Multi-Head Attention mechanism in improving the performance of ViT on image classification.

Keywords: transformers; Vision Transformers (ViT); convolutional neural networks; multi-head attention; image classification
type="text/javascript"> (function(){var s = document.getElementsByTagName("script")[0]; var b = document.createElement("script"); b.type = "text/javascript";b.async = true; b.src = "https://snap.licdn.com/li.lms-analytics/insight.min.js"; s.parentNode.insertBefore(b, s);})(); </script> <script type="text/plain" data-cookieconsent="statistics" data-cfasync="false" src="//script.crazyegg.com/pages/scripts/0116/4951.js" async="async" ></script> </head> <body> <div class="direction direction_right" id="small_right" style="border-right-width: 0px; padding:0;"> <i class="fa fa-caret-right fa-2x"></i> </div> <div class="big_direction direction_right" id="big_right" style="border-right-width: 0px;"> <div style="text-align: right;"> Next Article in Journal<br> <div><a href="/2076-3417/13/9/5522">A Numerical Simulation of the Subsidence Reduction Effect of Different Grouting Schemes in Multi-Coal Seam Goafs</a></div> Next Article in Special Issue<br> <div><a href="/2076-3417/13/18/10496">Unsupervised Community Detection Algorithm with Stochastic Competitive Learning Incorporating Local Node Similarity</a></div> </div> </div> <div class="direction" id="small_left" style="border-left-width: 0px"> <i class="fa fa-caret-left fa-2x"></i> </div> <div class="big_direction" id="big_left" style="border-left-width: 0px;"> <div> Previous Article in Journal<br> <div><a href="/2076-3417/13/9/5519">The Influence of Oscillation Parameters on the Formation of Overhead Welding Seams in the Narrow-Gap GMAW Process</a></div> Previous Article in Special Issue<br> <div><a href="/2076-3417/13/7/4503">Integrating Spherical Fuzzy Sets and the Objective Weights Consideration of Risk Factors for Handling Risk-Ranking Issues</a></div> </div> </div> <div style="clear: both;"></div> <div id="menuModal" class="reveal-modal reveal-modal-new reveal-modal-menu" aria-hidden="true" data-reveal role="dialog"> <div class="menu-container"> <div class="UI_NavMenu"> <div class="content__container " > <div class="custom-accordion-for-small-screen-link " > <h2>Journals</h2> </div> <div class="target-item custom-accordion-for-small-screen-content show-for-medium-up"> <div class="menu-container__links"> <div style="width: 100%; float: left;"> <a href="/about/journals">Active Journals</a> <a href="/about/journalfinder">Find a Journal</a> <a href="/about/journals/proposal">Journal Proposal</a> <a href="/about/proceedings">Proceedings Series</a> </div> </div> </div> </div> <a href="/topics"> <h2>Topics</h2> </a> <div class="content__container " > <div class="custom-accordion-for-small-screen-link " > <h2>Information</h2> </div> <div class="target-item custom-accordion-for-small-screen-content show-for-medium-up"> <div class="menu-container__links"> <div style="width: 100%; max-width: 200px; float: left;"> <a href="/authors">For Authors</a> <a href="/reviewers">For Reviewers</a> <a href="/editors">For Editors</a> <a href="/librarians">For Librarians</a> <a href="/publishing_services">For Publishers</a> <a href="/societies">For Societies</a> <a href="/conference_organizers">For Conference Organizers</a> </div> <div style="width: 100%; max-width: 250px; float: left;"> <a href="/openaccess">Open Access Policy</a> <a href="/ioap">Institutional Open Access Program</a> <a href="/special_issues_guidelines">Special Issues Guidelines</a> <a href="/editorial_process">Editorial Process</a> <a href="/ethics">Research and Publication Ethics</a> <a href="/apc">Article Processing Charges</a> <a href="/awards">Awards</a> <a href="/testimonials">Testimonials</a> 
</div> </div> </div> </div> <a href="/authors/english"> <h2>Editing Services</h2> </a> <div class="content__container " > <div class="custom-accordion-for-small-screen-link " > <h2>Initiatives</h2> </div> <div class="target-item custom-accordion-for-small-screen-content show-for-medium-up"> <div class="menu-container__links"> <div style="width: 100%; float: left;"> <a href="https://sciforum.net" target="_blank" rel="noopener noreferrer">Sciforum</a> <a href="https://www.mdpi.com/books" target="_blank" rel="noopener noreferrer">MDPI Books</a> <a href="https://www.preprints.org" target="_blank" rel="noopener noreferrer">Preprints.org</a> <a href="https://www.scilit.net" target="_blank" rel="noopener noreferrer">Scilit</a> <a href="https://sciprofiles.com" target="_blank" rel="noopener noreferrer">SciProfiles</a> <a href="https://encyclopedia.pub" target="_blank" rel="noopener noreferrer">Encyclopedia</a> <a href="https://jams.pub" target="_blank" rel="noopener noreferrer">JAMS</a> <a href="/about/proceedings">Proceedings Series</a> </div> </div> </div> </div> <div class="content__container " > <div class="custom-accordion-for-small-screen-link " > <h2>About</h2> </div> <div class="target-item custom-accordion-for-small-screen-content show-for-medium-up"> <div class="menu-container__links"> <div style="width: 100%; float: left;"> <a href="/about">Overview</a> <a href="/about/contact">Contact</a> <a href="https://careers.mdpi.com" target="_blank" rel="noopener noreferrer">Careers</a> <a href="/about/announcements">News</a> <a href="/about/press">Press</a> <a href="http://blog.mdpi.com/" target="_blank" rel="noopener noreferrer">Blog</a> </div> </div> </div> </div> </div> <div class="menu-container__buttons"> <a class="button UA_SignInUpButton" href="/user/login">Sign In / Sign Up</a> </div> </div> </div> <div id="captchaModal" class="reveal-modal reveal-modal-new reveal-modal-new--small" data-reveal aria-label="Captcha" aria-hidden="true" role="dialog"></div> <div id="actionDisabledModal" class="reveal-modal" data-reveal aria-labelledby="actionDisableModalTitle" aria-hidden="true" role="dialog" style="width: 300px;"> <h2 id="actionDisableModalTitle">Notice</h2> <form action="/email/captcha" method="post" id="emailCaptchaForm"> <div class="row"> <div id="js-action-disabled-modal-text" class="small-12 columns"> </div> <div id="js-action-disabled-modal-submit" class="small-12 columns" style="margin-top: 10px; display: none;"> You can make submissions to other journals <a href="https://susy.mdpi.com/user/manuscripts/upload">here</a>. </div> </div> </form> <a class="close-reveal-modal" aria-label="Close"> <i class="material-icons">clear</i> </a> </div> <div id="rssNotificationModal" class="reveal-modal reveal-modal-new" data-reveal aria-labelledby="rssNotificationModalTitle" aria-hidden="true" role="dialog"> <div class="row"> <div class="small-12 columns"> <h2 id="rssNotificationModalTitle">Notice</h2> <p> You are accessing a machine-readable page. In order to be human-readable, please install an RSS reader. 
</p> </div> </div> <div class="row"> <div class="small-12 columns"> <a class="button button--color js-rss-notification-confirm">Continue</a> <a class="button button--grey" onclick="$(this).closest('.reveal-modal').find('.close-reveal-modal').click(); return false;">Cancel</a> </div> </div> <a class="close-reveal-modal" aria-label="Close"> <i class="material-icons">clear</i> </a> </div> <div id="drop-article-label-openaccess" class="f-dropdown medium" data-dropdown-content aria-hidden="true" tabindex="-1"> <p> All articles published by MDPI are made immediately available worldwide under an open access license. No special permission is required to reuse all or part of the article published by MDPI, including figures and tables. For articles published under an open access Creative Common CC BY license, any part of the article may be reused without permission provided that the original article is clearly cited. For more information, please refer to <a href="https://www.mdpi.com/openaccess">https://www.mdpi.com/openaccess</a>. </p> </div> <div id="drop-article-label-feature" class="f-dropdown medium" data-dropdown-content aria-hidden="true" tabindex="-1"> <p> Feature papers represent the most advanced research with significant potential for high impact in the field. A Feature Paper should be a substantial original Article that involves several techniques or approaches, provides an outlook for future research directions and describes possible research applications. </p> <p> Feature papers are submitted upon individual invitation or recommendation by the scientific editors and must receive positive feedback from the reviewers. </p> </div> <div id="drop-article-label-choice" class="f-dropdown medium" data-dropdown-content aria-hidden="true" tabindex="-1"> <p> Editor’s Choice articles are based on recommendations by the scientific editors of MDPI journals from around the world. Editors select a small number of articles recently published in the journal that they believe will be particularly interesting to readers, or important in the respective research area. The aim is to provide a snapshot of some of the most exciting work published in the various research areas of the journal. <div style="margin-top: -10px;"> <div id="drop-article-label-choice-journal-link" style="display: none; margin-top: -10px; padding-top: 10px;"> </div> </div> </p> </div> <div id="drop-article-label-resubmission" class="f-dropdown medium" data-dropdown-content aria-hidden="true" tabindex="-1"> <p> Original Submission Date Received: <span id="drop-article-label-resubmission-date"></span>. </p> </div> <div id="container"> <noscript> <div id="no-javascript"> You seem to have javascript disabled. Please note that many of the page functionalities won't work as expected without javascript enabled. 
</div> </noscript> <div class="fixed"> <nav class="tab-bar show-for-medium-down"> <div class="row full-width collapse"> <div class="medium-3 small-4 columns"> <a href="/"> <img class="full-size-menu__mdpi-logo" src="https://pub.mdpi-res.com/img/design/mdpi-pub-logo-black-small1.svg?da3a8dcae975a41c?1732087095" style="width: 64px;" title="MDPI Open Access Journals"> </a> </div> <div class="medium-3 small-4 columns right-aligned"> <div class="show-for-medium-down"> <a href="#" style="display: none;"> <i class="material-icons" onclick="$('#menuModal').foundation('reveal', 'close'); return false;">clear</i> </a> <a class="js-toggle-desktop-layout-link" title="Toggle desktop layout" style="display: none;" href="/toggle_desktop_layout_cookie"> <i class="material-icons">zoom_out_map</i> </a> <a href="#" class="js-open-small-search open-small-search"> <i class="material-icons show-for-small only">search</i> </a> <a title="MDPI main page" class="js-open-menu" data-reveal-id="menuModal" href="#"> <i class="material-icons">menu</i> </a> </div> </div> </div> </nav> </div> <section class="main-section"> <header> <div class="full-size-menu show-for-large-up"> <div class="row full-width"> <div class="large-1 columns"> <a href="/"> <img class="full-size-menu__mdpi-logo" src="https://pub.mdpi-res.com/img/design/mdpi-pub-logo-black-small1.svg?da3a8dcae975a41c?1732087095" title="MDPI Open Access Journals"> </a> </div> <div class="large-8 columns text-right UI_NavMenu"> <ul> <li class="menu-item"> <a href="/about/journals" data-dropdown="journals-dropdown" aria-controls="journals-dropdown" aria-expanded="false" data-options="is_hover: true; hover_timeout: 200">Journals</a> <ul id="journals-dropdown" class="f-dropdown dropdown-wrapper dropdown-wrapper__small" data-dropdown-content aria-hidden="true" tabindex="-1"> <li> <div class="row"> <div class="small-12 columns"> <ul> <li> <a href="/about/journals"> Active Journals </a> </li> <li> <a href="/about/journalfinder"> Find a Journal </a> </li> <li> <a href="/about/journals/proposal"> Journal Proposal </a> </li> <li> <a href="/about/proceedings"> Proceedings Series </a> </li> </ul> </div> </div> </li> </ul> </li> <li class="menu-item"> <a href="/topics">Topics</a> </li> <li class="menu-item"> <a href="/authors" data-dropdown="information-dropdown" aria-controls="information-dropdown" aria-expanded="false" data-options="is_hover:true; hover_timeout:200">Information</a> <ul id="information-dropdown" class="f-dropdown dropdown-wrapper" data-dropdown-content aria-hidden="true" tabindex="-1"> <li> <div class="row"> <div class="small-5 columns right-border"> <ul> <li> <a href="/authors">For Authors</a> </li> <li> <a href="/reviewers">For Reviewers</a> </li> <li> <a href="/editors">For Editors</a> </li> <li> <a href="/librarians">For Librarians</a> </li> <li> <a href="/publishing_services">For Publishers</a> </li> <li> <a href="/societies">For Societies</a> </li> <li> <a href="/conference_organizers">For Conference Organizers</a> </li> </ul> </div> <div class="small-7 columns"> <ul> <li> <a href="/openaccess">Open Access Policy</a> </li> <li> <a href="/ioap">Institutional Open Access Program</a> </li> <li> <a href="/special_issues_guidelines">Special Issues Guidelines</a> </li> <li> <a href="/editorial_process">Editorial Process</a> </li> <li> <a href="/ethics">Research and Publication Ethics</a> </li> <li> <a href="/apc">Article Processing Charges</a> </li> <li> <a href="/awards">Awards</a> </li> <li> <a href="/testimonials">Testimonials</a> </li> </ul> </div> </div> 
</li> </ul> </li> <li class="menu-item"> <a href="/authors/english">Editing Services</a> </li> <li class="menu-item"> <a href="/about/initiatives" data-dropdown="initiatives-dropdown" aria-controls="initiatives-dropdown" aria-expanded="false" data-options="is_hover: true; hover_timeout: 200">Initiatives</a> <ul id="initiatives-dropdown" class="f-dropdown dropdown-wrapper dropdown-wrapper__small" data-dropdown-content aria-hidden="true" tabindex="-1"> <li> <div class="row"> <div class="small-12 columns"> <ul> <li> <a href="https://sciforum.net" target="_blank" rel="noopener noreferrer"> Sciforum </a> </li> <li> <a href="https://www.mdpi.com/books" target="_blank" rel="noopener noreferrer"> MDPI Books </a> </li> <li> <a href="https://www.preprints.org" target="_blank" rel="noopener noreferrer"> Preprints.org </a> </li> <li> <a href="https://www.scilit.net" target="_blank" rel="noopener noreferrer"> Scilit </a> </li> <li> <a href="https://sciprofiles.com" target="_blank" rel="noopener noreferrer"> SciProfiles </a> </li> <li> <a href="https://encyclopedia.pub" target="_blank" rel="noopener noreferrer"> Encyclopedia </a> </li> <li> <a href="https://jams.pub" target="_blank" rel="noopener noreferrer"> JAMS </a> </li> <li> <a href="/about/proceedings"> Proceedings Series </a> </li> </ul> </div> </div> </li> </ul> </li> <li class="menu-item"> <a href="/about" data-dropdown="about-dropdown" aria-controls="about-dropdown" aria-expanded="false" data-options="is_hover: true; hover_timeout: 200">About</a> <ul id="about-dropdown" class="f-dropdown dropdown-wrapper dropdown-wrapper__small" data-dropdown-content aria-hidden="true" tabindex="-1"> <li> <div class="row"> <div class="small-12 columns"> <ul> <li> <a href="/about"> Overview </a> </li> <li> <a href="/about/contact"> Contact </a> </li> <li> <a href="https://careers.mdpi.com" target="_blank" rel="noopener noreferrer"> Careers </a> </li> <li> <a href="/about/announcements"> News </a> </li> <li> <a href="/about/press"> Press </a> </li> <li> <a href="http://blog.mdpi.com/" target="_blank" rel="noopener noreferrer"> Blog </a> </li> </ul> </div> </div> </li> </ul> </li> </ul> </div> <div class="large-3 columns text-right full-size-menu__buttons"> <div> <a class="button button--default-inversed UA_SignInUpButton" href="/user/login">Sign In / Sign Up</a> <a class="button button--default js-journal-active-only-link js-journal-active-only-submit-link UC_NavSubmitButton" href=" https://susy.mdpi.com/user/manuscripts/upload?journal=applsci " data-disabledmessage="new submissions are not possible.">Submit</a> </div> </div> </div> </div> <div class="header-divider">&nbsp;</div> <div class="search-container hide-for-small-down row search-container__homepage-scheme"> <form id="basic_search" style="background-color: inherit !important;" class="large-12 medium-12 columns " action="/search" method="get"> <div class="row search-container__main-elements"> <div class="large-2 medium-2 small-12 columns text-right1 small-only-text-left"> <div class="show-for-medium-up"> <div class="search-input-label">&nbsp;</div> </div> <span class="search-container__title">Search<span class="hide-for-medium"> for Articles</span><span class="hide-for-small">:</span></span> </div> <div class="custom-accordion-for-small-screen-content"> <div class="large-2 medium-2 small-6 columns "> <div class=""> <div class="search-input-label">Title / Keyword</div> </div> <input type="text" placeholder="Title / Keyword" id="q" tabindex="1" name="q" value="" /> </div> <div class="large-2 medium-2 
small-6 columns "> <div class=""> <div class="search-input-label">Author / Affiliation / Email</div> </div> <input type="text" id="authors" placeholder="Author / Affiliation / Email" tabindex="2" name="authors" value="" /> </div> <div class="large-2 medium-2 small-6 columns "> <div class=""> <div class="search-input-label">Journal</div> </div> <select id="journal" tabindex="3" name="journal" class="chosen-select"> <option value="">All Journals</option> <option value="acoustics" > Acoustics </option> <option value="amh" > Acta Microbiologica Hellenica (AMH) </option> <option value="actuators" > Actuators </option> <option value="admsci" > Administrative Sciences </option> <option value="adolescents" > Adolescents </option> <option value="arm" > Advances in Respiratory Medicine (ARM) </option> <option value="aerobiology" > Aerobiology </option> <option value="aerospace" > Aerospace </option> <option value="agriculture" > Agriculture </option> <option value="agriengineering" > AgriEngineering </option> <option value="agrochemicals" > Agrochemicals </option> <option value="agronomy" > Agronomy </option> <option value="ai" > AI </option> <option value="air" > Air </option> <option value="algorithms" > Algorithms </option> <option value="allergies" > Allergies </option> <option value="alloys" > Alloys </option> <option value="analytica" > Analytica </option> <option value="analytics" > Analytics </option> <option value="anatomia" > Anatomia </option> <option value="anesthres" > Anesthesia Research </option> <option value="animals" > Animals </option> <option value="antibiotics" > Antibiotics </option> <option value="antibodies" > Antibodies </option> <option value="antioxidants" > Antioxidants </option> <option value="applbiosci" > Applied Biosciences </option> <option value="applmech" > Applied Mechanics </option> <option value="applmicrobiol" > Applied Microbiology </option> <option value="applnano" > Applied Nano </option> <option value="applsci" selected='selected'> Applied Sciences </option> <option value="asi" > Applied System Innovation (ASI) </option> <option value="appliedchem" > AppliedChem </option> <option value="appliedmath" > AppliedMath </option> <option value="aquacj" > Aquaculture Journal </option> <option value="architecture" > Architecture </option> <option value="arthropoda" > Arthropoda </option> <option value="arts" > Arts </option> <option value="astronomy" > Astronomy </option> <option value="atmosphere" > Atmosphere </option> <option value="atoms" > Atoms </option> <option value="audiolres" > Audiology Research </option> <option value="automation" > Automation </option> <option value="axioms" > Axioms </option> <option value="bacteria" > Bacteria </option> <option value="batteries" > Batteries </option> <option value="behavsci" > Behavioral Sciences </option> <option value="beverages" > Beverages </option> <option value="BDCC" > Big Data and Cognitive Computing (BDCC) </option> <option value="biochem" > BioChem </option> <option value="bioengineering" > Bioengineering </option> <option value="biologics" > Biologics </option> <option value="biology" > Biology </option> <option value="blsf" > Biology and Life Sciences Forum </option> <option value="biomass" > Biomass </option> <option value="biomechanics" > Biomechanics </option> <option value="biomed" > BioMed </option> <option value="biomedicines" > Biomedicines </option> <option value="biomedinformatics" > BioMedInformatics </option> <option value="biomimetics" > Biomimetics </option> <option value="biomolecules" > 
Biomolecules </option> <option value="biophysica" > Biophysica </option> <option value="biosensors" > Biosensors </option> <option value="biotech" > BioTech </option> <option value="birds" > Birds </option> <option value="blockchains" > Blockchains </option> <option value="brainsci" > Brain Sciences </option> <option value="buildings" > Buildings </option> <option value="businesses" > Businesses </option> <option value="carbon" > C </option> <option value="cancers" > Cancers </option> <option value="cardiogenetics" > Cardiogenetics </option> <option value="catalysts" > Catalysts </option> <option value="cells" > Cells </option> <option value="ceramics" > Ceramics </option> <option value="challenges" > Challenges </option> <option value="ChemEngineering" > ChemEngineering </option> <option value="chemistry" > Chemistry </option> <option value="chemproc" > Chemistry Proceedings </option> <option value="chemosensors" > Chemosensors </option> <option value="children" > Children </option> <option value="chips" > Chips </option> <option value="civileng" > CivilEng </option> <option value="cleantechnol" > Clean Technologies (Clean Technol.) </option> <option value="climate" > Climate </option> <option value="ctn" > Clinical and Translational Neuroscience (CTN) </option> <option value="clinbioenerg" > Clinical Bioenergetics </option> <option value="clinpract" > Clinics and Practice </option> <option value="clockssleep" > Clocks &amp; Sleep </option> <option value="coasts" > Coasts </option> <option value="coatings" > Coatings </option> <option value="colloids" > Colloids and Interfaces </option> <option value="colorants" > Colorants </option> <option value="commodities" > Commodities </option> <option value="complications" > Complications </option> <option value="compounds" > Compounds </option> <option value="computation" > Computation </option> <option value="csmf" > Computer Sciences &amp; Mathematics Forum </option> <option value="computers" > Computers </option> <option value="condensedmatter" > Condensed Matter </option> <option value="conservation" > Conservation </option> <option value="constrmater" > Construction Materials </option> <option value="cmd" > Corrosion and Materials Degradation (CMD) </option> <option value="cosmetics" > Cosmetics </option> <option value="covid" > COVID </option> <option value="crops" > Crops </option> <option value="cryo" > Cryo </option> <option value="cryptography" > Cryptography </option> <option value="crystals" > Crystals </option> <option value="cimb" > Current Issues in Molecular Biology (CIMB) </option> <option value="curroncol" > Current Oncology </option> <option value="dairy" > Dairy </option> <option value="data" > Data </option> <option value="dentistry" > Dentistry Journal </option> <option value="dermato" > Dermato </option> <option value="dermatopathology" > Dermatopathology </option> <option value="designs" > Designs </option> <option value="diabetology" > Diabetology </option> <option value="diagnostics" > Diagnostics </option> <option value="dietetics" > Dietetics </option> <option value="digital" > Digital </option> <option value="disabilities" > Disabilities </option> <option value="diseases" > Diseases </option> <option value="diversity" > Diversity </option> <option value="dna" > DNA </option> <option value="drones" > Drones </option> <option value="ddc" > Drugs and Drug Candidates (DDC) </option> <option value="dynamics" > Dynamics </option> <option value="earth" > Earth </option> <option value="ecologies" > Ecologies </option> <option 
value="econometrics" > Econometrics </option> <option value="economies" > Economies </option> <option value="education" > Education Sciences </option> <option value="electricity" > Electricity </option> <option value="electrochem" > Electrochem </option> <option value="electronicmat" > Electronic Materials </option> <option value="electronics" > Electronics </option> <option value="ecm" > Emergency Care and Medicine </option> <option value="encyclopedia" > Encyclopedia </option> <option value="endocrines" > Endocrines </option> <option value="energies" > Energies </option> <option value="esa" > Energy Storage and Applications (ESA) </option> <option value="eng" > Eng </option> <option value="engproc" > Engineering Proceedings </option> <option value="entropy" > Entropy </option> <option value="environsciproc" > Environmental Sciences Proceedings </option> <option value="environments" > Environments </option> <option value="epidemiologia" > Epidemiologia </option> <option value="epigenomes" > Epigenomes </option> <option value="ebj" > European Burn Journal (EBJ) </option> <option value="ejihpe" > European Journal of Investigation in Health, Psychology and Education (EJIHPE) </option> <option value="fermentation" > Fermentation </option> <option value="fibers" > Fibers </option> <option value="fintech" > FinTech </option> <option value="fire" > Fire </option> <option value="fishes" > Fishes </option> <option value="fluids" > Fluids </option> <option value="foods" > Foods </option> <option value="forecasting" > Forecasting </option> <option value="forensicsci" > Forensic Sciences </option> <option value="forests" > Forests </option> <option value="fossstud" > Fossil Studies </option> <option value="foundations" > Foundations </option> <option value="fractalfract" > Fractal and Fractional (Fractal Fract) </option> <option value="fuels" > Fuels </option> <option value="future" > Future </option> <option value="futureinternet" > Future Internet </option> <option value="futurepharmacol" > Future Pharmacology </option> <option value="futuretransp" > Future Transportation </option> <option value="galaxies" > Galaxies </option> <option value="games" > Games </option> <option value="gases" > Gases </option> <option value="gastroent" > Gastroenterology Insights </option> <option value="gastrointestdisord" > Gastrointestinal Disorders </option> <option value="gastronomy" > Gastronomy </option> <option value="gels" > Gels </option> <option value="genealogy" > Genealogy </option> <option value="genes" > Genes </option> <option value="geographies" > Geographies </option> <option value="geohazards" > GeoHazards </option> <option value="geomatics" > Geomatics </option> <option value="geometry" > Geometry </option> <option value="geosciences" > Geosciences </option> <option value="geotechnics" > Geotechnics </option> <option value="geriatrics" > Geriatrics </option> <option value="glacies" > Glacies </option> <option value="gucdd" > Gout, Urate, and Crystal Deposition Disease (GUCDD) </option> <option value="grasses" > Grasses </option> <option value="hardware" > Hardware </option> <option value="healthcare" > Healthcare </option> <option value="hearts" > Hearts </option> <option value="hemato" > Hemato </option> <option value="hematolrep" > Hematology Reports </option> <option value="heritage" > Heritage </option> <option value="histories" > Histories </option> <option value="horticulturae" > Horticulturae </option> <option value="hospitals" > Hospitals </option> <option value="humanities" > Humanities 
</option> <option value="humans" > Humans </option> <option value="hydrobiology" > Hydrobiology </option> <option value="hydrogen" > Hydrogen </option> <option value="hydrology" > Hydrology </option> <option value="hygiene" > Hygiene </option> <option value="immuno" > Immuno </option> <option value="idr" > Infectious Disease Reports </option> <option value="informatics" > Informatics </option> <option value="information" > Information </option> <option value="infrastructures" > Infrastructures </option> <option value="inorganics" > Inorganics </option> <option value="insects" > Insects </option> <option value="instruments" > Instruments </option> <option value="iic" > Intelligent Infrastructure and Construction </option> <option value="ijerph" > International Journal of Environmental Research and Public Health (IJERPH) </option> <option value="ijfs" > International Journal of Financial Studies (IJFS) </option> <option value="ijms" > International Journal of Molecular Sciences (IJMS) </option> <option value="IJNS" > International Journal of Neonatal Screening (IJNS) </option> <option value="ijpb" > International Journal of Plant Biology (IJPB) </option> <option value="ijt" > International Journal of Topology </option> <option value="ijtm" > International Journal of Translational Medicine (IJTM) </option> <option value="ijtpp" > International Journal of Turbomachinery, Propulsion and Power (IJTPP) </option> <option value="ime" > International Medical Education (IME) </option> <option value="inventions" > Inventions </option> <option value="IoT" > IoT </option> <option value="ijgi" > ISPRS International Journal of Geo-Information (IJGI) </option> <option value="J" > J </option> <option value="jal" > Journal of Ageing and Longevity (JAL) </option> <option value="jcdd" > Journal of Cardiovascular Development and Disease (JCDD) </option> <option value="jcto" > Journal of Clinical &amp; Translational Ophthalmology (JCTO) </option> <option value="jcm" > Journal of Clinical Medicine (JCM) </option> <option value="jcs" > Journal of Composites Science (J. Compos. Sci.) </option> <option value="jcp" > Journal of Cybersecurity and Privacy (JCP) </option> <option value="jdad" > Journal of Dementia and Alzheimer&#039;s Disease (JDAD) </option> <option value="jdb" > Journal of Developmental Biology (JDB) </option> <option value="jeta" > Journal of Experimental and Theoretical Analyses (JETA) </option> <option value="jfb" > Journal of Functional Biomaterials (JFB) </option> <option value="jfmk" > Journal of Functional Morphology and Kinesiology (JFMK) </option> <option value="jof" > Journal of Fungi (JoF) </option> <option value="jimaging" > Journal of Imaging (J. Imaging) </option> <option value="jintelligence" > Journal of Intelligence (J. Intell.) 
</option> <option value="jlpea" > Journal of Low Power Electronics and Applications (JLPEA) </option> <option value="jmmp" > Journal of Manufacturing and Materials Processing (JMMP) </option> <option value="jmse" > Journal of Marine Science and Engineering (JMSE) </option> <option value="jmahp" > Journal of Market Access &amp; Health Policy (JMAHP) </option> <option value="jmp" > Journal of Molecular Pathology (JMP) </option> <option value="jnt" > Journal of Nanotheranostics (JNT) </option> <option value="jne" > Journal of Nuclear Engineering (JNE) </option> <option value="ohbm" > Journal of Otorhinolaryngology, Hearing and Balance Medicine (JOHBM) </option> <option value="jop" > Journal of Parks </option> <option value="jpm" > Journal of Personalized Medicine (JPM) </option> <option value="jpbi" > Journal of Pharmaceutical and BioTech Industry (JPBI) </option> <option value="jor" > Journal of Respiration (JoR) </option> <option value="jrfm" > Journal of Risk and Financial Management (JRFM) </option> <option value="jsan" > Journal of Sensor and Actuator Networks (JSAN) </option> <option value="joma" > Journal of the Oman Medical Association (JOMA) </option> <option value="jtaer" > Journal of Theoretical and Applied Electronic Commerce Research (JTAER) </option> <option value="jvd" > Journal of Vascular Diseases (JVD) </option> <option value="jox" > Journal of Xenobiotics (JoX) </option> <option value="jzbg" > Journal of Zoological and Botanical Gardens (JZBG) </option> <option value="journalmedia" > Journalism and Media </option> <option value="kidneydial" > Kidney and Dialysis </option> <option value="kinasesphosphatases" > Kinases and Phosphatases </option> <option value="knowledge" > Knowledge </option> <option value="labmed" > LabMed </option> <option value="laboratories" > Laboratories </option> <option value="land" > Land </option> <option value="languages" > Languages </option> <option value="laws" > Laws </option> <option value="life" > Life </option> <option value="limnolrev" > Limnological Review </option> <option value="lipidology" > Lipidology </option> <option value="liquids" > Liquids </option> <option value="literature" > Literature </option> <option value="livers" > Livers </option> <option value="logics" > Logics </option> <option value="logistics" > Logistics </option> <option value="lubricants" > Lubricants </option> <option value="lymphatics" > Lymphatics </option> <option value="make" > Machine Learning and Knowledge Extraction (MAKE) </option> <option value="machines" > Machines </option> <option value="macromol" > Macromol </option> <option value="magnetism" > Magnetism </option> <option value="magnetochemistry" > Magnetochemistry </option> <option value="marinedrugs" > Marine Drugs </option> <option value="materials" > Materials </option> <option value="materproc" > Materials Proceedings </option> <option value="mca" > Mathematical and Computational Applications (MCA) </option> <option value="mathematics" > Mathematics </option> <option value="medsci" > Medical Sciences </option> <option value="msf" > Medical Sciences Forum </option> <option value="medicina" > Medicina </option> <option value="medicines" > Medicines </option> <option value="membranes" > Membranes </option> <option value="merits" > Merits </option> <option value="metabolites" > Metabolites </option> <option value="metals" > Metals </option> <option value="meteorology" > Meteorology </option> <option value="methane" > Methane </option> <option value="mps" > Methods and Protocols (MPs) </option> 
<option value="metrics" > Metrics </option> <option value="metrology" > Metrology </option> <option value="micro" > Micro </option> <option value="microbiolres" > Microbiology Research </option> <option value="micromachines" > Micromachines </option> <option value="microorganisms" > Microorganisms </option> <option value="microplastics" > Microplastics </option> <option value="minerals" > Minerals </option> <option value="mining" > Mining </option> <option value="modelling" > Modelling </option> <option value="mmphys" > Modern Mathematical Physics </option> <option value="molbank" > Molbank </option> <option value="molecules" > Molecules </option> <option value="mti" > Multimodal Technologies and Interaction (MTI) </option> <option value="muscles" > Muscles </option> <option value="nanoenergyadv" > Nanoenergy Advances </option> <option value="nanomanufacturing" > Nanomanufacturing </option> <option value="nanomaterials" > Nanomaterials </option> <option value="ndt" > NDT </option> <option value="network" > Network </option> <option value="neuroglia" > Neuroglia </option> <option value="neurolint" > Neurology International </option> <option value="neurosci" > NeuroSci </option> <option value="nitrogen" > Nitrogen </option> <option value="ncrna" > Non-Coding RNA (ncRNA) </option> <option value="nursrep" > Nursing Reports </option> <option value="nutraceuticals" > Nutraceuticals </option> <option value="nutrients" > Nutrients </option> <option value="obesities" > Obesities </option> <option value="oceans" > Oceans </option> <option value="onco" > Onco </option> <option value="optics" > Optics </option> <option value="oral" > Oral </option> <option value="organics" > Organics </option> <option value="organoids" > Organoids </option> <option value="osteology" > Osteology </option> <option value="oxygen" > Oxygen </option> <option value="parasitologia" > Parasitologia </option> <option value="particles" > Particles </option> <option value="pathogens" > Pathogens </option> <option value="pathophysiology" > Pathophysiology </option> <option value="pediatrrep" > Pediatric Reports </option> <option value="pets" > Pets </option> <option value="pharmaceuticals" > Pharmaceuticals </option> <option value="pharmaceutics" > Pharmaceutics </option> <option value="pharmacoepidemiology" > Pharmacoepidemiology </option> <option value="pharmacy" > Pharmacy </option> <option value="philosophies" > Philosophies </option> <option value="photochem" > Photochem </option> <option value="photonics" > Photonics </option> <option value="phycology" > Phycology </option> <option value="physchem" > Physchem </option> <option value="psf" > Physical Sciences Forum </option> <option value="physics" > Physics </option> <option value="physiologia" > Physiologia </option> <option value="plants" > Plants </option> <option value="plasma" > Plasma </option> <option value="platforms" > Platforms </option> <option value="pollutants" > Pollutants </option> <option value="polymers" > Polymers </option> <option value="polysaccharides" > Polysaccharides </option> <option value="populations" > Populations </option> <option value="poultry" > Poultry </option> <option value="powders" > Powders </option> <option value="proceedings" > Proceedings </option> <option value="processes" > Processes </option> <option value="prosthesis" > Prosthesis </option> <option value="proteomes" > Proteomes </option> <option value="psychiatryint" > Psychiatry International </option> <option value="psychoactives" > Psychoactives </option> <option 
value="psycholint" > Psychology International </option> <option value="publications" > Publications </option> <option value="qubs" > Quantum Beam Science (QuBS) </option> <option value="quantumrep" > Quantum Reports </option> <option value="quaternary" > Quaternary </option> <option value="radiation" > Radiation </option> <option value="reactions" > Reactions </option> <option value="realestate" > Real Estate </option> <option value="receptors" > Receptors </option> <option value="recycling" > Recycling </option> <option value="rsee" > Regional Science and Environmental Economics (RSEE) </option> <option value="religions" > Religions </option> <option value="remotesensing" > Remote Sensing </option> <option value="reports" > Reports </option> <option value="reprodmed" > Reproductive Medicine (Reprod. Med.) </option> <option value="resources" > Resources </option> <option value="rheumato" > Rheumato </option> <option value="risks" > Risks </option> <option value="robotics" > Robotics </option> <option value="ruminants" > Ruminants </option> <option value="safety" > Safety </option> <option value="sci" > Sci </option> <option value="scipharm" > Scientia Pharmaceutica (Sci. Pharm.) </option> <option value="sclerosis" > Sclerosis </option> <option value="seeds" > Seeds </option> <option value="sensors" > Sensors </option> <option value="separations" > Separations </option> <option value="sexes" > Sexes </option> <option value="signals" > Signals </option> <option value="sinusitis" > Sinusitis </option> <option value="smartcities" > Smart Cities </option> <option value="socsci" > Social Sciences </option> <option value="siuj" > Société Internationale d’Urologie Journal (SIUJ) </option> <option value="societies" > Societies </option> <option value="software" > Software </option> <option value="soilsystems" > Soil Systems </option> <option value="solar" > Solar </option> <option value="solids" > Solids </option> <option value="spectroscj" > Spectroscopy Journal </option> <option value="sports" > Sports </option> <option value="standards" > Standards </option> <option value="stats" > Stats </option> <option value="stresses" > Stresses </option> <option value="surfaces" > Surfaces </option> <option value="surgeries" > Surgeries </option> <option value="std" > Surgical Techniques Development </option> <option value="sustainability" > Sustainability </option> <option value="suschem" > Sustainable Chemistry </option> <option value="symmetry" > Symmetry </option> <option value="synbio" > SynBio </option> <option value="systems" > Systems </option> <option value="targets" > Targets </option> <option value="taxonomy" > Taxonomy </option> <option value="technologies" > Technologies </option> <option value="telecom" > Telecom </option> <option value="textiles" > Textiles </option> <option value="thalassrep" > Thalassemia Reports </option> <option value="therapeutics" > Therapeutics </option> <option value="thermo" > Thermo </option> <option value="timespace" > Time and Space </option> <option value="tomography" > Tomography </option> <option value="tourismhosp" > Tourism and Hospitality </option> <option value="toxics" > Toxics </option> <option value="toxins" > Toxins </option> <option value="transplantology" > Transplantology </option> <option value="traumacare" > Trauma Care </option> <option value="higheredu" > Trends in Higher Education </option> <option value="tropicalmed" > Tropical Medicine and Infectious Disease (TropicalMed) </option> <option value="universe" > Universe </option> <option 
value="urbansci" > Urban Science </option> <option value="uro" > Uro </option> <option value="vaccines" > Vaccines </option> <option value="vehicles" > Vehicles </option> <option value="venereology" > Venereology </option> <option value="vetsci" > Veterinary Sciences </option> <option value="vibration" > Vibration </option> <option value="virtualworlds" > Virtual Worlds </option> <option value="viruses" > Viruses </option> <option value="vision" > Vision </option> <option value="waste" > Waste </option> <option value="water" > Water </option> <option value="wild" > Wild </option> <option value="wind" > Wind </option> <option value="women" > Women </option> <option value="world" > World </option> <option value="wevj" > World Electric Vehicle Journal (WEVJ) </option> <option value="youth" > Youth </option> <option value="zoonoticdis" > Zoonotic Diseases </option> </select> </div> <div class="large-2 medium-2 small-6 columns "> <div class=""> <div class="search-input-label">Article Type</div> </div> <select id="article_type" tabindex="4" name="article_type" class="chosen-select"> <option value="">All Article Types</option> <option value="research-article">Article</option> <option value="review-article">Review</option> <option value="rapid-communication">Communication</option> <option value="editorial">Editorial</option> <option value="abstract">Abstract</option> <option value="book-review">Book Review</option> <option value="brief-communication">Brief Communication</option> <option value="brief-report">Brief Report</option> <option value="case-report">Case Report</option> <option value="clinicopathological-challenge">Clinicopathological Challenge</option> <option value="article-commentary">Comment</option> <option value="commentary">Commentary</option> <option value="concept-paper">Concept Paper</option> <option value="conference-report">Conference Report</option> <option value="correction">Correction</option> <option value="creative">Creative</option> <option value="data-descriptor">Data Descriptor</option> <option value="discussion">Discussion</option> <option value="Entry">Entry</option> <option value="essay">Essay</option> <option value="expression-of-concern">Expression of Concern</option> <option value="extended-abstract">Extended Abstract</option> <option value="field-guide">Field Guide</option> <option value="guidelines">Guidelines</option> <option value="hypothesis">Hypothesis</option> <option value="interesting-image">Interesting Images</option> <option value="letter">Letter</option> <option value="books-received">New Book Received</option> <option value="obituary">Obituary</option> <option value="opinion">Opinion</option> <option value="perspective">Perspective</option> <option value="proceedings">Proceeding Paper</option> <option value="project-report">Project Report</option> <option value="protocol">Protocol</option> <option value="registered-report">Registered Report</option> <option value="reply">Reply</option> <option value="retraction">Retraction</option> <option value="note">Short Note</option> <option value="study-protocol">Study Protocol</option> <option value="systematic_review">Systematic Review</option> <option value="technical-note">Technical Note</option> <option value="tutorial">Tutorial</option> <option value="viewpoint">Viewpoint</option> </select> </div> <div class="large-1 medium-1 small-6 end columns small-push-6 medium-reset-order large-reset-order js-search-collapsed-button-container"> <div class="search-input-label">&nbsp;</div> <input type="submit" 
id="search" value="Search" class="button button--dark button--full-width searchButton1 US_SearchButton" tabindex="12"> </div> <div class="large-1 medium-1 small-6 end columns large-text-left small-only-text-center small-pull-6 medium-reset-order large-reset-order js-search-collapsed-link-container"> <div class="search-input-label">&nbsp;</div> <a class="main-search-clear search-container__link" href="#" onclick="openAdvanced(''); return false;">Advanced<span class="show-for-small-only"> Search</span></a> </div> </div> </div> <div class="search-container__advanced" style="margin-top: 0; padding-top: 0px; background-color: inherit; color: inherit;"> <div class="row"> <div class="large-2 medium-2 columns show-for-medium-up">&nbsp;</div> <div class="large-2 medium-2 small-6 columns "> <div class=""> <div class="search-input-label">Section</div> </div> <select id="section" tabindex="5" name="section" class="chosen-select"> <option value=""></option> </select> </div> <div class="large-2 medium-2 small-6 columns "> <div class=""> <div class="search-input-label">Special Issue</div> </div> <select id="special_issue" tabindex="6" name="special_issue" class="chosen-select"> <option value=""></option> </select> </div> <div class="large-1 medium-1 small-6 end columns "> <div class="search-input-label">Volume</div> <input type="text" id="volume" tabindex="7" name="volume" placeholder="..." value="13" /> </div> <div class="large-1 medium-1 small-6 end columns "> <div class="search-input-label">Issue</div> <input type="text" id="issue" tabindex="8" name="issue" placeholder="..." value="9" /> </div> <div class="large-1 medium-1 small-6 end columns "> <div class="search-input-label">Number</div> <input type="text" id="number" tabindex="9" name="number" placeholder="..." value="" /> </div> <div class="large-1 medium-1 small-6 end columns "> <div class="search-input-label">Page</div> <input type="text" id="page" tabindex="10" name="page" placeholder="..." 
value="" /> </div> <div class="large-1 medium-1 small-6 columns small-push-6 medium-reset order large-reset-order medium-reset-order js-search-expanded-button-container"></div> <div class="large-1 medium-1 small-6 columns large-text-left small-only-text-center small-pull-6 medium-reset-order large-reset-order js-search-expanded-link-container"></div> </div> </div> </form> <form id="advanced-search" class="large-12 medium-12 columns"> <div class="search-container__advanced"> <div id="advanced-search-template" class="row advanced-search-row"> <div class="large-2 medium-2 small-12 columns show-for-medium-up">&nbsp;</div> <div class="large-2 medium-2 small-3 columns connector-div"> <div class="search-input-label"><span class="show-for-medium-up">Logical Operator</span><span class="show-for-small">Operator</span></div> <select class="connector"> <option value="and">AND</option> <option value="or">OR</option> </select> </div> <div class="large-3 medium-3 small-6 columns search-text-div"> <div class="search-input-label">Search Text</div> <input type="text" class="search-text" placeholder="Search text"> </div> <div class="large-2 medium-2 small-6 large-offset-0 medium-offset-0 small-offset-3 columns search-field-div"> <div class="search-input-label">Search Type</div> <select class="search-field"> <option value="all">All fields</option> <option value="title">Title</option> <option value="abstract">Abstract</option> <option value="keywords">Keywords</option> <option value="authors">Authors</option> <option value="affiliations">Affiliations</option> <option value="doi">Doi</option> <option value="full_text">Full Text</option> <option value="references">References</option> </select> </div> <div class="large-1 medium-1 small-3 columns"> <div class="search-input-label">&nbsp;</div> <div class="search-action-div"> <div class="search-plus"> <i class="material-icons">add_circle_outline</i> </div> </div> <div class="search-action-div"> <div class="search-minus"> <i class="material-icons">remove_circle_outline</i> </div> </div> </div> <div class="large-1 medium-1 small-6 large-offset-0 medium-offset-0 small-offset-3 end columns"> <div class="search-input-label">&nbsp;</div> <input class="advanced-search-button button button--dark search-submit" type="submit" value="Search"> </div> <div class="large-1 medium-1 small-6 end columns show-for-medium-up"></div> </div> </div> </form> </div> <div class="header-divider">&nbsp;</div> <div class="breadcrumb row full-row"> <div class="breadcrumb__element"> <a href="/about/journals">Journals</a> </div> <div class="breadcrumb__element"> <a href="/journal/applsci">Applied Sciences</a> </div> <div class="breadcrumb__element"> <a href="/2076-3417/13">Volume 13</a> </div> <div class="breadcrumb__element"> <a href="/2076-3417/13/9">Issue 9</a> </div> <div class="breadcrumb__element"> <a href="#">10.3390/app13095521</a> </div> </div> </header> <div id="main-content" class=""> <div class="row full-width row-fixed-left-column"> <div id="left-column" class="content__column large-3 medium-3 small-12 columns"> <div class="content__container"> <a href="/journal/applsci"> <img src="https://pub.mdpi-res.com/img/journals/applsci-logo.png?8600e93ff98dbf14" alt="applsci-logo" title="Applied Sciences" style="max-height: 60px; margin: 0 0 0 0;"> </a> <div class="generic-item no-border"> <a class="button button--color button--full-width js-journal-active-only-link js-journal-active-only-submit-link UC_ArticleSubmitButton" 
href="https://susy.mdpi.com/user/manuscripts/upload?form%5Bjournal_id%5D%3D90" data-disabledmessage="creating new submissions is not possible."> Submit to this Journal </a> <a class="button button--color button--full-width js-journal-active-only-link UC_ArticleReviewButton" href="https://susy.mdpi.com/volunteer/journals/review" data-disabledmessage="volunteering as journal reviewer is not possible."> Review for this Journal </a> <a class="button button--color-inversed button--color-journal button--full-width js-journal-active-only-link UC_ArticleEditIssueButton" href="/journalproposal/sendproposalspecialissue/applsci" data-path="/2076-3417/13/9/5521" data-disabledmessage="proposing new special issue is not possible."> Propose a Special Issue </a> </div> <div class="generic-item link-article-menu show-for-small"> <a href="#" class="link-article-menu show-for-small"> <span class="closed">&#9658;</span> <span class="open" style="display: none;">&#9660;</span> Article Menu </a> </div> <div class="hide-small-down-initially UI_ArticleMenu"> <div class="generic-item"> <h2>Article Menu</h2> </div> <ul class="accordion accordion__menu" data-accordion data-options="multi_expand:true;toggleable: true"> <li class="accordion-navigation"> <a href="#academic_editors" class="accordion__title">Academic Editor</a> <div id="academic_editors" class="content active"> <div class="academic-editor-container " title="School of Computing and Mathematical Sciences, University of Leicester, Leicester LE1 7RH, UK"> <div class="sciprofiles-link" style="display: inline-block"><a class="sciprofiles-link__link" href="https://sciprofiles.com/profile/4763?utm_source=mdpi.com&amp;utm_medium=website&amp;utm_campaign=avatar_name" target="_blank" rel="noopener noreferrer"><img class="sciprofiles-link__image" src="/profiles/4763/thumb/Yudong_Zhang.png" style="width: auto; height: 16px; border-radius: 50%;"><span class="sciprofiles-link__name">Yu-Dong Zhang</span></a></div> </div> </div> </li> <li class="accordion-direct-link"> <a href="/2076-3417/13/9/5521/scifeed_display" data-reveal-id="scifeed-modal" data-reveal-ajax="true">Subscribe SciFeed</a> </li> <li class="accordion-direct-link js-article-similarity-container" style="display: none"> <a href="#" class="js-similarity-related-articles">Recommended Articles</a> </li> <li class="accordion-navigation"> <a href="#related" class="accordion__title">Related Info Link</a> <div id="related" class="content UI_ArticleMenu_RelatedLinks"> <ul> <li class="li-link"> <a href="https://scholar.google.com/scholar?q=Comparing%20Vision%20Transformers%20and%20Convolutional%20Neural%20Networks%20for%20Image%20Classification%3A%20A%20Literature%20Review" target="_blank" rel="noopener noreferrer">Google Scholar</a> </li> </ul> </div> </li> <li class="accordion-navigation"> <a href="#authors" class="accordion__title">More by Authors Links</a> <div id="authors" class="content UI_ArticleMenu_AuthorsLinks"> <ul class="side-menu-ul"> <li> <a class="expand" onclick='$(this).closest("li").next("div").toggle(); return false;'>on DOAJ</a> </li> <div id="AuthorDOAJExpand" style="display:none;"> <ul class="submenu"> <li class="li-link"> <a href='http://doaj.org/search/articles?source=%7B%22query%22%3A%7B%22query_string%22%3A%7B%22query%22%3A%22%5C%22Jos%C3%A9%20Maur%C3%ADcio%5C%22%22%2C%22default_operator%22%3A%22AND%22%2C%22default_field%22%3A%22bibjson.author.name%22%7D%7D%7D' target="_blank" rel="noopener noreferrer">Maurício, J.</a> <li> </li> <li class="li-link"> <a 
href='http://doaj.org/search/articles?source=%7B%22query%22%3A%7B%22query_string%22%3A%7B%22query%22%3A%22%5C%22In%C3%AAs%20Domingues%5C%22%22%2C%22default_operator%22%3A%22AND%22%2C%22default_field%22%3A%22bibjson.author.name%22%7D%7D%7D' target="_blank" rel="noopener noreferrer">Domingues, I.</a> <li> </li> <li class="li-link"> <a href='http://doaj.org/search/articles?source=%7B%22query%22%3A%7B%22query_string%22%3A%7B%22query%22%3A%22%5C%22Jorge%20Bernardino%5C%22%22%2C%22default_operator%22%3A%22AND%22%2C%22default_field%22%3A%22bibjson.author.name%22%7D%7D%7D' target="_blank" rel="noopener noreferrer">Bernardino, J.</a> <li> </li> </ul> </div> <li> <a class="expand" onclick='$(this).closest("li").next("div").toggle(); return false;'>on Google Scholar</a> </li> <div id="AuthorGoogleExpand" style="display:none;"> <ul class="submenu"> <li class="li-link"> <a href="https://scholar.google.com/scholar?q=Jos%C3%A9%20Maur%C3%ADcio" target="_blank" rel="noopener noreferrer">Maurício, J.</a> <li> </li> <li class="li-link"> <a href="https://scholar.google.com/scholar?q=In%C3%AAs%20Domingues" target="_blank" rel="noopener noreferrer">Domingues, I.</a> <li> </li> <li class="li-link"> <a href="https://scholar.google.com/scholar?q=Jorge%20Bernardino" target="_blank" rel="noopener noreferrer">Bernardino, J.</a> <li> </li> </ul> </div> <li> <a class="expand" onclick='$(this).closest("li").next("div").toggle(); return false;'>on PubMed</a> </li> <div id="AuthorPubMedExpand" style="display:none;"> <ul class="submenu"> <li class="li-link"> <a href="http://www.pubmed.gov/?cmd=Search&amp;term=Jos%C3%A9%20Maur%C3%ADcio" target="_blank" rel="noopener noreferrer">Maurício, J.</a> <li> </li> <li class="li-link"> <a href="http://www.pubmed.gov/?cmd=Search&amp;term=In%C3%AAs%20Domingues" target="_blank" rel="noopener noreferrer">Domingues, I.</a> <li> </li> <li class="li-link"> <a href="http://www.pubmed.gov/?cmd=Search&amp;term=Jorge%20Bernardino" target="_blank" rel="noopener noreferrer">Bernardino, J.</a> <li> </li> </ul> </div> </ul> </div> </li> </ul> <span style="display:none" id="scifeed_hidden_flag"></span> <span style="display:none" id="scifeed_subscribe_url">/ajax/scifeed/subscribe</span> </div> </div> <div class="content__container responsive-moving-container large medium active hidden" data-id="article-counters"> <div id="counts-wrapper" class="row generic-item no-border" data-equalizer> <div id="js-counts-wrapper__views" class="small-12 hide columns count-div-container"> <a href="#metrics" > <div class="count-div" data-equalizer-watch> <span class="name">Article Views</span> <span class="count view-number"></span> </div> </a> </div> <div id="js-counts-wrapper__citations" class="small-12 columns hide count-div-container"> <a href="#metrics" > <div class="count-div" data-equalizer-watch> <span class="name">Citations</span> <span class="count citations-number Var_ArticleMaxCitations">-</span> </div> </a> </div> </div> </div> <div class="content__container"> <div class="hide-small-down-initially"> <ul class="accordion accordion__menu" data-accordion data-options="multi_expand:true;toggleable: true"> <li class="accordion-navigation"> <a href="#table_of_contents" class="accordion__title">Table of Contents</a> <div id="table_of_contents" class="content active"> <div class="menu-caption" id="html-quick-links-title"></div> </div> </li> </ul> </div> </div> <!-- PubGrade code --> <div id="pbgrd-sky"></div> <script src="https://cdn.pbgrd.com/core-mdpi.js"></script> <style>.content__container { min-width: 300px; 
}</style> <!-- PubGrade code --> </div> <div id="middle-column" class="content__column large-9 medium-9 small-12 columns end middle-bordered"> <div class="middle-column__help"> <div class="middle-column__help__fixed show-for-medium-up"> <span id="js-altmetrics-donut" href="#" target="_blank" rel="noopener noreferrer" style="display: none;"> <span data-badge-type='donut' class='altmetric-embed' data-doi='10.3390/app13095521'></span> <span>Altmetric</span> </span> <a href="#" class="UA_ShareButton" data-reveal-id="main-share-modal" title="Share"> <i class="material-icons">share</i> <span>Share</span> </a> <a href="#" data-reveal-id="main-help-modal" title="Help"> <i class="material-icons">announcement</i> <span>Help</span> </a> <a href="javascript:void(0);" data-reveal-id="cite-modal" data-counterslink = "https://www.mdpi.com/2076-3417/13/9/5521/cite" > <i class="material-icons">format_quote</i> <span>Cite</span> </a> <a href="https://sciprofiles.com/discussion-groups/public/10.3390/app13095521?utm_source=mpdi.com&utm_medium=publication&utm_campaign=discuss_in_sciprofiles" target="_blank" rel="noopener noreferrer" title="Discuss in Sciprofiles"> <i class="material-icons">question_answer</i> <span>Discuss in SciProfiles</span> </a> <a href="#" class="" data-hypothesis-trigger-endorses-tab title="Endorse"> <i data-hypothesis-endorse-trigger class="material-icons" >thumb_up</i> <div data-hypothesis-endorsement-count data-hypothesis-trigger-endorses-tab class="hypothesis-count-container"> ... </div> <span>Endorse</span> </a> <a href="#" data-hypothesis-trigger class="js-hypothesis-open UI_ArticleAnnotationsButton" title="Comment"> <i class="material-icons">textsms</i> <div data-hypothesis-annotation-count class="hypothesis-count-container"> ... </div> <span>Comment</span> </a> </div> <div id="main-help-modal" class="reveal-modal reveal-modal-new" data-reveal aria-labelledby="modalTitle" aria-hidden="true" role="dialog"> <div class="row"> <div class="small-12 columns"> <h2 style="margin: 0;">Need Help?</h2> </div> <div class="small-6 columns"> <h3>Support</h3> <p> Find support for a specific problem in the support section of our website. </p> <a target="_blank" href="/about/contactform" class="button button--color button--full-width"> Get Support </a> </div> <div class="small-6 columns"> <h3>Feedback</h3> <p> Please let us know what you think of our products and services. </p> <a target="_blank" href="/feedback/send" class="button button--color button--full-width"> Give Feedback </a> </div> <div class="small-6 columns end"> <h3>Information</h3> <p> Visit our dedicated information section to learn more about MDPI. 
</p> <a target="_blank" href="/authors" class="button button--color button--full-width"> Get Information </a> </div> </div> <a class="close-reveal-modal" aria-label="Close"> <i class="material-icons">clear</i> </a> </div> </div> <div class="middle-column__main "> <div class="page-highlight"> <style type="text/css"> img.review-status { width: 30px; } </style> <div id="jmolModal" class="reveal-modal" data-reveal aria-labelledby="Captcha" aria-hidden="true" role="dialog"> <h2>JSmol Viewer</h2> <div class="row"> <div class="small-12 columns text-center"> <iframe style="width: 520px; height: 520px;" frameborder="0" id="jsmol-content"></iframe> <div class="content"></div> </div> </div> <a class="close-reveal-modal" aria-label="Close"> <i class="material-icons">clear</i> </a> </div> <div itemscope itemtype="http://schema.org/ScholarlyArticle" id="abstract" class="abstract_div"> <div class="js-check-update-container"></div> <div class="html-content__container content__container content__container__combined-for-large__first" style="overflow: auto; position: inherit;"> <div class='html-profile-nav'> <div class='top-bar'> <div class='nav-sidebar-btn show-for-large-up' data-status='opened' > <i class='material-icons'>first_page</i> </div> <a id="js-button-download" class="button button--color-inversed" style="display: none;" href="/2076-3417/13/9/5521/pdf?version=1683174100" data-name="Comparing Vision Transformers and Convolutional Neural Networks for Image Classification: A Literature Review" data-journal="applsci"> <i class="material-icons custom-download"></i> Download PDF </a> <div class='nav-btn'> <i class='material-icons'>settings</i> </div> <a href="/2076-3417/13/9/5521/reprints" id="js-button-reprints" class="button button--color-inversed"> Order Article Reprints </a> </div> <div class='html-article-menu'> <div class='html-first-step row'> <div class='html-font-family large-6 medium-6 small-12 columns'> <div class='row'> <div class='html-font-label large-4 medium-4 small-12 columns'> Font Type: </div> <div class='large-8 medium-8 small-12 columns'> <span class="html-article-menu-option"><i style='font-family:Arial, Arial, Helvetica, sans-serif;' data-fontfamily='Arial, Arial, Helvetica, sans-serif'>Arial</i></span> <span class="html-article-menu-option"><i style='font-family:Georgia1, Georgia, serif;' data-fontfamily='Georgia1, Georgia, serif'>Georgia</i></span> <span class="html-article-menu-option"><i style='font-family:Verdana, Verdana, Geneva, sans-serif;' data-fontfamily='Verdana, Verdana, Geneva, sans-serif' >Verdana</i></span> </div> </div> </div> <div class='html-font-resize large-6 medium-6 small-12 columns'> <div class='row'> <div class='html-font-label large-4 medium-4 small-12 columns'>Font Size:</div> <div class='large-8 medium-8 small-12 columns'> <span class="html-article-menu-option a1" data-percent="100">Aa</span> <span class="html-article-menu-option a2" data-percent="120">Aa</span> <span class="html-article-menu-option a3" data-percent="160">Aa</span> </div> </div> </div> </div> <div class='row'> <div class='html-line-space large-6 medium-6 small-12 columns'> <div class='row'> <div class='html-font-label large-4 medium-4 small-12 columns' >Line Spacing:</div> <div class='large-8 medium-8 small-12 columns'> <span class="html-article-menu-option a1" data-line-height="1.5em"> <i class="fa">&#xf034;</i> </span> <span class="html-article-menu-option a2" data-line-height="1.8em"> <i class="fa">&#xf034;</i> </span> <span class="html-article-menu-option a3" 
data-line-height="2.1em"> <i class="fa">&#xf034;</i> </span> </div> </div> </div> <div class='html-column-width large-6 medium-6 small-12 columns'> <div class='row'> <div class='html-font-label large-4 medium-4 small-12 columns' >Column Width:</div> <div class='large-8 medium-8 small-12 columns'> <span class="html-article-menu-option a1" data-column-width="20%"> <i class="fa">&#xf035;</i> </span> <span class="html-article-menu-option a2" data-column-width="10%"> <i class="fa">&#xf035;</i> </span> <span class="html-article-menu-option a3" data-column-width="0%"> <i class="fa">&#xf035;</i> </span> </div> </div> </div> </div> <div class='row'> <div class='html-font-bg large-6 medium-6 small-12 columns end'> <div class='row'> <div class='html-font-label large-4 medium-4 small-12 columns'>Background:</div> <div class='large-8 medium-8 small-12 columns'> <div class="html-article-menu-option html-nav-bg html-nav-bright" data-bg="bright"> <i class="fa fa-file-text"></i> </div> <div class="html-article-menu-option html-nav-bg html-nav-dark" data-bg="dark"> <i class="fa fa-file-text-o"></i> </div> <div class="html-article-menu-option html-nav-bg html-nav-creme" data-bg="creme"> <i class="fa fa-file-text"></i> </div> </div> </div> </div> </div> </div> </div> <article ><div class='html-article-content'> <span itemprop="publisher" content="Multidisciplinary Digital Publishing Institute"></span><span itemprop="url" content="https://www.mdpi.com/2076-3417/13/9/5521"></span> <div class="article-icons"><span class="label openaccess" data-dropdown="drop-article-label-openaccess" aria-expanded="false">Open Access</span><span class='label choice' data-dropdown='drop-article-label-choice' aria-expanded='false' data-editorschoiceaddition='<a href="/journal/applsci/editors_choice">More Editor’s choice articles in journal <em>Applied Sciences</em>.</a>'>Editor’s Choice</span><span class="label articletype">Review</span></div> <h1 class="title hypothesis_container" itemprop="name"> Comparing Vision Transformers and Convolutional Neural Networks for Image Classification: A Literature Review </h1> <div class="art-authors hypothesis_container"> by <span class="inlineblock "><div class='profile-card-drop' data-dropdown='profile-card-drop11074483' data-options='is_hover:true, hover_timeout:5000'> José Maurício</div><div id="profile-card-drop11074483" data-dropdown-content class="f-dropdown content profile-card-content" aria-hidden="true" tabindex="-1"><div class="profile-card__title"><div class="sciprofiles-link" style="display: inline-block"><div class="sciprofiles-link__link"><img class="sciprofiles-link__image" src="/bundles/mdpisciprofileslink/img/unknown-user.png" style="width: auto; height: 16px; border-radius: 50%;"><span class="sciprofiles-link__name">José Maurício</span></div></div></div><div class="profile-card__buttons" style="margin-bottom: 10px;"><a href="https://sciprofiles.com/profile/author/NlhlekFPZlRmWXg0ZE1OdDk0eVE4TjFNWkRydHRRek9odjc0cmEyZ3p4RT0=?utm_source=mdpi.com&amp;utm_medium=website&amp;utm_campaign=avatar_name" class="button button--color-inversed" target="_blank"> SciProfiles </a><a href="https://scilit.net/scholars?q=Jos%C3%A9%20Maur%C3%ADcio" class="button button--color-inversed" target="_blank"> Scilit </a><a href="https://www.preprints.org/search?search1=Jos%C3%A9%20Maur%C3%ADcio&field1=authors" class="button button--color-inversed" target="_blank"> Preprints.org </a><a href="https://scholar.google.com/scholar?q=Jos%C3%A9%20Maur%C3%ADcio" class="button button--color-inversed" 
target="_blank" rels="noopener noreferrer"> Google Scholar </a></div></div><sup></sup><span style="display: inline; margin-left: 5px;"></span><a class="toEncode emailCaptcha visibility-hidden" data-author-id="11074483" href="/cdn-cgi/l/email-protection#26094548420b45414f094a09434b474f4a0b565449524345524f49480516161315101713171316131f1317131213111316131213161417161e171416121614124017171713"><sup><i class="fa fa-envelope-o"></i></sup></a><a href="https://orcid.org/0000-0001-8234-9481" target="_blank" rel="noopener noreferrer"><img src="https://pub.mdpi-res.com/img/design/orcid.png?0465bc3812adeb52?1732087095" title="ORCID" style="position: relative; width: 13px; margin-left: 3px; max-width: 13px !important; height: auto; top: -5px;"></a>, </span><span class="inlineblock "><div class='profile-card-drop' data-dropdown='profile-card-drop11074484' data-options='is_hover:true, hover_timeout:5000'> Inês Domingues</div><div id="profile-card-drop11074484" data-dropdown-content class="f-dropdown content profile-card-content" aria-hidden="true" tabindex="-1"><div class="profile-card__title"><div class="sciprofiles-link" style="display: inline-block"><div class="sciprofiles-link__link"><img class="sciprofiles-link__image" src="/profiles/402033/thumb/Inês_Domingues.jpg" style="width: auto; height: 16px; border-radius: 50%;"><span class="sciprofiles-link__name">Inês Domingues</span></div></div></div><div class="profile-card__buttons" style="margin-bottom: 10px;"><a href="https://sciprofiles.com/profile/402033?utm_source=mdpi.com&amp;utm_medium=website&amp;utm_campaign=avatar_name" class="button button--color-inversed" target="_blank"> SciProfiles </a><a href="https://scilit.net/scholars?q=In%C3%AAs%20Domingues" class="button button--color-inversed" target="_blank"> Scilit </a><a href="https://www.preprints.org/search?search1=In%C3%AAs%20Domingues&field1=authors" class="button button--color-inversed" target="_blank"> Preprints.org </a><a href="https://scholar.google.com/scholar?q=In%C3%AAs%20Domingues" class="button button--color-inversed" target="_blank" rels="noopener noreferrer"> Google Scholar </a></div></div><sup></sup><span style="display: inline; margin-left: 5px;"></span><a class="toEncode emailCaptcha visibility-hidden" data-author-id="11074484" href="/cdn-cgi/l/email-protection#200f434e440d4347490f4c0f454d41494c0d50524f54454354494f4e0310101017161910431141141710441016101410101017104511431043114112191010114110431041141711191144"><sup><i class="fa fa-envelope-o"></i></sup></a><a href="https://orcid.org/0000-0002-2334-7280" target="_blank" rel="noopener noreferrer"><img src="https://pub.mdpi-res.com/img/design/orcid.png?0465bc3812adeb52?1732087095" title="ORCID" style="position: relative; width: 13px; margin-left: 3px; max-width: 13px !important; height: auto; top: -5px;"></a> and </span><span class="inlineblock "><div class='profile-card-drop' data-dropdown='profile-card-drop11074485' data-options='is_hover:true, hover_timeout:5000'> Jorge Bernardino</div><div id="profile-card-drop11074485" data-dropdown-content class="f-dropdown content profile-card-content" aria-hidden="true" tabindex="-1"><div class="profile-card__title"><div class="sciprofiles-link" style="display: inline-block"><div class="sciprofiles-link__link"><img class="sciprofiles-link__image" src="/profiles/274546/thumb/Jorge_Bernardino.png" style="width: auto; height: 16px; border-radius: 50%;"><span class="sciprofiles-link__name">Jorge Bernardino</span></div></div></div><div class="profile-card__buttons" style="margin-bottom: 
10px;"><a href="https://sciprofiles.com/profile/274546?utm_source=mdpi.com&amp;utm_medium=website&amp;utm_campaign=avatar_name" class="button button--color-inversed" target="_blank"> SciProfiles </a><a href="https://scilit.net/scholars?q=Jorge%20Bernardino" class="button button--color-inversed" target="_blank"> Scilit </a><a href="https://www.preprints.org/search?search1=Jorge%20Bernardino&field1=authors" class="button button--color-inversed" target="_blank"> Preprints.org </a><a href="https://scholar.google.com/scholar?q=Jorge%20Bernardino" class="button button--color-inversed" target="_blank" rels="noopener noreferrer"> Google Scholar </a></div></div><sup> *</sup><span style="display: inline; margin-left: 5px;"></span><a class="toEncode emailCaptcha visibility-hidden" data-author-id="11074485" href="/cdn-cgi/l/email-protection#7f501c111b521c18165013501a121e1613520f0d100b1a1c0b1610115c4f4f4f4a491e4e474f1b4f194d1e4f4c4e464f194f464b4b4e1e4e1a"><sup><i class="fa fa-envelope-o"></i></sup></a><a href="https://orcid.org/0000-0001-9660-2011" target="_blank" rel="noopener noreferrer"><img src="https://pub.mdpi-res.com/img/design/orcid.png?0465bc3812adeb52?1732087095" title="ORCID" style="position: relative; width: 13px; margin-left: 3px; max-width: 13px !important; height: auto; top: -5px;"></a></span> </div> <div class="nrm"></div> <span style="display:block; height:6px;"></span> <div></div> <div style="margin: 5px 0 15px 0;" class="hypothesis_container"> <div class="art-affiliations"> <div class="affiliation "> <div class="affiliation-name ">Polytechnic of Coimbra, Coimbra Institute of Engineering (ISEC), Rua Pedro Nunes, 3030-199 Coimbra, Portugal</div> </div> <div class="affiliation"> <div class="affiliation-item"><sup>*</sup></div> <div class="affiliation-name ">Author to whom correspondence should be addressed. </div> </div> </div> </div> <div class="bib-identity" style="margin-bottom: 10px;"> <em>Appl. 
Sci.</em> <b>2023</b>, <em>13</em>(9), 5521; <a href="https://doi.org/10.3390/app13095521">https://doi.org/10.3390/app13095521</a> </div> <div class="pubhistory" style="font-weight: bold; padding-bottom: 10px;"> <span style="display: inline-block">Submission received: 20 March 2023</span> / <span style="display: inline-block">Revised: 19 April 2023</span> / <span style="display: inline-block">Accepted: 26 April 2023</span> / <span style="display: inline-block">Published: 28 April 2023</span> </div> <div class="belongsTo" style="margin-bottom: 10px;"> (This article belongs to the Special Issue <a href=" /journal/applsci/special_issues/AI_Complex_Network ">Artificial Intelligence in Complex Networks</a>)<br/> </div> <div class="highlight-box1"> <div class="download"> <a class="button button--color-inversed button--drop-down" data-dropdown="drop-download-1136135" aria-controls="drop-supplementary-1136135" aria-expanded="false"> Download <i class="material-icons">keyboard_arrow_down</i> </a> <div id="drop-download-1136135" class="f-dropdown label__btn__dropdown label__btn__dropdown--button" data-dropdown-content aria-hidden="true" tabindex="-1"> <a class="UD_ArticlePDF" href="/2076-3417/13/9/5521/pdf?version=1683174100" data-name="Comparing Vision Transformers and Convolutional Neural Networks for Image Classification: A Literature Review" data-journal="applsci">Download PDF</a> <br/> <a id="js-pdf-with-cover-access-captcha" href="#" data-target="/2076-3417/13/9/5521/pdf-with-cover" class="accessCaptcha">Download PDF with Cover</a> <br/> <a id="js-xml-access-captcha" href="#" data-target="/2076-3417/13/9/5521/xml" class="accessCaptcha">Download XML</a> <br/> <a href="/2076-3417/13/9/5521/epub" id="epub_link">Download Epub</a> <br/> </div> <div class="js-browse-figures" style="display: inline-block;"> <a href="#" class="button button--color-inversed margin-bottom-10 openpopupgallery UI_BrowseArticleFigures" data-target='article-popup' data-counterslink = "https://www.mdpi.com/2076-3417/13/9/5521/browse" >Browse Figures</a> </div> <div id="article-popup" class="popupgallery" style="display: inline; line-height: 200%"> <a href="https://pub.mdpi-res.com/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g001.png?1683174170" title=" <strong>Figure 1</strong><br/> &lt;p&gt;Example of an architecture of the ViT, based on [&lt;a href=&quot;#B1-applsci-13-05521&quot; class=&quot;html-bibr&quot;&gt;1&lt;/a&gt;].&lt;/p&gt; "> </a> <a href="https://pub.mdpi-res.com/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g002.png?1683174172" title=" <strong>Figure 2</strong><br/> &lt;p&gt;Example of an architecture of a CNN, based on [&lt;a href=&quot;#B2-applsci-13-05521&quot; class=&quot;html-bibr&quot;&gt;2&lt;/a&gt;].&lt;/p&gt; "> </a> <a href="https://pub.mdpi-res.com/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g003.png?1683174171" title=" <strong>Figure 3</strong><br/> &lt;p&gt;Distribution of the selected studies by years.&lt;/p&gt; "> </a> <a href="https://pub.mdpi-res.com/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g004.png?1683174168" title=" <strong>Figure 4</strong><br/> &lt;p&gt;Distribution of the selected studies by application area.&lt;/p&gt; "> </a> </div> <a class="button button--color-inversed" href="/2076-3417/13/9/5521/notes">Versions&nbsp;Notes</a> </div> </div> <div class="responsive-moving-container small hidden" data-id="article-counters" style="margin-top: 15px;"></div> <div class="html-dynamic"> 
<section> <div class="art-abstract art-abstract-new in-tab hypothesis_container"> <p> <div><section class="html-abstract" id="html-abstract"> <h2 id="html-abstract-title">Abstract</h2><b>:</b> <div class="html-p">Transformers are models that implement a mechanism of self-attention, individually weighting the importance of each part of the input data. Their use in image classification tasks is still somewhat limited since researchers have so far chosen Convolutional Neural Networks for image classification and transformers were more targeted to Natural Language Processing (NLP) tasks. Therefore, this paper presents a literature review that shows the differences between Vision Transformers (ViT) and Convolutional Neural Networks. The state of the art that used the two architectures for image classification was reviewed and an attempt was made to understand what factors may influence the performance of the two deep learning architectures based on the datasets used, image size, number of target classes (for the classification problems), hardware, and evaluated architectures and top results. The objective of this work is to identify which of the architectures is the best for image classification and under what conditions. This paper also describes the importance of the Multi-Head Attention mechanism for improving the performance of ViT in image classification.</div> </section> <div id="html-keywords"> <div class="html-gwd-group"><div id="html-keywords-title">Keywords: </div><a href="/search?q=transformers">transformers</a>; <a href="/search?q=Vision+Transformers+%28ViT%29">Vision Transformers (ViT)</a>; <a href="/search?q=convolutional+neural+networks">convolutional neural networks</a>; <a href="/search?q=multi-head+attention">multi-head attention</a>; <a href="/search?q=image+classification">image classification</a></div> <div> </div> </div> </div> </p> </div> </section> </div> <div class="hypothesis_container"> <ul class="menu html-nav" data-prev-node="#html-quick-links-title"> </ul> <div class="html-body"> <section id='sec1-applsci-13-05521' type='intro'><h2 data-nested='1'> 1. Introduction</h2><div class='html-p'>Nowadays, transformers have become the preferred models for performing Natural Language Processing (NLP) tasks. They offer scalability and computational efficiency, allowing models to be trained with more than a hundred billion parameters without saturating model performance. Inspired by the success of the transformers applied to NLP and assuming that the self-attention mechanism could also be beneficial for image classification tasks, it was proposed to use the same architecture, with few modifications, to perform image classification [<a href="#B1-applsci-13-05521" class="html-bibr">1</a>]. The author’s proposal was an architecture, called Vision Transformers (ViT), which consists of breaking the image into 2D patches and providing this linear sequence of patches as input to the model. <a href="#applsci-13-05521-f001" class="html-fig">Figure 1</a> presents the architecture proposed by the authors.</div><div class='html-p'>In contrast to this deep learning architecture, there is another very popular tool for processing large volumes of data called Convolutional Neural Networks (CNN). The CNN is an architecture that consists of multiple layers and has demonstrated good performance in various computer vision tasks such as object detection or image segmentation, as well as NLP problems [<a href="#B2-applsci-13-05521" class="html-bibr">2</a>]. 
In contrast to this deep learning architecture, there is another very popular tool for processing large volumes of data: the Convolutional Neural Network (CNN). A CNN is an architecture that consists of multiple layers and has demonstrated good performance in various computer vision tasks, such as object detection and image segmentation, as well as in NLP problems [2]. The typical CNN architecture starts with convolutional layers that slide kernels, or filters, across the image, extracting computationally interpretable features. The first layer extracts low-level features (e.g., colours, gradient orientation, edges), and subsequent layers extract high-level features. Next, the pooling layers reduce the information extracted by the convolutional layers, preserving the most important features. Finally, the fully connected layers are fed with the flattened output of the convolutional and pooling layers and perform the classification. This architecture is shown in Figure 2.
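For comparison, a minimal CNN with the convolutional, pooling, and fully connected stages just described might look as follows; this is a hedged sketch whose layer widths and 10-class head are arbitrary illustrative choices, not an architecture from the reviewed papers.

```python
import torch
import torch.nn as nn

# conv -> pool -> conv -> pool -> flatten -> fully connected classifier
model = nn.Sequential(
    nn.Conv2d(3, 16, kernel_size=3, padding=1),   # low-level features (edges, colours)
    nn.ReLU(),
    nn.MaxPool2d(2),                              # keep the strongest local responses
    nn.Conv2d(16, 32, kernel_size=3, padding=1),  # higher-level features
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Flatten(),                                 # flatten feature maps for the head
    nn.Linear(32 * 56 * 56, 10),                  # fully connected layer -> 10 classes
)

logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 10])
```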
<a href="#sec6-applsci-13-05521" class="html-sec">Section 6</a> overviews the strengths and weaknesses of each architecture and suggests future research directions, and <a href="#sec7-applsci-13-05521" class="html-sec">Section 7</a> presents the main conclusions of this work.</div></section><section id='sec2-applsci-13-05521' type='methods'><h2 data-nested='1'> 2. Research Methodology</h2><div class='html-p'>The purpose of a literature review is to evaluate, analyse and summarize the existing literature on a specific research topic, in order to facilitate the emergence of theoretical frameworks [<a href="#B3-applsci-13-05521" class="html-bibr">3</a>]. In this literature review, the aim is to synthesize the knowledge base, critically evaluate the methods used and analyze the results obtained in order to identify the shortcomings and improve the two aforementioned deep learning architectures for image classification. The methodology for conducting this literature review is based on the guidelines presented in [<a href="#B3-applsci-13-05521" class="html-bibr">3</a>,<a href="#B4-applsci-13-05521" class="html-bibr">4</a>].</div><section id='sec2dot1-applsci-13-05521' type=''><h4 class='html-italic' data-nested='2'> 2.1. Data Sources</h4><div class='html-p'>ACM Digital Library, Google Scholar, Science Direct, Scopus, and Web of Science, were chosen as the data sources to extract the primary studies. The number of results found after searching papers in each of the data sources is shown in <a href="#applsci-13-05521-t001" class="html-table">Table 1</a>.</div></section><section id='sec2dot2-applsci-13-05521' type=''><h4 class='html-italic' data-nested='2'> 2.2. Search String</h4><div class='html-p'>The research questions developed for this paper served as the basis for the search strings utilized in each of the data sources. <a href="#applsci-13-05521-t002" class="html-table">Table 2</a> provides a list of the search strings used in each electronic database.</div></section><section id='sec2dot3-applsci-13-05521' type=''><h4 class='html-italic' data-nested='2'> 2.3. Inclusion Criteria</h4><div class='html-p'>The inclusion criteria set to select the papers were that the studies were recent, had been written in English, and were published between January 2021 and December 2022. This choice of publication dates is based on the fact that ViTs were not proposed until the end of 2020 [<a href="#B1-applsci-13-05521" class="html-bibr">1</a>]. In addition, the studies had to demonstrate a comparison between CNNs and ViTs for image classification and could use any pre-trained model of the two architectures. Studies that presented a proposal for a hybrid architecture, where they combined the two architectures into one, were also considered. The dataset used during the studies did not have to be a specific one, but it had to be a dataset of images that allowed classification using both deep learning architectures.</div></section><section id='sec2dot4-applsci-13-05521' type=''><h4 class='html-italic' data-nested='2'> 2.4. Exclusion Criteria</h4><div class='html-p'>Studies that oriented their research on using only one of the two deep learning architectures (i.e., Vision Transforms, or Convolutional Neural Networks) were excluded. Additionally, papers that were discovered to be redundant when searches were conducted throughout the chosen databases were eliminated. 
In order to address these research questions, a literature review was conducted by searching various databases, such as Google Scholar, Scopus, Web of Science, ACM Digital Library, and Science Direct, using specific search terms. This paper presents the results of this review and analyses the methodologies and findings of the selected papers.

The rest of this paper is structured as follows. Section 2 describes the research methodology and search results. Section 3 presents the knowledge, methodology, and results found in the selected documents. Section 4 provides a brief overview of the reviewed papers and attempts to answer the three research questions. Section 5 discusses threats to the validity of the research. Section 6 overviews the strengths and weaknesses of each architecture and suggests future research directions, and Section 7 presents the main conclusions of this work.

2. Research Methodology

The purpose of a literature review is to evaluate, analyse, and summarize the existing literature on a specific research topic, in order to facilitate the emergence of theoretical frameworks [3]. In this literature review, the aim is to synthesize the knowledge base, critically evaluate the methods used, and analyse the results obtained, in order to identify shortcomings and improve the two aforementioned deep learning architectures for image classification. The methodology for conducting this literature review is based on the guidelines presented in [3,4].

2.1. Data Sources

ACM Digital Library, Google Scholar, Science Direct, Scopus, and Web of Science were chosen as the data sources from which to extract the primary studies. The number of results found after searching for papers in each of the data sources is shown in Table 1.

2.2. Search String

The research questions developed for this paper served as the basis for the search strings used in each of the data sources. Table 2 lists the search strings used in each electronic database.

2.3. Inclusion Criteria

The inclusion criteria set to select the papers were that the studies were recent, written in English, and published between January 2021 and December 2022. This choice of publication dates is based on the fact that ViTs were not proposed until the end of 2020 [1]. In addition, the studies had to demonstrate a comparison between CNNs and ViTs for image classification and could use any pre-trained model of the two architectures. Studies that proposed a hybrid architecture, combining the two architectures into one, were also considered. The dataset used in the studies did not have to be a specific one, but it had to be an image dataset that allowed classification with both deep learning architectures.

2.4. Exclusion Criteria

Studies that oriented their research on using only one of the two deep learning architectures (i.e., Vision Transformers or Convolutional Neural Networks) were excluded. Additionally, papers that were found to be redundant across the chosen databases were eliminated. Finally, papers that did not have more than seven citations were also excluded.

In summary, with the application of these criteria, 10,690 papers were excluded from the Google Scholar database, 89 papers from Web of Science, 53 papers from Scopus, 19,158 papers from ACM Digital Library, and 1434 papers from Science Direct.

2.5. Results

After applying the inclusion and exclusion criteria to the papers obtained from each of the electronic databases, seventeen (17) papers were selected for the literature review. Table 3 lists all the papers selected for this work, together with the year and type of publication.

Figure 3 shows the distribution of the selected papers by year of publication.

Figure 4 shows the distribution of the selected studies by application area. As the figure shows, most of the papers are generic in their application area. In these papers without a specific application area, the authors try to better understand the characteristics of the two architectures: for example, which of the architectures is more transferable, whether transformer-based architectures are more robust than CNNs, and whether ViT sees the same information as a CNN despite its different architecture. Within the health domain, studies have been developed in different sub-areas, such as breast cancer, to show that ViT can be better than CNNs. The figure also shows that some work has been done, albeit to a lesser extent, in other application areas. Agriculture stands out, with two papers comparing ViTs with CNNs.

3. Findings

An overview of the studies selected through the research methodology is shown in Table 4. This information summarizes the authors' approaches, the findings, and the other architectures used to build each comparative study. To address the research questions, this section offers an overview of the data found in the collected papers.

In the study developed in [12], the authors aimed to compare the two architectures (i.e., ViT and CNN) and to create a hybrid model combining the two. The experiment was conducted using the ImageNet dataset, with perturbations applied to the dataset images. It was concluded that ViT can perform better, and be more resilient, than CNN on images with natural or adversarial perturbations. It was also found that the combination of the two architectures results in a 10% improvement in accuracy (Acc).
The work done in [14] aimed to compare Vision Transformers (ViT) with Convolutional Neural Networks (CNN) for digital holography, where the goal was to reconstruct amplitude and phase by extracting the distance of the object from the hologram. In this work, the DenseNet201, DenseNet169, EfficientNetB4, EfficientNetB7, ViT-B/16, ViT-B/32, and ViT-L/16 architectures were compared on a total of 3400 images, divided into four datasets: original images with or without filters, and negative images with or without filters. The authors concluded that ViT, despite having an accuracy similar to the CNNs, was more robust because, thanks to the self-attention mechanism, it can learn from the entire hologram rather than from a specific area.

The authors in [7] studied the performance of ViT in comparison with other architectures for detecting pneumonia in chest X-ray images. A ViT model, a CNN developed by the authors, and the VGG-16 network were used in the study, which focused on a dataset of 5856 images. After the experiments, the authors concluded that ViT was better than the CNN, with 96.45% accuracy, 86.38% validation accuracy, 10.8% loss, and 18.25% validation loss. This work highlighted that ViT's self-attention mechanism splits the image into small trainable patches, so that each part of the image can be given its own importance. However, the attention mechanism, as opposed to convolutional layers, makes ViT's performance saturate quickly when the goal is scalability.

In the study in [21], the goal was to compare ViT with state-of-the-art CNN networks in classifying UAV images for monitoring crops and weeds. The authors compared the influence of the size of the training dataset on the performance of the architectures and found that ViT performed better with fewer images than the CNN networks in terms of F1-score. They concluded that ViT-B/16 was the best model for crop and weed monitoring. In comparison with CNN networks, ViT could better learn the patterns in the images of small datasets, due to the self-attention mechanism.

In the scope of lung diseases, the authors in [11] investigated the performance of ViT models for automatically classifying emphysema subtypes from Computed Tomography (CT) images, in comparison with CNN networks. They performed a comparative study between the two architectures using a dataset collected by the authors (3192 patches) and a public dataset of 168 patches taken from 115 HRCT slides. They also verified the importance of pre-trained models. They concluded that ViT failed to generalize when trained with fewer images: in the pre-training comparison it reached 91.27% accuracy on training but only 70.59% on test.
In the work in [9], a comparison between state-of-the-art CNNs and ViT models for breast ultrasound image classification was developed. The study used two different datasets: the first containing 780 images and the second containing 163 images. The following architectures were selected: ResNet50, VGG-16, Inception, NASNet, ViT-S/32, ViT-B/32, ViT-Ti/16, R + ViT-Ti/16, and R26 + ViT-S/16. The ViT models were found to perform better than the CNN networks for image classification. The authors also highlighted that ViT models can perform better when trained on a small dataset because, via the attention mechanism, they can gather information from the different patches rather than from the image as a whole.

Benz et al. [5] compared ViT models with the MLP-Mixer architecture and with CNNs. The goal was to evaluate which architecture was more robust in image classification. The study consisted of generating perturbations and adversarial examples in the images and determining which of the architectures was most robust; it did not aim to analyse the causes. The authors concluded that ViTs were more robust than CNNs to adversarial attacks and that, from a feature perspective, CNN networks were more sensitive to high-frequency features. It was also suggested that the shift-variance property of convolutional layers may be at the origin of the network's lack of robustness when classifying transformed images.

The authors in [15] performed an analysis of ViT and CNN models aimed at detecting deepfake images. The experiment used the ForgeryNet dataset, with 2.9 million images and 220 thousand video clips, together with three different image manipulation techniques, training the models with both real and manipulated images. By training the ViT-B model and the EfficientNetV2 network, the authors showed that the CNN network fitted the training data better, obtaining higher training accuracy. However, ViT generalized better, reducing the bias in identifying anomalies introduced by one or more of the different manipulation techniques.

Chao Xin et al. [17] compared their ViT model with CNN networks and with another ViT model for image classification to detect skin cancer. The experiment used the public HAM10000 dataset of dermatoscopic skin cancer images and a clinical dataset collected through dermoscopy. In this study, multi-scale images and an overlapping sliding window were used to serialize the images. The authors also used contrastive learning to increase the similarity of samples with the same label and minimize the similarity of samples with different labels. With these techniques, the ViT model developed was better for skin cancer classification. The authors also demonstrated the effect of balancing the dataset on model performance, but did not present the F1-score values from before the dataset was balanced, which would be needed to verify the improvement.
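To illustrate the contrastive objective just mentioned, here is a minimal PyTorch sketch of a supervised contrastive loss. It is a generic SupCon-style formulation under assumed names and a default temperature, not the exact loss used in [17]: same-label pairs are pulled together and different-label pairs pushed apart.

```python
import torch
import torch.nn.functional as F

def supervised_contrastive_loss(features, labels, temperature=0.1):
    """Pull same-label embeddings together and push different labels apart.

    features: (B, D) embedding vectors; labels: (B,) integer class labels.
    A simplified SupCon-style loss, illustrative rather than the authors' exact one.
    """
    z = F.normalize(features, dim=1)
    sim = z @ z.t() / temperature                      # (B, B) cosine similarities
    mask_self = torch.eye(len(z), dtype=torch.bool)
    pos = labels.unsqueeze(0) == labels.unsqueeze(1)   # same-label pairs
    pos = pos & ~mask_self
    # Row-wise log-softmax, excluding each sample's self-similarity term.
    sim = sim.masked_fill(mask_self, float("-inf"))
    log_prob = sim - torch.logsumexp(sim, dim=1, keepdim=True)
    # Average log-probability of the positive pairs for each anchor.
    denom = pos.sum(1).clamp(min=1)
    return -(log_prob.masked_fill(~pos, 0).sum(1) / denom).mean()

loss = supervised_contrastive_loss(torch.randn(8, 128), torch.randint(0, 2, (8,)))
```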
The authors in [19] aimed to study whether ViT models could be an alternative to CNNs in time-critical applications: in edge computing and IoT settings, applications using deep learning models consume substantial computational resources. The experiment used pre-trained networks, such as ResNet152, DenseNet201, InceptionV3, and SL-ViT, with three different datasets covering images, audio, and video. They concluded that the ViT model introduced less overhead and performed better than the other architectures used. It was also shown that increasing the kernel size of convolutional layers and using dilated convolutions reduced the accuracy of a CNN network.

In the study carried out in [20], the authors looked to ViTs for an alternative to CNN networks for asphalt and concrete crack detection. The authors concluded that ViTs, due to the self-attention mechanism, performed better at detecting cracks in images with intense noise. On the same images, CNN networks suffered from a high false negative rate, as well as from biases in image classification.

Haolan Wang in [16] analysed eight different Vision Transformers and compared them with a CNN network, both with and without pre-trained parameters, for traffic sign recognition in autonomous driving systems. In this study, three different datasets with images of real-world traffic signs were used. The authors concluded that the pre-trained DenseNet161 network had a higher accuracy than the ViT models for traffic sign recognition. However, the ViT models performed better than the DenseNet161 network without pre-trained parameters. It was also possible to conclude that ViT models with a total number of parameters equal to or greater than that of the CNN networks used in the experiment had a shorter training time.

The work done in [13] compared CNN networks with Vision Transformer models for the classification of Diabetic Foot Ulcer images. The authors used the following architectures: Big Image Transfer (BiT), EfficientNet, ViT-base, and Data-efficient Image Transformers (DeiT), on a dataset composed of 15,683 images. A further aim of this study was to compare the performance of the deep learning models using Stochastic Gradient Descent (SGD) [22] against Sharpness-Aware Minimization (SAM) [23,24]. These two optimizers seek to minimize the value of the loss function, improving the generalization ability of the model; SAM, however, minimizes both the loss value and the loss sharpness, looking for parameters in a neighbourhood of uniformly low loss. This work concluded that the SAM optimizer improved the F1-score, AUC, Recall, and Precision values of all the architectures used, although the authors did not present the training and test values that would allow the improvement in generalization to be evaluated. The BiT-ResNeXt50 model with the SAM optimizer obtained the best performance for the classification of Diabetic Foot Ulcer images, with F1-score = 57.71%, AUC = 87.68%, Recall = 61.88%, and Precision = 57.74%.
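Because these results hinge on the SGD-versus-SAM comparison, the following is a minimal sketch of one SAM update in PyTorch, following the two-pass scheme described above (ascend to a worst-case nearby point, then descend using the gradient computed there). The function name, the rho default, and the training-loop wiring are illustrative assumptions, not the implementation used in [13].

```python
import torch

def sam_step(model, loss_fn, inputs, targets, base_opt, rho=0.05):
    """One simplified Sharpness-Aware Minimization step."""
    # 1) First pass: gradients at the current weights w.
    loss = loss_fn(model(inputs), targets)
    loss.backward()
    # 2) Climb to the worst-case nearby point w + e(w),
    #    with e(w) = rho * g / ||g|| (ascend along the gradient).
    grad_norm = torch.norm(torch.stack(
        [p.grad.norm(p=2) for p in model.parameters() if p.grad is not None]), p=2)
    eps = []
    with torch.no_grad():
        for p in model.parameters():
            if p.grad is None:
                eps.append(None)
                continue
            e = rho * p.grad / (grad_norm + 1e-12)
            p.add_(e)
            eps.append(e)
    model.zero_grad()
    # 3) Second pass: the gradient at the perturbed point approximates
    #    the gradient of the sharpness-aware loss.
    loss_fn(model(inputs), targets).backward()
    # 4) Restore the original weights, then take the base optimizer step.
    with torch.no_grad():
        for p, e in zip(model.parameters(), eps):
            if e is not None:
                p.sub_(e)
    base_opt.step()
    base_opt.zero_grad()
    return loss.item()
```

In a training loop, `base_opt` would be an ordinary optimizer such as `torch.optim.SGD(model.parameters(), lr=0.01)`, so SAM acts as a wrapper around the plain SGD update that [13] uses as its baseline.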
<div class='html-p'>The authors in [<a href="#B18-applsci-13-05521" class="html-bibr">18</a>] performed a comparative study between state-of-the-art ViT models and CNN networks and a model of their own, which combines a CNN with transformers to perform insect pest recognition to protect agriculture worldwide. This study involved three public datasets: the IP102 dataset, the D0 dataset and Li's dataset. The algorithm created by the authors uses the sequence of inputs formed by the CNN feature maps to make the model more efficient, and implements a flexible attention-based classification head to exploit the spatial information. Comparing the results obtained, the proposed model achieved the best performance in insect pest recognition, with an accuracy of 74.897%. This work demonstrated that fine-tuning worked better on Vision Transformers than on CNNs, but, on the other hand, it caused the number of parameters, the size, and the inference time of the model to increase significantly with respect to CNN networks. Through their experiments, the authors also demonstrated the advantage of using decoder layers in the proposed model to perform image classification: the greater the number of decoder layers, the greater the accuracy of the model. However, increasing the number of decoder layers also increased the number of parameters, the size, and the inference time of the model. In other words, the architecture consumes far greater computational resources to process the images, which may not compensate for the small accuracy gain over using few layers; in this study, going from one to three decoder layers represented an increase of only 0.478% in accuracy.</div><div class='html-p'>Several authors [<a href="#B6-applsci-13-05521" class="html-bibr">6</a>,<a href="#B8-applsci-13-05521" class="html-bibr">8</a>,<a href="#B10-applsci-13-05521" class="html-bibr">10</a>] went deeper into the investigation, aiming to understand how the learning process of Vision Transformers works, whether ViTs are more transferable, and whether the transformer-based architecture is more robust than CNNs. In this sense, the authors in [<a href="#B8-applsci-13-05521" class="html-bibr">8</a>] analysed the internal representations of ViT and CNN structures on image classification benchmarks and found differences between them. One of the differences was that ViTs show greater similarity between high and low layers, while the CNN architecture needs more low layers to compute similar representations on smaller datasets. This is due to the self-attention layers implemented in ViTs, which allow them to aggregate information from other spatial locations, in contrast to the fixed receptive field sizes of CNNs. They also observed that, in the lower self-attention layers, ViTs can access information through both local heads (small distances) and global heads (large distances), whereas CNNs only have access to local information in the lower layers. On the other hand, the authors in [<a href="#B10-applsci-13-05521" class="html-bibr">10</a>] systematically analysed the transfer learning capacity of the two architectures. The study was conducted by comparing the performance of the two architectures on single-task and multi-task learning problems, using the ImageNet dataset. Through this study, the authors concluded that the transformer-based architecture contained more transferable representations than convolutional networks for fine-tuning, presenting better performance and robustness in multi-task learning problems.</div>
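<div class='html-p'>As an illustration of the fine-tuning setting compared in [<a href="#B10-applsci-13-05521" class="html-bibr">10</a>], the sketch below loads pre-trained ViT and CNN backbones and fine-tunes all of their weights on a small downstream task; the use of the timm library, the model names, and the hyperparameters are assumptions made for the example, not details taken from the study.</div>
<pre><code class="language-python">
import timm
import torch

# Pre-trained backbones with a fresh classification head for a
# hypothetical 10-class downstream task.
vit = timm.create_model("vit_base_patch16_224", pretrained=True, num_classes=10)
cnn = timm.create_model("resnet50", pretrained=True, num_classes=10)

def finetune(model, loader, epochs=5, lr=1e-4):
    """Fine-tune all weights with a small learning rate; applying
    the same recipe to both backbones allows the transferability of
    their representations to be compared."""
    opt = torch.optim.AdamW(model.parameters(), lr=lr)
    loss_fn = torch.nn.CrossEntropyLoss()
    model.train()
    for _ in range(epochs):
        for x, y in loader:
            opt.zero_grad()
            loss_fn(model(x), y).backward()
            opt.step()
    return model
</code></pre>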
<div class='html-p'>In another study, carried out in [<a href="#B6-applsci-13-05521" class="html-bibr">6</a>], the goal was to test whether ViTs are more robust than CNNs, as the most recent studies had suggested. The authors compared the robustness of the two architectures using two different types of perturbations: adversarial samples, which evaluate the robustness of deep learning architectures on images with deliberately crafted perturbations, and out-of-distribution samples, which evaluate the robustness of the architectures on image classification benchmarks. Through this experiment, it was demonstrated that, by replacing the ReLU activation function with the activation function of the transformer-based architecture (i.e., GELU), the CNN network became more robust than the ViT on adversarial samples. It was also demonstrated that CNN networks were more robust than ViTs against patch-based attacks. However, the authors concluded that the self-attention mechanism was the key to the robustness of the transformer-based architecture in most of the experiments performed.</div><div class="html-table-wrap" id="applsci-13-05521-t004"> <div class="html-caption"><b>Table 4.</b> Overview of selected studies.</div> <table > <thead ><tr ><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Ref.</th><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Datasets</th><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Image Size</th><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Number of<br>Classes</th><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Hardware</th><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Evaluated<br>Architectures</th><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Best<br>Architecture</th><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Best Results</th></tr></thead><tbody ><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >[<a href="#B5-applsci-13-05521" class="html-bibr">5</a>]</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >ImageNet-1K (more than 1.431 M images) for training and ImageNet-C for validation</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >224 × 224</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >N/A</td><td align='center' valign='middle'
style='border-bottom:solid thin' class='html-align-center' >ViT-B/16, ViT-L/16, Mixer-B/16, Mixer-L/16, RN18 (SWSL), RN50 (SWSL), RN18 and RN50</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >ViT-L/16</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >82.89% Acc</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >[<a href="#B6-applsci-13-05521" class="html-bibr">6</a>]</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >ImageNet-A; ImageNet-C and Stylized ImageNet</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >224 × 224</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >N/A</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >N/A</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >ResNet50 and DeiT-S</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >N/A</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >N/A</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >[<a href="#B7-applsci-13-05521" class="html-bibr">7</a>]</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >5856 images collected by X-ray</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >250 × 250 for ViT<br>224 × 224 for CNN</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Intel Core i5-8300H 2.30 GHz</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >ViT, CNN and VGG16</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >ViT</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >96.45% Acc, 86.38% val. Acc, 10.92% loss and 18.25% val. loss
</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >[<a href="#B8-applsci-13-05521" class="html-bibr">8</a>]</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >ImageNet ILSVRC 2012 (1.78 M images)</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >224 × 224</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >1000</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >N/A</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >ViT-B/32, ViT-L/16, ViT-H/14, ResNet50 and ResNet152</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >N/A</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >N/A</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >[<a href="#B9-applsci-13-05521" class="html-bibr">9</a>]</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Public dataset 1 [<a href="#B25-applsci-13-05521" class="html-bibr">25</a>] with 780 images; Public dataset 2 [<a href="#B26-applsci-13-05521" class="html-bibr">26</a>] with 163 images</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >224 × 224</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >3</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >N/A</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >ViT-S/32, ViT-B/32, ViT-Ti/16, R26 + S/16, R + Ti/16, VGG, Inception and NASNet</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >ViT-B/32</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >86.7% Acc and 95% AUC</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >[<a href="#B10-applsci-13-05521" class="html-bibr">10</a>]</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Flower 102 (4080 to 11,016 images); CUB 200 (11,788 images); Indoor 67 (15,620 images); NY Depth V2 (1449 images); WikiArt; COVID-19 Image Data Collection (700 images); Caltech101 (9146 images); FG-NET (1002 images)</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >384 × 384; 224 × 224; 300 × 300</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >40 to 102</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >N/A</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >R-101 × 3, R-152 × 4, ViT-B/16, ViT-L/16, and Swin-B</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >N/A</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >N/A</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >[<a href="#B11-applsci-13-05521" class="html-bibr">11</a>]</td><td align='center' valign='middle'
style='border-bottom:solid thin' class='html-align-center' >3192 images collected by CT and 160 images of a public dataset with CT biomarkers</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >61 × 61</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >4</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Intel Core i7-9700 3.0 GHz, 32 GB RAM; NVIDIA GeForce RTX 2080 Ti (11 GB GDDR6)</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >AlexNet, VGG-16, InceptionV3, MobileNetV2, ResNet34, ResNet50 and ViT</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >ViT</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >95.95% Acc</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >[<a href="#B12-applsci-13-05521" class="html-bibr">12</a>]</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >ImageNet-C benchmark</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >224 × 224</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >NVIDIA Quadro A6000</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >ViT-L/16, CNN, hybrid model (BiT-M + ResNet152 × 4)</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Hybrid model</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >99.20% Acc</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >[<a href="#B13-applsci-13-05521" class="html-bibr">13</a>]</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Dataset provided in DFUC 2021 challenge (15,683 images)</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >224 × 224</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >4</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >NVIDIA GeForce RTX 3080, 10 GB memory</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >EfficientNetB3, BiT-ResNeXt50, ViT-B/16 and DeiT-S/16</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >BiT-ResNeXt50</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >88.49% AUC, 61.53% F1-Score, 65.59% recall and 60.53% precision</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >[<a href="#B14-applsci-13-05521" class="html-bibr">14</a>]</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >3400 images collected by holographic camera</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >512 × 512</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >10</td><td align='center'
valign='middle' style='border-bottom:solid thin' class='html-align-center' >NVIDIA V100</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >EfficientNetB7, DenseNet169 and ViT-B/16</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >ViT-B/16</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >99% Acc</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >[<a href="#B15-applsci-13-05521" class="html-bibr">15</a>]</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >ForgeryNet with 2.9 M images</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >N/A</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >N/A</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >EfficientNetV2 and ViT-B</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >N/A</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >N/A</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >[<a href="#B16-applsci-13-05521" class="html-bibr">16</a>]</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >German dataset (51,830 images); Indian dataset (1976 images); Chinese dataset (18,168 images)</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >128 × 128</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >15, 43 and 103</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >AMD Ryzen 7 5800H; NVIDIA GeForce RTX 3070</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >DenseNet161, ViT, DeepViT, MLP-Mixer, CvT, PiT, CaiT, CCT, CrossViT and Twins-SVT</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >CCT</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >99.04% Acc</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >[<a href="#B17-applsci-13-05521" class="html-bibr">17</a>]</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >HAM10000 dataset (10,015 images); 1016 images collected by dermoscopy</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >224 × 224</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >3</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Intel i7; 2x NVIDIA RTX 3060, 12 GB</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >MobileNetV2, ResNet50, InceptionV2, ViT and Proposed ViT model</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Proposed ViT model</td><td align='center' valign='middle' style='border-bottom:solid thin'
class='html-align-center' >94.10% Acc, 94.10% precision and 94.10% F1-Score</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >[<a href="#B18-applsci-13-05521" class="html-bibr">18</a>]</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >IP102 dataset (75,222 images); D0 dataset (4508 images); Li's dataset (5629 images)</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >224 × 224; 480 × 480</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >10 to 102</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Train: Intel Xeon; 8x NVIDIA Tesla V100, 256 GB.<br>Test: Intel Core; NVIDIA GTX 1060 Ti, 16 GB </td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >ResNet, EfficientNetB0, EfficientNetB1, RepVGG, VGG-16, ViT-L/16 and proposed hybrid model</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Proposed hybrid model</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >99.47% Acc on the D0 dataset and 97.94% Acc on Li's dataset</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >[<a href="#B19-applsci-13-05521" class="html-bibr">19</a>]</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >CIFAR-10 and CIFAR-100 (60,000 images each); Speech Commands (100,503 1-second audio clips); GTZAN (1000 30-second audio clips); DISCO (1935 images)</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >224 × 224 px; 1024 × 576 px.<br>Spectrograms: 229 × 229 samples; 512 × 256 samples</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >10 to 100</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >4x NVIDIA 2080 Ti</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >SL-ViT, ResNet152, DenseNet201 and InceptionV3</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >SL-ViT</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >71.89% Acc</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >[<a href="#B20-applsci-13-05521" class="html-bibr">20</a>]</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >CrackTree260 (260 images); Ozegenel (458 images); Lab's own dataset (80,000 images)</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >256 × 256; 448 × 448</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >N/A</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >TransUNet, U-Net, DeepLabv3+ and CNN + ViT</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >CNN + ViT</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >99.55% Acc and 99.57%
precision</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >[<a href="#B21-applsci-13-05521" class="html-bibr">21</a>]</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >10,265 images collected by Pilgrim technologies UAV with a Sony ILCE-7R (36 megapixels)</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >64 × 64</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >5</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Intel Xeon E5-1620 V4 3.50 GHz with 8 processors, 16 GB RAM; NVIDIA Quadro M2000</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >ViT-B/16, ViT-B/32, EfficientNetB0, EfficientNetB1 and ResNet50</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >ViT-B/16</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >99.8% Acc</td></tr></tbody> </table> </div></section><section id='sec4-applsci-13-05521' type='discussion'><h2 data-nested='1'> 4. Discussion</h2><div class='html-p'>The results can be summarized as follows. In [<a href="#B12-applsci-13-05521" class="html-bibr">12</a>], ViTs were found to perform better and to be more resilient to images with natural or adverse disturbances compared to CNNs. Another study [<a href="#B14-applsci-13-05521" class="html-bibr">14</a>] concluded that ViTs are more robust in digital holography because they can access the entire hologram rather than just a specific area, giving them an advantage. ViTs have also been found to outperform CNNs in detecting pneumonia in chest X-ray images [<a href="#B7-applsci-13-05521" class="html-bibr">7</a>] and in classifying UAV images for crop and weed monitoring with small datasets [<a href="#B21-applsci-13-05521" class="html-bibr">21</a>]. However, it has been noted that ViT performance may saturate if scalability is the goal [<a href="#B7-applsci-13-05521" class="html-bibr">7</a>]. In a study on classifying emphysema subtypes in CT images [<a href="#B11-applsci-13-05521" class="html-bibr">11</a>], ViTs were found to struggle with generalization when trained on fewer images. Nevertheless, ViTs were found to outperform CNNs in breast ultrasound image classification, especially with small datasets [<a href="#B9-applsci-13-05521" class="html-bibr">9</a>]. Another study [<a href="#B5-applsci-13-05521" class="html-bibr">5</a>] found that ViTs are more robust to adversarial attacks and that CNNs are more sensitive to high-frequency features. The authors in [<a href="#B15-applsci-13-05521" class="html-bibr">15</a>] found that CNNs achieved higher training accuracy, but that ViTs showed greater potential to generalize and to reduce bias in anomaly detection. In [<a href="#B17-applsci-13-05521" class="html-bibr">17</a>], the authors claimed that the ViT model showed better performance for skin cancer classification. ViTs have also been shown to introduce less overhead and perform better for time-critical applications in edge computing and IoT networks [<a href="#B19-applsci-13-05521" class="html-bibr">19</a>].
In [<a href="#B20-applsci-13-05521" class="html-bibr">20</a>], the authors investigated the use of ViTs for asphalt and concrete crack detection and found that ViTs performed better due to the self-attention mechanism, especially on images with intense noise and biases. Wang [<a href="#B16-applsci-13-05521" class="html-bibr">16</a>] found that a pre-trained CNN network had higher accuracy, but that the ViT models performed better than the non-pre-trained CNN network and had a shorter training time. The authors in [<a href="#B13-applsci-13-05521" class="html-bibr">13</a>] used several models for diabetic foot ulcer image classification and compared the SGD and SAM optimizers, concluding that the SAM optimizer improved several evaluation metrics. In [<a href="#B18-applsci-13-05521" class="html-bibr">18</a>], the authors showed that fine-tuning performed better on ViT models than on CNNs for insect pest recognition.</div><div class='html-p'>Therefore, based on the information gathered from the selected papers, we attempt to answer the research questions posed in <a href="#sec1-applsci-13-05521" class="html-sec">Section 1</a>:</div><div class='html-p'><ul class='html-simple'><li><div class='html-p'>RQ1—Can the ViT architecture have a better performance than the CNN architecture, regardless of the characteristics of the dataset?</div></li></ul></div><div class='html-p'>The literature review shows that ViTs in image processing can be more efficient on smaller datasets due to the larger number of relations created between image patches through the self-attention mechanism. However, it is also shown that ViTs trained with little data can have less generalization ability and worse performance compared to CNNs.</div><div class='html-p'><ul class='html-simple'><li><div class='html-p'>RQ2—What influences the CNNs that do not allow them to perform as well as the ViTs?</div></li></ul></div><div class='html-p'>The lack of true shift-invariance is a limitation of CNNs: the subsampling performed by the network makes it shift-variant, so the introduction of noise in the input images prevents the architecture from extracting the maximum information from the central pixels. To mitigate this, the authors in [<a href="#B27-applsci-13-05521" class="html-bibr">27</a>] propose the addition of an anti-aliasing filter, which combines blurring with subsampling in the Convolutional, MaxPooling and AveragePooling layers, and demonstrate experimentally that applying this filter yields greater generalization capacity and an increase in the accuracy of the CNN. Furthermore, increasing the kernel size in convolutional layers and using dilated convolutions have been shown to be limitations that deteriorate the performance of CNNs against ViTs.</div>
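<div class='html-p'>As an illustration of the anti-aliasing idea proposed in [<a href="#B27-applsci-13-05521" class="html-bibr">27</a>], the sketch below implements a blur-then-subsample layer with a fixed binomial kernel; the 3 × 3 kernel follows the spirit of that work, but the code is an illustrative reimplementation rather than the authors' own.</div>
<pre><code class="language-python">
import torch
import torch.nn as nn
import torch.nn.functional as F

class BlurPool2d(nn.Module):
    """Anti-aliased downsampling: low-pass blur with a fixed
    binomial kernel, then subsample. Intended to take over the
    stride of convolutional and pooling layers."""
    def __init__(self, channels, stride=2):
        super().__init__()
        self.stride = stride
        k = torch.tensor([1.0, 2.0, 1.0])
        k = torch.outer(k, k)          # 3x3 binomial (blur) filter
        k = k / k.sum()
        # One copy of the kernel per channel (depthwise filtering).
        self.register_buffer(
            "kernel", k.view(1, 1, 3, 3).repeat(channels, 1, 1, 1))

    def forward(self, x):
        x = F.pad(x, (1, 1, 1, 1), mode="reflect")
        return F.conv2d(x, self.kernel, stride=self.stride,
                        groups=x.shape[1])

# A strided max-pool becomes a dense max-pool followed by BlurPool:
antialiased_pool = nn.Sequential(nn.MaxPool2d(2, stride=1),
                                 BlurPool2d(channels=64))
</code></pre>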
<div class='html-p'><ul class='html-simple'><li><div class='html-p'>RQ3—How does the Multi-Head Attention mechanism, which is a key component of ViTs, influence the performance of these models in image classification?</div></li></ul></div><div class='html-p'>The attention mechanism maps a query and a set of key-value pairs to an output, where the output is a weighted sum of the values and the weight assigned to each value is computed by a compatibility function between the query and the corresponding key. Instead of performing a single attention function, the Multi-Head Attention mechanism performs several attention functions over different learned projections in parallel [<a href="#B28-applsci-13-05521" class="html-bibr">28</a>]. This mechanism improves the ViT architecture because it allows the model to extract more information from each of the image patches placed inside the embedding. In addition, this mechanism can perform better when the images contain more secondary elements that illustrate the central element. And, since it performs several computations in parallel, it reduces the computational cost [<a href="#B29-applsci-13-05521" class="html-bibr">29</a>]; a minimal sketch of the mechanism is given below.</div>
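<div class='html-p'>The mapping just described can be written compactly as scaled dot-product attention and its multi-head variant, following the formulation of [<a href="#B28-applsci-13-05521" class="html-bibr">28</a>]; the tensor dimensions used here (a 768-dimensional embedding, 8 heads, and 196 patches plus a class token) are illustrative.</div>
<pre><code class="language-python">
import torch
import torch.nn.functional as F

def attention(q, k, v):
    """Scaled dot-product attention: the output is a weighted sum of
    the values v, with weights given by the softmax-normalized
    compatibility between queries and keys."""
    d_k = q.shape[-1]
    scores = q @ k.transpose(-2, -1) / d_k ** 0.5
    return F.softmax(scores, dim=-1) @ v

def multi_head(x, wq, wk, wv, wo, heads=8):
    """Multi-Head Attention: project x into several subspaces, run
    attention in every head in parallel, concatenate, and project."""
    B, N, D = x.shape
    split = lambda t: t.view(B, N, heads, D // heads).transpose(1, 2)
    q, k, v = split(x @ wq), split(x @ wk), split(x @ wv)
    out = attention(q, k, v)               # (B, heads, N, D//heads)
    out = out.transpose(1, 2).reshape(B, N, D)
    return out @ wo

x = torch.randn(2, 197, 768)               # 196 patches + class token
w = [torch.randn(768, 768) * 0.02 for _ in range(4)]
print(multi_head(x, *w).shape)             # torch.Size([2, 197, 768])
</code></pre>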
<div class='html-p'>Overall, ViTs have shown promising performance compared to CNNs in various applications, but there are limitations and factors that can affect their performance, such as dataset size, scalability, and pre-training.</div></section><section id='sec5-applsci-13-05521' type=''><h2 data-nested='1'> 5. Threats to Validity</h2><div class='html-p'>This section discusses internal and external threats to validity. It examines the validity of the process followed in this study and how the results of this study can be replicated in future experiments.</div><div class='html-p'>In this literature review, different search strings were used in each of the selected data sources, resulting in different results from each source. This approach may introduce a bias into the validation of the study, as it makes it difficult to draw conclusions about the diversity of studies obtained by replicating the same search. In addition, the maturity of the work was identified as an internal threat to validity, as the ViT architecture is relatively new and only a limited number of research projects have been conducted using it. In order to draw more comprehensive conclusions about the robustness of ViT compared to CNN, it is imperative that this architecture is further disseminated and deployed, thereby making more research available for analysis.</div><div class='html-p'>In addition to these threats, this study did not use methods that would allow the results obtained in the selected papers to be analysed quantitatively and qualitatively. This may bias the validity of this review in demonstrating which of the deep learning architectures is more efficient in image processing.</div><div class='html-p'>The findings obtained in this study could be replicated in other future research in image classification. However, the results obtained may not be the same as those described in the selected papers, because it has been shown that different problems and different methodologies lead to different results. In addition, some authors do not describe in sufficient detail all the methodologies they used, nor the conditions under which their experiments were performed. </div></section><section id='sec6-applsci-13-05521' type=''><h2 data-nested='1'> 6. Strengths, Limitations, and Future Research Directions</h2><div class='html-p'>The review made it possible to identify not only the strengths of each architecture (outlined in <a href="#sec6dot1-applsci-13-05521" class="html-sec">Section 6.1</a>), but also their potential for improvement (described in <a href="#sec6dot2-applsci-13-05521" class="html-sec">Section 6.2</a>). Future research directions were also derived from this and are presented in <a href="#sec6dot3-applsci-13-05521" class="html-sec">Section 6.3</a>.</div><section id='sec6dot1-applsci-13-05521' type=''><h4 class='html-italic' data-nested='2'> 6.1. Strengths</h4><div class='html-p'>Both CNNs and ViTs have their own advantages, and some common ones. This section explores these in more detail, including considerations on datasets, robustness, performance optimization, evaluation, explainability and interpretability, and architectures.</div><section id='sec6dot1dot1-applsci-13-05521' type=''><h4 class='' data-nested='3'> 6.1.1. Dataset Considerations</h4><div class='html-p'>CNNs have been widely used and extensively studied for image-related tasks, resulting in a rich literature, established architectures, and pre-trained models, making them accessible and convenient for many datasets. On the other hand, ViTs can process patches in parallel, which can lead to efficient computation, especially for large-scale datasets, and allow faster training and inference. ViTs can also handle images of different sizes and aspect ratios without losing resolution, making them more scalable and adaptable to different datasets and applications.</div></section><section id='sec6dot1dot2-applsci-13-05521' type=''><h4 class='' data-nested='3'> 6.1.2. Robustness</h4><div class='html-p'>CNNs are inherently translation-invariant, making them robust to small changes in object position or orientation within an image. The main advantage of ViTs is their ability to effectively capture global contextual information through the self-attention mechanism, enabling them to model long-range dependencies and contextual relationships, which can improve robustness in tasks that require understanding global context. ViTs can also adaptively adjust the receptive fields of the self-attention mechanism based on input data, allowing them to better capture both local and global features, making them more robust to changes in scale, rotation, or perspective of objects.</div><div class='html-p'>Both architectures can be trained using data augmentation techniques, such as random cropping, flipping, and rotation, which can help improve robustness to changes in input data and reduce overfitting. Another technique is known as adversarial training, where the models are trained on adversarial examples (perturbed images designed to confuse the model) to improve their ability to handle input data with adversarial perturbations; a minimal sketch of this recipe follows. Combining models using ensemble methods, such as bagging or boosting, can also improve robustness by exploiting the diversity of multiple models, which can help mitigate the effects of individual model weaknesses.</div>
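<div class='html-p'>The following architecture-agnostic sketch illustrates adversarial training with the classic fast gradient sign method (FGSM) used to craft the perturbed images; FGSM and the value of epsilon are illustrative choices, not techniques prescribed by the reviewed studies.</div>
<pre><code class="language-python">
import torch

def fgsm_example(model, x, y, loss_fn, eps=8 / 255):
    """Craft an FGSM adversarial example: perturb the input one step
    in the direction that increases the loss the most."""
    x = x.clone().detach().requires_grad_(True)
    loss_fn(model(x), y).backward()
    return (x + eps * x.grad.sign()).clamp(0, 1).detach()

def adversarial_training_step(model, x, y, loss_fn, opt):
    """Train on clean and adversarially perturbed inputs; the same
    step works for a CNN or a ViT classifier."""
    x_adv = fgsm_example(model, x, y, loss_fn)
    opt.zero_grad()
    loss = loss_fn(model(x), y) + loss_fn(model(x_adv), y)
    loss.backward()
    opt.step()
    return loss.item()
</code></pre>
</section>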
<section id='sec6dot1dot3-applsci-13-05521' type=''><h4 class='' data-nested='3'> 6.1.3. Performance Optimization</h4><div class='html-p'>CNNs can be effectively compressed using techniques such as pruning, quantization, and knowledge distillation, reducing model size and improving inference efficiency without significant loss of performance. They are also well suited to hardware optimization, with specialized hardware accelerators (e.g., GPUs, TPUs) designed to perform convolutional operations efficiently, leading to optimized performance in terms of speed and energy consumption.</div>
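<div class='html-p'>To make two of these compression techniques concrete, the sketch below shows a standard knowledge-distillation loss (the temperature and weighting are illustrative assumptions) together with one-line magnitude pruning via torch.nn.utils.prune; this is a generic example, not code from any of the reviewed studies.</div>
<pre><code class="language-python">
import torch
import torch.nn.functional as F
from torch.nn.utils import prune

def distillation_loss(student_logits, teacher_logits, targets,
                      T=4.0, alpha=0.7):
    """Knowledge distillation: the student matches the teacher's
    softened output distribution in addition to the hard labels."""
    teacher_logits = teacher_logits.detach()  # teacher is frozen
    soft = F.kl_div(F.log_softmax(student_logits / T, dim=-1),
                    F.softmax(teacher_logits / T, dim=-1),
                    reduction="batchmean") * (T * T)
    hard = F.cross_entropy(student_logits, targets)
    return alpha * soft + (1 - alpha) * hard

# Magnitude pruning: zero out the 30% smallest weights of a layer.
layer = torch.nn.Linear(768, 768)
prune.l1_unstructured(layer, name="weight", amount=0.3)
</code></pre>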
<div class='html-p'>ViTs can efficiently scale to handle high-resolution images or large-scale datasets because they operate on the entire image at once and do not require processing of local receptive fields at multiple spatial scales, potentially resulting in improved performance in terms of scalability.</div><div class='html-p'>Transfer learning, i.e., pre-training on large-scale datasets and fine-tuning on smaller ones, can potentially lead to improved performance with limited available data and can be used with both architectures.</div></section><section id='sec6dot1dot4-applsci-13-05521' type=''><h4 class='' data-nested='3'> 6.1.4. Evaluation</h4><div class='html-p'>CNNs have been widely used in image classification tasks for many years, resulting in well-established benchmarks and evaluation metrics that allow meaningful comparison and evaluation of model performance. The standardized evaluation protocols, such as cross-validation or hold-out validation, which provide a consistent framework for evaluating and comparing model performance across different datasets and tasks, are applicable to both architectures.</div></section><section id='sec6dot1dot5-applsci-13-05521' type=''><h4 class='' data-nested='3'> 6.1.5. Explainability and Interpretability</h4><div class='html-p'>CNNs produce feature maps that can be visualized, making it possible to interpret the behaviour of the model by visualizing the learned features or activations in different layers. They capture local features in images, such as edges or textures, which can lead to interpretable features that are visually meaningful and can provide insight into how the model is processing the input images. ViTs, on the other hand, are designed to capture global contextual information, making them potentially more interpretable in tasks that require an understanding of long-range dependencies or global context. They have a hierarchical structure with self-attention heads that can be visualized and interpreted individually, providing insights into how different heads attend to different features or regions in the input images.</div></section><section id='sec6dot1dot6-applsci-13-05521' type=''><h4 class='' data-nested='3'> 6.1.6. Architecture</h4><div class='html-p'>CNNs have a wide range of established architecture variants, such as VGG, ResNet, and Inception, with proven effectiveness in various image classification tasks. These architectures are well-tested and widely used in the deep learning community. ViTs can be easily modified to accommodate different input sizes, patch sizes, and depths, providing flexibility in architecture design and optimization.</div></section></section><section id='sec6dot2-applsci-13-05521' type=''><h4 class='html-italic' data-nested='2'> 6.2. Limitations</h4><div class='html-p'>Despite their many advantages and the breakthroughs made over the years, there are still some drawbacks to the architectures studied. This section focuses on these. </div><section id='sec6dot2dot1-applsci-13-05521' type=''><h4 class='' data-nested='3'> 6.2.1. Dataset Considerations</h4><div class='html-p'>CNNs can be susceptible to biases present in training datasets, such as biased sampling or label noise, which can affect the validity of training results. They typically operate on fixed input spatial resolutions, which may not be optimal for images of varying size or aspect ratio, resulting in information loss or distortion. While pre-trained models for CNNs are well-established, pre-trained models for ViTs are (still) less common for some datasets, which may affect the ease of use in some situations.</div></section><section id='sec6dot2dot2-applsci-13-05521' type=''><h4 class='' data-nested='3'> 6.2.2. Robustness</h4><div class='html-p'>CNNs may struggle to capture long-range contextual information, as they focus primarily on local feature extraction, which may limit the ability to understand global context, leading to reduced robustness in tasks that require global context, such as scene understanding, image captioning or fine-grained recognition.</div><div class='html-p'>Both architectures can be prone to overfitting, especially when the training data is limited or noisy, which can lead to reduced robustness to input data outside the training distribution. Adversarial attacks can also pose a challenge to the robustness of both architectures. In particular, ViTs do not have an inherent spatial inductive bias like CNNs, which are specifically designed to exploit the spatial locality of images. This can make them more vulnerable to certain types of adversarial attacks that rely on spatial information, such as spatially transformed adversarial examples.</div></section><section id='sec6dot2dot3-applsci-13-05521' type=''><h4 class='' data-nested='3'> 6.2.3. Performance Optimization</h4><div class='html-p'>CNNs can suffer from reduced performance and increased memory consumption when applied to high-resolution images or large-scale datasets, as they require processing of local receptive fields at multiple spatial scales, leading to increased computational requirements. Compared to CNNs, ViTs are computationally expensive, especially as the image size or model depth increases, which may limit their use in certain resource-constrained environments. Reduced computational complexity can sometimes result in decreased robustness, as models may not have the ability to learn complex features that can help generalize well to adversarial examples.</div></section><section id='sec6dot2dot4-applsci-13-05521' type=''><h4 class='' data-nested='3'> 6.2.4. Evaluation</h4><div class='html-p'>As mentioned above, CNNs are primarily designed for local feature extraction and may struggle to capture long-range contextual dependencies, which can limit the evaluation performance in tasks that require understanding of global context or long-term dependencies. ViTs are relatively newer than CNNs and, as such, may lack well-established benchmarks or evaluation metrics for specific tasks or datasets, which can make performance evaluation difficult and less standardized.</div></section><section id='sec6dot2dot5-applsci-13-05521' type=''><h4 class='' data-nested='3'> 6.2.5. Explainability and Interpretability</h4><div class='html-p'>Despite well-established methods for model interpretation, CNNs still lack full interpretability because the complex interactions between layers and neurons can make it difficult to fully understand the model's decision-making process, particularly in deeper layers of the network.</div><div class='html-p'>While ViTs produce attention maps for interpretability, the complex interactions between self-attention heads can still present challenges in accurately interpreting the model's behaviour.
ViTs can have multiple heads attending to different regions, which can make it difficult to interpret the interactions between different attention heads and to understand the reasoning behind model predictions.</div></section><section id='sec6dot2dot6-applsci-13-05521' type=''><h4 class='' data-nested='3'> 6.2.6. Architecture</h4><div class='html-p'>CNNs typically have fixed, predefined model architectures, which may limit the flexibility to adapt to specific task requirements or incorporate domain-specific knowledge, potentially affecting performance optimization. For ViTs, the availability of established architecture variants is still limited, which may require more experimentation and exploration to find optimal architectures for specific tasks.</div></section></section><section id='sec6dot3-applsci-13-05521' type=''><h4 class='html-italic' data-nested='2'> 6.3. Future Research Directions</h4><div class='html-p'>As future research, meta-analyses or systematic reviews should be conducted within the scope of this review to provide the scientific community with more detail on which of the architectures is more effective at image classification, and to specify under what conditions a particular architecture stands out from the others. This would facilitate the choice of the deep learning architecture to be used in future image classification problems. This section aims to provide guidelines for future research in this area.</div><section id='sec6dot3dot1-applsci-13-05521' type=''><h4 class='' data-nested='3'> 6.3.1. Dataset Considerations</h4><div class='html-p'>The datasets used in most studies may not be representative of real-world scenarios. Future research should consider using more diverse datasets that better reflect the complexity and variability of real-world images. As an example, it would be interesting to study the impact that image resolution might have on the performance of deep learning architectures. In particular, it would be important to find out how image resolution influences the performance of each architecture (i.e., ViT, CNN, and MLP-Mixer), as well as its impact on their processing time.</div></section><section id='sec6dot3dot2-applsci-13-05521' type=''><h4 class='' data-nested='3'> 6.3.2. Robustness</h4><div class='html-p'>As documented in [<a href="#B5-applsci-13-05521" class="html-bibr">5</a>], deep learning models are typically vulnerable to adversarial attacks, where small perturbations to an input image can cause the model to misclassify it. Future research should focus on developing architectures that are more robust to adversarial attacks (for example, by further augmenting the robustness of ViTs), as well as exploring ways to detect and defend against these attacks.</div><div class='html-p'>Beyond that, most studies (such as the ones reviewed in this work) have focused on the performance of deep learning architectures on image classification tasks, but there are many other image processing tasks (such as object detection, segmentation, and captioning) that could benefit from the use of these architectures. Future research should further explore the effectiveness of these architectures on these tasks.</div></section><section id='sec6dot3dot3-applsci-13-05521' type=''><h4 class='' data-nested='3'> 6.3.3. Performance Optimization</h4><div class='html-p'>Deep learning architectures require substantial amounts of labelled data to achieve high performance.
However, labelling data is time-consuming and expensive. Future research should explore ways to improve the efficiency of deep learning models, such as developing semi-supervised learning methods or transfer learning (following up on the finding in [<a href="#B10-applsci-13-05521" class="html-bibr">10</a>]) that can leverage pre-trained models.</div><div class='html-p'>In addition, the need for large amounts of labelled data requires significant computational resources, which limits deployment on resource-constrained devices. Future research should focus on developing architectures that are optimized for deployment on these devices, as well as exploring ways to reduce the computational cost of existing architectures. It should also explore the advantages of applying knowledge distillation to deep learning architectures to reduce the computational resources required.</div></section><section id='sec6dot3dot4-applsci-13-05521' type=''><h4 class='' data-nested='3'> 6.3.4. Evaluation</h4><div class='html-p'>The adequacy of the metrics to the task and problem at hand is another suggested line of future research. Most studies have used standard performance metrics (such as accuracy and F1-Score) to evaluate the performance of deep learning architectures. Future research should consider using more diverse metrics that better capture the strengths and weaknesses of different architectures.</div></section><section id='sec6dot3dot5-applsci-13-05521' type=''><h4 class='' data-nested='3'> 6.3.5. Explainability and Interpretability</h4><div class='html-p'>Deep learning models are often considered black boxes because they do not provide insight into their decision-making process. This may prevent the use of these models in certain areas, such as justice and healthcare [<a href="#B30-applsci-13-05521" class="html-bibr">30</a>], among others. Future research should focus on making these models more interpretable and explainable, for example, by designing transformer architectures that provide visual explanations of their decisions or by developing methods for extracting features that are easily interpretable.</div></section><section id='sec6dot3dot6-applsci-13-05521' type=''><h4 class='' data-nested='3'> 6.3.6. Architecture</h4><div class='html-p'>In future investigations, it will be necessary to study the impact of the MLP-Mixer deep learning architecture in image processing and the characteristics that allow it to outperform CNNs while remaining inferior to the ViT architecture [<a href="#B5-applsci-13-05521" class="html-bibr">5</a>]. Future research should also focus on developing novel architectures that can achieve high performance with fewer parameters or that are more efficient in terms of computation and memory usage. </div></section></section></section><section id='sec7-applsci-13-05521' type='conclusions'><h2 data-nested='1'> 7. Conclusions</h2><div class='html-p'>This work has reviewed recent studies in image processing to provide more information about the performance of the two architectures and what distinguishes them. A common finding across the papers is that the transformer-based architecture, or the combination of ViTs with CNNs, allows for better accuracy compared to CNN networks alone.
It has also been shown that this new architecture, even with hyperparameter fine-tuning, can be lighter than a CNN, consuming fewer computational resources and requiring less training time, as demonstrated in [<a href="#B16-applsci-13-05521" class="html-bibr">16</a>,<a href="#B19-applsci-13-05521" class="html-bibr">19</a>].</div><div class='html-p'>In summary, the ViT architecture is more robust than CNN networks for images that are noisy or augmented. It manages to perform better than CNNs due to the self-attention mechanism, which makes the overall image information accessible from the highest to the lowest layers [<a href="#B12-applsci-13-05521" class="html-bibr">12</a>]. On the other hand, CNNs can sometimes generalize better with smaller datasets and obtain better accuracy than ViTs; in contrast, ViTs have the advantage of learning information better from fewer images, because the images are divided into small patches, so there is a greater diversity of relationships between them. </div></section> </div> <div class="html-back"> <section class='html-notes'><h2 >Author Contributions</h2><div class='html-p'>Conceptualization, J.B.; Methodology, J.M. and J.B.; Software, J.M.; Validation, J.M., I.D. and J.B.; Formal analysis, J.M., I.D. and J.B.; Investigation, J.M.; Resources, J.M.; Data curation, J.M.; Writing—original draft preparation, J.M.; Writing—review and editing, J.M., I.D. and J.B.; Supervision, J.B. and I.D.; Project administration, J.B. and I.D.; Funding acquisition, J.B. All authors have read and agreed to the published version of the manuscript.</div></section><section class='html-notes'><h2>Funding</h2><div class='html-p'>This research received no external funding.</div></section><section class='html-notes'><h2 >Data Availability Statement</h2><div class='html-p'>Data sharing is not applicable to this article.</div></section><section class='html-notes'><h2 >Conflicts of Interest</h2><div class='html-p'>The authors declare no conflict of interest.</div></section><section id='html-references_list'><h2>References</h2><ol class='html-xx'><li id='B1-applsci-13-05521' class='html-x' data-content='1.'>Dosovitskiy, A.; Beyer, L.; Kolesnikov, A.; Weissenborn, D.; Zhai, X.; Unterthiner, T.; Dehghani, M.; Minderer, M.; Heigold, G.; Gelly, S.; et al. An Image Is Worth 16x16 Words: Transformers for Image Recognition at Scale. <span class='html-italic'>arXiv</span> <b>2020</b>, arXiv:2010.11929. [<a href="https://scholar.google.com/scholar_lookup?title=An+Image+Is+Worth+16x16+Words:+Transformers+for+Image+Recognition+at+Scale&author=Dosovitskiy,+A.&author=Beyer,+L.&author=Kolesnikov,+A.&author=Weissenborn,+D.&author=Zhai,+X.&author=Unterthiner,+T.&author=Dehghani,+M.&author=Minderer,+M.&author=Heigold,+G.&author=Gelly,+S.&publication_year=2020&journal=arXiv&doi=10.48550/ARXIV.2010.11929" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>] [<a href="https://doi.org/10.48550/ARXIV.2010.11929" class='cross-ref' target='_blank' rel='noopener noreferrer'>CrossRef</a>]</li><li id='B2-applsci-13-05521' class='html-x' data-content='2.'>Saha, S. A Comprehensive Guide to Convolutional Neural Networks—The ELI5 Way.
Available online: https://towardsdatascience.com/a-comprehensive-guide-to-convolutional-neural-networks-the-eli5-way-3bd2b1164a53 (accessed on 8 January 2023).
3. Snyder, H. Literature Review as a Research Methodology: An Overview and Guidelines. J. Bus. Res. 2019, 104, 333–339. https://doi.org/10.1016/j.jbusres.2019.07.039
4. Matloob, F.; Ghazal, T.M.; Taleb, N.; Aftab, S.; Ahmad, M.; Khan, M.A.; Abbas, S.; Soomro, T.R. Software Defect Prediction Using Ensemble Learning: A Systematic Literature Review. IEEE Access 2021, 9, 98754–98771. https://doi.org/10.1109/ACCESS.2021.3095559
5. Benz, P.; Ham, S.; Zhang, C.; Karjauv, A.; Kweon, I.S. Adversarial Robustness Comparison of Vision Transformer and MLP-Mixer to CNNs. arXiv 2021, arXiv:2110.02797.
6. Bai, Y.; Mei, J.; Yuille, A.; Xie, C. Are Transformers More Robust Than CNNs? arXiv 2021, arXiv:2111.05464.
7. Tyagi, K.; Pathak, G.; Nijhawan, R.; Mittal, A. Detecting Pneumonia Using Vision Transformer and Comparing with Other Techniques. In Proceedings of the 2021 5th International Conference on Electronics, Communication and Aerospace Technology (ICECA), IEEE, Coimbatore, India, 2 December 2021; pp. 12–16.
8. Raghu, M.; Unterthiner, T.; Kornblith, S.; Zhang, C.; Dosovitskiy, A. Do Vision Transformers See Like Convolutional Neural Networks? arXiv 2021, arXiv:2108.08810.
9. Gheflati, B.; Rivaz, H. Vision Transformer for Classification of Breast Ultrasound Images. arXiv 2021, arXiv:2110.14731.
10. Zhou, H.-Y.; Lu, C.; Yang, S.; Yu, Y. ConvNets vs. Transformers: Whose Visual Representations Are More Transferable? In Proceedings of the 2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW), IEEE, Montreal, BC, Canada, 17 October 2021; pp. 2230–2238.
11. Wu, Y.; Qi, S.; Sun, Y.; Xia, S.; Yao, Y.; Qian, W. A Vision Transformer for Emphysema Classification Using CT Images. Phys. Med. Biol. 2021, 66, 245016. https://doi.org/10.1088/1361-6560/ac3dc8
12. Filipiuk, M.; Singh, V. Comparing Vision Transformers and Convolutional Nets for Safety Critical Systems. AAAI Workshop Artif. Intell. Saf. 2022, 3087, 1–5.
13. Galdran, A.; Carneiro, G.; Ballester, M.A.G. Convolutional Nets Versus Vision Transformers for Diabetic Foot Ulcer Classification. arXiv 2022, arXiv:2111.06894.
14. Cuenat, S.; Couturier, R. Convolutional Neural Network (CNN) vs Vision Transformer (ViT) for Digital Holography. In Proceedings of the 2022 2nd International Conference on Computer, Control and Robotics (ICCCR), IEEE, Shanghai, China, 18 March 2022; pp. 235–240.
15. Coccomini, D.A.; Caldelli, R.; Falchi, F.; Gennaro, C.; Amato, G. Cross-Forgery Analysis of Vision Transformers and CNNs for Deepfake Image Detection. In Proceedings of the 1st International Workshop on Multimedia AI against Disinformation, Newark, NJ, USA, 27–30 June 2022; Association for Computing Machinery: New York, NY, USA, 2022; pp. 52–58.
16. Wang, H. Traffic Sign Recognition with Vision Transformers. In Proceedings of the 6th International Conference on Information System and Data Mining, Silicon Valley, CA, USA, 27–29 May 2022; Association for Computing Machinery: New York, NY, USA, 2022; pp. 55–61.
17. Xin, C.; Liu, Z.; Zhao, K.; Miao, L.; Ma, Y.; Zhu, X.; Zhou, Q.; Wang, S.; Li, L.; Yang, F.; et al. An Improved Transformer Network for Skin Cancer Classification. Comput. Biol. Med. 2022, 149, 105939. https://doi.org/10.1016/j.compbiomed.2022.105939
18. Peng, Y.; Wang, Y. CNN and Transformer Framework for Insect Pest Classification. Ecol. Inform. 2022, 72, 101846. https://doi.org/10.1016/j.ecoinf.2022.101846
19. Bakhtiarnia, A.; Zhang, Q.; Iosifidis, A. Single-Layer Vision Transformers for More Accurate Early Exits with Less Overhead. Neural Netw. 2022, 153, 461–473. https://doi.org/10.1016/j.neunet.2022.06.038
20. Asadi Shamsabadi, E.; Xu, C.; Rao, A.S.; Nguyen, T.; Ngo, T.; Dias-da-Costa, D. Vision Transformer-Based Autonomous Crack Detection on Asphalt and Concrete Surfaces. Autom. Constr. 2022, 140, 104316. https://doi.org/10.1016/j.autcon.2022.104316
21. Reedha, R.; Dericquebourg, E.; Canals, R.; Hafiane, A. Vision Transformers for Weeds and Crops Classification of High Resolution UAV Images. Remote Sens. 2022, 14, 592. https://doi.org/10.3390/rs14030592
22. Bottou, L.; Bousquet, O. The Tradeoffs of Large Scale Learning. In Advances in Neural Information Processing Systems; Platt, J., Koller, D., Singer, Y., Roweis, S., Eds.; Curran Associates, Inc.: Vancouver, BC, Canada, 2007; Volume 20.
23. Foret, P.; Kleiner, A.; Mobahi, H.; Neyshabur, B. Sharpness-Aware Minimization for Efficiently Improving Generalization. arXiv 2020, arXiv:2010.01412.
24. Korpelevich, G.M. The Extragradient Method for Finding Saddle Points and Other Problems. Ekon. Mat. Metod. 1976, 12, 747–756.
25. Al-Dhabyani, W.; Gomaa, M.; Khaled, H.; Fahmy, A. Dataset of Breast Ultrasound Images. Data Brief 2020, 28, 104863. https://doi.org/10.1016/j.dib.2019.104863
26. Yap, M.H.; Pons, G.; Marti, J.; Ganau, S.; Sentis, M.; Zwiggelaar, R.; Davison, A.K.; Marti, R. Automated Breast Ultrasound Lesions Detection Using Convolutional Neural Networks. IEEE J. Biomed. Health Inform. 2018, 22, 1218–1226. https://doi.org/10.1109/JBHI.2017.2731873
27. Zhang, R. Making Convolutional Networks Shift-Invariant Again. arXiv 2019, arXiv:1904.11486.
28. Vaswani, A.; Shazeer, N.M.; Parmar, N.; Uszkoreit, J.; Jones, L.; Gomez, A.N.; Kaiser, L.; Polosukhin, I. Attention Is All You Need. Neural Inf. Process. Syst. 2017, 30. https://doi.org/10.48550/arXiv.1706.03762
29. Zhou, D.; Kang, B.; Jin, X.; Yang, L.; Lian, X.; Jiang, Z.; Hou, Q.; Feng, J. DeepViT: Towards Deeper Vision Transformer. arXiv 2021, arXiv:2103.11886.
30. Amorim, J.P.; Domingues, I.; Abreu, P.H.; Santos, J.A.M. Interpreting Deep Learning Models for Ordinal Problems. In Proceedings of the European Symposium on Artificial Neural Networks, Bruges, Belgium, 25–27 April 2018.
[<a href="https://scholar.google.com/scholar_lookup?title=Interpreting+Deep+Learning+Models+for+Ordinal+Problems&conference=Proceedings+of+the+European+Symposium+on+Artificial+Neural+Networks&author=Amorim,+J.P.&author=Domingues,+I.&author=Abreu,+P.H.&author=Santos,+J.A.M.&publication_year=2018" class='google-scholar' target='_blank' rel='noopener noreferrer'>Google Scholar</a>]</li></ol></section><section id='FiguresandTables' type='display-objects'><div class="html-fig-wrap" id="applsci-13-05521-f001"> <div class='html-fig_img'> <div class="html-figpopup html-figpopup-link" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/13/9/5521/display" href="#fig_body_display_applsci-13-05521-f001"> <img alt="Applsci 13 05521 g001 550" data-large="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g001.png" data-original="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g001.png" data-lsrc="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g001-550.jpg" /> <a class="html-expand html-figpopup" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/13/9/5521/display" href="#fig_body_display_applsci-13-05521-f001"></a> </div> </div> <div class="html-fig_description"> <b>Figure 1.</b> Example of an architecture of the ViT, based on [<a href="#B1-applsci-13-05521" class="html-bibr">1</a>]. <!-- <p><a class="html-figpopup" href="#fig_body_display_applsci-13-05521-f001"> Click here to enlarge figure </a></p> --> </div> </div> <div class="html-fig_show mfp-hide" id ="fig_body_display_applsci-13-05521-f001" > <div class="html-caption" > <b>Figure 1.</b> Example of an architecture of the ViT, based on [<a href="#B1-applsci-13-05521" class="html-bibr">1</a>].</div> <div class="html-img"><img alt="Applsci 13 05521 g001" data-large="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g001.png" data-original="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g001.png" data-lsrc="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g001.png" /></div> </div><div class="html-fig-wrap" id="applsci-13-05521-f002"> <div class='html-fig_img'> <div class="html-figpopup html-figpopup-link" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/13/9/5521/display" href="#fig_body_display_applsci-13-05521-f002"> <img alt="Applsci 13 05521 g002 550" data-large="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g002.png" data-original="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g002.png" data-lsrc="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g002-550.jpg" /> <a class="html-expand html-figpopup" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/13/9/5521/display" href="#fig_body_display_applsci-13-05521-f002"></a> </div> </div> <div class="html-fig_description"> <b>Figure 2.</b> Example of an architecture of a CNN, based on [<a href="#B2-applsci-13-05521" class="html-bibr">2</a>]. 
<!-- <p><a class="html-figpopup" href="#fig_body_display_applsci-13-05521-f002"> Click here to enlarge figure </a></p> --> </div> </div> <div class="html-fig_show mfp-hide" id ="fig_body_display_applsci-13-05521-f002" > <div class="html-caption" > <b>Figure 2.</b> Example of an architecture of a CNN, based on [<a href="#B2-applsci-13-05521" class="html-bibr">2</a>].</div> <div class="html-img"><img alt="Applsci 13 05521 g002" data-large="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g002.png" data-original="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g002.png" data-lsrc="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g002.png" /></div> </div><div class="html-fig-wrap" id="applsci-13-05521-f003"> <div class='html-fig_img'> <div class="html-figpopup html-figpopup-link" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/13/9/5521/display" href="#fig_body_display_applsci-13-05521-f003"> <img alt="Applsci 13 05521 g003 550" data-large="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g003.png" data-original="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g003.png" data-lsrc="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g003-550.jpg" /> <a class="html-expand html-figpopup" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/13/9/5521/display" href="#fig_body_display_applsci-13-05521-f003"></a> </div> </div> <div class="html-fig_description"> <b>Figure 3.</b> Distribution of the selected studies by years. <!-- <p><a class="html-figpopup" href="#fig_body_display_applsci-13-05521-f003"> Click here to enlarge figure </a></p> --> </div> </div> <div class="html-fig_show mfp-hide" id ="fig_body_display_applsci-13-05521-f003" > <div class="html-caption" > <b>Figure 3.</b> Distribution of the selected studies by years.</div> <div class="html-img"><img alt="Applsci 13 05521 g003" data-large="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g003.png" data-original="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g003.png" data-lsrc="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g003.png" /></div> </div><div class="html-fig-wrap" id="applsci-13-05521-f004"> <div class='html-fig_img'> <div class="html-figpopup html-figpopup-link" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/13/9/5521/display" href="#fig_body_display_applsci-13-05521-f004"> <img alt="Applsci 13 05521 g004 550" data-large="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g004.png" data-original="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g004.png" data-lsrc="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g004-550.jpg" /> <a class="html-expand html-figpopup" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/13/9/5521/display" href="#fig_body_display_applsci-13-05521-f004"></a> </div> </div> <div class="html-fig_description"> <b>Figure 4.</b> Distribution of the selected studies by application area. 
<!-- <p><a class="html-figpopup" href="#fig_body_display_applsci-13-05521-f004"> Click here to enlarge figure </a></p> --> </div> </div> <div class="html-fig_show mfp-hide" id ="fig_body_display_applsci-13-05521-f004" > <div class="html-caption" > <b>Figure 4.</b> Distribution of the selected studies by application area.</div> <div class="html-img"><img alt="Applsci 13 05521 g004" data-large="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g004.png" data-original="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g004.png" data-lsrc="/applsci/applsci-13-05521/article_deploy/html/images/applsci-13-05521-g004.png" /></div> </div><div class="html-table-wrap" id="applsci-13-05521-t001"> <div class="html-table_wrap_td" > <div class="html-tablepopup html-tablepopup-link" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/13/9/5521/display" href='#table_body_display_applsci-13-05521-t001'> <img alt="Table" data-lsrc="https://www.mdpi.com/img/table.png" /> <a class="html-expand html-tablepopup" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/13/9/5521/display" href="#table_body_display_applsci-13-05521-t001"></a> </div> </div> <div class="html-table_wrap_discription"> <b>Table 1.</b> Data sources and the number of obtained results. </div> </div> <div class="html-table_show mfp-hide " id ="table_body_display_applsci-13-05521-t001" > <div class="html-caption" ><b>Table 1.</b> Data sources and the number of obtained results.</div> <table > <thead ><tr ><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Data Source</th><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Number of Results</th><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Number of Selected Papers</th></tr></thead><tbody ><tr ><td align='center' valign='middle' class='html-align-center' >ACM Digital Library</td><td align='right' valign='middle' class='html-align-right' >19,159             </td><td align='right' valign='middle' class='html-align-right' >1                         </td></tr><tr ><td align='center' valign='middle' class='html-align-center' >Google Scholar</td><td align='right' valign='middle' class='html-align-right' >10,700             </td><td align='right' valign='middle' class='html-align-right' >10                         </td></tr><tr ><td align='center' valign='middle' class='html-align-center' >Science Direct</td><td align='right' valign='middle' class='html-align-right' >1437             </td><td align='right' valign='middle' class='html-align-right' >3                         </td></tr><tr ><td align='center' valign='middle' class='html-align-center' >Scopus</td><td align='right' valign='middle' class='html-align-right' >55             </td><td align='right' valign='middle' class='html-align-right' >2                         </td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Web of Science</td><td align='right' valign='middle' style='border-bottom:solid thin' class='html-align-right' >90             </td><td align='right' valign='middle' style='border-bottom:solid thin' class='html-align-right' >1                         </td></tr></tbody> </table> </div><div class="html-table-wrap" id="applsci-13-05521-t002"> <div class="html-table_wrap_td" > <div class="html-tablepopup html-tablepopup-link" 
data-counterslinkmanual = "https://www.mdpi.com/2076-3417/13/9/5521/display" href='#table_body_display_applsci-13-05521-t002'> <img alt="Table" data-lsrc="https://www.mdpi.com/img/table.png" /> <a class="html-expand html-tablepopup" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/13/9/5521/display" href="#table_body_display_applsci-13-05521-t002"></a> </div> </div> <div class="html-table_wrap_discription"> <b>Table 2.</b> Data sources and used search string. </div> </div> <div class="html-table_show mfp-hide " id ="table_body_display_applsci-13-05521-t002" > <div class="html-caption" ><b>Table 2.</b> Data sources and used search string.</div> <table > <thead ><tr ><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Data Source</th><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Search String</th></tr></thead><tbody ><tr ><td align='center' valign='middle' class='html-align-center' >ACM Digital Library</td><td align='center' valign='middle' class='html-align-center' >((Vision Transformers) AND (convolutional neural networks) AND (images classification) AND (comparing))</td></tr><tr ><td align='center' valign='middle' class='html-align-center' >Google Scholar</td><td align='center' valign='middle' class='html-align-center' >((ViT) AND (CNN) AND (Images Classification) OR (Comparing) OR (Vision Transformers) OR (convolutional neural networks) OR (differences))</td></tr><tr ><td align='center' valign='middle' class='html-align-center' >Science Direct</td><td align='center' valign='middle' class='html-align-center' >((Vision Transformers) AND (convolutional neural networks) AND (images classification) AND (comparing))</td></tr><tr ><td align='center' valign='middle' class='html-align-center' >Scopus</td><td align='center' valign='middle' class='html-align-center' >((ViT) AND (CNN) AND (comparing))</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Web of Science</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >((ViT) AND (CNN) AND (comparing))</td></tr></tbody> </table> </div><div class="html-table-wrap" id="applsci-13-05521-t003"> <div class="html-table_wrap_td" > <div class="html-tablepopup html-tablepopup-link" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/13/9/5521/display" href='#table_body_display_applsci-13-05521-t003'> <img alt="Table" data-lsrc="https://www.mdpi.com/img/table.png" /> <a class="html-expand html-tablepopup" data-counterslinkmanual = "https://www.mdpi.com/2076-3417/13/9/5521/display" href="#table_body_display_applsci-13-05521-t003"></a> </div> </div> <div class="html-table_wrap_discription"> <b>Table 3.</b> List of selected studies. 
</div> </div> <div class="html-table_show mfp-hide " id ="table_body_display_applsci-13-05521-t003" > <div class="html-caption" ><b>Table 3.</b> List of selected studies.</div> <table > <thead ><tr ><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Ref.</th><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Title</th><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Year</th><th align='center' valign='middle' style='border-top:solid thin;border-bottom:solid thin' class='html-align-center' >Type</th></tr></thead><tbody ><tr ><td align='center' valign='middle' class='html-align-center' >[<a href="#B5-applsci-13-05521" class="html-bibr">5</a>]</td><td align='center' valign='middle' class='html-align-center' >Adversarial Robustness Comparison of Vision Transformer and MLP-Mixer to CNNs</td><td align='center' valign='middle' class='html-align-center' >2021</td><td align='center' valign='middle' class='html-align-center' >Conference</td></tr><tr ><td align='center' valign='middle' class='html-align-center' >[<a href="#B6-applsci-13-05521" class="html-bibr">6</a>]</td><td align='center' valign='middle' class='html-align-center' >Are Transformers More Robust Than CNNs?</td><td align='center' valign='middle' class='html-align-center' >2021</td><td align='center' valign='middle' class='html-align-center' >Conference</td></tr><tr ><td align='center' valign='middle' class='html-align-center' >[<a href="#B7-applsci-13-05521" class="html-bibr">7</a>]</td><td align='center' valign='middle' class='html-align-center' >Detecting Pneumonia using Vision Transformer and comparing with other techniques</td><td align='center' valign='middle' class='html-align-center' >2021</td><td align='center' valign='middle' class='html-align-center' >Conference</td></tr><tr ><td align='center' valign='middle' class='html-align-center' >[<a href="#B8-applsci-13-05521" class="html-bibr">8</a>]</td><td align='center' valign='middle' class='html-align-center' >Do Vision Transformers See Like Convolutional Neural Networks?</td><td align='center' valign='middle' class='html-align-center' >2021</td><td align='center' valign='middle' class='html-align-center' >Conference</td></tr><tr ><td align='center' valign='middle' class='html-align-center' >[<a href="#B9-applsci-13-05521" class="html-bibr">9</a>]</td><td align='center' valign='middle' class='html-align-center' >Vision Transformer for Classification of Breast Ultrasound Images</td><td align='center' valign='middle' class='html-align-center' >2021</td><td align='center' valign='middle' class='html-align-center' >Conference</td></tr><tr ><td align='center' valign='middle' class='html-align-center' >[<a href="#B10-applsci-13-05521" class="html-bibr">10</a>]</td><td align='center' valign='middle' class='html-align-center' >ConvNets vs. 
Transformers: Whose Visual Representations are More Transferable?</td><td align='center' valign='middle' class='html-align-center' >2021</td><td align='center' valign='middle' class='html-align-center' >Conference</td></tr><tr ><td align='center' valign='middle' class='html-align-center' >[<a href="#B11-applsci-13-05521" class="html-bibr">11</a>]</td><td align='center' valign='middle' class='html-align-center' >A vision transformer for emphysema classification using CT images </td><td align='center' valign='middle' class='html-align-center' >2021</td><td align='center' valign='middle' class='html-align-center' >Journal</td></tr><tr ><td align='center' valign='middle' class='html-align-center' >[<a href="#B12-applsci-13-05521" class="html-bibr">12</a>]</td><td align='center' valign='middle' class='html-align-center' >Comparing Vision Transformers and Convolutional Nets for Safety Critical Systems</td><td align='center' valign='middle' class='html-align-center' >2022</td><td align='center' valign='middle' class='html-align-center' >Conference</td></tr><tr ><td align='center' valign='middle' class='html-align-center' >[<a href="#B13-applsci-13-05521" class="html-bibr">13</a>]</td><td align='center' valign='middle' class='html-align-center' >Convolutional Nets Versus Vision Transformers for Diabetic Foot Ulcer Classification</td><td align='center' valign='middle' class='html-align-center' >2022</td><td align='center' valign='middle' class='html-align-center' >Conference</td></tr><tr ><td align='center' valign='middle' class='html-align-center' >[<a href="#B14-applsci-13-05521" class="html-bibr">14</a>]</td><td align='center' valign='middle' class='html-align-center' >Convolutional Neural Network (CNN) vs Vision Transformer (ViT) for Digital Holography</td><td align='center' valign='middle' class='html-align-center' >2022</td><td align='center' valign='middle' class='html-align-center' >Conference</td></tr><tr ><td align='center' valign='middle' class='html-align-center' >[<a href="#B15-applsci-13-05521" class="html-bibr">15</a>]</td><td align='center' valign='middle' class='html-align-center' >Cross-Forgery Analysis of Vision Transformers and CNNs for Deepfake Image Detection</td><td align='center' valign='middle' class='html-align-center' >2022</td><td align='center' valign='middle' class='html-align-center' >Conference</td></tr><tr ><td align='center' valign='middle' class='html-align-center' >[<a href="#B16-applsci-13-05521" class="html-bibr">16</a>]</td><td align='center' valign='middle' class='html-align-center' >Traffic Sign Recognition with Vision Transformers</td><td align='center' valign='middle' class='html-align-center' >2022</td><td align='center' valign='middle' class='html-align-center' >Conference</td></tr><tr ><td align='center' valign='middle' class='html-align-center' >[<a href="#B17-applsci-13-05521" class="html-bibr">17</a>]</td><td align='center' valign='middle' class='html-align-center' >An improved transformer network for skin cancer classification</td><td align='center' valign='middle' class='html-align-center' >2022</td><td align='center' valign='middle' class='html-align-center' >Journal</td></tr><tr ><td align='center' valign='middle' class='html-align-center' >[<a href="#B18-applsci-13-05521" class="html-bibr">18</a>]</td><td align='center' valign='middle' class='html-align-center' >CNN and transformer framework for insect pest classification</td><td align='center' valign='middle' class='html-align-center' >2022</td><td align='center' valign='middle' 
class='html-align-center' >Journal</td></tr><tr ><td align='center' valign='middle' class='html-align-center' >[<a href="#B19-applsci-13-05521" class="html-bibr">19</a>]</td><td align='center' valign='middle' class='html-align-center' >Single-layer Vision Transformers for more accurate early exits with less overhead</td><td align='center' valign='middle' class='html-align-center' >2022</td><td align='center' valign='middle' class='html-align-center' >Journal</td></tr><tr ><td align='center' valign='middle' class='html-align-center' >[<a href="#B20-applsci-13-05521" class="html-bibr">20</a>]</td><td align='center' valign='middle' class='html-align-center' >Vision transformer-based autonomous crack detection on asphalt and concrete surfaces</td><td align='center' valign='middle' class='html-align-center' >2022</td><td align='center' valign='middle' class='html-align-center' >Journal</td></tr><tr ><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >[<a href="#B21-applsci-13-05521" class="html-bibr">21</a>]</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Vision Transformers for Weeds and Crops Classification of High-Resolution UAV Images</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >2022</td><td align='center' valign='middle' style='border-bottom:solid thin' class='html-align-center' >Journal</td></tr></tbody> </table> </div></section><section class='html-fn_group'><table><tr id=''><td></td><td><div class='html-p'><b>Disclaimer/Publisher’s Note:</b> The statements, opinions and data contained in all publications are solely those of the individual author(s) and contributor(s) and not of MDPI and/or the editor(s). MDPI and/or the editor(s) disclaim responsibility for any injury to people or property resulting from any ideas, methods, instructions or products referred to in the content.</div></td></tr></table></section> <section id="html-copyright"><br>© 2023 by the authors. Licensee MDPI, Basel, Switzerland. 
<option value="jcdd">Journal of Cardiovascular Development and Disease</option> <option value="jcto">Journal of Clinical &amp; Translational Ophthalmology</option> <option value="jcm">Journal of Clinical Medicine</option> <option value="jcs">Journal of Composites Science</option> <option value="jcp">Journal of Cybersecurity and Privacy</option> <option value="jdad">Journal of Dementia and Alzheimer&#039;s Disease</option> <option value="jdb">Journal of Developmental Biology</option> <option value="jeta">Journal of Experimental and Theoretical Analyses</option> <option value="jfb">Journal of Functional Biomaterials</option> <option value="jfmk">Journal of Functional Morphology and Kinesiology</option> <option value="jof">Journal of Fungi</option> <option value="jimaging">Journal of Imaging</option> <option value="jintelligence">Journal of Intelligence</option> <option value="jlpea">Journal of Low Power Electronics and Applications</option> <option value="jmmp">Journal of Manufacturing and Materials Processing</option> <option value="jmse">Journal of Marine Science and Engineering</option> <option value="jmahp">Journal of Market Access &amp; Health Policy</option> <option value="jmp">Journal of Molecular Pathology</option> <option value="jnt">Journal of Nanotheranostics</option> <option value="jne">Journal of Nuclear Engineering</option> <option value="ohbm">Journal of Otorhinolaryngology, Hearing and Balance Medicine</option> <option value="jop">Journal of Parks</option> <option value="jpm">Journal of Personalized Medicine</option> <option value="jpbi">Journal of Pharmaceutical and BioTech Industry</option> <option value="jor">Journal of Respiration</option> <option value="jrfm">Journal of Risk and Financial Management</option> <option value="jsan">Journal of Sensor and Actuator Networks</option> <option value="joma">Journal of the Oman Medical Association</option> <option value="jtaer">Journal of Theoretical and Applied Electronic Commerce Research</option> <option value="jvd">Journal of Vascular Diseases</option> <option value="jox">Journal of Xenobiotics</option> <option value="jzbg">Journal of Zoological and Botanical Gardens</option> <option value="journalmedia">Journalism and Media</option> <option value="kidneydial">Kidney and Dialysis</option> <option value="kinasesphosphatases">Kinases and Phosphatases</option> <option value="knowledge">Knowledge</option> <option value="labmed">LabMed</option> <option value="laboratories">Laboratories</option> <option value="land">Land</option> <option value="languages">Languages</option> <option value="laws">Laws</option> <option value="life">Life</option> <option value="limnolrev">Limnological Review</option> <option value="lipidology">Lipidology</option> <option value="liquids">Liquids</option> <option value="literature">Literature</option> <option value="livers">Livers</option> <option value="logics">Logics</option> <option value="logistics">Logistics</option> <option value="lubricants">Lubricants</option> <option value="lymphatics">Lymphatics</option> <option value="make">Machine Learning and Knowledge Extraction</option> <option value="machines">Machines</option> <option value="macromol">Macromol</option> <option value="magnetism">Magnetism</option> <option value="magnetochemistry">Magnetochemistry</option> <option value="marinedrugs">Marine Drugs</option> <option value="materials">Materials</option> <option value="materproc">Materials Proceedings</option> <option value="mca">Mathematical and Computational Applications</option> <option 
value="mathematics">Mathematics</option> <option value="medsci">Medical Sciences</option> <option value="msf">Medical Sciences Forum</option> <option value="medicina">Medicina</option> <option value="medicines">Medicines</option> <option value="membranes">Membranes</option> <option value="merits">Merits</option> <option value="metabolites">Metabolites</option> <option value="metals">Metals</option> <option value="meteorology">Meteorology</option> <option value="methane">Methane</option> <option value="mps">Methods and Protocols</option> <option value="metrics">Metrics</option> <option value="metrology">Metrology</option> <option value="micro">Micro</option> <option value="microbiolres">Microbiology Research</option> <option value="micromachines">Micromachines</option> <option value="microorganisms">Microorganisms</option> <option value="microplastics">Microplastics</option> <option value="minerals">Minerals</option> <option value="mining">Mining</option> <option value="modelling">Modelling</option> <option value="mmphys">Modern Mathematical Physics</option> <option value="molbank">Molbank</option> <option value="molecules">Molecules</option> <option value="mti">Multimodal Technologies and Interaction</option> <option value="muscles">Muscles</option> <option value="nanoenergyadv">Nanoenergy Advances</option> <option value="nanomanufacturing">Nanomanufacturing</option> <option value="nanomaterials">Nanomaterials</option> <option value="ndt">NDT</option> <option value="network">Network</option> <option value="neuroglia">Neuroglia</option> <option value="neurolint">Neurology International</option> <option value="neurosci">NeuroSci</option> <option value="nitrogen">Nitrogen</option> <option value="ncrna">Non-Coding RNA</option> <option value="nursrep">Nursing Reports</option> <option value="nutraceuticals">Nutraceuticals</option> <option value="nutrients">Nutrients</option> <option value="obesities">Obesities</option> <option value="oceans">Oceans</option> <option value="onco">Onco</option> <option value="optics">Optics</option> <option value="oral">Oral</option> <option value="organics">Organics</option> <option value="organoids">Organoids</option> <option value="osteology">Osteology</option> <option value="oxygen">Oxygen</option> <option value="parasitologia">Parasitologia</option> <option value="particles">Particles</option> <option value="pathogens">Pathogens</option> <option value="pathophysiology">Pathophysiology</option> <option value="pediatrrep">Pediatric Reports</option> <option value="pets">Pets</option> <option value="pharmaceuticals">Pharmaceuticals</option> <option value="pharmaceutics">Pharmaceutics</option> <option value="pharmacoepidemiology">Pharmacoepidemiology</option> <option value="pharmacy">Pharmacy</option> <option value="philosophies">Philosophies</option> <option value="photochem">Photochem</option> <option value="photonics">Photonics</option> <option value="phycology">Phycology</option> <option value="physchem">Physchem</option> <option value="psf">Physical Sciences Forum</option> <option value="physics">Physics</option> <option value="physiologia">Physiologia</option> <option value="plants">Plants</option> <option value="plasma">Plasma</option> <option value="platforms">Platforms</option> <option value="pollutants">Pollutants</option> <option value="polymers">Polymers</option> <option value="polysaccharides">Polysaccharides</option> <option value="populations">Populations</option> <option value="poultry">Poultry</option> <option value="powders">Powders</option> 
<option value="proceedings">Proceedings</option> <option value="processes">Processes</option> <option value="prosthesis">Prosthesis</option> <option value="proteomes">Proteomes</option> <option value="psychiatryint">Psychiatry International</option> <option value="psychoactives">Psychoactives</option> <option value="psycholint">Psychology International</option> <option value="publications">Publications</option> <option value="qubs">Quantum Beam Science</option> <option value="quantumrep">Quantum Reports</option> <option value="quaternary">Quaternary</option> <option value="radiation">Radiation</option> <option value="reactions">Reactions</option> <option value="realestate">Real Estate</option> <option value="receptors">Receptors</option> <option value="recycling">Recycling</option> <option value="rsee">Regional Science and Environmental Economics</option> <option value="religions">Religions</option> <option value="remotesensing">Remote Sensing</option> <option value="reports">Reports</option> <option value="reprodmed">Reproductive Medicine</option> <option value="resources">Resources</option> <option value="rheumato">Rheumato</option> <option value="risks">Risks</option> <option value="robotics">Robotics</option> <option value="ruminants">Ruminants</option> <option value="safety">Safety</option> <option value="sci">Sci</option> <option value="scipharm">Scientia Pharmaceutica</option> <option value="sclerosis">Sclerosis</option> <option value="seeds">Seeds</option> <option value="sensors">Sensors</option> <option value="separations">Separations</option> <option value="sexes">Sexes</option> <option value="signals">Signals</option> <option value="sinusitis">Sinusitis</option> <option value="smartcities">Smart Cities</option> <option value="socsci">Social Sciences</option> <option value="siuj">Société Internationale d’Urologie Journal</option> <option value="societies">Societies</option> <option value="software">Software</option> <option value="soilsystems">Soil Systems</option> <option value="solar">Solar</option> <option value="solids">Solids</option> <option value="spectroscj">Spectroscopy Journal</option> <option value="sports">Sports</option> <option value="standards">Standards</option> <option value="stats">Stats</option> <option value="stresses">Stresses</option> <option value="surfaces">Surfaces</option> <option value="surgeries">Surgeries</option> <option value="std">Surgical Techniques Development</option> <option value="sustainability">Sustainability</option> <option value="suschem">Sustainable Chemistry</option> <option value="symmetry">Symmetry</option> <option value="synbio">SynBio</option> <option value="systems">Systems</option> <option value="targets">Targets</option> <option value="taxonomy">Taxonomy</option> <option value="technologies">Technologies</option> <option value="telecom">Telecom</option> <option value="textiles">Textiles</option> <option value="thalassrep">Thalassemia Reports</option> <option value="therapeutics">Therapeutics</option> <option value="thermo">Thermo</option> <option value="timespace">Time and Space</option> <option value="tomography">Tomography</option> <option value="tourismhosp">Tourism and Hospitality</option> <option value="toxics">Toxics</option> <option value="toxins">Toxins</option> <option value="transplantology">Transplantology</option> <option value="traumacare">Trauma Care</option> <option value="higheredu">Trends in Higher Education</option> <option value="tropicalmed">Tropical Medicine and Infectious Disease</option> <option 
value="universe">Universe</option> <option value="urbansci">Urban Science</option> <option value="uro">Uro</option> <option value="vaccines">Vaccines</option> <option value="vehicles">Vehicles</option> <option value="venereology">Venereology</option> <option value="vetsci">Veterinary Sciences</option> <option value="vibration">Vibration</option> <option value="virtualworlds">Virtual Worlds</option> <option value="viruses">Viruses</option> <option value="vision">Vision</option> <option value="waste">Waste</option> <option value="water">Water</option> <option value="wild">Wild</option> <option value="wind">Wind</option> <option value="women">Women</option> <option value="world">World</option> <option value="wevj">World Electric Vehicle Journal</option> <option value="youth">Youth</option> <option value="zoonoticdis">Zoonotic Diseases</option> </select> <input name="email" type="email" placeholder="Enter your email address..." required="required" /> <button class="genericCaptcha button button--dark UA_FooterNewsletterSubscribeButton" type="submit">Subscribe</button> </form> </div> </div> </div> <div id="footer-copyright"> <div class="row"> <div class="columns large-6 medium-6 small-12 text-left"> © 1996-2024 MDPI (Basel, Switzerland) unless otherwise stated </div> <div class="columns large-6 medium-6 small-12 small-text-left medium-text-right large-text-right"> <a data-dropdown="drop-view-disclaimer" aria-controls="drop-view-disclaimer" aria-expanded="false" data-options="align:top; is_hover:true; hover_timeout:2000;"> Disclaimer </a> <div id="drop-view-disclaimer" class="f-dropdown label__btn__dropdown label__btn__dropdown--wide text-left" data-dropdown-content aria-hidden="true" tabindex="-1"> Disclaimer/Publisher’s Note: The statements, opinions and data contained in all publications are solely those of the individual author(s) and contributor(s) and not of MDPI and/or the editor(s). MDPI and/or the editor(s) disclaim responsibility for any injury to people or property resulting from any ideas, methods, instructions or products referred to in the content. </div> <a href="/about/terms-and-conditions"> Terms and Conditions </a> <a href="/about/privacy"> Privacy Policy </a> </div> </div> </div> </div> <div id="cookie-notification" class="js-allow-cookies" style="display: none;"> <div class="columns large-10 medium-10 small-12"> We use cookies on our website to ensure you get the best experience.<br class="show-for-medium-up"/> Read more about our cookies <a href="/about/privacy">here</a>. 
</div> <div class="columns large-2 medium-2 small-12 small-only-text-left text-right"> <a class="button button--default" href="/accept_cookies">Accept</a> </div> </div> </div> <div id="main-share-modal" class="reveal-modal reveal-modal-new reveal-modal-new--small" data-reveal aria-labelledby="modalTitle" aria-hidden="true" role="dialog"> <div class="row"> <div class="small-12 columns"> <h2 style="margin: 0;">Share Link</h2> </div> <div class="small-12 columns"> <div class="social-media-links UA_ShareModalLinks" style="text-align: left;"> <a href="/cdn-cgi/l/email-protection#576871363a276c2422353d3234236a1125383a7265671a13071e72641672656772656514383a2736253e3930726567013e243e383972656703253639243138253a32252472656736393372656714383921383b22233e3839363b72656719322225363b7265671932232038253c247265673138257265671e3a363032726567143b3624243e313e3436233e3839726416726567167265671b3e23322536232225327265670532213e322071262238236c71363a276c3538332e6a3f232327246d7878202020793a33273e7934383a786565606565606772641672671672671614383a2736253e3930726567013e243e383972656703253639243138253a32252472656736393372656714383921383b22233e3839363b72656719322225363b7265671932232038253c247265673138257265671e3a363032726567143b3624243e313e3436233e3839726416726567167265671b3e23322536232225327265670532213e32205d5d03253639243138253a3225247265673625327265673a3833323b24726567233f36237265673e3a273b323a323923726567367265673a32343f36393e243a726567383172656724323b317a3623233239233e38397265147265673e39333e213e3322363b3b2e72656720323e303f233e3930726567233f327265673e3a273825233639343272656738317265673236343f726567273625237265673831726567233f327265673e392722237265673336233679726567033f323e257265672224327265673e397265673e3a363032726567343b3624243e313e3436233e38397265672336243c247265673e2472656724233e3b3b72656724383a32203f36237265673b3e3a3e233233726567243e393432726567253224323625343f3225247265673f3621327265672438726567313625726567343f3824323972656714383921383b22233e3839363b72656719322225363b7265671932232038253c247265673138257265673e3a363032726567343b3624243e313e3436233e383972656736393372656723253639243138253a322524726567203225327265673a382532726567233625303223323372656723387265671936232225363b7265671b36393022363032726567072538343224243e393072656772656f191b0772656e7265672336243c2479726567033f32253231382532726514726567233f3e2472656727362732257265672725322432392324726567367265673b3e23322536232225327265672532213e3220726567233f3623726567243f382024726567233f32726567333e31313225323934322472656735322320323239726567013e243e383972656703253639243138253a32252472656772656f013e0372656e72656736393372656714383921383b22233e3839363b72656719322225363b7265671932232038253c2479726567033f3272656724233623327265673831726567233f32726567362523726567233f362372656722243233726567233f327265672320387265673625343f3e23323423222532247265673138257265673e3a363032726567343b3624243e313e3436233e38397265672036247265672532213e322032337265673639337265673639726567362323323a27237265672036247265673a363332726567233872656722393332252423363933726567203f3623726567313634233825247265673a362e7265673e39313b2232393432726567233f327265672732253138253a363934327265673831726567233f32726567232038726567333232277265673b323625393e39307265673625343f3e233234232225322472656735362432337265673839726567233f327265673336233624322324726567222432337265147265673e3a363032726567243e2d3272651472656739223a3532257265673831726567233625303223726567343b362424322472656772656f313825726567233f32726567343b3624243e313e3436233e3839726567272538353b323a2472656e7265147265673f362533203625327265147265673639337265673221363b223623
32337265673625343f3e2332342322253224726567363933726567233827726567253224223b232479726567033f3272656738353d3234233e21327265673831726567233f3e247265672038253c7265673e2472656723387265673e333239233e312e726567203f3e343f7265673831726567233f327265673625343f3e23323423222532247265673e24726567233f32726567353224237265673138257265673e3a363032726567343b3624243e313e3436233e38397265673639337265672239333225726567203f3623726567343839333e233e38392479726567033f3e247265672736273225726567363b243872656733322434253e353224726567233f327265673e3a27382523363934327265673831726567233f327265671a223b233e7a1f3236337265671623233239233e38397265673a32343f36393e243a7265673138257265673e3a272538213e3930726567233f327265672732253138253a363934327265673831726567013e037265673e397265673e3a363032726567343b3624243e313e3436233e383979" title="Email"> <i class="fa fa-envelope-square" style="font-size: 30px;"></i> </a> <a href="https://twitter.com/intent/tweet?text=Comparing+Vision+Transformers+and+Convolutional+Neural+Networks+for+Image+Classification%3A+A+Literature+Review&amp;hashtags=mdpiapplsci&amp;url=https%3A%2F%2Fwww.mdpi.com%2F2272270&amp;via=Applsci" onclick="windowOpen(this.href,600,800); return false" title="Twitter" target="_blank" rel="noopener noreferrer"> <i class="fa fa-twitter-x-square" style="font-size: 30px;"></i> </a> <a href=" http://www.linkedin.com/shareArticle?mini=true&amp;url=https%3A%2F%2Fwww.mdpi.com%2F2272270&amp;title=Comparing%20Vision%20Transformers%20and%20Convolutional%20Neural%20Networks%20for%20Image%20Classification%3A%20A%20Literature%20Review%26source%3Dhttps%3A%2F%2Fwww.mdpi.com%26summary%3DTransformers%20are%20models%20that%20implement%20a%20mechanism%20of%20self-attention%2C%20individually%20weighting%20the%20importance%20of%20each%20part%20of%20the%20input%20data.%20Their%20use%20in%20image%20classification%20tasks%20is%20still%20somewhat%20limited%20since%20researchers%20have%20so%20far%20chosen%20%5B...%5D" onclick="windowOpen(this.href,600,800); return false" title="LinkedIn" target="_blank" rel="noopener noreferrer"> <i class="fa fa-linkedin-square" style="font-size: 30px;"></i> </a> <a href="https://www.facebook.com/sharer.php?u=https://www.mdpi.com/2272270" title="facebook" target="_blank" rel="noopener noreferrer"> <i class="fa fa-facebook-square" style="font-size: 30px;"></i> </a> <a href="javascript:void(0);" title="Wechat" data-reveal-id="weixin-share-modal"> <i class="fa fa-weixin-square" style="font-size: 26px;"></i> </a> <a href="http://www.reddit.com/submit?url=https://www.mdpi.com/2272270" title="Reddit" target="_blank" rel="noopener noreferrer"> <i class="fa fa-reddit-square" style="font-size: 30px;"></i> </a> <a href="http://www.mendeley.com/import/?url=https://www.mdpi.com/2272270" title="Mendeley" target="_blank" rel="noopener noreferrer"> <i class="fa fa-mendeley-square" style="font-size: 30px;"></i> </a> <a href="http://www.citeulike.org/posturl?url=https://www.mdpi.com/2272270" title="CiteULike" target="_blank" rel="noopener noreferrer"> <i class="fa fa-citeulike-square" style="font-size: 30px;"></i> </a> </div> </div> <div class="small-9 columns"> <input id="js-clipboard-text" type="text" readonly value="https://www.mdpi.com/2272270" /> </div> <div class="small-3 columns text-left"> <a class="button button--color js-clipboard-copy" data-clipboard-target="#js-clipboard-text">Copy</a> </div> </div> <a class="close-reveal-modal" aria-label="Close"> <i class="material-icons">clear</i> </a> </div> <div id="weixin-share-modal" class="reveal-modal reveal-modal-new" data-reveal 
aria-labelledby="weixin-share-modal-title" aria-hidden="true" role="dialog"> <div class="row"> <div class="small-12 columns"> <h2 id="weixin-share-modal-title" style="margin: 0;">Share</h2> </div> <div class="small-12 columns"> <div class="weixin-qr-code-section"> <?xml version="1.0" standalone="no"?> <!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"> <svg width="300" height="300" version="1.1" xmlns="http://www.w3.org/2000/svg"> <desc>https://www.mdpi.com/2272270</desc> <g id="elements" fill="black" stroke="none"> <rect x="0" y="0" width="12" height="12" /> <rect x="12" y="0" width="12" height="12" /> <rect x="24" y="0" width="12" height="12" /> <rect x="36" y="0" width="12" height="12" /> <rect x="48" y="0" width="12" height="12" /> <rect x="60" y="0" width="12" height="12" /> <rect x="72" y="0" width="12" height="12" /> <rect x="108" y="0" width="12" height="12" /> <rect x="192" y="0" width="12" height="12" /> <rect x="216" y="0" width="12" height="12" /> <rect x="228" y="0" width="12" height="12" /> <rect x="240" y="0" width="12" height="12" /> <rect x="252" y="0" width="12" height="12" /> <rect x="264" y="0" width="12" height="12" /> <rect x="276" y="0" width="12" height="12" /> <rect x="288" y="0" width="12" height="12" /> <rect x="0" y="12" width="12" height="12" /> <rect x="72" y="12" width="12" height="12" /> <rect x="96" y="12" width="12" height="12" /> <rect x="108" y="12" width="12" height="12" /> <rect x="132" y="12" width="12" height="12" /> <rect x="144" y="12" width="12" height="12" /> <rect x="156" y="12" width="12" height="12" /> <rect x="180" y="12" width="12" height="12" /> <rect x="216" y="12" width="12" height="12" /> <rect x="288" y="12" width="12" height="12" /> <rect x="0" y="24" width="12" height="12" /> <rect x="24" y="24" width="12" height="12" /> <rect x="36" y="24" width="12" height="12" /> <rect x="48" y="24" width="12" height="12" /> <rect x="72" y="24" width="12" height="12" /> <rect x="108" y="24" width="12" height="12" /> <rect x="120" y="24" width="12" height="12" /> <rect x="132" y="24" width="12" height="12" /> <rect x="168" y="24" width="12" height="12" /> <rect x="192" y="24" width="12" height="12" /> <rect x="216" y="24" width="12" height="12" /> <rect x="240" y="24" width="12" height="12" /> <rect x="252" y="24" width="12" height="12" /> <rect x="264" y="24" width="12" height="12" /> <rect x="288" y="24" width="12" height="12" /> <rect x="0" y="36" width="12" height="12" /> <rect x="24" y="36" width="12" height="12" /> <rect x="36" y="36" width="12" height="12" /> <rect x="48" y="36" width="12" height="12" /> <rect x="72" y="36" width="12" height="12" /> <rect x="96" y="36" width="12" height="12" /> <rect x="108" y="36" width="12" height="12" /> <rect x="132" y="36" width="12" height="12" /> <rect x="144" y="36" width="12" height="12" /> <rect x="156" y="36" width="12" height="12" /> <rect x="168" y="36" width="12" height="12" /> <rect x="216" y="36" width="12" height="12" /> <rect x="240" y="36" width="12" height="12" /> <rect x="252" y="36" width="12" height="12" /> <rect x="264" y="36" width="12" height="12" /> <rect x="288" y="36" width="12" height="12" /> <rect x="0" y="48" width="12" height="12" /> <rect x="24" y="48" width="12" height="12" /> <rect x="36" y="48" width="12" height="12" /> <rect x="48" y="48" width="12" height="12" /> <rect x="72" y="48" width="12" height="12" /> <rect x="120" y="48" width="12" height="12" /> <rect x="144" y="48" width="12" height="12" /> <rect x="156" y="48" 
width="12" height="12" /> <rect x="216" y="48" width="12" height="12" /> <rect x="240" y="48" width="12" height="12" /> <rect x="252" y="48" width="12" height="12" /> <rect x="264" y="48" width="12" height="12" /> <rect x="288" y="48" width="12" height="12" /> <rect x="0" y="60" width="12" height="12" /> <rect x="72" y="60" width="12" height="12" /> <rect x="96" y="60" width="12" height="12" /> <rect x="120" y="60" width="12" height="12" /> <rect x="144" y="60" width="12" height="12" /> <rect x="156" y="60" width="12" height="12" /> <rect x="168" y="60" width="12" height="12" /> <rect x="180" y="60" width="12" height="12" /> <rect x="192" y="60" width="12" height="12" /> <rect x="216" y="60" width="12" height="12" /> <rect x="288" y="60" width="12" height="12" /> <rect x="0" y="72" width="12" height="12" /> <rect x="12" y="72" width="12" height="12" /> <rect x="24" y="72" width="12" height="12" /> <rect x="36" y="72" width="12" height="12" /> <rect x="48" y="72" width="12" height="12" /> <rect x="60" y="72" width="12" height="12" /> <rect x="72" y="72" width="12" height="12" /> <rect x="96" y="72" width="12" height="12" /> <rect x="120" y="72" width="12" height="12" /> <rect x="144" y="72" width="12" height="12" /> <rect x="168" y="72" width="12" height="12" /> <rect x="192" y="72" width="12" height="12" /> <rect x="216" y="72" width="12" height="12" /> <rect x="228" y="72" width="12" height="12" /> <rect x="240" y="72" width="12" height="12" /> <rect x="252" y="72" width="12" height="12" /> <rect x="264" y="72" width="12" height="12" /> <rect x="276" y="72" width="12" height="12" /> <rect x="288" y="72" width="12" height="12" /> <rect x="144" y="84" width="12" height="12" /> <rect x="168" y="84" width="12" height="12" /> <rect x="180" y="84" width="12" height="12" /> <rect x="0" y="96" width="12" height="12" /> <rect x="12" y="96" width="12" height="12" /> <rect x="24" y="96" width="12" height="12" /> <rect x="36" y="96" width="12" height="12" /> <rect x="48" y="96" width="12" height="12" /> <rect x="72" y="96" width="12" height="12" /> <rect x="84" y="96" width="12" height="12" /> <rect x="96" y="96" width="12" height="12" /> <rect x="108" y="96" width="12" height="12" /> <rect x="120" y="96" width="12" height="12" /> <rect x="144" y="96" width="12" height="12" /> <rect x="156" y="96" width="12" height="12" /> <rect x="168" y="96" width="12" height="12" /> <rect x="204" y="96" width="12" height="12" /> <rect x="228" y="96" width="12" height="12" /> <rect x="252" y="96" width="12" height="12" /> <rect x="276" y="96" width="12" height="12" /> <rect x="0" y="108" width="12" height="12" /> <rect x="24" y="108" width="12" height="12" /> <rect x="36" y="108" width="12" height="12" /> <rect x="84" y="108" width="12" height="12" /> <rect x="96" y="108" width="12" height="12" /> <rect x="108" y="108" width="12" height="12" /> <rect x="144" y="108" width="12" height="12" /> <rect x="156" y="108" width="12" height="12" /> <rect x="192" y="108" width="12" height="12" /> <rect x="204" y="108" width="12" height="12" /> <rect x="228" y="108" width="12" height="12" /> <rect x="276" y="108" width="12" height="12" /> <rect x="36" y="120" width="12" height="12" /> <rect x="48" y="120" width="12" height="12" /> <rect x="60" y="120" width="12" height="12" /> <rect x="72" y="120" width="12" height="12" /> <rect x="96" y="120" width="12" height="12" /> <rect x="108" y="120" width="12" height="12" /> <rect x="132" y="120" width="12" height="12" /> <rect x="168" y="120" width="12" height="12" /> <rect x="180" 
y="120" width="12" height="12" /> <rect x="204" y="120" width="12" height="12" /> <rect x="216" y="120" width="12" height="12" /> <rect x="228" y="120" width="12" height="12" /> <rect x="240" y="120" width="12" height="12" /> <rect x="252" y="120" width="12" height="12" /> <rect x="276" y="120" width="12" height="12" /> <rect x="288" y="120" width="12" height="12" /> <rect x="0" y="132" width="12" height="12" /> <rect x="12" y="132" width="12" height="12" /> <rect x="48" y="132" width="12" height="12" /> <rect x="60" y="132" width="12" height="12" /> <rect x="108" y="132" width="12" height="12" /> <rect x="120" y="132" width="12" height="12" /> <rect x="168" y="132" width="12" height="12" /> <rect x="180" y="132" width="12" height="12" /> <rect x="204" y="132" width="12" height="12" /> <rect x="228" y="132" width="12" height="12" /> <rect x="288" y="132" width="12" height="12" /> <rect x="36" y="144" width="12" height="12" /> <rect x="72" y="144" width="12" height="12" /> <rect x="108" y="144" width="12" height="12" /> <rect x="132" y="144" width="12" height="12" /> <rect x="156" y="144" width="12" height="12" /> <rect x="180" y="144" width="12" height="12" /> <rect x="192" y="144" width="12" height="12" /> <rect x="204" y="144" width="12" height="12" /> <rect x="216" y="144" width="12" height="12" /> <rect x="228" y="144" width="12" height="12" /> <rect x="240" y="144" width="12" height="12" /> <rect x="264" y="144" width="12" height="12" /> <rect x="276" y="144" width="12" height="12" /> <rect x="288" y="144" width="12" height="12" /> <rect x="0" y="156" width="12" height="12" /> <rect x="12" y="156" width="12" height="12" /> <rect x="36" y="156" width="12" height="12" /> <rect x="60" y="156" width="12" height="12" /> <rect x="108" y="156" width="12" height="12" /> <rect x="120" y="156" width="12" height="12" /> <rect x="144" y="156" width="12" height="12" /> <rect x="192" y="156" width="12" height="12" /> <rect x="204" y="156" width="12" height="12" /> <rect x="228" y="156" width="12" height="12" /> <rect x="252" y="156" width="12" height="12" /> <rect x="276" y="156" width="12" height="12" /> <rect x="0" y="168" width="12" height="12" /> <rect x="48" y="168" width="12" height="12" /> <rect x="60" y="168" width="12" height="12" /> <rect x="72" y="168" width="12" height="12" /> <rect x="96" y="168" width="12" height="12" /> <rect x="120" y="168" width="12" height="12" /> <rect x="132" y="168" width="12" height="12" /> <rect x="144" y="168" width="12" height="12" /> <rect x="180" y="168" width="12" height="12" /> <rect x="204" y="168" width="12" height="12" /> <rect x="216" y="168" width="12" height="12" /> <rect x="228" y="168" width="12" height="12" /> <rect x="240" y="168" width="12" height="12" /> <rect x="252" y="168" width="12" height="12" /> <rect x="276" y="168" width="12" height="12" /> <rect x="288" y="168" width="12" height="12" /> <rect x="0" y="180" width="12" height="12" /> <rect x="24" y="180" width="12" height="12" /> <rect x="84" y="180" width="12" height="12" /> <rect x="96" y="180" width="12" height="12" /> <rect x="108" y="180" width="12" height="12" /> <rect x="120" y="180" width="12" height="12" /> <rect x="144" y="180" width="12" height="12" /> <rect x="180" y="180" width="12" height="12" /> <rect x="192" y="180" width="12" height="12" /> <rect x="204" y="180" width="12" height="12" /> <rect x="228" y="180" width="12" height="12" /> <rect x="240" y="180" width="12" height="12" /> <rect x="288" y="180" width="12" height="12" /> <rect x="0" y="192" width="12" 
height="12" /> <rect x="48" y="192" width="12" height="12" /> <rect x="72" y="192" width="12" height="12" /> <rect x="96" y="192" width="12" height="12" /> <rect x="168" y="192" width="12" height="12" /> <rect x="180" y="192" width="12" height="12" /> <rect x="192" y="192" width="12" height="12" /> <rect x="204" y="192" width="12" height="12" /> <rect x="216" y="192" width="12" height="12" /> <rect x="228" y="192" width="12" height="12" /> <rect x="240" y="192" width="12" height="12" /> <rect x="264" y="192" width="12" height="12" /> <rect x="96" y="204" width="12" height="12" /> <rect x="108" y="204" width="12" height="12" /> <rect x="120" y="204" width="12" height="12" /> <rect x="132" y="204" width="12" height="12" /> <rect x="180" y="204" width="12" height="12" /> <rect x="192" y="204" width="12" height="12" /> <rect x="240" y="204" width="12" height="12" /> <rect x="252" y="204" width="12" height="12" /> <rect x="0" y="216" width="12" height="12" /> <rect x="12" y="216" width="12" height="12" /> <rect x="24" y="216" width="12" height="12" /> <rect x="36" y="216" width="12" height="12" /> <rect x="48" y="216" width="12" height="12" /> <rect x="60" y="216" width="12" height="12" /> <rect x="72" y="216" width="12" height="12" /> <rect x="96" y="216" width="12" height="12" /> <rect x="120" y="216" width="12" height="12" /> <rect x="156" y="216" width="12" height="12" /> <rect x="168" y="216" width="12" height="12" /> <rect x="192" y="216" width="12" height="12" /> <rect x="216" y="216" width="12" height="12" /> <rect x="240" y="216" width="12" height="12" /> <rect x="264" y="216" width="12" height="12" /> <rect x="276" y="216" width="12" height="12" /> <rect x="288" y="216" width="12" height="12" /> <rect x="0" y="228" width="12" height="12" /> <rect x="72" y="228" width="12" height="12" /> <rect x="108" y="228" width="12" height="12" /> <rect x="120" y="228" width="12" height="12" /> <rect x="144" y="228" width="12" height="12" /> <rect x="156" y="228" width="12" height="12" /> <rect x="168" y="228" width="12" height="12" /> <rect x="192" y="228" width="12" height="12" /> <rect x="240" y="228" width="12" height="12" /> <rect x="252" y="228" width="12" height="12" /> <rect x="288" y="228" width="12" height="12" /> <rect x="0" y="240" width="12" height="12" /> <rect x="24" y="240" width="12" height="12" /> <rect x="36" y="240" width="12" height="12" /> <rect x="48" y="240" width="12" height="12" /> <rect x="72" y="240" width="12" height="12" /> <rect x="96" y="240" width="12" height="12" /> <rect x="108" y="240" width="12" height="12" /> <rect x="120" y="240" width="12" height="12" /> <rect x="144" y="240" width="12" height="12" /> <rect x="180" y="240" width="12" height="12" /> <rect x="192" y="240" width="12" height="12" /> <rect x="204" y="240" width="12" height="12" /> <rect x="216" y="240" width="12" height="12" /> <rect x="228" y="240" width="12" height="12" /> <rect x="240" y="240" width="12" height="12" /> <rect x="264" y="240" width="12" height="12" /> <rect x="276" y="240" width="12" height="12" /> <rect x="288" y="240" width="12" height="12" /> <rect x="0" y="252" width="12" height="12" /> <rect x="24" y="252" width="12" height="12" /> <rect x="36" y="252" width="12" height="12" /> <rect x="48" y="252" width="12" height="12" /> <rect x="72" y="252" width="12" height="12" /> <rect x="96" y="252" width="12" height="12" /> <rect x="108" y="252" width="12" height="12" /> <rect x="132" y="252" width="12" height="12" /> <rect x="144" y="252" width="12" height="12" /> <rect x="168" 
y="252" width="12" height="12" /> <rect x="216" y="252" width="12" height="12" /> <rect x="240" y="252" width="12" height="12" /> <rect x="252" y="252" width="12" height="12" /> <rect x="264" y="252" width="12" height="12" /> <rect x="276" y="252" width="12" height="12" /> <rect x="288" y="252" width="12" height="12" /> <rect x="0" y="264" width="12" height="12" /> <rect x="24" y="264" width="12" height="12" /> <rect x="36" y="264" width="12" height="12" /> <rect x="48" y="264" width="12" height="12" /> <rect x="72" y="264" width="12" height="12" /> <rect x="96" y="264" width="12" height="12" /> <rect x="120" y="264" width="12" height="12" /> <rect x="144" y="264" width="12" height="12" /> <rect x="192" y="264" width="12" height="12" /> <rect x="252" y="264" width="12" height="12" /> <rect x="264" y="264" width="12" height="12" /> <rect x="288" y="264" width="12" height="12" /> <rect x="0" y="276" width="12" height="12" /> <rect x="72" y="276" width="12" height="12" /> <rect x="96" y="276" width="12" height="12" /> <rect x="108" y="276" width="12" height="12" /> <rect x="168" y="276" width="12" height="12" /> <rect x="180" y="276" width="12" height="12" /> <rect x="192" y="276" width="12" height="12" /> <rect x="204" y="276" width="12" height="12" /> <rect x="228" y="276" width="12" height="12" /> <rect x="240" y="276" width="12" height="12" /> <rect x="252" y="276" width="12" height="12" /> <rect x="288" y="276" width="12" height="12" /> <rect x="0" y="288" width="12" height="12" /> <rect x="12" y="288" width="12" height="12" /> <rect x="24" y="288" width="12" height="12" /> <rect x="36" y="288" width="12" height="12" /> <rect x="48" y="288" width="12" height="12" /> <rect x="60" y="288" width="12" height="12" /> <rect x="72" y="288" width="12" height="12" /> <rect x="96" y="288" width="12" height="12" /> <rect x="108" y="288" width="12" height="12" /> <rect x="120" y="288" width="12" height="12" /> <rect x="132" y="288" width="12" height="12" /> <rect x="144" y="288" width="12" height="12" /> <rect x="228" y="288" width="12" height="12" /> <rect x="240" y="288" width="12" height="12" /> <rect x="252" y="288" width="12" height="12" /> <rect x="264" y="288" width="12" height="12" /> <rect x="276" y="288" width="12" height="12" /> <rect x="288" y="288" width="12" height="12" /> </g> </svg> </div> </div> </div> <a class="close-reveal-modal" aria-label="Close"> <i class="material-icons">clear</i> </a> </div> <a href="#" class="back-to-top"><span class="show-for-medium-up">Back to Top</span><span class="show-for-small">Top</span></a> <script data-cfasync="false" src="/cdn-cgi/scripts/5c5dd728/cloudflare-static/email-decode.min.js"></script><script src="https://pub.mdpi-res.com/assets/js/modernizr-2.8.3.min.js?5227e0738f7f421d?1732087095"></script> <script src="https://pub.mdpi-res.com/assets/js/jquery-1.12.4.min.js?4f252523d4af0b47?1732087095"></script> <script src="https://pub.mdpi-res.com/assets/js/foundation-5.5.3.min.js?6b2ec41c18b29054?1732087095"></script> <script src="https://pub.mdpi-res.com/assets/js/foundation-5.5.3.equalizer.min.js?0f6c549b75ec554c?1732087095"></script> <script src="https://pub.mdpi-res.com/assets/js/jquery.multiselect.js?0edd3998731d1091?1732087095"></script> <script src="https://pub.mdpi-res.com/assets/js/jquery.cycle2.min.js?63413052928f97ee?1732087095"></script> <script> // old browser fix - this way the console log rows won't throw (silent) errors in browsers not supporting console log if (!window.console) window.console = {}; if (!window.console.log) 
src="https://pub.mdpi-res.com/assets/js/mdpi.js?c267ce58392b15da?1732087095"></script> <script>var banners_url = 'https://serve.mdpi.com';</script> <script type='text/javascript' src='https://pub.mdpi-res.com/assets/js/ifvisible.min.js?c621d19ecb761212?1732087095'></script> <script src="https://pub.mdpi-res.com/assets/js/xmltohtml/affix.js?ac4ea55275297c15?1732087095"></script> <script src="https://pub.mdpi-res.com/assets/js/clipboard.min.js?3f3688138a1b9fc4?1732087095"></script> <script type="text/javascript"> $(document).ready(function() { var helpFunctions = $(".middle-column__help__fixed"); var leftColumnAffix = $(".left-column__fixed"); var middleColumn = $("#middle-column"); var clone = null; helpFunctions.affix({ offset: { top: function() { return middleColumn.offset().top - 8 - (Foundation.utils.is_medium_only() ? 30 : 0); }, bottom: function() { return $("#footer").innerHeight() + 74 + (Foundation.utils.is_medium_only() ? 0 : 0); } } }); if (leftColumnAffix.length > 0) { clone = leftColumnAffix.clone(); clone.addClass("left-column__fixed__affix"); clone.insertBefore(leftColumnAffix); clone.css('width', leftColumnAffix.outerWidth() + 50); clone.affix({ offset: { top: function() { return leftColumnAffix.offset().top - 30 - (Foundation.utils.is_medium_only() ? 50 : 0); }, bottom: function() { return $("#footer").innerHeight() + 92 + (Foundation.utils.is_medium_only() ? 0 : 0); } } }); } $(window).on("resize", function() { if (clone !== null) { clone.css('width', leftColumnAffix.outerWidth() + 50); } }); new ClipboardJS('.js-clipboard-copy'); }); </script> <script src="https://pub.mdpi-res.com/assets/js/jquery-ui-1.13.2.min.js?1e2047978946a1d2?1732087095"></script> <script src="https://pub.mdpi-res.com/assets/js/slick.min.js?d5a61c749e44e471?1732087095"></script> <script> $(document).ready(function() { $(".link-article-menu").click(function(e) { e.preventDefault(); $(this).find('span').toggle(); $(this).next("div").toggleClass("active"); }); $(".js-similarity-related-articles").click(function(e) { e.preventDefault(); if ('' !== $('#recommended-articles-modal').attr('data-url')) { $('#recommended-articles-modal').foundation('reveal', 'open', $('#recommended-articles-modal').attr('data-url')); } }); $.ajax({ url: "/article/1136135/similarity-related/show-link", success: function(result) { if (result.show) { $('#recommended-articles-modal').attr('data-url', result.link); $('.js-article-similarity-container').show(); } } }); $(document).on('opened.fndtn.reveal', '[data-reveal]', function() { var modal = $(this); if (modal.attr('id') === "author-biographies-modal") { modal.find('.multiple-items').slick({ slidesToShow: 1, nextArrow: '<a class="slick-next" href="#"><i class="material-icons">chevron_right</i></a>', prevArrow: '<a class="slick-prev" href="#"><i class="material-icons">chevron_left</i></a>', slidesToScroll: 1, draggable: false, }); modal.find('.multiple-items').slick('refresh'); } }); }); </script> <!-- Twitter universal website tag code --> <script> !function(e,t,n,s,u,a){e.twq||(s=e.twq=function(){s.exe?s.exe.apply(s,arguments):s.queue.push(arguments); },s.version='1.1',s.queue=[],u=t.createElement(n),u.async=!0,u.src='//static.ads-twitter.com/uwt.js', a=t.getElementsByTagName(n)[0],a.parentNode.insertBefore(u,a))}(window,document,'script'); // Insert Twitter Pixel ID and Standard Event data below twq('init','o2pa3'); twq('track','PageView'); </script> <!-- End Twitter universal website tag code --> <script> $(document).ready(function() { $(document).on('keyup', function (e) { 
if (e.keyCode == 27) { var hElem = $(this).find(".annotator-adder"); if (hElem.length){ hElem.css({'visibility':'hidden'}); } else { document.querySelector("hypothesis-adder").shadowRoot.querySelector(".annotator-adder").style.visibility = "hidden"; } } }); }); </script> <script> window.hypothesisConfig = function () { return { sidebarAppUrl: 'https://commenting.mdpi.com/app.html', showHighlights: 'whenSidebarOpen' , openSidebar: false , assetRoot: 'https://commentingres.mdpi.com/hypothesis', services: [{ apiUrl: 'https://commenting.mdpi.com/api/', authority: 'mdpi', grantToken: '', doi: '10.3390/app13095521' }], }; }; </script> <script async id="hypothesis_frame"></script> <script type="text/javascript"> if (-1 !== window.location.href.indexOf("?src=")) { window.history.replaceState({}, '', `${location.pathname}`); } $(document).ready(function() { var scifeedCounter = 0; var search = window.location.search; var mathjaxReady = false; // late image file loading $("img[data-lsrc]").each(function() { $(this).attr("src", $(this).data("lsrc")); }); // late mathjax initialization var head = document.getElementsByTagName("head")[0]; var script = document.createElement("script"); script.type = "text/x-mathjax-config"; script[(window.opera ? "innerHTML" : "text")] = "MathJax.Hub.processSectionDelay = 0;\n" + "MathJax.Hub.Config({\n" + " \"menuSettings\": {\n" + " CHTMLpreview: false\n" + " },\n" + " \"CHTML-preview\":{\n" + " disabled: true\n" + " },\n" + " \"HTML-CSS\": {\n" + " scale: 90,\n" + " availableFonts: [],\n" + " preferredFont: null,\n" + " preferredFonts: null,\n" + " webFont:\"Gyre-Pagella\",\n" + " imageFont:'TeX',\n" + " undefinedFamily:\"'Arial Unicode MS',serif\",\n" + " linebreaks: { automatic: false }\n" + " },\n" + " \"TeX\": {\n" + " extensions: ['noErrors.js'],\n" + " noErrors: {\n" + " inlineDelimiters: [\"\",\"\"],\n" + " multiLine: true,\n" + " style: {\n" + " 'font-size': '90%',\n" + " 'text-align': 'left',\n" + " 'color': 'black',\n" + " 'padding': '1px 3px',\n" + " 'border': '1px solid'\n" + " }\n" + " }\n" + " }\n" + "});\n" + "MathJax.Hub.Register.StartupHook('End', function() {\n" + " refreshMathjaxWidths();\n" + " mathjaxReady = true;\n" + "});\n" + "MathJax.Hub.Startup.signal.Interest(function (message) {\n" + " if (message == 'End') {\n" + " var hypoLink = document.getElementById('hypothesis_frame');\n" + " if (null !== hypoLink) {\n" + " hypoLink.setAttribute('src', 'https://commenting.mdpi.com/embed.js');\n" + " }\n" + " }\n" + "});"; head.appendChild(script); script = document.createElement("script"); script.type = "text/javascript"; script.src = "https://pub.mdpi-res.com/bundles/mathjax/MathJax.js?config=TeX-AMS-MML_HTMLorMML"; head.appendChild(script); // article version checker if (0 === search.indexOf('?type=check_update&version=')) { $.ajax({ url: "/2076-3417/13/9/5521" + "/versioncheck" + search, success: function(result) { $(".js-check-update-container").html(result); } }); } $('#feed_option').click(function() { // tracker if ($('#scifeed_clicked').length<1) { $(this).append('<span style="display:none" id="scifeed_clicked">done</span>'); } $('#feed_data').toggle('slide', { direction: 'up'}, '1000'); // slideToggle(700); OR toggle(700) $("#scifeed_error_msg").html('').hide(); $("#scifeed_notice_msg").html('').hide(); }); $('#feed_option').click(function(event) { setTimeout(function(){ var captchaSection = $("#captchaSection"); captchaSection.removeClass('ui-helper-hidden').find('input').prop('disabled', false); // var img = captchaSection.find('img'); // 
img.attr('src', img.data('url') + "?" + (new Date()).getTime()); // $(".captcha_reload").trigger("click"); var img = document.getElementById('gregwar_captcha_scifeed'); img.src = '/generate-captcha/gcb_captcha?n=' + (new Date()).getTime(); },800); }); $(document).on('click', '.split_feeds', function() { var name = $( this ).attr('name'); var flag = 1 - ($(this).is(":checked")*1); $('.split_feeds').each(function (index) { if ($( this ).attr('name') !== name) { $(this)[0].checked = flag; } }); }); $(document).on('click', '#scifeed_submit, #scifeed_submit1', function(event) { event.preventDefault(); $(".captcha_reload").trigger("click"); $("#scifeed_error_msg").html(""); $("#scifeed_error_msg").hide(); }); $(document).on('click', '.subscription_toggle', function(event) { if ($(this).val() === 'Create SciFeed' && $('#scifeed_hidden_flag').length>0) { event.preventDefault(); // alert('Here there would be a captcha because user is not logged in'); var captchaSection = $("#captchaSection"); if (captchaSection.hasClass('ui-helper-hidden')) { captchaSection.removeClass('ui-helper-hidden').find('input').prop('disabled', false); var img = captchaSection.find('img'); img.attr('src', img.data('url') + "?" + (new Date()).getTime()); $("#reloadCaptcha").trigger("click"); } } }); $(document).on('click', '.scifeed_msg', function(){ $(this).hide(); }); $(document).on('click', '.article-scilit-search', function(e) { e.preventDefault(); var data = $(".article-scilit-search-data").val(); var dataArray = data.split(';').map(function(keyword) { return "(\"" + keyword.trim() + "\")"; }); var searchQuery = dataArray.join(" OR "); var searchUrl = encodeURI("https://www.scilit.net/articles/search?q="+ searchQuery + "&advanced=1&highlight=1"); var win = window.open(searchUrl, '_blank'); if (win) { win.focus(); } else { window.location(searchUrl); } }); display_stats(); citedCount(); follow_goto(); // Select the node that will be observed for mutations const targetNodes = document.getElementsByClassName('hypothesis-count-container'); // Options for the observer (which mutations to observe) const config = { attributes: false, childList: true, subtree: false }; // Callback function to execute when mutations are observed const callback = function(mutationList, observer) { for(const mutation of mutationList) { if (mutation.type === 'childList') { let node = $(mutation.target); if (parseInt(node.html()) > 0) { node.show(); } } } }; // Create an observer instance linked to the callback function const observer = new MutationObserver(callback); // Start observing the target node for configured mutations for(const targetNode of targetNodes) { observer.observe(targetNode, config); } // Select the node that will be observed for mutations const mathjaxTargetNode = document.getElementById('middle-column'); // Callback function to execute when mutations are observed const mathjaxCallback = function(mutationList, observer) { if (mathjaxReady && typeof(MathJax) !== 'undefined') { refreshMathjaxWidths(); } }; // Create an observer instance linked to the callback function const mathjaxObserver = new ResizeObserver(mathjaxCallback); // Start observing the target node for configured mutations mathjaxObserver.observe(mathjaxTargetNode); }); /* END $(document).ready */ function refreshMathjaxWidths() { let width = ($('.html-body').width()*0.9) + "px"; $('.MathJax_Display').css('max-width', width); $('.MJXc-display').css('max-width', width); } function sendScifeedFrom(form) { if (!$('#scifeed_email').val().trim()) { // empty email 
alert('Please, provide an email for subscribe to this scifeed'); return false; } else if (!$('#captchaSection').hasClass('ui-helper-hidden') && !$('#captchaSection').find('input').val().trim()) { // empty captcha alert('Please, fill the captcha field.'); return false; } else if( ((($('#scifeed_form').find('input:checkbox:checked').length)-($('#split_feeds:checked').length))<1) || ($('#scifeed_kwd_txt').length < 0 && !$('#scifeed_kwd_txt').val().trim()) || ($('#scifeed_author_txt').length<0 &&!$('#scifeed_author_txt').val().trim()) ) { alert('You did not select anything to subscribe'); return false; } else if(($('#scifeed_form').find('input:checkbox:checked').length)-($('#split_feeds2:checked').length)<1){ alert("You did not select anything to subscribe"); return false; } else { var url = $('#scifeed_subscribe_url').html(); var formData = $(form).serializeArray(); $.post(url, formData).done(function (data) { if (JSON.parse(data)) { $('.scifeed_msg').hide(); var res = JSON.parse(data); var successFeeds = 0; var errorFeeds = 0; if (res) { $('.scifeed_msg').html(''); $.each(res, function (index, val) { if (val) { if (val.error) { errorFeeds++; $("#scifeed_error_msg").append(index+' - '+val.error+'<br>'); } if (val.notice) // for successful feed creation { successFeeds++; // $("#scifeed_notice_msg").append(index+' - '+val.notice+'<br>'); $("#scifeed_notice_msg").append('<li>'+index+'</li>'); } } }); if (successFeeds>0) { text = $('#scifeed_notice_msg').html(); text = 'The following feed'+(successFeeds>1?'s have':' has')+ ' been sucessfully created:<br><ul>'+ text + '</ul>' +($('#scifeed_hidden_flag').length>0 ? 'You are not logged in, so you probably need to validate '+ (successFeeds>1?'them':' it')+'.<br>' :'' ) +'Please check your email'+(successFeeds>1?'s':'')+' for more details.'; //(successFeeds>1?' for each of them':'')+'.<br>'; $("#scifeed_notice_msg").html(text); $("#scifeed_notice_msg").show(); } if (errorFeeds>0) { $("#scifeed_error_msg").show();; } } $("#feed_data").hide(); } }); } } function follow_goto() { var hashStr = location.hash.replace("#",""); if(typeof hashStr !== 'undefined') { if( hashStr == 'supplementary') { document.getElementById('suppl_id').scrollIntoView(); } if( hashStr == 'citedby') { document.getElementById('cited_id').scrollIntoView(); } } } function cited() { $("#framed_div").toggle('fast', function(){ if ($(this).css('display') != 'none') { var loaded = document.getElementById("loaded"); if(loaded.innerHTML == "No") { // Load Xref result var container = document.getElementById("framed_div"); // This replace the content container.innerHTML = "<img src=\"https://pub.mdpi-res.com/img/loading_circle.gif?9a82694213036313?1732087095\" height=\"20\" width=\"20\" alt=\"Processing...\" style=\"vertical-align:middle; margin-right:0.6em;\">"; var url = "/citedby/10.3390%252Fapp13095521/90"; $.post(url, function(result) { if (result.success) { container.innerHTML = result.view; } loaded.innerHTML = "Yes"; }); } } return true; // for not going at the beginning of the page... }) return true; // for not going at the beginning of the page... 
} function detect_device() { // Added by Bastien (18/08/2014): based on the http://detectmobilebrowsers.com/ detector var check = false; (function(a){if(/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows (ce|phone)|xda|xiino/i.test(a)||/1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\-2|po(ck|rt|se)|prox|psio|pt\-g|qa\-a|qc(07|12|21|32|60|\-[2-7]|i\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\-|oo|p\-)|sdk\/|se(c(\-|0|1)|47|mc|nd|ri)|sgh\-|shar|sie(\-|m)|sk\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\-|v\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\-|tdg\-|tel(i|m)|tim\-|t\-mo|to(pl|sh)|ts(70|m\-|m3|m5)|tx\-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\-|your|zeto|zte\-/i.test(a.substr(0,4)))check = true})(navigator.userAgent||navigator.vendor||window.opera); return check; } function display_stats(){ $("#article_stats_div").toggle(); return false; } /* * Cited By Scopus */ function citedCount(){ $("#framed_div_cited_count").toggle('fast', function(){ if ($(this).css('display') != 'none') { var loaded = document.getElementById("loaded_cite_count"); // to load only once the result! if(loaded.innerHTML == "No") { // Load Xref result var d = document.getElementById("framed_div_cited_count"); // This replace the content d.innerHTML = "<img src=\"https://pub.mdpi-res.com/img/loading_circle.gif?9a82694213036313?1732087095\" height=\"20\" width=\"20\" alt=\"Processing...\" style=\"vertical-align:middle; margin-right:0.6em;\">"; $.ajax({ method : "POST", url : "/cite-count/10.3390%252Fapp13095521", success : function(data) { if (data.succ) { d.innerHTML = data.view; loaded.innerHTML = "Yes"; follow_goto(); } } }); } } // end else return true; // for not going at the beginning of the page... }) return true; // for not going at the beginning of the page... 
} </script><script type="text/javascript" src="https://pub.mdpi-res.com/assets/js/third-party/highcharts/highcharts.js?bdd06f45e34c33df?1732087095"></script><script type="text/javascript" src="https://pub.mdpi-res.com/assets/js/third-party/highcharts/modules/exporting.js?944dc938d06de3a8?1732087095"></script><script type="text/javascript" defer="defer"> var advancedStatsData; var selectedStatsType = "abstract"; $(function(){ var countWrapper = $('#counts-wrapper'); $('#author_stats_id #type_links a').on('click', function(e) { e.preventDefault(); selectedStatsType = $(this).data('type'); $('#article_advanced_stats').vectorMap('set', 'values', advancedStatsData[selectedStatsType]); $('#advanced_stats_max').html(advancedStatsData[selectedStatsType].max); $('#type_links a').removeClass('active'); $(this).addClass('active'); }); $.get('/2076-3417/13/9/5521/stats', function (result) { if (!result.success) { return; } // process article metrics part in left column var viewNumber = countWrapper.find(".view-number"); viewNumber.html(result.metrics.views); viewNumber.parent().toggleClass("count-div--grey", result.metrics.views == 0); var downloadNumber = countWrapper.find(".download-number"); downloadNumber.html(result.metrics.downloads); downloadNumber.parent().toggleClass("count-div--grey", result.metrics.downloads == 0); var citationsNumber = countWrapper.find(".citations-number"); citationsNumber.html(result.metrics.citations); citationsNumber.parent().toggleClass("count-div--grey", result.metrics.citations == 0); if (result.metrics.views > 0 || result.metrics.downloads > 0 || result.metrics.citations > 0) { countWrapper.find("#js-counts-wrapper__views, #js-counts-wrapper__downloads").addClass("visible").show(); if (result.metrics.citations > 0) { countWrapper.find('.citations-number').html(result.metrics.citations).show(); countWrapper.find("#js-counts-wrapper__citations").addClass("visible").show(); } else { countWrapper.find("#js-counts-wrapper__citations").remove(); } $("[data-id='article-counters']").removeClass("hidden"); } if (result.metrics.altmetrics_score > 0) { $("#js-altmetrics-donut").show(); } // process view chart in main column var jsondata = result.chart; var series = new Array(); $.each(jsondata.elements, function(i, element) { var dataValues = new Array(); $.each(element.values, function(i, value) { dataValues.push(new Array(value.tip, value.value)); }); series[i] = {name: element.text, data:dataValues}; }); Highcharts.setOptions({ chart: { style: { fontFamily: 'Arial,sans-serif' } } }); $('#article_stats_swf').highcharts({ chart: { type: 'line', width: $("#tabs").width() //* 0.91 }, credits: { enabled: false }, exporting: { enabled: true }, title: { text: jsondata.title.text, x: -20 //center }, xAxis: { categories: jsondata.x_axis.labels.labels, offset: jsondata.x_axis.offset, labels:{ step: jsondata.x_axis.labels.steps, rotation: 30 } }, yAxis: { max: jsondata.y_axis.max, min: jsondata.y_axis.min, offset: jsondata.y_axis.offset, labels: { steps: jsondata.y_axis.steps }, title: { enabled: false } }, tooltip: { formatter: function (){ return this.key.replace("#val#", this.y); } }, legend: { align: 'top', itemDistance: 50 }, series: series }); }); $('#supplement_link').click(function() { document.getElementById('suppl_id').scrollIntoView(); }); $('#stats_link').click(function() { document.getElementById('stats_id').scrollIntoView(); }); // open mol viewer for molbank special supplementary files $('.showJmol').click(function(e) { e.preventDefault(); var jmolModal = 
$("#jmolModal"); var url = "/article/1136135/jsmol_viewer/__supplementary_id__"; url = url.replace(/__supplementary_id__/g, $(this).data('index')); $('#jsmol-content').attr('src', url); jmolModal.find(".content").html($(this).data('description')); jmolModal.foundation("reveal", "open"); }); }); !function() { "use strict"; function e(e) { try { if ("undefined" == typeof console) return; "error"in console ? console.error(e) : console.log(e) } catch (e) {} } function t(e) { return d.innerHTML = '<a href="' + e.replace(/"/g, "&quot;") + '"></a>', d.childNodes[0].getAttribute("href") || "" } function n(n, c) { var o = ""; var k = parseInt(n.substr(c + 4, 2), 16); for (var i = c; i < n.length; i += 2) { if (i != c + 4) { var s = parseInt(n.substr(i, 2), 16) ^ k; o += String.fromCharCode(s); } } try { o = decodeURIComponent(escape(o)); } catch (error) { console.error(error); } return t(o); } function c(t) { for (var r = t.querySelectorAll("a"), c = 0; c < r.length; c++) try { var o = r[c] , a = o.href.indexOf(l); a > -1 && (o.href = "mailto:" + n(o.href, a + l.length)) } catch (i) { e(i) } } function o(t) { for (var r = t.querySelectorAll(u), c = 0; c < r.length; c++) try { var o = r[c] , a = o.parentNode , i = o.getAttribute(f); if (i) { var l = n(i, 0) , d = document.createTextNode(l); a.replaceChild(d, o) } } catch (h) { e(h) } } function a(t) { for (var r = t.querySelectorAll("template"), n = 0; n < r.length; n++) try { i(r[n].content) } catch (c) { e(c) } } function i(t) { try { c(t), o(t), a(t) } catch (r) { e(r) } } var l = "/cnd-cgi/l/email-protection#" , u = ".__cf_email__" , f = "data-cfemail" , d = document.createElement("div"); i(document), function() { var e = document.currentScript || document.scripts[document.scripts.length - 1]; e.parentNode.removeChild(e) }() }(); </script><script type="text/javascript"> function setCookie(cname, cvalue, ctime) { ctime = (typeof ctime === 'undefined') ? 
10*365*24*60*60*1000 : ctime; // default => 10 years var d = new Date(); d.setTime(d.getTime() + ctime); // ==> 1 hour = 60*60*1000 var expires = "expires="+d.toUTCString(); document.cookie = cname + "=" + cvalue + "; " + expires +"; path=/"; } function getCookie(cname) { var name = cname + "="; var ca = document.cookie.split(';'); for(var i=0; i<ca.length; i++) { var c = ca[i]; while (c.charAt(0)==' ') c = c.substring(1); if (c.indexOf(name) == 0) return c.substring(name.length, c.length); } return ""; } </script><script type="text/javascript" src="https://d1bxh8uas1mnw7.cloudfront.net/assets/embed.js"></script><script> $(document).ready(function() { if ($("#js-similarity-related-data").length > 0) { $.ajax({ url: '/article/1136135/similarity-related', success: function(response) { $("#js-similarity-related-data").html(response); $("#js-related-articles-menu").show(); $(document).foundation('tab', 'reflow'); MathJax.Hub.Queue(["Typeset", MathJax.Hub]); } }); } }); </script><link rel="stylesheet" href="https://pub.mdpi-res.com/assets/css/jquery-ui-1.10.4.custom.min.css?80647d88647bf347?1732087095"><link rel="stylesheet" href="https://pub.mdpi-res.com/assets/css/magnific-popup.min.css?04d343e036f8eecd?1732087095"><script type="text/javascript" src="https://pub.mdpi-res.com/assets/js/magnific-popup.min.js?2be3d9e7dc569146?1732087095"></script><script> $(function() { $(".js-show-more-academic-editors").on("click", function(e) { e.preventDefault(); $(this).hide(); $(".academic-editor-container").removeClass("hidden"); }); }); </script> <link rel="stylesheet" href="https://pub.mdpi-res.com/assets/css/vmap/jqvmap.min.css?126a06688aa11c13?1732087095"> <script src="https://pub.mdpi-res.com/assets/js/vmap/jquery.vmap.min.js?935f68d33bdd88a1?1732087095"></script> <script src="https://pub.mdpi-res.com/assets/js/vmap/jquery.vmap.world.js?16677403c0e1bef1?1732087095"></script> <script> function updateSlick() { $('.multiple-items').slick('setPosition'); } $(document).ready(function() { $('.multiple-items').slick({ slidesToShow: 1, nextArrow: '<a class="slick-next" href="#"><i class="material-icons">chevron_right</i></a>', prevArrow: '<a class="slick-prev" href="#"><i class="material-icons">chevron_left</i></a>', slidesToScroll: 1, responsive: [ { breakpoint: 1024, settings: { slidesToShow: 1, slidesToScroll: 1, } }, { breakpoint: 600, settings: { slidesToShow: 1, slidesToScroll: 1, } }, { breakpoint: 480, settings: { slidesToShow: 1, slidesToScroll: 1, } } ] }); $('.multiple-items').show(); $(document).on('click', '.reviewReportSelector', function(e) { let path = $(this).attr('data-path'); handleReviews(path, $(this)); }); $(document).on('click', '.viewReviewReports', function(e) { let versionOne = $('#versionTab_1'); if (!versionOne.hasClass('activeTab')) { let path = $(this).attr('data-path'); handleReviews(path, versionOne); } location.href = "#reviewReports"; }); $(document).on('click', '.reviewersResponse, .authorResponse', function(e) { let version = $(this).attr('data-version'); let targetVersion = $('#versionTab_' + version); if (!targetVersion.hasClass('activeTab')) { let path = targetVersion.attr('data-path'); handleReviews(path, targetVersion); } location.href = $(this).attr('data-link'); }); $(document).on('click', '.tab', function (e) { e.preventDefault(); $('.tab').removeClass('activeTab'); $(this).addClass('activeTab') $('.tab').each(function() { $(this).closest('.tab-title').removeClass('active'); }); $(this).closest('.tab-title').addClass('active') }); }); function handleReviews(path, 
target) { $.ajax({ url: path, context: this, success: function (data) { $('.activeTab').removeClass('activeTab'); target.addClass('activeTab'); $('#reviewSection').html(data.view); }, error: function (xhr, ajaxOptions, thrownError) { console.log(xhr.status); console.log(thrownError); } }); } </script> <script src="https://pub.mdpi-res.com/assets/js/xmltohtml/affix.js?v1?1732087095"></script> <script src="https://pub.mdpi-res.com/assets/js/xmltohtml/storage.js?e9b262d3a3476d25?1732087095"></script> <script src="https://pub.mdpi-res.com/assets/js/xmltohtml/jquery-scrollspy.js?09cbaec0dbb35a67?1732087095"></script> <script src="https://pub.mdpi-res.com/assets/js/xmltohtml/magnific-popup.js?4a09c18460afb26c?1732087095"></script> <script src="https://pub.mdpi-res.com/assets/js/xmltohtml/underscore.js?f893e294cde60c24?1732087095"></script> <script type="text/javascript"> $('document').ready(function(){ $("#left-column").addClass("show-for-large-up"); $("#middle-column").removeClass("medium-9").removeClass("left-bordered").addClass("medium-12"); $(window).on('resize scroll', function() { /* if ($('.button--drop-down').isInViewport($(".top-bar").outerHeight())) { */ if ($('.button--drop-down').isInViewport()) { $("#js-button-download").hide(); } else { $("#js-button-download").show(); } }); }); $(document).on('DOMNodeInserted', function(e) { var element = $(e.target); if (element.hasClass('menu') && element.hasClass('html-nav') ) { element.addClass("side-menu-ul"); } }); </script> <script src="https://pub.mdpi-res.com/assets/js/xmltohtml/articles.js?5118449d9ad8913a?1732087095"></script> <script> repositionOpenSideBar = function() { $('#left-column').addClass("show-for-large-up show-for-medium-up").show(); $('#middle-column').removeClass('large-12').removeClass('medium-12'); $('#middle-column').addClass('large-9'); } repositionCloseSideBar = function() { $('#left-column').removeClass("show-for-large-up show-for-medium-up").hide(); $('#middle-column').removeClass('large-9'); $('#middle-column').addClass('large-12').addClass('medium-12'); } </script> <!--[if lt IE 9]> <script src="https://pub.mdpi-res.com/assets/js/ie8/ie8.js?6eef8fcbc831f5bd?1732087095"></script> <script src="https://pub.mdpi-res.com/assets/js/ie8/jquery.xdomainrequest.min.js?a945caca315782b0?1732087095"></script> <![endif]--> <!-- Twitter universal website tag code --> <script type="text/plain" data-cookieconsent="marketing"> !function(e,t,n,s,u,a){e.twq||(s=e.twq=function(){s.exe?s.exe.apply(s,arguments):s.queue.push(arguments); },s.version='1.1',s.queue=[],u=t.createElement(n),u.async=!0,u.src='//static.ads-twitter.com/uwt.js', a=t.getElementsByTagName(n)[0],a.parentNode.insertBefore(u,a))}(window,document,'script'); // Insert Twitter Pixel ID and Standard Event data below twq('init','o2pip'); twq('track','PageView'); </script> <!-- End Twitter universal website tag code --> <script>(function(){function c(){var b=a.contentDocument||a.contentWindow.document;if(b){var d=b.createElement('script');d.innerHTML="window.__CF$cv$params={r:'8e6ccd84a91e44a3',t:'MTczMjMxOTIyNy4wMDAwMDA='};var a=document.createElement('script');a.nonce='';a.src='/cdn-cgi/challenge-platform/scripts/jsd/main.js';document.getElementsByTagName('head')[0].appendChild(a);";b.getElementsByTagName('head')[0].appendChild(d)}}if(document.body){var 
a=document.createElement('iframe');a.height=1;a.width=1;a.style.position='absolute';a.style.top=0;a.style.left=0;a.style.border='none';a.style.visibility='hidden';document.body.appendChild(a);if('loading'!==document.readyState)c();else if(window.addEventListener)document.addEventListener('DOMContentLoaded',c);else{var e=document.onreadystatechange||function(){};document.onreadystatechange=function(b){e(b);'loading'!==document.readyState&&(document.onreadystatechange=e,c())}}}})();</script></body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10