Low Light Image Enhancement with Multi-Stage Interconnected Autoencoders Integration in Pix-to-Pix GAN
International Journal of Computer and Information Engineering, Vol. 18, No. 11, pp. 657-666, November 2024
type="submit"><i class="fas fa-search"></i></button> </form> </div> <div class="collapse navbar-collapse mt-1" id="navbarMenu"> <ul class="navbar-nav ml-auto align-items-center" id="mainNavMenu"> <li class="nav-item"> <a class="nav-link" href="https://waset.org/conferences" title="Conferences in 2024/2025/2026">Conferences</a> </li> <li class="nav-item"> <a class="nav-link" href="https://waset.org/disciplines" title="Disciplines">Disciplines</a> </li> <li class="nav-item"> <a class="nav-link" href="https://waset.org/committees" rel="nofollow">Committees</a> </li> <li class="nav-item dropdown"> <a class="nav-link dropdown-toggle" href="#" id="navbarDropdownPublications" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false"> Publications </a> <div class="dropdown-menu" aria-labelledby="navbarDropdownPublications"> <a class="dropdown-item" href="https://publications.waset.org/abstracts">Abstracts</a> <a class="dropdown-item" href="https://publications.waset.org">Periodicals</a> <a class="dropdown-item" href="https://publications.waset.org/archive">Archive</a> </div> </li> <li class="nav-item"> <a class="nav-link" href="https://waset.org/page/support" title="Support">Support</a> </li> </ul> </div> </div> </nav> </div> </header> <main> <div class="container mt-4"> <div class="row"> <div class="col-md-9 mx-auto"> <form method="get" action="https://publications.waset.org/search"> <div id="custom-search-input"> <div class="input-group"> <i class="fas fa-search"></i> <input type="text" class="search-query" name="q" placeholder="Author, Title, Abstract, Keywords" value=""> <input type="submit" class="btn_search" value="Search"> </div> </div> </form> </div> </div> <div class="row mt-3"> <div class="col-sm-3"> <div class="card"> <div class="card-body"><strong>Commenced</strong> in January 2007</div> </div> </div> <div class="col-sm-3"> <div class="card"> <div class="card-body"><strong>Frequency:</strong> Monthly</div> </div> </div> <div class="col-sm-3"> <div class="card"> <div class="card-body"><strong>Edition:</strong> International</div> </div> </div> <div class="col-sm-3"> <div class="card"> <div class="card-body"><strong>Paper Count:</strong> 33093</div> </div> </div> </div> <div class="card publication-listing mt-3 mb-3"> <h5 class="card-header" style="font-size:.9rem">Low Light Image Enhancement with Multi-Stage Interconnected Autoencoders Integration in Pix-to-Pix GAN</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Muhammad%20Atif">Muhammad Atif</a>, <a href="https://publications.waset.org/search?q=Cang%20Yan"> Cang Yan</a> </p> <p class="card-text"><strong>Abstract:</strong></p> <p>The enhancement of low-light images is a significant area of study aimed at enhancing the quality of captured images in challenging lighting environments. Recently, methods based on Convolutional Neural Networks (CNN) have gained prominence as they offer state-of-the-art performance. However, many approaches based on CNN rely on increasing the size and complexity of the neural network. In this study, we propose an alternative method for improving low-light images using an Autoencoders-based multiscale knowledge transfer model. Our method leverages the power of three autoencoders, where the encoders of the first two autoencoders are directly connected to the decoder of the third autoencoder. Additionally, the decoder of the first two autoencoders is connected to the encoder of the third autoencoder. 
This architecture enables effective knowledge transfer, allowing the third autoencoder to learn from and build on the enhanced features extracted by the first two autoencoders. We further integrate the proposed model into the Pix-to-Pix GAN framework: by using it as the generator, we aim to produce enhanced images that not only exhibit improved visual quality but also have a more authentic and realistic appearance (a training-step sketch follows the keywords). Experimental results, both qualitative and quantitative, show that our method outperforms state-of-the-art methodologies.

Keywords: Low-light image enhancement, deep learning, convolutional neural network, image processing.
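As with the generator sketch above, the following is a hypothetical rendering of the Pix-to-Pix integration the abstract describes, following the conditional-GAN recipe of Isola et al.: a PatchGAN-style discriminator scores (low-light input, image) pairs, and the generator is trained with an adversarial term plus an L1 fidelity term. The discriminator depth, the BCE losses, and the lam=100.0 weight are assumptions borrowed from the original Pix2Pix paper, not the authors' settings.

import torch
import torch.nn as nn

class PatchDiscriminator(nn.Module):
    def __init__(self, c_in=6, feat=64):
        super().__init__()
        # shallow PatchGAN-style stack; each output value judges one patch
        self.net = nn.Sequential(
            nn.Conv2d(c_in, feat, 4, 2, 1), nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(feat, feat * 2, 4, 2, 1), nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(feat * 2, 1, 4, 1, 1))  # per-patch real/fake logits

    def forward(self, low, img):
        # condition on the low-light input by channel concatenation
        return self.net(torch.cat([low, img], dim=1))

def train_step(G, D, opt_g, opt_d, low, ref, lam=100.0):
    bce = nn.BCEWithLogitsLoss()
    fake = G(low)
    # --- discriminator: real pairs -> 1, fake pairs -> 0 ---
    opt_d.zero_grad()
    p_real, p_fake = D(low, ref), D(low, fake.detach())
    loss_d = (bce(p_real, torch.ones_like(p_real)) +
              bce(p_fake, torch.zeros_like(p_fake)))
    loss_d.backward()
    opt_d.step()
    # --- generator: fool D and stay close to the reference (L1) ---
    opt_g.zero_grad()
    p_fake = D(low, fake)
    loss_g = (bce(p_fake, torch.ones_like(p_fake)) +
              lam * nn.functional.l1_loss(fake, ref))
    loss_g.backward()
    opt_g.step()
    return loss_d.item(), loss_g.item()

Here G would be the InterconnectedAEGenerator sketched above, and (low, ref) a paired low-light/normal-light batch; the L1 term keeps the generator faithful to the reference while the adversarial term pushes the output toward realistic appearance.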