Search results for: sensor fusion

Commenced in January 2007 | Frequency: Monthly | Edition: International | Paper Count: 1869

1869. Implementation of Sensor Fusion Structure of 9-Axis Sensors on the Multipoint Control Unit
Authors: Jun Gil Ahn, Jong Tae Kim
Abstract: In this paper, we study the sensor fusion structure on the multipoint control unit (MCU). Sensor fusion using a Kalman filter for 9-axis sensors is considered. A 9-axis inertial sensor combines a 3-axis accelerometer, a 3-axis gyroscope, and a 3-axis magnetometer. We implement the sensor fusion structure among the sensor hubs in the MCU and measure the execution time, power consumption, and total energy. Experiments with real data from a 9-axis sensor at 20 MHz show average power consumptions of 44 mW and 48 mW on Cortex-M0 and Cortex-M3 MCUs, respectively, with execution times of 613.03 µs and 305.6 µs, respectively.
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=9-axis%20sensor" title="9-axis sensor">9-axis sensor</a>, <a href="https://publications.waset.org/abstracts/search?q=Kalman%20filter" title=" Kalman filter"> Kalman filter</a>, <a href="https://publications.waset.org/abstracts/search?q=MCU" title=" MCU"> MCU</a>, <a href="https://publications.waset.org/abstracts/search?q=sensor%20fusion" title=" sensor fusion"> sensor fusion</a> </p> <a href="https://publications.waset.org/abstracts/84323/implementation-of-sensor-fusion-structure-of-9-axis-sensors-on-the-multipoint-control-unit" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/84323.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">509</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1868</span> Performance of Hybrid Image Fusion: Implementation of Dual-Tree Complex Wavelet Transform Technique </h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Manoj%20Gupta">Manoj Gupta</a>, <a href="https://publications.waset.org/abstracts/search?q=Nirmendra%20Singh%20Bhadauria"> Nirmendra Singh Bhadauria</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Most of the applications in image processing require high spatial and high spectral resolution in a single image. For example satellite image system, the traffic monitoring system, and long range sensor fusion system all use image processing. However, most of the available equipment is not capable of providing this type of data. The sensor in the surveillance system can only cover the view of a small area for a particular focus, yet the demanding application of this system requires a view with a high coverage of the field. Image fusion provides the possibility of combining different sources of information. In this paper, we have decomposed the image using DTCWT and then fused using average and hybrid of (maxima and average) pixel level techniques and then compared quality of both the images using PSNR. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=image%20fusion" title="image fusion">image fusion</a>, <a href="https://publications.waset.org/abstracts/search?q=DWT" title=" DWT"> DWT</a>, <a href="https://publications.waset.org/abstracts/search?q=DT-CWT" title=" DT-CWT"> DT-CWT</a>, <a href="https://publications.waset.org/abstracts/search?q=PSNR" title=" PSNR"> PSNR</a>, <a href="https://publications.waset.org/abstracts/search?q=average%20image%20fusion" title=" average image fusion"> average image fusion</a>, <a href="https://publications.waset.org/abstracts/search?q=hybrid%20image%20fusion" title=" hybrid image fusion"> hybrid image fusion</a> </p> <a href="https://publications.waset.org/abstracts/19207/performance-of-hybrid-image-fusion-implementation-of-dual-tree-complex-wavelet-transform-technique" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/19207.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">612</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1867</span> Overview of a Quantum Model for Decision Support in a Sensor Network</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Shahram%20Payandeh">Shahram Payandeh</a> </p> <p class="card-text"><strong>Abstract:</strong></p> This paper presents an overview of a model which can be used as a part of a decision support system when fusing information from multiple sensing environment. Data fusion has been widely studied in the past few decades and numerous frameworks have been proposed to facilitate decision making process under uncertainties. Multi-sensor data fusion technology plays an increasingly significant role during people tracking and activity recognition. This paper presents an overview of a quantum model as a part of a decision-making process in the context of multi-sensor data fusion. The paper presents basic definitions and relationships associating the decision-making process and quantum model formulation in the presence of uncertainties. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=quantum%20model" title="quantum model">quantum model</a>, <a href="https://publications.waset.org/abstracts/search?q=sensor%20space" title=" sensor space"> sensor space</a>, <a href="https://publications.waset.org/abstracts/search?q=sensor%20network" title=" sensor network"> sensor network</a>, <a href="https://publications.waset.org/abstracts/search?q=decision%20support" title=" decision support"> decision support</a> </p> <a href="https://publications.waset.org/abstracts/119110/overview-of-a-quantum-model-for-decision-support-in-a-sensor-network" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/119110.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">234</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1866</span> Investigating Activity Recognition Using 9-Axis Sensors and Filters in Wearable Devices</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Jun%20Gil%20Ahn">Jun Gil Ahn</a>, <a href="https://publications.waset.org/abstracts/search?q=Jong%20Kang%20Park"> Jong Kang Park</a>, <a href="https://publications.waset.org/abstracts/search?q=Jong%20Tae%20Kim"> Jong Tae Kim</a> </p> <p class="card-text"><strong>Abstract:</strong></p> In this paper, we analyze major components of activity recognition (AR) in wearable device with 9-axis sensors and sensor fusion filters. 9-axis sensors commonly include 3-axis accelerometer, 3-axis gyroscope and 3-axis magnetometer. We chose sensor fusion filters as Kalman filter and Direction Cosine Matrix (DCM) filter. We also construct sensor fusion data from each activity sensor data and perform classification by accuracy of AR using Na&iuml;ve Bayes and SVM. According to the classification results, we observed that the DCM filter and the specific combination of the sensing axes are more effective for AR in wearable devices while classifying walking, running, ascending and descending. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=accelerometer" title="accelerometer">accelerometer</a>, <a href="https://publications.waset.org/abstracts/search?q=activity%20recognition" title=" activity recognition"> activity recognition</a>, <a href="https://publications.waset.org/abstracts/search?q=directiona%20cosine%20matrix%20filter" title=" directiona cosine matrix filter"> directiona cosine matrix filter</a>, <a href="https://publications.waset.org/abstracts/search?q=gyroscope" title=" gyroscope"> gyroscope</a>, <a href="https://publications.waset.org/abstracts/search?q=Kalman%20filter" title=" Kalman filter"> Kalman filter</a>, <a href="https://publications.waset.org/abstracts/search?q=magnetometer" title=" magnetometer"> magnetometer</a> </p> <a href="https://publications.waset.org/abstracts/56198/investigating-activity-recognition-using-9-axis-sensors-and-filters-in-wearable-devices" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/56198.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">336</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1865</span> An Adaptive Back-Propagation Network and Kalman Filter Based Multi-Sensor Fusion Method for Train Location System</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Yu-ding%20Du">Yu-ding Du</a>, <a href="https://publications.waset.org/abstracts/search?q=Qi-lian%20Bao"> Qi-lian Bao</a>, <a href="https://publications.waset.org/abstracts/search?q=Nassim%20Bessaad"> Nassim Bessaad</a>, <a href="https://publications.waset.org/abstracts/search?q=Lin%20Liu"> Lin Liu</a> </p> <p class="card-text"><strong>Abstract:</strong></p> The Global Navigation Satellite System (GNSS) is regarded as an effective approach for the purpose of replacing the large amount used track-side balises in modern train localization systems. This paper describes a method based on the data fusion of a GNSS receiver sensor and an odometer sensor that can significantly improve the positioning accuracy. A digital track map is needed as another sensor to project two-dimensional GNSS position to one-dimensional along-track distance due to the fact that the train’s position can only be constrained on the track. A model trained by BP neural network is used to estimate the trend positioning error which is related to the specific location and proximate processing of the digital track map. Considering that in some conditions the satellite signal failure will lead to the increase of GNSS positioning error, a detection step for GNSS signal is applied. An adaptive weighted fusion algorithm is presented to reduce the standard deviation of train speed measurement. Finally an Extended Kalman Filter (EKF) is used for the fusion of the projected 1-D GNSS positioning data and the 1-D train speed data to get the estimate position. Experimental results suggest that the proposed method performs well, which can reduce positioning error notably. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=multi-sensor%20data%20fusion" title="multi-sensor data fusion">multi-sensor data fusion</a>, <a href="https://publications.waset.org/abstracts/search?q=train%20positioning" title=" train positioning"> train positioning</a>, <a href="https://publications.waset.org/abstracts/search?q=GNSS" title=" GNSS"> GNSS</a>, <a href="https://publications.waset.org/abstracts/search?q=odometer" title=" odometer"> odometer</a>, <a href="https://publications.waset.org/abstracts/search?q=digital%20track%20map" title=" digital track map"> digital track map</a>, <a href="https://publications.waset.org/abstracts/search?q=map%20matching" title=" map matching"> map matching</a>, <a href="https://publications.waset.org/abstracts/search?q=BP%20neural%20network" title=" BP neural network"> BP neural network</a>, <a href="https://publications.waset.org/abstracts/search?q=adaptive%20weighted%20fusion" title=" adaptive weighted fusion"> adaptive weighted fusion</a>, <a href="https://publications.waset.org/abstracts/search?q=Kalman%20filter" title=" Kalman filter"> Kalman filter</a> </p> <a href="https://publications.waset.org/abstracts/98264/an-adaptive-back-propagation-network-and-kalman-filter-based-multi-sensor-fusion-method-for-train-location-system" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/98264.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">259</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1864</span> Real-Time Sensor Fusion for Mobile Robot Localization in an Oil and Gas Refinery</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Adewole%20A.%20Ayoade">Adewole A. Ayoade</a>, <a href="https://publications.waset.org/abstracts/search?q=Marshall%20R.%20Sweatt"> Marshall R. Sweatt</a>, <a href="https://publications.waset.org/abstracts/search?q=John%20P.%20H.%20Steele"> John P. H. Steele</a>, <a href="https://publications.waset.org/abstracts/search?q=Qi%20Han"> Qi Han</a>, <a href="https://publications.waset.org/abstracts/search?q=Khaled%20Al-Wahedi"> Khaled Al-Wahedi</a>, <a href="https://publications.waset.org/abstracts/search?q=Hamad%20Karki"> Hamad Karki</a>, <a href="https://publications.waset.org/abstracts/search?q=William%20A.%20Yearsley"> William A. Yearsley</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Understanding the behavioral characteristics of sensors is a crucial step in fusing data from several sensors of different types. This paper introduces a practical, real-time approach to integrate heterogeneous sensor data to achieve higher accuracy than would be possible from any one individual sensor in localizing a mobile robot. We use this approach in both indoor and outdoor environments and it is especially appropriate for those environments like oil and gas refineries due to their sparse and featureless nature. We have studied the individual contribution of each sensor data to the overall combined accuracy achieved from the fusion process. 
A sequential-update Extended Kalman Filter (EKF) with validation gates was used to integrate GPS data, compass data, WiFi data, Inertial Measurement Unit (IMU) data, vehicle velocity, and pose estimates from a fiducial marker system. Results show that the approach can enable a mobile robot to navigate autonomously in any environment using a priori information.
Keywords: inspection mobile robot, navigation, sensor fusion, sequential update extended Kalman filter
Procedia: https://publications.waset.org/abstracts/42808/real-time-sensor-fusion-for-mobile-robot-localization-in-an-oil-and-gas-refinery | PDF: https://publications.waset.org/abstracts/42808.pdf | Downloads: 479

1863. A Multi Sensor Monochrome Video Fusion Using Image Quality Assessment
Authors: M. Prema Kumar, P. Rajesh Kumar
Abstract: The increasing interest in image fusion (combining images of two or more modalities, such as infrared and visible-light radiation) has led to a need for accurate and reliable image assessment methods. This paper gives a novel approach to merging the information content of several videos taken of the same scene in order to build a combined video containing the finest information from the source videos. This process, known as video fusion, yields an image of higher quality than the source images, where quality depends on the particular application. Different sensors are used with the various cameras that capture the required images, and fusing them also helps reduce redundant information. The fusion technique used here is based on multi-resolution singular value decomposition (MSVD), which is very similar to wavelet-based fusion: the idea behind MSVD is to replace the FIR filters of the wavelet transform with singular value decomposition (SVD). It is computationally simple and well suited to real-time applications such as remote sensing and astronomy.
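The filter-bank-to-SVD substitution can be sketched compactly. The code below implements one MSVD-style analysis level (vectorized 2x2 blocks transformed by an SVD basis) with a simple fusion rule; using a single shared basis for both inputs is a simplifying assumption, so this illustrates the idea rather than reproducing the published algorithm:

```python
import numpy as np

def blocks_2x2(img):
    """Rearrange a (2m x 2n) image into a 4 x (m*n) matrix whose columns
    are vectorized 2x2 blocks: the MSVD analogue of one filter-bank level."""
    m, n = img.shape[0] // 2, img.shape[1] // 2
    return img.reshape(m, 2, n, 2).transpose(1, 3, 0, 2).reshape(4, m * n), (m, n)

def unblocks_2x2(b, shape):
    m, n = shape
    return b.reshape(2, 2, m, n).transpose(2, 0, 3, 1).reshape(2 * m, 2 * n)

def msvd_fuse(img_a, img_b):
    """One-level MSVD-style fusion with a shared SVD basis: average the
    approximation row, take max-magnitude coefficients in the detail rows."""
    xa, shape = blocks_2x2(img_a.astype(float))
    xb, _ = blocks_2x2(img_b.astype(float))
    U, _, _ = np.linalg.svd((xa + xb) / 2.0, full_matrices=False)  # 4x4 basis
    ta, tb = U.T @ xa, U.T @ xb                # transform coefficients
    fused = np.where(np.abs(ta) >= np.abs(tb), ta, tb)
    fused[0] = (ta[0] + tb[0]) / 2.0           # approximation row: average
    return unblocks_2x2(U @ fused, shape)      # exact inverse: X = U T

rng = np.random.default_rng(3)
scene = rng.uniform(0, 1, (64, 64))
f = msvd_fuse(scene + rng.normal(0, 0.05, scene.shape),
              scene + rng.normal(0, 0.05, scene.shape))
print("fusion MAE:", round(np.abs(f - scene).mean(), 4))
```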
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=multi%20sensor%20image%20fusion" title="multi sensor image fusion">multi sensor image fusion</a>, <a href="https://publications.waset.org/abstracts/search?q=MSVD" title=" MSVD"> MSVD</a>, <a href="https://publications.waset.org/abstracts/search?q=image%20processing" title=" image processing"> image processing</a>, <a href="https://publications.waset.org/abstracts/search?q=monochrome%20video" title=" monochrome video"> monochrome video</a> </p> <a href="https://publications.waset.org/abstracts/14866/a-multi-sensor-monochrome-video-fusion-using-image-quality-assessment" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/14866.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">580</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1862</span> Multi-Sensor Image Fusion for Visible and Infrared Thermal Images</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Amit%20Kumar%20Happy">Amit Kumar Happy</a> </p> <p class="card-text"><strong>Abstract:</strong></p> This paper is motivated by the importance of multi-sensor image fusion with a specific focus on infrared (IR) and visual image (VI) fusion for various applications, including military reconnaissance. Image fusion can be defined as the process of combining two or more source images into a single composite image with extended information content that improves visual perception or feature extraction. These images can be from different modalities like visible camera & IR thermal imager. While visible images are captured by reflected radiations in the visible spectrum, the thermal images are formed from thermal radiation (infrared) that may be reflected or self-emitted. A digital color camera captures the visible source image, and a thermal infrared camera acquires the thermal source image. In this paper, some image fusion algorithms based upon multi-scale transform (MST) and region-based selection rule with consistency verification have been proposed and presented. This research includes the implementation of the proposed image fusion algorithm in MATLAB along with a comparative analysis to decide the optimum number of levels for MST and the coefficient fusion rule. The results are presented, and several commonly used evaluation metrics are used to assess the suggested method's validity. Experiments show that the proposed approach is capable of producing good fusion results. While deploying our image fusion algorithm approaches, we observe several challenges from the popular image fusion methods. While high computational cost and complex processing steps of image fusion algorithms provide accurate fused results, they also make it hard to become deployed in systems and applications that require a real-time operation, high flexibility, and low computation ability. So, the methods presented in this paper offer good results with minimum time complexity. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=image%20fusion" title="image fusion">image fusion</a>, <a href="https://publications.waset.org/abstracts/search?q=IR%20thermal%20imager" title=" IR thermal imager"> IR thermal imager</a>, <a href="https://publications.waset.org/abstracts/search?q=multi-sensor" title=" multi-sensor"> multi-sensor</a>, <a href="https://publications.waset.org/abstracts/search?q=multi-scale%20transform" title=" multi-scale transform"> multi-scale transform</a> </p> <a href="https://publications.waset.org/abstracts/138086/multi-sensor-image-fusion-for-visible-and-infrared-thermal-images" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/138086.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">121</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1861</span> Improved Distance Estimation in Dynamic Environments through Multi-Sensor Fusion with Extended Kalman Filter</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Iffat%20Ara%20Ebu">Iffat Ara Ebu</a>, <a href="https://publications.waset.org/abstracts/search?q=Fahmida%20Islam"> Fahmida Islam</a>, <a href="https://publications.waset.org/abstracts/search?q=Mohammad%20Abdus%20Shahid%20Rafi"> Mohammad Abdus Shahid Rafi</a>, <a href="https://publications.waset.org/abstracts/search?q=Mahfuzur%20Rahman"> Mahfuzur Rahman</a>, <a href="https://publications.waset.org/abstracts/search?q=Umar%20Iqbal"> Umar Iqbal</a>, <a href="https://publications.waset.org/abstracts/search?q=John%20Ball"> John Ball</a> </p> <p class="card-text"><strong>Abstract:</strong></p> The application of multi-sensor fusion for enhanced distance estimation accuracy in dynamic environments is crucial for advanced driver assistance systems (ADAS) and autonomous vehicles. Limitations of single sensors such as cameras or radar in adverse conditions motivate the use of combined camera and radar data to improve reliability, adaptability, and object recognition. A multi-sensor fusion approach using an extended Kalman filter (EKF) is proposed to combine sensor measurements with a dynamic system model, achieving robust and accurate distance estimation. The research utilizes the Mississippi State University Autonomous Vehicular Simulator (MAVS) to create a controlled environment for data collection. Data analysis is performed using MATLAB. Qualitative (visualization of fused data vs ground truth) and quantitative metrics (RMSE, MAE) are employed for performance assessment. Initial results with simulated data demonstrate accurate distance estimation compared to individual sensors. The optimal sensor measurement noise variance and plant noise variance parameters within the EKF are identified, and the algorithm is validated with real-world data from a Chevrolet Blazer. In summary, this research demonstrates that multi-sensor fusion with an EKF significantly improves distance estimation accuracy in dynamic environments. This is supported by comprehensive evaluation metrics, with validation transitioning from simulated to real-world data, paving the way for safer and more reliable autonomous vehicle control. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=sensor%20fusion" title="sensor fusion">sensor fusion</a>, <a href="https://publications.waset.org/abstracts/search?q=EKF" title=" EKF"> EKF</a>, <a href="https://publications.waset.org/abstracts/search?q=MATLAB" title=" MATLAB"> MATLAB</a>, <a href="https://publications.waset.org/abstracts/search?q=MAVS" title=" MAVS"> MAVS</a>, <a href="https://publications.waset.org/abstracts/search?q=autonomous%20vehicle" title=" autonomous vehicle"> autonomous vehicle</a>, <a href="https://publications.waset.org/abstracts/search?q=ADAS" title=" ADAS"> ADAS</a> </p> <a href="https://publications.waset.org/abstracts/187474/improved-distance-estimation-in-dynamic-environments-through-multi-sensor-fusion-with-extended-kalman-filter" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/187474.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">60</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1860</span> Real Time Lidar and Radar High-Level Fusion for Obstacle Detection and Tracking with Evaluation on a Ground Truth</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Hatem%20Hajri">Hatem Hajri</a>, <a href="https://publications.waset.org/abstracts/search?q=Mohamed-Cherif%20Rahal"> Mohamed-Cherif Rahal</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Both Lidars and Radars are sensors for obstacle detection. While Lidars are very accurate on obstacles positions and less accurate on their velocities, Radars are more precise on obstacles velocities and less precise on their positions. Sensor fusion between Lidar and Radar aims at improving obstacle detection using advantages of the two sensors. The present paper proposes a real-time Lidar/Radar data fusion algorithm for obstacle detection and tracking based on the global nearest neighbour standard filter (GNN). This algorithm is implemented and embedded in an automative vehicle as a component generated by a real-time multisensor software. The benefits of data fusion comparing with the use of a single sensor are illustrated through several tracking scenarios (on a highway and on a bend) and using real-time kinematic sensors mounted on the ego and tracked vehicles as a ground truth. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=ground%20truth" title="ground truth">ground truth</a>, <a href="https://publications.waset.org/abstracts/search?q=Hungarian%20algorithm" title=" Hungarian algorithm"> Hungarian algorithm</a>, <a href="https://publications.waset.org/abstracts/search?q=lidar%20Radar%20data%20fusion" title=" lidar Radar data fusion"> lidar Radar data fusion</a>, <a href="https://publications.waset.org/abstracts/search?q=global%20nearest%20neighbor%20filter" title=" global nearest neighbor filter"> global nearest neighbor filter</a> </p> <a href="https://publications.waset.org/abstracts/95451/real-time-lidar-and-radar-high-level-fusion-for-obstacle-detection-and-tracking-with-evaluation-on-a-ground-truth" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/95451.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">180</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1859</span> Depth Camera Aided Dead-Reckoning Localization of Autonomous Mobile Robots in Unstructured GNSS-Denied Environments</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=David%20L.%20Olson">David L. Olson</a>, <a href="https://publications.waset.org/abstracts/search?q=Stephen%20B.%20H.%20Bruder"> Stephen B. H. Bruder</a>, <a href="https://publications.waset.org/abstracts/search?q=Adam%20S.%20Watkins"> Adam S. Watkins</a>, <a href="https://publications.waset.org/abstracts/search?q=Cleon%20E.%20Davis"> Cleon E. Davis</a> </p> <p class="card-text"><strong>Abstract:</strong></p> In global navigation satellite systems (GNSS), denied settings such as indoor environments, autonomous mobile robots are often limited to dead-reckoning navigation techniques to determine their position, velocity, and attitude (PVA). Localization is typically accomplished by employing an inertial measurement unit (IMU), which, while precise in nature, accumulates errors rapidly and severely degrades the localization solution. Standard sensor fusion methods, such as Kalman filtering, aim to fuse precise IMU measurements with accurate aiding sensors to establish a precise and accurate solution. In indoor environments, where GNSS and no other a priori information is known about the environment, effective sensor fusion is difficult to achieve, as accurate aiding sensor choices are sparse. However, an opportunity arises by employing a depth camera in the indoor environment. A depth camera can capture point clouds of the surrounding floors and walls. Extracting attitude from these surfaces can serve as an accurate aiding source, which directly combats errors that arise due to gyroscope imperfections. This configuration for sensor fusion leads to a dramatic reduction of PVA error compared to traditional aiding sensor configurations. This paper provides the theoretical basis for the depth camera aiding sensor method, initial expectations of performance benefit via simulation, and hardware implementation, thus verifying its veracity. Hardware implementation is performed on the Quanser Qbot 2™ mobile robot, with a Vector-Nav VN-200™ IMU and Kinect™ camera from Microsoft. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=autonomous%20mobile%20robotics" title="autonomous mobile robotics">autonomous mobile robotics</a>, <a href="https://publications.waset.org/abstracts/search?q=dead%20reckoning" title=" dead reckoning"> dead reckoning</a>, <a href="https://publications.waset.org/abstracts/search?q=depth%20camera" title=" depth camera"> depth camera</a>, <a href="https://publications.waset.org/abstracts/search?q=inertial%20navigation" title=" inertial navigation"> inertial navigation</a>, <a href="https://publications.waset.org/abstracts/search?q=Kalman%20filtering" title=" Kalman filtering"> Kalman filtering</a>, <a href="https://publications.waset.org/abstracts/search?q=localization" title=" localization"> localization</a>, <a href="https://publications.waset.org/abstracts/search?q=sensor%20fusion" title=" sensor fusion"> sensor fusion</a> </p> <a href="https://publications.waset.org/abstracts/134870/depth-camera-aided-dead-reckoning-localization-of-autonomous-mobile-robots-in-unstructured-gnss-denied-environments" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/134870.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">211</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1858</span> Product Design and Development of Wearable Assistant Device</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Hao-Jun%20Hong">Hao-Jun Hong</a>, <a href="https://publications.waset.org/abstracts/search?q=Jung-Tang%20Huang"> Jung-Tang Huang</a> </p> <p class="card-text"><strong>Abstract:</strong></p> The world is gradually becoming an aging society, and with the lack of laboring forces, this phenomenon is affecting the nation’s economy growth. Although nursing centers are booming in recent years, the lack of medical resources are yet to be resolved, thus creating an innovative wearable medical device could be a vital solution. This research is focused on the design and development of a wearable device which obtains a more precise heart failure measurement than products on the market. The method used by the device is based on the sensor fusion and big data algorithm. From the test result, the modified structure of wearable device can significantly decrease the MA (Motion Artifact) and provide users a more cozy and accurate physical monitor experience. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=big%20data" title="big data">big data</a>, <a href="https://publications.waset.org/abstracts/search?q=heart%20failure" title=" heart failure"> heart failure</a>, <a href="https://publications.waset.org/abstracts/search?q=motion%20artifact" title=" motion artifact"> motion artifact</a>, <a href="https://publications.waset.org/abstracts/search?q=sensor%20fusion" title=" sensor fusion"> sensor fusion</a>, <a href="https://publications.waset.org/abstracts/search?q=wearable%20medical%20device" title=" wearable medical device"> wearable medical device</a> </p> <a href="https://publications.waset.org/abstracts/59226/product-design-and-development-of-wearable-assistant-device" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/59226.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">355</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1857</span> Routing Protocol in Ship Dynamic Positioning Based on WSN Clustering Data Fusion System</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Zhou%20Mo">Zhou Mo</a>, <a href="https://publications.waset.org/abstracts/search?q=Dennis%20Chow"> Dennis Chow</a> </p> <p class="card-text"><strong>Abstract:</strong></p> In the dynamic positioning system (DPS) for vessels, the reliable information transmission between each note basically relies on the wireless protocols. From the perspective of cluster-based routing protocols for wireless sensor networks, the data fusion technology based on the sleep scheduling mechanism and remaining energy in network layer is proposed, which applies the sleep scheduling mechanism to the routing protocols, considering the remaining energy of node and location information when selecting cluster-head. The problem of uneven distribution of nodes in each cluster is solved by the Equilibrium. At the same time, Classified Forwarding Mechanism as well as Redelivery Policy strategy is adopted to avoid congestion in the transmission of huge amount of data, reduce the delay in data delivery and enhance the real-time response. In this paper, a simulation test is conducted to improve the routing protocols, which turn out to reduce the energy consumption of nodes and increase the efficiency of data delivery. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=DPS%20for%20vessel" title="DPS for vessel">DPS for vessel</a>, <a href="https://publications.waset.org/abstracts/search?q=wireless%20sensor%20network" title=" wireless sensor network"> wireless sensor network</a>, <a href="https://publications.waset.org/abstracts/search?q=data%20fusion" title=" data fusion"> data fusion</a>, <a href="https://publications.waset.org/abstracts/search?q=routing%20protocols" title=" routing protocols"> routing protocols</a> </p> <a href="https://publications.waset.org/abstracts/44328/routing-protocol-in-ship-dynamic-positioning-based-on-wsn-clustering-data-fusion-system" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/44328.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">531</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1856</span> Sensor Registration in Multi-Static Sonar Fusion Detection</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Longxiang%20Guo">Longxiang Guo</a>, <a href="https://publications.waset.org/abstracts/search?q=Haoyan%20Hao"> Haoyan Hao</a>, <a href="https://publications.waset.org/abstracts/search?q=Xueli%20Sheng"> Xueli Sheng</a>, <a href="https://publications.waset.org/abstracts/search?q=Hanjun%20Yu"> Hanjun Yu</a>, <a href="https://publications.waset.org/abstracts/search?q=Jingwei%20Yin"> Jingwei Yin</a> </p> <p class="card-text"><strong>Abstract:</strong></p> In order to prevent target splitting and ensure the accuracy of fusion, system error registration is an important step in multi-static sonar fusion detection system. To eliminate the inherent system errors including distance error and angle error of each sonar in detection, this paper uses offline estimation method for error registration. Suppose several sonars from different platforms work together to detect a target. The target position detected by each sonar is based on each sonar’s own reference coordinate system. Based on the two-dimensional stereo projection method, this paper uses real-time quality control (RTQC) method and least squares (LS) method to estimate sensor biases. The RTQC method takes the average value of each sonar’s data as the observation value and the LS method makes the least square processing of each sonar’s data to get the observation value. In the underwater acoustic environment, matlab simulation is carried out and the simulation results show that both algorithms can estimate the distance and angle error of sonar system. The performance of the two algorithms is also compared through the root mean square error and the influence of measurement noise on registration accuracy is explored by simulation. The system error convergence of RTQC method is rapid, but the distribution of targets has a serious impact on its performance. LS method can not be affected by target distribution, but the increase of random noise will slow down the convergence rate. LS method is an improvement of RTQC method, which is widely used in two-dimensional registration. The improved method can be used for underwater multi-target detection registration. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=data%20fusion" title="data fusion">data fusion</a>, <a href="https://publications.waset.org/abstracts/search?q=multi-static%20sonar%20detection" title=" multi-static sonar detection"> multi-static sonar detection</a>, <a href="https://publications.waset.org/abstracts/search?q=offline%20estimation" title=" offline estimation"> offline estimation</a>, <a href="https://publications.waset.org/abstracts/search?q=sensor%20registration%20problem" title=" sensor registration problem"> sensor registration problem</a> </p> <a href="https://publications.waset.org/abstracts/103631/sensor-registration-in-multi-static-sonar-fusion-detection" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/103631.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">175</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1855</span> Research on Routing Protocol in Ship Dynamic Positioning Based on WSN Clustering Data Fusion System</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Zhou%20Mo">Zhou Mo</a>, <a href="https://publications.waset.org/abstracts/search?q=Dennis%20Chow"> Dennis Chow</a> </p> <p class="card-text"><strong>Abstract:</strong></p> In the dynamic positioning system (DPS) for vessels, the reliable information transmission between each note basically relies on the wireless protocols. From the perspective of cluster-based routing pro-tocols for wireless sensor networks, the data fusion technology based on the sleep scheduling mechanism and remaining energy in network layer is proposed, which applies the sleep scheduling mechanism to the routing protocols, considering the remaining energy of node and location information when selecting cluster-head. The problem of uneven distribution of nodes in each cluster is solved by the Equilibrium. At the same time, Classified Forwarding Mechanism as well as Redelivery Policy strategy is adopted to avoid congestion in the transmission of huge amount of data, reduce the delay in data delivery and enhance the real-time response. In this paper, a simulation test is conducted to improve the routing protocols, which turns out to reduce the energy consumption of nodes and increase the efficiency of data delivery. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=DPS%20for%20vessel" title="DPS for vessel">DPS for vessel</a>, <a href="https://publications.waset.org/abstracts/search?q=wireless%20sensor%20network" title=" wireless sensor network"> wireless sensor network</a>, <a href="https://publications.waset.org/abstracts/search?q=data%20fusion" title=" data fusion"> data fusion</a>, <a href="https://publications.waset.org/abstracts/search?q=routing%20protocols" title=" routing protocols"> routing protocols</a> </p> <a href="https://publications.waset.org/abstracts/44860/research-on-routing-protocol-in-ship-dynamic-positioning-based-on-wsn-clustering-data-fusion-system" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/44860.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">475</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1854</span> Scheduling Nodes Activity and Data Communication for Target Tracking in Wireless Sensor Networks</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=AmirHossein%20Mohajerzadeh">AmirHossein Mohajerzadeh</a>, <a href="https://publications.waset.org/abstracts/search?q=Mohammad%20Alishahi"> Mohammad Alishahi</a>, <a href="https://publications.waset.org/abstracts/search?q=Saeed%20Aslishahi"> Saeed Aslishahi</a>, <a href="https://publications.waset.org/abstracts/search?q=Mohsen%20Zabihi"> Mohsen Zabihi</a> </p> <p class="card-text"><strong>Abstract:</strong></p> In this paper, we consider sensor nodes with the capability of measuring the bearings (relative angle to the target). We use geometric methods to select a set of observer nodes which are responsible for collecting data from the target. Considering the characteristics of target tracking applications, it is clear that significant numbers of sensor nodes are usually inactive. Therefore, in order to minimize the total network energy consumption, a set of sensor nodes, called sentinel, is periodically selected for monitoring, controlling the environment and transmitting data through the network. The other nodes are inactive. Furthermore, the proposed algorithm provides a joint scheduling and routing algorithm to transmit data between network nodes and the fusion center (FC) in which not only provides an efficient way to estimate the target position but also provides an efficient target tracking. Performance evaluation confirms the superiority of the proposed algorithm. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=coverage" title="coverage">coverage</a>, <a href="https://publications.waset.org/abstracts/search?q=routing" title=" routing"> routing</a>, <a href="https://publications.waset.org/abstracts/search?q=scheduling" title=" scheduling"> scheduling</a>, <a href="https://publications.waset.org/abstracts/search?q=target%20tracking" title=" target tracking"> target tracking</a>, <a href="https://publications.waset.org/abstracts/search?q=wireless%20sensor%20networks" title=" wireless sensor networks"> wireless sensor networks</a> </p> <a href="https://publications.waset.org/abstracts/46939/scheduling-nodes-activity-and-data-communication-for-target-tracking-in-wireless-sensor-networks" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/46939.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">383</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1853</span> A Wireless Sensor Network Protocol for a Car Parking Space Monitoring System</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Jung-Ho%20Moon">Jung-Ho Moon</a>, <a href="https://publications.waset.org/abstracts/search?q=Myung-Gon%20Yoon"> Myung-Gon Yoon</a>, <a href="https://publications.waset.org/abstracts/search?q=Tae%20Kwon%20Ha"> Tae Kwon Ha</a> </p> <p class="card-text"><strong>Abstract:</strong></p> This paper presents a wireless sensor network protocol for a car parking monitoring system. A wireless sensor network for the purpose is composed of multiple sensor nodes, a sink node, a gateway, and a server. Each of the sensor nodes is equipped with a 3-axis AMR sensor and deployed in the center of a parking space. The sensor node reads its sensor values periodically and transmits the data to the sink node if the current and immediate past sensor values show a difference exceeding a threshold value. The operations of the sink and sensor nodes are described in detail along with flow diagrams. The protocol allows a low-duty cycle operation of the sensor nodes and a flexible adjustment of the threshold value used by the sensor nodes. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=car%20parking%20monitoring" title="car parking monitoring">car parking monitoring</a>, <a href="https://publications.waset.org/abstracts/search?q=sensor%20node" title=" sensor node"> sensor node</a>, <a href="https://publications.waset.org/abstracts/search?q=wireless%20sensor%20network" title=" wireless sensor network"> wireless sensor network</a>, <a href="https://publications.waset.org/abstracts/search?q=network%20protocol" title=" network protocol"> network protocol</a> </p> <a href="https://publications.waset.org/abstracts/11153/a-wireless-sensor-network-protocol-for-a-car-parking-space-monitoring-system" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/11153.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">546</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1852</span> The Optimization of Decision Rules in Multimodal Decision-Level Fusion Scheme</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Andrey%20V.%20Timofeev">Andrey V. Timofeev</a>, <a href="https://publications.waset.org/abstracts/search?q=Dmitry%20V.%20Egorov"> Dmitry V. Egorov</a> </p> <p class="card-text"><strong>Abstract:</strong></p> This paper introduces an original method of parametric optimization of the structure for multimodal decision-level fusion scheme which combines the results of the partial solution of the classification task obtained from assembly of the mono-modal classifiers. As a result, a multimodal fusion classifier which has the minimum value of the total error rate has been obtained. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=classification%20accuracy" title="classification accuracy">classification accuracy</a>, <a href="https://publications.waset.org/abstracts/search?q=fusion%20solution" title=" fusion solution"> fusion solution</a>, <a href="https://publications.waset.org/abstracts/search?q=total%20error%20rate" title=" total error rate"> total error rate</a>, <a href="https://publications.waset.org/abstracts/search?q=multimodal%20fusion%20classifier" title=" multimodal fusion classifier"> multimodal fusion classifier</a> </p> <a href="https://publications.waset.org/abstracts/26088/the-optimization-of-decision-rules-in-multimodal-decision-level-fusion-scheme" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/26088.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">473</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1851</span> Age Determination from Epiphyseal Union of Bones at Shoulder Joint in Girls of Central India</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=B.%20Tirpude">B. Tirpude</a>, <a href="https://publications.waset.org/abstracts/search?q=V.%20Surwade"> V. 
Surwade</a>, <a href="https://publications.waset.org/abstracts/search?q=P.%20Murkey"> P. Murkey</a>, <a href="https://publications.waset.org/abstracts/search?q=P.%20Wankhade"> P. Wankhade</a>, <a href="https://publications.waset.org/abstracts/search?q=S.%20Meena"> S. Meena </a> </p> <p class="card-text"><strong>Abstract:</strong></p> There is no statistical data to establish variation in epiphyseal fusion in girls in central India population. This significant oversight can lead to exclusion of persons of interest in a forensic investigation. Epiphyseal fusion of proximal end of humerus in eighty females were analyzed on radiological basis to assess the range of variation of epiphyseal fusion at each age. In the study, the X ray films of the subjects were divided into three groups on the basis of degree of fusion. Firstly, those which were showing No Epiphyseal Fusion (N), secondly those showing Partial Union (PC), and thirdly those showing Complete Fusion (C). Observations made were compared with the previous studies. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=epiphyseal%20union" title="epiphyseal union">epiphyseal union</a>, <a href="https://publications.waset.org/abstracts/search?q=shoulder%20joint" title=" shoulder joint"> shoulder joint</a>, <a href="https://publications.waset.org/abstracts/search?q=proximal%20end%20of%20humerus" title=" proximal end of humerus"> proximal end of humerus</a> </p> <a href="https://publications.waset.org/abstracts/19684/age-determination-from-epiphyseal-union-of-bones-at-shoulder-joint-in-girls-of-central-india" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/19684.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">510</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1850</span> An Extended Domain-Specific Modeling Language for Marine Observatory Relying on Enterprise Architecture</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Charbel%20Aoun">Charbel Aoun</a>, <a href="https://publications.waset.org/abstracts/search?q=Loic%20Lagadec"> Loic Lagadec</a> </p> <p class="card-text"><strong>Abstract:</strong></p> A Sensor Network (SN) is considered as an operation of two phases: (1) the observation/measuring, which means the accumulation of the gathered data at each sensor node; (2) transferring the collected data to some processing center (e.g., Fusion Servers) within the SN. Therefore, an underwater sensor network can be defined as a sensor network deployed underwater that monitors underwater activity. The deployed sensors, such as Hydrophones, are responsible for registering underwater activity and transferring it to more advanced components. The process of data exchange between the aforementioned components perfectly defines the Marine Observatory (MO) concept which provides information on ocean state, phenomena and processes. The first step towards the implementation of this concept is defining the environmental constraints and the required tools and components (Marine Cables, Smart Sensors, Data Fusion Server, etc). 
The logical and physical components used in these observatories perform critical functions, such as the localization of underwater moving objects, and these functions can be orchestrated with other services (e.g., military or civilian reactions). In this paper, we present an extension to our MO meta-model that is used to generate a design tool (ArchiMO). We propose new constraints to be taken into consideration at design time, and we illustrate our proposal with an example from the MO domain. Additionally, we generate the corresponding simulation code using our self-developed domain-specific model compiler. On the one hand, this illustrates our approach of relying on an Enterprise Architecture (EA) framework that respects multiple views, stakeholder perspectives, and domain specificity. On the other hand, it helps reduce both the complexity of and the time spent on the design activity, while preventing design modeling errors when porting this activity to the MO domain. In conclusion, this work aims to demonstrate that the design of complex systems can be improved through the use of MDE technologies and a domain-specific modeling language with its associated tooling. The major improvement is to provide an early validation step, via models and simulation, to consolidate the system design. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=smart%20sensors" title="smart sensors">smart sensors</a>, <a href="https://publications.waset.org/abstracts/search?q=data%20fusion" title=" data fusion"> data fusion</a>, <a href="https://publications.waset.org/abstracts/search?q=distributed%20fusion%20architecture" title=" distributed fusion architecture"> distributed fusion architecture</a>, <a href="https://publications.waset.org/abstracts/search?q=sensor%20networks" title=" sensor networks"> sensor networks</a>, <a href="https://publications.waset.org/abstracts/search?q=domain%20specific%20modeling%20language" title=" domain specific modeling language"> domain specific modeling language</a>, <a href="https://publications.waset.org/abstracts/search?q=enterprise%20architecture" title=" enterprise architecture"> enterprise architecture</a>, <a href="https://publications.waset.org/abstracts/search?q=underwater%20moving%20object" title=" underwater moving object"> underwater moving object</a>, <a href="https://publications.waset.org/abstracts/search?q=localization" title=" localization"> localization</a>, <a href="https://publications.waset.org/abstracts/search?q=marine%20observatory" title=" marine observatory"> marine observatory</a>, <a href="https://publications.waset.org/abstracts/search?q=NS-3" title=" NS-3"> NS-3</a>, <a href="https://publications.waset.org/abstracts/search?q=IMS" title=" IMS"> IMS</a> </p> <a href="https://publications.waset.org/abstracts/135443/an-extended-domain-specific-modeling-language-for-marine-observatory-relying-on-enterprise-architecture" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/135443.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">185</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1849</span> A Hybrid Image Fusion Model for Generating High Spatial-Temporal-Spectral Resolution Data Using OLI-MODIS-Hyperion Satellite Imagery</h5> <div 
class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Yongquan%20Zhao">Yongquan Zhao</a>, <a href="https://publications.waset.org/abstracts/search?q=Bo%20Huang"> Bo Huang</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Spatial, Temporal, and Spectral Resolution (STSR) are three key characteristics of Earth observation satellite sensors; however, any single satellite sensor cannot provide Earth observations with high STSR simultaneously because of the hardware technology limitations of satellite sensors. On the other hand, a conflicting circumstance is that the demand for high STSR has been growing with the remote sensing application development. Although image fusion technology provides a feasible means to overcome the limitations of the current Earth observation data, the current fusion technologies cannot enhance all STSR simultaneously and provide high enough resolution improvement level. This study proposes a Hybrid Spatial-Temporal-Spectral image Fusion Model (HSTSFM) to generate synthetic satellite data with high STSR simultaneously, which blends the high spatial resolution from the panchromatic image of Landsat-8 Operational Land Imager (OLI), the high temporal resolution from the multi-spectral image of Moderate Resolution Imaging Spectroradiometer (MODIS), and the high spectral resolution from the hyper-spectral image of Hyperion to produce high STSR images. The proposed HSTSFM contains three fusion modules: (1) spatial-spectral image fusion; (2) spatial-temporal image fusion; (3) temporal-spectral image fusion. A set of test data with both phenological and land cover type changes in Beijing suburb area, China is adopted to demonstrate the performance of the proposed method. The experimental results indicate that HSTSFM can produce fused image that has good spatial and spectral fidelity to the reference image, which means it has the potential to generate synthetic data to support the studies that require high STSR satellite imagery. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=hybrid%20spatial-temporal-spectral%20fusion" title="hybrid spatial-temporal-spectral fusion">hybrid spatial-temporal-spectral fusion</a>, <a href="https://publications.waset.org/abstracts/search?q=high%20resolution%20synthetic%20imagery" title=" high resolution synthetic imagery"> high resolution synthetic imagery</a>, <a href="https://publications.waset.org/abstracts/search?q=least%20square%20regression" title=" least square regression"> least square regression</a>, <a href="https://publications.waset.org/abstracts/search?q=sparse%20representation" title=" sparse representation"> sparse representation</a>, <a href="https://publications.waset.org/abstracts/search?q=spectral%20transformation" title=" spectral transformation"> spectral transformation</a> </p> <a href="https://publications.waset.org/abstracts/74667/a-hybrid-image-fusion-model-for-generating-high-spatial-temporal-spectral-resolution-data-using-oli-modis-hyperion-satellite-imagery" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/74667.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">240</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1848</span> A Decision Support System for the Detection of Illicit Substance Production Sites</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Krystian%20Chachula">Krystian Chachula</a>, <a href="https://publications.waset.org/abstracts/search?q=Robert%20Nowak"> Robert Nowak</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Manufacturing home-made explosives and synthetic drugs is an increasing problem in Europe. To combat that, a data fusion system is proposed for the detection and localization of production sites in urban environments. The data consists of measurements of properties of wastewater performed by various sensors installed in a sewage network. A four-stage fusion strategy allows detecting sources of waste products from known chemical reactions. First, suspicious measurements are used to compute the amount and position of discharged compounds. Then, this information is propagated through the sewage network to account for missing sensors. The next step is clustering and the formation of tracks. Eventually, tracks are used to reconstruct discharge events. Sensor measurements are simulated by a subsystem based on real-world data. In this paper, different discharge scenarios are considered to show how the parameters of used algorithms affect the effectiveness of the proposed system. This research is a part of the SYSTEM project (SYnergy of integrated Sensors and Technologies for urban sEcured environMent). 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=continuous%20monitoring" title="continuous monitoring">continuous monitoring</a>, <a href="https://publications.waset.org/abstracts/search?q=information%20fusion%20and%20sensors" title=" information fusion and sensors"> information fusion and sensors</a>, <a href="https://publications.waset.org/abstracts/search?q=internet%20of%20things" title=" internet of things"> internet of things</a>, <a href="https://publications.waset.org/abstracts/search?q=multisensor%20fusion" title=" multisensor fusion"> multisensor fusion</a> </p> <a href="https://publications.waset.org/abstracts/129281/a-decision-support-system-for-the-detection-of-illicit-substance-production-sites" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/129281.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">123</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1847</span> RV-YOLOX: Object Detection on Inland Waterways Based on Optimized YOLOX Through Fusion of Vision and 3+1D Millimeter Wave Radar</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Zixian%20Zhang">Zixian Zhang</a>, <a href="https://publications.waset.org/abstracts/search?q=Shanliang%20Yao"> Shanliang Yao</a>, <a href="https://publications.waset.org/abstracts/search?q=Zile%20Huang"> Zile Huang</a>, <a href="https://publications.waset.org/abstracts/search?q=Zhaodong%20Wu"> Zhaodong Wu</a>, <a href="https://publications.waset.org/abstracts/search?q=Xiaohui%20Zhu"> Xiaohui Zhu</a>, <a href="https://publications.waset.org/abstracts/search?q=Yong%20Yue"> Yong Yue</a>, <a href="https://publications.waset.org/abstracts/search?q=Jieming%20Ma"> Jieming Ma</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Unmanned Surface Vehicles (USVs) are valuable due to their ability to perform dangerous and time-consuming tasks on the water. Object detection tasks are significant in these applications. However, inherent challenges, such as the complex distribution of obstacles, reflections from shore structures, water surface fog, etc., hinder the performance of object detection of USVs. To address these problems, this paper provides a fusion method for USVs to effectively detect objects in the inland surface environment, utilizing vision sensors and 3+1D Millimeter-wave radar. MMW radar is complementary to vision sensors, providing robust environmental information. The radar 3D point cloud is transferred to 2D radar pseudo image to unify radar and vision information format by utilizing the point transformer. We propose a multi-source object detection network (RV-YOLOX )based on radar-vision fusion for inland waterways environment. The performance is evaluated on our self-recording waterways dataset. Compared with the YOLOX network, our fusion network significantly improves detection accuracy, especially for objects with bad light conditions. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=inland%20waterways" title="inland waterways">inland waterways</a>, <a href="https://publications.waset.org/abstracts/search?q=YOLO" title=" YOLO"> YOLO</a>, <a href="https://publications.waset.org/abstracts/search?q=sensor%20fusion" title=" sensor fusion"> sensor fusion</a>, <a href="https://publications.waset.org/abstracts/search?q=self-attention" title=" self-attention"> self-attention</a> </p> <a href="https://publications.waset.org/abstracts/164399/rv-yolox-object-detection-on-inland-waterways-based-on-optimized-yolox-through-fusion-of-vision-and-31d-millimeter-wave-radar" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/164399.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">145</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1846</span> Changes in the Median Sacral Crest Associated with Sacrocaudal Fusion in the Greyhound</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=S.%20M.%20Ismail">S. M. Ismail</a>, <a href="https://publications.waset.org/abstracts/search?q=H-H%20Yen"> H-H Yen</a>, <a href="https://publications.waset.org/abstracts/search?q=C.%20M.%20Murray"> C. M. Murray</a>, <a href="https://publications.waset.org/abstracts/search?q=H.%20M.%20S.%20Davies"> H. M. S. Davies</a> </p> <p class="card-text"><strong>Abstract:</strong></p> A recent study reported a 33% incidence of complete sacrocaudal fusion in greyhounds compared to a 3% incidence in other dogs. In the dog, the median sacral crest is formed by the fusion of sacral spinous processes. Separation of the 1st spinous process from the median crest of the sacrum in the dog has been reported as a diagnostic tool of type one lumbosacral transitional vertebra (LTV). LTV is a congenital spinal anomaly, which includes either sacralization of the caudal lumbar part or lumbarization of the most cranial sacral segment of the spine. In this study, the absence or reduction of fusion (presence of separation) between the 1st and 2ndspinous processes of the median sacral crest has been identified in association with sacrocaudal fusion in the greyhound, without any feature of LTV. In order to provide quantitative data on the absence or reduction of fusion in the median sacral crest between the 1st and 2nd sacral spinous processes, in association with sacrocaudal fusion. 204 dog sacrums free of any pathological changes (192 greyhound, 9 beagles and 3 labradors) were grouped based on the occurrence and types of fusion and the presence, absence, or reduction in the median sacral crest between the 1st and 2nd sacral spinous processes., Sacrums were described and classified as follows: F: Complete fusion (crest is present), N: Absence (fusion is absent), and R: Short crest (fusion reduced but not absent (reduction). The incidence of sacrocaudal fusion in the 204 sacrums: 57% of the sacrums were standard (3 vertebrae) and 43% were fused (4 vertebrae). Type of sacrum had a significant (p < .05) association with the absence and reduction of fusion between the 1st and 2nd sacral spinous processes of the median sacral crest. 
In the 108 greyhounds with standard sacrums (3 vertebrae), the percentages of F, N, and R were 45%, 23%, and 23%, respectively, while in the 84 fused (4 vertebrae) sacrums, the percentages of F, N, and R were 3%, 87%, and 10%, respectively; these percentages differed significantly between standard (3 vertebrae) and fused (4 vertebrae) sacrums (p < .05). This indicates that absence of spinous process fusion in the median sacral crest was found in a large percentage of the greyhounds in this study and was particularly prevalent in those with sacrocaudal fusion; therefore, in this breed at least, absence of sacral spinous process fusion may be unlikely to be associated with LTV. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=greyhound" title="greyhound">greyhound</a>, <a href="https://publications.waset.org/abstracts/search?q=median%20sacral%20crest" title=" median sacral crest"> median sacral crest</a>, <a href="https://publications.waset.org/abstracts/search?q=sacrocaudal%20fusion" title=" sacrocaudal fusion"> sacrocaudal fusion</a>, <a href="https://publications.waset.org/abstracts/search?q=sacral%20spinous%20process" title=" sacral spinous process"> sacral spinous process</a> </p> <a href="https://publications.waset.org/abstracts/47980/changes-in-the-median-sacral-crest-associated-with-sacrocaudal-fusion-in-the-greyhound" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/47980.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">455</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1845</span> Efficient Feature Fusion for Noise Iris in Unconstrained Environment</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Yao-Hong%20Tsai">Yao-Hong Tsai</a> </p> <p class="card-text"><strong>Abstract:</strong></p> This paper presents an efficient fusion algorithm for iris images to generate stable features for recognition in unconstrained environments. Recently, iris recognition systems have focused on real scenarios in daily life that do not assume the subject’s cooperation. Given the large variation in the environment, the objective of this paper is to combine information from multiple images of the same iris. The result of image fusion is a new image that is more stable for subsequent iris recognition than each original noisy iris image. A wavelet-based approach for multi-resolution image fusion is applied in the fusion process. Iris detection is based on the AdaBoost algorithm, and a local binary pattern (LBP) histogram is then applied to texture classification with a weighting scheme. Experiments showed that the features generated by the proposed fusion algorithm can improve the performance of verification systems based on iris recognition. 
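<p class="card-text">The fusion rule sketched below is a common wavelet recipe consistent with the abstract: average the low-frequency approximation band and keep the larger-magnitude detail coefficients from either capture. The single-level Haar transform, the max-abs rule, and the PyWavelets dependency are illustrative assumptions, not necessarily the paper's exact multi-resolution scheme.</p>
<pre><code># Hedged sketch: wavelet-based fusion of two captures of the same iris.
import numpy as np
import pywt  # PyWavelets

def fuse_pair(img_a, img_b, wavelet="haar"):
    cA_a, details_a = pywt.dwt2(img_a, wavelet)
    cA_b, details_b = pywt.dwt2(img_b, wavelet)
    cA = (cA_a + cA_b) / 2.0                        # low frequencies: average
    fused_details = tuple(
        np.where(np.abs(da) >= np.abs(db), da, db)  # high frequencies: max-abs
        for da, db in zip(details_a, details_b)
    )
    return pywt.idwt2((cA, fused_details), wavelet)

a = np.random.rand(64, 64)   # stand-ins for two noisy iris captures
b = np.random.rand(64, 64)
stable = fuse_pair(a, b)     # a more stable image for LBP feature extraction
</code></pre>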
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=image%20fusion" title="image fusion">image fusion</a>, <a href="https://publications.waset.org/abstracts/search?q=iris%20recognition" title=" iris recognition"> iris recognition</a>, <a href="https://publications.waset.org/abstracts/search?q=local%20binary%20pattern" title=" local binary pattern"> local binary pattern</a>, <a href="https://publications.waset.org/abstracts/search?q=wavelet" title=" wavelet"> wavelet</a> </p> <a href="https://publications.waset.org/abstracts/17027/efficient-feature-fusion-for-noise-iris-in-unconstrained-environment" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/17027.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">372</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1844</span> Multimodal Deep Learning for Human Activity Recognition</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Ons%20Slimene">Ons Slimene</a>, <a href="https://publications.waset.org/abstracts/search?q=Aroua%20Taamallah"> Aroua Taamallah</a>, <a href="https://publications.waset.org/abstracts/search?q=Maha%20Khemaja"> Maha Khemaja</a> </p> <p class="card-text"><strong>Abstract:</strong></p> In recent years, human activity recognition (HAR) has been a key area of research due to its diverse applications. It has garnered increasing attention in the field of computer vision. HAR plays an important role in people’s daily lives as it has the ability to learn advanced knowledge about human activities from data. In HAR, activities are usually represented by exploiting different types of sensors, such as embedded sensors or visual sensors. However, these sensors have limitations, such as local obstacles, image-related obstacles, sensor unreliability, and consumer concerns. Recently, several deep learning-based approaches have been proposed for HAR and these approaches are classified into two categories based on the type of data used: vision-based approaches and sensor-based approaches. This research paper highlights the importance of multimodal data fusion from skeleton data obtained from videos and data generated by embedded sensors using deep neural networks for achieving HAR. We propose a deep multimodal fusion network based on a twostream architecture. These two streams use the Convolutional Neural Network combined with the Bidirectional LSTM (CNN BILSTM) to process skeleton data and data generated by embedded sensors and the fusion at the feature level is considered. The proposed model was evaluated on a public OPPORTUNITY++ dataset and produced a accuracy of 96.77%. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=human%20activity%20recognition" title="human activity recognition">human activity recognition</a>, <a href="https://publications.waset.org/abstracts/search?q=action%20recognition" title=" action recognition"> action recognition</a>, <a href="https://publications.waset.org/abstracts/search?q=sensors" title=" sensors"> sensors</a>, <a href="https://publications.waset.org/abstracts/search?q=vision" title=" vision"> vision</a>, <a href="https://publications.waset.org/abstracts/search?q=human-centric%20sensing" title=" human-centric sensing"> human-centric sensing</a>, <a href="https://publications.waset.org/abstracts/search?q=deep%20learning" title=" deep learning"> deep learning</a>, <a href="https://publications.waset.org/abstracts/search?q=context-awareness" title=" context-awareness"> context-awareness</a> </p> <a href="https://publications.waset.org/abstracts/162633/multimodal-deep-learning-for-human-activity-recognition" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/162633.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">108</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1843</span> Sampling Two-Channel Nonseparable Wavelets and Its Applications in Multispectral Image Fusion</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Bin%20Liu">Bin Liu</a>, <a href="https://publications.waset.org/abstracts/search?q=Weijie%20Liu"> Weijie Liu</a>, <a href="https://publications.waset.org/abstracts/search?q=Bin%20Sun"> Bin Sun</a>, <a href="https://publications.waset.org/abstracts/search?q=Yihui%20Luo"> Yihui Luo </a> </p> <p class="card-text"><strong>Abstract:</strong></p> In order to solve the problem of lower spatial resolution and block effect in the fusion method based on separable wavelet transform in the resulting fusion image, a new sampling mode based on multi-resolution analysis of two-channel non separable wavelet transform, whose dilation matrix is [1,1;1,-1], is presented and a multispectral image fusion method based on this kind of sampling mode is proposed. Filter banks related to this kind of wavelet are constructed, and multiresolution decomposition of the intensity of the MS and panchromatic image are performed in the sampled mode using the constructed filter bank. The low- and high-frequency coefficients are fused by different fusion rules. The experiment results show that this method has good visual effect. The fusion performance has been noted to outperform the IHS fusion method, as well as, the fusion methods based on DWT, IHS-DWT, IHS-Contourlet transform, and IHS-Curvelet transform in preserving both spectral quality and high spatial resolution information. Furthermore, when compared with the fusion method based on nonsubsampled two-channel non separable wavelet, the proposed method has been observed to have higher spatial resolution and good global spectral information. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=image%20fusion" title="image fusion">image fusion</a>, <a href="https://publications.waset.org/abstracts/search?q=two-channel%20sampled%20nonseparable%20wavelets" title=" two-channel sampled nonseparable wavelets"> two-channel sampled nonseparable wavelets</a>, <a href="https://publications.waset.org/abstracts/search?q=multispectral%20image" title=" multispectral image"> multispectral image</a>, <a href="https://publications.waset.org/abstracts/search?q=panchromatic%20image" title=" panchromatic image"> panchromatic image</a> </p> <a href="https://publications.waset.org/abstracts/15357/sampling-two-channel-nonseparable-wavelets-and-its-applications-in-multispectral-image-fusion" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/15357.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">446</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1842</span> Variations in the Angulation of the First Sacral Spinous Process Angle Associated with Sacrocaudal Fusion in Greyhounds</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Sa%27ad%20M.%20Ismail">Sa&#039;ad M. Ismail</a>, <a href="https://publications.waset.org/abstracts/search?q=Hung-Hsun%20Yen"> Hung-Hsun Yen</a>, <a href="https://publications.waset.org/abstracts/search?q=Christina%20M.%20Murray"> Christina M. Murray</a>, <a href="https://publications.waset.org/abstracts/search?q=Helen%20M.%20S.%20Davies"> Helen M. S. Davies</a> </p> <p class="card-text"><strong>Abstract:</strong></p> In the dog, the median sacral crest is formed by the fusion of three sacral spinous processes. In greyhounds with standard sacrums, this fusion in the median sacral crest consists of the fusion of three sacral spinous processes while it consists of four in greyhounds with sacrocaudal fusion. In the present study, variations in the angulation of the first sacral spinous process in association with different types of sacrocaudal fusion in the greyhound were investigated. Sacrums were collected from 207 greyhounds (102 sacrums; type A (unfused) and 105 with different types of sacrocaudal fusion; types: B, C and D). Sacrums were cleaned by boiling and dried and then were placed on their ventral surface on a flat surface and photographed from the left side using a digital camera at a fixed distance. The first sacral spinous process angle (1st SPA) was defined as the angle formed between the cranial border of the cranial ridge of the first sacral spinous process and the line extending across the most dorsal surface points of the spinous processes of the S1, S2, and S3. Image-Pro Express Version 5.0 imaging software was used to draw and measure the angles. Two photographs were taken for each sacrum and two repeat measurements were also taken of each angle. The mean value of the 1st SPA in greyhounds with sacrocaudal fusion was less (98.99°, SD ± 11, n = 105) than those in greyhounds with standard sacrums (99.77°, SD ± 9.18, n = 102) but was not significantly different (P < 0.05). 
Among greyhounds with different types of sacrocaudal fusion, the mean value of the 1st SPA was as follows: type B: 97.73°, SD ± 10.94, n = 39; type C: 101.42°, SD ± 10.51, n = 52; and type D: 94.22°, SD ± 11.30, n = 12. These angles were significantly different between all types of fusion (P < 0.05). Comparing the mean value of the 1st SPA in standard sacrums (type A) with that for each type of fusion separately showed that the only significant difference in angulation (P < 0.05) was between standard sacrums and type D sacrums (body fusion only, between S1 and Ca1). Different types of sacrocaudal fusion were thus associated with variations in the angle of the first sacral spinous process. These variations may affect the alignment and biomechanics of the sacral area and the pattern of movement and/or the force transmitted from the hind limbs to the cranial parts of the body, and may alter the loading of other parts of the body. We concluded that variations in the anatomical features of the sacrum might change the function of the sacrum or of surrounding anatomical structures during movement. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=angulation%20of%20first%20sacral%20spinous%20process" title="angulation of first sacral spinous process">angulation of first sacral spinous process</a>, <a href="https://publications.waset.org/abstracts/search?q=biomechanics" title=" biomechanics"> biomechanics</a>, <a href="https://publications.waset.org/abstracts/search?q=greyhound" title=" greyhound"> greyhound</a>, <a href="https://publications.waset.org/abstracts/search?q=locomotion" title=" locomotion"> locomotion</a>, <a href="https://publications.waset.org/abstracts/search?q=sacrocaudal%20fusion" title=" sacrocaudal fusion"> sacrocaudal fusion</a> </p> <a href="https://publications.waset.org/abstracts/74942/variations-in-the-angulation-of-the-first-sacral-spinous-process-angle-associated-with-sacrocaudal-fusion-in-greyhounds" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/74942.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">318</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1841</span> Remotely Sensed Data Fusion to Extract Vegetation Cover in the Cultural Park of Tassili, South of Algeria</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Y.%20Fekir">Y. Fekir</a>, <a href="https://publications.waset.org/abstracts/search?q=K.%20Mederbal"> K. Mederbal</a>, <a href="https://publications.waset.org/abstracts/search?q=M.%20A.%20Hammadouche"> M. A. Hammadouche</a>, <a href="https://publications.waset.org/abstracts/search?q=D.%20Anteur"> D. Anteur</a> </p> <p class="card-text"><strong>Abstract:</strong></p> The cultural park of Tassili, occupying a large area of Algeria, is characterized by a rich vegetal biodiversity that must be preserved and managed in both time and space. Managing such a large and complex area requires large amounts of data, most of which are spatially localized (DEMs, satellite images, socio-economic information, etc.), and the use of conventional and traditional methods is therefore quite difficult. 
Remote sensing, given its efficiency in environmental applications, has become an indispensable solution for this kind of study. Multispectral imaging sensors have proved very useful over the last decade in a wide range of remote sensing applications. They can aid in several domains, such as the detection and identification of diverse surface targets, topographical details, and geological features. In this work, we extract vegetated areas using fusion techniques applied to data acquired from the sensor on board the Earth Observing-1 (EO-1) satellite and from the Landsat ETM+ and TM sensors. We used images acquired over the Oasis of Djanet in the National Park of Tassili in the south of Algeria. Fusion techniques were applied to the obtained image to extract the vegetation fraction of the different land use classes. We compare the results obtained for vegetation endmember extraction with vegetation indices calculated from both Hyperion and the other multispectral sensors. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=Landsat%20ETM%2B" title="Landsat ETM+">Landsat ETM+</a>, <a href="https://publications.waset.org/abstracts/search?q=EO1" title=" EO1"> EO1</a>, <a href="https://publications.waset.org/abstracts/search?q=data%20fusion" title=" data fusion"> data fusion</a>, <a href="https://publications.waset.org/abstracts/search?q=vegetation" title=" vegetation"> vegetation</a>, <a href="https://publications.waset.org/abstracts/search?q=Tassili" title=" Tassili"> Tassili</a>, <a href="https://publications.waset.org/abstracts/search?q=Algeria" title=" Algeria"> Algeria</a> </p> <a href="https://publications.waset.org/abstracts/9997/remotely-sensed-data-fusion-to-extract-vegetation-cover-in-the-cultural-park-of-tassili-south-of-algeria" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/9997.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">438</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">1840</span> Valuation on MEMS Pressure Sensors and Device Applications</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Nurul%20Amziah%20Md%20Yunus">Nurul Amziah Md Yunus</a>, <a href="https://publications.waset.org/abstracts/search?q=Izhal%20Abdul%20Halin"> Izhal Abdul Halin</a>, <a href="https://publications.waset.org/abstracts/search?q=Nasri%20Sulaiman"> Nasri Sulaiman</a>, <a href="https://publications.waset.org/abstracts/search?q=Noor%20Faezah%20Ismail"> Noor Faezah Ismail</a>, <a href="https://publications.waset.org/abstracts/search?q=Ong%20Kai%20Sheng"> Ong Kai Sheng</a> </p> <p class="card-text"><strong>Abstract:</strong></p> This paper introduces and presents the MEMS pressure sensor, including the types of pressure sensors and their theory of operation. The latest MEMS technology and the fabrication processes of pressure sensors are explored and discussed. In addition, various device applications of pressure sensors, such as tire pressure monitoring systems and diesel particulate filters, are explained. Given the continuing miniaturization of devices, pressure sensors built with nanotechnology (NEMS) are also reviewed. 
The NEMS pressure sensor is expected to offer better performance at a lower cost and has gained considerable popularity in many applications. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=pressure%20sensor" title="pressure sensor">pressure sensor</a>, <a href="https://publications.waset.org/abstracts/search?q=diaphragm" title=" diaphragm"> diaphragm</a>, <a href="https://publications.waset.org/abstracts/search?q=MEMS" title=" MEMS"> MEMS</a>, <a href="https://publications.waset.org/abstracts/search?q=automotive%20application" title=" automotive application"> automotive application</a>, <a href="https://publications.waset.org/abstracts/search?q=biomedical%20application" title=" biomedical application"> biomedical application</a>, <a href="https://publications.waset.org/abstracts/search?q=NEMS" title=" NEMS"> NEMS</a> </p> <a href="https://publications.waset.org/abstracts/28395/valuation-on-mems-pressure-sensors-and-device-applications" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/28395.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">677</span> </span> </div> </div>
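<p class="card-text">For readers new to diaphragm-based pressure sensors such as those surveyed above, the classic small-deflection design relation links applied pressure to the centre deflection of a clamped circular diaphragm: w0 = p*a^4 / (64*D), with flexural rigidity D = E*t^3 / (12*(1 - nu^2)). The silicon material constants and geometry in the sketch below are assumed example values, not figures from the paper.</p>
<pre><code># Hedged numeric sketch: centre deflection of a clamped circular silicon diaphragm
# under uniform pressure (Kirchhoff plate theory): w0 = p*a^4 / (64*D),
# with D = E*t^3 / (12*(1 - nu^2)). Material and geometry values are assumptions.
E, nu = 170e9, 0.28      # Young's modulus (Pa) and Poisson ratio of silicon
a, t = 500e-6, 20e-6     # diaphragm radius and thickness (m)
p = 100e3                # applied pressure (Pa), roughly 1 atm

D = E * t**3 / (12 * (1 - nu**2))   # flexural rigidity (N*m)
w0 = p * a**4 / (64 * D)            # centre deflection (m)
print(f"centre deflection: {w0 * 1e6:.2f} um")  # ~0.79 um, small versus t,
                                                # so the linear model is consistent
</code></pre>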
href="https://waset.org/page/support#legal-information">Legal</a></li> <li><a target="_blank" rel="nofollow" href="https://publications.waset.org/static/files/WASET-16th-foundational-anniversary.pdf">WASET celebrates its 16th foundational anniversary</a></li> </ul> </div> <div class="col-md-2"> <ul class="list-unstyled"> Account <li><a href="https://waset.org/profile">My Account</a></li> </ul> </div> <div class="col-md-2"> <ul class="list-unstyled"> Explore <li><a href="https://waset.org/disciplines">Disciplines</a></li> <li><a href="https://waset.org/conferences">Conferences</a></li> <li><a href="https://waset.org/conference-programs">Conference Program</a></li> <li><a href="https://waset.org/committees">Committees</a></li> <li><a href="https://publications.waset.org">Publications</a></li> </ul> </div> <div class="col-md-2"> <ul class="list-unstyled"> Research <li><a href="https://publications.waset.org/abstracts">Abstracts</a></li> <li><a href="https://publications.waset.org">Periodicals</a></li> <li><a href="https://publications.waset.org/archive">Archive</a></li> </ul> </div> <div class="col-md-2"> <ul class="list-unstyled"> Open Science <li><a target="_blank" rel="nofollow" href="https://publications.waset.org/static/files/Open-Science-Philosophy.pdf">Open Science Philosophy</a></li> <li><a target="_blank" rel="nofollow" href="https://publications.waset.org/static/files/Open-Science-Award.pdf">Open Science Award</a></li> <li><a target="_blank" rel="nofollow" href="https://publications.waset.org/static/files/Open-Society-Open-Science-and-Open-Innovation.pdf">Open Innovation</a></li> <li><a target="_blank" rel="nofollow" href="https://publications.waset.org/static/files/Postdoctoral-Fellowship-Award.pdf">Postdoctoral Fellowship Award</a></li> <li><a target="_blank" rel="nofollow" href="https://publications.waset.org/static/files/Scholarly-Research-Review.pdf">Scholarly Research Review</a></li> </ul> </div> <div class="col-md-2"> <ul class="list-unstyled"> Support <li><a href="https://waset.org/page/support">Support</a></li> <li><a href="https://waset.org/profile/messages/create">Contact Us</a></li> <li><a href="https://waset.org/profile/messages/create">Report Abuse</a></li> </ul> </div> </div> </div> </div> </div> <div class="container text-center"> <hr style="margin-top:0;margin-bottom:.3rem;"> <a href="https://creativecommons.org/licenses/by/4.0/" target="_blank" class="text-muted small">Creative Commons Attribution 4.0 International License</a> <div id="copy" class="mt-2">&copy; 2025 World Academy of Science, Engineering and Technology</div> </div> </footer> <a href="javascript:" id="return-to-top"><i class="fas fa-arrow-up"></i></a> <div class="modal" id="modal-template"> <div class="modal-dialog"> <div class="modal-content"> <div class="row m-0 mt-1"> <div class="col-md-12"> <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button> </div> </div> <div class="modal-body"></div> </div> </div> </div> <script src="https://cdn.waset.org/static/plugins/jquery-3.3.1.min.js"></script> <script src="https://cdn.waset.org/static/plugins/bootstrap-4.2.1/js/bootstrap.bundle.min.js"></script> <script src="https://cdn.waset.org/static/js/site.js?v=150220211556"></script> <script> jQuery(document).ready(function() { /*jQuery.get("https://publications.waset.org/xhr/user-menu", function (response) { jQuery('#mainNavMenu').append(response); });*/ jQuery.get({ url: "https://publications.waset.org/xhr/user-menu", cache: false 
</div> </main> </body> </html>
