<!doctype html>
<html lang="en"><head><title>EAI Endorsed Transactions on Scalable Information Systems - EUDL</title><link rel="icon" href="/images/favicon.ico"><link rel="stylesheet" type="text/css" href="/css/screen.css"><link rel="stylesheet" href="/css/zenburn.css"><meta charset="utf-8"><meta name="viewport" content="width=device-width, initial-scale=1.0"><meta name="description" content="Visit the new journal website to submit and consult our contents: https://publications.eai.eu/index.php/sis/index"><script type="text/javascript" src="https://services.eai.eu//load-signup-form/EAI"></script><script type="text/javascript" src="https://services.eai.eu//ujs/forms/signup/sso-client.js"></script><script type="text/javascript">if (!window.EUDL){ window.EUDL={} };EUDL.cas_url="https://account.eai.eu/cas";EUDL.profile_url="https://account.eai.eu";if(window.SSO){SSO.set_mode('eai')};</script><script type="text/javascript" src="/js/jquery.js"></script><script type="text/javascript" src="/js/jquery.cookie.js"></script><script type="text/javascript" src="/js/sso.js"></script><script type="text/javascript" src="/js/jscal2.js"></script><script type="text/javascript" src="/js/lang/en.js"></script><script type="text/javascript" src="/js/jquery.colorbox-min.js"></script><script type="text/javascript" src="/js/eudl.js"></script><script type="text/javascript" src="/js/journal.js"></script><script type="text/javascript" src="/js/tabs.js"></script><link rel="stylesheet" type="text/css" href="/css/jscal/jscal2.css"><link rel="stylesheet" type="text/css" href="/css/jscal/eudl/eudl.css"><link rel="stylesheet" type="text/css" href="/css/colorbox.css"></head><body><div id="eudl-page-head"><div id="eudl-page-header"><section id="user-area"><div><nav id="right-nav"><a href="/about">About</a> | <a href="/contact">Contact Us</a> | <a class="register" href="https://account.eai.eu/register?service=http%3A%2F%2Feudl.eu%2Fissue%2Fsis%2F9%2F34">Register</a> | <a class="login"
href="https://account.eai.eu/cas/login?service=http%3A%2F%2Feudl.eu%2Fissue%2Fsis%2F9%2F34">Login</a></nav></div></section></div></div><div id="eudl-page"><header><section id="topbar-ads"><div><a href="https://eudl.eu/"><img class="eudl-logo-top" src="https://eudl.eu/images/eudl-logo.png"></a><img class="eudl-ads-top" src="https://eudl.eu/images/eai-eudl.jpg"></div></section><section id="menu"><nav><a href="/proceedings" class=""><span>Proceedings</span><span class="icon"></span></a><a href="/series" class=""><span>Series</span><span class="icon"></span></a><a href="/journals" class="current"><span>Journals</span><span class="icon"></span></a><a href="/content" class=""><span>Search</span><span class="icon"></span></a><a href="http://eai.eu/">EAI</a></nav></section></header><div id="eaientran"></div><section id="content"><section id="journal"><form class="search-form" id="article_search" method="get"><section class="cover-and-filters"><section class="cover"><a href="/journal/sis" title="EAI Endorsed Transactions on Scalable Information Systems"><img src="/attachment/48013"></a></section><section class="issn"><strong>ISSN: </strong>2032-9407</section><section class="escripts link"><a href="https://escripts.eai.eu/paper/submit">Submit Article</a></section><section class="instructions link"><a href="/instructions">Submission Instructions</a></section><section class="ethics link"><a href="/ethics">Ethics and Malpractice Statement</a></section><section class="back-to-journal link"><a href="/journal/sis">Back to Journal Page</a></section><section class="browse-filters"><div class="browse-by"><a class="browse-link">2024<span class="pointer"></span></a><div class="filters"><a href="/issue/sis/11/6" class="filter ">Issue 6</a><a href="/issue/sis/11/5" class="filter ">Issue 5</a><a href="/issue/sis/11/4" class="filter ">Issue 4</a><a href="/issue/sis/11/3" class="filter ">Issue 3</a><a href="/issue/sis/11/2" class="filter ">Issue 2</a><a href="/issue/sis/11/1" class="filter 
">Issue 1</a></div><a class="browse-link">2023<span class="pointer"></span></a><div class="filters"><a href="/issue/sis/10/6" class="filter ">Issue 6</a><a href="/issue/sis/10/5" class="filter ">Issue 5</a><a href="/issue/sis/10/4" class="filter ">Issue 4</a><a href="/issue/sis/10/3" class="filter ">Issue 3</a><a href="/issue/sis/10/2" class="filter ">Issue 2</a><a href="/issue/sis/10/1" class="filter ">Issue 1</a></div><a class="browse-link">2022<span class="pointer"></span></a><div class="filters"><a href="/issue/sis/9/6" class="filter ">Issue 6</a><a href="/issue/sis/9/5" class="filter ">Issue 5</a><a href="/issue/sis/9/4" class="filter ">Issue 4</a><a href="/issue/sis/9/36" class="filter ">Issue 36</a><a href="/issue/sis/9/35" class="filter ">Issue 35</a><a href="/issue/sis/9/34" class="filter current">Issue 34</a></div><a class="browse-link">2021<span class="pointer"></span></a><div class="filters"><a href="/issue/sis/8/33" class="filter ">Issue 33</a><a href="/issue/sis/8/32" class="filter ">Issue 32</a><a href="/issue/sis/8/31" class="filter ">Issue 31</a><a href="/issue/sis/8/30" class="filter ">Issue 30</a><a href="/issue/sis/8/29" class="filter ">Issue 29</a></div><a class="browse-link">2020<span class="pointer"></span></a><div class="filters"><a href="/issue/sis/7/28" class="filter ">Issue 28</a><a href="/issue/sis/7/27" class="filter ">Issue 27</a><a href="/issue/sis/7/26" class="filter ">Issue 26</a><a href="/issue/sis/7/25" class="filter ">Issue 25</a><a href="/issue/sis/7/24" class="filter ">Issue 24</a></div><a class="browse-link">2019<span class="pointer"></span></a><div class="filters"><a href="/issue/sis/6/23" class="filter ">Issue 23</a><a href="/issue/sis/6/22" class="filter ">Issue 22</a><a href="/issue/sis/6/21" class="filter ">Issue 21</a><a href="/issue/sis/6/20" class="filter ">Issue 20</a></div><a class="browse-link">2018<span class="pointer"></span></a><div class="filters"><a href="/issue/sis/5/19" class="filter ">Issue 19</a><a 
href="/issue/sis/5/18" class="filter ">Issue 18</a><a href="/issue/sis/5/17" class="filter ">Issue 17</a><a href="/issue/sis/5/16" class="filter ">Issue 16</a></div><a class="browse-link">2017<span class="pointer"></span></a><div class="filters"><a href="/issue/sis/4/15" class="filter ">Issue 15</a><a href="/issue/sis/4/14" class="filter ">Issue 14</a><a href="/issue/sis/4/13" class="filter ">Issue 13</a><a href="/issue/sis/4/12" class="filter ">Issue 12</a></div><a class="browse-link">2016<span class="pointer"></span></a><div class="filters"><a href="/issue/sis/3/11" class="filter ">Issue 11</a><a href="/issue/sis/3/10" class="filter ">Issue 10</a><a href="/issue/sis/3/9" class="filter ">Issue 9</a><a href="/issue/sis/3/8" class="filter ">Issue 8</a></div><a class="browse-link">2015<span class="pointer"></span></a><div class="filters"><a href="/issue/sis/2/7" class="filter ">Issue 7</a><a href="/issue/sis/2/6" class="filter ">Issue 6</a><a href="/issue/sis/2/5" class="filter ">Issue 5</a><a href="/issue/sis/2/4" class="filter ">Issue 4</a></div><a class="browse-link">2014<span class="pointer"></span></a><div class="filters"><a href="/issue/sis/1/3" class="filter ">Issue 3</a><a href="/issue/sis/1/2" class="filter ">Issue 2</a><a href="/issue/sis/1/1" class="filter ">Issue 1</a></div></div></section></section><section class="info-and-search"><div class="manage-menu"></div><a href="/journal/sis"><h1>EAI Endorsed Transactions on Scalable Information Systems</h1></a><section class="issue-number">Issue 34, 2022</section><section class="editors"><strong>Editor(s)-in-Chief: </strong><span class="editor">Hua Wang</span>, <span class="editor">Xiaohua Jia</span> and <span class="editor">Manik Sharma</span></section><section class="issue-tabs"><div class="tabs"><ul><li><a name="articles">Articles</a></li><li><a name="meta">Information</a></li></ul></div><div class="content"><div name="articles"><section id="publications-results" class="search-results"><ul 
class="results-list"><li class="result-item article-light first"><h3><a href="/doi/10.4108/eai.3-9-2021.170905">A spatio-temporal attention fusion model for students behaviour recognition</a></h3><dl class="metadata"><dt class="title">Appears in: </dt><dd class="value">sis<span class="info-separator"> </span><strong>22</strong><span class="info-separator">(</span>34<span class="info-separator">)</span><span class="info-separator">: </span>e1</dd><br><dt class="title">Author: </dt><dd class="value">Xiaoli Wang</dd><br><dt class="title">Abstract: </dt><dd class="value abstract"><span class="shortened">Student behavior analysis can reflect students' learning situation in real time, which provides an important basis for optimizing classroom teaching strategies and improving teaching methods. It is an important task for smart classroom to explore how to use big dat…</span><span class="full">Student behavior analysis can reflect students' learning situation in real time, which provides an important basis for optimizing classroom teaching strategies and improving teaching methods. It is an important task for smart classroom to <br>explore how to use big data to detect and recognize students behavior. Traditional recognition methods have some defects, such as low efficiency, edge blur, time-consuming, etc. In this paper, we propose a new students behaviour recognition <br>method based on spatio-temporal attention fusion model. It makes full use of key spatio-temporal information of video, the problem of spatio-temporal information redundancy is solved. Firstly, the channel attention mechanism is introduced into the spatio-temporal network, and the channel information is calibrated by modeling the dependency relationship between feature channels. It can improve the expression ability of features. 
Secondly, a time attention model based on convolutional neural network (CNN) is proposed, which uses fewer parameters to learn the attention score of each frame, <br>focusing on the frames with obvious behaviour amplitude. Meanwhile, a multi-spatial attention model is presented to calculate the attention score of each position in each frame from different angles, extract several saliency areas of behaviour, and fuse the spatio-temporal features to further enhance the feature representation of video. Finally, the fused features are input into the classification network, and the behaviour recognition results are obtained by combining the two output streams according to different weights. Experiment results on HMDB51, UCF101 datasets and eight typical classroom behaviors of students show that the proposed method can effectively recognize the behaviours in videos. The accuracy of HMDB51 is higher than 90%, that of UCF101 and real data are higher than 90%.<br></span> <span class="expander more"><a class="trigger">more »</a></span></dd></dl></li><li class="result-item article-light"><h3><a href="/doi/10.4108/eai.3-9-2021.170906">Channel space weighted fusion-oriented feature pyramid network for motor imagery EEG signal recognition</a></h3><dl class="metadata"><dt class="title">Appears in: </dt><dd class="value">sis<span class="info-separator"> </span><strong>22</strong><span class="info-separator">(</span>34<span class="info-separator">)</span><span class="info-separator">: </span>e2</dd><br><dt class="title">Author: </dt><dd class="value">Wenhao Yang</dd><br><dt class="title">Abstract: </dt><dd class="value abstract"><span class="shortened">In order to solve the problems of weak generalization ability and low classification accuracy in motor imagery EEG signal classification, this paper proposes a channel space weighted fusion-oriented feature pyramid network for motor imagery EEG signal recognition. 
First, the sho…</span><span class="full">In order to solve the problems of weak generalization ability and low classification accuracy in motor imagery EEG signal classification, this paper proposes a channel space weighted fusion-oriented feature pyramid network for motor imagery EEG signal recognition. First, the short-time Fourier transform is used to obtain the EEG time-frequency map. Then, it builds a new feature pyramid network(FPN). The attention mechanism module is integrated into the FPN module, and the channel spatial weighted fusion-oriented feature pyramid network is proposed. This new structure can not only learn the weight of important channel features in the feature map, but also learn the representation of important feature areas in the network layers. Meanwhile, Skip-FPN module is added into the network structure, which fuses more details of EEG signals through short connections. The Dropout layer is added to prevent network training from over-fitting. In the classification model, we improve the AdaBoost algorithm to automatically update the base learner according to the classification error rate. Finally, the proposed model is used to classify the test data and the Kappa value is used as the evaluation index. Compared with the state-of-the-art motor image EEG signal recognition methods, the proposed method achieves better performance on the BCI Competition IV 2b data set. 
It has good generalization ability and can improve the classification effect.<br></span> <span class="expander more"><a class="trigger">more »</a></span></dd></dl></li><li class="result-item article-light"><h3><a href="/doi/10.4108/eai.14-9-2021.170952">Secure Data Fusion Analysis on Certificateless Short Signature Scheme Based on Integrated Neural Networks and Elliptic Curve Cryptography</a></h3><dl class="metadata"><dt class="title">Appears in: </dt><dd class="value">sis<span class="info-separator"> </span><strong>22</strong><span class="info-separator">(</span>34<span class="info-separator">)</span><span class="info-separator">: </span>e3</dd><br><dt class="title">Authors: </dt><dd class="value">Lina Zou, Xueying Wang, Lifeng Deng</dd><br><dt class="title">Abstract: </dt><dd class="value abstract"><span class="shortened">In the traditional public key cryptosystem based on certificates, the issuance and management of user certificates are realized through the authoritative certificate center, but amount of time is spent in the transmission and verification of user public key certificates. After a malicious user ob…</span><span class="full">In the traditional public key cryptosystem based on certificates, the issuance and management of user <br>certificates are realized through the authoritative certificate center, but amount of time is spent in the <br>transmission and verification of user public key certificates. After a malicious user obtaining legitimate users’ <br>private keys, he can select a secret value and signature process to generate the final private key, public key and signature. And he will announce that he is the legal user, while others are unable to distinguish this process. This is the defect of traditional digital signature scheme without certificate. Therefore, this paper proposes a certificateless short signature scheme based on integrated neural networks and elliptic curve cryptography for secure data fusion analysis. 
The security of the solution is based on Inv-CDH problem. The complete security <br>proof is given under the stochastic predictor model. It is proved that the new model can resist existence forgery in adaptive selective message attack with new adversary. Experiment results show that the calculation amount of our proposed certificateless short signature scheme is small and the efficiency is high compared with other state-of-the-art schemes.<br></span> <span class="expander more"><a class="trigger">more »</a></span></dd></dl></li><li class="result-item article-light"><h3><a href="/doi/10.4108/eai.14-9-2021.170953">A novel A* method fusing bio-inspired algorithm for mobile robot path planning</a></h3><dl class="metadata"><dt class="title">Appears in: </dt><dd class="value">sis<span class="info-separator"> </span><strong>22</strong><span class="info-separator">(</span>34<span class="info-separator">)</span><span class="info-separator">: </span>e4</dd><br><dt class="title">Authors: </dt><dd class="value">Yang Sun, Haipeng Wang</dd><br><dt class="title">Abstract: </dt><dd class="value abstract"><span class="shortened">The path planning of mobile robot is to find an optimal collision-free path in time distance or space from the starting point to the target point in a given environment. With the popularization and application of mobile robots, if the efficiency of mobile robots path is not high,…</span><span class="full">The path planning of mobile robot is to find an optimal collision-free path in time distance or space from the starting point to the target point in a given environment. With the popularization and application of mobile robots, if the efficiency of mobile robots path is not high, the working quality will be seriously affected. How to quickly plan an effective safe path is of great research significance and practical application value. Therefore, we propose a novel A* algorithm based on Bio-inspired algorithm for mobile robot path planning. 
Firstly, the synchronous bidirectional A* algorithm is used to optimize <br>the pheromone of ant colony algorithm, and the transition probability and pheromone update mechanism of ant colony algorithm are improved, so that the global optimization speed of the algorithm is faster and the path length of mobile robot is shortened. Furthermore, the static path is used to initialize the pigeon algorithm. Then, the improved pigeon algorithm is utilized to plan the local path of the mobile robot, and the simulated annealing criterion is introduced to solve the local optimal problem. The logarithmic S-type transfer function is adopted to optimize the step size of the pigeon number, so that the collision with the dynamic obstacles can be better avoided. Finally, a modified B-spline curve is used to smooth and re-plan the path. The simulation results show that the proposed method can realize path planning more effectively in complex dynamic environment.<br></span> <span class="expander more"><a class="trigger">more »</a></span></dd></dl></li><li class="result-item article-light"><h3><a href="/doi/10.4108/eai.14-9-2021.170954">Multi-feature data fusion based on common space model and recurrent convolutional neural networks for EEG tristimania recognition used in upper limb rehabilitation exercises</a></h3><dl class="metadata"><dt class="title">Appears in: </dt><dd class="value">sis<span class="info-separator"> </span><strong>22</strong><span class="info-separator">(</span>34<span class="info-separator">)</span><span class="info-separator">: </span>e5</dd><br><dt class="title">Author: </dt><dd class="value">Hudun Sun</dd><br><dt class="title">Abstract: </dt><dd class="value abstract"><span class="shortened">Traditional tristimania recognition methods cannot accurately recognize the mood of patients, which cannot provide effective adjuvant therapy for rehabilitation. 
Therefore, this paper proposes a new multi-feature data fusion method for Electroencephalography (EEG) tri…</span><span class="full">Traditional tristimania recognition methods cannot accurately recognize the mood of patients, which cannot provide effective adjuvant therapy for rehabilitation. Therefore, this paper proposes a new multi-feature data fusion method for <br>Electroencephalography (EEG) tristimania recognition. It combines common space model and recurrent convolutional neural networks to classify the tristimania group and control group. According to the phase lock value, the phase <br>synchronization functional network between electrode channels is constructed, and the functional connection modes of two kinds under different frequency bands are analyzed. The Xception network and LSTM are used as two non-interfering parts to extract two feature matrices from EEG tristimania signals. They are fused into a single feature matrix by merge algorithm. The single feature matrix is input into the recurrent convolutional neural networks (RCNN) for feature extraction and pooling. L2 regularized Softmax function is used as the classifier to complete the training and testing of RCNN. Finally, combining the Fisher score feature selection method and the classifier dependency structure, a low dimensional and efficient feature subset is obtained. 
Experimental results on public tristimania data sets validate that the proposed method has better effect in terms of accuracy and PLV compared with other strategies.<br></span> <span class="expander more"><a class="trigger">more »</a></span></dd></dl></li><li class="result-item article-light"><h3><a href="/doi/10.4108/eai.21-9-2021.170964">Spatio-temporal weight Tai Chi motion feature extraction based on deep network cross-layer feature fusion</a></h3><dl class="metadata"><dt class="title">Appears in: </dt><dd class="value">sis<span class="info-separator"> </span><strong>22</strong><span class="info-separator">(</span>34<span class="info-separator">)</span><span class="info-separator">: </span>e6</dd><br><dt class="title">Authors: </dt><dd class="value">Naiqiu Wu, Yang Shi</dd><br><dt class="title">Abstract: </dt><dd class="value abstract"><span class="shortened">Tai Chi is a valuable exercise for human health. The research on Tai Chi is helpful to improve people's exercise level. There is a problem with low efficiency in traditional Tai Chi motion feature extraction. Therefore, we propose a spatio-temporal weight Ta…</span><span class="full">Tai Chi is a valuable exercise for human health. The research on Tai Chi is helpful to improve people's exercise level. There is a problem with low efficiency in traditional Tai Chi motion feature extraction. Therefore, we propose a spatio-temporal weight Tai Chi motion feature extraction based on deep network cross-layer feature fusion. According to the selected motion spatio-temporal sample, the corresponding spatio-temporal motion key frame is extracted and output in the form of static image. The initial motion image is preprocessed by motion object detection and image enhancement. Traditional convolutional neural network extracts features from the shallow to the deep and builds a classifier for image classification, which is easy to ignore the shallow features. 
Based on the AlexNet network, a CL-AlexNet network is proposed. Batch normalization (BN) is used for data normalization. The cross-connection structure is introduced and the sensitivity analysis is performed. The Inception module is embedded for multi-scale depth feature extraction. It integrates deep features and shallow features. The spatio-temporal weight adaptive interpolation method is used to reduce the error of <br>edge detection. From the edge features and the motion spatio-temporal features, it realizes motion features extraction, and outputs the extraction results. Compared with the state-of-the-art feature extraction algorithms, the experiment results show that the proposed algorithm can extract more effective features. The recognition rate exceeds 90%. It can be used as guidance and evidence for Tai Chi training.<br></span> <span class="expander more"><a class="trigger">more »</a></span></dd></dl></li><li class="result-item article-light"><h3><a href="/doi/10.4108/eai.21-9-2021.170965">Multi-scale information fusion based on convolution kernel pyramid and dilated convolution for Wushu moving object detection</a></h3><dl class="metadata"><dt class="title">Appears in: </dt><dd class="value">sis<span class="info-separator"> </span><strong>22</strong><span class="info-separator">(</span>34<span class="info-separator">)</span><span class="info-separator">: </span>e7</dd><br><dt class="title">Author: </dt><dd class="value">Yuhang Li</dd><br><dt class="title">Abstract: </dt><dd class="value abstract"><span class="shortened">In complex background, the accuracy of moving object detection can be affected by some factors such as illumination change, short occlusion and background movement. 
This paper proposes a new multi-scale information fusion based on convolution kernel pyramid and dil…</span><span class="full">In complex background, the accuracy of moving object detection can be affected by some factors such as illumination change, short occlusion and background movement. This paper proposes a new multi-scale information fusion based on convolution kernel pyramid and dilated convolution for Wushu moving object detection. The proposed model uses a variety of ways to fuse the feature information. First, the multi-layer feature map information with different sizes is fused by the per-pixel addition method. Then the feature map of different stages is splicing in the channel dimension to form the information fusion feature layer with rich semantic information and detail information as the prediction layer of the model. In this model, convolution kernel pyramid structure is introduced into the anchor frame mechanism to solve the multi-scale <br>problem of detecting objects. The number of parameters increased by large convolution kernel is reduced by using dilated convolution to reduce the number of anchor frames reasonably. 
Experimental results show that the proposed fusion algorithm has certain anti-interference ability and high precision for moving object detection in complex environment compared the state-of-the-art methods.<br></span> <span class="expander more"><a class="trigger">more »</a></span></dd></dl></li><li class="result-item article-light"><h3><a href="/doi/10.4108/eai.28-9-2021.171169">Dynamic constitutive analysis of aluminum alloy materials commonly used in railway vehicles big data and its application in LS-DYNA</a></h3><dl class="metadata"><dt class="title">Appears in: </dt><dd class="value">sis<span class="info-separator"> </span><strong>22</strong><span class="info-separator">(</span>34<span class="info-separator">)</span><span class="info-separator">: </span>e8</dd><br><dt class="title">Authors: </dt><dd class="value">Juxing Liang, Jia Sun, Wei Wei, Rashid Ali Laghari</dd><br><dt class="title">Abstract: </dt><dd class="value abstract"><span class="shortened">The mechanical test for the three commonly used aluminum alloy materials (6005A-T6, 6008-T6 and 6082-T6) for rail vehicle big data are carried at different temperatures and different strain rates in this study. It can be seen that there are significant differ…</span><span class="full">The mechanical test for the three commonly used aluminum alloy materials (6005A-T6, 6008-T6 and 6082-T6) for rail vehicle big data are carried at different temperatures and different strain rates in this study. It can be seen that there are significant difference mechanical properties for these aluminum alloy. The main differences between them are the sensitivity of the strain rate and temperature. 6008-T6 and 6005A-T6 alloys have lower sensitivity to strain rate, while 6082-T6 alloy has higher sensitivity to strain rate and temperature. Based on the experimental data, the two commonly used dynamic material models of LS-DYNA, the Cowper-Symonds constitutive model and the Johnson-Cook constitutive model, are fitted. 
And then the two constitutive models are verified and compared by the dropping hammer impact test of the anti-climbing device. The results show that the Cowper-Symonds constitutive model has higher accuracy.<br></span> <span class="expander more"><a class="trigger">more »</a></span></dd></dl></li><li class="result-item article-light"><h3><a href="/doi/10.4108/eai.6-10-2021.171248">Adaptive and ADRC information fusion method for high speed train braking system</a></h3><dl class="metadata"><dt class="title">Appears in: </dt><dd class="value">sis<span class="info-separator"> </span><strong>22</strong><span class="info-separator">(</span>34<span class="info-separator">)</span><span class="info-separator">: </span>e9</dd><br><dt class="title">Authors: </dt><dd class="value">Xiaojun Ma, Yuhua Qin, Dequan Kong, Desheng Liu, Chaoyang Wang</dd><br><dt class="title">Abstract: </dt><dd class="value abstract"><span class="shortened">Aiming at the problem of poor adaptability and lag of traditional braking control methods of high-speed train, a high-speed train braking information fusion method based on adaptive linear auto disturbance rejection is proposed to arrange the transition process for…</span><span class="full">Aiming at the problem of poor adaptability and lag of traditional braking control methods of high-speed train, a high-speed train braking information fusion method based on adaptive linear auto disturbance rejection is proposed to arrange the transition process for accurate braking and stable operation of the train, and an extended state observer is designed to estimate and compensate the internal disturbance and external disturbance, so as to enhance the anti-interference ability of the system, <br>By introducing adaptive control into linear ADRC, the real-time adaptive self-tuning of parameters is realized, the efficiency of parameter tuning is improved, and the problem that too many parameters have a direct impact on the control effect in ADRC is solved. 
The simulation results show that the control method can estimate and compensate the disturbance well, shows good robustness, and can track the ideal parking curve quickly and accurately.<br></span> <span class="expander more"><a class="trigger">more »</a></span></dd></dl></li><li class="result-item article-light"><h3><a href="/doi/10.4108/eai.19-10-2021.171467">A Novel Eye-Tracking Device Designed with a Head Gesture Control Module</a></h3><dl class="metadata"><dt class="title">Appears in: </dt><dd class="value">sis<span class="info-separator"> </span><strong>22</strong><span class="info-separator">(</span>34<span class="info-separator">)</span><span class="info-separator">: </span>e10</dd><br><dt class="title">Authors: </dt><dd class="value">Shahid Karim, Muhammad Shakir, Ali Sheikh, Shahzor Memon, Halar Mustafa, Vishal Kumar</dd><br><dt class="title">Abstract: </dt><dd class="value abstract"><span class="shortened">In this paper a novel eye-tracking device designed which uses tiny CCD cameras to capture the eye image from the screen with the help of a video capture card. In the head gesture control mode, a light source projector is turned on and the CCD camera detects the position of the light source. The…</span><span class="full">In this paper a novel eye-tracking device designed which uses tiny CCD cameras to capture the eye image from the screen <br>with the help of a video capture card. In the head gesture control mode, a light source projector is turned on and the CCD <br>camera detects the position of the light source. The locations of the spots on the screen and on the image pupil of the eye <br>image are calculated, compared with the previous point and are subsequently mapped to the point on the screen. The <br>movement increment-coordinate control is also discussed which could improve the ease of use of the computer. 
We investigate the use of non-rigid head fixation using a helmet that constrains only general head orientation and allows some <br>freedom of movement. Device results simulated with the help of software which achieves excellent timing performance due to the use of live data streaming, instead of the traditionally employed data storage mode for processing analogous eye <br>position data.<br></span> <span class="expander more"><a class="trigger">more »</a></span></dd></dl></li><li class="result-item article-light"><h3><a href="/doi/10.4108/eai.20-10-2021.171547">New Approaches for Automatic Face Recognition Based on Deep Learning Models and Local Handcrafted ALTP</a></h3><dl class="metadata"><dt class="title">Appears in: </dt><dd class="value">sis<span class="info-separator"> </span><strong>22</strong><span class="info-separator">(</span>34<span class="info-separator">)</span><span class="info-separator">: </span>e11</dd><br><dt class="title">Authors: </dt><dd class="value">Abdessalam Hattab, Ali Behloul</dd><br><dt class="title">Abstract: </dt><dd class="value abstract"><span class="shortened">Face recognition is one of the most important topics in biometrics, where it achieved great success under controlled scenarios. Still, its accuracy degraded significantly in unconstrained conditions. To meet this challenge, we proposed a handcraft method based on extracting impor…</span><span class="full">Face recognition is one of the most important topics in biometrics, where it achieved great success under controlled scenarios. <br>Still, its accuracy degraded significantly in unconstrained conditions. To meet this challenge, we proposed a handcraft <br>method based on extracting important regions from the face image. We have been using Scale-Invariant Feature Transform <br>(SIFT) besides the Adaptive Local Ternary Patterns (ALTP). We have achieved an accuracy of 99.75% on the ORL database <br>and 94.70% on the FERET database. 
Then, we proposed a second method based on deep learning to achieve more accurate <br>face recognition. The deep learning models failed to achieve a high accuracy rate because they require a large amount of <br>training data. We used firstly Data Augmentation to solve this failure. However, this solution does not show high <br>performance. Secondly, our proposed ImageNet pre-trained AlexNet-v2 and VGG16 models with LinearSVC increased the <br>accuracy rate to 100% for the both databases.<br></span> <span class="expander more"><a class="trigger">more »</a></span></dd></dl></li><li class="result-item article-light"><h3><a href="/doi/10.4108/eai.20-10-2021.171548">Feature Extraction using CNN for Peripheral Blood Cells Recognition</a></h3><dl class="metadata"><dt class="title">Appears in: </dt><dd class="value">sis<span class="info-separator"> </span><strong>22</strong><span class="info-separator">(</span>34<span class="info-separator">)</span><span class="info-separator">: </span>e12</dd><br><dt class="title">Authors: </dt><dd class="value">Mohammed Ammar, Mostafa El Habib Daho, Khaled Harrar, Amel Laidi</dd><br><dt class="title">Abstract: </dt><dd class="value abstract"><span class="shortened">INTRODUCTION: The diagnosis of hematological diseases is based on the morphological differentiation of the peripheral blood cell types. OBJECTIVES: In this work, a hybrid model based on CNN features extraction and machine learning classifiers were proposed to improve peripheral blood cell imag…</span><span class="full">INTRODUCTION: The diagnosis of hematological diseases is based on the morphological differentiation of the peripheral blood cell types. <br> <br> <br>OBJECTIVES: In this work, a hybrid model based on CNN features extraction and machine learning classifiers were proposed to improve peripheral blood cell image classification. <br> <br> <br>METHODS: At first, a CNN model composed of four convolution layers and three fully connected layers was proposed. 
Second, the features from the deeper layers of the CNN classifier were extracted. Third, several models were trained and tested on the data. Moreover, a combination of CNN with traditional machine learning classifiers was carried out. This includes CNN_KNN, CNN_SVM (Linear), CNN_SVM (RBF), and CNN_AdaboostM1. The proposed methods were validated on two datasets. We have used a public dataset containing 12444 images with four types of leukocytes to find the best optimizer function(eosinophil, lymphocyte, monocyte, and neutrophil images). The second dataset contains 17,092 images divided into eight groups: lymphocytes, neutrophils, monocytes). the second public dataset was used to find the best combination of CNN and the machine learning algorithms. the dataset containing 17,092 images: lymphocytes, neutrophils, monocytes, eosinophils, basophils, immature granulocytes, erythroblasts, and platelets. <br> <br>RESULTS: The results reveal that CNN combined with AdaBoost decision tree classifier provided the best <br>performance in terms of cells recognition with an accuracy of 88.8%, demonstrating the performance of the proposed approach. <br> <br> <br>CONCLUSION: The obtained results show that the proposed system can be used in clinical practice.<br></span> <span class="expander more"><a class="trigger">more »</a></span></dd></dl></li></ul></section></div><div name="meta"><h2>Scope</h2><div class="abstract"><div class="shortened"><p>As the data volumes continue to increase and the ways of information dispersion across the globe continue to diversify, new scalable methods and structures are needed for efficiently processing those distributed and autonomous data. 
Grid computing, P2P technology, distributed information retrieval …</p></div><div class="full"><p>As the data volumes continue to increase and the ways of information dispersion across the globe continue to diversify, new scalable methods and structures are needed for efficiently processing those distributed and autonomous data. Grid computing, P2P technology, distributed information retrieval technology and networking technology all must be merged to address the scalability concern.</p></div> <span class="expander more"><a class="trigger">more »</a></span></div><h2>Topics</h2><div class="abstract"><div class="shortened"><p>The scope of the journal includes:</p> <ul> <li>Scalable distributed information systems</li> <li>Scalable grid information systems</li> <li>Parallel information processing and systems</li> <li>Web information searching and retrieval</li> <li>Data mining</li> <li>Content delivery networks (CDN)</li> <li>VLDB</li> <li>P2P systems</li> <li>Scalable mobile…</li> </ul></div><div class="full"><p>The scope of the journal includes:</p> <ul> <li>Scalable distributed information systems</li> <li>Scalable grid information systems</li> <li>Parallel information processing and systems</li> <li>Web information searching and retrieval</li> <li>Data mining</li> <li>Content delivery networks (CDN)</li> <li>VLDB</li> <li>P2P systems</li> <li>Scalable mobile and wireless database systems</li> <li>Large scale sensor network systems</li> <li>Index compression methods</li> <li>Architectures for scalability</li> <li>Scalable information system applications</li> <li>Evaluation metrics for scalability</li> <li>Information security</li> </ul></div> <span class="expander more"><a class="trigger">more »</a></span></div><h2>Indexing</h2><div class="abstract"><div class="shortened"><ul> <li><a href="https://mjl.clarivate.com/home">Web of Science Core Collection</a></li> <li><a href="https://www.engineeringvillage.com/home.url">Ei Compendex</a></li> <li><a 
href="https://doaj.org/toc/2032-9407">DOAJ</a></li> <li><a href="https://search.crossref.org/?q=2032-9407">CrossRef</a></li> <li>[EBSCO Discovery Service](https://www.ebsco.com/products/ebsco-disco…</li> </ul></div><div class="full"><ul> <li><a href="https://mjl.clarivate.com/home">Web of Science Core Collection</a></li> <li><a href="https://www.engineeringvillage.com/home.url">Ei Compendex</a></li> <li><a href="https://doaj.org/toc/2032-9407">DOAJ</a></li> <li><a href="https://search.crossref.org/?q=2032-9407">CrossRef</a></li> <li><a href="https://www.ebsco.com/products/ebsco-discovery-service">EBSCO Discovery Service</a></li> <li><a href="https://www.worldcat.org/title/eai-endorsed-transactions-on-scalable-information-systems/oclc/913714002&referer=brief_results">OCLC Discovery Services</a></li> <li><a href="https://europub.co.uk/journals/8124">EuroPub</a></li> <li><a href="http://miar.ub.edu/issn/2032-9407">MIAR</a></li> <li><a href="https://rzblx1.uni-regensburg.de/ezeit/detail.phtml?bibid=AAAAA&colors=7&lang=de&jour_id=237211">Elektronische Zeitschriftenbibliothek</a></li> <li><a href="https://publons.com/journal/37157/icst-transactions-on-scalable-information-systems">Publons</a></li> <li><a href="http://ulrichsweb.serialssolutions.com/login">UlrichsWEB</a></li> <li><a href="https://www.heal-link.gr/en/home-2/">Hellenic Academic Libraries Link</a></li> <li><a href="https://www.ingentaconnect.com/content/doaj/20329407">Ingenta Connect</a></li> <li><a href="https://www.proquest.com/products-services/Publicly-Available-Content-Database.html#overviewlinkSection">Publicly Available Content Database (ProQuest)</a></li> <li><a href="https://www.proquest.com/products-services/adv_tech_aero.html">Advanced Technologies & Aerospace Database (ProQuest)</a></li> <li><a href="https://www.proquest.com/products-services/databases/pq_scitech.html">SciTech Premium Collection (ProQuest)</a></li> <li><a 
href="https://scholar.google.sk/scholar?start=0&q=source:eai+source:endorsed+source:transactions+source:on+source:scalable+source:information+source:systems&hl=es&as_sdt=0,5&as_ylo=2018">Google Scholar</a></li> </ul></div> <span class="expander more"><a class="trigger">more »</a></span></div><h2>Special Issues</h2><div class="abstract"><div class="shortened"><p><em>Call for Papers:</em> <a href="https://escripts.eai.eu/publication/366">Special issue on: Real-time image information processing with deep neural networks and data mining technologies</a> (Manuscript submission deadline: 2022-02-28; Notification of acceptance: 2022-04-15; Submission of final revised paper: 2022-…</p></div><div class="full"><p><em>Call for Papers:</em> <a href="https://escripts.eai.eu/publication/366">Special issue on: Real-time image information processing with deep neural networks and data mining technologies</a> (Manuscript submission deadline: 2022-02-28; Notification of acceptance: 2022-04-15; Submission of final revised paper: 2022-05-15; Publication of special issue (tentative): 2022-06-15)</p> <p><em>Guest Editor:</em> Dr. Prof. Hang Li (Northeastern University, China) <em>Guest Editor:</em> Dr. Prof. 
Jochen Schiewe (HafenCity Universität Hamburg, Germany)</p></div> <span class="expander more"><a class="trigger">more »</a></span></div><h2>Editorial Board</h2><div class="abstract"><div class="shortened"><ul> <li>Editors-in-Chief</li> <li>Hua Wang, Victoria University, Australia</li> <li>Xiaohua Jia, City University of Hong Kong</li> <li>Editorial board</li> <li>Manik Sharma, DAV University, India</li> <li>Ajay Kattepur (Tata Consultancy Services)</li> <li>Aniello Castiglione (University of Salerno)</li> <li>Chang Choi (Chosun University)</li> <li>Cho-…</li> </ul></div><div class="full"><ul> <li>Editors-in-Chief</li> <li>Hua Wang, Victoria University, Australia</li> <li>Xiaohua Jia, City University of Hong Kong</li> <li>Editorial board</li> <li>Manik Sharma, DAV University, India</li> <li>Ajay Kattepur (Tata Consultancy Services)</li> <li>Aniello Castiglione (University of Salerno)</li> <li>Chang Choi (Chosun University)</li> <li>Cho-Li Wang (University of Hong Kong)</li> <li>Daniel S. 
Katz (University of Chicago)</li> <li>Fabrizio Silvestri (ISTI – CNR, Italy)</li> <li>Hamed Taherdoost (Hamta Business Solution Snd)</li> <li>Heng Tao Shen (University of Queensland)</li> <li>Houbing Song (Embry-Riddle Aeronautical University)</li> <li>José Manuel Machado (University of Minho, Portugal)</li> <li>Jose Merseguer (Universidad de Zaragoza)</li> <li>Jie Li (University of Tsukuba)</li> <li>Lin Yun (Harbin Engineering University)</li> <li>Phan Cong Vinh (Nguyen Tat Thanh University)</li> <li>Raj Gururajan (University of Southern Queensland)</li> <li>Sherman Chow (Chinese University of Hong Kong)</li> <li>Silva Fábio (University of Minho, Portugal)</li> <li>Steve Beitzel (Telcordia)</li> <li>Tzung-Pei Hong (National University of Kaohsiung, Kaohsing City, Taiwan)</li> <li>Wang-Chien Lee (The Pennsylvania State University)</li> <li>Weili Wu (The University of Texas at Dallas)</li> <li>Xueyan Tang (Nanyang Technological University)</li> <li>Vijayakumar Ponnusamy (SRM University, India)</li> <li>J Amudhavel (KL University, India)</li> <li>Yingshu Li (Georgia State University)</li> <li>Jerry Chun-Wei Lin (Western Norway University of Applied Sciences, Norway)</li> <li>Karolj Skala (Ruđer Bošković Institute, Croatia)</li> <li>Xiao-Zhi Gao (University of Eastern Finland, Finland)</li> <li>Thaier Hayajneh (Fordham University, USA)</li> <li>Chin-Ling Chen (Chaoyang University of Technology, Taiwan)</li> <li>Nuno M. 
Garcia (Faculty of Sciences, University of Lisbon, Portugal)</li> <li>Arianna D'Ulizia (Consiglio Nazionale delle Ricerche (CNR), Italy)</li> <li>Robertas Damaševičius (Kaunas University of Technology (KTU), Lithuania)</li> <li>Hiep Xuan Huynh (Can Tho University, VietNam)</li> <li>Ji Zhang (University of Southern Queensland, Australia)</li> <li>Xiaohui Tao (University of Southern Queensland, Australia)</li> <li>Ye Wang (National University of Defense Technology, China)</li> <li>Nageswara Rao Moparthi (KL University, India)</li> <li>Shuai Liu (Hunan Normal University, China)</li> <li>Prof Xiaoming Fu (Georg-August-University of Goettingen, Germany)</li> <li>Prof Zhisheng Huang (Vrije University of Amsterdam)</li> <li>Prof Rose Quan (Northumbria University, UK)</li> <li>Prof Shi Dong (Zhoukou Normal University, China)</li> <li>Dr Limei Peng (Kyungpook National University, South Korea)</li> <li>Prof Hui Ma( Victoria University of Wellington, New Zealand)</li> <li>Dr. Venkatesan Subramanian (Indian Institute of Information Technology – Allahabad, India)</li> <li>Dr Pon Harshavardhanan (VIT Bhopal University, India)</li> <li>Dr. 
Manish Kumar (The Indian Institute of Information Technology, Allahabad, India)</li> <li>Muzammil Hussain, University of Management and Technology, Lahore, Pakistan</li> <li>Michael Bewong, Charles Sturt University, Australia</li> <li>Shabir Ahmad, Gachon University, Korea</li> <li>Vu Nguyen, University of Science, Vietnam</li> <li>Xiaodi Huang, Charles Sturt University, Australia</li> <li>Jianming Yong, University of Southern Queensland, Australia</li> <li>Yogeshwar Vijayakumar Navandar; National Institute of Technology, Indian.</li> <li>Zhengyi Chai, Tiangong University in China, China</li> <li>Chuanlong Wang, Taiyuan Normal University, China</li> <li>Chin-Feng Lee, Chaoyang University of Technology, Taiwan</li> <li>Hsing-Chung Chen (Jack Chen), Asia University, Taiwan</li> <li>Wen-Yang Lin, National University of Kaohsiung, Taiwan</li> <li>Chun-Hao Chen, National Kaohsiung University of Science and Technology, Taiwan</li> <li>Mudasir Mohd, University of Kashmir, India.</li> <li>BalaAnand Muthu, INTI International University, Malaysia.</li> <li>Md Rafiqul Islam, Australian Institute of Higher Education, Australia.</li> <li>Jin Wang, Institute of Applied Physics and Computational Mathematics, China.</li> <li>Chandu Thota, University of Nicosia, Cyprus.</li> <li>Haris M. Khalid, University of Dubai, UAE.</li> <li>Dr. G. 
Reza Nasiri, Alzahra University, Tehran, Iran.</li> <li>Siuly Siuly, Victoria University, Australia</li> <li>Bishnu Prasad Gautam, Kanazawa Gakuin University, Japan</li> <li>Sivaparthipan C B, Bharathiar University, India</li> <li>Ting-Chia Hsu, National Taiwan Normal University, Taiwan</li> <li>Punitha Palanisamy, Tagore IET, India</li> <li>Lakshmana Kumar R, Tagore IET, India</li> <li>Weiwei Jiang, Beijing University of Posts and Telecommunications, Taiwan</li> </ul></div> <span class="expander more"><a class="trigger">more »</a></span></div><h2>Journal Blurb</h2><div class="abstract"><div class="shortened"><p>Visit the new journal website to submit and consult our contents: https://publications.eai.eu/index.php/sis/index</p></div><div class="full"><p>Visit the new journal website to submit and consult our contents: https://publications.eai.eu/index.php/sis/index</p></div> <span class="expander more"><a class="trigger">more »</a></span></div></div></div></section><section class="publication-info"><dl class="metadata"><dt class="title">Publisher</dt> <dd class="value">EAI</dd> <dt class="title">ISSN</dt> <dd class="value">2032-9407</dd> <dt class="title">Volume</dt> <dd class="value">9</dd></dl><dl class="metadata"><dt class="title">Published</dt> <dd class="value">2022-01-21</dd></dl></section></section></form></section></section><div class="clear"></div><footer><div class="links"><a href="https://www.ebsco.com/" target="_blank"><img class="logo ebsco-logo" src="/images/ebsco.png" alt="EBSCO"></a><a href="https://www.proquest.com/" target="_blank"><img class="logo proquest-logo" src="/images/proquest.png" alt="ProQuest"></a><a href="https://dblp.uni-trier.de/db/journals/publ/icst.html" target="_blank"><img class="logo dblp-logo" src="/images/dblp.png" alt="DBLP"></a><a 
href="https://doaj.org/search?source=%7B%22query%22%3A%7B%22filtered%22%3A%7B%22filter%22%3A%7B%22bool%22%3A%7B%22must%22%3A%5B%7B%22term%22%3A%7B%22index.publisher.exact%22%3A%22European%20Alliance%20for%20Innovation%20(EAI)%22%7D%7D%5D%7D%7D%2C%22query%22%3A%7B%22query_string%22%3A%7B%22query%22%3A%22european%20alliance%20for%20innovation%22%2C%22default_operator%22%3A%22AND%22%2C%22default_field%22%3A%22index.publisher%22%7D%7D%7D%7D%7Dj" target="_blank"><img class="logo doaj-logo" src="/images/doaj.jpg" alt="DOAJ"></a><a href="https://www.portico.org/publishers/eai/" target="_blank"><img class="logo portico-logo" src="/images/portico.png" alt="Portico"></a><a href="http://eai.eu/" target="_blank"><img class="logo eai-logo" src="/images/eai.png"></a></div></footer></div><div class="footer-container"><div class="footer-width"><div class="footer-column logo-column"><a href="https://eai.eu/"><img src="https://eudl.eu/images/logo_new-1-1.png" alt="EAI Logo"></a></div><div class="footer-column"><h4>About EAI</h4><ul><li><a href="https://eai.eu/who-we-are/">Who We Are</a></li><li><a href="https://eai.eu/leadership/">Leadership</a></li><li><a href="https://eai.eu/research-areas/">Research Areas</a></li><li><a href="https://eai.eu/partners/">Partners</a></li><li><a href="https://eai.eu/media-center/">Media Center</a></li></ul></div><div class="footer-column"><h4>Community</h4><ul><li><a href="https://eai.eu/eai-community/">Membership</a></li><li><a href="https://eai.eu/conferences/">Conference</a></li><li><a href="https://eai.eu/recognition/">Recognition</a></li><li><a href="https://eai.eu/corporate-sponsorship">Sponsor Us</a></li></ul></div><div class="footer-column"><h4>Publish with EAI</h4><ul><li><a href="https://eai.eu/publishing">Publishing</a></li><li><a href="https://eai.eu/journals/">Journals</a></li><li><a href="https://eai.eu/proceedings/">Proceedings</a></li><li><a href="https://eai.eu/books/">Books</a></li><li><a 
href="https://eudl.eu/">EUDL</a></li></ul></div></div></div><script type="text/javascript" src="https://eudl.eu/js/gacode.js"></script><script src="/js/highlight.pack.js"></script><script>hljs.initHighlightingOnLoad();</script><script type="application/ld+json">{"@context":"http://schema.org","@type":"BreadcrumbList","itemListElement":[{"@type":"ListItem","position":1,"item":{"@id":"http://eudl.eu","name":"Home","image":null}},{"@type":"ListItem","position":2,"item":{"@id":"http://eudl.eu/journals","name":"Journals","image":null}},{"@type":"ListItem","position":3,"item":{"@id":"http://eudl.eu/journal/sis","name":"sis","image":null}},{"@type":"ListItem","position":4,"item":{"@id":"/issue/sis/9/34","name":"Issue 34","image":null}}]}</script></body></html>