EAI Endorsed Transactions on Scalable Information Systems - EUDL
ISSN: 2032-9407

Visit the new journal website to submit and consult our contents: https://publications.eai.eu/index.php/sis/index
class="pointer"></span></a><div class="filters"><a href="/issue/sis/1/3" class="filter ">Issue 3</a><a href="/issue/sis/1/2" class="filter ">Issue 2</a><a href="/issue/sis/1/1" class="filter ">Issue 1</a></div></div></section></section><section class="info-and-search"><div class="manage-menu"></div><a href="/journal/sis"><h1>EAI Endorsed Transactions on Scalable Information Systems</h1></a><section class="issue-number">Issue 36, 2022</section><section class="editors"><strong>Editor(s)-in-Chief: </strong><span class="editor">Hua Wang</span>, <span class="editor">Xiaohua Jia</span> and <span class="editor">Manik Sharma</span></section><section class="issue-tabs"><div class="tabs"><ul><li><a name="articles">Articles</a></li><li><a name="meta">Information</a></li></ul></div><div class="content"><div name="articles"><section id="publications-results" class="search-results"><ul class="results-list"><li class="result-item article-light first"><h3><a href="/doi/10.4108/eai.16-11-2021.172132">A novel image clustering method based on coupled convolutional and graph convolutional network</a></h3><dl class="metadata"><dt class="title">Appears in: </dt><dd class="value">sis<span class="info-separator"> </span><strong>22</strong><span class="info-separator">(</span>36<span class="info-separator">)</span><span class="info-separator">: </span>e1</dd><br><dt class="title">Author: </dt><dd class="value">Rangjun Li</dd><br><dt class="title">Abstract: </dt><dd class="value abstract"><span class="shortened">Image clustering is a key and challenging task in the field of machine learning and computer vision. Technically, image clustering is the process of grouping images without the use of any supervisory information in order to retain similar images within the same…</span><span class="full">Image clustering is a key and challenging task in the field of machine learning and computer vision. Technically, image <br>clustering is the process of grouping images without the use of any supervisory information in order to retain similar <br>images within the same cluster. This paper proposes a novel image clustering method based on coupled convolutional and <br>graph convolutional network. It solves the problem that the deep clustering method usually only focuses on the useful <br>features extracted from the sample itself, and seldom considers the structural information behind the sample. 
Experimental <br>results show that the proposed algorithm can effectively extract more discriminative deep features, and the model achieves <br>good clustering effect due to the combination of attribute information and structure information of samples in GCN.<br></span> <span class="expander more"><a class="trigger">more »</a></span></dd></dl></li><li class="result-item article-light"><h3><a href="/doi/10.4108/eai.16-11-2021.172133">LRSDSFD: low-rank sparse decomposition and symmetrical frame difference method for moving video foreground-background separation</a></h3><dl class="metadata"><dt class="title">Appears in: </dt><dd class="value">sis<span class="info-separator"> </span><strong>22</strong><span class="info-separator">(</span>36<span class="info-separator">)</span><span class="info-separator">: </span>e2</dd><br><dt class="title">Author: </dt><dd class="value">Hongqiao Gao</dd><br><dt class="title">Abstract: </dt><dd class="value abstract"><span class="shortened">In scenes with dynamic background or measurement noise, the low-rank sparse decomposition background modeling algorithm based on kernel norm constraint is easy to separate the moving background or noise as part of the foreground and the foreground at the same time, a…</span><span class="full">In scenes with dynamic background or measurement noise, the low-rank sparse decomposition background modeling <br>algorithm based on kernel norm constraint is easy to separate the moving background or noise as part of the foreground <br>and the foreground at the same time, and it has poor modeling performance for complex background. In order to solve this <br>problem, this paper proposes a low-rank sparse decomposition and symmetrical frame difference method for moving video <br>foreground-background separation. Firstly, low-rank sparse decomposition is used to constrain the background matrix. <br>Secondly, the moving objects in the region of interest (ROI) are extracted by symmetrical frame difference method, and <br>the background image is obtained by block background modeling. Numerical experiments show that compared with other <br>five main algorithms, the proposed algorithm can separate moving objects more accurately in the scene with dynamic <br>background.<br></span> <span class="expander more"><a class="trigger">more »</a></span></dd></dl></li><li class="result-item article-light"><h3><a href="/doi/10.4108/eai.19-11-2021.172213">Correlation temporal feature extraction network via residual network for English relation extraction</a></h3><dl class="metadata"><dt class="title">Appears in: </dt><dd class="value">sis<span class="info-separator"> </span><strong>22</strong><span class="info-separator">(</span>36<span class="info-separator">)</span><span class="info-separator">: </span>e3</dd><br><dt class="title">Author: </dt><dd class="value">Ping Li</dd><br><dt class="title">Abstract: </dt><dd class="value abstract"><span class="shortened">In relation extraction, a major challenge is the absence of annotated samples. Relation extraction aims to extract the relationships between entity pairs from a large amount of unstructured data. To solve the above problems, this paper presents a new method for Engl…</span><span class="full">In relation extraction, a major challenge is the absence of annotated samples. Relation extraction aims to extract the relationships between entity pairs from a large amount of unstructured data. 
Correlation temporal feature extraction network via residual network for English relation extraction
DOI: 10.4108/eai.19-11-2021.172213
Appears in: sis 22(36): e3
Author: Ping Li
Abstract: In relation extraction, a major challenge is the absence of annotated samples. Relation extraction aims to extract the relationships between entity pairs from large amounts of unstructured data. To address these problems, this paper presents a new method for English relation extraction based on a correlation temporal feature extraction network built on a residual network. Firstly, an attention mechanism and a recurrent neural network are used to obtain the temporal features of English word correlations. Secondly, a multi-branch feature-sensing convolutional neural network is constructed to obtain global and local temporal correlation features respectively. The residual network dynamically reduces the influence of noisy data and better extracts the deep information of English text. Finally, relation extraction is performed with a Softmax classifier. Experimental results show that the proposed method extracts English relations more effectively than other methods.

Convolutional block attention module based on visual mechanism for robot image edge detection
DOI: 10.4108/eai.19-11-2021.172214
Appears in: sis 22(36): e4
Authors: Aiyun Ju, Zhongli Wang
Abstract: In recent years, with the continuous development of computer vision, digital imaging and other information technologies, their application to robot images has attracted many researchers at home and abroad. Edge detection based on conventional deep learning produces messy and fuzzy edge lines. Therefore, we present a new convolutional block attention module (CBAM) based on the visual mechanism for robot image edge detection. CBAM is added to the trunk network, and a down-sampling technique with translation invariance is adopted. Some down-sampling operations in the trunk network are removed to retain image details, while dilated convolution is used to enlarge the model's receptive field. Training is carried out on the BSDS500 and PASCAL VOC Context datasets, and the image pyramid technique is used to enhance edge quality during testing. Experimental results show that the proposed model extracts image contours more clearly than other networks and alleviates edge blur.
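As a reference point for the attention module named in the abstract, here is a minimal generic CBAM block (channel attention followed by spatial attention) in the spirit of Woo et al. (2018); its placement in the trunk network and all hyperparameters below are assumptions, not the paper's configuration.

```python
import torch
import torch.nn as nn

class ChannelAttention(nn.Module):
    def __init__(self, ch, reduction=16):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(ch, ch // reduction), nn.ReLU(),
            nn.Linear(ch // reduction, ch))

    def forward(self, x):
        avg = self.mlp(x.mean(dim=(2, 3)))   # global average pooling path
        mx = self.mlp(x.amax(dim=(2, 3)))    # global max pooling path
        return torch.sigmoid(avg + mx)[:, :, None, None] * x

class SpatialAttention(nn.Module):
    def __init__(self, k=7):
        super().__init__()
        self.conv = nn.Conv2d(2, 1, k, padding=k // 2)

    def forward(self, x):
        s = torch.cat([x.mean(1, keepdim=True),
                       x.amax(1, keepdim=True)], dim=1)
        return torch.sigmoid(self.conv(s)) * x

class CBAM(nn.Module):
    def __init__(self, ch):
        super().__init__()
        self.ca, self.sa = ChannelAttention(ch), SpatialAttention()

    def forward(self, x):
        return self.sa(self.ca(x))

feat = torch.randn(2, 64, 32, 32)
out = CBAM(64)(feat)   # same shape, attention-reweighted
```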
Self-organizing incremental and graph convolution neural network for English implicit discourse relation recognition
DOI: 10.4108/eai.22-11-2021.172215
Appears in: sis 22(36): e5
Author: Yubo Geng
Abstract: Implicit discourse relation recognition is a sub-task of discourse relation recognition. It is challenging because it is difficult to learn argument representations that carry rich semantic and interactive information. To solve this problem, this paper proposes a self-organizing incremental and graph convolutional neural network for English implicit discourse relation recognition. The method uses the pre-trained language model BERT (Bidirectional Encoder Representations from Transformers) to encode each argument. A classification model based on a self-organizing incremental network and a graph convolutional neural network is constructed to obtain argument representations that are helpful for English implicit discourse relation recognition. The experimental results show that the proposed method is superior to the benchmark models on contingency and expansion relations.

Double-channel cascade-based generative adversarial network for power equipment infrared and visible image fusion
DOI: 10.4108/eai.22-11-2021.172216
Appears in: sis 22(36): e6
Authors: Jihong Wang, Haiyan Yu
Abstract: At present, visible light imaging sensors and infrared imaging sensors are two commonly used sensors, widely applied to detection, monitoring and tracking in aviation, navigation and other military fields. Because their working principles differ, so does their performance. An infrared imaging sensor records the infrared radiation of the target itself and identifies the target by detecting the thermal radiation difference between the target and the background, so it has a special ability to recognize camouflaged targets, such as finding people, vehicles and artillery hidden in woods and grass. Although infrared imaging sensors detect thermal targets well, they are insensitive to brightness changes in the scene and have low imaging resolution, which hinders interpretation by the human eye. A visible light imaging sensor is sensitive to the reflectance of the target scene and independent of its thermal contrast; the image it produces is sharp and provides the details of the target scene. Therefore, fusing infrared and visible images combines the infrared image's better target indication with the visible image's clear scene information. In this paper, we propose a double-channel cascade-based generative adversarial network for power equipment infrared and visible image fusion. The experimental results show that the fused image retains the target information of the infrared image as well as more details of the visible image, and achieves better performance in both subjective and objective evaluation.
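The abstract gives no architectural detail beyond the double-channel design, so the following is purely a hypothetical sketch: a fusion generator with separate infrared and visible encoder branches concatenated before decoding. Every layer choice is an assumption, and the discriminator and adversarial training loop are omitted.

```python
import torch
import torch.nn as nn

def enc_block(cin, cout):
    return nn.Sequential(nn.Conv2d(cin, cout, 3, padding=1),
                         nn.BatchNorm2d(cout), nn.ReLU())

class FusionGenerator(nn.Module):
    """Two encoder channels (IR, visible) fused and decoded to one image."""
    def __init__(self):
        super().__init__()
        self.ir_enc = nn.Sequential(enc_block(1, 16), enc_block(16, 32))
        self.vis_enc = nn.Sequential(enc_block(1, 16), enc_block(16, 32))
        self.decoder = nn.Sequential(enc_block(64, 32), enc_block(32, 16),
                                     nn.Conv2d(16, 1, 3, padding=1),
                                     nn.Tanh())

    def forward(self, ir, vis):
        z = torch.cat([self.ir_enc(ir), self.vis_enc(vis)], dim=1)
        return self.decoder(z)

ir = torch.randn(1, 1, 128, 128)     # infrared input
vis = torch.randn(1, 1, 128, 128)    # visible input
fused = FusionGenerator()(ir, vis)   # -> (1, 1, 128, 128)
```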
A novel one-stage object detection network for multi-scene vehicle attribute recognition
DOI: 10.4108/eai.22-11-2021.172217
Appears in: sis 22(36): e7
Author: Jiefei Zhang
Abstract: In recent years, with the continuous development of computer vision technology, computer vision has been widely used in many scientific research fields and civil applications. As one of the basic tasks underlying many advanced visual tasks, object detection has important research significance in computer vision and in practical applications. With the joint efforts of many scholars, object detection based on deep learning has made remarkable progress. However, in special weather such as rain, fog and at night, and where visible light sources are lacking, visual range and visibility are very poor and the captured images cannot be used normally, which degrades object detection. To solve this problem, this paper proposes a novel one-stage object detection network for multi-scene vehicle attribute recognition, covering mainly vehicle type and color attributes. The one-stage object detection network YOLOv3 is used as the base network, and a GIoU loss replaces the MSE loss. Finally, experimental results show that the accuracy of the proposed algorithm improves significantly on public datasets.
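GIoU itself is standard (Rezatofighi et al., 2019): it augments IoU with a penalty for the empty space inside the smallest box enclosing both prediction and ground truth. A generic implementation of the loss, not the paper's code, for axis-aligned boxes in (x1, y1, x2, y2) form:

```python
import torch

def giou_loss(a, b, eps=1e-7):
    # Intersection rectangle and its area.
    ix1, iy1 = torch.max(a[:, 0], b[:, 0]), torch.max(a[:, 1], b[:, 1])
    ix2, iy2 = torch.min(a[:, 2], b[:, 2]), torch.min(a[:, 3], b[:, 3])
    inter = (ix2 - ix1).clamp(0) * (iy2 - iy1).clamp(0)
    area_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    union = area_a + area_b - inter
    iou = inter / (union + eps)
    # Smallest enclosing box C; GIoU penalizes the empty space in C.
    cx1, cy1 = torch.min(a[:, 0], b[:, 0]), torch.min(a[:, 1], b[:, 1])
    cx2, cy2 = torch.max(a[:, 2], b[:, 2]), torch.max(a[:, 3], b[:, 3])
    c_area = (cx2 - cx1) * (cy2 - cy1)
    giou = iou - (c_area - union) / (c_area + eps)
    return (1 - giou).mean()

pred = torch.tensor([[10., 10., 50., 50.]])
gt = torch.tensor([[20., 20., 60., 60.]])
loss = giou_loss(pred, gt)
```

Unlike MSE on box coordinates, this loss stays informative even when boxes do not overlap, which is the usual motivation for the swap.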
DSSESKN: A depthwise separable squeeze-and-excitation selective kernel network for art image classification
DOI: 10.4108/eai.26-11-2021.172304
Appears in: sis 22(36): e8
Author: Shaojie Zhang
Abstract: Image classification is one of the key technologies of content-based image retrieval and has been a focus of image content analysis research in recent years. Automatically analyzing image content through image processing and analysis in order to manage and retrieve images is the core of image classification. Faced with massive digital Chinese artworks, achieving effective management and retrieval has become an urgent problem. Traditional image retrieval is mainly based on image annotation, which suffers from a heavy workload and a lack of objectivity. In this paper, we propose a depthwise separable squeeze-and-excitation selective kernel network (DSSESKN) for art image classification. SKNet (Selective Kernel Network) is used to adaptively adjust the receptive field and extract the global and detailed features of the image, and SENet (Squeeze-and-Excitation Network) is used to enhance the channel features; the two are fused to build the DSSESKN. The convolution kernels on the DSSESKN branches extract global and local detail features of the input image. The feature maps on the branches are fused, and the fused feature maps are squeezed and activated. The resulting feature weights are mapped back onto the feature maps of the different branches, and feature fusion is carried out. Art images are then classified via depthwise separable convolution. Finally, we compare against other state-of-the-art classification methods, and the results show that the DSSESKN achieves better results.
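Of the two ingredients fused in DSSESKN, the squeeze-and-excitation step is the simpler to show. Below is a generic SE block, not the paper's exact module; the selective-kernel branching and depthwise separable classifier are omitted.

```python
import torch
import torch.nn as nn

class SEBlock(nn.Module):
    """Generic squeeze-and-excitation channel recalibration."""
    def __init__(self, ch, reduction=16):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Linear(ch, ch // reduction), nn.ReLU(),
            nn.Linear(ch // reduction, ch), nn.Sigmoid())

    def forward(self, x):
        w = self.fc(x.mean(dim=(2, 3)))   # squeeze: global average pool
        return x * w[:, :, None, None]    # excite: per-channel rescale

x = torch.randn(2, 64, 28, 28)
y = SEBlock(64)(x)   # same shape, channels reweighted
```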
Encoder-decoder structure based on conditional random field for building extraction in remote sensing images
DOI: 10.4108/eai.7-12-2021.172362
Appears in: sis 22(36): e9
Author: Yian Xu
Abstract: This article has been retracted; the retraction notice can be found here: http://dx.doi.org/10.4108/eai.8-4-2022.173801. Building extraction is applied in a wide range of fields, including urban planning, land use analysis and change detection. It is difficult to determine whether each pixel belongs to a building because of the large variation within the building category, so automatic building extraction from aerial images remains a challenging research topic. Although deep convolutional networks have many advantages, networks designed for image-level classification cannot be used directly for pixel-level building extraction. This is caused by pooling or convolution layers with strides larger than one, which reduce the spatial resolution of the feature maps; the output resolution then no longer matches the input, which does not meet the requirements of pixel-level building extraction. In this paper, we propose an encoder-decoder structure based on a conditional random field for building extraction in remote sensing images. The loss of boundary information through the unary potential in a traditional conditional random field is remedied with multi-scale building information, while local structure information is preserved. The network consists of two parts: an encoder sub-network, which compresses the spatial resolution of the input image to extract features, and a decoder sub-network, which restores the spatial resolution from those features and completes the building extraction. Experimental results show that the proposed framework is more accurate than the comparison methods on open datasets and extracts building information well in complex scenes.
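As a minimal illustration of the encoder-decoder idea (compress spatial resolution to extract features, then restore it for per-pixel prediction), here is a toy PyTorch sketch; the CRF refinement and the paper's actual topology are not reproduced, and all sizes are assumptions.

```python
import torch
import torch.nn as nn

class TinyEncoderDecoder(nn.Module):
    """Per-pixel building/background logits at the input resolution."""
    def __init__(self):
        super().__init__()
        self.encoder = nn.Sequential(   # halves the resolution twice
            nn.Conv2d(3, 16, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(16, 32, 3, stride=2, padding=1), nn.ReLU())
        self.decoder = nn.Sequential(   # restores the resolution
            nn.ConvTranspose2d(32, 16, 2, stride=2), nn.ReLU(),
            nn.ConvTranspose2d(16, 1, 2, stride=2))

    def forward(self, x):
        return self.decoder(self.encoder(x))

img = torch.randn(1, 3, 256, 256)
logits = TinyEncoderDecoder()(img)   # -> (1, 1, 256, 256)
```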
Modified dark channel prior based on multi-scale Retinex for power image defogging
DOI: 10.4108/eai.7-12-2021.172363
Appears in: sis 22(36): e10
Authors: Haiyan Yu, Jihong Wang
Abstract: This article has been retracted; the retraction notice can be found here: http://dx.doi.org/10.4108/eai.8-4-2022.173800. At present, defogging technologies fall roughly into two categories. The first is image-enhancement defogging without a physical model: it does not start from the essence of optical imaging but improves the visual effect of the image, its contrast and color, to achieve defogging. Common methods include histogram equalization, contrast enhancement, automatic color levels, Retinex theory and the wavelet transform. The second is based on the atmospheric scattering physical model: it analyzes the degradation mechanism of imaging, establishes a degradation model for the foggy image, and restores the fog-free scene using prior knowledge about the degradation process. This approach requires prior conditions as model parameters, but such priors are often difficult to obtain. In this paper, an adaptive power image defogging algorithm based on multi-scale Retinex and a modified dark channel is proposed. The Sobel operator detects the edges of the luminance component, and the multi-scale Retinex algorithm removes the luminance component. A dark channel prior refined by guided filtering yields a rough transmittance estimate, and the global atmospheric light value is selected by a quadtree subspace search. To keep the restored image from being dark overall and losing detail, the brightness is corrected by a two-dimensional gamma function, and finally the restored defogged image is obtained.
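The dark channel prior step is standard (He et al.): most haze-free patches contain some pixel that is dark in at least one color channel, so the local minimum over channels estimates haze density. A NumPy sketch of the dark channel and the rough transmittance estimate, with illustrative parameters; the guided filtering, Retinex, quadtree and gamma-correction stages are omitted.

```python
import numpy as np
from scipy.ndimage import minimum_filter

def dark_channel(img, patch=15):
    # Per-pixel min over the color channels, then a local minimum filter.
    return minimum_filter(img.min(axis=2), size=patch)

def estimate_transmission(img, atmos, omega=0.95, patch=15):
    # t(x) = 1 - omega * dark_channel(I / A)
    return 1.0 - omega * dark_channel(img / atmos, patch)

hazy = np.random.rand(240, 320, 3)      # toy normalized RGB image
atmos = np.array([0.9, 0.9, 0.92])      # assumed atmospheric light
t = np.clip(estimate_transmission(hazy, atmos), 0.1, 1.0)
recovered = (hazy - atmos) / t[..., None] + atmos   # J = (I - A)/t + A
```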
Experimental results show that the proposed algorithm effectively restores the details of foggy images, removes the fog completely, and produces clear, natural images with good color and brightness.

Parallax information fusion-based for dance moving image posture extraction
DOI: 10.4108/eai.16-12-2021.172437
Appears in: sis 22(36): e11
Authors: Yin Lyu, Lin Teng
Abstract: This article has been retracted; the retraction notice can be found here: http://dx.doi.org/10.4108/eai.8-4-2022.173799. Existing motion image posture contour extraction yields low definition and serious loss of detail. To solve this problem, we propose a novel dance moving image posture extraction method based on parallax information fusion. Firstly, the image with motion information is statistically analyzed using information fusion to locate the motion region. After noise reduction by morphological processing, the initial motion posture profile is obtained. The parallax between the control points and the center serves as the contraction and expansion forces of an active contour model, which helps the initial edge contour curve gradually approach the real edge contour. Finally, the contour of the current moving image is extracted from the sequence image contours to obtain the posture contour of the moving image. The experimental results show that the proposed method extracts the contour of the moving image clearly with little loss of detail, demonstrating strong practical performance and an ability to find the contour of the moving object effectively.

A multi-keyword parallel ciphertext retrieval scheme based on inverted index under the robot distributed system
DOI: 10.4108/eai.17-12-2021.172438
Appears in: sis 22(36): e12
Authors: Jiyue Wang, Xi Zhang, Yonggang Zhu
Abstract: This article has been retracted; the retraction notice can be found here: http://dx.doi.org/10.4108/eai.8-4-2022.173798.
The traditional ciphertext retrieval scheme has problems such as low retrieval performance, a single-keyword-only retrieval mode, and the resource limits of the traditional single-server architecture. At the same time, searchable encryption must balance data security against retrieval efficiency. In this paper, a multi-keyword parallel ciphertext retrieval system based on an inverted index is proposed. The system adopts different index encryption methods to improve ciphertext retrieval performance. By segmenting the ciphertext inverted index, block-wise retrieval of the inverted index is realized, which overcomes the resource limits of a single machine and improves retrieval efficiency. By exploiting the characteristics of distribution, the traditional single-machine retrieval architecture is extended and multi-keyword parallel retrieval is realized. The experimental results show that, compared with the SSE-1 scheme, the proposed scheme improves the efficiency of retrieval, updates and other operations while ensuring the security of the ciphertext data, achieves multi-keyword retrieval, and dynamically extends the distributed architecture of the system, improving the system's load capacity.
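Stripped of encryption and distribution, the core data structure is an ordinary inverted index with conjunctive (multi-keyword) lookup. A plaintext Python skeleton of that baseline follows; the index encryption, block segmentation and parallel sharding described in the abstract are not modeled.

```python
from collections import defaultdict

class InvertedIndex:
    def __init__(self):
        self.postings = defaultdict(set)   # keyword -> set of doc ids

    def add(self, doc_id, text):
        for word in text.lower().split():
            self.postings[word].add(doc_id)

    def search_all(self, keywords):
        """Docs containing every keyword (conjunctive multi-keyword query)."""
        sets = [self.postings.get(k.lower(), set()) for k in keywords]
        return set.intersection(*sets) if sets else set()

idx = InvertedIndex()
idx.add(1, "secure ciphertext retrieval")
idx.add(2, "parallel ciphertext index retrieval")
hits = idx.search_all(["ciphertext", "retrieval"])   # -> {1, 2}
```

In a searchable-encryption setting, the keywords and postings would be stored under some encryption scheme and the intersection performed over trapdoors rather than plaintext terms.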
A novel Gauss-Laplace operator based on multi-scale convolution for dance motion image enhancement
DOI: 10.4108/eai.17-12-2021.172439
Appears in: sis 22(36): e13
Authors: Dianhuai Shen, Xueying Jiang, Lin Teng
Abstract: This article has been retracted; the retraction notice can be found here: http://dx.doi.org/10.4108/eai.8-4-2022.173797. Traditional image enhancement methods suffer from low contrast and fuzzy details. Therefore, we propose a novel Gauss-Laplace operator based on multi-scale convolution for dance motion image enhancement. Firstly, multi-scale convolution is used to preprocess the image. Then, we improve the traditional Laplace edge detection operator and combine it with a Gaussian filter: the Gaussian filter smooths the image and suppresses noise, and edge detection is performed with the Laplace gradient edge detector. The detail image extracted by the Gauss-Laplace operator and the brightness-enhanced image are fused by linear weighting to reconstruct an image with clear detail edges and strong contrast. Experiments on detailed images from different scenes, compared with traditional methods, verify the effectiveness of the proposed method.
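The Laplacian-of-Gaussian ("Gauss-Laplace") detail extraction plus linear weighted fusion can be sketched in a few lines; the sigma and fusion weights below are illustrative assumptions, not values from the paper, and the multi-scale preprocessing is omitted.

```python
import numpy as np
from scipy.ndimage import gaussian_laplace

def enhance(img, sigma=2.0, alpha=0.6, beta=1.2):
    # Detail layer: Laplacian of Gaussian (smooth, then second derivative).
    detail = gaussian_laplace(img.astype(np.float64), sigma)
    # Linear weighted fusion of a brightness-boosted base and the detail
    # layer (subtracting LoG sharpens edges, as in unsharp masking).
    out = beta * img - alpha * detail
    return np.clip(out, 0, 255).astype(np.uint8)

frame = np.random.randint(0, 256, (240, 320), np.uint8)  # toy gray image
sharp = enhance(frame)
```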
CenterNet-SPP based on multi-feature fusion for basketball posture recognition
DOI: 10.4108/eai.5-1-2022.172780
Appears in: sis 22(36): e14
Author: Zhouxiang Jin
Abstract: This article has been retracted; the retraction notice can be found here: http://dx.doi.org/10.4108/eai.8-4-2022.173788. Aiming at the problem that existing posture recognition algorithms cannot fully reflect the dynamic characteristics of athletes' postures, this paper proposes a CenterNet-SPP model based on a multi-feature fusion algorithm for basketball posture recognition. Firstly, motion posture images are collected by an optical image collector, and a gray-scale transformation is performed to improve image quality. Then, the body contour and motion posture region are obtained using shadow elimination and the inter-frame difference method. Finally, the Radon transform and the discrete wavelet transform are used to extract the motion posture region and body contour, and the two complementary features are fused and fed into the CenterNet-SPP network for the final posture recognition. Experimental results show that the recognition accuracy of the proposed method is higher than that of other recent methods.

RETRACTED: Encoder-decoder structure based on conditional random field for building extraction in remote sensing images [EAI Endorsed Scal Inf Syst (2022), Online First]
DOI: 10.4108/eai.8-4-2022.173801
Appears in: sis 22(36): e15
Author: Yian Xu
Abstract: We, the Publisher, have retracted the following article: Yian Xu (2022). Encoder-decoder structure based on conditional random field for building extraction in remote sensing images. EAI Endorsed Scal Inf Syst. http://dx.doi.org/10.4108/eai.7-12-2021.172362. The authors submitted the article to the Special Issue on "Real-time image information processing with deep neural networks and data mining technologies", edited by Guest Editors Prof. Hang Li (Northeastern University, China) and Prof. Jochen Schiewe (HafenCity Universität Hamburg, Germany). Our Research Integrity Team audited the editorial process of this Special Issue and identified misconduct during the review process: the reviews were simple, generic, lacking rigour, and identical for every submission. Following the COPE guidelines, we decided to RETRACT this article because "Peer review manipulation suspected after publication". We informed the authors about this decision. The retracted article will remain available and has been watermarked as "RETRACTED".

RETRACTED: Modified dark channel prior based on multi-scale Retinex for power image defogging [EAI Endorsed Scal Inf Syst (2022), Online First]
DOI: 10.4108/eai.8-4-2022.173800
Appears in: sis 22(36): e16
Authors: Haiyan Yu, Jihong Wang
Abstract: We, the Publisher, have retracted the following article: Haiyan Yu, Jihong Wang (2022). Modified dark channel prior based on multi-scale Retinex for power image defogging. EAI Endorsed Scal Inf Syst. http://dx.doi.org/10.4108/eai.7-12-2021.172363. The authors submitted the article to the Special Issue on "Real-time image information processing with deep neural networks and data mining technologies", edited by Guest Editors Prof. Hang Li (Northeastern University, China) and Prof. Jochen Schiewe (HafenCity Universität Hamburg, Germany). Our Research Integrity Team audited the editorial process of this Special Issue and identified misconduct during the review process: the reviews were simple, generic, lacking rigour, and identical for every submission. Following the COPE guidelines, we decided to RETRACT this article because "Peer review manipulation suspected after publication". We informed the authors about this decision.
The retracted article will remain available and has been watermarked as "RETRACTED".

RETRACTED: Parallax information fusion-based for dance moving image posture extraction [EAI Endorsed Scal Inf Syst (2022), Online First]
DOI: 10.4108/eai.8-4-2022.173799
Appears in: sis 22(36): e17
Authors: Yin Lyu, Lin Teng
Abstract: We, the Publisher, have retracted the following article: Yin Lyu, Lin Teng (2022). Parallax information fusion-based for dance moving image posture extraction. EAI Endorsed Scal Inf Syst. http://dx.doi.org/10.4108/eai.16-12-2021.172437. The authors submitted the article to the Special Issue on "Real-time image information processing with deep neural networks and data mining technologies", edited by Guest Editors Prof. Hang Li (Northeastern University, China) and Prof. Jochen Schiewe (HafenCity Universität Hamburg, Germany). Our Research Integrity Team audited the editorial process of this Special Issue and identified misconduct during the review process: the reviews were simple, generic, lacking rigour, and identical for every submission. Following the COPE guidelines, we decided to RETRACT this article because "Peer review manipulation suspected after publication". We informed the authors about this decision. The retracted article will remain available and has been watermarked as "RETRACTED".

RETRACTED: A multi-keyword parallel ciphertext retrieval scheme based on inverted index under the robot Distributed system [EAI Endorsed Scal Inf Syst (2022), Online First]
DOI: 10.4108/eai.8-4-2022.173798
Appears in: sis 22(36): e18
Authors: Jiyue Wang, Xi Zhang, Yonggang Zhu
Abstract: We, the Publisher, have retracted the following article: Jiyue Wang, Xi Zhang, Yonggang Zhu (2022).
A multi-keyword parallel ciphertext retrieval scheme based on inverted index under the robot Distributed system. EAI Endorsed Scal Inf Syst. http://dx.doi.org/10.4108/eai.17-12-2021.172438. The authors submitted the article to the Special Issue on "Real-time image information processing with deep neural networks and data mining technologies", edited by Guest Editors Prof. Hang Li (Northeastern University, China) and Prof. Jochen Schiewe (HafenCity Universität Hamburg, Germany). Our Research Integrity Team audited the editorial process of this Special Issue and identified misconduct during the review process: the reviews were simple, generic, lacking rigour, and identical for every submission. Following the COPE guidelines, we decided to RETRACT this article because "Peer review manipulation suspected after publication". We informed the authors about this decision. The retracted article will remain available and has been watermarked as "RETRACTED".

RETRACTED: A novel Gauss-Laplace operator based on multi-scale convolution for dance motion image enhancement [EAI Endorsed Scal Inf Syst (2022), Online First]
DOI: 10.4108/eai.8-4-2022.173797
Appears in: sis 22(36): e19
Authors: Dianhuai Shen, Xueying Jiang, Lin Teng
Abstract: We, the Publisher, have retracted the following article: Dianhuai Shen, Xueying Jiang, Lin Teng (2022). A novel Gauss-Laplace operator based on multi-scale convolution for dance motion image enhancement. EAI Endorsed Scal Inf Syst. http://dx.doi.org/10.4108/eai.17-12-2021.172439. The authors submitted the article to the Special Issue on "Real-time image information processing with deep neural networks and data mining technologies", edited by Guest Editors Prof. Hang Li (Northeastern University, China) and Prof. Jochen Schiewe (HafenCity Universität Hamburg, Germany). Our Research Integrity Team audited the editorial process of this Special Issue and identified misconduct during the review process: the reviews were simple, generic, lacking rigour, and identical for every submission. Following the COPE guidelines, we decided to RETRACT this article because "Peer review manipulation suspected after publication". We informed the authors about this decision.
The retracted article will remain available and has been watermarked as "RETRACTED".

RETRACTED: CenterNet-SPP based on multi-feature fusion for basketball posture recognition [EAI Endorsed Scal Inf Syst (2022), Online First]
DOI: 10.4108/eai.8-4-2022.173788
Appears in: sis 22(36): e20
Author: Zhouxiang Jin
Abstract: We, the Publisher, have retracted the following article: Zhouxiang Jin (2022). CenterNet-SPP based on multi-feature fusion for basketball posture recognition. EAI Endorsed Scal Inf Syst. http://dx.doi.org/10.4108/eai.5-1-2022.172780. The authors submitted the article to the Special Issue on "Real-time image information processing with deep neural networks and data mining technologies", edited by Guest Editors Prof. Hang Li (Northeastern University, China) and Prof. Jochen Schiewe (HafenCity Universität Hamburg, Germany). Our Research Integrity Team audited the editorial process of this Special Issue and identified misconduct during the review process: the reviews were simple, generic, lacking rigour, and identical for every submission. Following the COPE guidelines, we decided to RETRACT this article because "Peer review manipulation suspected after publication". We informed the authors about this decision. The retracted article will remain available and has been watermarked as "RETRACTED".

Scope

As data volumes continue to increase and the ways information is dispersed across the globe continue to diversify, new scalable methods and structures are needed to efficiently process distributed and autonomous data.
Grid computing, P2P technology, distributed information retrieval technology and networking technology must all be merged to address the scalability concern.

Topics

The scope of the journal includes:
- Scalable distributed information systems
- Scalable grid information systems
- Parallel information processing and systems
- Web information searching and retrieval
- Data mining
- Content delivery networks (CDN)
- VLDB
- P2P systems
- Scalable mobile and wireless database systems
- Large scale sensor network systems
- Index compression methods
- Architectures for scalability
- Scalable information system applications
- Evaluation metrics for scalability
- Information security

Indexing

- Web of Science Core Collection
- Ei Compendex
- DOAJ
- CrossRef
- EBSCO Discovery Service
- OCLC Discovery Services
- EuroPub
- MIAR
- Elektronische Zeitschriftenbibliothek
- Publons
- UlrichsWEB
- Hellenic Academic Libraries Link
- Ingenta Connect
- Publicly Available Content Database (ProQuest)
- Advanced Technologies & Aerospace Database (ProQuest)
- SciTech Premium Collection (ProQuest)
- Google Scholar (https://scholar.google.sk/scholar?start=0&q=source:eai+source:endorsed+source:transactions+source:on+source:scalable+source:information+source:systems&hl=es&as_sdt=0,5&as_ylo=2018)

Special Issues

Call for Papers: Special issue on "Real-time image information processing with deep neural networks and data mining technologies" (https://escripts.eai.eu/publication/366)
- Manuscript submission deadline: 2022-02-28
- Notification of acceptance: 2022-04-15
- Submission of final revised paper: 2022-05-15
- Publication of special issue (tentative): 2022-06-15

Guest Editors: Prof. Hang Li (Northeastern University, China) and Prof. Jochen Schiewe (HafenCity Universität Hamburg, Germany)

Editorial Board

Editors-in-Chief:
- Hua Wang, Victoria University, Australia
- Xiaohua Jia, City University of Hong Kong

Editorial board:
- Manik Sharma, DAV University, India
- Ajay Kattepur, Tata Consultancy Services
- Aniello Castiglione, University of Salerno
- Chang Choi, Chosun University
- Cho-Li Wang, University of Hong Kong
- Daniel S. Katz, University of Chicago
- Fabrizio Silvestri, ISTI – CNR, Italy
- Hamed Taherdoost, Hamta Business Solution Sdn
- Heng Tao Shen, University of Queensland
- Houbing Song, Embry-Riddle Aeronautical University
- José Manuel Machado, University of Minho, Portugal
- Jose Merseguer, Universidad de Zaragoza
- Jie Li, University of Tsukuba
- Lin Yun, Harbin Engineering University
- Phan Cong Vinh, Nguyen Tat Thanh University
- Raj Gururajan, University of Southern Queensland
- Sherman Chow, Chinese University of Hong Kong
- Fábio Silva, University of Minho, Portugal
- Steve Beitzel, Telcordia
- Tzung-Pei Hong, National University of Kaohsiung, Kaohsiung City, Taiwan
- Wang-Chien Lee, The Pennsylvania State University
- Weili Wu, The University of Texas at Dallas
- Xueyan Tang, Nanyang Technological University
- Vijayakumar Ponnusamy, SRM University, India
- J Amudhavel, KL University, India
- Yingshu Li, Georgia State University
- Jerry Chun-Wei Lin, Western Norway University of Applied Sciences, Norway
- Karolj Skala, Ruđer Bošković Institute, Croatia
- Xiao-Zhi Gao, University of Eastern Finland, Finland
- Thaier Hayajneh, Fordham University, USA
- Chin-Ling Chen, Chaoyang University of Technology, Taiwan
- Nuno M. Garcia, Faculty of Sciences, University of Lisbon, Portugal
- Arianna D'Ulizia, Consiglio Nazionale delle Ricerche (CNR), Italy
- Robertas Damaševičius, Kaunas University of Technology (KTU), Lithuania
- Hiep Xuan Huynh, Can Tho University, Vietnam
- Ji Zhang, University of Southern Queensland, Australia
- Xiaohui Tao, University of Southern Queensland, Australia
- Ye Wang, National University of Defense Technology, China
- Nageswara Rao Moparthi, KL University, India
- Shuai Liu, Hunan Normal University, China
- Xiaoming Fu, Georg-August-University of Goettingen, Germany
- Zhisheng Huang, Vrije Universiteit Amsterdam
- Rose Quan, Northumbria University, UK
- Shi Dong, Zhoukou Normal University, China
- Limei Peng, Kyungpook National University, South Korea
- Hui Ma, Victoria University of Wellington, New Zealand
- Venkatesan Subramanian, Indian Institute of Information Technology – Allahabad, India
- Pon Harshavardhanan, VIT Bhopal University, India
- Manish Kumar, Indian Institute of Information Technology, Allahabad, India
- Muzammil Hussain, University of Management and Technology, Lahore, Pakistan
- Michael Bewong, Charles Sturt University, Australia
- Shabir Ahmad, Gachon University, Korea
- Vu Nguyen, University of Science, Vietnam
- Xiaodi Huang, Charles Sturt University, Australia
- Jianming Yong, University of Southern Queensland, Australia
- Yogeshwar Vijayakumar Navandar, National Institute of Technology, India
- Zhengyi Chai, Tiangong University, China
- Chuanlong Wang, Taiyuan Normal University, China
- Chin-Feng Lee, Chaoyang University of Technology, Taiwan
- Hsing-Chung Chen (Jack Chen), Asia University, Taiwan
- Wen-Yang Lin, National University of Kaohsiung, Taiwan
- Chun-Hao Chen, National Kaohsiung University of Science and Technology, Taiwan
- Mudasir Mohd, University of Kashmir, India
- BalaAnand Muthu, INTI International University, Malaysia
- Md Rafiqul Islam, Australian Institute of Higher Education, Australia
- Jin Wang, Institute of Applied Physics and Computational Mathematics, China
- Chandu Thota, University of Nicosia, Cyprus
- Haris M. Khalid, University of Dubai, UAE
- G. Reza Nasiri, Alzahra University, Tehran, Iran
- Siuly Siuly, Victoria University, Australia
- Bishnu Prasad Gautam, Kanazawa Gakuin University, Japan
- Sivaparthipan C B, Bharathiar University, India
- Ting-Chia Hsu, National Taiwan Normal University, Taiwan
- Punitha Palanisamy, Tagore IET, India
- Lakshmana Kumar R, Tagore IET, India
- Weiwei Jiang, Beijing University of Posts and Telecommunications, China

Journal Blurb

Visit the new journal website to submit and consult our contents: https://publications.eai.eu/index.php/sis/index

Publisher: EAI
ISSN: 2032-9407
Volume: 9
Published: 2022-04-28